From 47fd3ee25e13cc5add48ba2ed71f7ee964b9c3a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 3 Jun 2020 10:22:26 +0200 Subject: [PATCH 0001/2056] mwifiex: Fix firmware filename for sd8977 chipset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firmware for sd8977 chipset is distributed by Marvell package and also as part of the linux-firmware repository in filename sdsd8977_combo_v2.bin. This patch fixes mwifiex driver to load correct firmware file for sd8977. Fixes: 1a0f547831dce ("mwifiex: add support for sd8977 chipset") Signed-off-by: Pali Rohár Acked-by: Ganapathi Bhat Signed-off-by: Marcel Holtmann --- drivers/net/wireless/marvell/mwifiex/sdio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index 71cd8629b28e..0cac2296ed53 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -36,7 +36,7 @@ #define SD8897_DEFAULT_FW_NAME "mrvl/sd8897_uapsta.bin" #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" -#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin" +#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" #define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" From 2e1fcac52a9ea53e5a13a585d48a29a0fb4a9daf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 3 Jun 2020 10:22:27 +0200 Subject: [PATCH 0002/2056] mwifiex: Fix firmware filename for sd8997 chipset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firmware for sd8997 chipset is distributed by Marvell package and also as part of the linux-firmware repository in filename sdsd8997_combo_v4.bin. This patch fixes mwifiex driver to load correct firmware file for sd8997. Fixes: 6d85ef00d9dfe ("mwifiex: add support for 8997 chipset") Signed-off-by: Pali Rohár Acked-by: Ganapathi Bhat Signed-off-by: Marcel Holtmann --- drivers/net/wireless/marvell/mwifiex/sdio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h index 0cac2296ed53..8b476b007c5e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.h +++ b/drivers/net/wireless/marvell/mwifiex/sdio.h @@ -38,7 +38,7 @@ #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" #define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" -#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin" +#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin" #define BLOCK_MODE 1 #define BYTE_MODE 0 From dbec3af5f13b88a96e31f252957ae1a82484a923 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 3 Jun 2020 10:22:28 +0200 Subject: [PATCH 0003/2056] btmrvl: Fix firmware filename for sd8977 chipset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firmware for sd8977 chipset is distributed by Marvell package and also as part of the linux-firmware repository in filename sdsd8977_combo_v2.bin. This patch fixes btmrvl driver to load correct firmware file for sd8977.
Fixes: 8c57983bf7a79 ("Bluetooth: btmrvl: add support for sd8977 chipset") Signed-off-by: Pali Rohár Acked-by: Ganapathi Bhat Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 0f3a020703ab..7aa2c94720bc 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -328,7 +328,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { .helper = NULL, - .firmware = "mrvl/sd8977_uapsta.bin", + .firmware = "mrvl/sdsd8977_combo_v2.bin", .reg = &btmrvl_reg_8977, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -1831,6 +1831,6 @@ MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); From 00eb0cb36fad53315047af12e83c643d3a2c2e49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 3 Jun 2020 10:22:29 +0200 Subject: [PATCH 0004/2056] btmrvl: Fix firmware filename for sd8997 chipset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Firmware for sd8997 chipset is distributed by Marvell package and also as part of the linux-firmware repository in filename sdsd8997_combo_v4.bin. This patch fixes btmrvl driver to load correct firmware file for sd8997. Fixes: f0ef67485f591 ("Bluetooth: btmrvl: add sd8997 chipset support") Signed-off-by: Pali Rohár Acked-by: Ganapathi Bhat Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 7aa2c94720bc..4c7978cb1786 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -346,7 +346,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, - .firmware = "mrvl/sd8997_uapsta.bin", + .firmware = "mrvl/sdsd8997_combo_v4.bin", .reg = &btmrvl_reg_8997, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, @@ -1833,4 +1833,4 @@ MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); -MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin"); +MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin"); From 6fb00d4e94bc28c39fa077b03e6531956de87802 Mon Sep 17 00:00:00 2001 From: Manish Mandlik Date: Mon, 1 Jun 2020 18:42:51 -0700 Subject: [PATCH 0005/2056] Bluetooth: Check scan state before disabling during suspend Check current scan state by checking HCI_LE_SCAN flag and send scan disable command only if scan is already enabled.
Signed-off-by: Manish Mandlik Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1fc55685da62..1acf5b8e0910 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -998,8 +998,9 @@ static void hci_req_set_event_filter(struct hci_request *req) static void hci_req_config_le_suspend_scan(struct hci_request *req) { - /* Can't change params without disabling first */ - hci_req_add_le_scan_disable(req); + /* Before changing params disable scan if enabled */ + if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN)) + hci_req_add_le_scan_disable(req); /* Configure params and enable scanning */ hci_req_add_le_passive_scan(req); @@ -1065,8 +1066,9 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) page_scan = SCAN_DISABLED; hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan); - /* Disable LE passive scan */ - hci_req_add_le_scan_disable(&req); + /* Disable LE passive scan if enabled */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + hci_req_add_le_scan_disable(&req); /* Mark task needing completion */ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); From 30965242268c39eaaa4e845e0559cd2635d016a6 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 1 Jun 2020 14:20:59 +0000 Subject: [PATCH 0006/2056] Bluetooth: Removing noisy dbg message This patch removes a particularly noisy dbg message. The debug message isn't particularly interesting for debuggability so it was simply removed to reduce noise in dbg logs. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/af_bluetooth.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 3fd124927d4d..b751a7c1b20f 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -453,8 +453,6 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock, struct sock *sk = sock->sk; __poll_t mask = 0; - BT_DBG("sock %p, sk %p", sock, sk); - poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == BT_LISTEN) From 4c07a5d7aeb39f559b29aa58ec9a8a5ab4282cb0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 29 May 2020 12:59:48 +0300 Subject: [PATCH 0007/2056] Bluetooth: hci_qca: Fix an error pointer dereference When a function like devm_clk_get_optional() returns both error pointers on error and NULL, the NULL return means that the optional feature is deliberately disabled. It is a special sort of success and should not trigger an error message. The surrounding code should be written to check for NULL and not crash. On the other hand, if we encounter an error, then the probe function should clean up and return a failure. In this code, if devm_clk_get_optional() returns an error pointer then the kernel will crash inside the call to: clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); The error handling must be updated to prevent that.
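As an illustration of the semantics described above (a sketch only, not part of the patch, with 'clk' and 'err' as placeholder locals): devm_clk_get_optional() returns an ERR_PTR() on a real failure and NULL when the clock is simply absent, and the clk API treats a NULL clock as a harmless no-op, so a probe path can be written roughly as

	clk = devm_clk_get_optional(&serdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* genuine error: fail the probe */

	/* clk may be NULL here; clk_set_rate()/clk_prepare_enable()
	 * accept a NULL clock as a no-op, so no extra check is needed.
	 */
	err = clk_set_rate(clk, SUSCLK_RATE_32KHZ);
	if (err)
		return err;

	err = clk_prepare_enable(clk);
	if (err)
		return err;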
Fixes: 77131dfec6af ("Bluetooth: hci_qca: Replace devm_gpiod_get() with devm_gpiod_get_optional()") Signed-off-by: Dan Carpenter Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 81c3c38baba1..836949d827ee 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1962,17 +1962,17 @@ static int qca_serdev_probe(struct serdev_device *serdev) } qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); - if (!qcadev->susclk) { + if (IS_ERR(qcadev->susclk)) { dev_warn(&serdev->dev, "failed to acquire clk\n"); - } else { - err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); - if (err) - return err; - - err = clk_prepare_enable(qcadev->susclk); - if (err) - return err; + return PTR_ERR(qcadev->susclk); } + err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); + if (err) + return err; + + err = clk_prepare_enable(qcadev->susclk); + if (err) + return err; err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); if (err) { From 33e3fd99ec6cde00e0c04a1395ae535e0c00a844 Mon Sep 17 00:00:00 2001 From: Alexander Wetzel Date: Mon, 19 Aug 2019 20:05:40 +0200 Subject: [PATCH 0008/2056] iwlwifi: Extended Key ID support for mvm and dvm All iwlwifi cards below the 22000 series are able to handle multiple keyids per STA and allow the selection of the encryption key per MPDU. These are therefore fully compatible with the Extended Key ID support implementation in mac80211. Enable Extended Key ID support for all dvm cards and the mvm cards not using the incompatible new Tx API introduced for the 22000 series. Signed-off-by: Alexander Wetzel Link: https://lore.kernel.org/r/20190819180540.2855-1-alexander@wetzel-home.de Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 6512d25e3563..423d3c396b2d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -200,6 +200,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, iwl_leds_init(priv); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID); ret = ieee80211_register_hw(priv->hw); if (ret) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 77916231ff7d..ad308d8c5ebf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -543,6 +543,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + + /* The new Tx API does not allow to pass the key or keyid of a MPDU to + * the hw, preventing us to control which key(id) to use per MPDU. + * Till that's fixed we can't use Extended Key ID for the newer cards. 
+ */ + if (!iwl_mvm_has_new_tx_api(mvm)) + wiphy_ext_feature_set(hw->wiphy, + NL80211_EXT_FEATURE_EXT_KEY_ID); hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; From 66d0a3b57bbe0083080682fe74236174f3cadad7 Mon Sep 17 00:00:00 2001 From: Chris Down Date: Thu, 17 Oct 2019 10:48:41 -0400 Subject: [PATCH 0009/2056] iwlwifi: Don't IWL_WARN on FW reconfiguration IWL_WARN seems excessive here since this can happen during normal operation. Every time I connect to a new network with 8086:24fd I get this as KERN_WARNING on the console, which mildly distracts from other more pressing messages. For example: % sudo journalctl _TRANSPORT=kernel | grep -c 'FW already configured' 403 Signed-off-by: Chris Down Cc: Shahar S Matityahu Cc: Luca Coelho Cc: linux-kernel@vger.kernel.org Cc: linux-wireless@vger.kernel.org Link: https://lore.kernel.org/r/20191017144841.GA16393@chrisdown.name Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 4d3687cc83a4..7ea55cfdd8a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2554,7 +2554,7 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) return -EINVAL; if (fwrt->dump.conf != FW_DBG_INVALID) - IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", + IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n", fwrt->dump.conf); /* Send all HCMDs for configuring the FW debug */ From 7310dd3fbaa5a23f2b4a1c17ece2a0c7434a1d98 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Sat, 6 Jun 2020 09:53:06 -0700 Subject: [PATCH 0010/2056] Bluetooth: hci_qca: Simplify determination of serial clock on/off state from votes The serial clocks should be on when there is a vote for at least one of the clocks (RX or TX), and off when there is no 'on' vote. The current logic to determine the combined state is a bit redundant in the code paths for different types of votes, use a single statement in the common path instead. 
Signed-off-by: Matthias Kaehlcke Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 836949d827ee..997ddab26a33 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -289,25 +289,21 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) case HCI_IBS_TX_VOTE_CLOCK_ON: qca->tx_vote = true; qca->tx_votes_on++; - new_vote = true; break; case HCI_IBS_RX_VOTE_CLOCK_ON: qca->rx_vote = true; qca->rx_votes_on++; - new_vote = true; break; case HCI_IBS_TX_VOTE_CLOCK_OFF: qca->tx_vote = false; qca->tx_votes_off++; - new_vote = qca->rx_vote | qca->tx_vote; break; case HCI_IBS_RX_VOTE_CLOCK_OFF: qca->rx_vote = false; qca->rx_votes_off++; - new_vote = qca->rx_vote | qca->tx_vote; break; default: @@ -315,6 +311,8 @@ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) return; } + new_vote = qca->rx_vote | qca->tx_vote; + if (new_vote != old_vote) { if (new_vote) __serial_clock_on(hu->tty); From eff981f6579d5797d68d27afc0eede529ac8778a Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Fri, 5 Jun 2020 11:46:09 -0700 Subject: [PATCH 0011/2056] Bluetooth: hci_qca: Only remove TX clock vote after TX is completed qca_suspend() removes the vote for the UART TX clock after writing an IBS sleep request to the serial buffer. This is not a good idea since there is no guarantee that the request has been sent at this point. Instead remove the vote after successfully entering IBS sleep. This also fixes the issue of the vote being removed in case of an aborted suspend due to a failure of entering IBS sleep. Fixes: 41d5b25fed0a0 ("Bluetooth: hci_qca: add PM support") Signed-off-by: Matthias Kaehlcke Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 997ddab26a33..bce290bbf22d 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2081,8 +2081,6 @@ static int __maybe_unused qca_suspend(struct device *dev) qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->ibs_sent_slps++; - - qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off); break; case HCI_IBS_TX_ASLEEP: @@ -2110,8 +2108,10 @@ static int __maybe_unused qca_suspend(struct device *dev) qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS)); - if (ret > 0) + if (ret > 0) { + qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off); return 0; + } if (ret == 0) ret = -ETIMEDOUT; From e2a119cd84600f7e2d94d3e8781946b69b6e2376 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Fri, 5 Jun 2020 11:46:10 -0700 Subject: [PATCH 0012/2056] Bluetooth: hci_qca: Skip serdev wait when no transfer is pending qca_suspend() calls serdev_device_wait_until_sent() regardless of whether a transfer is pending. While it does no active harm since the function should return immediately, it makes the code more confusing. Add a flag to track whether a transfer is pending and only call serdev_device_wait_until_sent() if needed.
Signed-off-by: Matthias Kaehlcke Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index bce290bbf22d..d073cc20e402 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2048,6 +2048,7 @@ static int __maybe_unused qca_suspend(struct device *dev) struct hci_uart *hu = &qcadev->serdev_hu; struct qca_data *qca = hu->priv; unsigned long flags; + bool tx_pending = false; int ret = 0; u8 cmd; @@ -2081,6 +2082,7 @@ static int __maybe_unused qca_suspend(struct device *dev) qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->ibs_sent_slps++; + tx_pending = true; break; case HCI_IBS_TX_ASLEEP: @@ -2097,8 +2099,10 @@ static int __maybe_unused qca_suspend(struct device *dev) if (ret < 0) goto error; - serdev_device_wait_until_sent(hu->serdev, - msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + if (tx_pending) { + serdev_device_wait_until_sent(hu->serdev, + msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + } /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going * to sleep, so that the packet does not wake the system later. From 4da385f742d485fe3135a2711299cbf28ea24bc9 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Fri, 5 Jun 2020 11:46:11 -0700 Subject: [PATCH 0013/2056] Bluetooth: hci_qca: Refactor error handling in qca_suspend() If waiting for IBS sleep times out jump to the error handler, this is easier to read than multiple 'if' branches and a fall through to the error handler. Signed-off-by: Matthias Kaehlcke Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index d073cc20e402..28c34a15e029 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2107,18 +2107,16 @@ static int __maybe_unused qca_suspend(struct device *dev) /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going * to sleep, so that the packet does not wake the system later. */ - ret = wait_event_interruptible_timeout(qca->suspend_wait_q, qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS)); - - if (ret > 0) { - qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off); - return 0; + if (ret == 0) { + ret = -ETIMEDOUT; + goto error; } - if (ret == 0) - ret = -ETIMEDOUT; + qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off); + return 0; error: clear_bit(QCA_SUSPENDING, &qca->flags); From a9ec8423134a54c9f0ae8d4ef59e1e833ca917c2 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Fri, 5 Jun 2020 13:50:15 -0700 Subject: [PATCH 0014/2056] Bluetooth: Allow suspend even when preparation has failed It is preferable to allow suspend even when Bluetooth has problems preparing for sleep. When Bluetooth fails to finish preparing for suspend, log the error and allow the suspend notifier to continue instead. To also make it clearer why suspend failed, change bt_dev_dbg to bt_dev_err when handling the suspend timeout. 
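For context on why the return value matters here (an illustrative sketch only; 'prepare_failed' is a placeholder condition, not driver code): a PM notifier that returns an errno wrapped by notifier_from_errno() from PM_SUSPEND_PREPARE causes the suspend attempt to be aborted, while NOTIFY_STOP or NOTIFY_DONE lets it proceed:

	static int pm_notifier_cb(struct notifier_block *nb,
				  unsigned long action, void *data)
	{
		if (action == PM_SUSPEND_PREPARE && prepare_failed)
			return notifier_from_errno(-EBUSY);	/* suspend is aborted */

		return NOTIFY_STOP;	/* suspend goes ahead */
	}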
Fixes: dd522a7429b07e ("Bluetooth: Handle LE devices during suspend") Reported-by: Len Brown Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index dbe2d79f233f..83ce665d3cbf 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3289,10 +3289,10 @@ static int hci_suspend_wait_event(struct hci_dev *hdev) WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); if (ret == 0) { - bt_dev_dbg(hdev, "Timed out waiting for suspend"); + bt_dev_err(hdev, "Timed out waiting for suspend events"); for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { if (test_bit(i, hdev->suspend_tasks)) - bt_dev_dbg(hdev, "Bit %d is set", i); + bt_dev_err(hdev, "Suspend timeout bit: %d", i); clear_bit(i, hdev->suspend_tasks); } @@ -3360,12 +3360,15 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, ret = hci_change_suspend_state(hdev, BT_RUNNING); } - /* If suspend failed, restore it to running */ - if (ret && action == PM_SUSPEND_PREPARE) - hci_change_suspend_state(hdev, BT_RUNNING); - done: - return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP; + /* We always allow suspend even if suspend preparation failed and + * attempt to recover in resume. + */ + if (ret) + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", + action, ret); + + return NOTIFY_STOP; } /* Alloc HCI device */ From 88ad368a84d8df81c7b832a6aba3487c63019b29 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Tue, 26 Nov 2019 13:47:44 -0800 Subject: [PATCH 0015/2056] iwlwifi: mvm: Fix avg-power report On AX200, the average power was showing positive instead of negative, but otherwise matched the expected RSSI. Flip the value to negative before giving to mac80211. Signed-off-by: Ben Greear Link: https://lore.kernel.org/r/20191126214744.1283-1-greearb@candelatech.com [removed unnecessary check and some unnecessary parentheses] Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ad308d8c5ebf..9374c85c5caf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4911,7 +4911,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if (mvmsta->avg_energy) { - sinfo->signal_avg = mvmsta->avg_energy; + sinfo->signal_avg = -(s8)mvmsta->avg_energy; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } From 0f736f3bec8db0603f625c8c377a5010a5a1b066 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Mon, 10 Feb 2020 14:58:17 +0100 Subject: [PATCH 0016/2056] iwlwifi: fix config variable name in comment The correct variable name was replaced here by mistake by commit ab27926d9e4a ("iwlwifi: fix devices with PCI Device ID 0x34F0 and 11ac RF modules"). 
Signed-off-by: Luca Ceresoli Link: https://lore.kernel.org/r/20200210135817.31994-1-luca@lucaceresoli.net Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 244899f3f3bf..e27c13263a23 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -641,6 +641,6 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0; extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long; extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0; extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0; -#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */ +#endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ From ce475a8fb2229ffdb14ae011625097b3d3a8e39d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 20 Apr 2020 23:24:49 +0100 Subject: [PATCH 0017/2056] iwlwifi: mvm: remove redundant assignment to variable ret The variable ret is being assigned with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200420222449.99481-1-colin.king@canonical.com Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index a7264b282d79..86b2ebb5d5fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -603,7 +603,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, struct iwl_lq_sta *lq_data, u8 tid, struct ieee80211_sta *sta) { - int ret = -EAGAIN; + int ret; IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); From a738e766e3ed92c4ee5ec967777276b5ce11dd2c Mon Sep 17 00:00:00 2001 From: Evan Green Date: Thu, 4 Jun 2020 10:59:11 -0700 Subject: [PATCH 0018/2056] ath10k: Acquire tx_lock in tx error paths ath10k_htt_tx_free_msdu_id() has a lockdep assertion that htt->tx_lock is held. Acquire the lock in a couple of error paths when calling that function to ensure this condition is met. 
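As a reminder of what such an assertion looks like (a sketch, not a quote of the driver source): a callee can document and enforce its locking contract with lockdep, so every caller, including error paths, must hold the lock:

	void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
	{
		lockdep_assert_held(&htt->tx_lock);	/* warns if the caller does not hold tx_lock */
		/* ... release the msdu_id ... */
	}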
Fixes: 6421969f248fd ("ath10k: refactor tx pending management") Fixes: e62ee5c381c59 ("ath10k: Add support for htt_data_tx_desc_64 descriptor") Signed-off-by: Evan Green Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604105901.1.I5b8b0c7ee0d3e51a73248975a9da61401b8f3900@changeid --- drivers/net/wireless/ath/ath10k/htt_tx.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 4fd10ac3a941..bbe869575855 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -1591,7 +1591,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } @@ -1798,7 +1800,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt, err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_msdu_id: + spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); + spin_unlock_bh(&htt->tx_lock); err: return res; } From 1141215c745b152da6cb632f3532f7384ced8410 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:06 +0200 Subject: [PATCH 0019/2056] ath: fix wiki website url In some ath files, the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200605154112.16277-4-f.suligoi@asem.it --- drivers/net/wireless/ath/Kconfig | 4 ++-- drivers/net/wireless/ath/ath9k/Kconfig | 5 +++-- drivers/net/wireless/ath/ath9k/hw.c | 2 +- drivers/net/wireless/ath/carl9170/Kconfig | 2 +- drivers/net/wireless/ath/carl9170/usb.c | 2 +- drivers/net/wireless/ath/wil6210/Kconfig | 2 +- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig index b10972b6cba4..0138d415f4e1 100644 --- a/drivers/net/wireless/ath/Kconfig +++ b/drivers/net/wireless/ath/Kconfig @@ -15,11 +15,11 @@ config WLAN_VENDOR_ATH For more information and documentation on this module you can visit: - http://wireless.kernel.org/en/users/Drivers/ath + https://wireless.wiki.kernel.org/en/users/Drivers/ath For information on all Atheros wireless drivers visit: - http://wireless.kernel.org/en/users/Drivers/Atheros + https://wireless.wiki.kernel.org/en/users/Drivers/Atheros if WLAN_VENDOR_ATH diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 78620c6b64a2..28b46ca9ee3b 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -34,7 +34,7 @@ config ATH9K APs that come with these cards refer to ath9k wiki products page: - http://wireless.kernel.org/en/users/Drivers/ath9k/products + https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/products If you choose to build a module, it'll be called ath9k. @@ -185,7 +185,8 @@ config ATH9K_HTC Support for Atheros HTC based cards. Chipsets supported: AR9271 - For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc + For more information: + https://wireless.wiki.kernel.org/en/users/Drivers/ath9k_htc The built module will be ath9k_htc. 
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 052deffb4c9d..8c97db73e34c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2410,7 +2410,7 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask) * of tests. The testing requirements are going to be documented. Desired * test requirements are documented at: * - * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs + * https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/dfs * * Once a new chipset gets properly tested an individual commit can be used * to document the testing for DFS for that chipset. diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig index b1bce7aad399..b2d760873992 100644 --- a/drivers/net/wireless/ath/carl9170/Kconfig +++ b/drivers/net/wireless/ath/carl9170/Kconfig @@ -10,7 +10,7 @@ config CARL9170 It needs a special firmware (carl9170-1.fw), which can be downloaded from our wiki here: - + If you choose to build a module, it'll be called carl9170. diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 486957a04bd1..ead79335823a 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -61,7 +61,7 @@ MODULE_ALIAS("arusb_lnx"); * Note: * * Always update our wiki's device list (located at: - * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ), + * https://wireless.wiki.kernel.org/en/users/Drivers/ar9170/devices ), * whenever you add a new device. */ static const struct usb_device_id carl9170_usb_ids[] = { diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig index 0d1a8dab30ed..8c9dd673b9e7 100644 --- a/drivers/net/wireless/ath/wil6210/Kconfig +++ b/drivers/net/wireless/ath/wil6210/Kconfig @@ -10,7 +10,7 @@ config WIL6210 wil6210 chip by Wilocity. It supports operation on the 60 GHz band, covered by the IEEE802.11ad standard. - http://wireless.kernel.org/en/users/Drivers/wil6210 + https://wireless.wiki.kernel.org/en/users/Drivers/wil6210 If you choose to build it as a module, it will be called wil6210 From 9cec1d547cb739f8bac2de833487116e0fe896d2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 30 Apr 2020 23:30:44 +0200 Subject: [PATCH 0020/2056] iwlwifi: mvm: fix gcc-10 zero-length-bounds warning gcc-10 complains when a zero-length array is accessed: drivers/net/wireless/intel/iwlwifi/mvm/tx.c: In function 'iwl_mvm_rx_ba_notif': drivers/net/wireless/intel/iwlwifi/mvm/tx.c:1929:17: warning: array subscript 9 is outside the bounds of an interior zero-length array 'struct iwl_mvm_compressed_ba_tfd[0]' [-Wzero-length-bounds] 1929 | &ba_res->tfd[i]; | ~~~~~~~~~~~^~~ In file included from drivers/net/wireless/intel/iwlwifi/mvm/../fw/api/tdls.h:68, from drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h:68, from drivers/net/wireless/intel/iwlwifi/mvm/sta.h:73, from drivers/net/wireless/intel/iwlwifi/mvm/mvm.h:83, from drivers/net/wireless/intel/iwlwifi/mvm/tx.c:72: drivers/net/wireless/intel/iwlwifi/mvm/../fw/api/tx.h:769:35: note: while referencing 'tfd' 769 | struct iwl_mvm_compressed_ba_tfd tfd[0]; | ^~~ Change this structure to use a flexible-array member for 'tfd' instead, along with the various structures using an zero-length ieee80211_hdr array that do not show warnings today but might be affected by similar issues in the future. 
Fixes: 6f68cc367ab6 ("iwlwifi: api: annotate compressed BA notif array sizes") Fixes: c46e7724bfe9 ("iwlwifi: mvm: support new BA notification response") Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20200430213101.135134-3-arnd@arndb.de Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index f1d1fe96fecc..82d59b5a5f8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -293,7 +293,7 @@ struct iwl_tx_cmd { __le16 pm_frame_timeout; __le16 reserved4; u8 payload[0]; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_6 */ struct iwl_dram_sec_info { @@ -319,7 +319,7 @@ struct iwl_tx_cmd_gen2 { __le32 flags; struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_7 */ /** @@ -342,7 +342,7 @@ struct iwl_tx_cmd_gen3 { struct iwl_dram_sec_info dram_info; __le32 rate_n_flags; __le64 ttl; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* TX_CMD_API_S_VER_8 */ /* @@ -766,8 +766,8 @@ struct iwl_mvm_compressed_ba_notif { __le32 tx_rate; __le16 tfd_cnt; __le16 ra_tid_cnt; - struct iwl_mvm_compressed_ba_tfd tfd[0]; struct iwl_mvm_compressed_ba_ratid ra_tid[0]; + struct iwl_mvm_compressed_ba_tfd tfd[]; } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ /** @@ -784,7 +784,7 @@ struct iwl_mac_beacon_cmd_v6 { __le32 template_id; __le32 tim_idx; __le32 tim_size; - struct ieee80211_hdr frame[0]; + struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** @@ -805,7 +805,7 @@ struct iwl_mac_beacon_cmd_v7 { __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; - struct ieee80211_hdr frame[0]; + struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ enum iwl_mac_beacon_flags { @@ -840,7 +840,7 @@ struct iwl_mac_beacon_cmd { __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; - struct ieee80211_hdr frame[0]; + struct ieee80211_hdr frame[]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */ struct iwl_beacon_notif { From f98aa80ff78c34fe328eb9cd3e2cc3058e42bcfd Mon Sep 17 00:00:00 2001 From: Venkata Lakshmi Narayana Gubba Date: Tue, 9 Jun 2020 21:27:08 +0530 Subject: [PATCH 0021/2056] Bluetooth: hci_qca: Bug fix during SSR timeout Due to race conditions between qca_hw_error and qca_controller_memdump during SSR timeout, the same pointer is freed twice. This results in a double free. Now a lock is acquired before checking the status of the SSR state. Fixes: d841502c79e3 ("Bluetooth: hci_qca: Collect controller memory dump during SSR") Signed-off-by: Venkata Lakshmi Narayana Gubba Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 28c34a15e029..f3fde99970c1 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -981,8 +981,11 @@ static void qca_controller_memdump(struct work_struct *work) while ((skb = skb_dequeue(&qca->rx_memdump_q))) { mutex_lock(&qca->hci_memdump_lock); - /* Skip processing the received packets if timeout detected.
*/ - if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) { + /* Skip processing the received packets if timeout detected + * or memdump collection completed. + */ + if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || + qca->memdump_state == QCA_MEMDUMP_COLLECTED) { mutex_unlock(&qca->hci_memdump_lock); return; } @@ -1483,8 +1486,6 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; - struct qca_memdump_data *qca_memdump = qca->qca_memdump; - char *memdump_buf = NULL; set_bit(QCA_HW_ERROR_EVENT, &qca->flags); bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); @@ -1507,19 +1508,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code) qca_wait_for_dump_collection(hdev); } + mutex_lock(&qca->hci_memdump_lock); if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout"); - mutex_lock(&qca->hci_memdump_lock); - if (qca_memdump) - memdump_buf = qca_memdump->memdump_buf_head; - vfree(memdump_buf); - kfree(qca_memdump); - qca->qca_memdump = NULL; + if (qca->qca_memdump) { + vfree(qca->qca_memdump->memdump_buf_head); + kfree(qca->qca_memdump); + qca->qca_memdump = NULL; + } qca->memdump_state = QCA_MEMDUMP_TIMEOUT; cancel_delayed_work(&qca->ctrl_memdump_timeout); - skb_queue_purge(&qca->rx_memdump_q); - mutex_unlock(&qca->hci_memdump_lock); + } + mutex_unlock(&qca->hci_memdump_lock); + + if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || + qca->memdump_state == QCA_MEMDUMP_COLLECTED) { cancel_work_sync(&qca->ctrl_memdump_evt); + skb_queue_purge(&qca->rx_memdump_q); } clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); From d74abe2138b39ae34c274f5ba5dec47408036c0b Mon Sep 17 00:00:00 2001 From: Chethan T N Date: Mon, 8 Jun 2020 17:57:46 +0530 Subject: [PATCH 0022/2056] Bluetooth: btusb: Add support to read Intel debug feature The command shall read the Intel controller supported debug feature. Based on the supported features additional debug configuration shall be enabled. Signed-off-by: Chethan T N Signed-off-by: Ps AyappadasX Signed-off-by: Kiran K Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btintel.c | 32 ++++++++++++++++++++++++++++++++ drivers/bluetooth/btintel.h | 15 +++++++++++++++ drivers/bluetooth/btusb.c | 6 ++++++ 3 files changed, 53 insertions(+) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 6a0e2c5a8beb..105ab28836b8 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -754,6 +754,38 @@ void btintel_reset_to_bootloader(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(btintel_reset_to_bootloader); +int btintel_read_debug_features(struct hci_dev *hdev, + struct intel_debug_features *features) +{ + struct sk_buff *skb; + u8 page_no = 1; + + /* Intel controller supports two pages, each page is of 128-bit + * feature bit mask. And each bit defines specific feature support + */ + skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Reading supported features failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != (sizeof(features->page1) + 3)) { + bt_dev_err(hdev, "Supported features event size mismatch"); + kfree_skb(skb); + return -EILSEQ; + } + + memcpy(features->page1, skb->data + 3, sizeof(features->page1)); + + /* Read the supported features page2 if required in future. 
+ */ + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL_GPL(btintel_read_debug_features); + MODULE_AUTHOR("Marcel Holtmann "); MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index a69ea8a87b9b..7cd813fc5db4 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -62,6 +62,10 @@ struct intel_reset { __le32 boot_param; } __packed; +struct intel_debug_features { + __u8 page1[16]; +} __packed; + #if IS_ENABLED(CONFIG_BT_INTEL) int btintel_check_bdaddr(struct hci_dev *hdev); @@ -88,6 +92,10 @@ int btintel_read_boot_params(struct hci_dev *hdev, int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw, u32 *boot_param); void btintel_reset_to_bootloader(struct hci_dev *hdev); + +int btintel_read_debug_features(struct hci_dev *hdev, + struct intel_debug_features *features); + #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -186,4 +194,11 @@ static inline int btintel_download_firmware(struct hci_dev *dev, static inline void btintel_reset_to_bootloader(struct hci_dev *hdev) { } + +static inline int btintel_read_debug_features(struct hci_dev *hdev, + struct intel_debug_features *features) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5f022e9cf667..e12d88e19098 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2267,6 +2267,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) struct btusb_data *data = hci_get_drvdata(hdev); struct intel_version ver; struct intel_boot_params params; + struct intel_debug_features features; const struct firmware *fw; u32 boot_param; char fwname[64]; @@ -2542,6 +2543,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) */ btintel_load_ddc_config(hdev, fwname); + /* Read the Intel supported features and if new exception formats + * supported, need to load the additional DDC config to enable. 
+ */ + btintel_read_debug_features(hdev, &features); + /* Read the Intel version information after loading the FW */ err = btintel_read_version(hdev, &ver); if (err) From c453b10c2b28580849e382565c2e8a18ecc8bc24 Mon Sep 17 00:00:00 2001 From: Chethan T N Date: Mon, 8 Jun 2020 17:57:47 +0530 Subject: [PATCH 0023/2056] Bluetooth: btusb: Configure Intel debug feature based on available support This patch shall enable the Intel telemetry exception format based on the supported features Signed-off-by: Chethan T N Signed-off-by: Ps AyappadasX Signed-off-by: Kiran K Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btintel.c | 27 +++++++++++++++++++++++++++ drivers/bluetooth/btintel.h | 10 ++++++++-- drivers/bluetooth/btusb.c | 3 +++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 105ab28836b8..5fa5be3c5598 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -786,6 +786,33 @@ int btintel_read_debug_features(struct hci_dev *hdev, } EXPORT_SYMBOL_GPL(btintel_read_debug_features); +int btintel_set_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features) +{ + u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }; + struct sk_buff *skb; + + if (!features) + return -EINVAL; + + if (!(features->page1[0] & 0x3f)) { + bt_dev_info(hdev, "Telemetry exception format not supported"); + return 0; + } + + skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL_GPL(btintel_set_debug_features); + MODULE_AUTHOR("Marcel Holtmann "); MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION); MODULE_VERSION(VERSION); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 7cd813fc5db4..08e20606fb58 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -92,10 +92,10 @@ int btintel_read_boot_params(struct hci_dev *hdev, int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw, u32 *boot_param); void btintel_reset_to_bootloader(struct hci_dev *hdev); - int btintel_read_debug_features(struct hci_dev *hdev, struct intel_debug_features *features); - +int btintel_set_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features); #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -201,4 +201,10 @@ static inline int btintel_read_debug_features(struct hci_dev *hdev, return -EOPNOTSUPP; } +static inline int btintel_set_debug_features(struct hci_dev *hdev, + const struct intel_debug_features *features) +{ + return -EOPNOTSUPP; +} + #endif diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e12d88e19098..c7cc8e594166 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2548,6 +2548,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) */ btintel_read_debug_features(hdev, &features); + /* Set DDC mask for available debug features */ + btintel_set_debug_features(hdev, &features); + /* Read the Intel version information after loading the FW */ err = btintel_read_version(hdev, &ver); if (err) From f12694634153e6698b9ba01ede27e6cdc41e295b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 6 May 2020 21:42:17 +0800 Subject: [PATCH 0024/2056] iwlwifi: mvm: Remove unused inline function 
iwl_mvm_tid_to_ac_queue commit cfbc6c4c5b91 ("iwlwifi: mvm: support mac80211 TXQs model") left behind this, remove it. Signed-off-by: YueHaibing Link: https://lore.kernel.org/r/20200506134217.49760-1-yuehaibing@huawei.com Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index fee01cbbd3ac..53af3c910eae 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1369,14 +1369,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, return ret; } -static inline u8 iwl_mvm_tid_to_ac_queue(int tid) -{ - if (tid == IWL_MAX_TID_COUNT) - return IEEE80211_AC_VO; /* MGMT */ - - return tid_to_mac80211_ac[tid]; -} - void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, From 45c21a0e5ba461b5fe537ac20d6506f63995ce2d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 7 May 2020 13:55:38 -0500 Subject: [PATCH 0025/2056] iwlwifi: Replace zero-length array with flexible-array The current codebase makes use of the zero-length array language extension to the C90 standard, but the preferred mechanism to declare variable-length types such as these ones is a flexible array member[1][2], introduced in C99: struct foo { int stuff; struct boo array[]; }; By making use of the mechanism above, we will get a compiler warning in case the flexible array does not occur last in the structure, which will help us prevent some kind of undefined behavior bugs from being inadvertently introduced[3] to the codebase from now on. Also, notice that, dynamic memory allocations won't be affected by this change: "Flexible array members have incomplete type, and so the sizeof operator may not be applied. As a quirk of the original implementation of zero-length arrays, sizeof evaluates to zero."[1] sizeof(flexible-array-member) triggers a warning because flexible array members have incomplete type[1]. There are some instances of code in which the sizeof operator is being incorrectly/erroneously applied to zero-length arrays and the result is zero. Such instances may be hiding some bugs. So, this work (flexible-array member conversions) will also help to get completely rid of those sorts of issues. This issue was found with the help of Coccinelle. [1] https://gcc.gnu.org/onlinedocs/gcc/Zero-Length.html [2] https://github.com/KSPP/linux/issues/21 [3] commit 76497732932f ("cxgb3/l2t: Fix undefined behaviour") Signed-off-by: Gustavo A. R. 
Silva Link: https://lore.kernel.org/r/20200507185538.GA14674@embeddedor Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/commands.h | 12 ++++++------ drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/fw/debugfs.c | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h | 2 +- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 0f4be4be181c..fdcc1292a92b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1023,7 +1023,7 @@ struct iwl_wep_cmd { u8 global_key_type; u8 flags; u8 reserved; - struct iwl_wep_key key[0]; + struct iwl_wep_key key[]; } __packed; #define WEP_KEY_WEP_TYPE 1 @@ -1305,7 +1305,7 @@ struct iwl_tx_cmd { * length is 26 or 30 bytes, followed by payload data */ u8 payload[0]; - struct ieee80211_hdr hdr[0]; + struct ieee80211_hdr hdr[]; } __packed; /* @@ -2380,7 +2380,7 @@ struct iwl_scan_cmd { * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION) * before requesting another scan. */ - u8 data[0]; + u8 data[]; } __packed; /* Can abort will notify by complete notification with abort status. */ @@ -2475,7 +2475,7 @@ struct iwl_tx_beacon_cmd { __le16 tim_idx; u8 tim_size; u8 reserved1; - struct ieee80211_hdr frame[0]; /* beacon frame */ + struct ieee80211_hdr frame[]; /* beacon frame */ } __packed; /****************************************************************************** @@ -3188,7 +3188,7 @@ struct iwl_calib_hdr { struct iwl_calib_cmd { struct iwl_calib_hdr hdr; - u8 data[0]; + u8 data[]; } __packed; struct iwl_calib_xtal_freq_cmd { @@ -3216,7 +3216,7 @@ struct iwl_calib_temperature_offset_v2_cmd { /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */ struct iwl_calib_chain_noise_reset_cmd { struct iwl_calib_hdr hdr; - u8 data[0]; + u8 data[]; }; /* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index fd719c37428c..b6c31f01ea9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -361,7 +361,7 @@ struct iwl_mcc_update_resp_v3 { __le16 time; __le16 geo_info; __le32 n_channels; - __le32 channels[0]; + __le32 channels[]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */ /** @@ -390,7 +390,7 @@ struct iwl_mcc_update_resp { u8 source_id; u8 reserved[3]; __le32 n_channels; - __le32 channels[0]; + __le32 channels[]; } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */ /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 6e72c27f527b..267ad4eddb5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -260,7 +260,7 @@ struct hcmd_write_data { __be32 cmd_id; __be32 flags; __be16 length; - u8 data[0]; + u8 data[]; } __packed; static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 3008a5246be8..b35b8920941b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -175,7 +175,7 @@ void iwl_opmode_deregister(const char *name); struct iwl_op_mode { const struct 
iwl_op_mode_ops *ops; - char op_mode_specific[0] __aligned(sizeof(void *)); + char op_mode_specific[] __aligned(sizeof(void *)); }; static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index a301e2484cdb..34788e7afc7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -1006,7 +1006,7 @@ struct iwl_trans { /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ - char trans_specific[0] __aligned(sizeof(void *)); + char trans_specific[] __aligned(sizeof(void *)); }; const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); From e00c6d8d491b3b8b93eeac1aae51c78a5217cde6 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:09 +0200 Subject: [PATCH 0026/2056] net: wireless: intel: fix wiki website url In some Intel files, the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Link: https://lore.kernel.org/r/20200605154112.16277-7-f.suligoi@asem.it Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 +- drivers/net/wireless/intel/iwlwifi/Kconfig | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index da6d4202611c..ad9d7e702a29 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -1415,7 +1415,7 @@ il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) /* * mac80211 queues, ACs, hardware queues, FIFOs. * - * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues + * Cf. https://wireless.wiki.kernel.org/en/developers/Documentation/mac80211/queues * * Mac80211 uses the following numbers, which we get as from it * by way of skb_get_queue_mapping(skb): diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 091d621ad25f..dc69cdb84d53 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -31,7 +31,7 @@ config IWLWIFI In order to use this driver, you will need a firmware image for it. You can obtain the microcode from: - . + . The firmware is typically installed in /lib/firmware. You can look in the hotplug script /etc/hotplug/firmware.agent to From c5625abaf8266307d2237a75143b27be7afafcdf Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:02 +0300 Subject: [PATCH 0027/2056] ath11k: add 6G frequency list supported by driver This patch adds support for 6GHz frequency listing. 
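For reference (not part of the patch), the 6 GHz center frequencies in the channel table added below follow the standard mapping

	freq [MHz] = 5950 + 5 * channel

so, for example, channel 1 maps to 5955 MHz and channel 233 to 7115 MHz, matching the CHAN6G() entries in the diff.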
Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-2-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/mac.c | 71 +++++++++++++++++++++++++++ 1 file changed, 71 insertions(+) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 2836a0f197ab..b55a374c845f 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -33,6 +33,15 @@ .max_power = 30, \ } +#define CHAN6G(_channel, _freq, _flags) { \ + .band = NL80211_BAND_6GHZ, \ + .hw_value = (_channel), \ + .center_freq = (_freq), \ + .flags = (_flags), \ + .max_antenna_gain = 0, \ + .max_power = 30, \ +} + /* frame mode values are mapped as per enum ath11k_hw_txrx_mode */ static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI; module_param_named(frame_mode, ath11k_frame_mode, uint, 0644); @@ -86,6 +95,68 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = { CHAN5G(173, 5865, 0), }; +static const struct ieee80211_channel ath11k_6ghz_channels[] = { + CHAN6G(1, 5955, 0), + CHAN6G(5, 5975, 0), + CHAN6G(9, 5995, 0), + CHAN6G(13, 6015, 0), + CHAN6G(17, 6035, 0), + CHAN6G(21, 6055, 0), + CHAN6G(25, 6075, 0), + CHAN6G(29, 6095, 0), + CHAN6G(33, 6115, 0), + CHAN6G(37, 6135, 0), + CHAN6G(41, 6155, 0), + CHAN6G(45, 6175, 0), + CHAN6G(49, 6195, 0), + CHAN6G(53, 6215, 0), + CHAN6G(57, 6235, 0), + CHAN6G(61, 6255, 0), + CHAN6G(65, 6275, 0), + CHAN6G(69, 6295, 0), + CHAN6G(73, 6315, 0), + CHAN6G(77, 6335, 0), + CHAN6G(81, 6355, 0), + CHAN6G(85, 6375, 0), + CHAN6G(89, 6395, 0), + CHAN6G(93, 6415, 0), + CHAN6G(97, 6435, 0), + CHAN6G(101, 6455, 0), + CHAN6G(105, 6475, 0), + CHAN6G(109, 6495, 0), + CHAN6G(113, 6515, 0), + CHAN6G(117, 6535, 0), + CHAN6G(121, 6555, 0), + CHAN6G(125, 6575, 0), + CHAN6G(129, 6595, 0), + CHAN6G(133, 6615, 0), + CHAN6G(137, 6635, 0), + CHAN6G(141, 6655, 0), + CHAN6G(145, 6675, 0), + CHAN6G(149, 6695, 0), + CHAN6G(153, 6715, 0), + CHAN6G(157, 6735, 0), + CHAN6G(161, 6755, 0), + CHAN6G(165, 6775, 0), + CHAN6G(169, 6795, 0), + CHAN6G(173, 6815, 0), + CHAN6G(177, 6835, 0), + CHAN6G(181, 6855, 0), + CHAN6G(185, 6875, 0), + CHAN6G(189, 6895, 0), + CHAN6G(193, 6915, 0), + CHAN6G(197, 6935, 0), + CHAN6G(201, 6955, 0), + CHAN6G(205, 6975, 0), + CHAN6G(209, 6995, 0), + CHAN6G(213, 7015, 0), + CHAN6G(217, 7035, 0), + CHAN6G(221, 7055, 0), + CHAN6G(225, 7075, 0), + CHAN6G(229, 7095, 0), + CHAN6G(233, 7115, 0), +}; + static struct ieee80211_rate ath11k_legacy_rates[] = { { .bitrate = 10, .hw_value = ATH11K_HW_RATE_CCK_LP_1M }, From 22eeadcdeab63e88983401f699f61a0121c03a0d Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:02 +0300 Subject: [PATCH 0028/2056] ath11k: add support for 6GHz radio in driver This patch adds 6GHz band support and mac80211 registration for the 6G phy radio. 
Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-3-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.h | 6 +- drivers/net/wireless/ath/ath11k/mac.c | 91 +++++++++++++++++++++----- drivers/net/wireless/ath/ath11k/wmi.c | 16 ++++- 3 files changed, 92 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index e04f0e711779..67dee661d6bb 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -353,7 +353,10 @@ struct ath11k_sta { #endif }; -#define ATH11K_NUM_CHANS 41 +#define ATH11K_MIN_5G_FREQ 4150 +#define ATH11K_MIN_6G_FREQ 5945 +#define ATH11K_MAX_6G_FREQ 7115 +#define ATH11K_NUM_CHANS 100 #define ATH11K_MAX_5G_CHAN 173 enum ath11k_state { @@ -431,6 +434,7 @@ struct ath11k { u32 vht_cap_info; struct ath11k_he ar_he; enum ath11k_state state; + bool supports_6ghz; struct { struct completion started; struct completion completed; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index b55a374c845f..ea732f973079 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -205,6 +205,17 @@ ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = { [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, }, + [NL80211_BAND_6GHZ] = { + [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, + [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20, + [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40, + [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80, + [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160, + [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80, + }, + }; const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = { @@ -1560,6 +1571,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar, } break; case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: /* Check HE first */ if (sta->he_cap.has_he) { phymode = ath11k_mac_get_phymode_he(ar, sta); @@ -3482,7 +3494,7 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar, rate_cap_rx_chainmask); } - if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { + if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) { band = &ar->mac.sbands[NL80211_BAND_5GHZ]; ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; if (ht_cap_info) @@ -3714,6 +3726,16 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar, band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ]; band->n_iftype_data = count; } + + if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && + ar->supports_6ghz) { + count = ath11k_mac_copy_he_cap(ar, cap, + ar->mac.iftype[NL80211_BAND_6GHZ], + NL80211_BAND_6GHZ); + band = &ar->mac.sbands[NL80211_BAND_6GHZ]; + band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ]; + band->n_iftype_data = count; + } } static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant) @@ -4156,6 +4178,11 @@ ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif, params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; } + if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP && + ar->supports_6ghz) { + params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains; + params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains; + } } static u32 @@ -5288,7 +5315,7 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar, rate_idx = 
ffs(mask->control[band].legacy) - 1; - if (band == NL80211_BAND_5GHZ) + if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX; hw_rate = ath11k_legacy_rates[rate_idx].hw_value; @@ -5754,7 +5781,8 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, void *channels; BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) + - ARRAY_SIZE(ath11k_5ghz_channels)) != + ARRAY_SIZE(ath11k_5ghz_channels) + + ARRAY_SIZE(ath11k_6ghz_channels)) != ATH11K_NUM_CHANS); reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; @@ -5767,6 +5795,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, return -ENOMEM; band = &ar->mac.sbands[NL80211_BAND_2GHZ]; + band->band = NL80211_BAND_2GHZ; band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels); band->channels = channels; band->n_bitrates = ath11k_g_rates_size; @@ -5778,23 +5807,48 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar, } if (supported_bands & WMI_HOST_WLAN_5G_CAP) { - channels = kmemdup(ath11k_5ghz_channels, - sizeof(ath11k_5ghz_channels), - GFP_KERNEL); - if (!channels) { - kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); - return -ENOMEM; + if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) { + channels = kmemdup(ath11k_6ghz_channels, + sizeof(ath11k_6ghz_channels), GFP_KERNEL); + if (!channels) { + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + return -ENOMEM; + } + + ar->supports_6ghz = true; + band = &ar->mac.sbands[NL80211_BAND_6GHZ]; + band->band = NL80211_BAND_6GHZ; + band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels); + band->channels = channels; + band->n_bitrates = ath11k_a_rates_size; + band->bitrates = ath11k_a_rates; + ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band; + ath11k_mac_update_ch_list(ar, band, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); } - band = &ar->mac.sbands[NL80211_BAND_5GHZ]; - band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels); - band->channels = channels; - band->n_bitrates = ath11k_a_rates_size; - band->bitrates = ath11k_a_rates; - ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; - ath11k_mac_update_ch_list(ar, band, - reg_cap->low_5ghz_chan, - reg_cap->high_5ghz_chan); + if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) { + channels = kmemdup(ath11k_5ghz_channels, + sizeof(ath11k_5ghz_channels), + GFP_KERNEL); + if (!channels) { + kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); + return -ENOMEM; + } + + band = &ar->mac.sbands[NL80211_BAND_5GHZ]; + band->band = NL80211_BAND_5GHZ; + band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels); + band->channels = channels; + band->n_bitrates = ath11k_a_rates_size; + band->bitrates = ath11k_a_rates; + ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band; + ath11k_mac_update_ch_list(ar, band, + reg_cap->low_5ghz_chan, + reg_cap->high_5ghz_chan); + } } return 0; @@ -5848,6 +5902,7 @@ static void __ath11k_mac_unregister(struct ath11k *ar) kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); + kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); SET_IEEE80211_DEV(ar->hw, NULL); } diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index c2a972377687..291fb274134f 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -368,6 +368,17 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle, memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, sizeof(struct ath11k_ppe_threshold)); + cap_band = 
&pdev_cap->band[NL80211_BAND_6GHZ]; + cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g; + cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g; + cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g; + cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext; + cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g; + memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g, + sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE); + memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g, + sizeof(struct ath11k_ppe_threshold)); + return 0; } @@ -5206,9 +5217,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) goto exit; } - if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ) + if (rx_ev.phy_mode == MODE_11B && + (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ)) ath11k_dbg(ab, ATH11K_DBG_WMI, - "wmi mgmt rx 11b (CCK) on 5GHz\n"); + "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band); sband = &ar->mac.sbands[status->band]; From 5dcf42f8b79d1419ad7f6d46d7b5f7dc5bf9cdba Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:03 +0300 Subject: [PATCH 0029/2056] ath11k: Use freq instead of channel number in rx path As 6GHz cahnnel numbers overlap with those of 5GHz and 2GHz bands, it is necessary to use frequency when determining the band info in rx path. Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-4-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/dp_rx.c | 6 +++++- drivers/net/wireless/ath/ath11k/wmi.c | 5 ++++- drivers/net/wireless/ath/ath11k/wmi.h | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index a54610d75c40..112f130f91c2 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -2162,6 +2162,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, struct ieee80211_rx_status *rx_status) { u8 channel_num; + u32 center_freq; rx_status->freq = 0; rx_status->rate_idx = 0; @@ -2172,8 +2173,11 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc, rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc); + center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16; - if (channel_num >= 1 && channel_num <= 14) { + if (center_freq >= 5935 && center_freq <= 7105) { + rx_status->band = NL80211_BAND_6GHZ; + } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { rx_status->band = NL80211_BAND_5GHZ; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 291fb274134f..500108fa59d9 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -3833,6 +3833,7 @@ static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab, } hdr->pdev_id = ev->pdev_id; + hdr->chan_freq = ev->chan_freq; hdr->channel = ev->channel; hdr->snr = ev->snr; hdr->rate = ev->rate; @@ -5204,7 +5205,9 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb) if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { + if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) { + status->band = NL80211_BAND_6GHZ; + } else if (rx_ev.channel >= 1 && 
rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) { status->band = NL80211_BAND_5GHZ; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index b9f3e559ced7..afa3c4cf90e9 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -4228,6 +4228,7 @@ struct wmi_pdev_temperature_event { #define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4 struct mgmt_rx_event_params { + u32 chan_freq; u32 channel; u32 snr; u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA]; @@ -4257,6 +4258,7 @@ struct wmi_mgmt_rx_hdr { u32 rx_tsf_l32; u32 rx_tsf_u32; u32 pdev_id; + u32 chan_freq; } __packed; #define MAX_ANTENNA_EIGHT 8 From 91270d709b210a1c08d67faa3be3ae3e459a0889 Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:03 +0300 Subject: [PATCH 0030/2056] ath11k: extend peer_assoc_cmd for 6GHz band MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support required for configuring min_data_rate of 6GHz oper IE and peer_he_caps_6ghz in peer assoc command. The Minimum Rate field indicates the minimum rate, in units of 1 Mb/s, that the non-AP STA is allowed to use for sending PPDUs as defined in IEEE P802.11ax™/D6.0. Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-5-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/wmi.c | 2 ++ drivers/net/wireless/ath/ath11k/wmi.h | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 500108fa59d9..a6bbb3badd1f 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1789,6 +1789,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0]; cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1]; cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal; + cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz; cmd->peer_he_ops = param->peer_he_ops; memcpy(&cmd->peer_he_cap_phy, ¶m->peer_he_cap_phyinfo, sizeof(param->peer_he_cap_phyinfo)); @@ -1842,6 +1843,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar, /* HE Rates */ cmd->peer_he_mcs = param->peer_he_mcs_count; + cmd->min_data_rate = param->min_data_rate; ptr += sizeof(*mcs); diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index afa3c4cf90e9..fd82cff7537b 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -1011,6 +1011,7 @@ enum wmi_tlv_vdev_param { WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME, WMI_VDEV_PARAM_BA_MODE = 0x7e, WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87, + WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99, WMI_VDEV_PARAM_PROTOTYPE = 0x8000, WMI_VDEV_PARAM_BSS_COLOR, WMI_VDEV_PARAM_SET_HEMU_MODE, @@ -3444,6 +3445,7 @@ struct peer_assoc_params { u32 tx_max_rate; u32 tx_mcs_set; u8 vht_capable; + u8 min_data_rate; u32 tx_max_mcs_nss; u32 peer_bw_rxnss_override; bool is_pmf_enabled; @@ -3472,6 +3474,7 @@ struct peer_assoc_params { bool he_flag; u32 peer_he_cap_macinfo[2]; u32 peer_he_cap_macinfo_internal; + u32 peer_he_caps_6ghz; u32 peer_he_ops; u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE]; u32 peer_he_mcs_count; @@ -3509,6 +3512,8 @@ struct wmi_peer_assoc_complete_cmd { u32 peer_he_mcs; u32 peer_he_cap_info_ext; u32 peer_he_cap_info_internal; + u32 
min_data_rate; + u32 peer_he_caps_6ghz; } __packed; struct wmi_stop_scan_cmd { From d387503df0cd7f7d5a3b1366f6a40664465e090c Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:03 +0300 Subject: [PATCH 0031/2056] ath11k: set psc channel flag when sending channel list to firmware. If 6 ghz channel is a Preferred Scanning Channel(PSC), mark the channel flag accordingly when updating channel list to firmware. This will be used when making scanning decision in 6GHz channels. Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-6-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/reg.c | 4 ++++ drivers/net/wireless/ath/ath11k/wmi.h | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 453aa9c06969..7c9dc91cc48a 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -161,6 +161,10 @@ int ath11k_reg_update_chan_list(struct ath11k *ar) else ch->phy_mode = MODE_11A; + if (channel->band == NL80211_BAND_6GHZ && + cfg80211_channel_is_psc(channel)) + ch->psc_channel = true; + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", i, params->nallchans, diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index fd82cff7537b..76c2da2c5db7 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -2520,7 +2520,8 @@ struct channel_param { allow_ht:1, allow_vht:1, allow_he:1, - set_agile:1; + set_agile:1, + psc_channel:1; u32 phy_mode; u32 cfreq1; u32 cfreq2; From 194b8ea1ce5a9c15a73e441380ed678fa009d3ed Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:04 +0300 Subject: [PATCH 0032/2056] ath11k: Add 6G scan dwell time parameter in scan request command Add 6G scan active and passive dwell time parameter to scan request. 
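As a rough worked example of why dedicated 6 GHz dwell values help: with the 59 channels added earlier in this series and the defaults set in this patch (40 ms active, 30 ms passive), the 6 GHz portion alone contributes on the order of two seconds to a full scan, ignoring rest time and probe retries. A standalone sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	const int num_6ghz_channels = 59;	/* channels 1..233 in steps of 4 */
	const int dwell_active_ms = 40;		/* default set in this patch */
	const int dwell_passive_ms = 30;	/* default set in this patch */

	printf("active:  ~%d ms\n", num_6ghz_channels * dwell_active_ms);
	printf("passive: ~%d ms\n", num_6ghz_channels * dwell_passive_ms);
	return 0;
}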
Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-7-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/wmi.c | 4 ++++ drivers/net/wireless/ath/ath11k/wmi.h | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index a6bbb3badd1f..239a336f55f4 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1899,6 +1899,8 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar, arg->dwell_time_active = 50; arg->dwell_time_active_2g = 0; arg->dwell_time_passive = 150; + arg->dwell_time_active_6g = 40; + arg->dwell_time_passive_6g = 30; arg->min_rest_time = 50; arg->max_rest_time = 500; arg->repeat_probe_time = 0; @@ -2045,6 +2047,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, cmd->dwell_time_active = params->dwell_time_active; cmd->dwell_time_active_2g = params->dwell_time_active_2g; cmd->dwell_time_passive = params->dwell_time_passive; + cmd->dwell_time_active_6g = params->dwell_time_active_6g; + cmd->dwell_time_passive_6g = params->dwell_time_passive_6g; cmd->min_rest_time = params->min_rest_time; cmd->max_rest_time = params->max_rest_time; cmd->repeat_probe_time = params->repeat_probe_time; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 76c2da2c5db7..4937d02839fc 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -3061,6 +3061,9 @@ struct wmi_start_scan_cmd { u32 num_vendor_oui; u32 scan_ctrl_flags_ext; u32 dwell_time_active_2g; + u32 dwell_time_active_6g; + u32 dwell_time_passive_6g; + u32 scan_start_offset; } __packed; #define WMI_SCAN_FLAG_PASSIVE 0x1 @@ -3127,6 +3130,8 @@ struct scan_req_params { u32 dwell_time_active; u32 dwell_time_active_2g; u32 dwell_time_passive; + u32 dwell_time_active_6g; + u32 dwell_time_passive_6g; u32 min_rest_time; u32 max_rest_time; u32 repeat_probe_time; From bff621fd113ff782b91795a9b2baac0b226bc069 Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:04 +0300 Subject: [PATCH 0033/2056] ath11k: Send multiple scan_chan_list messages if required With addition of 6Ghz channels, it is possible that wmi buffer size can exceed the maximum wmi buffer size. So iterate over the channel list, and send multiple messages till channel list is empty. Also mark PSC channel flag for 6GHz channels accordingly. 
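The splitting below works like the following standalone sketch: compute how many channel entries fit into one message after the fixed headers, send full chunks until the list is exhausted, and flag every message after the first as an append to the list already in firmware. The sizes here are made-up placeholders, not the real WMI structure sizes:

#include <stdio.h>

#define MAX_MSG_LEN	1024	/* assumed transport limit */
#define FIXED_HDR_LEN	16	/* assumed command + TLV header size */
#define CHAN_ENTRY_LEN	40	/* assumed per-channel TLV size */

int main(void)
{
	int remaining = 59;	/* e.g. the 6 GHz channel count */
	int max_per_msg = (MAX_MSG_LEN - FIXED_HDR_LEN) / CHAN_ENTRY_LEN;
	int num_sends = 0;

	while (remaining) {
		int now = remaining > max_per_msg ? max_per_msg : remaining;

		printf("message %d: %d channels%s\n", num_sends, now,
		       num_sends ? " (append flag set)" : "");
		remaining -= now;
		num_sends++;
	}
	return 0;
}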
Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-8-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/wmi.c | 163 ++++++++++++++------------ drivers/net/wireless/ath/ath11k/wmi.h | 3 + 2 files changed, 94 insertions(+), 72 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 239a336f55f4..2f9a459c4230 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -2195,91 +2195,110 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar, struct wmi_tlv *tlv; void *ptr; int i, ret, len; + u16 num_send_chans, num_sends = 0, max_chan_limit = 0; u32 *reg1, *reg2; - len = sizeof(*cmd) + TLV_HDR_SIZE + - sizeof(*chan_info) * chan_list->nallchans; - - skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); - if (!skb) - return -ENOMEM; - - cmd = (struct wmi_scan_chan_list_cmd *)skb->data; - cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) | - FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); - - ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len); - cmd->pdev_id = chan_list->pdev_id; - cmd->num_scan_chans = chan_list->nallchans; - - ptr = skb->data + sizeof(*cmd); - - len = sizeof(*chan_info) * chan_list->nallchans; - tlv = ptr; - tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | - FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); - ptr += TLV_HDR_SIZE; - tchan_info = &chan_list->ch_param[0]; + while (chan_list->nallchans) { + len = sizeof(*cmd) + TLV_HDR_SIZE; + max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) / + sizeof(*chan_info); - for (i = 0; i < chan_list->nallchans; ++i) { - chan_info = ptr; - memset(chan_info, 0, sizeof(*chan_info)); - len = sizeof(*chan_info); - chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, - WMI_TAG_CHANNEL) | - FIELD_PREP(WMI_TLV_LEN, - len - TLV_HDR_SIZE); + if (chan_list->nallchans > max_chan_limit) + num_send_chans = max_chan_limit; + else + num_send_chans = chan_list->nallchans; - reg1 = &chan_info->reg_info_1; - reg2 = &chan_info->reg_info_2; - chan_info->mhz = tchan_info->mhz; - chan_info->band_center_freq1 = tchan_info->cfreq1; - chan_info->band_center_freq2 = tchan_info->cfreq2; + chan_list->nallchans -= num_send_chans; + len += sizeof(*chan_info) * num_send_chans; - if (tchan_info->is_chan_passive) - chan_info->info |= WMI_CHAN_INFO_PASSIVE; - if (tchan_info->allow_he) - chan_info->info |= WMI_CHAN_INFO_ALLOW_HE; - else if (tchan_info->allow_vht) - chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT; - else if (tchan_info->allow_ht) - chan_info->info |= WMI_CHAN_INFO_ALLOW_HT; - if (tchan_info->half_rate) - chan_info->info |= WMI_CHAN_INFO_HALF_RATE; - if (tchan_info->quarter_rate) - chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE; + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; - chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, - tchan_info->phy_mode); - *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, - tchan_info->minpower); - *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, - tchan_info->maxpower); - *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, - tchan_info->maxregpower); - *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS, - tchan_info->reg_class_id); - *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, - tchan_info->antennamax); + cmd = (struct wmi_scan_chan_list_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) | + FIELD_PREP(WMI_TLV_LEN, 
sizeof(*cmd) - TLV_HDR_SIZE); + cmd->pdev_id = chan_list->pdev_id; + cmd->num_scan_chans = num_send_chans; + if (num_sends) + cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG; ath11k_dbg(ar->ab, ATH11K_DBG_WMI, - "WMI chan scan list chan[%d] = %u\n", - i, chan_info->mhz); + "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n", + num_send_chans, len, cmd->pdev_id, num_sends); - ptr += sizeof(*chan_info); + ptr = skb->data + sizeof(*cmd); - tchan_info++; + len = sizeof(*chan_info) * num_send_chans; + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE); + ptr += TLV_HDR_SIZE; + + for (i = 0; i < num_send_chans; ++i) { + chan_info = ptr; + memset(chan_info, 0, sizeof(*chan_info)); + len = sizeof(*chan_info); + chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_CHANNEL) | + FIELD_PREP(WMI_TLV_LEN, + len - TLV_HDR_SIZE); + + reg1 = &chan_info->reg_info_1; + reg2 = &chan_info->reg_info_2; + chan_info->mhz = tchan_info->mhz; + chan_info->band_center_freq1 = tchan_info->cfreq1; + chan_info->band_center_freq2 = tchan_info->cfreq2; + + if (tchan_info->is_chan_passive) + chan_info->info |= WMI_CHAN_INFO_PASSIVE; + if (tchan_info->allow_he) + chan_info->info |= WMI_CHAN_INFO_ALLOW_HE; + else if (tchan_info->allow_vht) + chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT; + else if (tchan_info->allow_ht) + chan_info->info |= WMI_CHAN_INFO_ALLOW_HT; + if (tchan_info->half_rate) + chan_info->info |= WMI_CHAN_INFO_HALF_RATE; + if (tchan_info->quarter_rate) + chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE; + if (tchan_info->psc_channel) + chan_info->info |= WMI_CHAN_INFO_PSC; + + chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, + tchan_info->phy_mode); + *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, + tchan_info->minpower); + *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, + tchan_info->maxpower); + *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, + tchan_info->maxregpower); + *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS, + tchan_info->reg_class_id); + *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX, + tchan_info->antennamax); + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n", + i, chan_info->mhz, chan_info->info); + + ptr += sizeof(*chan_info); + + tchan_info++; + } + + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); + if (ret) { + ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); + dev_kfree_skb(skb); + return ret; + } + + num_sends++; } - ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID); - if (ret) { - ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n"); - dev_kfree_skb(skb); - } - - return ret; + return 0; } int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id, diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 4937d02839fc..319ad7241e01 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -54,6 +54,8 @@ struct wmi_tlv { #define WLAN_SCAN_PARAMS_MAX_BSSID 4 #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 +#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1 + #define WMI_BA_MODE_BUFFER_SIZE_256 3 /* * HW mode config type replicated from FW header @@ -3271,6 +3273,7 @@ struct wmi_bcn_send_from_host_cmd { #define WMI_CHAN_INFO_QUARTER_RATE BIT(15) #define WMI_CHAN_INFO_DFS_FREQ2 BIT(16) #define WMI_CHAN_INFO_ALLOW_HE BIT(17) +#define WMI_CHAN_INFO_PSC BIT(18) #define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0) #define 
WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8) From 74601ecfef6e46cd8bc98e881e620bfc81bfddfb Mon Sep 17 00:00:00 2001 From: Pradeep Kumar Chitrapu Date: Tue, 9 Jun 2020 09:31:04 +0300 Subject: [PATCH 0034/2056] ath11k: Add support for 6g scan hint Add support for 6Ghz short ssid and bssid hint mechanism as part of scan command. Signed-off-by: Pradeep Kumar Chitrapu Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603001724.12161-9-pradeepc@codeaurora.org --- drivers/net/wireless/ath/ath11k/wmi.c | 72 +++++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/wmi.h | 22 ++++++++ 2 files changed, 94 insertions(+) diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 2f9a459c4230..8191cb0a997d 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -2005,6 +2005,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, int i, ret, len; u32 *tmp_ptr; u8 extraie_len_with_pad = 0; + struct hint_short_ssid *s_ssid = NULL; + struct hint_bssid *hint_bssid = NULL; len = sizeof(*cmd); @@ -2026,6 +2028,14 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, roundup(params->extraie.len, sizeof(u32)); len += extraie_len_with_pad; + if (params->num_hint_bssid) + len += TLV_HDR_SIZE + + params->num_hint_bssid * sizeof(struct hint_bssid); + + if (params->num_hint_s_ssid) + len += TLV_HDR_SIZE + + params->num_hint_s_ssid * sizeof(struct hint_short_ssid); + skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len); if (!skb) return -ENOMEM; @@ -2126,6 +2136,68 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar, ptr += extraie_len_with_pad; + if (params->num_hint_s_ssid) { + len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, len); + ptr += TLV_HDR_SIZE; + s_ssid = ptr; + for (i = 0; i < params->num_hint_s_ssid; ++i) { + s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; + s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; + s_ssid++; + } + ptr += len; + } + + if (params->num_hint_bssid) { + len = params->num_hint_bssid * sizeof(struct hint_bssid); + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, len); + ptr += TLV_HDR_SIZE; + hint_bssid = ptr; + for (i = 0; i < params->num_hint_bssid; ++i) { + hint_bssid->freq_flags = + params->hint_bssid[i].freq_flags; + ether_addr_copy(¶ms->hint_bssid[i].bssid.addr[0], + &hint_bssid->bssid.addr[0]); + hint_bssid++; + } + } + + len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid); + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, len); + ptr += TLV_HDR_SIZE; + if (params->num_hint_s_ssid) { + s_ssid = ptr; + for (i = 0; i < params->num_hint_s_ssid; ++i) { + s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags; + s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid; + s_ssid++; + } + } + ptr += len; + + len = params->num_hint_bssid * sizeof(struct hint_bssid); + tlv = ptr; + tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) | + FIELD_PREP(WMI_TLV_LEN, len); + ptr += TLV_HDR_SIZE; + if (params->num_hint_bssid) { + hint_bssid = ptr; + for (i = 0; i < params->num_hint_bssid; ++i) { + hint_bssid->freq_flags = + params->hint_bssid[i].freq_flags; + ether_addr_copy(¶ms->hint_bssid[i].bssid.addr[0], + &hint_bssid->bssid.addr[0]); + hint_bssid++; + } + } + ret = ath11k_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID); if 
(ret) { diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 319ad7241e01..1162bd7a5f87 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -50,6 +50,14 @@ struct wmi_tlv { #define WMI_MAX_MEM_REQS 32 #define ATH11K_MAX_HW_LISTEN_INTERVAL 5 +#define WLAN_SCAN_MAX_HINT_S_SSID 10 +#define WLAN_SCAN_MAX_HINT_BSSID 10 +#define MAX_RNR_BSS 5 + +#define WLAN_SCAN_MAX_HINT_S_SSID 10 +#define WLAN_SCAN_MAX_HINT_BSSID 10 +#define MAX_RNR_BSS 5 + #define WLAN_SCAN_PARAMS_MAX_SSID 16 #define WLAN_SCAN_PARAMS_MAX_BSSID 4 #define WLAN_SCAN_PARAMS_MAX_IE_LEN 256 @@ -3105,6 +3113,16 @@ enum { ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \ WMI_SCAN_DWELL_MODE_MASK)) +struct hint_short_ssid { + u32 freq_flags; + u32 short_ssid; +}; + +struct hint_bssid { + u32 freq_flags; + struct wmi_mac_addr bssid; +}; + struct scan_req_params { u32 scan_id; u32 scan_req_id; @@ -3184,6 +3202,10 @@ struct scan_req_params { struct element_info extraie; struct element_info htcap; struct element_info vhtcap; + u32 num_hint_s_ssid; + u32 num_hint_bssid; + struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID]; + struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID]; }; struct wmi_ssid_arg { From f28b7b78aad32bcbec49e77baf140be14c7dd99b Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Tue, 9 Jun 2020 09:31:05 +0300 Subject: [PATCH 0035/2056] ath11k: build HE 6 GHz capability Build 6 GHz band capability from HT and VHT capabilities reported by firmware. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591057888-2862-1-git-send-email-rmanohar@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.h | 1 + drivers/net/wireless/ath/ath11k/mac.c | 34 ++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 67dee661d6bb..0cb844aceba4 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -552,6 +552,7 @@ struct ath11k_band_cap { u32 he_mcs; u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE]; struct ath11k_ppe_threshold he_ppet; + u16 he_6ghz_capa; }; struct ath11k_pdev_cap { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index ea732f973079..b3ae1a0bceb3 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -3615,6 +3615,35 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) he_cap_elem->phy_cap_info[9] &= ~m; } +static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap, + struct ath11k_band_cap *bcap) +{ + u8 val; + + bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; + if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) + bcap->he_6ghz_capa |= + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, + WLAN_HT_CAP_SM_PS_DYNAMIC); + else + bcap->he_6ghz_capa |= + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS, + WLAN_HT_CAP_SM_PS_DISABLED); + val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, + pcap->vht_cap); + bcap->he_6ghz_capa |= + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val); + val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap); + bcap->he_6ghz_capa |= + FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val); + if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) + bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; + if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) + 
bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; + + return cpu_to_le16(bcap->he_6ghz_capa); +} + static int ath11k_mac_copy_he_cap(struct ath11k *ar, struct ath11k_pdev_cap *cap, struct ieee80211_sband_iftype_data *data, @@ -3697,6 +3726,11 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar, IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) ath11k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); + + if (band == NL80211_BAND_6GHZ) { + data[idx].he_6ghz_capa.capa = + ath11k_mac_setup_he_6ghz_cap(cap, band_cap); + } idx++; } From 70a7808b50b119fa5c00e5704e20e27d79b8cf4c Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 10 Jun 2020 18:53:53 -0700 Subject: [PATCH 0036/2056] Bluetooth: btmrvl_sdio: Set parent dev to hdev Set the correct parent dev when registering hdev. This allows userspace tools to find the parent device (for example, to set the power/wakeup property). Before this change, the path was /sys/devices/virtual/bluetooth/hci0 and after this change, it looks more like: /sys/bus/mmc/devices/mmc1:0001/mmc1:0001:2/bluetooth/hci0 Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Douglas Anderson Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 708ad21683eb..47e450903af4 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -669,6 +669,7 @@ static int btmrvl_service_main_thread(void *data) int btmrvl_register_hdev(struct btmrvl_private *priv) { struct hci_dev *hdev = NULL; + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret; hdev = hci_alloc_dev(); @@ -687,6 +688,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv) hdev->send = btmrvl_send_frame; hdev->setup = btmrvl_setup; hdev->set_bdaddr = btmrvl_set_bdaddr; + SET_HCIDEV_DEV(hdev, &card->func->dev); hdev->dev_type = priv->btmrvl_dev.dev_type; From 76d4c130ea44404ce6d6b1008f4729a6547c22c9 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 10 Jun 2020 18:53:54 -0700 Subject: [PATCH 0037/2056] Bluetooth: btmrvl_sdio: Implement prevent_wake Use the parent device's power/wakeup to control whether we support remote wake. If remote wakeup is disabled, Bluetooth will not enable scanning for incoming connections. Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Douglas Anderson Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c index 47e450903af4..8b9d78ce6bb2 100644 --- a/drivers/bluetooth/btmrvl_main.c +++ b/drivers/bluetooth/btmrvl_main.c @@ -587,6 +587,14 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) return 0; } +static bool btmrvl_prevent_wake(struct hci_dev *hdev) +{ + struct btmrvl_private *priv = hci_get_drvdata(hdev); + struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; + + return !device_may_wakeup(&card->func->dev); +} + /* * This function handles the event generated by firmware, rx data * received from firmware, and tx data sent from kernel. 
@@ -688,6 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv) hdev->send = btmrvl_send_frame; hdev->setup = btmrvl_setup; hdev->set_bdaddr = btmrvl_set_bdaddr; + hdev->prevent_wake = btmrvl_prevent_wake; SET_HCIDEV_DEV(hdev, &card->func->dev); hdev->dev_type = priv->btmrvl_dev.dev_type; From e660b3510eb4b3c06ce1188a1d305b6f653106fc Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 10 Jun 2020 18:53:55 -0700 Subject: [PATCH 0038/2056] Bluetooth: btmrvl_sdio: Refactor irq wakeup Use device_init_wakeup to allow the Bluetooth dev to wake the system from suspend. Currently, the device can wake the system but no power/wakeup entry is created in sysfs to allow userspace to disable wakeup. Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Douglas Anderson Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 4c7978cb1786..cfb9f9db44a0 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -111,6 +111,9 @@ static int btmrvl_sdio_probe_of(struct device *dev, "Failed to request irq_bt %d (%d)\n", cfg->irq_bt, ret); } + + /* Configure wakeup (enabled by default) */ + device_init_wakeup(dev, true); disable_irq(cfg->irq_bt); } } @@ -1654,6 +1657,7 @@ static void btmrvl_sdio_remove(struct sdio_func *func) MODULE_SHUTDOWN_REQ); btmrvl_sdio_disable_host_int(card); } + BT_DBG("unregister dev"); card->priv->surprise_removed = true; btmrvl_sdio_unregister_dev(card); @@ -1690,7 +1694,8 @@ static int btmrvl_sdio_suspend(struct device *dev) } /* Enable platform specific wakeup interrupt */ - if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) { + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { card->plt_wake_cfg->wake_by_bt = false; enable_irq(card->plt_wake_cfg->irq_bt); enable_irq_wake(card->plt_wake_cfg->irq_bt); @@ -1707,7 +1712,8 @@ static int btmrvl_sdio_suspend(struct device *dev) BT_ERR("HS not activated, suspend failed!"); /* Disable platform specific wakeup interrupt */ if (card->plt_wake_cfg && - card->plt_wake_cfg->irq_bt >= 0) { + card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { disable_irq_wake(card->plt_wake_cfg->irq_bt); disable_irq(card->plt_wake_cfg->irq_bt); } @@ -1767,7 +1773,8 @@ static int btmrvl_sdio_resume(struct device *dev) hci_resume_dev(hcidev); /* Disable platform specific wakeup interrupt */ - if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0) { + if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && + device_may_wakeup(dev)) { disable_irq_wake(card->plt_wake_cfg->irq_bt); disable_irq(card->plt_wake_cfg->irq_bt); if (card->plt_wake_cfg->wake_by_bt) From 00398e1d518309328e8ba7dff00881538ac22c6a Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 19:50:41 +0000 Subject: [PATCH 0039/2056] Bluetooth: Add support for BT_PKT_STATUS CMSG data for SCO connections This change adds support for reporting the BT_PKT_STATUS to the socket CMSG data to allow the implementation of a packet loss correction on erroneous data received on the SCO socket. The patch was partially developed by Marcel Holtmann and validated by Hsin-yu Chao. 
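From user space the new option is consumed roughly as in the fragment below: enable BT_PKT_STATUS on the SCO socket, then read each packet with recvmsg() and look for the BT_SCM_PKT_STATUS control message carrying the one-byte status flags. This is an illustrative sketch only; creating and connecting the SCO socket and all error handling are omitted, and the constants simply mirror the values added in this patch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_BLUETOOTH
#define SOL_BLUETOOTH		274
#endif
#define BT_PKT_STATUS		16	/* socket option added here */
#define BT_SCM_PKT_STATUS	0x03	/* cmsg type added here */

static void read_one_sco_packet(int sk)
{
	uint8_t data[255], cbuf[64];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	uint32_t on = 1;

	/* normally done once after connecting the socket */
	setsockopt(sk, SOL_BLUETOOTH, BT_PKT_STATUS, &on, sizeof(on));

	if (recvmsg(sk, &msg, 0) < 0)
		return;

	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
	     c = CMSG_NXTHDR(&msg, c)) {
		if (c->cmsg_level == SOL_BLUETOOTH &&
		    c->cmsg_type == BT_SCM_PKT_STATUS) {
			uint8_t status;

			memcpy(&status, CMSG_DATA(c), sizeof(status));
			printf("packet status flags: 0x%02x\n", status);
		}
	}
}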
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/bluetooth.h | 10 ++++++++++ include/net/bluetooth/sco.h | 2 ++ net/bluetooth/af_bluetooth.c | 3 +++ net/bluetooth/hci_core.c | 1 + net/bluetooth/sco.c | 32 +++++++++++++++++++++++++++++++ 5 files changed, 48 insertions(+) diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 18190055374c..7ee8041af803 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -147,6 +147,10 @@ struct bt_voice { #define BT_MODE_LE_FLOWCTL 0x03 #define BT_MODE_EXT_FLOWCTL 0x04 +#define BT_PKT_STATUS 16 + +#define BT_SCM_PKT_STATUS 0x03 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) @@ -286,6 +290,7 @@ struct bt_sock { struct sock *parent; unsigned long flags; void (*skb_msg_name)(struct sk_buff *, void *, int *); + void (*skb_put_cmsg)(struct sk_buff *, struct msghdr *, struct sock *); }; enum { @@ -335,6 +340,10 @@ struct l2cap_ctrl { struct l2cap_chan *chan; }; +struct sco_ctrl { + u8 pkt_status; +}; + struct hci_dev; typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); @@ -361,6 +370,7 @@ struct bt_skb_cb { u8 incoming:1; union { struct l2cap_ctrl l2cap; + struct sco_ctrl sco; struct hci_ctrl hci; }; }; diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h index f40ddb4264fc..1aa2e14b6c94 100644 --- a/include/net/bluetooth/sco.h +++ b/include/net/bluetooth/sco.h @@ -46,4 +46,6 @@ struct sco_conninfo { __u8 dev_class[3]; }; +#define SCO_CMSG_PKT_STATUS 0x01 + #endif /* __SCO_H */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index b751a7c1b20f..4ef6a54403aa 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -286,6 +286,9 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (msg->msg_name && bt_sk(sk)->skb_msg_name) bt_sk(sk)->skb_msg_name(skb, msg->msg_name, &msg->msg_namelen); + + if (bt_sk(sk)->skb_put_cmsg) + bt_sk(sk)->skb_put_cmsg(skb, msg, sk); } skb_free_datagram(sk, skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 83ce665d3cbf..00458a8c26f8 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -4554,6 +4554,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) if (conn) { /* Send to upper protocol */ + bt_cb(skb)->sco.pkt_status = flags & 0x03; sco_recv_scodata(conn, skb); return; } else { diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index c8c3d38cdc7b..83a48860bb5d 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -66,6 +66,7 @@ struct sco_pinfo { bdaddr_t dst; __u32 flags; __u16 setting; + __u8 cmsg_mask; struct sco_conn *conn; }; @@ -449,6 +450,15 @@ static void sco_sock_close(struct sock *sk) sco_sock_kill(sk); } +static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, + struct sock *sk) +{ + if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) + put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, + sizeof(bt_cb(skb)->sco.pkt_status), + &bt_cb(skb)->sco.pkt_status); +} + static void sco_sock_init(struct sock *sk, struct sock *parent) { BT_DBG("sk %p", sk); @@ -457,6 +467,8 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) sk->sk_type = parent->sk_type; bt_sk(sk)->flags = bt_sk(parent)->flags; security_sk_clone(parent, sk); + } else { + bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; } } @@ -846,6 +858,18 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, 
sco_pi(sk)->setting = voice.setting; break; + case BT_PKT_STATUS: + if (get_user(opt, (u32 __user *)optval)) { + err = -EFAULT; + break; + } + + if (opt) + sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS; + else + sco_pi(sk)->cmsg_mask &= SCO_CMSG_PKT_STATUS; + break; + default: err = -ENOPROTOOPT; break; @@ -923,6 +947,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, int len, err = 0; struct bt_voice voice; u32 phys; + int pkt_status; BT_DBG("sk %p", sk); @@ -969,6 +994,13 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, err = -EFAULT; break; + case BT_PKT_STATUS: + pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); + + if (put_user(pkt_status, (int __user *)optval)) + err = -EFAULT; + break; + default: err = -ENOPROTOOPT; break; From 32929e1f4ad9adf71f655028e4dd5d87adb97f52 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 14:26:10 +0000 Subject: [PATCH 0040/2056] Bluetooth: Use only 8 bits for the HCI CMSG state flags This change implements suggestions from the code review of the SCO CMSG state flag patch. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_sock.h | 4 ++-- net/bluetooth/hci_sock.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 9352bb1bf34c..9949870f7d78 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -31,8 +31,8 @@ #define HCI_TIME_STAMP 3 /* CMSG flags */ -#define HCI_CMSG_DIR 0x0001 -#define HCI_CMSG_TSTAMP 0x0002 +#define HCI_CMSG_DIR 0x01 +#define HCI_CMSG_TSTAMP 0x02 struct sockaddr_hci { sa_family_t hci_family; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index caf38a8ea6a8..d5627967fc25 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -52,7 +52,7 @@ struct hci_pinfo { struct bt_sock bt; struct hci_dev *hdev; struct hci_filter filter; - __u32 cmsg_mask; + __u8 cmsg_mask; unsigned short channel; unsigned long flags; __u32 cookie; @@ -1399,7 +1399,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { - __u32 mask = hci_pi(sk)->cmsg_mask; + __u8 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; From 590deccf4c0690597ca69349fe46ee124944d8c5 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 12 Jun 2020 17:50:28 +0530 Subject: [PATCH 0041/2056] Bluetooth: hci_qca: Disable SoC debug logging for WCN3991 By default, WCN3991 sent debug packets to HOST via ACL packet with header 0xDC2E. This logging is not required on commercial devices. With this patch SoC logging is disabled post fw download. 
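For reference, once the driver issues this command the bytes on the UART are roughly the following, assuming standard H4 framing and little-endian HCI opcodes. A standalone sketch, not driver code:

#include <stdio.h>

int main(void)
{
	const unsigned char pkt[] = {
		0x01,		/* H4 packet type: HCI command */
		0x17, 0xfc,	/* opcode 0xFC17 (QCA_DISABLE_LOGGING), LE */
		0x02,		/* parameter length */
		0x14,		/* QCA_DISABLE_LOGGING_SUB_OP */
		0x00,		/* second parameter byte used by the driver */
	};

	for (unsigned int i = 0; i < sizeof(pkt); i++)
		printf("%02x ", pkt[i]);
	printf("\n");
	return 0;
}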
Signed-off-by: Balakrishna Godavarthi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.c | 27 +++++++++++++++++++++++++++ drivers/bluetooth/btqca.h | 2 ++ 2 files changed, 29 insertions(+) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index c5984966f315..ce9dcffdc5bf 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -400,6 +400,27 @@ static int qca_download_firmware(struct hci_dev *hdev, return ret; } +static int qca_disable_soc_logging(struct hci_dev *hdev) +{ + struct sk_buff *skb; + u8 cmd[2]; + int err; + + cmd[0] = QCA_DISABLE_LOGGING_SUB_OP; + cmd[1] = 0x00; + skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd, + HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "QCA Failed to disable soc logging(%d)", err); + return err; + } + + kfree_skb(skb); + + return 0; +} + int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; @@ -486,6 +507,12 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } + if (soc_type >= QCA_WCN3991) { + err = qca_disable_soc_logging(hdev); + if (err < 0) + return err; + } + /* Perform HCI reset */ err = qca_send_reset(hdev); if (err < 0) { diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 6e1e62dd4b95..d81b74c408a5 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -14,6 +14,7 @@ #define EDL_NVM_ACCESS_SET_REQ_CMD (0x01) #define MAX_SIZE_PER_TLV_SEGMENT (243) #define QCA_PRE_SHUTDOWN_CMD (0xFC08) +#define QCA_DISABLE_LOGGING (0xFC17) #define EDL_CMD_REQ_RES_EVT (0x00) #define EDL_PATCH_VER_RES_EVT (0x19) @@ -22,6 +23,7 @@ #define EDL_CMD_EXE_STATUS_EVT (0x00) #define EDL_SET_BAUDRATE_RSP_EVT (0x92) #define EDL_NVM_ACCESS_CODE_EVT (0x0B) +#define QCA_DISABLE_LOGGING_SUB_OP (0x14) #define EDL_TAG_ID_HCI (17) #define EDL_TAG_ID_DEEP_SLEEP (27) From 2d68476cfc2afa1a1a2d9007a23264ffc6308e77 Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 12 Jun 2020 17:51:31 +0530 Subject: [PATCH 0042/2056] Bluetooth: hci_qca: Increase SoC idle timeout to 200ms In some version of WCN399x, SoC idle timeout is configured as 80ms instead of 20ms or 40ms. To honor all the SoC's supported in the driver increasing SoC idle timeout to 200ms. Fixes: 41d5b25fed0a0 ("Bluetooth: hci_qca: add PM support") Signed-off-by: Balakrishna Godavarthi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index f3fde99970c1..91c9aa642367 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -46,7 +46,7 @@ #define HCI_MAX_IBS_SIZE 10 #define IBS_WAKE_RETRANS_TIMEOUT_MS 100 -#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 40 +#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200 #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 #define CMD_TRANS_TIMEOUT_MS 100 #define MEMDUMP_TIMEOUT_MS 8000 From 201a11246d6018bb4ce648e62ed099950f4b174a Mon Sep 17 00:00:00 2001 From: Balakrishna Godavarthi Date: Fri, 12 Jun 2020 17:45:17 +0530 Subject: [PATCH 0043/2056] Bluetooth: hci_qca: Request Tx clock vote off only when Tx is pending Tx pending flag is set to true when HOST IBS state is AWAKE or AWAKEING. If IBS state is ASLEEP, then Tx clock is already voted off. To optimize further directly calling serial_clock_vote() instead of qca_wq_serial_tx_clock_vote_off(), at this point of qca_suspend() already data is sent out. No need to wake up hci to send data. 
Signed-off-by: Balakrishna Godavarthi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 91c9aa642367..99d14c777105 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2107,6 +2107,7 @@ static int __maybe_unused qca_suspend(struct device *dev) if (tx_pending) { serdev_device_wait_until_sent(hu->serdev, msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); + serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); } /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going @@ -2120,7 +2121,6 @@ static int __maybe_unused qca_suspend(struct device *dev) goto error; } - qca_wq_serial_tx_clock_vote_off(&qca->ws_tx_vote_off); return 0; error: From 7e90de4ac1099d3f4e26023853d4aefd0d2a1dea Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:55 +0000 Subject: [PATCH 0044/2056] Bluetooth: mgmt: read/set system parameter definitions This patch submits the corresponding kernel definitions to mgmt.h. This is submitted before the implementation to avoid any conflicts in values allocations. Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Yu Liu Signed-off-by: Marcel Holtmann --- include/net/bluetooth/mgmt.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 16e0d87bd8fa..e515288f328f 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -52,6 +52,12 @@ struct mgmt_hdr { __le16 len; } __packed; +struct mgmt_tlv { + __le16 type; + __u8 length; + __u8 value[]; +} __packed; + struct mgmt_addr_info { bdaddr_t bdaddr; __u8 type; @@ -702,6 +708,18 @@ struct mgmt_rp_set_exp_feature { __le32 flags; } __packed; +#define MGMT_OP_READ_DEF_SYSTEM_CONFIG 0x004b +#define MGMT_READ_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_SYSTEM_CONFIG 0x004c +#define MGMT_SET_DEF_SYSTEM_CONFIG_SIZE 0 + +#define MGMT_OP_READ_DEF_RUNTIME_CONFIG 0x004d +#define MGMT_READ_DEF_RUNTIME_CONFIG_SIZE 0 + +#define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e +#define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; From 10873f99ced274cbfc119f55e7e57a0f047a0799 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 02:01:56 +0000 Subject: [PATCH 0045/2056] Bluetooth: centralize default value initialization. This patch centralized the initialization of default parameters. This is required to allow clients to more easily customize the default system parameters. 
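The centralized fields are the values the new Set Default System Configuration interface from this series lets a management client override, encoded as a sequence of mgmt_tlv entries (16-bit type, 8-bit length, value, little endian). A rough sketch of building such a payload for the two LE connection interval parameters, using the parameter codes defined later in the series and the defaults set here; the surrounding mgmt command header and socket handling are omitted:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* append one 16-bit parameter as a mgmt_tlv: __le16 type, __u8 length, value */
static size_t put_tlv_u16(uint8_t *buf, uint16_t type, uint16_t value)
{
	buf[0] = type & 0xff;
	buf[1] = type >> 8;
	buf[2] = sizeof(uint16_t);
	buf[3] = value & 0xff;
	buf[4] = value >> 8;
	return 5;
}

int main(void)
{
	uint8_t payload[16];
	size_t len = 0;

	/* 0x0017 = LE conn min interval, 0x0018 = LE conn max interval */
	len += put_tlv_u16(payload + len, 0x0017, 0x0018);
	len += put_tlv_u16(payload + len, 0x0018, 0x0028);

	for (size_t i = 0; i < len; i++)
		printf("%02x ", payload[i]);
	printf("\n");
	return 0;
}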
Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 18 ++++++++++++++++++ net/bluetooth/hci_conn.c | 14 ++++---------- net/bluetooth/hci_core.c | 14 +++++++++++++- net/bluetooth/hci_request.c | 15 +++++---------- 4 files changed, 40 insertions(+), 21 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index cdd4f1db8670..0d5dbb6cb5a0 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -295,6 +295,14 @@ struct hci_dev { __u8 le_scan_type; __u16 le_scan_interval; __u16 le_scan_window; + __u16 le_scan_int_suspend; + __u16 le_scan_window_suspend; + __u16 le_scan_int_discovery; + __u16 le_scan_window_discovery; + __u16 le_scan_int_adv_monitor; + __u16 le_scan_window_adv_monitor; + __u16 le_scan_int_connect; + __u16 le_scan_window_connect; __u16 le_conn_min_interval; __u16 le_conn_max_interval; __u16 le_conn_latency; @@ -323,6 +331,16 @@ struct hci_dev { __u16 devid_product; __u16 devid_version; + __u8 def_page_scan_type; + __u16 def_page_scan_int; + __u16 def_page_scan_window; + __u8 def_inq_scan_type; + __u16 def_inq_scan_int; + __u16 def_inq_scan_window; + __u16 def_br_lsto; + __u16 def_page_timeout; + __u16 def_multi_adv_rotation_duration; + __u16 pkt_type; __u16 esco_type; __u16 link_policy; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 307800fd18e6..9bdffc4e79b0 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -789,11 +789,8 @@ static void set_ext_conn_params(struct hci_conn *conn, memset(p, 0, sizeof(*p)); - /* Set window to be the same value as the interval to - * enable continuous scanning. - */ - p->scan_interval = cpu_to_le16(hdev->le_scan_interval); - p->scan_window = p->scan_interval; + p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); p->conn_latency = cpu_to_le16(conn->le_conn_latency); @@ -875,11 +872,8 @@ static void hci_req_add_le_create_conn(struct hci_request *req, memset(&cp, 0, sizeof(cp)); - /* Set window to be the same value as the interval to enable - * continuous scanning. 
- */ - cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); - cp.scan_window = cp.scan_interval; + cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 00458a8c26f8..4f1052a7c488 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2982,7 +2982,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, adv_instance->remaining_time = timeout; if (duration == 0) - adv_instance->duration = HCI_DEFAULT_ADV_DURATION; + adv_instance->duration = hdev->def_multi_adv_rotation_duration; else adv_instance->duration = duration; @@ -3400,6 +3400,12 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_adv_max_interval = 0x0800; hdev->le_scan_interval = 0x0060; hdev->le_scan_window = 0x0030; + hdev->le_scan_int_suspend = 0x0400; + hdev->le_scan_window_suspend = 0x0012; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_connect = 0x0060; + hdev->le_scan_window_connect = 0x0060; hdev->le_conn_min_interval = 0x0018; hdev->le_conn_max_interval = 0x0028; hdev->le_conn_latency = 0x0000; @@ -3415,6 +3421,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; @@ -3423,6 +3430,11 @@ struct hci_dev *hci_alloc_dev(void) hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; + mutex_init(&hdev->lock); mutex_init(&hdev->req_lock); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 1acf5b8e0910..a7f572ad38ef 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -34,9 +34,6 @@ #define HCI_REQ_PEND 1 #define HCI_REQ_CANCELED 2 -#define LE_SUSPEND_SCAN_WINDOW 0x0012 -#define LE_SUSPEND_SCAN_INTERVAL 0x0400 - void hci_req_init(struct hci_request *req, struct hci_dev *hdev) { skb_queue_head_init(&req->cmd_q); @@ -366,13 +363,11 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) /* 160 msec page scan interval */ acp.interval = cpu_to_le16(0x0100); } else { - type = PAGE_SCAN_TYPE_STANDARD; /* default */ - - /* default 1.28 sec page scan */ - acp.interval = cpu_to_le16(0x0800); + type = hdev->def_page_scan_type; + acp.interval = cpu_to_le16(hdev->def_page_scan_int); } - acp.window = cpu_to_le16(0x0012); + acp.window = cpu_to_le16(hdev->def_page_scan_window); if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || __cpu_to_le16(hdev->page_scan_window) != acp.window) @@ -927,8 +922,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req) filter_policy |= 0x02; if (hdev->suspended) { - window = LE_SUSPEND_SCAN_WINDOW; - interval = LE_SUSPEND_SCAN_INTERVAL; + window = hdev->le_scan_window_suspend; + interval = hdev->le_scan_int_suspend; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; From 17896406ff3592d47b476ddd29276bf9cf8a26dd Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Thu, 11 Jun 2020 
02:01:57 +0000 Subject: [PATCH 0046/2056] Bluetooth: implement read/set default system parameters mgmt This patch implements the read default system parameters and the set default system parameters mgmt commands. Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- net/bluetooth/Makefile | 2 +- net/bluetooth/mgmt.c | 8 ++ net/bluetooth/mgmt_config.c | 253 ++++++++++++++++++++++++++++++++++++ net/bluetooth/mgmt_config.h | 11 ++ 4 files changed, 273 insertions(+), 1 deletion(-) create mode 100644 net/bluetooth/mgmt_config.c create mode 100644 net/bluetooth/mgmt_config.h diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 41dd541a44a5..1c645fba8c49 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -14,7 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecdh_helper.o hci_request.o mgmt_util.o + ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 9e8a3cccc6ca..99fbfd467d04 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -36,6 +36,7 @@ #include "hci_request.h" #include "smp.h" #include "mgmt_util.h" +#include "mgmt_config.h" #define MGMT_VERSION 1 #define MGMT_REVISION 17 @@ -111,6 +112,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_READ_SECURITY_INFO, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_SET_EXP_FEATURE, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, }; static const u16 mgmt_events[] = { @@ -162,6 +165,7 @@ static const u16 mgmt_untrusted_commands[] = { MGMT_OP_READ_EXT_INFO, MGMT_OP_READ_SECURITY_INFO, MGMT_OP_READ_EXP_FEATURES_INFO, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, }; static const u16 mgmt_untrusted_events[] = { @@ -7297,6 +7301,10 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE, HCI_MGMT_VAR_LEN | HCI_MGMT_HDEV_OPTIONAL }, + { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c new file mode 100644 index 000000000000..f6dfbe93542c --- /dev/null +++ b/net/bluetooth/mgmt_config.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (C) 2020 Google Corporation + */ + +#include +#include +#include + +#include "mgmt_util.h" +#include "mgmt_config.h" + +#define HDEV_PARAM_U16(_param_code_, _param_name_) \ +{ \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + { cpu_to_le16(hdev->_param_name_) } \ +} + +int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct { + struct mgmt_tlv entry; + union { + /* This is a simplification for now since all values + * are 16 bits. In the future, this code may need + * refactoring to account for variable length values + * and properly calculate the required buffer size. 
+ */ + __le16 value; + }; + } __packed params[] = { + /* Please see mgmt-api.txt for documentation of these values */ + HDEV_PARAM_U16(0x0000, def_page_scan_type), + HDEV_PARAM_U16(0x0001, def_page_scan_int), + HDEV_PARAM_U16(0x0002, def_page_scan_window), + HDEV_PARAM_U16(0x0003, def_inq_scan_type), + HDEV_PARAM_U16(0x0004, def_inq_scan_int), + HDEV_PARAM_U16(0x0005, def_inq_scan_window), + HDEV_PARAM_U16(0x0006, def_br_lsto), + HDEV_PARAM_U16(0x0007, def_page_timeout), + HDEV_PARAM_U16(0x0008, sniff_min_interval), + HDEV_PARAM_U16(0x0009, sniff_max_interval), + HDEV_PARAM_U16(0x000a, le_adv_min_interval), + HDEV_PARAM_U16(0x000b, le_adv_max_interval), + HDEV_PARAM_U16(0x000c, def_multi_adv_rotation_duration), + HDEV_PARAM_U16(0x000d, le_scan_interval), + HDEV_PARAM_U16(0x000e, le_scan_window), + HDEV_PARAM_U16(0x000f, le_scan_int_suspend), + HDEV_PARAM_U16(0x0010, le_scan_window_suspend), + HDEV_PARAM_U16(0x0011, le_scan_int_discovery), + HDEV_PARAM_U16(0x0012, le_scan_window_discovery), + HDEV_PARAM_U16(0x0013, le_scan_int_adv_monitor), + HDEV_PARAM_U16(0x0014, le_scan_window_adv_monitor), + HDEV_PARAM_U16(0x0015, le_scan_int_connect), + HDEV_PARAM_U16(0x0016, le_scan_window_connect), + HDEV_PARAM_U16(0x0017, le_conn_min_interval), + HDEV_PARAM_U16(0x0018, le_conn_max_interval), + HDEV_PARAM_U16(0x0019, le_conn_latency), + HDEV_PARAM_U16(0x001a, le_supv_timeout), + }; + struct mgmt_rp_read_def_system_config *rp = (void *)params; + + bt_dev_dbg(hdev, "sock %p", sk); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + 0, rp, sizeof(params)); +} + +#define TO_TLV(x) ((struct mgmt_tlv *)(x)) +#define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value))) + +int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + u16 buffer_left = data_len; + u8 *buffer = data; + + if (buffer_left < sizeof(struct mgmt_tlv)) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + + /* First pass to validate the tlv */ + while (buffer_left >= sizeof(struct mgmt_tlv)) { + const u8 len = TO_TLV(buffer)->length; + const u16 exp_len = sizeof(struct mgmt_tlv) + + len; + const u16 type = le16_to_cpu(TO_TLV(buffer)->type); + + if (buffer_left < exp_len) { + bt_dev_warn(hdev, "invalid len left %d, exp >= %d", + buffer_left, exp_len); + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + + /* Please see mgmt-api.txt for documentation of these values */ + switch (type) { + case 0x0000: + case 0x0001: + case 0x0002: + case 0x0003: + case 0x0004: + case 0x0005: + case 0x0006: + case 0x0007: + case 0x0008: + case 0x0009: + case 0x000a: + case 0x000b: + case 0x000c: + case 0x000d: + case 0x000e: + case 0x000f: + case 0x0010: + case 0x0011: + case 0x0012: + case 0x0013: + case 0x0014: + case 0x0015: + case 0x0016: + case 0x0017: + case 0x0018: + case 0x0019: + case 0x001a: + if (len != sizeof(u16)) { + bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d", + len, sizeof(u16), type); + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + break; + default: + bt_dev_warn(hdev, "unsupported parameter %u", type); + break; + } + + buffer_left -= exp_len; + buffer += exp_len; + } + + buffer_left = data_len; + buffer = data; + while (buffer_left >= sizeof(struct mgmt_tlv)) { + const u8 len = TO_TLV(buffer)->length; + const u16 exp_len = sizeof(struct mgmt_tlv) + + len; + const u16 type 
= le16_to_cpu(TO_TLV(buffer)->type); + + switch (type) { + case 0x0000: + hdev->def_page_scan_type = TLV_GET_LE16(buffer); + break; + case 0x0001: + hdev->def_page_scan_int = TLV_GET_LE16(buffer); + break; + case 0x0002: + hdev->def_page_scan_window = TLV_GET_LE16(buffer); + break; + case 0x0003: + hdev->def_inq_scan_type = TLV_GET_LE16(buffer); + break; + case 0x0004: + hdev->def_inq_scan_int = TLV_GET_LE16(buffer); + break; + case 0x0005: + hdev->def_inq_scan_window = TLV_GET_LE16(buffer); + break; + case 0x0006: + hdev->def_br_lsto = TLV_GET_LE16(buffer); + break; + case 0x0007: + hdev->def_page_timeout = TLV_GET_LE16(buffer); + break; + case 0x0008: + hdev->sniff_min_interval = TLV_GET_LE16(buffer); + break; + case 0x0009: + hdev->sniff_max_interval = TLV_GET_LE16(buffer); + break; + case 0x000a: + hdev->le_adv_min_interval = TLV_GET_LE16(buffer); + break; + case 0x000b: + hdev->le_adv_max_interval = TLV_GET_LE16(buffer); + break; + case 0x000c: + hdev->def_multi_adv_rotation_duration = + TLV_GET_LE16(buffer); + break; + case 0x000d: + hdev->le_scan_interval = TLV_GET_LE16(buffer); + break; + case 0x000e: + hdev->le_scan_window = TLV_GET_LE16(buffer); + break; + case 0x000f: + hdev->le_scan_int_suspend = TLV_GET_LE16(buffer); + break; + case 0x0010: + hdev->le_scan_window_suspend = TLV_GET_LE16(buffer); + break; + case 0x0011: + hdev->le_scan_int_discovery = TLV_GET_LE16(buffer); + break; + case 0x00012: + hdev->le_scan_window_discovery = TLV_GET_LE16(buffer); + break; + case 0x00013: + hdev->le_scan_int_adv_monitor = TLV_GET_LE16(buffer); + break; + case 0x00014: + hdev->le_scan_window_adv_monitor = TLV_GET_LE16(buffer); + break; + case 0x00015: + hdev->le_scan_int_connect = TLV_GET_LE16(buffer); + break; + case 0x00016: + hdev->le_scan_window_connect = TLV_GET_LE16(buffer); + break; + case 0x00017: + hdev->le_conn_min_interval = TLV_GET_LE16(buffer); + break; + case 0x00018: + hdev->le_conn_max_interval = TLV_GET_LE16(buffer); + break; + case 0x00019: + hdev->le_conn_latency = TLV_GET_LE16(buffer); + break; + case 0x0001a: + hdev->le_supv_timeout = TLV_GET_LE16(buffer); + break; + default: + bt_dev_warn(hdev, "unsupported parameter %u", type); + break; + } + + buffer_left -= exp_len; + buffer += exp_len; + } + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_SUCCESS); +} diff --git a/net/bluetooth/mgmt_config.h b/net/bluetooth/mgmt_config.h new file mode 100644 index 000000000000..51da6e63b1a0 --- /dev/null +++ b/net/bluetooth/mgmt_config.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (C) 2020 Google Corporation + */ + +int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + +int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); From bd6478559e27414e7def8247136b0dcb1e1392f8 Mon Sep 17 00:00:00 2001 From: Karthikeyan Periyasamy Date: Tue, 9 Jun 2020 13:03:33 +0530 Subject: [PATCH 0047/2056] ath11k: Add direct buffer ring support Add direct buffer ring (dbring) with helper API, which is used by the spectral scan. Initialise the direct buffer ring based on the dma ring capability, which get announced in the wmi service ready extended event. This ring is slightly changed from data path rings. Compare to data path ring this ring shares the hp and tp address to firmware though WMI commands. Also the replenish buffer size is derived from firmware announcement. driver receive indication through WMI event WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID. 
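For illustration, the expected bring-up order for a dbring client built on the helpers added here is sketched below. This is a minimal, hedged example only: the client ring, the WMI_DIRECT_BUF_SPECTRAL module id, the NULL handler and the batching values (2 responses per event, 1 ms timeout) are placeholders, and the real wiring for spectral scan lands in the follow-up patch.

static int example_dbring_client_setup(struct ath11k *ar,
				       struct ath11k_dbring *ring)
{
	struct ath11k_dbring_cap db_cap = { };
	int ret;

	/* capability announced by firmware in the extended service ready event */
	ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
				    WMI_DIRECT_BUF_SPECTRAL, &db_cap);
	if (ret)
		return ret;

	/* allocate the refill SRNG sized by the firmware-announced minimum */
	ret = ath11k_dbring_srng_setup(ar, ring, 0, db_cap.min_elem);
	if (ret)
		return ret;

	/* register the per-buffer handler and event batching parameters
	 * (handler left NULL here for brevity)
	 */
	ret = ath11k_dbring_set_cfg(ar, ring, 2, 1, NULL);
	if (ret)
		goto err_srng;

	/* post buffers sized and aligned per the firmware capability */
	ret = ath11k_dbring_buf_setup(ar, ring, &db_cap);
	if (ret)
		goto err_srng;

	/* share ring base, head and tail pointer addresses with firmware */
	ret = ath11k_dbring_wmi_cfg_setup(ar, ring, WMI_DIRECT_BUF_SPECTRAL);
	if (ret)
		goto err_buf;

	return 0;

err_buf:
	ath11k_dbring_buf_cleanup(ar, ring);
err_srng:
	ath11k_dbring_srng_cleanup(ar, ring);
	return ret;
}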
Tested-on: IPQ8074 WLAN.HK.2.1.0.1-01228-QCAHKSWPL_SILICONZ-1 Signed-off-by: Karthikeyan Periyasamy Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591688014-26441-1-git-send-email-periyasa@codeaurora.org --- drivers/net/wireless/ath/ath11k/Makefile | 3 +- drivers/net/wireless/ath/ath11k/core.h | 4 + drivers/net/wireless/ath/ath11k/dbring.c | 353 +++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/dbring.h | 79 +++++ drivers/net/wireless/ath/ath11k/dp.c | 3 +- drivers/net/wireless/ath/ath11k/wmi.c | 226 ++++++++++++++- drivers/net/wireless/ath/ath11k/wmi.h | 81 +++++- 7 files changed, 744 insertions(+), 5 deletions(-) create mode 100644 drivers/net/wireless/ath/ath11k/dbring.c create mode 100644 drivers/net/wireless/ath/ath11k/dbring.h diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index fe7736e53583..d17a3b418178 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -15,7 +15,8 @@ ath11k-y += core.o \ dp_rx.o \ debug.o \ ce.o \ - peer.o + peer.o \ + dbring.o ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 0cb844aceba4..a715fe61c01b 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -21,6 +21,7 @@ #include "hal_rx.h" #include "reg.h" #include "thermal.h" +#include "dbring.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -673,6 +674,9 @@ struct ath11k_base { /* Round robbin based TCL ring selector */ atomic_t tcl_ring_selector; + struct ath11k_dbring_cap *db_caps; + u32 num_db_cap; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c new file mode 100644 index 000000000000..b96258489456 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. 
+ */ + +#include "core.h" +#include "debug.h" + +static int ath11k_dbring_bufs_replenish(struct ath11k *ar, + struct ath11k_dbring *ring, + struct ath11k_dbring_element *buff, + gfp_t gfp) +{ + struct ath11k_base *ab = ar->ab; + struct hal_srng *srng; + dma_addr_t paddr; + void *ptr_aligned, *ptr_unaligned, *desc; + int ret; + int buf_id; + u32 cookie; + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + + lockdep_assert_held(&srng->lock); + + ath11k_hal_srng_access_begin(ab, srng); + + ptr_unaligned = buff->payload; + ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align); + paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz, + DMA_FROM_DEVICE); + + ret = dma_mapping_error(ab->dev, paddr); + if (ret) + goto err; + + spin_lock_bh(&ring->idr_lock); + buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp); + spin_unlock_bh(&ring->idr_lock); + if (buf_id < 0) { + ret = -ENOBUFS; + goto err_dma_unmap; + } + + desc = ath11k_hal_srng_src_get_next_entry(ab, srng); + if (!desc) { + ret = -ENOENT; + goto err_idr_remove; + } + + buff->paddr = paddr; + + cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) | + FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id); + + ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0); + + ath11k_hal_srng_access_end(ab, srng); + + return 0; + +err_idr_remove: + spin_lock_bh(&ring->idr_lock); + idr_remove(&ring->bufs_idr, buf_id); + spin_unlock_bh(&ring->idr_lock); +err_dma_unmap: + dma_unmap_single(ab->dev, paddr, ring->buf_sz, + DMA_FROM_DEVICE); +err: + ath11k_hal_srng_access_end(ab, srng); + return ret; +} + +static int ath11k_dbring_fill_bufs(struct ath11k *ar, + struct ath11k_dbring *ring, + gfp_t gfp) +{ + struct ath11k_dbring_element *buff; + struct hal_srng *srng; + int num_remain, req_entries, num_free; + u32 align; + int size, ret; + + srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id]; + + spin_lock_bh(&srng->lock); + + num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true); + req_entries = min(num_free, ring->bufs_max); + num_remain = req_entries; + align = ring->buf_align; + size = sizeof(*buff) + ring->buf_sz + align - 1; + + while (num_remain > 0) { + buff = kzalloc(size, gfp); + if (!buff) + break; + + ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp); + if (ret) { + ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n", + num_remain, req_entries); + kfree(buff); + break; + } + num_remain--; + } + + spin_unlock_bh(&srng->lock); + + return num_remain; +} + +int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar, + struct ath11k_dbring *ring, + enum wmi_direct_buffer_module id) +{ + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0}; + int ret; + + if (id >= WMI_DIRECT_BUF_MAX) + return -EINVAL; + + param.pdev_id = DP_SW2HW_MACID(ring->pdev_id); + param.module_id = id; + param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr); + param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr); + param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr); + param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr); + param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr); + param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr); + param.num_elems = ring->bufs_max; + param.buf_size = ring->buf_sz; + param.num_resp_per_event = ring->num_resp_per_event; + param.event_timeout_ms = ring->event_timeout_ms; + + ret = ath11k_wmi_pdev_dma_ring_cfg(ar, ¶m); + if (ret) { + ath11k_warn(ar->ab, "failed to setup db ring cfg\n"); + return ret; + } + + return 0; +} + +int ath11k_dbring_set_cfg(struct 
ath11k *ar, struct ath11k_dbring *ring, + u32 num_resp_per_event, u32 event_timeout_ms, + int (*handler)(struct ath11k *, + struct ath11k_dbring_data *)) +{ + if (WARN_ON(!ring)) + return -EINVAL; + + ring->num_resp_per_event = num_resp_per_event; + ring->event_timeout_ms = event_timeout_ms; + ring->handler = handler; + + return 0; +} + +int ath11k_dbring_buf_setup(struct ath11k *ar, + struct ath11k_dbring *ring, + struct ath11k_dbring_cap *db_cap) +{ + struct ath11k_base *ab = ar->ab; + struct hal_srng *srng; + int ret; + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + ring->bufs_max = ring->refill_srng.size / + ath11k_hal_srng_get_entrysize(HAL_RXDMA_DIR_BUF); + + ring->buf_sz = db_cap->min_buf_sz; + ring->buf_align = db_cap->min_buf_align; + ring->pdev_id = db_cap->pdev_id; + ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng); + ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng); + + ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL); + + return ret; +} + +int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring, + int ring_num, int num_entries) +{ + int ret; + + ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF, + ring_num, ar->pdev_idx, num_entries); + if (ret < 0) { + ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n", + ret, ring_num); + goto err; + } + + return 0; +err: + ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng); + return ret; +} + +int ath11k_dbring_get_cap(struct ath11k_base *ab, + u8 pdev_idx, + enum wmi_direct_buffer_module id, + struct ath11k_dbring_cap *db_cap) +{ + int i; + + if (!ab->num_db_cap || !ab->db_caps) + return -ENOENT; + + if (id >= WMI_DIRECT_BUF_MAX) + return -EINVAL; + + for (i = 0; i < ab->num_db_cap; i++) { + if (pdev_idx == ab->db_caps[i].pdev_id && + id == ab->db_caps[i].id) { + *db_cap = ab->db_caps[i]; + + return 0; + } + } + + return -ENOENT; +} + +int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, + struct ath11k_dbring_buf_release_event *ev) +{ + struct ath11k_dbring *ring; + struct hal_srng *srng; + struct ath11k *ar; + struct ath11k_dbring_element *buff; + struct ath11k_dbring_data handler_data; + struct ath11k_buffer_addr desc; + u8 *vaddr_unalign; + u32 num_entry, num_buff_reaped; + u8 pdev_idx, rbm; + u32 cookie; + int buf_id; + int size; + dma_addr_t paddr; + int ret = 0; + + pdev_idx = ev->fixed.pdev_id; + + if (pdev_idx >= ab->num_radios) { + ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx); + return -EINVAL; + } + + if (ev->fixed.num_buf_release_entry != + ev->fixed.num_meta_data_entry) { + ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n", + ev->fixed.num_buf_release_entry, + ev->fixed.num_meta_data_entry); + return -EINVAL; + } + + ar = ab->pdevs[pdev_idx].ar; + + rcu_read_lock(); + if (!rcu_dereference(ab->pdevs_active[pdev_idx])) { + ret = -EINVAL; + goto rcu_unlock; + } + + switch (ev->fixed.module_id) { + default: + ring = NULL; + ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n", + ev->fixed.module_id); + break; + } + + if (!ring) { + ret = -EINVAL; + goto rcu_unlock; + } + + srng = &ab->hal.srng_list[ring->refill_srng.ring_id]; + num_entry = ev->fixed.num_buf_release_entry; + size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1; + num_buff_reaped = 0; + + spin_lock_bh(&srng->lock); + + while (num_buff_reaped < num_entry) { + desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo; + desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi; + handler_data.meta = ev->meta_data[num_buff_reaped]; + + num_buff_reaped++; 
+ + ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm); + + buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); + + spin_lock_bh(&ring->idr_lock); + buff = idr_find(&ring->bufs_idr, buf_id); + if (!buff) { + spin_unlock_bh(&ring->idr_lock); + continue; + } + idr_remove(&ring->bufs_idr, buf_id); + spin_unlock_bh(&ring->idr_lock); + + dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz, + DMA_FROM_DEVICE); + + if (ring->handler) { + vaddr_unalign = buff->payload; + handler_data.data = PTR_ALIGN(vaddr_unalign, + ring->buf_align); + handler_data.data_sz = ring->buf_sz; + + ring->handler(ar, &handler_data); + } + + memset(buff, 0, size); + ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC); + } + + spin_unlock_bh(&srng->lock); + +rcu_unlock: + rcu_read_unlock(); + + return ret; +} + +void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) +{ + ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng); +} + +void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring) +{ + struct ath11k_dbring_element *buff; + int buf_id; + + spin_lock_bh(&ring->idr_lock); + idr_for_each_entry(&ring->bufs_idr, buff, buf_id) { + idr_remove(&ring->bufs_idr, buf_id); + dma_unmap_single(ar->ab->dev, buff->paddr, + ring->buf_sz, DMA_FROM_DEVICE); + kfree(buff); + } + + idr_destroy(&ring->bufs_idr); + spin_unlock_bh(&ring->idr_lock); +} diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h new file mode 100644 index 000000000000..f7fce9ef9c36 --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/dbring.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + */ + +#ifndef ATH11K_DBRING_H +#define ATH11K_DBRING_H + +#include +#include +#include +#include "dp.h" + +struct ath11k_dbring_element { + dma_addr_t paddr; + u8 payload[0]; +}; + +struct ath11k_dbring_data { + void *data; + u32 data_sz; + struct wmi_dma_buf_release_meta_data meta; +}; + +struct ath11k_dbring_buf_release_event { + struct ath11k_wmi_dma_buf_release_fixed_param fixed; + struct wmi_dma_buf_release_entry *buf_entry; + struct wmi_dma_buf_release_meta_data *meta_data; + u32 num_buf_entry; + u32 num_meta; +}; + +struct ath11k_dbring_cap { + u32 pdev_id; + enum wmi_direct_buffer_module id; + u32 min_elem; + u32 min_buf_sz; + u32 min_buf_align; +}; + +struct ath11k_dbring { + struct dp_srng refill_srng; + struct idr bufs_idr; + /* Protects bufs_idr */ + spinlock_t idr_lock; + dma_addr_t tp_addr; + dma_addr_t hp_addr; + int bufs_max; + u32 pdev_id; + u32 buf_sz; + u32 buf_align; + u32 num_resp_per_event; + u32 event_timeout_ms; + int (*handler)(struct ath11k *, struct ath11k_dbring_data *); +}; + +int ath11k_dbring_set_cfg(struct ath11k *ar, + struct ath11k_dbring *ring, + u32 num_resp_per_event, + u32 event_timeout_ms, + int (*handler)(struct ath11k *, + struct ath11k_dbring_data *)); +int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar, + struct ath11k_dbring *ring, + enum wmi_direct_buffer_module id); +int ath11k_dbring_buf_setup(struct ath11k *ar, + struct ath11k_dbring *ring, + struct ath11k_dbring_cap *db_cap); +int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring, + int ring_num, int num_entries); +int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, + struct ath11k_dbring_buf_release_event *ev); +int ath11k_dbring_get_cap(struct ath11k_base *ab, + u8 pdev_idx, + enum wmi_direct_buffer_module id, + struct ath11k_dbring_cap *db_cap); +void 
ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring); +void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring); +#endif /* ATH11K_DBRING_H */ diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 9ae743e528af..1d64c3c51ac9 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -172,11 +172,12 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, case HAL_RXDMA_DST: case HAL_RXDMA_MONITOR_DST: case HAL_RXDMA_MONITOR_DESC: - case HAL_RXDMA_DIR_BUF: params.intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_OTHER; params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER; break; + case HAL_RXDMA_DIR_BUF: + break; default: ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type); return -EINVAL; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 8191cb0a997d..6033ec4e11e5 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -27,6 +27,11 @@ struct wmi_tlv_svc_ready_parse { bool wmi_svc_bitmap_done; }; +struct wmi_tlv_dma_ring_caps_parse { + struct wmi_dma_ring_capabilities *dma_ring_caps; + u32 n_dma_ring_caps; +}; + struct wmi_tlv_svc_rdy_ext_parse { struct ath11k_service_ext_param param; struct wmi_soc_mac_phy_hw_mode_caps *hw_caps; @@ -39,9 +44,19 @@ struct wmi_tlv_svc_rdy_ext_parse { struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps; struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps; u32 n_ext_hal_reg_caps; + struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; bool hw_mode_done; bool mac_phy_done; bool ext_hal_reg_done; + bool mac_phy_chainmask_combo_done; + bool mac_phy_chainmask_cap_done; + bool oem_dma_ring_cap_done; + bool dma_ring_cap_done; +}; + +struct wmi_tlv_svc_rdy_ext2_parse { + struct wmi_tlv_dma_ring_caps_parse dma_caps_parse; + bool dma_ring_cap_done; }; struct wmi_tlv_rdy_parse { @@ -3373,6 +3388,52 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); } +int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param) +{ + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->pdev_id = param->pdev_id; + cmd->module_id = param->module_id; + cmd->base_paddr_lo = param->base_paddr_lo; + cmd->base_paddr_hi = param->base_paddr_hi; + cmd->head_idx_paddr_lo = param->head_idx_paddr_lo; + cmd->head_idx_paddr_hi = param->head_idx_paddr_hi; + cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo; + cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi; + cmd->num_elems = param->num_elems; + cmd->buf_size = param->buf_size; + cmd->num_resp_per_event = param->num_resp_per_event; + cmd->event_timeout_ms = param->event_timeout_ms; + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, + WMI_PDEV_DMA_RING_CFG_REQ_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "failed to send dma ring cfg req wmi cmd\n"); + goto err; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI DMA ring cfg req cmd pdev_id 0x%x\n", + param->pdev_id); + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + static int ath11k_wmi_tlv_hw_mode_caps_parse(struct 
ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) @@ -3553,6 +3614,95 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc, return 0; } +static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_dma_ring_caps_parse *parse = data; + + if (tag != WMI_TAG_DMA_RING_CAPABILITIES) + return -EPROTO; + + parse->n_dma_ring_caps++; + return 0; +} + +static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab, + u32 num_cap) +{ + size_t sz; + void *ptr; + + sz = num_cap * sizeof(struct ath11k_dbring_cap); + ptr = kzalloc(sz, GFP_ATOMIC); + if (!ptr) + return -ENOMEM; + + ab->db_caps = ptr; + ab->num_db_cap = num_cap; + + return 0; +} + +static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab) +{ + kfree(ab->db_caps); + ab->db_caps = NULL; +} + +static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab, + u16 len, const void *ptr, void *data) +{ + struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data; + struct wmi_dma_ring_capabilities *dma_caps; + struct ath11k_dbring_cap *dir_buff_caps; + int ret; + u32 i; + + dma_caps_parse->n_dma_ring_caps = 0; + dma_caps = (struct wmi_dma_ring_capabilities *)ptr; + ret = ath11k_wmi_tlv_iter(ab, ptr, len, + ath11k_wmi_tlv_dma_ring_caps_parse, + dma_caps_parse); + if (ret) { + ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret); + return ret; + } + + if (!dma_caps_parse->n_dma_ring_caps) + return 0; + + if (ab->num_db_cap) { + ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n"); + return 0; + } + + ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps); + if (ret) + return ret; + + dir_buff_caps = ab->db_caps; + for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) { + if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) { + ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id); + ret = -EINVAL; + goto free_dir_buff; + } + + dir_buff_caps[i].id = dma_caps[i].module_id; + dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id); + dir_buff_caps[i].min_elem = dma_caps[i].min_elem; + dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz; + dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align; + } + + return 0; + +free_dir_buff: + ath11k_wmi_free_dbring_caps(ab); + return ret; +} + static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab, u16 tag, u16 len, const void *ptr, void *data) @@ -3609,7 +3759,19 @@ static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab, return ret; svc_rdy_ext->ext_hal_reg_done = true; - complete(&ab->wmi_ab.service_ready); + } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) { + svc_rdy_ext->mac_phy_chainmask_combo_done = true; + } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) { + svc_rdy_ext->mac_phy_chainmask_cap_done = true; + } else if (!svc_rdy_ext->oem_dma_ring_cap_done) { + svc_rdy_ext->oem_dma_ring_cap_done = true; + } else if (!svc_rdy_ext->dma_ring_cap_done) { + ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, + &svc_rdy_ext->dma_caps_parse); + if (ret) + return ret; + + svc_rdy_ext->dma_ring_cap_done = true; } break; @@ -3630,11 +3792,66 @@ static int ath11k_service_ready_ext_event(struct ath11k_base *ab, &svc_rdy_ext); if (ret) { ath11k_warn(ab, "failed to parse tlv %d\n", ret); - return ret; + goto err; } + if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map)) + complete(&ab->wmi_ab.service_ready); + kfree(svc_rdy_ext.mac_phy_caps); return 0; + +err: + ath11k_wmi_free_dbring_caps(ab); + 
return ret; +} + +static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_svc_rdy_ext2_parse *parse = data; + int ret; + + switch (tag) { + case WMI_TAG_ARRAY_STRUCT: + if (!parse->dma_ring_cap_done) { + ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr, + &parse->dma_caps_parse); + if (ret) + return ret; + + parse->dma_ring_cap_done = true; + } + break; + default: + break; + } + + return 0; +} + +static int ath11k_service_ready_ext2_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { }; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_svc_rdy_ext2_parse, + &svc_rdy_ext2); + if (ret) { + ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret); + goto err; + } + + complete(&ab->wmi_ab.service_ready); + + return 0; + +err: + ath11k_wmi_free_dbring_caps(ab); + return ret; } static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb, @@ -6045,6 +6262,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_SERVICE_READY_EXT_EVENTID: ath11k_service_ready_ext_event(ab, skb); break; + case WMI_SERVICE_READY_EXT2_EVENTID: + ath11k_service_ready_ext2_event(ab, skb); + break; case WMI_REG_CHAN_LIST_CC_EVENTID: ath11k_reg_chan_list_event(ab, skb); break; @@ -6325,4 +6545,6 @@ void ath11k_wmi_detach(struct ath11k_base *ab) for (i = 0; i < ab->htc.wmi_ep_count; i++) ath11k_wmi_pdev_detach(ab, i); + + ath11k_wmi_free_dbring_caps(ab); } diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 1162bd7a5f87..82c493b94ed8 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -24,6 +24,8 @@ struct ath11k_fw_stats; #define HE_PET_8_USEC 1 #define HE_PET_16_USEC 2 +#define WMI_MAX_CHAINS 8 + #define WMI_MAX_NUM_SS MAX_HE_NSS #define WMI_MAX_NUM_RU MAX_HE_RU @@ -596,6 +598,11 @@ enum wmi_tlv_event_id { WMI_PDEV_DMA_RING_CFG_RSP_EVENTID, WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID, WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID, + WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID, + WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID, + WMI_PDEV_RAP_INFO_EVENTID, + WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID, + WMI_SERVICE_READY_EXT2_EVENTID, WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV), WMI_VDEV_STOPPED_EVENTID, WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID, @@ -2024,9 +2031,9 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211, WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212, WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213, + WMI_TLV_SERVICE_EXT2_MSG = 220, WMI_MAX_EXT_SERVICE - }; enum { @@ -2087,6 +2094,14 @@ enum wmi_beacon_gen_mode { WMI_BEACON_BURST_MODE = 1 }; +enum wmi_direct_buffer_module { + WMI_DIRECT_BUF_SPECTRAL = 0, + WMI_DIRECT_BUF_CFR = 1, + + /* keep it last */ + WMI_DIRECT_BUF_MAX +}; + struct wmi_host_pdev_band_to_mac { u32 pdev_id; u32 start_freq; @@ -2393,6 +2408,15 @@ struct wmi_mac_addr { } __packed; } __packed; +struct wmi_dma_ring_capabilities { + u32 tlv_header; + u32 pdev_id; + u32 module_id; + u32 min_elem; + u32 min_buf_sz; + u32 min_buf_align; +} __packed; + struct wmi_ready_event_min { struct wmi_abi_version fw_abi_vers; struct wmi_mac_addr mac_addr; @@ -4772,6 +4796,59 @@ struct ath11k_wmi_pdev_lro_config_cmd { u32 pdev_id; } __packed; +struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd { + u32 tlv_header; + u32 pdev_id; + u32 module_id; /* see enum wmi_direct_buffer_module */ + u32 base_paddr_lo; + 
u32 base_paddr_hi; + u32 head_idx_paddr_lo; + u32 head_idx_paddr_hi; + u32 tail_idx_paddr_lo; + u32 tail_idx_paddr_hi; + u32 num_elems; /* Number of elems in the ring */ + u32 buf_size; /* size of allocated buffer in bytes */ + + /* Number of wmi_dma_buf_release_entry packed together */ + u32 num_resp_per_event; + + /* Target should timeout and send whatever resp + * it has if this time expires, units in milliseconds + */ + u32 event_timeout_ms; +} __packed; + +struct ath11k_wmi_dma_buf_release_fixed_param { + u32 pdev_id; + u32 module_id; + u32 num_buf_release_entry; + u32 num_meta_data_entry; +} __packed; + +struct wmi_dma_buf_release_entry { + u32 tlv_header; + u32 paddr_lo; + + /* Bits 11:0: address of data + * Bits 31:12: host context data + */ + u32 paddr_hi; +} __packed; + +#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0) +#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16) + +#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0) + +struct wmi_dma_buf_release_meta_data { + u32 tlv_header; + s32 noise_floor[WMI_MAX_CHAINS]; + u32 reset_delay; + u32 freq1; + u32 freq2; + u32 ch_width; +} __packed; + struct target_resource_config { u32 num_vdevs; u32 num_peers; @@ -4979,4 +5056,6 @@ int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id, int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, bool enable); int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id); +int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param); #endif From 9d11b7bff9508101d05f06e461ab81b5f0dc90ae Mon Sep 17 00:00:00 2001 From: Karthikeyan Periyasamy Date: Tue, 9 Jun 2020 13:03:34 +0530 Subject: [PATCH 0048/2056] ath11k: add support for spectral scan spectral scan control interface is exposed through debugfs eentry. Relayfs is used to collect the spectral data. These interfaces are similar to ath10k spectral. 
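Each record handed to relayfs (and hence appearing in the fft_samples.dump file produced by the command sequence below) begins with the generic fft_sample_tlv header. A minimal userspace walker is sketched here for illustration only, assuming the spectral_common.h layout of one type byte followed by a big-endian 16-bit payload length; the dump file name is taken from the example below.

#include <stdint.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "fft_samples.dump", "rb");
	uint8_t hdr[3];

	if (!f)
		return 1;

	while (fread(hdr, 1, sizeof(hdr), f) == sizeof(hdr)) {
		/* hdr[0] = sample type, hdr[1..2] = __be16 payload length */
		uint16_t len = (hdr[1] << 8) | hdr[2];

		printf("sample type 0x%02x, payload %u bytes\n", hdr[0], len);
		if (fseek(f, len, SEEK_CUR))	/* skip type-specific payload */
			break;
	}

	fclose(f);
	return 0;
}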
spectral debugfs interfaces are below, echo background > /sys/kernel/debug/ieee80211/phy0/ath11k/spectral_scan_ctl echo trigger > /sys/kernel/debug/ieee80211/phy0/ath11k/spectral_scan_ctl iw dev wlan0 scan echo disable > /sys/kernel/debug/ieee80211/phy0/ath11k/spectral_scan_ctl cat /sys/kernel/debug/ieee80211/phy0/ath11k/spectral_scan0 > fft_samples.dump Tested-on: IPQ8074 WLAN.HK.2.1.0.1-01228-QCAHKSWPL_SILICONZ-1 Signed-off-by: Karthikeyan Periyasamy Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591688014-26441-2-git-send-email-periyasa@codeaurora.org --- drivers/net/wireless/ath/ath11k/Kconfig | 9 + drivers/net/wireless/ath/ath11k/Makefile | 1 + drivers/net/wireless/ath/ath11k/core.c | 10 + drivers/net/wireless/ath/ath11k/core.h | 5 + drivers/net/wireless/ath/ath11k/dbring.c | 3 + drivers/net/wireless/ath/ath11k/mac.c | 3 + drivers/net/wireless/ath/ath11k/spectral.c | 1023 ++++++++++++++++++++ drivers/net/wireless/ath/ath11k/spectral.h | 82 ++ drivers/net/wireless/ath/ath11k/wmi.c | 198 ++++ drivers/net/wireless/ath/ath11k/wmi.h | 63 ++ drivers/net/wireless/ath/spectral_common.h | 17 + 11 files changed, 1414 insertions(+) create mode 100644 drivers/net/wireless/ath/ath11k/spectral.c create mode 100644 drivers/net/wireless/ath/ath11k/spectral.h diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig index 738f99090d83..093a755a0c46 100644 --- a/drivers/net/wireless/ath/ath11k/Kconfig +++ b/drivers/net/wireless/ath/ath11k/Kconfig @@ -34,3 +34,12 @@ config ATH11K_TRACING depends on ATH11K && EVENT_TRACING ---help--- Select this to use ath11k tracing infrastructure. + +config ATH11K_SPECTRAL + bool "QCA ath11k spectral scan support" + depends on ATH11K_DEBUGFS + depends on RELAY + help + Enable ath11k spectral scan support + + Say Y to enable access to the FFT/spectral data via debugfs. 
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile index d17a3b418178..104186373c9e 100644 --- a/drivers/net/wireless/ath/ath11k/Makefile +++ b/drivers/net/wireless/ath/ath11k/Makefile @@ -22,6 +22,7 @@ ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o ath11k-$(CONFIG_ATH11K_TRACING) += trace.o ath11k-$(CONFIG_THERMAL) += thermal.o +ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 02501cc154fe..905cd8beaf28 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -400,8 +400,16 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab) goto err_dp_pdev_free; } + ret = ath11k_spectral_init(ab); + if (ret) { + ath11k_err(ab, "failed to init spectral %d\n", ret); + goto err_thermal_unregister; + } + return 0; +err_thermal_unregister: + ath11k_thermal_unregister(ab); err_dp_pdev_free: ath11k_dp_pdev_free(ab); err_mac_unregister: @@ -414,6 +422,7 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab) static void ath11k_core_pdev_destroy(struct ath11k_base *ab) { + ath11k_spectral_deinit(ab); ath11k_thermal_unregister(ab); ath11k_mac_unregister(ab); ath11k_hif_irq_disable(ab); @@ -582,6 +591,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab) ath11k_thermal_unregister(ab); ath11k_hif_irq_disable(ab); ath11k_dp_pdev_free(ab); + ath11k_spectral_deinit(ab); ath11k_hif_stop(ab); ath11k_wmi_detach(ab); ath11k_dp_pdev_reo_cleanup(ab); diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index a715fe61c01b..7e9e9762b1ca 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -22,6 +22,7 @@ #include "reg.h" #include "thermal.h" #include "dbring.h" +#include "spectral.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -216,6 +217,7 @@ struct ath11k_vif { bool is_started; bool is_up; + bool spectral_enabled; u32 aid; u8 bssid[ETH_ALEN]; struct cfg80211_bitrate_mask bitrate_mask; @@ -541,6 +543,9 @@ struct ath11k { u32 cached_ppdu_id; #ifdef CONFIG_ATH11K_DEBUGFS struct ath11k_debug debug; +#endif +#ifdef CONFIG_ATH11K_SPECTRAL + struct ath11k_spectral spectral; #endif bool dfs_block_radar_events; struct ath11k_thermal thermal; diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c index b96258489456..cf20db370123 100644 --- a/drivers/net/wireless/ath/ath11k/dbring.c +++ b/drivers/net/wireless/ath/ath11k/dbring.c @@ -267,6 +267,9 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab, } switch (ev->fixed.module_id) { + case WMI_DIRECT_BUF_SPECTRAL: + ring = ath11k_spectral_get_dbring(ar); + break; default: ring = NULL; ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n", diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index b3ae1a0bceb3..5ba8f0986079 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2208,6 +2208,9 @@ static int ath11k_start_scan(struct ath11k *ar, lockdep_assert_held(&ar->conf_mutex); + if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND) + ath11k_spectral_reset_buffer(ar); + ret = ath11k_wmi_send_scan_start_cmd(ar, arg); if (ret) return ret; diff --git 
a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c new file mode 100644 index 000000000000..1c5d65bb411f --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/spectral.c @@ -0,0 +1,1023 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + */ + +#include +#include "core.h" +#include "debug.h" + +#define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT 2 +#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1 + +#define ATH11K_SPECTRAL_DWORD_SIZE 4 +/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */ +#define ATH11K_SPECTRAL_BIN_SIZE 4 +#define ATH11K_SPECTRAL_ATH11K_MIN_BINS 64 +#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32 +#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256 + +#define ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK 0xFF + +#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095 + +/* Max channel computed by sum of 2g and 5g band channels */ +#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41 +#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70 +#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE (sizeof(struct fft_sample_ath11k) + \ + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS) +#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \ + ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL) +#define ATH11K_SPECTRAL_SUB_BUFF_SIZE ATH11K_SPECTRAL_PER_SAMPLE_SIZE +#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE + +#define ATH11K_SPECTRAL_20MHZ 20 +#define ATH11K_SPECTRAL_40MHZ 40 +#define ATH11K_SPECTRAL_80MHZ 80 + +#define ATH11K_SPECTRAL_SIGNATURE 0xFA + +#define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY 0x0 +#define ATH11K_SPECTRAL_TAG_RADAR_FFT 0x1 +#define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY 0x2 +#define ATH11K_SPECTRAL_TAG_SCAN_SEARCH 0x3 + +#define SPECTRAL_TLV_HDR_LEN GENMASK(15, 0) +#define SPECTRAL_TLV_HDR_TAG GENMASK(23, 16) +#define SPECTRAL_TLV_HDR_SIGN GENMASK(31, 24) + +#define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN GENMASK(7, 0) +#define SPECTRAL_SUMMARY_INFO0_OB_FLAG BIT(8) +#define SPECTRAL_SUMMARY_INFO0_GRP_IDX GENMASK(16, 9) +#define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT BIT(17) +#define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB GENMASK(27, 18) +#define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN BIT(28) +#define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID GENMASK(30, 29) +#define SPECTRAL_SUMMARY_INFO0_PRI80 BIT(31) + +#define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX GENMASK(11, 0) +#define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE GENMASK(21, 12) +#define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK GENMASK(29, 22) +#define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE BIT(30) + +struct spectral_tlv { + __le32 timestamp; + __le32 header; +} __packed; + +struct spectral_summary_fft_report { + __le32 timestamp; + __le32 tlv_header; + __le32 info0; + __le32 reserve0; + __le32 info2; + __le32 reserve1; +} __packed; + +struct ath11k_spectral_summary_report { + struct wmi_dma_buf_release_meta_data meta; + u32 timestamp; + u8 agc_total_gain; + u8 grp_idx; + u16 inb_pwr_db; + s16 peak_idx; + u16 peak_mag; + u8 detector_id; + bool out_of_band_flag; + bool rf_saturation; + bool primary80; + bool gain_change; + bool false_scan; +}; + +#define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID GENMASK(1, 0) +#define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM GENMASK(4, 2) +#define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK GENMASK(16, 5) +#define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX GENMASK(27, 17) +#define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX GENMASK(30, 28) + +#define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB GENMASK(8, 0) +#define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB GENMASK(16, 9) + +#define 
SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS GENMASK(7, 0) +#define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE GENMASK(17, 8) +#define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB GENMASK(24, 18) +#define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB GENMASK(31, 25) + +struct spectral_search_fft_report { + __le32 timestamp; + __le32 tlv_header; + __le32 info0; + __le32 info1; + __le32 info2; + __le32 reserve0; + u8 bins[0]; +} __packed; + +struct ath11k_spectral_search_report { + u32 timestamp; + u8 detector_id; + u8 fft_count; + u16 radar_check; + s16 peak_idx; + u8 chain_idx; + u16 base_pwr_db; + u8 total_gain_db; + u8 strong_bin_count; + u16 peak_mag; + u8 avg_pwr_db; + u8 rel_pwr_db; +}; + +static struct dentry *create_buf_file_handler(const char *filename, + struct dentry *parent, + umode_t mode, + struct rchan_buf *buf, + int *is_global) +{ + struct dentry *buf_file; + + buf_file = debugfs_create_file(filename, mode, parent, buf, + &relay_file_operations); + *is_global = 1; + return buf_file; +} + +static int remove_buf_file_handler(struct dentry *dentry) +{ + debugfs_remove(dentry); + + return 0; +} + +static struct rchan_callbacks rfs_scan_cb = { + .create_buf_file = create_buf_file_handler, + .remove_buf_file = remove_buf_file_handler, +}; + +static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + + if (list_empty(&ar->arvifs)) + return NULL; + + /* if there already is a vif doing spectral, return that. */ + list_for_each_entry(arvif, &ar->arvifs, list) + if (arvif->spectral_enabled) + return arvif; + + /* otherwise, return the first vif. */ + return list_first_entry(&ar->arvifs, typeof(*arvif), list); +} + +static int ath11k_spectral_scan_trigger(struct ath11k *ar) +{ + struct ath11k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath11k_spectral_get_vdev(ar); + if (!arvif) + return -ENODEV; + + if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED) + return 0; + + ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, + ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (ret) + return ret; + + ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, + ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER, + ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE); + if (ret) + return ret; + + return 0; +} + +static int ath11k_spectral_scan_config(struct ath11k *ar, + enum ath11k_spectral_mode mode) +{ + struct ath11k_wmi_vdev_spectral_conf_param param = { 0 }; + struct ath11k_vif *arvif; + int ret, count; + + lockdep_assert_held(&ar->conf_mutex); + + arvif = ath11k_spectral_get_vdev(ar); + if (!arvif) + return -ENODEV; + + arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED); + ar->spectral.mode = mode; + + ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, + ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR, + ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE); + if (ret) { + ath11k_warn(ar->ab, "failed to enable spectral scan: %d\n", ret); + return ret; + } + + if (mode == ATH11K_SPECTRAL_DISABLED) + return 0; + + if (mode == ATH11K_SPECTRAL_BACKGROUND) + count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT; + else + count = max_t(u16, 1, ar->spectral.count); + + param.vdev_id = arvif->vdev_id; + param.scan_count = count; + param.scan_fft_size = ar->spectral.fft_size; + param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT; + param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT; + param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT; + param.scan_restart_ena = 
ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT; + param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT; + param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT; + param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT; + param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT; + param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT; + param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT; + param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT; + param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT; + param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT; + param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT; + param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT; + param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT; + + ret = ath11k_wmi_vdev_spectral_conf(ar, ¶m); + if (ret) { + ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret); + return ret; + } + + return 0; +} + +static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char *mode = ""; + size_t len; + enum ath11k_spectral_mode spectral_mode; + + mutex_lock(&ar->conf_mutex); + spectral_mode = ar->spectral.mode; + mutex_unlock(&ar->conf_mutex); + + switch (spectral_mode) { + case ATH11K_SPECTRAL_DISABLED: + mode = "disable"; + break; + case ATH11K_SPECTRAL_BACKGROUND: + mode = "background"; + break; + case ATH11K_SPECTRAL_MANUAL: + mode = "manual"; + break; + } + + len = strlen(mode); + return simple_read_from_buffer(user_buf, count, ppos, mode, len); +} + +static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + ssize_t len; + int ret; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + + mutex_lock(&ar->conf_mutex); + + if (strncmp("trigger", buf, 7) == 0) { + if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL || + ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) { + /* reset the configuration to adopt possibly changed + * debugfs parameters + */ + ret = ath11k_spectral_scan_config(ar, ar->spectral.mode); + if (ret) { + ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n", + ret); + goto unlock; + } + + ret = ath11k_spectral_scan_trigger(ar); + if (ret) { + ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n", + ret); + } + } else { + ret = -EINVAL; + } + } else if (strncmp("background", buf, 10) == 0) { + ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND); + } else if (strncmp("manual", buf, 6) == 0) { + ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL); + } else if (strncmp("disable", buf, 7) == 0) { + ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED); + } else { + ret = -EINVAL; + } + +unlock: + mutex_unlock(&ar->conf_mutex); + + if (ret) + return ret; + + return count; +} + +static const struct file_operations fops_scan_ctl = { + .read = ath11k_read_file_spec_scan_ctl, + .write = ath11k_write_file_spec_scan_ctl, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath11k_read_file_spectral_count(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + size_t len; + u16 spectral_count; + + mutex_lock(&ar->conf_mutex); + 
spectral_count = ar->spectral.count; + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", spectral_count); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath11k_write_file_spectral_count(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.count = val; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_scan_count = { + .read = ath11k_read_file_spectral_count, + .write = ath11k_write_file_spectral_count, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath11k_read_file_spectral_bins(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + char buf[32]; + unsigned int bins, fft_size; + size_t len; + + mutex_lock(&ar->conf_mutex); + + fft_size = ar->spectral.fft_size; + bins = 1 << fft_size; + + mutex_unlock(&ar->conf_mutex); + + len = sprintf(buf, "%d\n", bins); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static ssize_t ath11k_write_file_spectral_bins(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath11k *ar = file->private_data; + unsigned long val; + char buf[32]; + ssize_t len; + + len = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, user_buf, len)) + return -EFAULT; + + buf[len] = '\0'; + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS || + val > SPECTRAL_ATH11K_MAX_NUM_BINS) + return -EINVAL; + + if (!is_power_of_2(val)) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + ar->spectral.fft_size = ilog2(val); + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_scan_bins = { + .read = ath11k_read_file_spectral_bins, + .write = ath11k_write_file_spectral_bins, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath11k_spectral_pull_summary(struct ath11k *ar, + struct wmi_dma_buf_release_meta_data *meta, + struct spectral_summary_fft_report *summary, + struct ath11k_spectral_summary_report *report) +{ + report->timestamp = __le32_to_cpu(summary->timestamp); + report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN, + __le32_to_cpu(summary->info0)); + report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG, + __le32_to_cpu(summary->info0)); + report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX, + __le32_to_cpu(summary->info0)); + report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT, + __le32_to_cpu(summary->info0)); + report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB, + __le32_to_cpu(summary->info0)); + report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN, + __le32_to_cpu(summary->info0)); + report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID, + __le32_to_cpu(summary->info0)); + report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80, + __le32_to_cpu(summary->info0)); + report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX, + __le32_to_cpu(summary->info2)); + 
report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE, + __le32_to_cpu(summary->info2)); + report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE, + __le32_to_cpu(summary->info2)); + + memcpy(&report->meta, meta, sizeof(*meta)); + + return 0; +} + +static int ath11k_spectral_pull_search(struct ath11k *ar, + struct spectral_search_fft_report *search, + struct ath11k_spectral_search_report *report) +{ + report->timestamp = __le32_to_cpu(search->timestamp); + report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID, + __le32_to_cpu(search->info0)); + report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM, + __le32_to_cpu(search->info0)); + report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK, + __le32_to_cpu(search->info0)); + report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX, + __le32_to_cpu(search->info0)); + report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX, + __le32_to_cpu(search->info0)); + report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB, + __le32_to_cpu(search->info1)); + report->total_gain_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB, + __le32_to_cpu(search->info1)); + report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS, + __le32_to_cpu(search->info2)); + report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE, + __le32_to_cpu(search->info2)); + report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB, + __le32_to_cpu(search->info2)); + report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB, + __le32_to_cpu(search->info2)); + + return 0; +} + +static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude, + int bin_len, u8 *bins) +{ + int dc_pos; + u8 max_exp; + + dc_pos = bin_len / 2; + + /* peak index outside of bins */ + if (dc_pos <= max_index || -dc_pos >= max_index) + return 0; + + for (max_exp = 0; max_exp < 8; max_exp++) { + if (bins[dc_pos + max_index] == (max_magnitude >> max_exp)) + break; + } + + /* max_exp not found */ + if (bins[dc_pos + max_index] != (max_magnitude >> max_exp)) + return 0; + + return max_exp; +} + +static void ath11k_spectral_parse_16bit_fft(u8 *outbins, u8 *inbins, int num_bins) +{ + int i; + __le16 *data = (__le16 *)inbins; + + i = 0; + while (i < num_bins) { + outbins[i] = (__le16_to_cpu(data[i])) & + ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK; + i++; + } +} + +static +int ath11k_spectral_process_fft(struct ath11k *ar, + struct ath11k_spectral_summary_report *summary, + void *data, + struct fft_sample_ath11k *fft_sample, + u32 data_len) +{ + struct ath11k_base *ab = ar->ab; + struct spectral_search_fft_report *fft_report = data; + struct ath11k_spectral_search_report search; + struct spectral_tlv *tlv; + int tlv_len, bin_len, num_bins; + u16 length, freq; + u8 chan_width_mhz; + int ret; + + lockdep_assert_held(&ar->spectral.lock); + + tlv = (struct spectral_tlv *)data; + tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header)); + /* convert Dword into bytes */ + tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; + bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv)); + + if (data_len < (bin_len + sizeof(*fft_report))) { + ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n", + bin_len, data_len); + return -EINVAL; + } + + num_bins = bin_len / ATH11K_SPECTRAL_BIN_SIZE; + /* Only In-band bins are useful to user for visualize */ + num_bins >>= 1; + + if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS || + num_bins > 
ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS || + !is_power_of_2(num_bins)) { + ath11k_warn(ab, "Invalid num of bins %d\n", num_bins); + return -EINVAL; + } + + ret = ath11k_spectral_pull_search(ar, data, &search); + if (ret) { + ath11k_warn(ab, "failed to pull search report %d\n", ret); + return ret; + } + + chan_width_mhz = summary->meta.ch_width; + + switch (chan_width_mhz) { + case ATH11K_SPECTRAL_20MHZ: + case ATH11K_SPECTRAL_40MHZ: + case ATH11K_SPECTRAL_80MHZ: + fft_sample->chan_width_mhz = chan_width_mhz; + break; + default: + ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz); + return -EINVAL; + } + + length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins; + fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K; + fft_sample->tlv.length = __cpu_to_be16(length); + + fft_sample->tsf = __cpu_to_be32(search.timestamp); + fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag); + fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX, + __le32_to_cpu(fft_report->info0)); + + summary->inb_pwr_db >>= 1; + fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db); + fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]); + + freq = summary->meta.freq1; + fft_sample->freq1 = __cpu_to_be16(freq); + + freq = summary->meta.freq2; + fft_sample->freq2 = __cpu_to_be16(freq); + + ath11k_spectral_parse_16bit_fft(fft_sample->data, + fft_report->bins, + num_bins); + + fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index, + search.peak_mag, + num_bins, + fft_sample->data); + + if (ar->spectral.rfs_scan) + relay_write(ar->spectral.rfs_scan, fft_sample, + length + sizeof(struct fft_sample_tlv)); + + return 0; +} + +static int ath11k_spectral_process_data(struct ath11k *ar, + struct ath11k_dbring_data *param) +{ + struct ath11k_base *ab = ar->ab; + struct spectral_tlv *tlv; + struct spectral_summary_fft_report *summary = NULL; + struct ath11k_spectral_summary_report summ_rpt; + struct fft_sample_ath11k *fft_sample = NULL; + u8 *data; + u32 data_len, i; + u8 sign, tag; + int tlv_len, sample_sz; + int ret; + bool quit = false; + + spin_lock_bh(&ar->spectral.lock); + + if (!ar->spectral.enabled) { + ret = -EINVAL; + goto unlock; + } + + sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS; + fft_sample = kmalloc(sample_sz, GFP_ATOMIC); + if (!fft_sample) { + ret = -ENOBUFS; + goto unlock; + } + + data = param->data; + data_len = param->data_sz; + i = 0; + while (!quit && (i < data_len)) { + if ((i + sizeof(*tlv)) > data_len) { + ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n", + i); + ret = -EINVAL; + goto err; + } + + tlv = (struct spectral_tlv *)&data[i]; + sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN, + __le32_to_cpu(tlv->header)); + if (sign != ATH11K_SPECTRAL_SIGNATURE) { + ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n", + sign, i); + ret = -EINVAL; + goto err; + } + + tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, + __le32_to_cpu(tlv->header)); + /* convert Dword into bytes */ + tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE; + if ((i + sizeof(*tlv) + tlv_len) > data_len) { + ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n", + i, tlv_len, data_len); + ret = -EINVAL; + goto err; + } + + tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG, + __le32_to_cpu(tlv->header)); + switch (tag) { + case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY: + /* HW bug in tlv length of summary report, + * HW report 3 DWORD size but the data payload + * is 4 DWORD size (16 bytes). 
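+ * (the reported length is one DWORD short, so tlv_len is
+ * recomputed below from the size of the summary struct)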
+ * Need to remove this workaround once HW bug fixed + */ + tlv_len = sizeof(*summary) - sizeof(*tlv); + + if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) { + ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n", + i, tlv_len); + ret = -EINVAL; + goto err; + } + + summary = (struct spectral_summary_fft_report *)tlv; + ath11k_spectral_pull_summary(ar, ¶m->meta, + summary, &summ_rpt); + break; + case ATH11K_SPECTRAL_TAG_SCAN_SEARCH: + if (tlv_len < (sizeof(struct spectral_search_fft_report) - + sizeof(*tlv))) { + ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n", + i); + ret = -EINVAL; + goto err; + } + + memset(fft_sample, 0, sample_sz); + ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv, + fft_sample, + data_len - i); + if (ret) { + ath11k_warn(ab, "failed to process spectral fft at bytes %d\n", + i); + goto err; + } + quit = true; + break; + } + + i += sizeof(*tlv) + tlv_len; + } + +err: + kfree(fft_sample); +unlock: + spin_unlock_bh(&ar->spectral.lock); + return ret; +} + +static int ath11k_spectral_ring_alloc(struct ath11k *ar, + struct ath11k_dbring_cap *db_cap) +{ + struct ath11k_spectral *sp = &ar->spectral; + int ret; + + ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring, + 0, db_cap->min_elem); + if (ret) { + ath11k_warn(ar->ab, "failed to setup db ring\n"); + return ret; + } + + ath11k_dbring_set_cfg(ar, &sp->rx_ring, + ATH11K_SPECTRAL_NUM_RESP_PER_EVENT, + ATH11K_SPECTRAL_EVENT_TIMEOUT_MS, + ath11k_spectral_process_data); + + ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap); + if (ret) { + ath11k_warn(ar->ab, "failed to setup db ring buffer\n"); + goto srng_cleanup; + } + + ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring, + WMI_DIRECT_BUF_SPECTRAL); + if (ret) { + ath11k_warn(ar->ab, "failed to setup db ring cfg\n"); + goto buffer_cleanup; + } + + return 0; + +buffer_cleanup: + ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); +srng_cleanup: + ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); + return ret; +} + +static inline void ath11k_spectral_ring_free(struct ath11k *ar) +{ + struct ath11k_spectral *sp = &ar->spectral; + + if (!sp->enabled) + return; + + ath11k_dbring_srng_cleanup(ar, &sp->rx_ring); + ath11k_dbring_buf_cleanup(ar, &sp->rx_ring); +} + +static inline void ath11k_spectral_debug_unregister(struct ath11k *ar) +{ + debugfs_remove(ar->spectral.scan_bins); + ar->spectral.scan_bins = NULL; + + debugfs_remove(ar->spectral.scan_count); + ar->spectral.scan_count = NULL; + + debugfs_remove(ar->spectral.scan_ctl); + ar->spectral.scan_ctl = NULL; + + if (ar->spectral.rfs_scan) { + relay_close(ar->spectral.rfs_scan); + ar->spectral.rfs_scan = NULL; + } +} + +int ath11k_spectral_vif_stop(struct ath11k_vif *arvif) +{ + if (!arvif->spectral_enabled) + return 0; + + return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED); +} + +void ath11k_spectral_reset_buffer(struct ath11k *ar) +{ + if (!ar->spectral.enabled) + return; + + if (ar->spectral.rfs_scan) + relay_reset(ar->spectral.rfs_scan); +} + +void ath11k_spectral_deinit(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_spectral *sp; + int i; + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + sp = &ar->spectral; + + if (!sp->enabled) + continue; + + ath11k_spectral_debug_unregister(ar); + ath11k_spectral_ring_free(ar); + + spin_lock_bh(&sp->lock); + + sp->mode = ATH11K_SPECTRAL_DISABLED; + sp->enabled = false; + + spin_unlock_bh(&sp->lock); + } +} + +static inline int ath11k_spectral_debug_register(struct ath11k *ar) +{ + int ret; + + 
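/* relay_open() creates the per-pdev spectral_scan relayfs channel under
+ * debugfs; completed FFT samples are streamed to userspace through it.
+ */
+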
ar->spectral.rfs_scan = relay_open("spectral_scan", + ar->debug.debugfs_pdev, + ATH11K_SPECTRAL_SUB_BUFF_SIZE, + ATH11K_SPECTRAL_NUM_SUB_BUF, + &rfs_scan_cb, NULL); + if (!ar->spectral.rfs_scan) { + ath11k_warn(ar->ab, "failed to open relay in pdev %d\n", + ar->pdev_idx); + return -EINVAL; + } + + ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl", + 0600, + ar->debug.debugfs_pdev, ar, + &fops_scan_ctl); + if (!ar->spectral.scan_ctl) { + ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", + ar->pdev_idx); + ret = -EINVAL; + goto debug_unregister; + } + + ar->spectral.scan_count = debugfs_create_file("spectral_count", + 0600, + ar->debug.debugfs_pdev, ar, + &fops_scan_count); + if (!ar->spectral.scan_count) { + ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", + ar->pdev_idx); + ret = -EINVAL; + goto debug_unregister; + } + + ar->spectral.scan_bins = debugfs_create_file("spectral_bins", + 0600, + ar->debug.debugfs_pdev, ar, + &fops_scan_bins); + if (!ar->spectral.scan_bins) { + ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n", + ar->pdev_idx); + ret = -EINVAL; + goto debug_unregister; + } + + return 0; + +debug_unregister: + ath11k_spectral_debug_unregister(ar); + return ret; +} + +int ath11k_spectral_init(struct ath11k_base *ab) +{ + struct ath11k *ar; + struct ath11k_spectral *sp; + struct ath11k_dbring_cap db_cap; + int ret; + int i; + + if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA, + ab->wmi_ab.svc_map)) { + ath11k_info(ab, "spectral not supported\n"); + return 0; + } + + for (i = 0; i < ab->num_radios; i++) { + ar = ab->pdevs[i].ar; + sp = &ar->spectral; + + ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx, + WMI_DIRECT_BUF_SPECTRAL, + &db_cap); + if (ret) { + ath11k_info(ab, "spectral not enabled for pdev %d\n", i); + continue; + } + + idr_init(&sp->rx_ring.bufs_idr); + spin_lock_init(&sp->rx_ring.idr_lock); + spin_lock_init(&sp->lock); + + ret = ath11k_spectral_ring_alloc(ar, &db_cap); + if (ret) { + ath11k_warn(ab, "failed to init spectral ring for pdev %d\n", + i); + goto deinit; + } + + spin_lock_bh(&sp->lock); + + sp->mode = ATH11K_SPECTRAL_DISABLED; + sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT; + sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT; + sp->enabled = true; + + spin_unlock_bh(&sp->lock); + + ret = ath11k_spectral_debug_register(ar); + if (ret) { + ath11k_warn(ab, "failed to register spectral for pdev %d\n", + i); + goto deinit; + } + } + + return 0; + +deinit: + ath11k_spectral_deinit(ab); + return ret; +} + +enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar) +{ + if (ar->spectral.enabled) + return ar->spectral.mode; + else + return ATH11K_SPECTRAL_DISABLED; +} + +struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar) +{ + if (ar->spectral.enabled) + return &ar->spectral.rx_ring; + else + return NULL; +} diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h new file mode 100644 index 000000000000..081744265f2a --- /dev/null +++ b/drivers/net/wireless/ath/ath11k/spectral.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved. + */ + +#ifndef ATH11K_SPECTRAL_H +#define ATH11K_SPECTRAL_H + +#include "../spectral_common.h" +#include "dbring.h" + +/* enum ath11k_spectral_mode: + * + * @SPECTRAL_DISABLED: spectral mode is disabled + * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with + * something else. 
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples + * is performed manually. + */ +enum ath11k_spectral_mode { + ATH11K_SPECTRAL_DISABLED = 0, + ATH11K_SPECTRAL_BACKGROUND, + ATH11K_SPECTRAL_MANUAL, +}; + +struct ath11k_spectral { + struct ath11k_dbring rx_ring; + /* Protects enabled */ + spinlock_t lock; + struct rchan *rfs_scan; /* relay(fs) channel for spectral scan */ + struct dentry *scan_ctl; + struct dentry *scan_count; + struct dentry *scan_bins; + enum ath11k_spectral_mode mode; + u16 count; + u8 fft_size; + bool enabled; +}; + +#ifdef CONFIG_ATH11K_SPECTRAL + +int ath11k_spectral_init(struct ath11k_base *ab); +void ath11k_spectral_deinit(struct ath11k_base *ab); +int ath11k_spectral_vif_stop(struct ath11k_vif *arvif); +void ath11k_spectral_reset_buffer(struct ath11k *ar); +enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar); +struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar); + +#else + +static inline int ath11k_spectral_init(struct ath11k_base *ab) +{ + return 0; +} + +static inline void ath11k_spectral_deinit(struct ath11k_base *ab) +{ +} + +static inline int ath11k_spectral_vif_stop(struct ath11k_vif *arvif) +{ + return 0; +} + +static inline void ath11k_spectral_reset_buffer(struct ath11k *ar) +{ +} + +static inline +enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar) +{ + return ATH11K_SPECTRAL_DISABLED; +} + +static inline +struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar) +{ + return NULL; +} + +#endif /* CONFIG_ATH11K_SPECTRAL */ +#endif /* ATH11K_SPECTRAL_H */ diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 6033ec4e11e5..457a9f79f351 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -63,6 +63,16 @@ struct wmi_tlv_rdy_parse { u32 num_extra_mac_addr; }; +struct wmi_tlv_dma_buf_release_parse { + struct ath11k_wmi_dma_buf_release_fixed_param fixed; + struct wmi_dma_buf_release_entry *buf_entry; + struct wmi_dma_buf_release_meta_data *meta_data; + u32 num_buf_entry; + u32 num_meta; + bool buf_entry_done; + bool meta_data_done; +}; + static const struct wmi_tlv_policy wmi_tlv_policies[] = { [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 }, @@ -3388,6 +3398,80 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab) return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param); } +int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, + struct ath11k_wmi_vdev_spectral_conf_param *param) +{ + struct ath11k_wmi_vdev_spectral_conf_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if (!skb) + return -ENOMEM; + + cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + memcpy(&cmd->param, param, sizeof(*param)); + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, + WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "failed to send spectral scan config wmi cmd\n"); + goto err; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI spectral scan config cmd vdev_id 0x%x\n", + param->vdev_id); + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + +int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id, + u32 trigger, u32 enable) +{ + struct ath11k_wmi_vdev_spectral_enable_cmd *cmd; + struct sk_buff *skb; + int ret; + + skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd)); + if 
(!skb) + return -ENOMEM; + + cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data; + cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, + WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) | + FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE); + + cmd->vdev_id = vdev_id; + cmd->trigger_cmd = trigger; + cmd->enable_cmd = enable; + + ret = ath11k_wmi_cmd_send(ar->wmi, skb, + WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID); + if (ret) { + ath11k_warn(ar->ab, + "failed to send spectral enable wmi cmd\n"); + goto err; + } + + ath11k_dbg(ar->ab, ATH11K_DBG_WMI, + "WMI spectral enable cmd vdev id 0x%x\n", + vdev_id); + + return 0; +err: + dev_kfree_skb(skb); + return ret; +} + int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param) { @@ -3434,6 +3518,116 @@ int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, return ret; } +static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_dma_buf_release_parse *parse = data; + + if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY) + return -EPROTO; + + if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry) + return -ENOBUFS; + + parse->num_buf_entry++; + return 0; +} + +static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_dma_buf_release_parse *parse = data; + + if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA) + return -EPROTO; + + if (parse->num_meta >= parse->fixed.num_meta_data_entry) + return -ENOBUFS; + + parse->num_meta++; + return 0; +} + +static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + struct wmi_tlv_dma_buf_release_parse *parse = data; + int ret; + + switch (tag) { + case WMI_TAG_DMA_BUF_RELEASE: + memcpy(&parse->fixed, ptr, + sizeof(struct ath11k_wmi_dma_buf_release_fixed_param)); + parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id); + break; + case WMI_TAG_ARRAY_STRUCT: + if (!parse->buf_entry_done) { + parse->num_buf_entry = 0; + parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr; + + ret = ath11k_wmi_tlv_iter(ab, ptr, len, + ath11k_wmi_tlv_dma_buf_entry_parse, + parse); + if (ret) { + ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n", + ret); + return ret; + } + + parse->buf_entry_done = true; + } else if (!parse->meta_data_done) { + parse->num_meta = 0; + parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr; + + ret = ath11k_wmi_tlv_iter(ab, ptr, len, + ath11k_wmi_tlv_dma_buf_meta_parse, + parse); + if (ret) { + ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n", + ret); + return ret; + } + + parse->meta_data_done = true; + } + break; + default: + break; + } + return 0; +} + +static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab, + struct sk_buff *skb) +{ + struct wmi_tlv_dma_buf_release_parse parse = { }; + struct ath11k_dbring_buf_release_event param; + int ret; + + ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len, + ath11k_wmi_tlv_dma_buf_parse, + &parse); + if (ret) { + ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret); + return; + } + + param.fixed = parse.fixed; + param.buf_entry = parse.buf_entry; + param.num_buf_entry = parse.num_buf_entry; + param.meta_data = parse.meta_data; + param.num_meta = parse.num_meta; + + ret = ath11k_dbring_buffer_release_event(ab, ¶m); + if (ret) { + ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret); + return; + } +} + static int 
ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc, u16 tag, u16 len, const void *ptr, void *data) @@ -6326,12 +6520,16 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb) case WMI_PDEV_TEMPERATURE_EVENTID: ath11k_wmi_pdev_temperature_event(ab, skb); break; + case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: + ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb); + break; /* add Unsupported events here */ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID: case WMI_VDEV_DELETE_RESP_EVENTID: case WMI_PEER_OPER_MODE_CHANGE_EVENTID: case WMI_TWT_ENABLE_EVENTID: case WMI_TWT_DISABLE_EVENTID: + case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID: ath11k_dbg(ab, ATH11K_DBG_WMI, "ignoring unsupported event 0x%x\n", id); break; diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h index 82c493b94ed8..5a32ba0eb4f5 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.h +++ b/drivers/net/wireless/ath/ath11k/wmi.h @@ -2031,6 +2031,7 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211, WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212, WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213, + WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219, WMI_TLV_SERVICE_EXT2_MSG = 220, WMI_MAX_EXT_SERVICE @@ -4796,6 +4797,64 @@ struct ath11k_wmi_pdev_lro_config_cmd { u32 pdev_id; } __packed; +#define ATH11K_WMI_SPECTRAL_COUNT_DEFAULT 0 +#define ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT 224 +#define ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT 1 +#define ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7 +#define ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT 1 +#define ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0 +#define ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96 +#define ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80 +#define ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12 +#define ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8 +#define ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0 +#define ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0 +#define ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0 +#define ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0 +#define ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2 +#define ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1 +#define ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1 +#define ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1 + +struct ath11k_wmi_vdev_spectral_conf_param { + u32 vdev_id; + u32 scan_count; + u32 scan_period; + u32 scan_priority; + u32 scan_fft_size; + u32 scan_gc_ena; + u32 scan_restart_ena; + u32 scan_noise_floor_ref; + u32 scan_init_delay; + u32 scan_nb_tone_thr; + u32 scan_str_bin_thr; + u32 scan_wb_rpt_mode; + u32 scan_rssi_rpt_mode; + u32 scan_rssi_thr; + u32 scan_pwr_format; + u32 scan_rpt_mode; + u32 scan_bin_scale; + u32 scan_dbm_adj; + u32 scan_chn_mask; +} __packed; + +struct ath11k_wmi_vdev_spectral_conf_cmd { + u32 tlv_header; + struct ath11k_wmi_vdev_spectral_conf_param param; +} __packed; + +#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1 +#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2 +#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1 +#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2 + +struct ath11k_wmi_vdev_spectral_enable_cmd { + u32 tlv_header; + u32 vdev_id; + u32 trigger_cmd; + u32 enable_cmd; +} __packed; + struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd { u32 tlv_header; u32 pdev_id; @@ -5058,4 +5117,8 @@ int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id, int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id); int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar, struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param); +int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 
vdev_id, + u32 trigger, u32 enable); +int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar, + struct ath11k_wmi_vdev_spectral_conf_param *param); #endif
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h index 0d742acb1599..9c2e5458e425 100644 --- a/drivers/net/wireless/ath/spectral_common.h +++ b/drivers/net/wireless/ath/spectral_common.h @@ -24,6 +24,7 @@ * could be acquired so far. */ #define SPECTRAL_ATH10K_MAX_NUM_BINS 256 +#define SPECTRAL_ATH11K_MAX_NUM_BINS 512 /* FFT sample format given to userspace via debugfs. * @@ -37,6 +38,7 @@ enum ath_fft_sample_type { ATH_FFT_SAMPLE_HT20 = 1, ATH_FFT_SAMPLE_HT20_40, ATH_FFT_SAMPLE_ATH10K, + ATH_FFT_SAMPLE_ATH11K }; struct fft_sample_tlv { @@ -110,4 +112,19 @@ struct fft_sample_ath10k { u8 data[0]; } __packed; +struct fft_sample_ath11k { + struct fft_sample_tlv tlv; + u8 chan_width_mhz; + s8 max_index; + u8 max_exp; + __be16 freq1; + __be16 freq2; + __be16 max_magnitude; + __be16 rssi; + __be32 tsf; + __be32 noise; + + u8 data[0]; +} __packed; + #endif /* SPECTRAL_COMMON_H */
From 8cacd0389c4f6056250377f44fbfe18a2501dd79 Mon Sep 17 00:00:00 2001 From: Sowmiya Sree Elavalagan Date: Thu, 11 Jun 2020 08:09:53 +0300 Subject: [PATCH 0049/2056] ath11k: remove redundant reo unlock followed by immediate lock Remove the redundant reo cmd unlock/lock pair, where the lock was released only to be reacquired immediately afterwards. This is purely a code cleanup. Signed-off-by: Sowmiya Sree Elavalagan Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591713432-26426-1-git-send-email-ssreeela@codeaurora.org --- drivers/net/wireless/ath/ath11k/dp_rx.c | 2 -- 1 file changed, 2 deletions(-)
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index 112f130f91c2..a9cc1a457c33 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -653,10 +653,8 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx, spin_lock_bh(&dp->reo_cmd_lock); list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list); dp->reo_cmd_cache_flush_count++; - spin_unlock_bh(&dp->reo_cmd_lock); /* Flush and invalidate aged REO desc from HW cache */ - spin_lock_bh(&dp->reo_cmd_lock); list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list, list) { if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
From 0dd6392ac2c0eb3e229631744f54b61789139e33 Mon Sep 17 00:00:00 2001 From: Sriram R Date: Thu, 11 Jun 2020 08:09:54 +0300 Subject: [PATCH 0050/2056] ath11k: Add dp tx err stats Add support for dp tx error stats, which record tx failure reasons such as the ring-full condition. These stats are added to soc_dp_stats (previously soc_rx_stats) so that all dp related information is logged in the same file.
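As a rough sketch (taken from the hunks in this patch, trimmed for brevity), the counters are bumped at the failure points in ath11k_dp_tx():

        /* no TCL descriptor was available on the selected ring */
        ab->soc_stats.tx_err.desc_na[ti.ring_id]++;

        /* other failures: idr exhaustion, unsupported encap type, DMA mapping error */
        atomic_inc(&ab->soc_stats.tx_err.misc_fail);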
Below is an example usage, root@OpenWrt:/# cat /sys/kernel/debug/ath11k/ipq8074/soc_dp_stats SOC RX STATS: err ring pkts: 0 Invalid RBM: 0 SOC TX STATS: Ring Full Failures: ring0: 4 ring1: 3 ring2: 5 Misc Transmit Failures: 2 Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.1.0.1-01213-QCAHKSWPL_SILICONZ-1 Signed-off-by: Sriram R Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591768308-32005-2-git-send-email-srirrama@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.h | 14 ++++++++++++-- drivers/net/wireless/ath/ath11k/debug.c | 23 +++++++++++++++++------ drivers/net/wireless/ath/ath11k/dp_tx.c | 7 ++++++- 3 files changed, 35 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 7e9e9762b1ca..1ff54c2c2c95 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -590,12 +590,22 @@ struct ath11k_board_data { /* IPQ8074 HW channel counters frequency value in hertz */ #define IPQ8074_CC_FREQ_HERTZ 320000 -struct ath11k_soc_dp_rx_stats { +struct ath11k_soc_dp_tx_err_stats { + /* TCL Ring Descriptor unavailable */ + u32 desc_na[DP_TCL_NUM_RING_MAX]; + /* Other failures during dp_tx due to mem allocation failure + * idr unavailable etc. + */ + atomic_t misc_fail; +}; + +struct ath11k_soc_dp_stats { u32 err_ring_pkts; u32 invalid_rbm; u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX]; u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; u32 hal_reo_error[DP_REO_DST_RING_MAX]; + struct ath11k_soc_dp_tx_err_stats tx_err; }; /* Master structure to hold the hw data which may be used in core module */ @@ -664,7 +674,7 @@ struct ath11k_base { struct dentry *debugfs_soc; struct dentry *debugfs_ath11k; #endif - struct ath11k_soc_dp_rx_stats soc_stats; + struct ath11k_soc_dp_stats soc_stats; unsigned long dev_flags; struct completion driver_recovery; diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c index 3fd6b5af073b..647d5a50043f 100644 --- a/drivers/net/wireless/ath/ath11k/debug.c +++ b/drivers/net/wireless/ath/ath11k/debug.c @@ -739,12 +739,12 @@ static const struct file_operations fops_extd_rx_stats = { .open = simple_open, }; -static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file, +static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ath11k_base *ab = file->private_data; - struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats; + struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats; int len = 0, i, retval; const int size = 4096; static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = { @@ -788,6 +788,17 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file, soc_stats->hal_reo_error[2], soc_stats->hal_reo_error[3]); + len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n"); + len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n"); + + for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) + len += scnprintf(buf + len, size - len, "ring%d: %u\n", + i, soc_stats->tx_err.desc_na[i]); + + len += scnprintf(buf + len, size - len, + "\nMisc Transmit Failures: %d\n", + atomic_read(&soc_stats->tx_err.misc_fail)); + if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); @@ -796,8 +807,8 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file, return retval; } -static const struct file_operations fops_soc_rx_stats = { - .read = ath11k_debug_dump_soc_rx_stats, +static const struct 
file_operations fops_soc_dp_stats = { + .read = ath11k_debug_dump_soc_dp_stats, .open = simple_open, .owner = THIS_MODULE, .llseek = default_llseek, @@ -819,8 +830,8 @@ int ath11k_debug_pdev_create(struct ath11k_base *ab) debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab, &fops_simulate_fw_crash); - debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab, - &fops_soc_rx_stats); + debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab, + &fops_soc_dp_stats); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 41c990aec6b7..1af76775b1a8 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -121,8 +121,10 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, spin_unlock_bh(&tx_ring->tx_idr_lock); if (ret < 0) { - if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) + if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) { + atomic_inc(&ab->soc_stats.tx_err.misc_fail); return -ENOSPC; + } /* Check if the next ring is available */ ring_selector++; @@ -180,11 +182,13 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, default: /* TODO: Take care of other encap modes as well */ ret = -EINVAL; + atomic_inc(&ab->soc_stats.tx_err.misc_fail); goto fail_remove_idr; } ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, ti.paddr)) { + atomic_inc(&ab->soc_stats.tx_err.misc_fail); ath11k_warn(ab, "failed to DMA map data Tx buffer\n"); ret = -ENOMEM; goto fail_remove_idr; @@ -208,6 +212,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif, * desc because the desc is directly enqueued onto hw queue. */ ath11k_hal_srng_access_end(ab, tcl_ring); + ab->soc_stats.tx_err.desc_na[ti.ring_id]++; spin_unlock_bh(&tcl_ring->lock); ret = -ENOMEM; From 71fbc847978f51629856d0ef7d3645d9322fb340 Mon Sep 17 00:00:00 2001 From: Sriram R Date: Thu, 11 Jun 2020 08:09:54 +0300 Subject: [PATCH 0051/2056] ath11k: Add support for ring backpressure stats Add support for collecting and dumping the ring backpressure stats via debugfs. Stats are dumped only if events are received for the specific ring. Below command can be used to obtain these stats as part of soc dp stats. 
cat /sys/kernel/debug/ath11k/ipq8074/soc_dp_stats Sample Output - When No stats available: Backpressure Stats ================== No Ring Backpressure stats received Sample Output - When ring bp stats available for specific ring Backpressure Stats ================== Ring: REO2SW1_RING count: 1 hp: 2 tp: 2 seen before: 4ms Tested-on: IPQ8074 hw2.0 AHB WLAN.HK.2.1.0.1-01213-QCAHKSWPL_SILICONZ-1 Signed-off-by: Sriram R Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591768308-32005-3-git-send-email-srirrama@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.h | 20 +++++ drivers/net/wireless/ath/ath11k/debug.c | 105 ++++++++++++++++++++++++ drivers/net/wireless/ath/ath11k/dp.h | 42 ++++++++++ drivers/net/wireless/ath/ath11k/dp_rx.c | 28 ++++++- 4 files changed, 194 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 1ff54c2c2c95..46e53da49884 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -590,6 +590,25 @@ struct ath11k_board_data { /* IPQ8074 HW channel counters frequency value in hertz */ #define IPQ8074_CC_FREQ_HERTZ 320000 +struct ath11k_bp_stats { + /* Head Pointer reported by the last HTT Backpressure event for the ring */ + u16 hp; + + /* Tail Pointer reported by the last HTT Backpressure event for the ring */ + u16 tp; + + /* Number of Backpressure events received for the ring */ + u32 count; + + /* Last recorded event timestamp */ + unsigned long jiffies; +}; + +struct ath11k_dp_ring_bp_stats { + struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX]; + struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS]; +}; + struct ath11k_soc_dp_tx_err_stats { /* TCL Ring Descriptor unavailable */ u32 desc_na[DP_TCL_NUM_RING_MAX]; @@ -606,6 +625,7 @@ struct ath11k_soc_dp_stats { u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX]; u32 hal_reo_error[DP_REO_DST_RING_MAX]; struct ath11k_soc_dp_tx_err_stats tx_err; + struct ath11k_dp_ring_bp_stats bp_stats; }; /* Master structure to hold the hw data which may be used in core module */ diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c index 647d5a50043f..62a1aa0565a9 100644 --- a/drivers/net/wireless/ath/ath11k/debug.c +++ b/drivers/net/wireless/ath/ath11k/debug.c @@ -12,6 +12,43 @@ #include "debug_htt_stats.h" #include "peer.h" +static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = { + "REO2SW1_RING", + "REO2SW2_RING", + "REO2SW3_RING", + "REO2SW4_RING", + "WBM2REO_LINK_RING", + "REO2TCL_RING", + "REO2FW_RING", + "RELEASE_RING", + "PPE_RELEASE_RING", + "TCL2TQM_RING", + "TQM_RELEASE_RING", + "REO_RELEASE_RING", + "WBM2SW0_RELEASE_RING", + "WBM2SW1_RELEASE_RING", + "WBM2SW2_RELEASE_RING", + "WBM2SW3_RELEASE_RING", + "REO_CMD_RING", + "REO_STATUS_RING", +}; + +static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = { + "FW2RXDMA_BUF_RING", + "FW2RXDMA_STATUS_RING", + "FW2RXDMA_LINK_RING", + "SW2RXDMA_BUF_RING", + "WBM2RXDMA_LINK_RING", + "RXDMA2FW_RING", + "RXDMA2SW_RING", + "RXDMA2RELEASE_RING", + "RXDMA2REO_RING", + "MONITOR_STATUS_RING", + "MONITOR_BUF_RING", + "MONITOR_DESC_RING", + "MONITOR_DEST_RING", +}; + void ath11k_info(struct ath11k_base *ab, const char *fmt, ...) 
{ struct va_format vaf = { @@ -739,6 +776,72 @@ static const struct file_operations fops_extd_rx_stats = { .open = simple_open, }; +static int ath11k_fill_bp_stats(struct ath11k_base *ab, + struct ath11k_bp_stats *bp_stats, + char *buf, int len, int size) +{ + lockdep_assert_held(&ab->base_lock); + + len += scnprintf(buf + len, size - len, "count: %u\n", + bp_stats->count); + len += scnprintf(buf + len, size - len, "hp: %u\n", + bp_stats->hp); + len += scnprintf(buf + len, size - len, "tp: %u\n", + bp_stats->tp); + len += scnprintf(buf + len, size - len, "seen before: %ums\n\n", + jiffies_to_msecs(jiffies - bp_stats->jiffies)); + return len; +} + +static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab, + char *buf, int size) +{ + struct ath11k_bp_stats *bp_stats; + bool stats_rxd = false; + u8 i, pdev_idx; + int len = 0; + + len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n"); + len += scnprintf(buf + len, size - len, "==================\n"); + + spin_lock_bh(&ab->base_lock); + for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) { + bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i]; + + if (!bp_stats->count) + continue; + + len += scnprintf(buf + len, size - len, "Ring: %s\n", + htt_bp_umac_ring[i]); + len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); + stats_rxd = true; + } + + for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) { + for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) { + bp_stats = + &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx]; + + if (!bp_stats->count) + continue; + + len += scnprintf(buf + len, size - len, "Ring: %s\n", + htt_bp_lmac_ring[i]); + len += scnprintf(buf + len, size - len, "pdev: %d\n", + pdev_idx); + len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size); + stats_rxd = true; + } + } + spin_unlock_bh(&ab->base_lock); + + if (!stats_rxd) + len += scnprintf(buf + len, size - len, + "No Ring Backpressure stats received\n\n"); + + return len; +} + static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -799,6 +902,8 @@ static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file, "\nMisc Transmit Failures: %d\n", atomic_read(&soc_stats->tx_err.misc_fail)); + len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len); + if (len > size) len = size; retval = simple_read_from_buffer(user_buf, count, ppos, buf, len); diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h index 058a5c1d86ff..7587862d2e32 100644 --- a/drivers/net/wireless/ath/ath11k/dp.h +++ b/drivers/net/wireless/ath/ath11k/dp.h @@ -999,6 +999,48 @@ struct htt_resp_msg { #define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0) #define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16) +#define HTT_BACKPRESSURE_UMAC_RING_TYPE 0 +#define HTT_BACKPRESSURE_LMAC_RING_TYPE 1 + +enum htt_backpressure_umac_ringid { + HTT_SW_RING_IDX_REO_REO2SW1_RING, + HTT_SW_RING_IDX_REO_REO2SW2_RING, + HTT_SW_RING_IDX_REO_REO2SW3_RING, + HTT_SW_RING_IDX_REO_REO2SW4_RING, + HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING, + HTT_SW_RING_IDX_REO_REO2TCL_RING, + HTT_SW_RING_IDX_REO_REO2FW_RING, + HTT_SW_RING_IDX_REO_REO_RELEASE_RING, + HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING, + HTT_SW_RING_IDX_TCL_TCL2TQM_RING, + HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING, + HTT_SW_RING_IDX_WBM_REO_RELEASE_RING, + HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING, + HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING, + HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING, + HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING, + 
HTT_SW_RING_IDX_REO_REO_CMD_RING, + HTT_SW_RING_IDX_REO_REO_STATUS_RING, + HTT_SW_UMAC_RING_IDX_MAX, +}; + +enum htt_backpressure_lmac_ringid { + HTT_SW_RING_IDX_FW2RXDMA_BUF_RING, + HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING, + HTT_SW_RING_IDX_FW2RXDMA_LINK_RING, + HTT_SW_RING_IDX_SW2RXDMA_BUF_RING, + HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING, + HTT_SW_RING_IDX_RXDMA2FW_RING, + HTT_SW_RING_IDX_RXDMA2SW_RING, + HTT_SW_RING_IDX_RXDMA2RELEASE_RING, + HTT_SW_RING_IDX_RXDMA2REO_RING, + HTT_SW_RING_IDX_MONITOR_STATUS_RING, + HTT_SW_RING_IDX_MONITOR_BUF_RING, + HTT_SW_RING_IDX_MONITOR_DESC_RING, + HTT_SW_RING_IDX_MONITOR_DEST_RING, + HTT_SW_LMAC_RING_IDX_MAX, +}; + /* ppdu stats * * @details
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index a9cc1a457c33..791d971784ce 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1501,9 +1501,10 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, struct sk_buff *skb) { u32 *data = (u32 *)skb->data; - u8 pdev_id, ring_type, ring_id; + u8 pdev_id, ring_type, ring_id, pdev_idx; u16 hp, tp; u32 backpressure_time; + struct ath11k_bp_stats *bp_stats; pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data); ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data); @@ -1518,6 +1519,31 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab, ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n", pdev_id, ring_type, ring_id, hp, tp, backpressure_time); + + if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) { + if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX) + return; + + bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id]; + } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) { + pdev_idx = DP_HW2SW_MACID(pdev_id); + + if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS) + return; + + bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx]; + } else { + ath11k_warn(ab, "unknown ring type received in htt bp event %d\n", + ring_type); + return; + } + + spin_lock_bh(&ab->base_lock); + bp_stats->hp = hp; + bp_stats->tp = tp; + bp_stats->count++; + bp_stats->jiffies = jiffies; + spin_unlock_bh(&ab->base_lock); } void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
From 01e34233c6456ff61f2caab450860f1462acf78e Mon Sep 17 00:00:00 2001 From: Venkateswara Naralasetty Date: Thu, 11 Jun 2020 08:09:55 +0300 Subject: [PATCH 0052/2056] ath11k: fix wmi peer flags in peer assoc command Currently the need_ptk_4_way/need_gtk_2_way wmi peer flags in the wmi peer assoc command are set based on the rsnie and wpaie of the bss taken from the bss list. Since this bss list is not updated with the current BSSID in AP mode, the bss may not be found, and as a result the ptk/gtk peer flags are not set in the wmi peer assoc command. Because of this, EAPOL frames are sent at data rates instead of management rates.
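The approach, sketched from the hunks below: record whether the RSN/WPA IEs are present when the beacon template is installed, and derive the peer flags from that instead of from the cfg80211 bss entry:

        ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
        ies += sizeof(mgmt->u.beacon);
        if (cfg80211_find_ie(WLAN_EID_RSN, ies, skb_tail_pointer(bcn) - ies))
                arvif->rsnie_present = true;

        /* later, when building the peer assoc parameters */
        if (arvif->rsnie_present || arvif->wpaie_present) {
                arg->need_ptk_4_way = true;
                if (arvif->wpaie_present)
                        arg->need_gtk_2_way = true;
        }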
Tested-on: IPQ8074 WLAN.HK.2.1.0.1-01228-QCAHKSWPL_SILICONZ-1 Signed-off-by: Venkateswara Naralasetty Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/1591771841-25503-1-git-send-email-vnaralas@codeaurora.org --- drivers/net/wireless/ath/ath11k/core.h | 2 ++ drivers/net/wireless/ath/ath11k/mac.c | 21 ++++++++++++++++++++- drivers/net/wireless/ath/ath11k/wmi.c | 6 +++--- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 46e53da49884..e5c4e19020ee 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -224,6 +224,8 @@ struct ath11k_vif { int num_legacy_stations; int rtscts_prot_mode; int txpower; + bool rsnie_present; + bool wpaie_present; }; struct ath11k_vif_iter { diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 5ba8f0986079..07d3e031c75a 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -780,6 +780,8 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) struct ieee80211_vif *vif = arvif->vif; struct ieee80211_mutable_offsets offs = {}; struct sk_buff *bcn; + struct ieee80211_mgmt *mgmt; + u8 *ies; int ret; if (arvif->vdev_type != WMI_VDEV_TYPE_AP) @@ -791,6 +793,17 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif) return -EPERM; } + ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); + ies += sizeof(mgmt->u.beacon); + + if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) + arvif->rsnie_present = true; + + if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, + WLAN_OUI_TYPE_MICROSOFT_WPA, + ies, (skb_tail_pointer(bcn) - ies))) + arvif->wpaie_present = true; + ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); kfree_skb(bcn); @@ -880,6 +893,7 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, struct ieee80211_bss_conf *info = &vif->bss_conf; struct cfg80211_chan_def def; struct cfg80211_bss *bss; + struct ath11k_vif *arvif = (struct ath11k_vif *)vif->drv_priv; const u8 *rsnie = NULL; const u8 *wpaie = NULL; @@ -890,7 +904,12 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar, bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); - if (bss) { + + if (arvif->rsnie_present || arvif->wpaie_present) { + arg->need_ptk_4_way = true; + if (arvif->wpaie_present) + arg->need_gtk_2_way = true; + } else if (bss) { const struct cfg80211_bss_ies *ies; rcu_read_lock(); diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 457a9f79f351..8e3437a65673 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -1728,10 +1728,10 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd, */ if (param->auth_flag) cmd->peer_flags |= WMI_PEER_AUTH; - if (param->need_ptk_4_way) + if (param->need_ptk_4_way) { cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; - else - cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY; + cmd->peer_flags &= ~WMI_PEER_AUTH; + } if (param->need_gtk_2_way) cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; /* safe mode bypass the 4-way handshake */ From cad2929dc4321b1f237767e9bd271b61a2eaa752 Mon Sep 17 00:00:00 2001 From: Hoang Huu Le Date: Wed, 17 Jun 2020 13:56:05 +0700 Subject: [PATCH 0053/2056] tipc: update a binding service via broadcast Currently, updating binding table (add service binding to name table/withdraw a service 
binding) is being sent over replicast. However, if we are scaling up clusters to > 100 nodes/containers this method is less efficient because it loops through the nodes in a cluster one by one. It is worthwhile to use broadcast to update a binding service. This way, the binding table can be updated on all peer nodes in one shot. Broadcast is used when all peer nodes, as indicated by a new capability flag TIPC_NAMED_BCAST, support reception of this message type. Four problems need to be considered when introducing this feature. 1) When establishing a link to a new peer node we still update this by a unicast 'bulk' update. This may lead to race conditions, where a later broadcast publication/withdrawal bypasses the 'bulk', resulting in disordered publications, or even that a withdrawal may arrive before the corresponding publication. We solve this by adding an 'is_last_bulk' bit in the last bulk messages so that it can be distinguished from all other messages. Only when this message has arrived do we open up for reception of broadcast publications/withdrawals. 2) When a first legacy node is added to the cluster all distribution will switch over to use the legacy 'replicast' method, while the opposite happens when the last legacy node leaves the cluster. This entails another risk of message disordering that has to be handled. We solve this by adding a sequence number to the broadcast/replicast messages, so that disordering can be discovered and corrected. Note however that we don't need to consider potential message loss or duplication at this protocol level. 3) Bulk messages don't contain any sequence numbers, and will always arrive in order. Hence we must exempt those from the sequence number control and deliver them unconditionally. We solve this by adding a new 'is_bulk' bit in those messages so that they can be recognized. 4) Legacy messages, which don't contain any new bits or sequence numbers but cannot arrive out of order either, also need to be exempt from the initial synchronization and sequence number check, and delivered unconditionally. Therefore, we add another 'is_not_legacy' bit to all new messages so that those can be distinguished from legacy messages and the latter delivered directly. v1->v2: - fix warning issue reported by kbuild test robot - add sanity check to drop the publication message with a sequence number that is lower than the agreed synch point Signed-off-by: kernel test robot Signed-off-by: Hoang Huu Le Acked-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bcast.c | 6 +-- net/tipc/bcast.h | 4 +- net/tipc/link.c | 2 +- net/tipc/msg.h | 40 +++++++++++++++ net/tipc/name_distr.c | 116 +++++++++++++++++++++++++++++++----------- net/tipc/name_distr.h | 9 ++-- net/tipc/name_table.c | 9 +++- net/tipc/name_table.h | 2 + net/tipc/node.c | 29 ++++++++--- net/tipc/node.h | 8 +-- 10 files changed, 177 insertions(+), 48 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 383f87bc1061..940d176e0e87 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, * Consumes the buffer chain.
* Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE */ -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, - u16 *cong_link_cnt) +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt) { struct tipc_link *l = tipc_bc_sndlink(net); struct sk_buff_head xmitq; @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) nl->local = false; } -u32 tipc_bcast_get_broadcast_mode(struct net *net) +u32 tipc_bcast_get_mode(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 4240c95188b1..2d9352dc7b0e 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_mc_method *method, struct tipc_nlist *dests, u16 *cong_link_cnt); +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt); int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr); @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); -u32 tipc_bcast_get_broadcast_mode(struct net *net); +u32 tipc_bcast_get_mode(struct net *net); u32 tipc_bcast_get_broadcast_ratio(struct net *net); void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, diff --git a/net/tipc/link.c b/net/tipc/link.c index ee3b8d0576b8..eac89a3e22ce 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, void *hdr; struct nlattr *attrs; struct nlattr *prop; - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); + u32 bc_mode = tipc_bcast_get_mode(net); u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); if (!bcl) diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 58660d56bc83..65119e81ff0c 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) msg_set_bits(m, 1, 25, 0xf, err); } +static inline void msg_set_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 28, 0x1, 1); +} + +static inline u32 msg_is_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 28, 0x1); +} + +static inline void msg_set_last_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 27, 0x1, 1); +} + +static inline u32 msg_is_last_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 27, 0x1); +} + +static inline void msg_set_non_legacy(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 26, 0x1, 1); +} + +static inline u32 msg_is_legacy(struct tipc_msg *m) +{ + return !msg_bits(m, 1, 26, 0x1); +} + static inline u32 msg_reroute_cnt(struct tipc_msg *m) { return msg_bits(m, 1, 21, 0xf); @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) msg_set_word(m, 4, p); } +static inline u16 msg_named_seqno(struct tipc_msg *m) +{ + return msg_bits(m, 4, 0, 0xffff); +} + +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) +{ + msg_set_bits(m, 4, 0, 0xffff, n); +} + static inline u32 msg_destport(struct tipc_msg *m) { return msg_word(m, 5); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 5feaf3b67380..2f9c148f17e2 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct 
net *net, struct publication *publ) pr_warn("Publication distribution failure\n"); return NULL; } - + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); return skb; @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) { struct name_table *nt = tipc_name_table(net); - struct sk_buff *buf; struct distr_item *item; + struct sk_buff *skb; write_lock_bh(&nt->cluster_scope_lock); list_del(&publ->binding_node); @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) if (publ->scope == TIPC_NODE_SCOPE) return NULL; - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); - if (!buf) { + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); + if (!skb) { pr_warn("Withdrawal distribution failure\n"); return NULL; } - - item = (struct distr_item *)msg_data(buf_msg(buf)); + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); + item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); - return buf; + return skb; } /** @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) * @pls: linked list of publication items to be packed into buffer chain */ static void named_distribute(struct net *net, struct sk_buff_head *list, - u32 dnode, struct list_head *pls) + u32 dnode, struct list_head *pls, u16 seqno) { struct publication *publ; struct sk_buff *skb = NULL; @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE; u32 msg_rem = msg_dsz; + struct tipc_msg *hdr; list_for_each_entry(publ, pls, binding_node) { /* Prepare next buffer: */ @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, pr_warn("Bulk publication failure\n"); return; } - msg_set_bc_ack_invalid(buf_msg(skb), true); - item = (struct distr_item *)msg_data(buf_msg(skb)); + hdr = buf_msg(skb); + msg_set_bc_ack_invalid(hdr, true); + msg_set_bulk(hdr); + msg_set_non_legacy(hdr); + item = (struct distr_item *)msg_data(hdr); } /* Pack publication into message: */ @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, } } if (skb) { - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); + hdr = buf_msg(skb); + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); __skb_queue_tail(list, skb); } + hdr = buf_msg(skb_peek_tail(list)); + msg_set_last_bulk(hdr); + msg_set_named_seqno(hdr, seqno); } /** * tipc_named_node_up - tell specified node about all publications by this node */ -void tipc_named_node_up(struct net *net, u32 dnode) +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) { struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); struct sk_buff_head head; + u16 seqno; __skb_queue_head_init(&head); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests++; + seqno = nt->snd_nxt; + spin_unlock_bh(&tn->nametbl_lock); read_lock_bh(&nt->cluster_scope_lock); - named_distribute(net, &head, dnode, &nt->cluster_scope); + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); tipc_node_xmit(net, &head, dnode, 0); 
read_unlock_bh(&nt->cluster_scope_lock); } @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) spin_unlock_bh(&tn->nametbl_lock); } -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities) { + struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); + struct publication *publ, *tmp; list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) tipc_publ_purge(net, publ, addr); tipc_dist_queue_purge(net, addr); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests--; + spin_unlock_bh(&tn->nametbl_lock); } /** @@ -295,29 +320,62 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, return false; } +static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) +{ + struct sk_buff *skb, *tmp; + struct tipc_msg *hdr; + u16 seqno; + + skb_queue_walk_safe(namedq, skb, tmp) { + skb_linearize(skb); + hdr = buf_msg(skb); + seqno = msg_named_seqno(hdr); + if (msg_is_last_bulk(hdr)) { + *rcv_nxt = seqno; + *open = true; + } + + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { + __skb_unlink(skb, namedq); + return skb; + } + + if (*open && (*rcv_nxt == seqno)) { + (*rcv_nxt)++; + __skb_unlink(skb, namedq); + return skb; + } + + if (less(seqno, *rcv_nxt)) { + __skb_unlink(skb, namedq); + kfree_skb(skb); + continue; + } + } + return NULL; +} + /** * tipc_named_rcv - process name table update messages sent by another node */ -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) { - struct tipc_net *tn = net_generic(net, tipc_net_id); - struct tipc_msg *msg; + struct tipc_net *tn = tipc_net(net); struct distr_item *item; - uint count; - u32 node; + struct tipc_msg *hdr; struct sk_buff *skb; - int mtype; + u32 count, node; spin_lock_bh(&tn->nametbl_lock); - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { - skb_linearize(skb); - msg = buf_msg(skb); - mtype = msg_type(msg); - item = (struct distr_item *)msg_data(msg); - count = msg_data_sz(msg) / ITEM_SIZE; - node = msg_orignode(msg); + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { + hdr = buf_msg(skb); + node = msg_orignode(hdr); + item = (struct distr_item *)msg_data(hdr); + count = msg_data_sz(hdr) / ITEM_SIZE; while (count--) { - tipc_update_nametbl(net, item, node, mtype); + tipc_update_nametbl(net, item, node, msg_type(hdr)); item++; } kfree_skb(skb); @@ -345,6 +403,6 @@ void tipc_named_reinit(struct net *net) publ->node = self; list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) publ->node = self; - + nt->rc_dests = 0; spin_unlock_bh(&tn->nametbl_lock); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index 63fc73e0fa6c..092323158f06 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -67,11 +67,14 @@ struct distr_item { __be32 key; }; +void tipc_named_bcast(struct net *net, struct sk_buff *skb); struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); -void tipc_named_node_up(struct net *net, u32 dnode); -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, 
+ u16 *rcv_nxt, bool *open); void tipc_named_reinit(struct net *net); -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 359b2bc888cf..2ac33d32edc2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, struct tipc_net *tn = tipc_net(net); struct publication *p = NULL; struct sk_buff *skb = NULL; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, nt->local_publ_count++; skb = tipc_named_publish(net, p); } + rc_dests = nt->rc_dests; exit: spin_unlock_bh(&tn->nametbl_lock); if (skb) - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return p; + } /** @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 self = tipc_own_addr(net); struct sk_buff *skb = NULL; struct publication *p; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", type, lower, upper, key); } + rc_dests = nt->rc_dests; spin_unlock_bh(&tn->nametbl_lock); if (skb) { - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return 1; } return 0; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 728bc7016c38..8064e1986e2c 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -106,6 +106,8 @@ struct name_table { struct list_head cluster_scope; rwlock_t cluster_scope_lock; u32 local_publ_count; + u32 rc_dests; + u32 snd_nxt; }; int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/tipc/node.c b/net/tipc/node.c index a4c2816c3746..030a51c4d1fa 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -75,6 +75,8 @@ struct tipc_bclink_entry { struct sk_buff_head arrvq; struct sk_buff_head inputq2; struct sk_buff_head namedq; + u16 named_rcv_nxt; + bool named_open; }; /** @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) write_unlock_bh(&n->lock); if (flags & TIPC_NOTIFY_NODE_DOWN) - tipc_publ_notify(net, publ_list, addr); + tipc_publ_notify(net, publ_list, addr, n->capabilities); if (flags & TIPC_NOTIFY_NODE_UP) - tipc_named_node_up(net, addr); + tipc_named_node_up(net, addr, n->capabilities); if (flags & TIPC_NOTIFY_LINK_UP) { tipc_mon_peer_up(net, addr, bearer_id); @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, /* Clean up broadcast state */ tipc_bcast_remove_peer(n->net, n->bc_entry.link); + __skb_queue_purge(&n->bc_entry.namedq); /* Abort any ongoing link failover */ for (i = 0; i < MAX_BEARERS; i++) { @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) return 0; } -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) { + struct sk_buff_head xmitq; struct sk_buff *txskb; struct tipc_node *n; + u16 dummy; u32 dst; + /* Use broadcast if all nodes support it */ + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { + __skb_queue_head_init(&xmitq); + __skb_queue_tail(&xmitq, skb); + tipc_bcast_xmit(net, &xmitq, &dummy); + return; + } + + /* Otherwise use legacy 
replicast method */ rcu_read_lock(); list_for_each_entry_rcu(n, tipc_nodes(net), list) { dst = n->addr; @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) tipc_node_xmit_skb(net, txskb, dst, 0); } rcu_read_unlock(); - kfree_skb(skb); } @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ if (!skb_queue_empty(&n->bc_entry.namedq)) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); /* If reassembly or retransmission failure => reset all links to peer */ if (rc & TIPC_LINK_DOWN_EVT) @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) tipc_node_link_down(n, bearer_id, false); if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) tipc_node_mcast_rcv(n); diff --git a/net/tipc/node.h b/net/tipc/node.h index a6803b449a2c..9f6f13f1604f 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -55,7 +55,8 @@ enum { TIPC_MCAST_RBCTL = (1 << 7), TIPC_GAP_ACK_BLOCK = (1 << 8), TIPC_TUNNEL_ENHANCED = (1 << 9), - TIPC_NAGLE = (1 << 10) + TIPC_NAGLE = (1 << 10), + TIPC_NAMED_BCAST = (1 << 11) }; #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ @@ -68,7 +69,8 @@ enum { TIPC_MCAST_RBCTL | \ TIPC_GAP_ACK_BLOCK | \ TIPC_TUNNEL_ENHANCED | \ - TIPC_NAGLE) + TIPC_NAGLE | \ + TIPC_NAMED_BCAST) #define INVALID_BEARER_ID -1 @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, u32 selector); void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); From 7bd3a33ae6d2b820bc44a206f9b81b96840219fd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 17 Jun 2020 11:31:32 -0700 Subject: [PATCH 0054/2056] libbpf: Bump version to 0.1.0 Bump libbpf version to 0.1.0, as new development cycle starts. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200617183132.1970836-1-andriin@fb.com --- tools/lib/bpf/libbpf.map | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index f732c77b7ed0..c914347f5065 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -270,3 +270,6 @@ LIBBPF_0.0.9 { ring_buffer__new; ring_buffer__poll; } LIBBPF_0.0.8; + +LIBBPF_0.1.0 { +} LIBBPF_0.0.9; From 682591f7a6fae0cba255af889fae3d6e211d4d25 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 15 Jun 2020 16:18:55 -0500 Subject: [PATCH 0055/2056] liquidio: Replace vmalloc_node + memset with vzalloc_node and use array_size Use vzalloc/vzalloc_node instead of the vmalloc/vzalloc_node and memset. 
Also, notice that vzalloc_node() function has no 2-factor argument form to calculate the size for the allocation, so multiplication factors need to be wrapped in array_size(). This issue was found with the help of Coccinelle and, audited and fixed manually. Addresses-KSPP-ID: https://github.com/KSPP/linux/issues/83 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/request_manager.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 6dd65f9b347c..8e59c2825533 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -95,12 +95,10 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize a list to holds requests that have been posted to Octeon * but has yet to be fetched by octeon */ - iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs), - numa_node); + iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)), + numa_node); if (!iq->request_list) - iq->request_list = - vmalloc(array_size(num_descs, - sizeof(*iq->request_list))); + iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list))); if (!iq->request_list) { lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n", @@ -108,8 +106,6 @@ int octeon_init_instr_queue(struct octeon_device *oct, return 1; } - memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs); - dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n", iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); From 3dd1499666f64817173c6390ed4d387471d07df8 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 15 Jun 2020 18:01:07 -0500 Subject: [PATCH 0056/2056] ethtool: ioctl: Use array_size() in copy_to_user() Use array_size() helper instead of the open-coded version in copy_to_user(). These sorts of multiplication factors need to be wrapped in array_size(). This issue was found with the help of Coccinelle and, audited and fixed manually. Addresses-KSPP-ID: https://github.com/KSPP/linux/issues/83 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- net/ethtool/ioctl.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index b5df90c981c2..be3ed24bfe03 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -1918,7 +1918,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); - if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64))) + if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) goto out; ret = 0; @@ -1973,7 +1973,7 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (copy_to_user(useraddr, &stats, sizeof(stats))) goto out; useraddr += sizeof(stats); - if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64))) + if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64)))) goto out; ret = 0; From aececa645dc79ec004bfed3357c15cbf4b9b5746 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Wed, 17 Jun 2020 16:39:07 +0200 Subject: [PATCH 0057/2056] Bluetooth: mgmt: Add commands for runtime configuration This adds the required read/set commands for runtime configuration. Even while currently no parameters are specified, the commands are made available. Signed-off-by: Marcel Holtmann Reviewed-by: Alain Michaud Signed-off-by: Johan Hedberg --- net/bluetooth/mgmt.c | 7 +++++++ net/bluetooth/mgmt_config.c | 18 ++++++++++++++++++ net/bluetooth/mgmt_config.h | 6 ++++++ 3 files changed, 31 insertions(+) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 99fbfd467d04..ecfdfc4df486 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -114,6 +114,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_EXP_FEATURE, MGMT_OP_READ_DEF_SYSTEM_CONFIG, MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, + MGMT_OP_SET_DEF_RUNTIME_CONFIG, }; static const u16 mgmt_events[] = { @@ -166,6 +168,7 @@ static const u16 mgmt_untrusted_commands[] = { MGMT_OP_READ_SECURITY_INFO, MGMT_OP_READ_EXP_FEATURES_INFO, MGMT_OP_READ_DEF_SYSTEM_CONFIG, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, }; static const u16 mgmt_untrusted_events[] = { @@ -7305,6 +7308,10 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_UNTRUSTED }, { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, + { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index f6dfbe93542c..8e7ad2a51dbb 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -251,3 +251,21 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_STATUS_SUCCESS); } + +int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + bt_dev_dbg(hdev, "sock %p", sk); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, 0, NULL, 0); +} + +int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + bt_dev_dbg(hdev, "sock %p", sk); + + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); +} diff --git a/net/bluetooth/mgmt_config.h b/net/bluetooth/mgmt_config.h index 51da6e63b1a0..a4965f107891 100644 --- 
a/net/bluetooth/mgmt_config.h +++ b/net/bluetooth/mgmt_config.h @@ -9,3 +9,9 @@ int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len); + +int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + +int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); From 8baaa4038edbff67f318574e233e9e7e43808230 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:08 +0200 Subject: [PATCH 0058/2056] Bluetooth: Add bdaddr_list_with_flags for classic whitelist In order to more easily add device flags to classic devices, create a new type of bdaddr_list that supports setting flags. Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 18 ++++++++-- net/bluetooth/hci_core.c | 58 ++++++++++++++++++++++++++++++++ net/bluetooth/hci_event.c | 8 ++--- net/bluetooth/mgmt.c | 5 +-- 4 files changed, 81 insertions(+), 8 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0d5dbb6cb5a0..95a3935325bb 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -136,6 +136,13 @@ struct bdaddr_list_with_irk { u8 local_irk[16]; }; +struct bdaddr_list_with_flags { + struct list_head list; + bdaddr_t bdaddr; + u8 bdaddr_type; + u32 current_flags; +}; + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -1169,12 +1176,19 @@ struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list, struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( struct list_head *list, bdaddr_t *bdaddr, u8 type); +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type, u8 *peer_irk, u8 *local_irk); + u8 type, u8 *peer_irk, u8 *local_irk); +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags); int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type); int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, - u8 type); + u8 type); +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type); void hci_bdaddr_list_clear(struct list_head *list); struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4f1052a7c488..8a471bec2731 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3023,6 +3023,20 @@ struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( return NULL; } +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list_with_flags *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + void hci_bdaddr_list_clear(struct list_head *bdaddr_list) { struct bdaddr_list *b, *n; @@ -3084,6 +3098,30 @@ int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct 
bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->current_flags = flags; + + list_add(&entry->list, list); + + return 0; +} + int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) { struct bdaddr_list *entry; @@ -3123,6 +3161,26 @@ int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, return 0; } +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + /* This function requires the caller holds hdev->lock */ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index cfeaee347db3..8981954ff4c4 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2697,10 +2697,10 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) */ if (hci_dev_test_flag(hdev, HCI_MGMT) && !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && - !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, - BDADDR_BREDR)) { - hci_reject_conn(hdev, &ev->bdaddr); - return; + !hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &ev->bdaddr, + BDADDR_BREDR)) { + hci_reject_conn(hdev, &ev->bdaddr); + return; } /* Connection accepted */ diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ecfdfc4df486..d0d0fa832c8a 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -6000,8 +6000,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, goto unlock; } - err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr, - cp->addr.type); + err = hci_bdaddr_list_add_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type, 0); if (err) goto unlock; From 7a92906f841db46a91df0179459ad8b2052f2e54 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:09 +0200 Subject: [PATCH 0059/2056] Bluetooth: Replace wakeable list with flag Since the classic device list now supports flags, convert the wakeable list into a flag on the existing device list. 
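A minimal sketch of the new representation (illustrative only, not part of the diff below): the per-device wakeup property becomes one bit in the whitelist entry's current_flags word instead of membership in the separate hdev->wakeable list, and is queried with the hci_conn_test_flag() helper added by this patch.

    /* Sketch; assumes struct bdaddr_list_with_flags from the previous
     * patch plus HCI_CONN_FLAG_REMOTE_WAKEUP and hci_conn_test_flag()
     * introduced below.
     */
    static void sketch_mark_wakeable(struct bdaddr_list_with_flags *b)
    {
            /* previously: tracked by membership in hdev->wakeable */
            b->current_flags |= 1U << HCI_CONN_FLAG_REMOTE_WAKEUP;
    }

    static bool sketch_is_wakeable(struct bdaddr_list_with_flags *b)
    {
            return hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                      b->current_flags);
    }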
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 11 ++++++++++- net/bluetooth/hci_core.c | 1 - net/bluetooth/hci_request.c | 12 ++++++++---- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 95a3935325bb..0643c737ba85 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -143,6 +143,16 @@ struct bdaddr_list_with_flags { u32 current_flags; }; +enum hci_conn_flags { + HCI_CONN_FLAG_REMOTE_WAKEUP, + HCI_CONN_FLAG_MAX +}; + +#define hci_conn_test_flag(nr, flags) ((flags) & (1U << nr)) + +/* Make sure number of flags doesn't exceed sizeof(current_flags) */ +static_assert(HCI_CONN_FLAG_MAX < 32); + struct bt_uuid { struct list_head list; u8 uuid[16]; @@ -463,7 +473,6 @@ struct hci_dev { struct list_head mgmt_pending; struct list_head blacklist; struct list_head whitelist; - struct list_head wakeable; struct list_head uuids; struct list_head link_keys; struct list_head long_term_keys; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8a471bec2731..8e01afb2ee8c 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3499,7 +3499,6 @@ struct hci_dev *hci_alloc_dev(void) INIT_LIST_HEAD(&hdev->mgmt_pending); INIT_LIST_HEAD(&hdev->blacklist); INIT_LIST_HEAD(&hdev->whitelist); - INIT_LIST_HEAD(&hdev->wakeable); INIT_LIST_HEAD(&hdev->uuids); INIT_LIST_HEAD(&hdev->link_keys); INIT_LIST_HEAD(&hdev->long_term_keys); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a7f572ad38ef..a5b53d3ea508 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -968,15 +968,19 @@ static void hci_req_clear_event_filter(struct hci_request *req) static void hci_req_set_event_filter(struct hci_request *req) { - struct bdaddr_list *b; + struct bdaddr_list_with_flags *b; struct hci_cp_set_event_filter f; struct hci_dev *hdev = req->hdev; - u8 scan; + u8 scan = SCAN_DISABLED; /* Always clear event filter when starting */ hci_req_clear_event_filter(req); - list_for_each_entry(b, &hdev->wakeable, list) { + list_for_each_entry(b, &hdev->whitelist, list) { + if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + b->current_flags)) + continue; + memset(&f, 0, sizeof(f)); bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr); f.flt_type = HCI_FLT_CONN_SETUP; @@ -985,9 +989,9 @@ static void hci_req_set_event_filter(struct hci_request *req) bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f); + scan = SCAN_PAGE; } - scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED; hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); } From a1fc7535ec34a5904abe93dd42a6ed7e31c36717 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:10 +0200 Subject: [PATCH 0060/2056] Bluetooth: Replace wakeable in hci_conn_params Replace the wakeable boolean with flags in hci_conn_params and all users of this boolean. This will be used by the get/set device flags mgmt op. 
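The LE path gets the same treatment; a condensed sketch of the suspend-time check (the actual hci_request.c change is in the diff below), assuming the u32 current_flags field that replaces the old bool wakeable:

    /* Sketch: during suspend only parameters carrying the REMOTE_WAKEUP
     * bit may stay on the LE white list.
     */
    static bool sketch_keep_on_white_list(struct hci_dev *hdev,
                                          struct hci_conn_params *params)
    {
            if (hdev->suspended &&
                !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
                                    params->current_flags))
                    return false;   /* old form: !params->wakeable */

            return true;
    }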
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 +- net/bluetooth/hci_request.c | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 0643c737ba85..6f88e5d81bd2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -660,7 +660,7 @@ struct hci_conn_params { struct hci_conn *conn; bool explicit_connect; - bool wakeable; + u32 current_flags; }; extern struct list_head hci_dev_list; diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index a5b53d3ea508..eee9c007a5fb 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -710,7 +710,8 @@ static int add_to_white_list(struct hci_request *req, } /* During suspend, only wakeable devices can be in whitelist */ - if (hdev->suspended && !params->wakeable) + if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP, + params->current_flags)) return 0; *num_entries += 1; From 4c54bf2b093bb2ae95e756342646d868e8101cb4 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 17 Jun 2020 16:39:11 +0200 Subject: [PATCH 0061/2056] Bluetooth: Add get/set device flags mgmt op Add the get device flags and set device flags mgmt ops and the device flags changed event. Their behavior is described in detail in mgmt-api.txt in bluez. Sample btmon trace when a HID device is added (trimmed to 75 chars): @ MGMT Command: Unknown (0x0050) plen 11 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 ........... @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Command Compl.. (0x0001) plen 10 {0x0001} [hci0] 18:06:14.98 Unknown (0x0050) plen 7 Status: Success (0x00) 90 c5 13 cd f3 cd 02 ....... @ MGMT Command: Add Device (0x0033) plen 8 {0x0001} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0004} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0003} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Device Added (0x001a) plen 8 {0x0002} [hci0] 18:06:14.98 LE Address: CD:F3:CD:13:C5:90 (Static) Action: Auto-connect remote device (0x02) @ MGMT Event: Unknown (0x002a) plen 15 {0x0004} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0003} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0002} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... @ MGMT Event: Unknown (0x002a) plen 15 {0x0001} [hci0] 18:06:14.98 90 c5 13 cd f3 cd 02 01 00 00 00 01 00 00 00 ............... 
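Decoded against the structures added below, opcode 0x0050 is MGMT_OP_SET_DEVICE_FLAGS and event 0x002a is MGMT_EV_DEVICE_FLAGS_CHANGED: the 11-byte command payload is the LE static address CD:F3:CD:13:C5:90 followed by current_flags 0x00000001, and the 15-byte events carry the same address plus supported_flags and current_flags, both 0x00000001 here. A sketch that builds the same payload (illustrative only, not part of this patch):

    /* Assumes <net/bluetooth/bluetooth.h> and the
     * mgmt_cp_set_device_flags structure introduced below.
     */
    static void sketch_fill_set_device_flags(struct mgmt_cp_set_device_flags *cp)
    {
            const bdaddr_t peer = { .b = { 0x90, 0xc5, 0x13, 0xcd, 0xf3, 0xcd } };

            bacpy(&cp->addr.bdaddr, &peer);          /* CD:F3:CD:13:C5:90 */
            cp->addr.type = BDADDR_LE_RANDOM;        /* 0x02, static address */
            cp->current_flags = cpu_to_le32(0x00000001);
                                                     /* bit 0: remote wakeup */
    }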
Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 28 ++++++++ net/bluetooth/mgmt.c | 128 +++++++++++++++++++++++++++++++++++ 2 files changed, 156 insertions(+) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e515288f328f..8e47b0c5fe52 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -720,6 +720,27 @@ struct mgmt_rp_set_exp_feature { #define MGMT_OP_SET_DEF_RUNTIME_CONFIG 0x004e #define MGMT_SET_DEF_RUNTIME_CONFIG_SIZE 0 +#define MGMT_OP_GET_DEVICE_FLAGS 0x004F +#define MGMT_GET_DEVICE_FLAGS_SIZE 7 +struct mgmt_cp_get_device_flags { + struct mgmt_addr_info addr; +} __packed; +struct mgmt_rp_get_device_flags { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; + +#define MGMT_OP_SET_DEVICE_FLAGS 0x0050 +#define MGMT_SET_DEVICE_FLAGS_SIZE 11 +struct mgmt_cp_set_device_flags { + struct mgmt_addr_info addr; + __le32 current_flags; +} __packed; +struct mgmt_rp_set_device_flags { + struct mgmt_addr_info addr; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -951,3 +972,10 @@ struct mgmt_ev_exp_feature_changed { __u8 uuid[16]; __le32 flags; } __packed; + +#define MGMT_EV_DEVICE_FLAGS_CHANGED 0x002a +struct mgmt_ev_device_flags_changed { + struct mgmt_addr_info addr; + __le32 supported_flags; + __le32 current_flags; +} __packed; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d0d0fa832c8a..e409ff48e8e6 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -116,6 +116,8 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_SYSTEM_CONFIG, MGMT_OP_READ_DEF_RUNTIME_CONFIG, MGMT_OP_SET_DEF_RUNTIME_CONFIG, + MGMT_OP_GET_DEVICE_FLAGS, + MGMT_OP_SET_DEVICE_FLAGS, }; static const u16 mgmt_events[] = { @@ -156,6 +158,7 @@ static const u16 mgmt_events[] = { MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_PHY_CONFIGURATION_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, + MGMT_EV_DEVICE_FLAGS_CHANGED, }; static const u16 mgmt_untrusted_commands[] = { @@ -3856,6 +3859,120 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_NOT_SUPPORTED); } +#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1) + +static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_cp_get_device_flags *cp = data; + struct mgmt_rp_get_device_flags rp; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = 0; + u8 status = MGMT_STATUS_INVALID_PARAMS; + + bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n", + &cp->addr.bdaddr, cp->addr.type); + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + if (!br_params) + goto done; + + current_flags = br_params->current_flags; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!params) + goto done; + + current_flags = params->current_flags; + } + + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + rp.supported_flags = cpu_to_le32(supported_flags); + rp.current_flags = cpu_to_le32(current_flags); + + status = MGMT_STATUS_SUCCESS; + +done: + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status, + &rp, sizeof(rp)); +} + +static void device_flags_changed(struct 
sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 bdaddr_type, + u32 supported_flags, u32 current_flags) +{ + struct mgmt_ev_device_flags_changed ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = bdaddr_type; + ev.supported_flags = cpu_to_le32(supported_flags); + ev.current_flags = cpu_to_le32(current_flags); + + mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk); +} + +static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_device_flags *cp = data; + struct bdaddr_list_with_flags *br_params; + struct hci_conn_params *params; + u8 status = MGMT_STATUS_INVALID_PARAMS; + u32 supported_flags = SUPPORTED_DEVICE_FLAGS(); + u32 current_flags = __le32_to_cpu(cp->current_flags); + + bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x", + &cp->addr.bdaddr, cp->addr.type, + __le32_to_cpu(current_flags)); + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto done; + } + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, + &cp->addr.bdaddr, + cp->addr.type); + + if (br_params) { + br_params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", + &cp->addr.bdaddr, cp->addr.type); + } + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (params) { + params->current_flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", + &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + } + } + +done: + if (status == MGMT_STATUS_SUCCESS) + device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, + supported_flags, current_flags); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, + &cp->addr, sizeof(cp->addr)); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -5973,7 +6090,9 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, { struct mgmt_cp_add_device *cp = data; u8 auto_conn, addr_type; + struct hci_conn_params *params; int err; + u32 current_flags = 0; bt_dev_dbg(hdev, "sock %p", sk); @@ -6041,12 +6160,19 @@ static int add_device(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_FAILED, &cp->addr, sizeof(cp->addr)); goto unlock; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + addr_type); + if (params) + current_flags = params->current_flags; } hci_update_background_scan(hdev); added: device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, + SUPPORTED_DEVICE_FLAGS(), current_flags); err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, MGMT_STATUS_SUCCESS, &cp->addr, @@ -7313,6 +7439,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_UNTRUSTED }, { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, HCI_MGMT_VAR_LEN }, + { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, + { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) From 7fceb17c6b480e0f2bd0e566a8231039fb8a809e Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:12 +0200 Subject: [PATCH 0062/2056] Bluetooth: Add definitions for advertisement monitor features This adds support for Advertisement 
Monitor API. Here are the commands and events added. - Read Advertisement Monitor Feature command - Add Advertisement Pattern Monitor command - Remove Advertisement Monitor command - Advertisement Monitor Added event - Advertisement Monitor Removed event Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/mgmt.h | 49 ++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 8e47b0c5fe52..beae5c3980f0 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -741,6 +741,45 @@ struct mgmt_rp_set_device_flags { struct mgmt_addr_info addr; } __packed; +#define MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS BIT(0) + +#define MGMT_OP_READ_ADV_MONITOR_FEATURES 0x0051 +#define MGMT_READ_ADV_MONITOR_FEATURES_SIZE 0 +struct mgmt_rp_read_adv_monitor_features { + __le32 supported_features; + __le32 enabled_features; + __le16 max_num_handles; + __u8 max_num_patterns; + __le16 num_handles; + __le16 handles[]; +} __packed; + +struct mgmt_adv_pattern { + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[31]; +} __packed; + +#define MGMT_OP_ADD_ADV_PATTERNS_MONITOR 0x0052 +struct mgmt_cp_add_adv_patterns_monitor { + __u8 pattern_count; + struct mgmt_adv_pattern patterns[]; +} __packed; +#define MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE 1 +struct mgmt_rp_add_adv_patterns_monitor { + __le16 monitor_handle; +} __packed; + +#define MGMT_OP_REMOVE_ADV_MONITOR 0x0053 +struct mgmt_cp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; +#define MGMT_REMOVE_ADV_MONITOR_SIZE 2 +struct mgmt_rp_remove_adv_monitor { + __le16 monitor_handle; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; @@ -979,3 +1018,13 @@ struct mgmt_ev_device_flags_changed { __le32 supported_flags; __le32 current_flags; } __packed; + +#define MGMT_EV_ADV_MONITOR_ADDED 0x002b +struct mgmt_ev_adv_monitor_added { + __le16 monitor_handle; +} __packed; + +#define MGMT_EV_ADV_MONITOR_REMOVED 0x002c +struct mgmt_ev_adv_monitor_removed { + __le16 monitor_handle; +} __packed; From e5e1e7fd470ccf2eb38ab7fb5a3ab0fc4792fe53 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:13 +0200 Subject: [PATCH 0063/2056] Bluetooth: Add handler of MGMT_OP_READ_ADV_MONITOR_FEATURES This adds the request handler of MGMT_OP_READ_ADV_MONITOR_FEATURES command. Since the controller-based monitoring is not yet in place, this report only the supported features but not the enabled features. The following test was performed. - Issuing btmgmt advmon-features. 
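For reference, a sketch of the reply a caller such as btmgmt advmon-features sees at this point in the series, with no monitors registered (the limits come from the constants added to hci_core.h below):

    /*
     * Illustrative reply contents:
     *
     *   supported_features = 0x00000001 if the controller advertises
     *                        MSFT_FEATURE_MASK_LE_ADV_MONITOR, else 0
     *   enabled_features   = 0x00000000 (controller offload not wired up yet)
     *   max_num_handles    = 32  (HCI_MAX_ADV_MONITOR_NUM_HANDLES)
     *   max_num_patterns   = 16  (HCI_MAX_ADV_MONITOR_NUM_PATTERNS)
     *   num_handles        = 0, handles[] empty
     */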
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 24 ++++++++++++++++ net/bluetooth/hci_core.c | 10 ++++++- net/bluetooth/mgmt.c | 48 ++++++++++++++++++++++++++++++++ net/bluetooth/msft.c | 7 +++++ net/bluetooth/msft.h | 9 ++++++ 5 files changed, 97 insertions(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6f88e5d81bd2..4e9d51087674 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include #include #include @@ -237,6 +238,24 @@ struct adv_info { #define HCI_MAX_ADV_INSTANCES 5 #define HCI_DEFAULT_ADV_DURATION 2 +struct adv_pattern { + struct list_head list; + __u8 ad_type; + __u8 offset; + __u8 length; + __u8 value[HCI_MAX_AD_LENGTH]; +}; + +struct adv_monitor { + struct list_head patterns; + bool active; + __u16 handle; +}; + +#define HCI_MIN_ADV_MONITOR_HANDLE 1 +#define HCI_MAX_ADV_MONITOR_NUM_HANDLES 32 +#define HCI_MAX_ADV_MONITOR_NUM_PATTERNS 16 + #define HCI_MAX_SHORT_NAME_LENGTH 10 /* Min encryption key size to match with SMP */ @@ -511,6 +530,9 @@ struct hci_dev { __u16 adv_instance_timeout; struct delayed_work adv_instance_expire; + struct idr adv_monitors_idr; + unsigned int adv_monitors_cnt; + __u8 irk[16]; __u32 rpa_timeout; struct delayed_work rpa_expired; @@ -1258,6 +1280,8 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); +void hci_adv_monitors_clear(struct hci_dev *hdev); + void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); void hci_init_sysfs(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 8e01afb2ee8c..53aec32a5850 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -26,7 +26,6 @@ /* Bluetooth HCI core. 
*/ #include -#include #include #include #include @@ -2996,6 +2995,12 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, return 0; } +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + idr_destroy(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { @@ -3646,6 +3651,8 @@ int hci_register_dev(struct hci_dev *hdev) queue_work(hdev->req_workqueue, &hdev->power_on); + idr_init(&hdev->adv_monitors_idr); + return id; err_wqueue: @@ -3716,6 +3723,7 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_smp_irks_clear(hdev); hci_remote_oob_data_clear(hdev); hci_adv_instances_clear(hdev); + hci_adv_monitors_clear(hdev); hci_bdaddr_list_clear(&hdev->le_white_list); hci_bdaddr_list_clear(&hdev->le_resolv_list); hci_conn_params_clear_all(hdev); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index e409ff48e8e6..8aec7fbe9a38 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -37,6 +37,7 @@ #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" +#include "msft.h" #define MGMT_VERSION 1 #define MGMT_REVISION 17 @@ -118,6 +119,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEF_RUNTIME_CONFIG, MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, + MGMT_OP_READ_ADV_MONITOR_FEATURES, }; static const u16 mgmt_events[] = { @@ -3973,6 +3975,51 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, &cp->addr, sizeof(cp->addr)); } +static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct adv_monitor *monitor = NULL; + struct mgmt_rp_read_adv_monitor_features *rp = NULL; + int handle; + size_t rp_size = 0; + __u32 supported = 0; + __u16 num_handles = 0; + __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR) + supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) { + handles[num_handles++] = monitor->handle; + } + + hci_dev_unlock(hdev); + + rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + /* Once controller-based monitoring is in place, the enabled_features + * should reflect the use. 
+ */ + rp->supported_features = cpu_to_le32(supported); + rp->enabled_features = 0; + rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); + rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; + rp->num_handles = cpu_to_le16(num_handles); + if (num_handles) + memcpy(&rp->handles, &handles, (num_handles * sizeof(u16))); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_size); +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7441,6 +7488,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { HCI_MGMT_VAR_LEN }, { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, + { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index d6c4e6b5ae77..8579bfeb2836 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -139,3 +139,10 @@ void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) bt_dev_dbg(hdev, "MSFT vendor event %u", event); } + +__u64 msft_get_features(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + return msft ? msft->features : 0; +} diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h index 5aa9130e1f8a..e9c478e890b8 100644 --- a/net/bluetooth/msft.h +++ b/net/bluetooth/msft.h @@ -3,16 +3,25 @@ * Copyright (C) 2020 Google Corporation */ +#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0) +#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1) +#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2) +#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3) +#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4) +#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5) + #if IS_ENABLED(CONFIG_BT_MSFTEXT) void msft_do_open(struct hci_dev *hdev); void msft_do_close(struct hci_dev *hdev); void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb); +__u64 msft_get_features(struct hci_dev *hdev); #else static inline void msft_do_open(struct hci_dev *hdev) {} static inline void msft_do_close(struct hci_dev *hdev) {} static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {} +static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } #endif From b139553db5cd940d66095fb97de1727e9a19369f Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:14 +0200 Subject: [PATCH 0064/2056] Bluetooth: Add handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR This adds the request handler of MGMT_OP_ADD_ADV_PATTERNS_MONITOR command. Note that the controller-based monitoring is not yet in place. This tracks the content of the monitor without sending HCI traffic, so the request returns immediately. The following manual test was performed. - Issue btmgmt advmon-add with valid and invalid inputs. - Issue btmgmt advmon-add more the allowed number of monitors. 
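A sketch of a well-formed request for the handler above (illustrative values only): a single pattern whose bounds satisfy the checks against HCI_MAX_AD_LENGTH (offset < 31, length <= 31, offset + length <= 31).

    /* Sketch; cp must point to a buffer of at least
     * sizeof(*cp) + sizeof(struct mgmt_adv_pattern) bytes.
     */
    static void sketch_fill_adv_monitor_cmd(struct mgmt_cp_add_adv_patterns_monitor *cp)
    {
            cp->pattern_count = 1;

            cp->patterns[0].ad_type  = 0x16;  /* Service Data - 16-bit UUID */
            cp->patterns[0].offset   = 0;
            cp->patterns[0].length   = 2;
            cp->patterns[0].value[0] = 0xaa;  /* example UUID bytes, */
            cp->patterns[0].value[1] = 0xfe;  /* little-endian        */
    }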
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 2 + net/bluetooth/hci_core.c | 40 +++++++++++++ net/bluetooth/mgmt.c | 100 +++++++++++++++++++++++++++++++ 3 files changed, 142 insertions(+) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4e9d51087674..13fad419ae7d 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1281,6 +1281,8 @@ int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance); void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); +void hci_free_adv_monitor(struct adv_monitor *monitor); +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 53aec32a5850..ce481fab349d 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2998,9 +2998,49 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, /* This function requires the caller holds hdev->lock */ void hci_adv_monitors_clear(struct hci_dev *hdev) { + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(monitor); + idr_destroy(&hdev->adv_monitors_idr); } +void hci_free_adv_monitor(struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) + kfree(pattern); + + kfree(monitor); +} + +/* This function requires the caller holds hdev->lock */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + + if (!monitor) + return -EINVAL; + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + if (handle < 0) + return handle; + + hdev->adv_monitors_cnt++; + monitor->handle = handle; + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 8aec7fbe9a38..1eca36e51706 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -120,6 +120,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_GET_DEVICE_FLAGS, MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, }; static const u16 mgmt_events[] = { @@ -4020,6 +4021,103 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, MGMT_STATUS_SUCCESS, rp, rp_size); } +static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_adv_patterns_monitor *cp = data; + struct mgmt_rp_add_adv_patterns_monitor rp; + struct adv_monitor *m = NULL; + struct adv_pattern *p = NULL; + __u8 cp_ofst = 0, cp_len = 0; + unsigned int mp_cnt = 0; + int err, i; + + BT_DBG("request for %s", hdev->name); + + if (len <= sizeof(*cp) || cp->pattern_count == 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + m = kmalloc(sizeof(*m), GFP_KERNEL); + if (!m) { + err = -ENOMEM; + goto failed; + } + + INIT_LIST_HEAD(&m->patterns); + m->active = false; + + for (i = 0; i < cp->pattern_count; i++) { + if 
(++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + cp_ofst = cp->patterns[i].offset; + cp_len = cp->patterns[i].length; + if (cp_ofst >= HCI_MAX_AD_LENGTH || + cp_len > HCI_MAX_AD_LENGTH || + (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) { + err = -ENOMEM; + goto failed; + } + + p->ad_type = cp->patterns[i].ad_type; + p->offset = cp->patterns[i].offset; + p->length = cp->patterns[i].length; + memcpy(p->value, cp->patterns[i].value, p->length); + + INIT_LIST_HEAD(&p->list); + list_add(&p->list, &m->patterns); + } + + if (mp_cnt != cp->pattern_count) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_INVALID_PARAMS); + goto failed; + } + + hci_dev_lock(hdev); + + err = hci_add_adv_monitor(hdev, m); + if (err) { + if (err == -ENOSPC) { + mgmt_cmd_status(sk, hdev->id, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_NO_RESOURCES); + } + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cpu_to_le16(m->handle); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + +failed: + hci_free_adv_monitor(m); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7489,6 +7587,8 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, + { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, + HCI_MGMT_VAR_LEN }, }; void mgmt_index_added(struct hci_dev *hdev) From bd2fbc6cb815b5171facb42526f6db206d920e13 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:15 +0200 Subject: [PATCH 0065/2056] Bluetooth: Add handler of MGMT_OP_REMOVE_ADV_MONITOR This adds the request handler of MGMT_OP_REMOVE_ADV_MONITOR command. Note that the controller-based monitoring is not yet in place. This removes the internal monitor(s) without sending HCI traffic, so the request returns immediately. The following test was performed. - Issue btmgmt advmon-remove with valid and invalid handles. 
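The request body is just the 2-byte monitor handle; as the helper below implements, handle 0 acts as a wildcard and removes every registered monitor, while an unknown non-zero handle yields -ENOENT (reported as MGMT_STATUS_INVALID_INDEX). A minimal sketch of the two call patterns, assuming hdev->lock is held as the helper requires:

    static int sketch_remove_one(struct hci_dev *hdev, u16 handle)
    {
            return hci_remove_adv_monitor(hdev, handle); /* -ENOENT if unknown */
    }

    static int sketch_remove_all(struct hci_dev *hdev)
    {
            return hci_remove_adv_monitor(hdev, 0);      /* 0 removes them all */
    }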
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 31 ++++++++++++++++++++++++++++ net/bluetooth/mgmt.c | 35 ++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 13fad419ae7d..c54f9295892e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1283,6 +1283,7 @@ void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index ce481fab349d..59132b3e2cde 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3041,6 +3041,37 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) return 0; } +static int free_adv_monitor(int id, void *ptr, void *data) +{ + struct hci_dev *hdev = data; + struct adv_monitor *monitor = ptr; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor; + + if (handle) { + monitor = idr_find(&hdev->adv_monitors_idr, handle); + if (!monitor) + return -ENOENT; + + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + hci_free_adv_monitor(monitor); + } else { + /* Remove all monitors if handle is 0. 
*/ + idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); + } + + return 0; +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 1eca36e51706..cff24fde72d2 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -121,6 +121,7 @@ static const u16 mgmt_commands[] = { MGMT_OP_SET_DEVICE_FLAGS, MGMT_OP_READ_ADV_MONITOR_FEATURES, MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_OP_REMOVE_ADV_MONITOR, }; static const u16 mgmt_events[] = { @@ -4118,6 +4119,39 @@ static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, return err; } +static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_remove_adv_monitor *cp = data; + struct mgmt_rp_remove_adv_monitor rp; + u16 handle; + int err; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + handle = __le16_to_cpu(cp->monitor_handle); + + err = hci_remove_adv_monitor(hdev, handle); + if (err == -ENOENT) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_INVALID_INDEX); + goto unlock; + } + + hci_dev_unlock(hdev); + + rp.monitor_handle = cp->monitor_handle; + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + +unlock: + hci_dev_unlock(hdev); + return err; +} + static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb) { @@ -7589,6 +7623,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = { { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, HCI_MGMT_VAR_LEN }, + { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, }; void mgmt_index_added(struct hci_dev *hdev) From b52729f27b1e3957bef7306d47abf9cd855524e7 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:16 +0200 Subject: [PATCH 0066/2056] Bluetooth: Notify adv monitor added event This notifies management sockets on MGMT_EV_ADV_MONITOR_ADDED event. The following test was performed. 
- Start two btmgmt consoles, issue a btmgmt advmon-add command on one console and observe a MGMT_EV_ADV_MONITOR_ADDED event on the other Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- net/bluetooth/mgmt.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index cff24fde72d2..3268d9a00608 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -191,6 +191,7 @@ static const u16 mgmt_untrusted_events[] = { MGMT_EV_EXT_INDEX_REMOVED, MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, + MGMT_EV_ADV_MONITOR_ADDED, }; #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) @@ -3977,6 +3978,16 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, &cp->addr, sizeof(cp->addr)); } +static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, + u16 handle) +{ + struct mgmt_ev_adv_monitor_added ev; + + ev.monitor_handle = cpu_to_le16(handle); + + mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); +} + static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -4029,8 +4040,8 @@ static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, struct mgmt_rp_add_adv_patterns_monitor rp; struct adv_monitor *m = NULL; struct adv_pattern *p = NULL; + unsigned int mp_cnt = 0, prev_adv_monitors_cnt; __u8 cp_ofst = 0, cp_len = 0; - unsigned int mp_cnt = 0; int err, i; BT_DBG("request for %s", hdev->name); @@ -4094,6 +4105,8 @@ static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); + prev_adv_monitors_cnt = hdev->adv_monitors_cnt; + err = hci_add_adv_monitor(hdev, m); if (err) { if (err == -ENOSPC) { @@ -4104,6 +4117,9 @@ static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, goto unlock; } + if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt) + mgmt_adv_monitor_added(sk, hdev, m->handle); + hci_dev_unlock(hdev); rp.monitor_handle = cpu_to_le16(m->handle); From cdde92e230719f77ac3a5f936e25ed4e01ec057f Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:17 +0200 Subject: [PATCH 0067/2056] Bluetooth: Notify adv monitor removed event This notifies management sockets on MGMT_EV_ADV_MONITOR_REMOVED event. The following test was performed. - Start two btmgmt consoles, issue a btmgmt advmon-remove command on one console and observe a MGMT_EV_ADV_MONITOR_REMOVED event on the other. 
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- net/bluetooth/mgmt.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 3268d9a00608..b194da4de2d7 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -192,6 +192,7 @@ static const u16 mgmt_untrusted_events[] = { MGMT_EV_EXT_INFO_CHANGED, MGMT_EV_EXP_FEATURE_CHANGED, MGMT_EV_ADV_MONITOR_ADDED, + MGMT_EV_ADV_MONITOR_REMOVED, }; #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) @@ -3988,6 +3989,16 @@ static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); } +static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev, + u16 handle) +{ + struct mgmt_ev_adv_monitor_added ev; + + ev.monitor_handle = cpu_to_le16(handle); + + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk); +} + static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -4140,6 +4151,7 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, { struct mgmt_cp_remove_adv_monitor *cp = data; struct mgmt_rp_remove_adv_monitor rp; + unsigned int prev_adv_monitors_cnt; u16 handle; int err; @@ -4148,6 +4160,7 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, hci_dev_lock(hdev); handle = __le16_to_cpu(cp->monitor_handle); + prev_adv_monitors_cnt = hdev->adv_monitors_cnt; err = hci_remove_adv_monitor(hdev, handle); if (err == -ENOENT) { @@ -4156,6 +4169,9 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, goto unlock; } + if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt) + mgmt_adv_monitor_removed(sk, hdev, handle); + hci_dev_unlock(hdev); rp.monitor_handle = cp->monitor_handle; From 8208f5a9d435e58ee7f53a24d9ccbe7787944537 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Wed, 17 Jun 2020 16:39:18 +0200 Subject: [PATCH 0068/2056] Bluetooth: Update background scan and report device based on advertisement monitors This calls hci_update_background_scan() when there is any update on the advertisement monitors. If there is at least one advertisement monitor, the filtering policy of scan parameters should be 0x00. This also reports device found mgmt events if there is at least one monitor. The following cases were tested with btmgmt advmon-* commands. (1) add a ADV monitor and observe that the passive scanning is triggered. (2) remove the last ADV monitor and observe that the passive scanning is terminated. (3) with a LE peripheral paired, repeat (1) and observe the passive scanning continues. (4) with a LE peripheral paired, repeat (2) and observe the passive scanning continues. (5) with a ADV monitor, suspend/resume the host and observe the passive scanning continues. 
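A condensed sketch of the two decisions this patch changes (the full versions are in the hci_request.c hunks below): passive scanning keeps running whenever at least one monitor exists, and the scan filter policy falls back to 0x00 so monitored advertisements are not filtered out against the white list.

    /* Sketch only; assumes hci_is_adv_monitoring() added below. */
    static bool sketch_background_scan_needed(struct hci_dev *hdev)
    {
            return !list_empty(&hdev->pend_le_conns) ||
                   !list_empty(&hdev->pend_le_reports) ||
                   hci_is_adv_monitoring(hdev);
    }

    static u8 sketch_scan_filter_policy(struct hci_dev *hdev)
    {
            if (hci_is_adv_monitoring(hdev))
                    return 0x00;    /* do not restrict to the white list */

            return 0x01;            /* white list only */
    }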
Signed-off-by: Miao-chen Chou Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 13 +++++++++++++ net/bluetooth/hci_event.c | 5 +++-- net/bluetooth/hci_request.c | 17 ++++++++++++++--- net/bluetooth/mgmt.c | 5 ++++- 5 files changed, 35 insertions(+), 6 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c54f9295892e..524057598ffd 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1284,6 +1284,7 @@ void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct adv_monitor *monitor); int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle); +bool hci_is_adv_monitoring(struct hci_dev *hdev); void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 59132b3e2cde..7959b851cc63 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3005,6 +3005,8 @@ void hci_adv_monitors_clear(struct hci_dev *hdev) hci_free_adv_monitor(monitor); idr_destroy(&hdev->adv_monitors_idr); + + hci_update_background_scan(hdev); } void hci_free_adv_monitor(struct adv_monitor *monitor) @@ -3038,6 +3040,9 @@ int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) hdev->adv_monitors_cnt++; monitor->handle = handle; + + hci_update_background_scan(hdev); + return 0; } @@ -3069,9 +3074,17 @@ int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle) idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev); } + hci_update_background_scan(hdev); + return 0; } +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, bdaddr_t *bdaddr, u8 type) { diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 8981954ff4c4..e08d4dd9a24e 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5447,14 +5447,15 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, /* Passive scanning shouldn't trigger any device found events, * except for devices marked as CONN_REPORT for which we do send - * device found events. + * device found events, or advertisement monitoring requested. */ if (hdev->le_scan_type == LE_SCAN_PASSIVE) { if (type == LE_ADV_DIRECT_IND) return; if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, - bdaddr, bdaddr_type)) + bdaddr, bdaddr_type) && + idr_is_empty(&hdev->adv_monitors_idr)) return; if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index eee9c007a5fb..29decd7e8051 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -413,11 +413,15 @@ static void __hci_update_background_scan(struct hci_request *req) */ hci_discovery_filter_clear(hdev); + BT_DBG("%s ADV monitoring is %s", hdev->name, + hci_is_adv_monitoring(hdev) ? "on" : "off"); + if (list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports)) { + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { /* If there is no pending LE connections or devices - * to be scanned for, we should stop the background - * scanning. + * to be scanned for or no ADV monitors, we should stop the + * background scanning. 
*/ /* If controller is not scanning we are done. */ @@ -794,6 +798,13 @@ static u8 update_white_list(struct hci_request *req) return 0x00; } + /* Once the controller offloading of advertisement monitor is in place, + * the if condition should include the support of MSFT extension + * support. + */ + if (!idr_is_empty(&hdev->adv_monitors_idr)) + return 0x00; + /* Select filter policy to use white list */ return 0x01; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b194da4de2d7..ec66160a673c 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -8575,8 +8575,11 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, if (!hci_discovery_active(hdev)) { if (link_type == ACL_LINK) return; - if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports)) + if (link_type == LE_LINK && + list_empty(&hdev->pend_le_reports) && + !hci_is_adv_monitoring(hdev)) { return; + } } if (hdev->discovery.result_filtering) { From 76b139965575e51224d33ea721d9d00a542b6b39 Mon Sep 17 00:00:00 2001 From: Manish Mandlik Date: Wed, 17 Jun 2020 16:39:19 +0200 Subject: [PATCH 0069/2056] Bluetooth: Terminate the link if pairing is cancelled If user decides to cancel the ongoing pairing process (e.g. by clicking the cancel button on pairing/passkey window), abort any ongoing pairing and then terminate the link if it was created because of the pair device action. Signed-off-by: Manish Mandlik Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- include/net/bluetooth/hci_core.h | 14 ++++++++++++-- net/bluetooth/hci_conn.c | 11 ++++++++--- net/bluetooth/l2cap_core.c | 6 ++++-- net/bluetooth/mgmt.c | 22 ++++++++++++++++++---- 4 files changed, 42 insertions(+), 11 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 524057598ffd..77d29341b064 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -564,6 +564,12 @@ struct hci_dev { #define HCI_PHY_HANDLE(handle) (handle & 0xff) +enum conn_reasons { + CONN_REASON_PAIR_DEVICE, + CONN_REASON_L2CAP_CHAN, + CONN_REASON_SCO_CONNECT, +}; + struct hci_conn { struct list_head list; @@ -615,6 +621,8 @@ struct hci_conn { __s8 max_tx_power; unsigned long flags; + enum conn_reasons conn_reason; + __u32 clock; __u16 clock_accuracy; @@ -1040,12 +1048,14 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle); struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, - u16 conn_timeout); + u16 conn_timeout, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, u16 conn_timeout, u8 role, bdaddr_t *direct_rpa); struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type); + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting); int hci_conn_check_link_mode(struct hci_conn *conn); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 9bdffc4e79b0..47f3a45d7dcb 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1174,7 +1174,8 @@ static int hci_explicit_conn_params_set(struct hci_dev *hdev, /* This function requires the caller holds hdev->lock */ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, u8 dst_type, u8 sec_level, - u16 conn_timeout) + u16 conn_timeout, + enum conn_reasons conn_reason) { struct hci_conn *conn; 
@@ -1219,6 +1220,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, conn->sec_level = BT_SECURITY_LOW; conn->pending_sec_level = sec_level; conn->conn_timeout = conn_timeout; + conn->conn_reason = conn_reason; hci_update_background_scan(hdev); @@ -1228,7 +1230,8 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, } struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, - u8 sec_level, u8 auth_type) + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason) { struct hci_conn *acl; @@ -1248,6 +1251,7 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, hci_conn_hold(acl); + acl->conn_reason = conn_reason; if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { acl->sec_level = BT_SECURITY_LOW; acl->pending_sec_level = sec_level; @@ -1264,7 +1268,8 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, struct hci_conn *acl; struct hci_conn *sco; - acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, + CONN_REASON_SCO_CONNECT); if (IS_ERR(acl)) return acl; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index fe913a5c754a..35d2bc569a2d 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -7893,11 +7893,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, else hcon = hci_connect_le_scan(hdev, dst, dst_type, chan->sec_level, - HCI_LE_CONN_TIMEOUT); + HCI_LE_CONN_TIMEOUT, + CONN_REASON_L2CAP_CHAN); } else { u8 auth_type = l2cap_get_auth_type(chan); - hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type); + hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type, + CONN_REASON_L2CAP_CHAN); } if (IS_ERR(hcon)) { diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index ec66160a673c..2a732cab1dc9 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2931,7 +2931,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->addr.type == BDADDR_BREDR) { conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, - auth_type); + auth_type, CONN_REASON_PAIR_DEVICE); } else { u8 addr_type = le_addr_type(cp->addr.type); struct hci_conn_params *p; @@ -2950,9 +2950,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) p->auto_connect = HCI_AUTO_CONN_DISABLED; - conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, - addr_type, sec_level, - HCI_LE_CONN_TIMEOUT); + conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, + sec_level, HCI_LE_CONN_TIMEOUT, + CONN_REASON_PAIR_DEVICE); } if (IS_ERR(conn)) { @@ -3053,6 +3053,20 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, addr, sizeof(*addr)); + + /* Since user doesn't want to proceed with the connection, abort any + * ongoing pairing and then terminate the link if it was created + * because of the pair device action. 
+ */ + if (addr->type == BDADDR_BREDR) + hci_remove_link_key(hdev, &addr->bdaddr); + else + smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, + le_addr_type(addr->type)); + + if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); + unlock: hci_dev_unlock(hdev); return err; From 46605a271114f1243c807a55c607271c81e662d2 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Wed, 17 Jun 2020 16:39:20 +0200 Subject: [PATCH 0070/2056] Bluetooth: mgmt: Use command complete on success for set system config The command status reply is only for failure. When completing set system config command, the reply has to be command complete. Signed-off-by: Marcel Holtmann Reviewed-by: Alain Michaud Signed-off-by: Johan Hedberg --- net/bluetooth/mgmt_config.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index 8e7ad2a51dbb..8d01a8ff85e9 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -247,9 +247,8 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, buffer += exp_len; } - return mgmt_cmd_status(sk, hdev->id, - MGMT_OP_SET_DEF_SYSTEM_CONFIG, - MGMT_STATUS_SUCCESS); + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, 0, NULL, 0); } int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, From 7c7982cbadbb63eb76401ddc4ef090cf7ae274b4 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 17 Jun 2020 10:42:26 -0700 Subject: [PATCH 0071/2056] bpf: sk_storage: Prefer to get a free cache_idx The cache_idx is currently picked by RR. There is chance that the same cache_idx will be picked by multiple sk_storage_maps while other cache_idx is still unused. e.g. It could happen when the sk_storage_map is recreated during the restart of the user space process. This patch tracks the usage count for each cache_idx. There is 16 of them now (defined in BPF_SK_STORAGE_CACHE_SIZE). It will try to pick the free cache_idx. If none was found, it would pick one with the minimal usage count. 
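The allocation strategy described above is small enough to be worth seeing in isolation. Below is a userspace rendering of the cache_idx_get()/cache_idx_free() pair this patch adds: a pthread mutex stands in for the kernel spinlock and the array size mirrors BPF_SK_STORAGE_CACHE_SIZE, but the selection logic follows the patch, preferring a slot with zero users and otherwise taking the least-used one. Compared with the previous round-robin counter, this avoids piling recreated maps onto the same index while other indexes sit idle.

/*
 * Userspace sketch of the cache_idx allocator added by this patch.
 * pthread_mutex replaces the kernel spinlock; logic is otherwise
 * the same: free slot first, else minimal usage count.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 16   /* mirrors BPF_SK_STORAGE_CACHE_SIZE */

static pthread_mutex_t cache_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t cache_idx_usage_counts[CACHE_SIZE];

static uint16_t cache_idx_get(void)
{
    uint64_t min_usage = UINT64_MAX;
    uint16_t i, res = 0;

    pthread_mutex_lock(&cache_idx_lock);
    for (i = 0; i < CACHE_SIZE; i++) {
        if (cache_idx_usage_counts[i] < min_usage) {
            min_usage = cache_idx_usage_counts[i];
            res = i;
            if (!min_usage)   /* found a completely free slot */
                break;
        }
    }
    cache_idx_usage_counts[res]++;
    pthread_mutex_unlock(&cache_idx_lock);

    return res;
}

static void cache_idx_free(uint16_t idx)
{
    pthread_mutex_lock(&cache_idx_lock);
    cache_idx_usage_counts[idx]--;
    pthread_mutex_unlock(&cache_idx_lock);
}

int main(void)
{
    uint16_t a = cache_idx_get();
    uint16_t b = cache_idx_get();

    printf("first=%u second=%u\n", a, b);   /* expect 0 then 1 */
    cache_idx_free(a);
    cache_idx_free(b);
    return 0;
}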
Signed-off-by: Martin KaFai Lau Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200617174226.2301909-1-kafai@fb.com --- net/core/bpf_sk_storage.c | 41 +++++++++++++++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index d2c4d16dadba..1dae4b543243 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -11,8 +11,6 @@ #include #include -static atomic_t cache_idx; - #define SK_STORAGE_CREATE_FLAG_MASK \ (BPF_F_NO_PREALLOC | BPF_F_CLONE) @@ -81,6 +79,9 @@ struct bpf_sk_storage_elem { #define SDATA(_SELEM) (&(_SELEM)->sdata) #define BPF_SK_STORAGE_CACHE_SIZE 16 +static DEFINE_SPINLOCK(cache_idx_lock); +static u64 cache_idx_usage_counts[BPF_SK_STORAGE_CACHE_SIZE]; + struct bpf_sk_storage { struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE]; struct hlist_head list; /* List of bpf_sk_storage_elem */ @@ -512,6 +513,37 @@ static int sk_storage_delete(struct sock *sk, struct bpf_map *map) return 0; } +static u16 cache_idx_get(void) +{ + u64 min_usage = U64_MAX; + u16 i, res = 0; + + spin_lock(&cache_idx_lock); + + for (i = 0; i < BPF_SK_STORAGE_CACHE_SIZE; i++) { + if (cache_idx_usage_counts[i] < min_usage) { + min_usage = cache_idx_usage_counts[i]; + res = i; + + /* Found a free cache_idx */ + if (!min_usage) + break; + } + } + cache_idx_usage_counts[res]++; + + spin_unlock(&cache_idx_lock); + + return res; +} + +static void cache_idx_free(u16 idx) +{ + spin_lock(&cache_idx_lock); + cache_idx_usage_counts[idx]--; + spin_unlock(&cache_idx_lock); +} + /* Called by __sk_destruct() & bpf_sk_storage_clone() */ void bpf_sk_storage_free(struct sock *sk) { @@ -560,6 +592,8 @@ static void bpf_sk_storage_map_free(struct bpf_map *map) smap = (struct bpf_sk_storage_map *)map; + cache_idx_free(smap->cache_idx); + /* Note that this map might be concurrently cloned from * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone * RCU read section to finish before proceeding. New RCU @@ -673,8 +707,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr) } smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size; - smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) % - BPF_SK_STORAGE_CACHE_SIZE; + smap->cache_idx = cache_idx_get(); return &smap->map; } From 4e638025f25652b052ec54cf75e4cfe4f4663ce7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 16 Jun 2020 18:03:28 -0500 Subject: [PATCH 0072/2056] net: stmmac: selftests: Use struct_size() helper in kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c index e6696495f126..e113b1376fdd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c @@ -1094,7 +1094,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv) if (!priv->dma_cap.frpsel) return -EOPNOTSUPP; - sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL); + sel = kzalloc(struct_size(sel, keys, nk), GFP_KERNEL); if (!sel) return -ENOMEM; From 427d5838e99632e9218eae752009c873cade89ac Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 09:40:51 -0700 Subject: [PATCH 0073/2056] net: napi: remove useless stack trace Whenever a buggy NAPI driver returns more than its budget, we emit a stack trace that is of no use, since it does not tell which driver is buggy. Instead, emit a message giving the function name, and a descriptive message. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 6bc2388141f6..cea7fc1716ca 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6683,7 +6683,9 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll) trace_napi_poll(n, work, weight); } - WARN_ON_ONCE(work > weight); + if (unlikely(work > weight)) + pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n", + n->poll, work, weight); if (likely(work < weight)) goto out_unlock; From 1260e772dd184b29266b552d90cbf80f6ee4b066 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 17 Jun 2020 13:53:17 -0500 Subject: [PATCH 0074/2056] enetc: Use struct_size() helper in kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Claudiu Manoil Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 298c55786fd9..61dbf19075fb 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1687,7 +1687,7 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) int enetc_alloc_msix(struct enetc_ndev_priv *priv) { struct pci_dev *pdev = priv->si->pdev; - int size, v_tx_rings; + int v_tx_rings; int i, n, err, nvec; nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num; @@ -1702,15 +1702,13 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) /* # of tx rings per int vector */ v_tx_rings = priv->num_tx_rings / priv->bdr_int_num; - size = sizeof(struct enetc_int_vector) + - sizeof(struct enetc_bdr) * v_tx_rings; for (i = 0; i < priv->bdr_int_num; i++) { struct enetc_int_vector *v; struct enetc_bdr *bdr; int j; - v = kzalloc(size, GFP_KERNEL); + v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL); if (!v) { err = -ENOMEM; goto fail; From a38b7fbfeaebe9a11489db3fbcfc29108c62c0df Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:51:10 +0200 Subject: [PATCH 0075/2056] r8169: add info for DASH being enabled In case of problems it facilitates the bug analysis if we know whether DASH is active. Therefore emit a message in probe if this is the case. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index dad84ecf5a77..7bb26fb07f5b 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5432,8 +5432,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ? "ok" : "ko"); - if (r8168_check_dash(tp)) + if (r8168_check_dash(tp)) { + netdev_info(dev, "DASH enabled\n"); rtl8168_driver_start(tp); + } if (pci_dev_run_wake(pdev)) pm_runtime_put_sync(&pdev->dev); From 27248d57c88af1eede1619eaa3423b424da06b29 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:51:46 +0200 Subject: [PATCH 0076/2056] r8169: remove unused constant RsvdMask Since 9d3679fe0f30 ("r8169: inline rtl8169_make_unusable_by_asic") this constant isn't used any longer, so remove it. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7bb26fb07f5b..4bc6c5529344 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -529,8 +529,6 @@ enum rtl_rx_desc_bit { RxVlanTag = (1 << 16), /* VLAN tag available */ }; -#define RsvdMask 0x3fffc000 - #define RTL_GSO_MAX_SIZE_V1 32000 #define RTL_GSO_MAX_SEGS_V1 24 #define RTL_GSO_MAX_SIZE_V2 64000 From e9882208ae989116b6ea166ef743111ea4a5248f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:52:47 +0200 Subject: [PATCH 0077/2056] r8169: improve setting WoL on runtime-resume In the following scenario WoL isn't configured properly: - Driver is loaded, interface isn't brought up within 10s, so driver runtime-suspends. - WoL is set. 
- Interface is brought up, stored WoL setting isn't applied. It has always been like that, but the scenario seems to be quite theoretical as I haven't seen any bug report yet. Therefore treat the change as an improvement. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4bc6c5529344..bd95c0ae6536 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4887,14 +4887,12 @@ static int rtl8169_runtime_resume(struct device *device) rtl_rar_set(tp, tp->dev->dev_addr); - if (!tp->TxDescArray) - return 0; - rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - __rtl8169_resume(tp); + if (tp->TxDescArray) + __rtl8169_resume(tp); return 0; } From 7190aeece918cad9b601789bee58877a6c2eeed8 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:53:21 +0200 Subject: [PATCH 0078/2056] r8169: replace synchronize_rcu with synchronize_net rtl8169_hw_reset() may be called under RTNL lock, therefore switch to synchronize_net(). Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index bd95c0ae6536..0d3e58ae1b31 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3929,7 +3929,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down) { /* Give a racing hard_start_xmit a few cycles to complete. */ - synchronize_rcu(); + synchronize_net(); /* Disable interrupts */ rtl8169_irq_mask_and_ack(tp); From 0c28a63a47bfa7497e4dce79f6bb53a00d8f6aa0 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:54:18 +0200 Subject: [PATCH 0079/2056] r8169: move napi_disable call and rename rtl8169_hw_reset rtl8169_hw_reset() meanwhile does more than a hw reset, therefore rename it to rtl8169_cleanup(). In addition move calling napi_disable() to this function. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 0d3e58ae1b31..afcdaace2716 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3926,8 +3926,10 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp) netdev_reset_queue(tp->dev); } -static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down) +static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) { + napi_disable(&tp->napi); + /* Give a racing hard_start_xmit a few cycles to complete. 
*/ synchronize_net(); @@ -3970,10 +3972,9 @@ static void rtl_reset_work(struct rtl8169_private *tp) struct net_device *dev = tp->dev; int i; - napi_disable(&tp->napi); netif_stop_queue(dev); - rtl8169_hw_reset(tp, false); + rtl8169_cleanup(tp, false); for (i = 0; i < NUM_RX_DESC; i++) rtl8169_mark_to_asic(tp->RxDescArray + i); @@ -4636,9 +4637,8 @@ static void rtl8169_down(struct rtl8169_private *tp) bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); phy_stop(tp->phydev); - napi_disable(&tp->napi); - rtl8169_hw_reset(tp, true); + rtl8169_cleanup(tp, true); rtl_pll_power_down(tp); From a2ee847242b35816f57b0731aab9fd6e8693ae65 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:54:54 +0200 Subject: [PATCH 0080/2056] r8169: move updating counters to rtl8169_down Counters are updated whenever we go down, therefore move the call to rtl8169_down(). Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index afcdaace2716..9f99b3f076d8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4638,6 +4638,8 @@ static void rtl8169_down(struct rtl8169_private *tp) phy_stop(tp->phydev); + rtl8169_update_counters(tp); + rtl8169_cleanup(tp, true); rtl_pll_power_down(tp); @@ -4652,9 +4654,6 @@ static int rtl8169_close(struct net_device *dev) pm_runtime_get_sync(&pdev->dev); - /* Update counters before going down */ - rtl8169_update_counters(tp); - netif_stop_queue(dev); rtl8169_down(tp); rtl8169_rx_clear(tp); @@ -4875,9 +4874,6 @@ static int rtl8169_runtime_suspend(struct device *device) rtl8169_net_suspend(tp); - /* Update counters before going runtime suspend */ - rtl8169_update_counters(tp); - return 0; } From 9f0b54cd167219266bd3864570ae8f4987b57520 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:55:40 +0200 Subject: [PATCH 0081/2056] r8169: move switching optional clock on/off to pll power functions Relevant chip clocks are disabled in rtl_pll_power_down(), therefore move calling clk_disable_unprepare() there. Similar for enabling the clock. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 9f99b3f076d8..d55bf2cd2651 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -2292,10 +2292,14 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) default: break; } + + clk_disable_unprepare(tp->clk); } static void rtl_pll_power_up(struct rtl8169_private *tp) { + clk_prepare_enable(tp->clk); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_25 ... 
RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_37: @@ -4826,7 +4830,6 @@ static int __maybe_unused rtl8169_suspend(struct device *device) struct rtl8169_private *tp = dev_get_drvdata(device); rtl8169_net_suspend(tp); - clk_disable_unprepare(tp->clk); return 0; } @@ -4853,8 +4856,6 @@ static int __maybe_unused rtl8169_resume(struct device *device) rtl_rar_set(tp, tp->dev->dev_addr); - clk_prepare_enable(tp->clk); - if (netif_running(tp->dev)) __rtl8169_resume(tp); From 51f6291b040ad9ed475af4fd58bb1db53b737475 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 17 Jun 2020 22:56:27 +0200 Subject: [PATCH 0082/2056] r8169: allow setting irq coalescing if link is down So far we can not configure irq coalescing when link is down. Allow the user to do this, and assume that he wants to configure irq coalescing for highest speed. Otherwise the irq rate is low enough anyway. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index d55bf2cd2651..a3c4187d918b 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1731,16 +1731,16 @@ struct rtl_coalesce_info { #define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) } static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { - { SPEED_10, COALESCE_DELAY(40960) }, - { SPEED_100, COALESCE_DELAY(2560) }, { SPEED_1000, COALESCE_DELAY(320) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, { 0 }, }; static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { - { SPEED_10, COALESCE_DELAY(40960) }, - { SPEED_100, COALESCE_DELAY(2560) }, { SPEED_1000, COALESCE_DELAY(5000) }, + { SPEED_100, COALESCE_DELAY(2560) }, + { SPEED_10, COALESCE_DELAY(40960) }, { 0 }, }; #undef COALESCE_DELAY @@ -1756,6 +1756,10 @@ rtl_coalesce_info(struct rtl8169_private *tp) else ci = rtl_coalesce_info_8168_8136; + /* if speed is unknown assume highest one */ + if (tp->phydev->speed == SPEED_UNKNOWN) + return ci; + for (; ci->speed; ci++) { if (tp->phydev->speed == ci->speed) return ci; From c90834cd477a6e406c320c56774999a71a859600 Mon Sep 17 00:00:00 2001 From: Tim Harvey Date: Wed, 17 Jun 2020 15:59:10 -0700 Subject: [PATCH 0083/2056] lan743x: allow mac address to come from dt If a valid mac address is present in dt, use that before using CSR's or a random mac address. Signed-off-by: Tim Harvey Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index f1711ac86d0c..9585467cf11c 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "lan743x_main.h" #include "lan743x_ethtool.h" @@ -807,26 +808,29 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter) data |= MAC_CR_CNTR_RST_; lan743x_csr_write(adapter, MAC_CR, data); - mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); - mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); - adapter->mac_address[0] = mac_addr_lo & 0xFF; - adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; - adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; - adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; - adapter->mac_address[4] = mac_addr_hi & 0xFF; - adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; + if (!is_valid_ether_addr(adapter->mac_address)) { + mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); + mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); + adapter->mac_address[0] = mac_addr_lo & 0xFF; + adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; + adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; + adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; + adapter->mac_address[4] = mac_addr_hi & 0xFF; + adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; - if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && - mac_addr_lo == 0xFFFFFFFF) { - mac_address_valid = false; - } else if (!is_valid_ether_addr(adapter->mac_address)) { - mac_address_valid = false; + if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && + mac_addr_lo == 0xFFFFFFFF) { + mac_address_valid = false; + } else if (!is_valid_ether_addr(adapter->mac_address)) { + mac_address_valid = false; + } + + if (!mac_address_valid) + eth_random_addr(adapter->mac_address); } - - if (!mac_address_valid) - eth_random_addr(adapter->mac_address); lan743x_mac_set_address(adapter, adapter->mac_address); ether_addr_copy(netdev->dev_addr, adapter->mac_address); + return 0; } @@ -2817,6 +2821,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, { struct lan743x_adapter *adapter = NULL; struct net_device *netdev = NULL; + const void *mac_addr; int ret = -ENODEV; netdev = devm_alloc_etherdev(&pdev->dev, @@ -2833,6 +2838,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev, NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; + mac_addr = of_get_mac_address(pdev->dev.of_node); + if (!IS_ERR(mac_addr)) + ether_addr_copy(adapter->mac_address, mac_addr); + ret = lan743x_pci_init(adapter, pdev); if (ret) goto return_error; From 9f66a4557e9338eecee4159834c3d1ec4a327158 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 17 Jun 2020 18:15:57 -0500 Subject: [PATCH 0084/2056] mISDN: hfcsusb: Use struct_size() helper Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/mISDN/hfcsusb.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index 621364bb6b12..4274906f8654 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -261,8 +261,7 @@ hfcsusb_ph_info(struct hfcsusb *hw) phi->bch[i].Flags = hw->bch[i].Flags; } _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY, - sizeof(struct ph_info_dch) + dch->dev.nrbchan * - sizeof(struct ph_info_ch), phi, GFP_ATOMIC); + struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC); kfree(phi); } From 674a135746aa7553eff12177d11868367aa102cb Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 18 Jun 2020 13:02:07 +1200 Subject: [PATCH 0085/2056] net: hns3: remove unnecessary devm_kfree since we are using device-managed function, it is unnecessary to free in probe. Signed-off-by: Barry Song Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index b14f2abc2425..1817d7f2e5f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2097,10 +2097,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, ae_dev); ret = hnae3_register_ae_dev(ae_dev); - if (ret) { - devm_kfree(&pdev->dev, ae_dev); + if (ret) pci_set_drvdata(pdev, NULL); - } return ret; } @@ -2157,7 +2155,6 @@ static void hns3_shutdown(struct pci_dev *pdev) struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); hnae3_unregister_ae_dev(ae_dev); - devm_kfree(&pdev->dev, ae_dev); pci_set_drvdata(pdev, NULL); if (system_state == SYSTEM_POWER_OFF) From cb0e3e611575a3335ea0521c9e8ba84a6a7e51c6 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 18 Jun 2020 13:02:08 +1200 Subject: [PATCH 0086/2056] net: hns3: pointer type of buffer should be void Move the type of buffer address from unsigned char to void Signed-off-by: Barry Song Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1817d7f2e5f6..61b5a849b162 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -3070,7 +3070,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) return -ENXIO; if (!skb) - ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + ring->va = desc_cb->buf + desc_cb->page_offset; /* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. 
Our L1 Cache diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 66cd4395f781..9f64077ee834 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -407,7 +407,7 @@ struct hns3_enet_ring { u32 pull_len; /* head length for current packet */ u32 frag_num; - unsigned char *va; /* first buffer address for current packet */ + void *va; /* first buffer address for current packet */ u32 flag; /* ring attribute */ From 4d2cad32127e5c83e17b0e1dfd4cd5ef6080301f Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 18 Jun 2020 13:02:09 +1200 Subject: [PATCH 0087/2056] net: hns3: rename buffer-related functions This is for improving the readability. Signed-off-by: Barry Song Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 61b5a849b162..3cd2216e49b9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2405,7 +2405,7 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring) return 0; } -static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, +static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) { int ret; @@ -2426,9 +2426,9 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return ret; } -static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) { - int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); if (ret) return ret; @@ -2444,7 +2444,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) int i, j, ret; for (i = 0; i < ring->desc_num; i++) { - ret = hns3_alloc_buffer_attach(ring, i); + ret = hns3_alloc_and_attach_buffer(ring, i); if (ret) goto out_buffer_fail; } @@ -2590,7 +2590,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, hns3_reuse_buffer(ring, ring->next_to_use); } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; @@ -4184,7 +4184,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) * stack, so we need to replace the buffer here. */ if (!ring->desc_cb[ring->next_to_use].reuse_flag) { - ret = hns3_reserve_buffer_map(ring, &res_cbs); + ret = hns3_alloc_and_map_buffer(ring, &res_cbs); if (ret) { u64_stats_update_begin(&ring->syncp); ring->stats.sw_err_cnt++; From e99a308da3c1518009a740c4d3962f616edeb642 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 18 Jun 2020 13:02:10 +1200 Subject: [PATCH 0088/2056] net: hns3: replace disable_irq by IRQ_NOAUTOEN flag disable_irq() after request_irq() is still risk as there is a chance irq can come after request_irq() and before disable_irq(). this should be done by IRQ_NOAUTOEN flag. Signed-off-by: Barry Song Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 3cd2216e49b9..1330820152fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -8,6 +8,7 @@ #include #endif #include +#include #include #include #include @@ -154,6 +155,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, tqp_vectors->name, tqp_vectors); if (ret) { @@ -163,8 +165,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv) return ret; } - disable_irq(tqp_vectors->vector_irq); - irq_set_affinity_hint(tqp_vectors->vector_irq, &tqp_vectors->affinity_mask); From c2a2e1270a57696827da4ca69f78eb072e37d590 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Thu, 18 Jun 2020 13:02:11 +1200 Subject: [PATCH 0089/2056] net: hns3: streaming dma buffer sync between cpu and device Right now they are empty functions for our SoC since hardware can keep cache coherent, but it is still good to align with streaming DMA APIs as device drivers should not make an assumption of SoC. Reviewed-by: Yunsheng Lin Signed-off-by: Barry Song Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3_enet.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 1330820152fa..b319a766889f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -2473,6 +2473,11 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; + + dma_sync_single_for_device(ring_to_dev(ring), + ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); } static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, @@ -2918,6 +2923,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) skb = ring->tail_skb; } + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); trace_hns3_rx_desc(ring); ring_ptr_move_fw(ring, next_to_clean); @@ -3069,9 +3079,15 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring) if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) return -ENXIO; - if (!skb) + if (!skb) { ring->va = desc_cb->buf + desc_cb->page_offset; + dma_sync_single_for_cpu(ring_to_dev(ring), + desc_cb->dma + desc_cb->page_offset, + hns3_buf_size(ring), + DMA_FROM_DEVICE); + } + /* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. Our L1 Cache * line size is 64B so need to prefetch twice to make it 128B. But in From 393415203f5c916b5907e0a7c89f4c2c5a9c5505 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:21 -0700 Subject: [PATCH 0090/2056] octeontx2-af: change (struct qmem)->entry_sz from u8 to u16 We need to increase TSO_HEADER_SIZE from 128 to 256. 
Since otx2_sq_init() calls qmem_alloc() with TSO_HEADER_SIZE, we need to change (struct qmem)->entry_sz to avoid truncation to 0. Fixes: 7a37245ef23f ("octeontx2-af: NPA block admin queue init") Signed-off-by: Eric Dumazet Cc: Sunil Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/af/common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index cd33c2e6ca5f..f48eb66ed021 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -43,7 +43,7 @@ struct qmem { void *base; dma_addr_t iova; int alloc_sz; - u8 entry_sz; + u16 entry_sz; u8 align; u32 qsize; }; From 9c77b803f263573b6019e4828825709845c37d45 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:22 -0700 Subject: [PATCH 0091/2056] net: tso: double TSO_HEADER_SIZE value Transport header size could be 60 bytes, and network header size can also be 60 bytes. Add the Ethernet header and we are above 128 bytes. Since drivers using net/core/tso.c usually allocates one DMA coherent piece of memory per TX queue, this patch might cause issues if a driver was using too many slots. For 1024 slots, we would need 256 KB of physically contiguous memory instead of 128 KB. Alternative fix would be to add checks in the fast path, but this involves more work in all drivers using net/core/tso.c. Fixes: f9cbe9a556af ("net: define the TSO header size in net/tso.h") Signed-off-by: Eric Dumazet Cc: Antoine Tenart Signed-off-by: David S. Miller --- include/net/tso.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/tso.h b/include/net/tso.h index 7e166a570349..c33dd00c161f 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -4,7 +4,7 @@ #include -#define TSO_HEADER_SIZE 128 +#define TSO_HEADER_SIZE 256 struct tso_t { int next_frag_idx; From 185c3e5860227065dcb6ee884b45e0debe4762dd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:23 -0700 Subject: [PATCH 0092/2056] net: tso: shrink struct tso_t size field can be an int, no need for size_t Removes a 32bit hole on 64bit kernels. And align fields for better readability. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tso.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/tso.h b/include/net/tso.h index c33dd00c161f..d9b0a14b2a57 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -7,12 +7,12 @@ #define TSO_HEADER_SIZE 256 struct tso_t { - int next_frag_idx; - void *data; - size_t size; - u16 ip_id; - bool ipv6; - u32 tcp_seq; + int next_frag_idx; + int size; + void *data; + u16 ip_id; + bool ipv6; + u32 tcp_seq; }; int tso_count_descs(struct sk_buff *skb); From 504b912150983a8b2499bbf9e4501336677404c9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:24 -0700 Subject: [PATCH 0093/2056] net: tso: constify tso_count_descs() and friends skb argument of tso_count_descs(), tso_build_hdr() and tso_build_data() can be const. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/net/tso.h | 6 +++--- net/core/tso.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/tso.h b/include/net/tso.h index d9b0a14b2a57..32d9272ade6a 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -15,10 +15,10 @@ struct tso_t { u32 tcp_seq; }; -int tso_count_descs(struct sk_buff *skb); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +int tso_count_descs(const struct sk_buff *skb); +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size); +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); void tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index d4d5c077ad72..56487e3bb26d 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -6,14 +6,14 @@ #include /* Calculate expected number of TX descriptors */ -int tso_count_descs(struct sk_buff *skb) +int tso_count_descs(const struct sk_buff *skb) { /* The Marvell Way */ return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags; } EXPORT_SYMBOL(tso_count_descs); -void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, +void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; @@ -44,7 +44,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, } EXPORT_SYMBOL(tso_build_hdr); -void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size) +void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) { tso->tcp_seq += size; tso->size -= size; From 761b331cb6902dc0a08f786e9fa0dbd572059027 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:25 -0700 Subject: [PATCH 0094/2056] net: tso: cache transport header length Add tlen field into struct tso_t, and change tso_start() to return skb_transport_offset(skb) + tso->tlen This removes from callers the need to use tcp_hdrlen(skb) and will ease UDP segmentation offload addition. v2: calls tso_start() earlier in otx2_sq_append_tso() [Jakub] Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 5 +++-- drivers/net/ethernet/freescale/fec_main.c | 5 ++--- drivers/net/ethernet/marvell/mv643xx_eth.c | 5 ++--- drivers/net/ethernet/marvell/mvneta.c | 5 ++--- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 6 +++--- .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 6 +++--- include/net/tso.h | 3 ++- net/core/tso.c | 11 +++++++---- 8 files changed, 24 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 069e7413f1ef..a45223f0cca5 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1489,9 +1489,10 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, int seg_subdescs = 0, desc_cnt = 0; int seg_len, total_len, data_left; int hdr_qentry = qentry; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len; + + hdr_len = tso_start(skb, &tso); - tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { char *hdr; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2d0d313ee7c5..9f80a33c5b16 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -710,8 +710,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int total_len, data_left; + int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; struct tso_t tso; unsigned int index = 0; @@ -731,7 +730,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 4d4b6243318a..90e6111ce534 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -816,10 +816,9 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, struct net_device *dev) { struct mv643xx_eth_private *mp = txq_to_mp(txq); - int total_len, data_left, ret; + int hdr_len, total_len, data_left, ret; int desc_count = 0; struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct tx_desc *first_tx_desc; u32 first_cmd_sts = 0; @@ -832,7 +831,7 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb, first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 946925bbcb2d..95b447c14411 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2604,11 +2604,10 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvneta_tx_queue *txq) { - int total_len, data_left; + int hdr_len, total_len, data_left; int desc_count = 0; struct mvneta_port *pp = netdev_priv(dev); struct tso_t tso; - int hdr_len = skb_transport_offset(skb) + 
tcp_hdrlen(skb); int i; /* Count needed descriptors */ @@ -2621,7 +2620,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, } /* Initialize the TSO handler, and prepare the first payload */ - tso_start(skb, &tso); + hdr_len = tso_start(skb, &tso); total_len = skb->len - hdr_len; while (total_len > 0) { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 24f4d8e0da98..e9f287568026 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3160,9 +3160,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mvpp2_txq_pcpu *txq_pcpu) { struct mvpp2_port *port = netdev_priv(dev); + int hdr_sz, i, len, descs = 0; struct tso_t tso; - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); - int i, len, descs = 0; /* Check number of available descriptors */ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || @@ -3170,7 +3169,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, tso_count_descs(skb))) return 0; - tso_start(skb, &tso); + hdr_sz = tso_start(skb, &tso); + len = skb->len - hdr_sz; while (len > 0) { int left = min_t(int, skb_shinfo(skb)->gso_size, len); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index b04f5429d72d..3a5b34a2a7a6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -619,13 +619,14 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, struct sk_buff *skb, u16 qidx) { struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - int tcp_data, seg_len, pkt_len, offset; + int hdr_len, tcp_data, seg_len, pkt_len, offset; struct nix_sqe_hdr_s *sqe_hdr; int first_sqe = sq->head; struct sg_list list; struct tso_t tso; + hdr_len = tso_start(skb, &tso); + /* Map SKB's fragments to DMA. * It's done here to avoid mapping for every TSO segment's packet. 
*/ @@ -636,7 +637,6 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, netdev_tx_sent_queue(txq, skb->len); - tso_start(skb, &tso); tcp_data = skb->len - hdr_len; while (tcp_data > 0) { char *hdr; diff --git a/include/net/tso.h b/include/net/tso.h index 32d9272ade6a..62c98a9c60f1 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -11,6 +11,7 @@ struct tso_t { int size; void *data; u16 ip_id; + u8 tlen; /* transport header len */ bool ipv6; u32 tcp_seq; }; @@ -19,6 +20,6 @@ int tso_count_descs(const struct sk_buff *skb); void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last); void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); -void tso_start(struct sk_buff *skb, struct tso_t *tso); +int tso_start(struct sk_buff *skb, struct tso_t *tso); #endif /* _TSO_H */ diff --git a/net/core/tso.c b/net/core/tso.c index 56487e3bb26d..9f35518815bd 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -17,7 +17,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { struct tcphdr *tcph; - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tso->tlen; int mac_hdr_len = skb_network_offset(skb); memcpy(hdr, skb->data, hdr_len); @@ -30,7 +30,7 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, } else { struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); - iph->payload_len = htons(size + tcp_hdrlen(skb)); + iph->payload_len = htons(size + tso->tlen); } tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); put_unaligned_be32(tso->tcp_seq, &tcph->seq); @@ -62,10 +62,12 @@ void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) } EXPORT_SYMBOL(tso_build_data); -void tso_start(struct sk_buff *skb, struct tso_t *tso) +int tso_start(struct sk_buff *skb, struct tso_t *tso) { - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + int tlen = tcp_hdrlen(skb); + int hdr_len = skb_transport_offset(skb) + tlen; + tso->tlen = tlen; tso->ip_id = ntohs(ip_hdr(skb)->id); tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); tso->next_frag_idx = 0; @@ -83,5 +85,6 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso) tso->data = skb_frag_address(frag); tso->next_frag_idx++; } + return hdr_len; } EXPORT_SYMBOL(tso_start); From 3d5b459ba0e3788ab471e8cb98eee89964a9c5e8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 17 Jun 2020 20:53:26 -0700 Subject: [PATCH 0095/2056] net: tso: add UDP segmentation support Note that like TCP, we do not support additional encapsulations, and that checksums must be offloaded to the NIC. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/tso.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/net/core/tso.c b/net/core/tso.c index 9f35518815bd..4148f6d48953 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -16,7 +16,6 @@ EXPORT_SYMBOL(tso_count_descs); void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, int size, bool is_last) { - struct tcphdr *tcph; int hdr_len = skb_transport_offset(skb) + tso->tlen; int mac_hdr_len = skb_network_offset(skb); @@ -32,21 +31,29 @@ void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, iph->payload_len = htons(size + tso->tlen); } - tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); - put_unaligned_be32(tso->tcp_seq, &tcph->seq); + hdr += skb_transport_offset(skb); + if (tso->tlen != sizeof(struct udphdr)) { + struct tcphdr *tcph = (struct tcphdr *)hdr; - if (!is_last) { - /* Clear all special flags for not last packet */ - tcph->psh = 0; - tcph->fin = 0; - tcph->rst = 0; + put_unaligned_be32(tso->tcp_seq, &tcph->seq); + + if (!is_last) { + /* Clear all special flags for not last packet */ + tcph->psh = 0; + tcph->fin = 0; + tcph->rst = 0; + } + } else { + struct udphdr *uh = (struct udphdr *)hdr; + + uh->len = htons(sizeof(*uh) + size); } } EXPORT_SYMBOL(tso_build_hdr); void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size) { - tso->tcp_seq += size; + tso->tcp_seq += size; /* not worth avoiding this operation for UDP */ tso->size -= size; tso->data += size; @@ -64,12 +71,12 @@ EXPORT_SYMBOL(tso_build_data); int tso_start(struct sk_buff *skb, struct tso_t *tso) { - int tlen = tcp_hdrlen(skb); + int tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr); int hdr_len = skb_transport_offset(skb) + tlen; tso->tlen = tlen; tso->ip_id = ntohs(ip_hdr(skb)->id); - tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); + tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0; tso->next_frag_idx = 0; tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6); From 3893c905b557670e25be62b98f5cf0626096e9d4 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 18 Jun 2020 11:35:52 +0530 Subject: [PATCH 0096/2056] cxgb4: update set_flash to flash different images Chelsio adapter contains different flash regions and each region is used by different binary files. This patch adds support to flash images like PHY firmware, boot and boot config using ethtool -f N. The N value mapping is as follows. N = 0 : Parse image and decide which region to flash N = 1 : Firmware N = 2 : PHY firmware N = 3 : boot image N = 4 : boot cfg Signed-off-by: Vishal Kulkarni " Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 ++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 121 +++++++++++++++--- 2 files changed, 115 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index cf69c6edcfec..a7a1e1f5d554 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -139,6 +139,10 @@ enum cc_fec { FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ }; +enum { + CXGB4_ETHTOOL_FLASH_FW = 1, +}; + struct port_stats { u64 tx_octets; /* total # of octets in good frames */ u64 tx_frames; /* all good frames */ @@ -492,6 +496,11 @@ struct trace_params { unsigned char port; }; +struct cxgb4_fw_data { + __be32 signature; + __u8 reserved[4]; +}; + /* Firmware Port Capabilities types. 
*/ typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 9fd496732b2c..92f79d0cd6ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -23,6 +23,11 @@ static void set_msglevel(struct net_device *dev, u32 val) netdev2adap(dev)->msg_enable = val; } +static const char * const flash_region_strings[] = { + "All", + "Firmware", +}; + static const char stats_strings[][ETH_GSTRING_LEN] = { "tx_octets_ok ", "tx_frames_ok ", @@ -1235,15 +1240,88 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return err; } -static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +static int cxgb4_ethtool_flash_fw(struct net_device *netdev, + const u8 *data, u32 size) { - int ret; - const struct firmware *fw; struct adapter *adap = netdev2adap(netdev); unsigned int mbox = PCIE_FW_MASTER_M + 1; - u32 pcie_fw; + int ret; + + /* If the adapter has been fully initialized then we'll go ahead and + * try to get the firmware's cooperation in upgrading to the new + * firmware image otherwise we'll try to do the entire job from the + * host ... and we always "force" the operation in this path. + */ + if (adap->flags & CXGB4_FULL_INIT_DONE) + mbox = adap->mbox; + + ret = t4_fw_upgrade(adap, mbox, data, size, 1); + if (ret) + dev_err(adap->pdev_dev, + "Failed to flash firmware\n"); + + return ret; +} + +static int cxgb4_ethtool_flash_region(struct net_device *netdev, + const u8 *data, u32 size, u32 region) +{ + struct adapter *adap = netdev2adap(netdev); + int ret; + + switch (region) { + case CXGB4_ETHTOOL_FLASH_FW: + ret = cxgb4_ethtool_flash_fw(netdev, data, size); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + if (!ret) + dev_info(adap->pdev_dev, + "loading %s successful, reload cxgb4 driver\n", + flash_region_strings[region]); + return ret; +} + +#define CXGB4_FW_SIG 0x4368656c +#define CXGB4_FW_SIG_OFFSET 0x160 + +static int cxgb4_validate_fw_image(const u8 *data, u32 *size) +{ + struct cxgb4_fw_data *header; + + header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET]; + if (be32_to_cpu(header->signature) != CXGB4_FW_SIG) + return -EINVAL; + + if (size) + *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512; + + return 0; +} + +static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size) +{ + if (!cxgb4_validate_fw_image(data, size)) + return CXGB4_ETHTOOL_FLASH_FW; + + return -EOPNOTSUPP; +} + +static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) +{ + struct adapter *adap = netdev2adap(netdev); + const struct firmware *fw; unsigned int master; u8 master_vld = 0; + const u8 *fw_data; + size_t fw_size; + u32 size = 0; + u32 pcie_fw; + int region; + int ret; pcie_fw = t4_read_reg(adap, PCIE_FW_A); master = PCIE_FW_MASTER_G(pcie_fw); @@ -1261,19 +1339,32 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) if (ret < 0) return ret; - /* If the adapter has been fully initialized then we'll go ahead and - * try to get the firmware's cooperation in upgrading to the new - * firmware image otherwise we'll try to do the entire job from the - * host ... and we always "force" the operation in this path. 
- */ - if (adap->flags & CXGB4_FULL_INIT_DONE) - mbox = adap->mbox; + fw_data = fw->data; + fw_size = fw->size; + if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) { + while (fw_size > 0) { + size = 0; + region = cxgb4_ethtool_get_flash_region(fw_data, &size); + if (region < 0 || !size) { + ret = region; + goto out_free_fw; + } - ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1); + ret = cxgb4_ethtool_flash_region(netdev, fw_data, size, + region); + if (ret) + goto out_free_fw; + + fw_data += size; + fw_size -= size; + } + } else { + ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size, + ef->region); + } + +out_free_fw: release_firmware(fw); - if (!ret) - dev_info(adap->pdev_dev, - "loaded firmware %s, reload cxgb4 driver\n", ef->data); return ret; } From 4ee339e1e92a9cff6ceda5a81622625ffe554163 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 18 Jun 2020 11:35:53 +0530 Subject: [PATCH 0097/2056] cxgb4: add support to flash PHY image Update set_flash to flash PHY image to flash region Signed-off-by: Vishal Kulkarni Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 39 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a7a1e1f5d554..b49d16a54ada 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -141,6 +141,7 @@ enum cc_fec { enum { CXGB4_ETHTOOL_FLASH_FW = 1, + CXGB4_ETHTOOL_FLASH_PHY = 2, }; struct port_stats { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 92f79d0cd6ab..7118ba016f01 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -26,6 +26,7 @@ static void set_msglevel(struct net_device *dev, u32 val) static const char * const flash_region_strings[] = { "All", "Firmware", + "PHY Firmware", }; static const char stats_strings[][ETH_GSTRING_LEN] = { @@ -1240,6 +1241,39 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return err; } +#define CXGB4_PHY_SIG 0x130000ea + +static int cxgb4_validate_phy_image(const u8 *data, u32 *size) +{ + struct cxgb4_fw_data *header; + + header = (struct cxgb4_fw_data *)data; + if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG) + return -EINVAL; + + return 0; +} + +static int cxgb4_ethtool_flash_phy(struct net_device *netdev, + const u8 *data, u32 size) +{ + struct adapter *adap = netdev2adap(netdev); + int ret; + + ret = cxgb4_validate_phy_image(data, NULL); + if (ret) { + dev_err(adap->pdev_dev, "PHY signature mismatch\n"); + return ret; + } + + ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, + NULL, data, size); + if (ret) + dev_err(adap->pdev_dev, "Failed to load PHY FW\n"); + + return ret; +} + static int cxgb4_ethtool_flash_fw(struct net_device *netdev, const u8 *data, u32 size) { @@ -1273,6 +1307,9 @@ static int cxgb4_ethtool_flash_region(struct net_device *netdev, case CXGB4_ETHTOOL_FLASH_FW: ret = cxgb4_ethtool_flash_fw(netdev, data, size); break; + case CXGB4_ETHTOOL_FLASH_PHY: + ret = cxgb4_ethtool_flash_phy(netdev, data, size); + break; default: ret = -EOPNOTSUPP; break; @@ -1306,6 +1343,8 @@ static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size) { if (!cxgb4_validate_fw_image(data, size)) return CXGB4_ETHTOOL_FLASH_FW; + if (!cxgb4_validate_phy_image(data, size)) + return 
CXGB4_ETHTOOL_FLASH_PHY; return -EOPNOTSUPP; } From 550883558f174685c3931bbd963fbde503a6100d Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 18 Jun 2020 11:35:54 +0530 Subject: [PATCH 0098/2056] cxgb4: add support to flash boot image Update set_flash to flash boot image to flash region Signed-off-by: Vishal Kulkarni Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 48 +++++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 56 ++++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 187 ++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 6 + 4 files changed, 297 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b49d16a54ada..7b5f1869d8e7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -142,6 +142,52 @@ enum cc_fec { enum { CXGB4_ETHTOOL_FLASH_FW = 1, CXGB4_ETHTOOL_FLASH_PHY = 2, + CXGB4_ETHTOOL_FLASH_BOOT = 3, +}; + +struct cxgb4_pcir_data { + __le32 signature; /* Signature. The string "PCIR" */ + __le16 vendor_id; /* Vendor Identification */ + __le16 device_id; /* Device Identification */ + __u8 vital_product[2]; /* Pointer to Vital Product Data */ + __u8 length[2]; /* PCIR Data Structure Length */ + __u8 revision; /* PCIR Data Structure Revision */ + __u8 class_code[3]; /* Class Code */ + __u8 image_length[2]; /* Image Length. Multiple of 512B */ + __u8 code_revision[2]; /* Revision Level of Code/Data */ + __u8 code_type; + __u8 indicator; + __u8 reserved[2]; +}; + +/* BIOS boot headers */ +struct cxgb4_pci_exp_rom_header { + __le16 signature; /* ROM Signature. Should be 0xaa55 */ + __u8 reserved[22]; /* Reserved per processor Architecture data */ + __le16 pcir_offset; /* Offset to PCI Data Structure */ +}; + +/* Legacy PCI Expansion ROM Header */ +struct legacy_pci_rom_hdr { + __u8 signature[2]; /* ROM Signature. 
Should be 0xaa55 */ + __u8 size512; /* Current Image Size in units of 512 bytes */ + __u8 initentry_point[4]; + __u8 cksum; /* Checksum computed on the entire Image */ + __u8 reserved[16]; /* Reserved */ + __le16 pcir_offset; /* Offset to PCI Data Struture */ +}; + +#define CXGB4_HDR_CODE1 0x00 +#define CXGB4_HDR_CODE2 0x03 +#define CXGB4_HDR_INDI 0x80 + +/* BOOT constants */ +enum { + BOOT_SIZE_INC = 512, + BOOT_SIGNATURE = 0xaa55, + BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header), + BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC, + PCIR_SIGNATURE = 0x52494350 }; struct port_stats { @@ -1998,6 +2044,8 @@ void t4_register_netevent_notifier(void); int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int devid, unsigned int offset, unsigned int len, u8 *buf); +int t4_load_boot(struct adapter *adap, u8 *boot_data, + unsigned int boot_addr, unsigned int size); void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7118ba016f01..29645252aa27 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -27,6 +27,7 @@ static const char * const flash_region_strings[] = { "All", "Firmware", "PHY Firmware", + "Boot", }; static const char stats_strings[][ETH_GSTRING_LEN] = { @@ -1241,6 +1242,28 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return err; } +static int cxgb4_ethtool_flash_boot(struct net_device *netdev, + const u8 *bdata, u32 size) +{ + struct adapter *adap = netdev2adap(netdev); + unsigned int offset; + u8 *data; + int ret; + + data = kmemdup(bdata, size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A))); + + ret = t4_load_boot(adap, data, offset, size); + if (ret) + dev_err(adap->pdev_dev, "Failed to load boot image\n"); + + kfree(data); + return ret; +} + #define CXGB4_PHY_SIG 0x130000ea static int cxgb4_validate_phy_image(const u8 *data, u32 *size) @@ -1310,6 +1333,9 @@ static int cxgb4_ethtool_flash_region(struct net_device *netdev, case CXGB4_ETHTOOL_FLASH_PHY: ret = cxgb4_ethtool_flash_phy(netdev, data, size); break; + case CXGB4_ETHTOOL_FLASH_BOOT: + ret = cxgb4_ethtool_flash_boot(netdev, data, size); + break; default: ret = -EOPNOTSUPP; break; @@ -1339,10 +1365,40 @@ static int cxgb4_validate_fw_image(const u8 *data, u32 *size) return 0; } +static int cxgb4_validate_boot_image(const u8 *data, u32 *size) +{ + struct cxgb4_pci_exp_rom_header *exp_header; + struct cxgb4_pcir_data *pcir_header; + struct legacy_pci_rom_hdr *header; + const u8 *cur_header = data; + u16 pcir_offset; + + exp_header = (struct cxgb4_pci_exp_rom_header *)data; + + if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE) + return -EINVAL; + + if (size) { + do { + header = (struct legacy_pci_rom_hdr *)cur_header; + pcir_offset = le16_to_cpu(header->pcir_offset); + pcir_header = (struct cxgb4_pcir_data *)(cur_header + + pcir_offset); + + *size += header->size512 * 512; + cur_header += header->size512 * 512; + } while (!(pcir_header->indicator & CXGB4_HDR_INDI)); + } + + return 0; +} + static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size) { if (!cxgb4_validate_fw_image(data, size)) return CXGB4_ETHTOOL_FLASH_FW; + if (!cxgb4_validate_boot_image(data, size)) + return 
CXGB4_ETHTOOL_FLASH_BOOT; if (!cxgb4_validate_phy_image(data, size)) return CXGB4_ETHTOOL_FLASH_PHY; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 1c8068c02728..ccb550c6fab0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -10481,3 +10481,190 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL); } + +/** + * modify_device_id - Modifies the device ID of the Boot BIOS image + * @device_id: the device ID to write. + * @boot_data: the boot image to modify. + * + * Write the supplied device ID to the boot BIOS image. + */ +static void modify_device_id(int device_id, u8 *boot_data) +{ + struct cxgb4_pcir_data *pcir_header; + struct legacy_pci_rom_hdr *header; + u8 *cur_header = boot_data; + u16 pcir_offset; + + /* Loop through all chained images and change the device ID's */ + do { + header = (struct legacy_pci_rom_hdr *)cur_header; + pcir_offset = le16_to_cpu(header->pcir_offset); + pcir_header = (struct cxgb4_pcir_data *)(cur_header + + pcir_offset); + + /** + * Only modify the Device ID if code type is Legacy or HP. + * 0x00: Okay to modify + * 0x01: FCODE. Do not modify + * 0x03: Okay to modify + * 0x04-0xFF: Do not modify + */ + if (pcir_header->code_type == CXGB4_HDR_CODE1) { + u8 csum = 0; + int i; + + /** + * Modify Device ID to match current adatper + */ + pcir_header->device_id = cpu_to_le16(device_id); + + /** + * Set checksum temporarily to 0. + * We will recalculate it later. + */ + header->cksum = 0x0; + + /** + * Calculate and update checksum + */ + for (i = 0; i < (header->size512 * 512); i++) + csum += cur_header[i]; + + /** + * Invert summed value to create the checksum + * Writing new checksum value directly to the boot data + */ + cur_header[7] = -csum; + + } else if (pcir_header->code_type == CXGB4_HDR_CODE2) { + /** + * Modify Device ID to match current adatper + */ + pcir_header->device_id = cpu_to_le16(device_id); + } + + /** + * Move header pointer up to the next image in the ROM. + */ + cur_header += header->size512 * 512; + } while (!(pcir_header->indicator & CXGB4_HDR_INDI)); +} + +/** + * t4_load_boot - download boot flash + * @adap: the adapter + * @boot_data: the boot image to write + * @boot_addr: offset in flash to write boot_data + * @size: image size + * + * Write the supplied boot image to the card's serial flash. + * The boot image has the following sections: a 28-byte header and the + * boot image. + */ +int t4_load_boot(struct adapter *adap, u8 *boot_data, + unsigned int boot_addr, unsigned int size) +{ + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + unsigned int boot_sector = (boot_addr * 1024); + struct cxgb4_pci_exp_rom_header *header; + struct cxgb4_pcir_data *pcir_header; + int pcir_offset; + unsigned int i; + u16 device_id; + int ret, addr; + + /** + * Make sure the boot image does not encroach on the firmware region + */ + if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) { + dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n"); + return -EFBIG; + } + + /* Get boot header */ + header = (struct cxgb4_pci_exp_rom_header *)boot_data; + pcir_offset = le16_to_cpu(header->pcir_offset); + /* PCIR Data Structure */ + pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset]; + + /** + * Perform some primitive sanity testing to avoid accidentally + * writing garbage over the boot sectors. 
We ought to check for + * more but it's not worth it for now ... + */ + if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) { + dev_err(adap->pdev_dev, "boot image too small/large\n"); + return -EFBIG; + } + + if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) { + dev_err(adap->pdev_dev, "Boot image missing signature\n"); + return -EINVAL; + } + + /* Check PCI header signature */ + if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) { + dev_err(adap->pdev_dev, "PCI header missing signature\n"); + return -EINVAL; + } + + /* Check Vendor ID matches Chelsio ID*/ + if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) { + dev_err(adap->pdev_dev, "Vendor ID missing signature\n"); + return -EINVAL; + } + + /** + * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot, + * and Boot configuration data sections. These 3 boot sections span + * sectors 0 to 7 in flash and live right before the FW image location. + */ + i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size); + ret = t4_flash_erase_sectors(adap, boot_sector >> 16, + (boot_sector >> 16) + i - 1); + + /** + * If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter option ROM file + */ + if (ret || size == 0) + goto out; + /* Retrieve adapter's device ID */ + pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id); + /* Want to deal with PF 0 so I strip off PF 4 indicator */ + device_id = device_id & 0xf0ff; + + /* Check PCIE Device ID */ + if (le16_to_cpu(pcir_header->device_id) != device_id) { + /** + * Change the device ID in the Boot BIOS image to match + * the Device ID of the current adapter. + */ + modify_device_id(device_id, boot_data); + } + + /** + * Skip over the first SF_PAGE_SIZE worth of data and write it after + * we finish copying the rest of the boot image. This will ensure + * that the BIOS boot header will only be written if the boot image + * was written in full. + */ + addr = boot_sector; + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) { + addr += SF_PAGE_SIZE; + boot_data += SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data); + if (ret) + goto out; + } + + ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE, + (const u8 *)header); + +out: + if (ret) + dev_err(adap->pdev_dev, "boot image load failed, error %d\n", + ret); + return ret; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 4a9fcd6c226c..4b697550f08d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -563,6 +563,12 @@ #define AIVEC_V(x) ((x) << AIVEC_S) #define PCIE_PF_CLI_A 0x44 + +#define PCIE_PF_EXPROM_OFST_A 0x4c +#define OFFSET_S 10 +#define OFFSET_M 0x3fffU +#define OFFSET_G(x) (((x) >> OFFSET_S) & OFFSET_M) + #define PCIE_INT_CAUSE_A 0x3004 #define UNXSPLCPLERR_S 29 From d5002c9a3d5a475590cf9e2a3e681bcb6b0cdda6 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 18 Jun 2020 11:35:55 +0530 Subject: [PATCH 0099/2056] cxgb4: add support to flash boot cfg image Update set_flash to flash boot cfg image to flash region Signed-off-by: Vishal Kulkarni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 ++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 30 +++++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 90 +++++++++++++++++++ 3 files changed, 129 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 7b5f1869d8e7..999816273328 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -143,6 +143,12 @@ enum { CXGB4_ETHTOOL_FLASH_FW = 1, CXGB4_ETHTOOL_FLASH_PHY = 2, CXGB4_ETHTOOL_FLASH_BOOT = 3, + CXGB4_ETHTOOL_FLASH_BOOTCFG = 4 +}; + +struct cxgb4_bootcfg_data { + __le16 signature; + __u8 reserved[2]; }; struct cxgb4_pcir_data { @@ -183,6 +189,7 @@ struct legacy_pci_rom_hdr { /* BOOT constants */ enum { + BOOT_CFG_SIG = 0x4243, BOOT_SIZE_INC = 512, BOOT_SIGNATURE = 0xaa55, BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header), @@ -2046,6 +2053,8 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port, unsigned int len, u8 *buf); int t4_load_boot(struct adapter *adap, u8 *boot_data, unsigned int boot_addr, unsigned int size); +int t4_load_bootcfg(struct adapter *adap, + const u8 *cfg_data, unsigned int size); void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); void free_tx_desc(struct adapter *adap, struct sge_txq *q, unsigned int n, bool unmap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 29645252aa27..0bfdc97e9083 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -28,6 +28,7 @@ static const char * const flash_region_strings[] = { "Firmware", "PHY Firmware", "Boot", + "Boot CFG", }; static const char stats_strings[][ETH_GSTRING_LEN] = { @@ -1242,6 +1243,19 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return err; } +static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev, + const u8 *data, u32 size) +{ + struct adapter *adap = netdev2adap(netdev); + int ret; + + ret = t4_load_bootcfg(adap, data, size); + if (ret) + dev_err(adap->pdev_dev, "Failed to load boot cfg image\n"); + + return ret; +} + static int cxgb4_ethtool_flash_boot(struct net_device *netdev, const u8 *bdata, u32 size) { @@ -1336,6 +1350,9 @@ static int cxgb4_ethtool_flash_region(struct net_device *netdev, case CXGB4_ETHTOOL_FLASH_BOOT: ret = cxgb4_ethtool_flash_boot(netdev, data, size); break; + case CXGB4_ETHTOOL_FLASH_BOOTCFG: + ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size); + break; default: ret = -EOPNOTSUPP; break; @@ -1365,6 +1382,17 @@ static int cxgb4_validate_fw_image(const u8 *data, u32 *size) return 0; } +static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size) +{ + struct cxgb4_bootcfg_data *header; + + header = (struct cxgb4_bootcfg_data *)data; + if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) + return -EINVAL; + + return 0; +} + static int cxgb4_validate_boot_image(const u8 *data, u32 *size) { struct cxgb4_pci_exp_rom_header *exp_header; @@ -1401,6 +1429,8 @@ static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size) return CXGB4_ETHTOOL_FLASH_BOOT; if (!cxgb4_validate_phy_image(data, size)) return CXGB4_ETHTOOL_FLASH_PHY; + if (!cxgb4_validate_bootcfg_image(data, size)) + return CXGB4_ETHTOOL_FLASH_BOOTCFG; return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index ccb550c6fab0..9d557f3cd3aa 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -10668,3 +10668,93 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data, ret); return ret; } + +/** + * t4_flash_bootcfg_addr - return the address of the flash + * optionrom configuration + * @adapter: the adapter + * + * Return the address within the flash where the OptionROM Configuration + * is stored, or an error if the device FLASH is too small to contain + * a OptionROM Configuration. + */ +static int t4_flash_bootcfg_addr(struct adapter *adapter) +{ + /** + * If the device FLASH isn't large enough to hold a Firmware + * Configuration File, return an error. + */ + if (adapter->params.sf_size < + FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE) + return -ENOSPC; + + return FLASH_BOOTCFG_START; +} + +int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) +{ + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + struct cxgb4_bootcfg_data *header; + unsigned int flash_cfg_start_sec; + unsigned int addr, npad; + int ret, i, n, cfg_addr; + + cfg_addr = t4_flash_bootcfg_addr(adap); + if (cfg_addr < 0) + return cfg_addr; + + addr = cfg_addr; + flash_cfg_start_sec = addr / SF_SEC_SIZE; + + if (size > FLASH_BOOTCFG_MAX_SIZE) { + dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n", + FLASH_BOOTCFG_MAX_SIZE); + return -EFBIG; + } + + header = (struct cxgb4_bootcfg_data *)cfg_data; + if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) { + dev_err(adap->pdev_dev, "Wrong bootcfg signature\n"); + ret = -EINVAL; + goto out; + } + + i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE, + sf_sec_size); + ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, + flash_cfg_start_sec + i - 1); + + /** + * If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter OptionROM Configuration File. + */ + if (ret || size == 0) + goto out; + + /* this will write to the flash up to SF_PAGE_SIZE at a time */ + for (i = 0; i < size; i += SF_PAGE_SIZE) { + n = min_t(u32, size - i, SF_PAGE_SIZE); + + ret = t4_write_flash(adap, addr, n, cfg_data); + if (ret) + goto out; + + addr += SF_PAGE_SIZE; + cfg_data += SF_PAGE_SIZE; + } + + npad = ((size + 4 - 1) & ~3) - size; + for (i = 0; i < npad; i++) { + u8 data = 0; + + ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data); + if (ret) + goto out; + } + +out: + if (ret) + dev_err(adap->pdev_dev, "boot config data %s failed %d\n", + (size == 0 ? "clear" : "download"), ret); + return ret; +} From 17b332f48074e7ee2169ee4268ced6274e1c95c3 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 18 Jun 2020 11:35:56 +0530 Subject: [PATCH 0100/2056] cxgb4: add support to read serial flash This patch adds support to dump flash memory via ethtool --get-dump Signed-off-by: Vishal Kulkarni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 3 +- .../net/ethernet/chelsio/cxgb4/cudbg_lib.c | 37 +++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cudbg_lib.h | 4 +- .../net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 14 +++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 1 + 5 files changed, 57 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index fc3813050f0d..c84719e3ca08 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -70,7 +70,8 @@ enum cudbg_dbg_entity_type { CUDBG_HMA_INDIRECT = 67, CUDBG_HMA = 68, CUDBG_QDESC = 70, - CUDBG_MAX_ENTITY = 71, + CUDBG_FLASH = 71, + CUDBG_MAX_ENTITY = 72, }; struct cudbg_init { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 7b9cd69f9844..cd9494c5ff37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -3156,3 +3156,40 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, return rc; } + +int cudbg_collect_flash(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + u32 count = padap->params.sf_size, n; + struct cudbg_buffer temp_buff = {0}; + u32 addr, i; + int rc; + + addr = FLASH_EXP_ROM_START; + + for (i = 0; i < count; i += SF_PAGE_SIZE) { + n = min_t(u32, count - i, SF_PAGE_SIZE); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out; + } + rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0); + if (rc) + goto out; + + addr += (n * 4); + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out; + } + } + +out: + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 10ee6ed1d932..0f488d52797b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -162,7 +162,9 @@ int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); - +int cudbg_collect_flash(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index e374b413d9ac..d7afe0746878 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -66,6 +66,10 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect }, }; +static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = { + { CUDBG_FLASH, cudbg_collect_flash }, +}; + static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) { struct cudbg_tcam tcam_region = { 0 }; @@ -330,6 +334,9 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) } } + if (flag & CXGB4_ETH_DUMP_FLASH) + len += adap->params.sf_size; + /* If compression is enabled, a smaller destination buffer is enough 
*/ wsize = cudbg_get_workspace_size(); if (wsize && len > CUDBG_DUMP_BUFF_SIZE) @@ -468,6 +475,13 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, buf, &total_size); + if (flag & CXGB4_ETH_DUMP_FLASH) + cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff, + cxgb4_collect_flash_dump, + ARRAY_SIZE(cxgb4_collect_flash_dump), + buf, + &total_size); + cudbg_free_compress_buff(&cudbg_init); cudbg_hdr->data_len = total_size; if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index 66b805c7a92c..c04a49b6378d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -27,6 +27,7 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE, CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip Memory Dumps */ CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ + CXGB4_ETH_DUMP_FLASH = (1 << 2), /* Dump flash memory */ }; #define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW) From 4b61d3e8d3daebbde7ec02d593f84248fdf8bec2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Fri, 19 Jun 2020 14:01:07 +0800 Subject: [PATCH 0101/2056] net: qos offload add flow status with dropped count This patch adds a dropped-frames counter to tc flower offloading. Reporting hardware-dropped frames is necessary for some actions: actions such as police, and the soon-to-be-introduced stream gate action, drop frames that users need to see. The status update now reports both how many packets matched the filter and how many of those packets were dropped. v2: Changes - Update commit message as suggested by Jiri Pirko. Signed-off-by: Po Liu Reviewed-by: Simon Horman Reviewed-by: Vlad Buslov Signed-off-by: David S.
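For driver authors the change is mechanical: stats callbacks keep passing byte/packet deltas and now pass the hardware drop delta through the new argument instead of folding drops into the packet count. A hedged sketch of the usual pattern follows; only flow_stats_update() and FLOW_ACTION_HW_STATS_DELAYED come from the core API, while the example_* names and fields are hypothetical driver-side state.

/* Sketch of a flower stats callback after this patch: report deltas
 * since the last query, with drops carried separately. example_*
 * names are hypothetical.
 */
static int example_cls_flower_stats(struct example_priv *priv,
				    struct flow_cls_offload *f,
				    struct example_rule *rule)
{
	u64 bytes, pkts, drops;

	example_hw_read_counters(priv, rule->hw_id, &bytes, &pkts, &drops);

	flow_stats_update(&f->stats,
			  bytes - rule->prev.bytes,
			  pkts - rule->prev.pkts,
			  drops - rule->prev.drops,
			  jiffies, FLOW_ACTION_HW_STATS_DELAYED);

	rule->prev.bytes = bytes;
	rule->prev.pkts = pkts;
	rule->prev.drops = drops;

	return 0;
}

On the software side the same value flows through tcf_exts_stats_update() into each action's ->stats_update() hook, which is why every tcf_*_stats_update() below gains a drops parameter and tcf_action_update_stats() now takes an explicit drop count instead of a bool.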
Miller --- drivers/net/dsa/sja1105/sja1105_vl.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2 +- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c | 2 +- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 7 +++++-- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/qos_conf.c | 2 +- include/net/act_api.h | 11 ++++++----- include/net/flow_offload.h | 5 ++++- include/net/pkt_cls.h | 5 +++-- net/sched/act_api.c | 10 ++++------ net/sched/act_ct.c | 6 +++--- net/sched/act_gact.c | 7 ++++--- net/sched/act_gate.c | 6 +++--- net/sched/act_mirred.c | 6 +++--- net/sched/act_pedit.c | 6 +++--- net/sched/act_police.c | 4 ++-- net/sched/act_skbedit.c | 5 +++-- net/sched/act_vlan.c | 6 +++--- net/sched/cls_flower.c | 1 + net/sched/cls_matchall.c | 3 ++- 25 files changed, 60 insertions(+), 50 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c index bdfd6c4e190d..9ddc49b7eb8f 100644 --- a/drivers/net/dsa/sja1105/sja1105_vl.c +++ b/drivers/net/dsa/sja1105/sja1105_vl.c @@ -771,7 +771,7 @@ int sja1105_vl_stats(struct sja1105_private *priv, int port, pkts = timingerr + unreleased + lengtherr; - flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, + flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0, jiffies - rule->vl.stats.lastused, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0eef4f5e4a46..4d482d75a20b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1638,7 +1638,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, lastused = flow->lastused; spin_unlock(&flow->stats_lock); - flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, + flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0, lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4a5fa9eba0b6..030de20a5d27 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -902,7 +902,7 @@ int cxgb4_tc_flower_stats(struct net_device *dev, if (ofld_stats->prev_packet_count != packets) ofld_stats->last_used = jiffies; flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count, - packets - ofld_stats->packet_count, + packets - ofld_stats->packet_count, 0, ofld_stats->last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index c88c47a14fbb..c439b5bce9c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -346,7 +346,7 @@ int cxgb4_tc_matchall_stats(struct net_device *dev, flow_stats_update(&cls_matchall->stats, bytes - tc_port_matchall->ingress.bytes, packets - tc_port_matchall->ingress.packets, - tc_port_matchall->ingress.last_used, + 0, tc_port_matchall->ingress.last_used, FLOW_ACTION_HW_STATS_IMMEDIATE); tc_port_matchall->ingress.packets = packets; diff --git 
a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index fd3df19eaa32..fb76903eca90 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1291,12 +1291,15 @@ static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, spin_lock(&epsfp.psfp_lock); stats.pkts = counters.matching_frames_count - filter->stats.pkts; + stats.drops = counters.not_passing_frames_count - + filter->stats.drops; stats.lastused = filter->stats.lastused; filter->stats.pkts += stats.pkts; + filter->stats.drops += stats.drops; spin_unlock(&epsfp.psfp_lock); - flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused, - FLOW_ACTION_HW_STATS_DELAYED); + flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops, + stats.lastused, FLOW_ACTION_HW_STATS_DELAYED); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 430025550fad..c7107da03212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -672,7 +672,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft, return -ENOENT; mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse); - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 7fc84f58e28a..bc9c0ac15f99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4828,7 +4828,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, no_peer_counter: mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); out: - flow_stats_update(&f->stats, bytes, packets, lastuse, + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, FLOW_ACTION_HW_STATS_DELAYED); trace_mlx5e_stats_flower(f); errout: @@ -4946,7 +4946,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv, dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; rpriv->prev_vf_vport_stats = cur_stats; - flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, + flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies, FLOW_ACTION_HW_STATS_DELAYED); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 51e1b3930c56..61d21043d83a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -633,7 +633,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, if (err) goto err_rule_get_stats; - flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats); + flow_stats_update(&f->stats, bytes, packets, 0, lastuse, used_hw_stats); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 5ce172e22b43..c90bafbd651f 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -244,7 +244,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0, + flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, 
FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 695d24b9dd92..234c652700e1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1491,7 +1491,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, nfp_flower_update_merge_stats(app, nfp_flow); flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, - priv->stats[ctx_id].pkts, priv->stats[ctx_id].used, + priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used, FLOW_ACTION_HW_STATS_DELAYED); priv->stats[ctx_id].pkts = 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index d18a830e4264..bb327d48d1ab 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -319,7 +319,7 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev, prev_stats->bytes = curr_stats->bytes; spin_unlock_bh(&fl_priv->qos_stats_lock); - flow_stats_update(&flow->stats, diff_bytes, diff_pkts, + flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0, repr_priv->qos_table.last_update, FLOW_ACTION_HW_STATS_DELAYED); return 0; diff --git a/include/net/act_api.h b/include/net/act_api.h index 8c3934880670..cb382a89ea58 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -106,7 +106,7 @@ struct tc_action_ops { struct netlink_callback *, int, const struct tc_action_ops *, struct netlink_ext_ack *); - void (*stats_update)(struct tc_action *, u64, u32, u64, bool); + void (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool); size_t (*get_fill_size)(const struct tc_action *act); struct net_device *(*get_dev)(const struct tc_action *a, tc_action_priv_destructor *destructor); @@ -232,8 +232,8 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a) spin_unlock(&a->tcfa_lock); } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw); +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw); int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); int tcf_action_check_ctrlact(int action, struct tcf_proto *tp, @@ -244,13 +244,14 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action, #endif /* CONFIG_NET_CLS_ACT */ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, - u64 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { #ifdef CONFIG_NET_CLS_ACT if (!a->ops->stats_update) return; - a->ops->stats_update(a, bytes, packets, lastuse, hw); + a->ops->stats_update(a, bytes, packets, drops, lastuse, hw); #endif } diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index f2c8311a0433..00c15f14c434 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -389,17 +389,20 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule, struct flow_stats { u64 pkts; u64 bytes; + u64 drops; u64 lastused; enum flow_action_hw_stats used_hw_stats; bool used_hw_stats_valid; }; static inline void flow_stats_update(struct flow_stats *flow_stats, - u64 bytes, u64 pkts, u64 lastused, + u64 bytes, u64 pkts, + u64 drops, u64 lastused, enum flow_action_hw_stats used_hw_stats) { flow_stats->pkts += pkts; flow_stats->bytes += bytes; + flow_stats->drops += drops; flow_stats->lastused = 
max_t(u64, flow_stats->lastused, lastused); /* The driver should pass value with a maximum of one bit set. diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ed65619cbc47..ff017e5b3ea2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -262,7 +262,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts) static inline void tcf_exts_stats_update(const struct tcf_exts *exts, - u64 bytes, u64 packets, u64 lastuse, + u64 bytes, u64 packets, u64 drops, u64 lastuse, u8 used_hw_stats, bool used_hw_stats_valid) { #ifdef CONFIG_NET_CLS_ACT @@ -273,7 +273,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts, for (i = 0; i < exts->nr_actions; i++) { struct tc_action *a = exts->actions[i]; - tcf_action_stats_update(a, bytes, packets, lastuse, true); + tcf_action_stats_update(a, bytes, packets, drops, + lastuse, true); a->used_hw_stats = used_hw_stats; a->used_hw_stats_valid = used_hw_stats_valid; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 8ac7eb0a8309..4c4466f18801 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1059,14 +1059,13 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, return err; } -void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, - bool drop, bool hw) +void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, bool hw) { if (a->cpu_bstats) { _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets); - if (drop) - this_cpu_ptr(a->cpu_qstats)->drops += packets; + this_cpu_ptr(a->cpu_qstats)->drops += drops; if (hw) _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw), @@ -1075,8 +1074,7 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets, } _bstats_update(&a->tcfa_bstats, bytes, packets); - if (drop) - a->tcfa_qstats.drops += packets; + a->tcfa_qstats.drops += drops; if (hw) _bstats_update(&a->tcfa_bstats_hw, bytes, packets); } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index e9f3576cbf71..1b9c6d4a1b6b 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -1450,12 +1450,12 @@ static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index) return tcf_idr_search(tn, a, index); } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_ct *c = to_ct(a); - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse); } diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index 416065772719..410e3bbfb9ca 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -171,14 +171,15 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a, return action; } -static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gact *gact = to_gact(a); int action = READ_ONCE(gact->tcf_action); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw); + tcf_action_update_stats(a, bytes, packets, + action == TC_ACT_SHOT ? 
packets : drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c index 9c628591f452..c818844846b1 100644 --- a/net/sched/act_gate.c +++ b/net/sched/act_gate.c @@ -568,13 +568,13 @@ static int tcf_gate_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_gate *gact = to_gate(a); struct tcf_t *tm = &gact->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 83dd82fc9f40..b2705318993b 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -312,13 +312,13 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, return retval; } -static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_mirred *m = to_mirred(a); struct tcf_t *tm = &m->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index d41d6200d9de..66986db062ed 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -409,13 +409,13 @@ static int tcf_pedit_act(struct sk_buff *skb, const struct tc_action *a, return p->tcf_action; } -static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_pedit *d = to_pedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 8b7a0ac96c51..0b431d493768 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -288,13 +288,13 @@ static void tcf_police_cleanup(struct tc_action *a) } static void tcf_police_stats_update(struct tc_action *a, - u64 bytes, u32 packets, + u64 bytes, u64 packets, u64 drops, u64 lastuse, bool hw) { struct tcf_police *police = to_police(a); struct tcf_t *tm = &police->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c index b125b2be4467..361b863e0634 100644 --- a/net/sched/act_skbedit.c +++ b/net/sched/act_skbedit.c @@ -74,12 +74,13 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a, } static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes, - u32 packets, u64 lastuse, bool hw) + u64 packets, u64 drops, + u64 lastuse, bool hw) { struct tcf_skbedit *d = to_skbedit(a); struct tcf_t *tm = &d->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git 
a/net/sched/act_vlan.c b/net/sched/act_vlan.c index c91d3958fcbb..a5ff9f68ab02 100644 --- a/net/sched/act_vlan.c +++ b/net/sched/act_vlan.c @@ -302,13 +302,13 @@ static int tcf_vlan_walker(struct net *net, struct sk_buff *skb, return tcf_generic_walker(tn, skb, cb, type, ops, extack); } -static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets, - u64 lastuse, bool hw) +static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets, + u64 drops, u64 lastuse, bool hw) { struct tcf_vlan *v = to_vlan(a); struct tcf_t *tm = &v->tcf_tm; - tcf_action_update_stats(a, bytes, packets, false, hw); + tcf_action_update_stats(a, bytes, packets, drops, hw); tm->lastuse = max_t(u64, tm->lastuse, lastuse); } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index b2da37286082..391971672d54 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -491,6 +491,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f, tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes, cls_flower.stats.pkts, + cls_flower.stats.drops, cls_flower.stats.lastused, cls_flower.stats.used_hw_stats, cls_flower.stats.used_hw_stats_valid); diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 8d39dbcf1746..cafb84480bab 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -338,7 +338,8 @@ static void mall_stats_hw_filter(struct tcf_proto *tp, tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true); tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes, - cls_mall.stats.pkts, cls_mall.stats.lastused, + cls_mall.stats.pkts, cls_mall.stats.drops, + cls_mall.stats.lastused, cls_mall.stats.used_hw_stats, cls_mall.stats.used_hw_stats_valid); } From d915c299f1da68a7dbb43895b8741c7b916c9d08 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Fri, 19 Jun 2020 19:51:35 +0530 Subject: [PATCH 0102/2056] cxgb4: add skeleton for ethtool n-tuple filters Allocate and manage resources required for ethtool n-tuple filters. Also fetch the HASH filter region size and calculate nhash entries. Signed-off-by: Rahul Lakkireddy Signed-off-by: Vishal Kulkarni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14 ++++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 82 +++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 2 + .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 38 +++++---- .../net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4 + 6 files changed, 126 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 999816273328..466a61ba23ce 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1066,6 +1066,17 @@ struct mps_entries_ref { refcount_t refcnt; }; +struct cxgb4_ethtool_filter_info { + u32 *loc_array; /* Array holding the actual TIDs set to filters */ + unsigned long *bmap; /* Bitmap for managing filters in use */ + u32 in_use; /* # of filters in use */ +}; + +struct cxgb4_ethtool_filter { + u32 nentries; /* Adapter wide number of supported filters */ + struct cxgb4_ethtool_filter_info *port; /* Per port entry */ +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -1191,6 +1202,9 @@ struct adapter { /* TC MATCHALL classifier offload */ struct cxgb4_tc_matchall *tc_matchall; + + /* Ethtool n-tuple */ + struct cxgb4_ethtool_filter *ethtool_filters; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 0bfdc97e9083..51f1d5f87bc3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -10,6 +10,7 @@ #include "t4_regs.h" #include "t4fw_api.h" #include "cxgb4_cudbg.h" +#include "cxgb4_filter.h" #define EEPROM_MAGIC 0x38E2F10C @@ -1853,6 +1854,87 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .set_priv_flags = cxgb4_set_priv_flags, }; +void cxgb4_cleanup_ethtool_filters(struct adapter *adap) +{ + struct cxgb4_ethtool_filter_info *eth_filter_info; + u8 i; + + if (!adap->ethtool_filters) + return; + + eth_filter_info = adap->ethtool_filters->port; + + if (eth_filter_info) { + for (i = 0; i < adap->params.nports; i++) { + kvfree(eth_filter_info[i].loc_array); + kfree(eth_filter_info[i].bmap); + } + kfree(eth_filter_info); + } + + kfree(adap->ethtool_filters); +} + +int cxgb4_init_ethtool_filters(struct adapter *adap) +{ + struct cxgb4_ethtool_filter_info *eth_filter_info; + struct cxgb4_ethtool_filter *eth_filter; + struct tid_info *tids = &adap->tids; + u32 nentries, i; + int ret; + + eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL); + if (!eth_filter) + return -ENOMEM; + + eth_filter_info = kcalloc(adap->params.nports, + sizeof(*eth_filter_info), + GFP_KERNEL); + if (!eth_filter_info) { + ret = -ENOMEM; + goto free_eth_filter; + } + + eth_filter->port = eth_filter_info; + + nentries = tids->nhpftids + tids->nftids; + if (is_hashfilter(adap)) + nentries += tids->nhash + + (adap->tids.stid_base - adap->tids.tid_base); + eth_filter->nentries = nentries; + + for (i = 0; i < adap->params.nports; i++) { + eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL); + if (!eth_filter->port[i].loc_array) { + ret = -ENOMEM; + goto free_eth_finfo; + } + + eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries), + sizeof(unsigned long), + GFP_KERNEL); + if (!eth_filter->port[i].bmap) { + ret = -ENOMEM; + goto free_eth_finfo; + } + } + + adap->ethtool_filters = eth_filter; + return 0; + +free_eth_finfo: + while (i-- > 
0) { + kfree(eth_filter->port[i].bmap); + kvfree(eth_filter->port[i].loc_array); + } + kfree(eth_filter_info); + +free_eth_filter: + kfree(eth_filter); + + return ret; +} + void cxgb4_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &cxgb_ethtool_ops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index b0751c0611ec..807a8dafec45 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -53,4 +53,6 @@ void clear_all_filters(struct adapter *adapter); void init_hash_filter(struct adapter *adap); bool is_filter_exact_match(struct adapter *adap, struct ch_filter_specification *fs); +void cxgb4_cleanup_ethtool_filters(struct adapter *adap); +int cxgb4_init_ethtool_filters(struct adapter *adap); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 854b1717a70d..501917751b7f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5860,6 +5860,7 @@ static void free_some_resources(struct adapter *adapter) cxgb4_cleanup_tc_mqprio(adapter); cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); + cxgb4_cleanup_ethtool_filters(adapter); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); kfree(adapter->sge.starving_fl); @@ -6493,6 +6494,24 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) i); } + if (is_offload(adapter) || is_hashfilter(adapter)) { + if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { + u32 v; + + v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A); + if (chip_ver <= CHELSIO_T5) { + adapter->tids.nhash = 1 << HASHTIDSIZE_G(v); + v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A); + adapter->tids.hash_base = v / 4; + } else { + adapter->tids.nhash = HASHTBLSIZE_G(v) << 3; + v = t4_read_reg(adapter, + T6_LE_DB_HASH_TID_BASE_A); + adapter->tids.hash_base = v; + } + } + } + if (tid_init(&adapter->tids) < 0) { dev_warn(&pdev->dev, "could not allocate TID table, " "continuing\n"); @@ -6514,22 +6533,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (cxgb4_init_tc_matchall(adapter)) dev_warn(&pdev->dev, "could not offload tc matchall, continuing\n"); - } - - if (is_offload(adapter) || is_hashfilter(adapter)) { - if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { - u32 hash_base, hash_reg; - - if (chip_ver <= CHELSIO_T5) { - hash_reg = LE_DB_TID_HASHBASE_A; - hash_base = t4_read_reg(adapter, hash_reg); - adapter->tids.hash_base = hash_base / 4; - } else { - hash_reg = T6_LE_DB_HASH_TID_BASE_A; - hash_base = t4_read_reg(adapter, hash_reg); - adapter->tids.hash_base = hash_base; - } - } + if (cxgb4_init_ethtool_filters(adapter)) + dev_warn(&pdev->dev, + "could not initialize ethtool filters, continuing\n"); } /* See what interrupts we'll be using */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index dbce99b209d6..a963fd0b4540 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -106,6 +106,8 @@ struct tid_info { unsigned long *stid_bmap; unsigned int nstids; unsigned int stid_base; + + unsigned int nhash; unsigned int hash_base; union aopen_entry *atid_tab; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 4b697550f08d..065c01c654ff 
100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -3044,6 +3044,10 @@ #define HASHTIDSIZE_M 0x3fU #define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M) +#define HASHTBLSIZE_S 3 +#define HASHTBLSIZE_M 0x1ffffU +#define HASHTBLSIZE_G(x) (((x) >> HASHTBLSIZE_S) & HASHTBLSIZE_M) + #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c From c8729cac2a11e4bc170f5d0041d5561bb7fe82a0 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Fri, 19 Jun 2020 19:51:36 +0530 Subject: [PATCH 0103/2056] cxgb4: add ethtool n-tuple filter insertion Add support to parse and insert ethtool n-tuple filters. Translate n-tuple spec to flow spec and use the existing tc-flower offload infra to insert ethtool n-tuple filters. Signed-off-by: Rahul Lakkireddy Signed-off-by: Vishal Kulkarni Signed-off-by: David S. Miller --- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 75 ++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 5 + .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 97 ++++++++++--------- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3 + 5 files changed, 135 insertions(+), 47 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 51f1d5f87bc3..82fc09b6dc8e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -11,6 +11,7 @@ #include "t4fw_api.h" #include "cxgb4_cudbg.h" #include "cxgb4_filter.h" +#include "cxgb4_tc_flower.h" #define EEPROM_MAGIC 0x38E2F10C @@ -1635,6 +1636,79 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return -EOPNOTSUPP; } +/* Add Ethtool n-tuple filters. 
*/ +static int cxgb4_ntuple_set_filter(struct net_device *netdev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec_input input = {}; + struct cxgb4_ethtool_filter_info *filter_info; + struct adapter *adapter = netdev2adap(netdev); + struct port_info *pi = netdev_priv(netdev); + struct ch_filter_specification fs; + struct ethtool_rx_flow_rule *flow; + u32 tid; + int ret; + + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; /* can still change nfilters */ + + if (!adapter->ethtool_filters) + return -EOPNOTSUPP; + + if (cmd->fs.location >= adapter->ethtool_filters->nentries) { + dev_err(adapter->pdev_dev, + "Location must be < %u", + adapter->ethtool_filters->nentries); + return -ERANGE; + } + + if (test_bit(cmd->fs.location, + adapter->ethtool_filters->port[pi->port_id].bmap)) + return -EEXIST; + + memset(&fs, 0, sizeof(fs)); + + input.fs = &cmd->fs; + flow = ethtool_rx_flow_rule_create(&input); + if (IS_ERR(flow)) { + ret = PTR_ERR(flow); + goto exit; + } + + fs.hitcnts = 1; + + ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location, + NULL, &fs, &tid); + if (ret) + goto free; + + filter_info = &adapter->ethtool_filters->port[pi->port_id]; + + filter_info->loc_array[cmd->fs.location] = tid; + set_bit(cmd->fs.location, filter_info->bmap); + filter_info->in_use++; + +free: + ethtool_rx_flow_rule_destroy(flow); +exit: + return ret; +} + +static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = cxgb4_ntuple_set_filter(dev, cmd); + break; + default: + break; + } + + return ret; +} + static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump) { struct adapter *adapter = netdev2adap(dev); @@ -1840,6 +1914,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_regs_len = get_regs_len, .get_regs = get_regs, .get_rxnfc = get_rxnfc, + .set_rxnfc = set_rxnfc, .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, .set_rxfh = set_rss_table, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 796555255207..a12df792d832 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1152,6 +1152,11 @@ bool is_filter_exact_match(struct adapter *adap, if (!is_hashfilter(adap)) return false; + if ((atomic_read(&adap->tids.hash_tids_in_use) + + atomic_read(&adap->tids.tids_in_use)) >= + (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base))) + return false; + /* Keep tunnel VNI match disabled for hash-filters for now */ if (fs->mask.encap_vld) return false; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 501917751b7f..7423980bc49a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6371,7 +6371,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_TC; + NETIF_F_HW_TC | NETIF_F_NTUPLE; if (chip_ver > CHELSIO_T5) { netdev->hw_enc_features |= NETIF_F_IP_CSUM | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 030de20a5d27..260d1219b723 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -81,19 +81,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, } static void cxgb4_process_flow_match(struct net_device *dev, - struct flow_cls_offload *cls, + struct flow_rule *rule, struct ch_filter_specification *fs) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); - u16 addr_type = 0; - - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_match_control match; - - flow_rule_match_control(rule, &match); - addr_type = match.key->addr_type; - } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; u16 ethtype_key, ethtype_mask; @@ -116,7 +106,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.proto = match.mask->ip_proto; } - if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); @@ -131,7 +121,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src)); } - if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); @@ -224,9 +214,8 @@ static void cxgb4_process_flow_match(struct net_device *dev, } static int cxgb4_validate_flow_match(struct net_device *dev, - struct flow_cls_offload *cls) + struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct flow_dissector *dissector = rule->match.dissector; u16 ethtype_mask = 0; u16 ethtype_key = 0; @@ -693,14 +682,11 @@ static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio) spin_unlock_bh(&t->ftid_lock); } -int cxgb4_tc_flower_replace(struct net_device *dev, - struct flow_cls_offload *cls) +int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, + u32 tc_prio, struct netlink_ext_ack *extack, + struct ch_filter_specification *fs, u32 *tid) { - struct flow_rule *rule = flow_cls_offload_flow_rule(cls); - struct netlink_ext_ack *extack = cls->common.extack; struct adapter *adap = netdev2adap(dev); - struct ch_tc_flower_entry *ch_flower; - struct ch_filter_specification *fs; struct filter_ctx ctx; u8 inet_family; int fidx, ret; @@ -708,18 +694,10 @@ int cxgb4_tc_flower_replace(struct net_device *dev, if (cxgb4_validate_flow_actions(dev, &rule->action, extack)) return -EOPNOTSUPP; - if (cxgb4_validate_flow_match(dev, cls)) + if (cxgb4_validate_flow_match(dev, rule)) return -EOPNOTSUPP; - ch_flower = allocate_flower_entry(); - if (!ch_flower) { - netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); - return -ENOMEM; - } - - fs = &ch_flower->fs; - fs->hitcnts = 1; - cxgb4_process_flow_match(dev, cls, fs); + cxgb4_process_flow_match(dev, rule, fs); cxgb4_process_flow_actions(dev, &rule->action, fs); fs->hash = is_filter_exact_match(adap, fs); @@ -730,12 +708,11 @@ int cxgb4_tc_flower_replace(struct net_device *dev, * existing rules. 
*/ fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash, - cls->common.prio); + tc_prio); if (fidx < 0) { NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); - ret = -ENOMEM; - goto free_entry; + return -ENOMEM; } if (fidx < adap->tids.nhpftids) { @@ -749,42 +726,70 @@ int cxgb4_tc_flower_replace(struct net_device *dev, if (fs->hash) fidx = 0; - fs->tc_prio = cls->common.prio; - fs->tc_cookie = cls->cookie; + fs->tc_prio = tc_prio; init_completion(&ctx.completion); ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); if (ret) { netdev_err(dev, "%s: filter creation err %d\n", __func__, ret); - goto free_entry; + return ret; } /* Wait for reply */ ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ); - if (!ret) { - ret = -ETIMEDOUT; - goto free_entry; + if (!ret) + return -ETIMEDOUT; + + /* Check if hw returned error for filter creation */ + if (ctx.result) + return ctx.result; + + *tid = ctx.tid; + + if (fs->hash) + cxgb4_tc_flower_hash_prio_add(adap, tc_prio); + + return 0; +} + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + struct ch_filter_specification *fs; + int ret; + + ch_flower = allocate_flower_entry(); + if (!ch_flower) { + netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); + return -ENOMEM; } - ret = ctx.result; - /* Check if hw returned error for filter creation */ + fs = &ch_flower->fs; + fs->hitcnts = 1; + fs->tc_cookie = cls->cookie; + + ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs, + &ch_flower->filter_id); if (ret) goto free_entry; ch_flower->tc_flower_cookie = cls->cookie; - ch_flower->filter_id = ctx.tid; ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node, adap->flower_ht_params); if (ret) goto del_filter; - if (fs->hash) - cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio); - return 0; del_filter: + if (fs->hash) + cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio); + cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); free_entry: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 0a30c96b81ff..7fa379749500 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -121,6 +121,9 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, struct flow_cls_offload *cls); int cxgb4_tc_flower_stats(struct net_device *dev, struct flow_cls_offload *cls); +int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, + u32 tc_prio, struct netlink_ext_ack *extack, + struct ch_filter_specification *fs, u32 *tid); int cxgb4_init_tc_flower(struct adapter *adap); void cxgb4_cleanup_tc_flower(struct adapter *adap); From db43b30cd89c2549216b9e4ef27c5d2f9dcb964d Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Fri, 19 Jun 2020 19:51:37 +0530 Subject: [PATCH 0104/2056] cxgb4: add ethtool n-tuple filter deletion Add support to delete ethtool n-tuple filter. Fetch the appropriate filter region (HPFILTER, HASH, NORMAL) in which the filter exists, and delete it from the respective region, accordingly. Signed-off-by: Rahul Lakkireddy Signed-off-by: Vishal Kulkarni Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 61 +++++++++++++++++++ .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 30 ++++++--- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 2 + 3 files changed, 84 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 82fc09b6dc8e..5c588677877d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1573,6 +1573,22 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key, return -EPERM; } +static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap, + u32 ftid) +{ + struct tid_info *t = &adap->tids; + struct filter_entry *f; + + if (ftid < t->nhpftids) + f = &adap->tids.hpftid_tab[ftid]; + else if (ftid < t->nftids) + f = &adap->tids.ftid_tab[ftid - t->nhpftids]; + else + f = lookup_tid(&adap->tids, ftid); + + return f; +} + static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { @@ -1636,6 +1652,48 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return -EOPNOTSUPP; } +static int cxgb4_ntuple_del_filter(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct cxgb4_ethtool_filter_info *filter_info; + struct adapter *adapter = netdev2adap(dev); + struct port_info *pi = netdev_priv(dev); + struct filter_entry *f; + u32 filter_id; + int ret; + + if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; /* can still change nfilters */ + + if (!adapter->ethtool_filters) + return -EOPNOTSUPP; + + if (cmd->fs.location >= adapter->ethtool_filters->nentries) { + dev_err(adapter->pdev_dev, + "Location must be < %u", + adapter->ethtool_filters->nentries); + return -ERANGE; + } + + filter_info = &adapter->ethtool_filters->port[pi->port_id]; + + if (!test_bit(cmd->fs.location, filter_info->bmap)) + return -ENOENT; + + filter_id = filter_info->loc_array[cmd->fs.location]; + f = cxgb4_get_filter_entry(adapter, filter_id); + + ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id); + if (ret) + goto err; + + clear_bit(cmd->fs.location, filter_info->bmap); + filter_info->in_use--; + +err: + return ret; +} + /* Add Ethtool n-tuple filters. 
*/ static int cxgb4_ntuple_set_filter(struct net_device *netdev, struct ethtool_rxnfc *cmd) @@ -1702,6 +1760,9 @@ static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLINS: ret = cxgb4_ntuple_set_filter(dev, cmd); break; + case ETHTOOL_SRXCLSRLDEL: + ret = cxgb4_ntuple_del_filter(dev, cmd); + break; default: break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 260d1219b723..3ded4fec6c40 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -797,23 +797,38 @@ int cxgb4_tc_flower_replace(struct net_device *dev, return ret; } +int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio, + struct ch_filter_specification *fs, int tid) +{ + struct adapter *adap = netdev2adap(dev); + u8 hash; + int ret; + + hash = fs->hash; + + ret = cxgb4_del_filter(dev, tid, fs); + if (ret) + return ret; + + if (hash) + cxgb4_tc_flower_hash_prio_del(adap, tc_prio); + + return ret; +} + int cxgb4_tc_flower_destroy(struct net_device *dev, struct flow_cls_offload *cls) { struct adapter *adap = netdev2adap(dev); struct ch_tc_flower_entry *ch_flower; - u32 tc_prio; - bool hash; int ret; ch_flower = ch_flower_lookup(adap, cls->cookie); if (!ch_flower) return -ENOENT; - hash = ch_flower->fs.hash; - tc_prio = ch_flower->fs.tc_prio; - - ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); + ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio, + &ch_flower->fs, ch_flower->filter_id); if (ret) goto err; @@ -825,9 +840,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, } kfree_rcu(ch_flower, rcu); - if (hash) - cxgb4_tc_flower_hash_prio_del(adap, tc_prio); - err: return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 7fa379749500..befa459324fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -124,6 +124,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev, int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, u32 tc_prio, struct netlink_ext_ack *extack, struct ch_filter_specification *fs, u32 *tid); +int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio, + struct ch_filter_specification *fs, int tid); int cxgb4_init_tc_flower(struct adapter *adap); void cxgb4_cleanup_tc_flower(struct adapter *adap); From 27ee29936443f966547c238a93fd1d9fa4e18c2e Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Fri, 19 Jun 2020 19:51:38 +0530 Subject: [PATCH 0105/2056] cxgb4: add support to fetch ethtool n-tuple filters Add support to fetch the requested ethtool n-tuple filters by translating them from hardware spec to ethtool n-tuple spec. Signed-off-by: Rahul Lakkireddy Signed-off-by: Vishal Kulkarni Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 110 ++++++++++++++++++ 1 file changed, 110 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 5c588677877d..3dd28e5856ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1589,10 +1589,104 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap, return f; } +static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs, + struct ch_filter_specification *dfs) +{ + switch (dfs->val.proto) { + case IPPROTO_TCP: + if (dfs->type) + fs->flow_type = TCP_V6_FLOW; + else + fs->flow_type = TCP_V4_FLOW; + break; + case IPPROTO_UDP: + if (dfs->type) + fs->flow_type = UDP_V6_FLOW; + else + fs->flow_type = UDP_V4_FLOW; + break; + } + + if (dfs->type) { + fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport); + fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport); + fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport); + fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport); + memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0], + sizeof(fs->h_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0], + sizeof(fs->m_u.tcp_ip6_spec.ip6src)); + memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0], + sizeof(fs->h_u.tcp_ip6_spec.ip6dst)); + memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0], + sizeof(fs->m_u.tcp_ip6_spec.ip6dst)); + fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos; + fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos; + } else { + fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport); + fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport); + fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport); + fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport); + memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0], + sizeof(fs->h_u.tcp_ip4_spec.ip4src)); + memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0], + sizeof(fs->m_u.tcp_ip4_spec.ip4src)); + memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0], + sizeof(fs->h_u.tcp_ip4_spec.ip4dst)); + memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0], + sizeof(fs->m_u.tcp_ip4_spec.ip4dst)); + fs->h_u.tcp_ip4_spec.tos = dfs->val.tos; + fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos; + } + fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan); + fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan); + fs->flow_type |= FLOW_EXT; + + if (dfs->action == FILTER_DROP) + fs->ring_cookie = RX_CLS_FLOW_DISC; + else + fs->ring_cookie = dfs->iq; +} + +static int cxgb4_ntuple_get_filter(struct net_device *dev, + struct ethtool_rxnfc *cmd, + unsigned int loc) +{ + const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = netdev2adap(dev); + struct filter_entry *f; + int ftid; + + if (!(adap->flags & CXGB4_FULL_INIT_DONE)) + return -EAGAIN; + + /* Check for maximum filter range */ + if (!adap->ethtool_filters) + return -EOPNOTSUPP; + + if (loc >= adap->ethtool_filters->nentries) + return -ERANGE; + + if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap)) + return -ENOENT; + + ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc]; + + /* Fetch filter_entry */ + f = cxgb4_get_filter_entry(adap, ftid); + + cxgb4_fill_filter_rule(&cmd->fs, &f->fs); + + return 0; +} + static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { const struct port_info *pi = netdev_priv(dev); + struct adapter *adap = netdev2adap(dev); + unsigned int count = 0, 
index = 0; + int ret = 0; switch (info->cmd) { case ETHTOOL_GRXFH: { @@ -1648,7 +1742,23 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = pi->nqsets; return 0; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = + adap->ethtool_filters->port[pi->port_id].in_use; + return 0; + case ETHTOOL_GRXCLSRULE: + return cxgb4_ntuple_get_filter(dev, info, info->fs.location); + case ETHTOOL_GRXCLSRLALL: + info->data = adap->ethtool_filters->nentries; + while (count < info->rule_cnt) { + ret = cxgb4_ntuple_get_filter(dev, info, index); + if (!ret) + rules[count++] = index; + index++; + } + return 0; } + return -EOPNOTSUPP; } From 4dababa232f26eda7e827cfc0360c160549d9a84 Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Fri, 19 Jun 2020 19:51:39 +0530 Subject: [PATCH 0106/2056] cxgb4: add action to steer flows to specific Rxq Add support for queue action to steer Rx traffic hitting the flows to specified Rxq. Signed-off-by: Rahul Lakkireddy Signed-off-by: Vishal Kulkarni Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3ded4fec6c40..ccc7aab9a8be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -425,6 +425,11 @@ void cxgb4_process_flow_actions(struct net_device *in, process_pedit_field(fs, val, mask, offset, htype); } break; + case FLOW_ACTION_QUEUE: + fs->action = FILTER_PASS; + fs->dirsteer = 1; + fs->iq = act->queue.index; + break; default: break; } @@ -609,6 +614,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev, act_pedit = true; } break; + case FLOW_ACTION_QUEUE: + /* Do nothing. cxgb4_set_filter will validate */ + break; default: netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; From cc7a21b6fbd945f8d8f61422ccd27203c1fafeb7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:02:59 -0700 Subject: [PATCH 0107/2056] ipv6: icmp6: avoid indirect call for icmpv6_send() If IPv6 is builtin, we do not need an expensive indirect call to reach icmp6_send(). v2: put inline keyword before the type to avoid sparse warnings. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/icmpv6.h | 22 +++++++++++++++++++++- net/ipv6/icmp.c | 5 +++-- net/ipv6/ip6_icmp.c | 10 +++++----- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 33d379602314..1b3371ae8193 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr); +#if IS_BUILTIN(CONFIG_IPV6) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + icmp6_send(skb, type, code, info, NULL); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index fc5000370030..91e0f2fd2523 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -439,8 +439,8 @@ static int icmp6_iif(const struct sk_buff *skb) /* * Send an ICMP message in response to a packet in error */ -static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, - const struct in6_addr *force_saddr) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr) { struct inet6_dev *idev = NULL; struct ipv6hdr *hdr = ipv6_hdr(skb); @@ -625,6 +625,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, out_bh_enable: local_bh_enable(); } +EXPORT_SYMBOL(icmp6_send); /* Slightly more convenient version of icmp6_send. */ diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c index e0086758b6ee..70c8c2f36c98 100644 --- a/net/ipv6/ip6_icmp.c +++ b/net/ipv6/ip6_icmp.c @@ -9,6 +9,8 @@ #if IS_ENABLED(CONFIG_IPV6) +#if !IS_BUILTIN(CONFIG_IPV6) + static ip6_icmp_send_t __rcu *ip6_icmp_send; int inet6_register_icmp_sender(ip6_icmp_send_t *fn) @@ -37,14 +39,12 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) rcu_read_lock(); send = rcu_dereference(ip6_icmp_send); - - if (!send) - goto out; - send(skb, type, code, info, NULL); -out: + if (send) + send(skb, type, code, info, NULL); rcu_read_unlock(); } EXPORT_SYMBOL(icmpv6_send); +#endif #if IS_ENABLED(CONFIG_NF_NAT) #include From f362b70bd67add130ae4321306b62237356889a4 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 19 Jun 2020 12:37:15 -0500 Subject: [PATCH 0108/2056] ethernet: ti: am65-cpsw-qos: Use struct_size() in devm_kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. Also, remove unnecessary variable _size_. This code was detected with the help of Coccinelle and, audited and fixed manually. Addresses-KSPP-ID: https://github.com/KSPP/linux/issues/83 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/am65-cpsw-qos.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c index 32eac04468bb..3bdd4dbcd2ff 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-qos.c +++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c @@ -505,7 +505,6 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data) struct am65_cpsw_port *port = am65_ndev_to_port(ndev); struct tc_taprio_qopt_offload *taprio = type_data; struct am65_cpsw_est *est_new; - size_t size; int ret = 0; if (taprio->cycle_time_extension) { @@ -513,10 +512,9 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data) return -EOPNOTSUPP; } - size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries + - sizeof(struct am65_cpsw_est); - - est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL); + est_new = devm_kzalloc(&ndev->dev, + struct_size(est_new, taprio.entries, taprio->num_entries), + GFP_KERNEL); if (!est_new) return -ENOMEM; From a422d5ff6defb1c86a93f3f320d35eeb545455dd Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 19 Jun 2020 13:01:49 -0500 Subject: [PATCH 0109/2056] cxgb4: Use struct_size() helper Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Addresses-KSPP-ID: https://github.com/KSPP/linux/issues/83 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1359158652b7..b3da3d05aa2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2431,7 +2431,7 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc) struct sk_buff *skb; int ret = 0; - len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams; + len = struct_size(flowc, mnemval, nparams); len16 = DIV_ROUND_UP(len, 16); entry = cxgb4_lookup_eotid(&adap->tids, eotid); From 70fc6d9c14e24f9efae968589e5edf584a788e23 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 19 Jun 2020 13:10:07 -0500 Subject: [PATCH 0110/2056] net: dsa: sja1105: Use struct_size() in kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Addresses-KSPP-ID: https://github.com/KSPP/linux/issues/83 Signed-off-by: Gustavo A. R. Silva Acked-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/sja1105/sja1105_tas.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c index 3aa1a8b5f766..31d8acff1f01 100644 --- a/drivers/net/dsa/sja1105/sja1105_tas.c +++ b/drivers/net/dsa/sja1105/sja1105_tas.c @@ -475,8 +475,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port, if (list_empty(&gating_cfg->entries)) return false; - dummy = kzalloc(sizeof(struct tc_taprio_sched_entry) * num_entries + - sizeof(struct tc_taprio_qopt_offload), GFP_KERNEL); + dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL); if (!dummy) { NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory"); return true; From 6c6935419e2fd8ef1fcde71c4e50b9095e520900 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 18 Jun 2020 16:46:31 -0700 Subject: [PATCH 0111/2056] bpf: Avoid verifier failure for 32bit pointer arithmetic When do experiments with llvm (disabling instcombine and simplifyCFG), I hit the following error with test_seg6_loop.o. ; R1=pkt(id=0,off=0,r=48,imm=0), R7=pkt(id=0,off=40,r=48,imm=0) w2 = w7 ; R2_w=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) w2 -= w1 R2 32-bit pointer arithmetic prohibited The corresponding source code is: uint32_t srh_off // srh and skb->data are all packet pointers srh_off = (char *)srh - (char *)(long)skb->data; The verifier does not support 32-bit pointer/scalar arithmetic. Without my llvm change, the code looks like ; R3=pkt(id=0,off=40,r=48,imm=0), R8=pkt(id=0,off=0,r=48,imm=0) w3 -= w8 ; R3_w=inv(id=0) This is explicitly allowed in verifier if both registers are pointers and the opcode is BPF_SUB. To fix this problem, I changed the verifier to allow 32-bit pointer/scaler BPF_SUB operations. At the source level, the issue could be workarounded with inline asm or changing "uint32_t srh_off" to "uint64_t srh_off". But I feel that verifier change might be the right thing to do. Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200618234631.3321118-1-yhs@fb.com --- kernel/bpf/verifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 34cde841ab68..a1857c4ffaaf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5031,6 +5031,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ + if (opcode == BPF_SUB && env->allow_ptr_leaks) { + __mark_reg_unknown(env, dst_reg); + return 0; + } + verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); From d56b74b9e1b8d747171dc6ff60315c00c41562ce Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 18 Jun 2020 16:46:32 -0700 Subject: [PATCH 0112/2056] tools/bpf: Add verifier tests for 32bit pointer/scalar arithmetic Added two test_verifier subtests for 32bit pointer/scalar arithmetic with BPF_SUB operator. They are passing verifier now. 
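For reference, a minimal stand-alone C sketch of the pattern these subtests exercise — hypothetical helper names, plain user-space C rather than an actual seg6 BPF program — showing the 32-bit form that the verifier used to reject and the 64-bit source-level workaround mentioned above:

#include <stdint.h>

/* 32-bit destination: with a u32 result, LLVM may lower the pointer
 * difference to 32-bit ALU ops (w2 = w7; w2 -= w1), which the verifier
 * previously rejected as "32-bit pointer arithmetic prohibited".
 */
uint32_t srh_off_32(const char *srh, const char *data)
{
	return (uint32_t)(srh - data);
}

/* 64-bit destination: the workaround noted in the previous commit
 * message; the subtraction stays in 64-bit registers.
 */
uint64_t srh_off_64(const char *srh, const char *data)
{
	return (uint64_t)(srh - data);
}

With the verifier change in the previous patch, both forms are accepted; before it, only the 64-bit variant passed.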
Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200618234632.3321367-1-yhs@fb.com --- .../selftests/bpf/verifier/value_ptr_arith.c | 38 +++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 97ee658e1242..ed4e76b24649 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -836,3 +836,41 @@ .errstr = "R0 invalid mem access 'inv'", .errstr_unpriv = "R0 pointer -= pointer prohibited", }, +{ + "32bit pkt_ptr -= scalar", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), + BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7), + BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "32bit scalar -= pkt_ptr", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40), + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2), + BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6), + BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = ACCEPT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, From bb8dc2695a7db4f35c1de94d212f86229bb4a5d2 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 15:20:24 -0700 Subject: [PATCH 0113/2056] tools/bpftool: Relicense bpftool's BPF profiler prog as dual-license GPL/BSD Relicense it to be compatible with the rest of bpftool files. Suggested-by: Quentin Monnet Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200619222024.519774-1-andriin@fb.com --- tools/bpf/bpftool/skeleton/profiler.bpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c index 20034c12f7c5..c9d196ddb670 100644 --- a/tools/bpf/bpftool/skeleton/profiler.bpf.c +++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook #include "profiler.h" #include @@ -116,4 +116,4 @@ int BPF_PROG(fexit_XXX) return 0; } -char LICENSE[] SEC("license") = "GPL"; +char LICENSE[] SEC("license") = "Dual BSD/GPL"; From e034c6d23bc43266af1fa983212218f4aa38f995 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 18 Jun 2020 08:35:00 -0500 Subject: [PATCH 0114/2056] tipc: Use struct_size() helper Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- net/tipc/link.c | 8 ++++---- net/tipc/msg.h | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/net/tipc/link.c b/net/tipc/link.c index eac89a3e22ce..1c579357ccdf 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1385,12 +1385,12 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l, p = (struct tipc_gap_ack_blks *)msg_data(hdr); sz = ntohs(p->len); /* Sanity check */ - if (sz == tipc_gap_ack_blks_sz(p->ugack_cnt + p->bgack_cnt)) { + if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) { /* Good, check if the desired type exists */ if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt)) goto ok; /* Backward compatible: peer might not support bc, but uc? */ - } else if (uc && sz == tipc_gap_ack_blks_sz(p->ugack_cnt)) { + } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) { if (p->ugack_cnt) { p->bgack_cnt = 0; goto ok; @@ -1472,7 +1472,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr) __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0; /* Total len */ - len = tipc_gap_ack_blks_sz(ga->bgack_cnt + ga->ugack_cnt); + len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt); ga->len = htons(len); return len; } @@ -1521,7 +1521,7 @@ static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r, gacks = &ga->gacks[ga->bgack_cnt]; } else if (ga) { /* Copy the Gap ACKs, bc part, for later renewal if needed */ - this_ga = kmemdup(ga, tipc_gap_ack_blks_sz(ga->bgack_cnt), + this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt), GFP_ATOMIC); if (likely(this_ga)) { this_ga->start_index = 0; diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 65119e81ff0c..1016e96db5c4 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -189,11 +189,9 @@ struct tipc_gap_ack_blks { struct tipc_gap_ack gacks[]; }; -#define tipc_gap_ack_blks_sz(n) (sizeof(struct tipc_gap_ack_blks) + \ - sizeof(struct tipc_gap_ack) * (n)) - #define MAX_GAP_ACK_BLKS 128 -#define MAX_GAP_ACK_BLKS_SZ tipc_gap_ack_blks_sz(MAX_GAP_ACK_BLKS) +#define MAX_GAP_ACK_BLKS_SZ (sizeof(struct tipc_gap_ack_blks) + \ + sizeof(struct tipc_gap_ack) * MAX_GAP_ACK_BLKS) static inline struct tipc_msg *buf_msg(struct sk_buff *skb) { From 454a78d17845882aa552dc3ab86605910da9a96d Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:32 +0100 Subject: [PATCH 0115/2056] net: phy: clean up cortina workaround Move the Cortina PHY workaround out of the "devices in package" loop; it doesn't need to be in there as the control flow will terminate the loop once we enter the workaround irrespective of the workaround's outcome. The workaround is triggered by the ID being mostly 1's, which will in any case terminate the loop. Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 04946de74fa0..c71dc2624c4b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -720,23 +720,21 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); if (phy_reg < 0) return -EIO; + } + if ((*devs & 0x1fffffff) == 0x1fffffff) { + /* If mostly Fs, there is no device there, then let's probe + * MMD 0, as some 10G PHYs have zero Devices In package, + * e.g. Cortina CS4315/CS4340 PHY. 
+ */ + phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); + if (phy_reg < 0) + return -EIO; + + /* no device there, let's get out of here */ if ((*devs & 0x1fffffff) == 0x1fffffff) { - /* If mostly Fs, there is no device there, - * then let's continue to probe more, as some - * 10G PHYs have zero Devices In package, - * e.g. Cortina CS4315/CS4340 PHY. - */ - phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); - if (phy_reg < 0) - return -EIO; - /* no device there, let's get out of here */ - if ((*devs & 0x1fffffff) == 0x1fffffff) { - *phy_id = 0xffffffff; - return 0; - } else { - break; - } + *phy_id = 0xffffffff; + return 0; } } From e63062616df323cee4266ed3f964e20e00092d9a Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:37 +0100 Subject: [PATCH 0116/2056] net: phy: clean up PHY ID reading Rearrange the code to read the PHY IDs, so we don't call get_phy_id() only to immediately call get_phy_c45_ids(). Move that logic into get_phy_device(), which results in better readability. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c71dc2624c4b..a351eabe772f 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -758,29 +758,18 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, } /** - * get_phy_id - reads the specified addr for its ID. + * get_phy_c22_id - reads the specified addr for its clause 22 ID. * @bus: the target MII bus * @addr: PHY address on the MII bus * @phy_id: where to store the ID retrieved. - * @is_c45: If true the PHY uses the 802.3 clause 45 protocol - * @c45_ids: where to store the c45 ID information. - * - * Description: In the case of a 802.3-c22 PHY, reads the ID registers - * of the PHY at @addr on the @bus, stores it in @phy_id and returns - * zero on success. - * - * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and - * its return value is in turn returned. * + * Read the 802.3 clause 22 PHY ID from the PHY at @addr on the @bus. + * Return the PHY ID read from the PHY in @phy_id on successful access. */ -static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id, - bool is_c45, struct phy_c45_device_ids *c45_ids) +static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id) { int phy_reg; - if (is_c45) - return get_phy_c45_ids(bus, addr, phy_id, c45_ids); - /* Grab the bits from PHYIR1, and put them in the upper half */ phy_reg = mdiobus_read(bus, addr, MII_PHYSID1); if (phy_reg < 0) { @@ -819,7 +808,11 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) c45_ids.devices_in_package = 0; memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); - r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids); + if (is_c45) + r = get_phy_c45_ids(bus, addr, &phy_id, &c45_ids); + else + r = get_phy_c22_id(bus, addr, &phy_id); + if (r) return ERR_PTR(r); From 48c543887bc528451e3a64c503ae82386520c686 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:43 +0100 Subject: [PATCH 0117/2056] net: phy: clean up get_phy_c45_ids() failure handling When we decide that a PHY is not present, we do not need to go through the hoops of setting *phy_id to 0xffffffff, and then return zero to make get_phy_device() fail - we can return -ENODEV which will have the same effect. 
Doing so means we no longer have to pass a pointer to phy_id in, and we can then clean up the clause 22 path in a similar way. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index a351eabe772f..e8dc9fcf188e 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -697,16 +697,16 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. * @bus: the target MII bus * @addr: PHY address on the MII bus - * @phy_id: where to store the ID retrieved. * @c45_ids: where to store the c45 ID information. * - * If the PHY devices-in-package appears to be valid, it and the - * corresponding identifiers are stored in @c45_ids, zero is stored - * in @phy_id. Otherwise 0xffffffff is stored in @phy_id. Returns - * zero on success. + * Read the PHY "devices in package". If this appears to be valid, read + * the PHY identifiers for each device. Return the "devices in package" + * and identifiers in @c45_ids. * + * Returns zero on success, %-EIO on bus access error, or %-ENODEV if + * the "devices in package" is invalid. */ -static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, +static int get_phy_c45_ids(struct mii_bus *bus, int addr, struct phy_c45_device_ids *c45_ids) { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); @@ -732,10 +732,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, return -EIO; /* no device there, let's get out of here */ - if ((*devs & 0x1fffffff) == 0x1fffffff) { - *phy_id = 0xffffffff; - return 0; - } + if ((*devs & 0x1fffffff) == 0x1fffffff) + return -ENODEV; } /* Now probe Device Identifiers for each device present. */ @@ -753,7 +751,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, return -EIO; c45_ids->device_ids[i] |= phy_reg; } - *phy_id = 0; + return 0; } @@ -809,7 +807,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); if (is_c45) - r = get_phy_c45_ids(bus, addr, &phy_id, &c45_ids); + r = get_phy_c45_ids(bus, addr, &c45_ids); else r = get_phy_c22_id(bus, addr, &phy_id); From ee951005e95eb61b3bde893b6c7be1abaf07f59b Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:48 +0100 Subject: [PATCH 0118/2056] net: phy: clean up get_phy_c22_id() invalid ID handling Move the ID check from get_phy_device() into get_phy_c22_id(), which simplifies get_phy_device(). The ID reading functions are now responsible for indicating whether they found a PHY or not via their return code - they must return -ENODEV when a PHY is not present. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e8dc9fcf188e..0e802c6add09 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -761,8 +761,10 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, * @addr: PHY address on the MII bus * @phy_id: where to store the ID retrieved. * - * Read the 802.3 clause 22 PHY ID from the PHY at @addr on the @bus. 
- * Return the PHY ID read from the PHY in @phy_id on successful access. + * Read the 802.3 clause 22 PHY ID from the PHY at @addr on the @bus, + * placing it in @phy_id. Return zero on successful read and the ID is + * valid, %-EIO on bus access error, or %-ENODEV if no device responds + * or invalid ID. */ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id) { @@ -784,6 +786,10 @@ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id) *phy_id |= phy_reg; + /* If the phy_id is mostly Fs, there is no device there */ + if ((*phy_id & 0x1fffffff) == 0x1fffffff) + return -ENODEV; + return 0; } @@ -814,10 +820,6 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) if (r) return ERR_PTR(r); - /* If the phy_id is mostly Fs, there is no device there */ - if ((phy_id & 0x1fffffff) == 0x1fffffff) - return ERR_PTR(-ENODEV); - return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids); } EXPORT_SYMBOL(get_phy_device); From 439625a7726cebe3ba5eb68a6ac74ed30846a384 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:53 +0100 Subject: [PATCH 0119/2056] net: phy: reword get_phy_device() kerneldoc Reword the get_phy_device() kerneldoc to be more explicit about how we probe for the PHY, and what the various return conditions signify. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0e802c6add09..09096c3ceb86 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -800,8 +800,17 @@ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id) * @addr: PHY address on the MII bus * @is_c45: If true the PHY uses the 802.3 clause 45 protocol * - * Description: Reads the ID registers of the PHY at @addr on the - * @bus, then allocates and returns the phy_device to represent it. + * Probe for a PHY at @addr on @bus. + * + * When probing for a clause 22 PHY, then read the ID registers. If we find + * a valid ID, allocate and return a &struct phy_device. + * + * When probing for a clause 45 PHY, read the "devices in package" registers. + * If the "devices in package" appears valid, read the ID registers for each + * MMD, allocate and return a &struct phy_device. + * + * Returns an allocated &struct phy_device on success, %-ENODEV if there is + * no PHY present, or %-EIO on bus access error. */ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) { From c746053d275c8b6ff1d713addabf049c9c9a58fc Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:45:58 +0100 Subject: [PATCH 0120/2056] net: phy: add support for probing MMDs >= 8 for devices-in-package Add support for probing MMDs above 7 for a valid devices-in-package specifier, but only probe the vendor MMDs for this if they also report that there the device is present in status register 2. This avoids issues where the MMD is implemented, but does not provide IEEE 802.3 compliant registers (such as the MV88X3310 PHY.) Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 40 ++++++++++++++++++++++++++++++++++-- include/linux/phy.h | 2 ++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 09096c3ceb86..8d9af2772853 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -661,6 +661,28 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, } EXPORT_SYMBOL(phy_device_create); +/* phy_c45_probe_present - checks to see if a MMD is present in the package + * @bus: the target MII bus + * @prtad: PHY package address on the MII bus + * @devad: PHY device (MMD) address + * + * Read the MDIO_STAT2 register, and check whether a device is responding + * at this address. + * + * Returns: negative error number on bus access error, zero if no device + * is responding, or positive if a device is present. + */ +static int phy_c45_probe_present(struct mii_bus *bus, int prtad, int devad) +{ + int stat2; + + stat2 = mdiobus_c45_read(bus, prtad, devad, MDIO_STAT2); + if (stat2 < 0) + return stat2; + + return (stat2 & MDIO_STAT2_DEVPRST) == MDIO_STAT2_DEVPRST_VAL; +} + /* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. * @bus: the target MII bus * @addr: PHY address on the MII bus @@ -711,12 +733,26 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); u32 *devs = &c45_ids->devices_in_package; - int i, phy_reg; + int i, ret, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < num_ids && *devs == 0; i++) { + for (i = 1; i < MDIO_MMD_NUM && *devs == 0; i++) { + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Check that there is a device present at this + * address before reading the devices-in-package + * register to avoid reading garbage from the PHY. + * Some PHYs (88x3310) vendor space is not IEEE802.3 + * compliant. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return -EIO; + + if (!ret) + continue; + } phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c05d0fb5c00..abe318387331 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -388,6 +388,8 @@ enum phy_state { PHY_CABLETEST, }; +#define MDIO_MMD_NUM 32 + /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers * @devices_in_package: Bit vector of devices present. From 5ba33cf4839266f059a6e2342c005709cc6cf256 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:03 +0100 Subject: [PATCH 0121/2056] net: phy: set devices_in_package only after validation Only set the devices_in_package to a non-zero value if we find a valid value for this field, so we avoid leaving it set to e.g. 0x1fffffff. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8d9af2772853..8e11e3d3a801 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -732,13 +732,13 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, struct phy_c45_device_ids *c45_ids) { const int num_ids = ARRAY_SIZE(c45_ids->device_ids); - u32 *devs = &c45_ids->devices_in_package; + u32 devs_in_pkg = 0; int i, ret, phy_reg; /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < MDIO_MMD_NUM && *devs == 0; i++) { + for (i = 1; i < MDIO_MMD_NUM && devs_in_pkg == 0; i++) { if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { /* Check that there is a device present at this * address before reading the devices-in-package @@ -753,28 +753,28 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, if (!ret) continue; } - phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); + phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, &devs_in_pkg); if (phy_reg < 0) return -EIO; } - if ((*devs & 0x1fffffff) == 0x1fffffff) { + if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) { /* If mostly Fs, there is no device there, then let's probe * MMD 0, as some 10G PHYs have zero Devices In package, * e.g. Cortina CS4315/CS4340 PHY. */ - phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); + phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, &devs_in_pkg); if (phy_reg < 0) return -EIO; /* no device there, let's get out of here */ - if ((*devs & 0x1fffffff) == 0x1fffffff) + if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) return -ENODEV; } /* Now probe Device Identifiers for each device present. */ for (i = 1; i < num_ids; i++) { - if (!(c45_ids->devices_in_package & (1 << i))) + if (!(devs_in_pkg & (1 << i))) continue; phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); @@ -788,6 +788,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, c45_ids->device_ids[i] |= phy_reg; } + c45_ids->devices_in_package = devs_in_pkg; + return 0; } From 320ed3bf900075614c43499dc01db8d25717b986 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:08 +0100 Subject: [PATCH 0122/2056] net: phy: split devices_in_package We have two competing requirements for the devices_in_package field. We want to use it as a bit array indicating which MMDs are present, but we also want to know if the Clause 22 registers are present. Since "devices in package" is a term used in the 802.3 specification, keep this as the as-specified values read from the PHY, and introduce a new member "mmds_present" to indicate which MMDs are actually present in the PHY, derived from the "devices in package" value. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-c45.c | 4 ++-- drivers/net/phy/phy_device.c | 6 +++--- drivers/net/phy/phylink.c | 8 ++++---- include/linux/phy.h | 4 +++- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index defe09d94422..bd11e62bfdfe 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -219,7 +219,7 @@ int genphy_c45_read_link(struct phy_device *phydev) int val, devad; bool link = true; - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1); if (val < 0) return val; @@ -409,7 +409,7 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev) int val; linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported); - if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) { + if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) { val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); if (val < 0) return val; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8e11e3d3a801..c1f81c4d0bb3 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -709,9 +709,6 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, return -EIO; *devices_in_package |= phy_reg; - /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ - *devices_in_package &= ~BIT(0); - return 0; } @@ -789,6 +786,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, } c45_ids->devices_in_package = devs_in_pkg; + /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ + c45_ids->mmds_present = devs_in_pkg & ~BIT(0); return 0; } @@ -857,6 +856,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45) int r; c45_ids.devices_in_package = 0; + c45_ids.mmds_present = 0; memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids)); if (is_c45) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0ab65fb75258..7ce787c227b3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1638,11 +1638,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) @@ -1678,11 +1678,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, case MII_BMSR: case MII_PHYSID1: case MII_PHYSID2: - devad = __ffs(phydev->c45_ids.devices_in_package); + devad = __ffs(phydev->c45_ids.mmds_present); break; case MII_ADVERTISE: case MII_LPA: - if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN)) return -EINVAL; devad = MDIO_MMD_AN; if (reg == MII_ADVERTISE) diff --git a/include/linux/phy.h b/include/linux/phy.h index abe318387331..19d9e040ad84 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -392,11 +392,13 @@ enum phy_state { /** * struct phy_c45_device_ids - 802.3-c45 Device Identifiers - * @devices_in_package: Bit vector of devices present. + * @devices_in_package: IEEE 802.3 devices in package register value. + * @mmds_present: bit vector of MMDs present. * @device_ids: The device identifer for each present device. 
*/ struct phy_c45_device_ids { u32 devices_in_package; + u32 mmds_present; u32 device_ids[8]; }; From 389a338999875b6e7b111096e8b6484434556449 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 18 Jun 2020 14:46:13 +0100 Subject: [PATCH 0123/2056] net: phy: read MMD ID from all present MMDs Expand the device_ids[] array to allow all MMD IDs to be read rather than just the first 8 MMDs, but only read the ID if the MDIO_STAT2 register reports that a device really is present here for these new devices to maintain compatibility with our current behaviour. Note that only a limited number of devices have MDIO_STAT2. 88X3310 PHY vendor MMDs are marked as present in the devices_in_package, but do not contain IEEE 802.3 compatible register sets in their lower space. This avoids reading incorrect values as MMD identifiers. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 13 +++++++++++++ include/linux/phy.h | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index c1f81c4d0bb3..29ef4456ac25 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -774,6 +774,19 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, if (!(devs_in_pkg & (1 << i))) continue; + if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { + /* Probe the "Device Present" bits for the vendor MMDs + * to ignore these if they do not contain IEEE 802.3 + * registers. + */ + ret = phy_c45_probe_present(bus, addr, i); + if (ret < 0) + return ret; + + if (!ret) + continue; + } + phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1); if (phy_reg < 0) return -EIO; diff --git a/include/linux/phy.h b/include/linux/phy.h index 19d9e040ad84..9248dd2ce4ca 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -399,7 +399,7 @@ enum phy_state { struct phy_c45_device_ids { u32 devices_in_package; u32 mmds_present; - u32 device_ids[8]; + u32 device_ids[MDIO_MMD_NUM]; }; struct macsec_context; From 11a33de2df06b4ed23844181cb5c03eea67bfed1 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 18 Jun 2020 09:46:48 -0500 Subject: [PATCH 0124/2056] taprio: Use struct_size() in kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. Also, remove unnecessary variable _size_. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/sched/sch_taprio.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index b1eb12d33b9a..e981992634dd 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1108,11 +1108,10 @@ static void setup_txtime(struct taprio_sched *q, static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries) { - size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries + - sizeof(struct __tc_taprio_qopt_offload); struct __tc_taprio_qopt_offload *__offload; - __offload = kzalloc(size, GFP_KERNEL); + __offload = kzalloc(struct_size(__offload, offload.entries, num_entries), + GFP_KERNEL); if (!__offload) return NULL; From c5eb179edd8a169949b1fb153ecfd04649a71c8d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R.
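The struct_size() conversion above is worth a closer look. The following is a minimal, self-contained sketch of the same pattern with made-up structure names (it is not the taprio code), showing what the helper buys over an open-coded size calculation:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only, not the taprio structures. */
struct example_entry {
	u32 command;
	u32 interval;
};

struct example_offload {
	unsigned long flags;
	struct example_entry entries[];	/* flexible array member */
};

/* struct_size(p, entries, n) computes the size of *p plus n trailing
 * entries[] elements, with overflow checking: on overflow the result
 * saturates to SIZE_MAX, so the allocation fails cleanly instead of
 * returning a buffer that is too small.
 */
static struct example_offload *example_alloc(int num_entries)
{
	struct example_offload *p;

	p = kzalloc(struct_size(p, entries, num_entries), GFP_KERNEL);
	return p;
}

The same reasoning applies to the cls_u32 conversion in the next patch.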
Silva" Date: Thu, 18 Jun 2020 09:53:42 -0500 Subject: [PATCH 0125/2056] net/sched: cls_u32: Use struct_size() in kzalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/sched/cls_u32.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index e15ff335953d..771b068f8254 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -796,9 +796,7 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, struct tc_u32_sel *s = &n->sel; struct tc_u_knode *new; - new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), - GFP_KERNEL); - + new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL); if (!new) return NULL; From 49042c220b3a31e25902b36df71b23dc10efa0b8 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:43 +0200 Subject: [PATCH 0126/2056] l3mdev: add infrastructure for table to VRF mapping Add infrastructure to l3mdev (the core code for Layer 3 master devices) in order to find out the corresponding VRF device for a given table id. Therefore, the l3mdev implementations: - can register a callback that returns the device index of the l3mdev associated with a given table id; - can offer the lookup function (table to VRF device). Signed-off-by: Andrea Mayer Signed-off-by: David S. Miller --- include/net/l3mdev.h | 39 +++++++++++++++++++ net/l3mdev/l3mdev.c | 93 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 132 insertions(+) diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index e942372b077b..031c661aa14d 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -10,6 +10,16 @@ #include #include +enum l3mdev_type { + L3MDEV_TYPE_UNSPEC, + L3MDEV_TYPE_VRF, + __L3MDEV_TYPE_MAX +}; + +#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1) + +typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_d); + /** * struct l3mdev_ops - l3mdev operations * @@ -37,6 +47,15 @@ struct l3mdev_ops { #ifdef CONFIG_NET_L3_MASTER_DEV +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id); + int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg); @@ -280,6 +299,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb) return skb; } +static inline +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + return -EOPNOTSUPP; +} + +static inline +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ +} + +static inline +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net, + u32 table_id) +{ + return -ENODEV; +} + static inline int l3mdev_fib_rule_match(struct net *net, struct flowi *fl, struct fib_lookup_arg *arg) diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c index f35899d45a9a..e71ca5aec684 100644 --- a/net/l3mdev/l3mdev.c +++ b/net/l3mdev/l3mdev.c @@ -9,6 +9,99 @@ #include #include +static DEFINE_SPINLOCK(l3mdev_lock); + +struct l3mdev_handler { + lookup_by_table_id_t dev_lookup; +}; + +static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1]; + +static int 
l3mdev_check_type(enum l3mdev_type l3type) +{ + if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX) + return -EINVAL; + + return 0; +} + +int l3mdev_table_lookup_register(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup) { + res = -EBUSY; + goto unlock; + } + + hdlr->dev_lookup = fn; + res = 0; + +unlock: + spin_unlock(&l3mdev_lock); + + return res; +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register); + +void l3mdev_table_lookup_unregister(enum l3mdev_type l3type, + lookup_by_table_id_t fn) +{ + struct l3mdev_handler *hdlr; + + if (l3mdev_check_type(l3type)) + return; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + if (hdlr->dev_lookup == fn) + hdlr->dev_lookup = NULL; + + spin_unlock(&l3mdev_lock); +} +EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister); + +int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, + struct net *net, u32 table_id) +{ + lookup_by_table_id_t lookup; + struct l3mdev_handler *hdlr; + int ifindex = -EINVAL; + int res; + + res = l3mdev_check_type(l3type); + if (res) + return res; + + hdlr = &l3mdev_handlers[l3type]; + + spin_lock(&l3mdev_lock); + + lookup = hdlr->dev_lookup; + if (!lookup) + goto unlock; + + ifindex = lookup(net, table_id); + +unlock: + spin_unlock(&l3mdev_lock); + + return ifindex; +} +EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id); + /** * l3mdev_master_ifindex - get index of L3 master device * @dev: targeted interface From c8baec385737074dad2f792267baa2c134d94ba6 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:44 +0200 Subject: [PATCH 0127/2056] vrf: track associations between VRF devices and tables Add the data structures and the processing logic to keep track of the associations between VRF devices and routing tables. When a VRF is instantiated, it needs to refer to a given routing table. For each table, we explicitly keep track of the existing VRFs that refer to the table. Signed-off-by: Andrea Mayer Signed-off-by: David S. Miller --- drivers/net/vrf.c | 273 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 268 insertions(+), 5 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 43928a1c2f2a..098fdabaa4c5 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -35,12 +36,75 @@ #include #define DRV_NAME "vrf" -#define DRV_VERSION "1.0" +#define DRV_VERSION "1.1" #define FIB_RULE_PREF 1000 /* default preference for FIB rules */ +#define HT_MAP_BITS 4 +#define HASH_INITVAL ((u32)0xcafef00d) + +struct vrf_map { + DECLARE_HASHTABLE(ht, HT_MAP_BITS); + spinlock_t vmap_lock; + + /* shared_tables: + * count how many distinct tables do not comply with the strict mode + * requirement. + * shared_tables value must be 0 in order to enable the strict mode. + * + * example of the evolution of shared_tables: + * | time + * add vrf0 --> table 100 shared_tables = 0 | t0 + * add vrf1 --> table 101 shared_tables = 0 | t1 + * add vrf2 --> table 100 shared_tables = 1 | t2 + * add vrf3 --> table 100 shared_tables = 1 | t3 + * add vrf4 --> table 101 shared_tables = 2 v t4 + * + * shared_tables is a "step function" (or "staircase function") + * and it is increased by one when the second vrf is associated to a + * table. + * + * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1. 
+ * + * at t3, another dev (vrf3) is bound to the same table 100 but the + * value of shared_tables is still 1. + * This means that no matter how many new vrfs will register on the + * table 100, the shared_tables will not increase (considering only + * table 100). + * + * at t4, vrf4 is bound to table 101, and shared_tables = 2. + * + * Looking at the value of shared_tables we can immediately know if + * the strict_mode can or cannot be enforced. Indeed, strict_mode + * can be enforced iff shared_tables = 0. + * + * Conversely, shared_tables is decreased when a vrf is de-associated + * from a table with exactly two associated vrfs. + */ + u32 shared_tables; + + bool strict_mode; +}; + +struct vrf_map_elem { + struct hlist_node hnode; + struct list_head vrf_list; /* VRFs registered to this table */ + + u32 table_id; + int users; + int ifindex; +}; + static unsigned int vrf_net_id; +/* per netns vrf data */ +struct netns_vrf { + /* protected by rtnl lock */ + bool add_fib_rules; + + struct vrf_map vmap; +}; + struct net_vrf { struct rtable __rcu *rth; struct rt6_info __rcu *rt6; @@ -48,6 +112,9 @@ struct net_vrf { struct fib6_table *fib6_table; #endif u32 tb_id; + + struct list_head me_list; /* entry in vrf_map_elem */ + int ifindex; }; struct pcpu_dstats { @@ -103,6 +170,173 @@ static void vrf_get_stats64(struct net_device *dev, } } +static struct vrf_map *netns_vrf_map(struct net *net) +{ + struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); + + return &nn_vrf->vmap; +} + +static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev) +{ + return netns_vrf_map(dev_net(dev)); +} + +static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags) +{ + struct vrf_map_elem *me; + + me = kmalloc(sizeof(*me), flags); + if (!me) + return NULL; + + return me; +} + +static void vrf_map_elem_free(struct vrf_map_elem *me) +{ + kfree(me); +} + +static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id, + int ifindex, int users) +{ + me->table_id = table_id; + me->ifindex = ifindex; + me->users = users; + INIT_LIST_HEAD(&me->vrf_list); +} + +static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap, + u32 table_id) +{ + struct vrf_map_elem *me; + u32 key; + + key = jhash_1word(table_id, HASH_INITVAL); + hash_for_each_possible(vmap->ht, me, hnode, key) { + if (me->table_id == table_id) + return me; + } + + return NULL; +} + +static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me) +{ + u32 table_id = me->table_id; + u32 key; + + key = jhash_1word(table_id, HASH_INITVAL); + hash_add(vmap->ht, &me->hnode, key); +} + +static void vrf_map_del_elem(struct vrf_map_elem *me) +{ + hash_del(&me->hnode); +} + +static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock) +{ + spin_lock(&vmap->vmap_lock); +} + +static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) +{ + spin_unlock(&vmap->vmap_lock); +} + +/* called with rtnl lock held */ +static int +vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack) +{ + struct vrf_map *vmap = netns_vrf_map_by_dev(dev); + struct net_vrf *vrf = netdev_priv(dev); + struct vrf_map_elem *new_me, *me; + u32 table_id = vrf->tb_id; + bool free_new_me = false; + int users; + int res; + + /* we pre-allocate elements used in the spin-locked section (so that we + * keep the spinlock as short as possibile). 
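The map helpers above follow the stock <linux/hashtable.h> plus jhash pattern. As a quick, hedged refresher, here is a generic sketch with illustrative names (it is not the vrf code): hash the key once, then walk only the matching bucket and compare the key to skip collisions.

#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/types.h>

#define EXAMPLE_INITVAL	0xcafef00d	/* same idea as HASH_INITVAL above */

struct example_item {
	struct hlist_node hnode;
	u32 key;
};

static DEFINE_HASHTABLE(example_ht, 4);	/* 2^4 buckets, like HT_MAP_BITS */

static void example_add(struct example_item *it)
{
	hash_add(example_ht, &it->hnode, jhash_1word(it->key, EXAMPLE_INITVAL));
}

static struct example_item *example_lookup(u32 key)
{
	struct example_item *it;

	/* hash_for_each_possible() visits only the bucket the key hashes
	 * to, so the explicit key comparison filters out colliding entries.
	 */
	hash_for_each_possible(example_ht, it, hnode,
			       jhash_1word(key, EXAMPLE_INITVAL)) {
		if (it->key == key)
			return it;
	}

	return NULL;
}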
+ */ + new_me = vrf_map_elem_alloc(GFP_KERNEL); + if (!new_me) + return -ENOMEM; + + vrf_map_elem_init(new_me, table_id, dev->ifindex, 0); + + vrf_map_lock(vmap); + + me = vrf_map_lookup_elem(vmap, table_id); + if (!me) { + me = new_me; + vrf_map_add_elem(vmap, me); + goto link_vrf; + } + + /* we already have an entry in the vrf_map, so it means there is (at + * least) a vrf registered on the specific table. + */ + free_new_me = true; + if (vmap->strict_mode) { + /* vrfs cannot share the same table */ + NL_SET_ERR_MSG(extack, "Table is used by another VRF"); + res = -EBUSY; + goto unlock; + } + +link_vrf: + users = ++me->users; + if (users == 2) + ++vmap->shared_tables; + + list_add(&vrf->me_list, &me->vrf_list); + + res = 0; + +unlock: + vrf_map_unlock(vmap); + + /* clean-up, if needed */ + if (free_new_me) + vrf_map_elem_free(new_me); + + return res; +} + +/* called with rtnl lock held */ +static void vrf_map_unregister_dev(struct net_device *dev) +{ + struct vrf_map *vmap = netns_vrf_map_by_dev(dev); + struct net_vrf *vrf = netdev_priv(dev); + u32 table_id = vrf->tb_id; + struct vrf_map_elem *me; + int users; + + vrf_map_lock(vmap); + + me = vrf_map_lookup_elem(vmap, table_id); + if (!me) + goto unlock; + + list_del(&vrf->me_list); + + users = --me->users; + if (users == 1) { + --vmap->shared_tables; + } else if (users == 0) { + vrf_map_del_elem(me); + + /* no one will refer to this element anymore */ + vrf_map_elem_free(me); + } + +unlock: + vrf_map_unlock(vmap); +} + /* by default VRF devices do not have a qdisc and are expected * to be created with only a single queue. */ @@ -1319,6 +1553,8 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head) netdev_for_each_lower_dev(dev, port_dev, iter) vrf_del_slave(dev, port_dev); + vrf_map_unregister_dev(dev); + unregister_netdevice_queue(dev, head); } @@ -1327,6 +1563,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct net_vrf *vrf = netdev_priv(dev); + struct netns_vrf *nn_vrf; bool *add_fib_rules; struct net *net; int err; @@ -1349,11 +1586,26 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, if (err) goto out; + /* mapping between table_id and vrf; + * note: such binding could not be done in the dev init function + * because dev->ifindex id is not available yet. 
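The users/shared_tables bookkeeping in vrf_map_register_dev() and vrf_map_unregister_dev() can be condensed into a small standalone model. This is plain illustrative C, not kernel code, and it simply reproduces the t0..t4 trace from the vrf_map comment:

#include <stdio.h>

static int users[2];		/* index 0 -> table 100, index 1 -> table 101 */
static int shared_tables;	/* tables referenced by more than one VRF */

static void bind_vrf(int t)
{
	if (++users[t] == 2)	/* second VRF on a table makes it shared */
		shared_tables++;
}

static void unbind_vrf(int t)
{
	if (users[t]-- == 2)	/* dropping from two users back to one */
		shared_tables--;
}

int main(void)
{
	bind_vrf(0);	/* vrf0 -> table 100, shared_tables = 0 */
	bind_vrf(1);	/* vrf1 -> table 101, shared_tables = 0 */
	bind_vrf(0);	/* vrf2 -> table 100, shared_tables = 1 */
	bind_vrf(0);	/* vrf3 -> table 100, shared_tables = 1 */
	bind_vrf(1);	/* vrf4 -> table 101, shared_tables = 2 */

	printf("shared_tables = %d\n", shared_tables);	/* prints 2 */
	return 0;
}

Strict mode (introduced in the next patch) can only be enabled while shared_tables is zero.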
+ */ + vrf->ifindex = dev->ifindex; + + err = vrf_map_register_dev(dev, extack); + if (err) { + unregister_netdevice(dev); + goto out; + } + net = dev_net(dev); - add_fib_rules = net_generic(net, vrf_net_id); + nn_vrf = net_generic(net, vrf_net_id); + + add_fib_rules = &nn_vrf->add_fib_rules; if (*add_fib_rules) { err = vrf_add_fib_rules(dev); if (err) { + vrf_map_unregister_dev(dev); unregister_netdevice(dev); goto out; } @@ -1440,12 +1692,23 @@ static struct notifier_block vrf_notifier_block __read_mostly = { .notifier_call = vrf_device_event, }; +static int vrf_map_init(struct vrf_map *vmap) +{ + spin_lock_init(&vmap->vmap_lock); + hash_init(vmap->ht); + + vmap->strict_mode = false; + + return 0; +} + /* Initialize per network namespace state */ static int __net_init vrf_netns_init(struct net *net) { - bool *add_fib_rules = net_generic(net, vrf_net_id); + struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); - *add_fib_rules = true; + nn_vrf->add_fib_rules = true; + vrf_map_init(&nn_vrf->vmap); return 0; } @@ -1453,7 +1716,7 @@ static int __net_init vrf_netns_init(struct net *net) static struct pernet_operations vrf_net_ops __net_initdata = { .init = vrf_netns_init, .id = &vrf_net_id, - .size = sizeof(bool), + .size = sizeof(struct netns_vrf), }; static int __init vrf_init_module(void) From 33306f1aaf82ba7dd072d4d7b97de63b1033cce3 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:45 +0200 Subject: [PATCH 0128/2056] vrf: add sysctl parameter for strict mode Add net.vrf.strict_mode sysctl parameter. When net.vrf.strict_mode=0 (default) it is possible to associate multiple VRF devices to the same table. Conversely, when net.vrf.strict_mode=1 a table can be associated to a single VRF device. When switching from net.vrf.strict_mode=0 to net.vrf.strict_mode=1, a check is performed to verify that all tables have at most one VRF associated, otherwise the switch is not allowed. The net.vrf.strict_mode parameter is per network namespace. Signed-off-by: Andrea Mayer Signed-off-by: David S. Miller --- drivers/net/vrf.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 098fdabaa4c5..c53e57354d2c 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -103,6 +103,7 @@ struct netns_vrf { bool add_fib_rules; struct vrf_map vmap; + struct ctl_table_header *ctl_hdr; }; struct net_vrf { @@ -246,6 +247,52 @@ static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) spin_unlock(&vmap->vmap_lock); } +static bool vrf_strict_mode(struct vrf_map *vmap) +{ + bool strict_mode; + + vrf_map_lock(vmap); + strict_mode = vmap->strict_mode; + vrf_map_unlock(vmap); + + return strict_mode; +} + +static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode) +{ + bool *cur_mode; + int res = 0; + + vrf_map_lock(vmap); + + cur_mode = &vmap->strict_mode; + if (*cur_mode == new_mode) + goto unlock; + + if (*cur_mode) { + /* disable strict mode */ + *cur_mode = false; + } else { + if (vmap->shared_tables) { + /* we cannot allow strict_mode because there are some + * vrfs that share one or more tables. + */ + res = -EBUSY; + goto unlock; + } + + /* no tables are shared among vrfs, so we can go back + * to 1:1 association between a vrf with its table. 
+ */ + *cur_mode = true; + } + +unlock: + vrf_map_unlock(vmap); + + return res; +} + /* called with rtnl lock held */ static int vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack) @@ -1702,19 +1749,90 @@ static int vrf_map_init(struct vrf_map *vmap) return 0; } +static int vrf_shared_table_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos) +{ + struct net *net = (struct net *)table->extra1; + struct vrf_map *vmap = netns_vrf_map(net); + int proc_strict_mode = 0; + struct ctl_table tmp = { + .procname = table->procname, + .data = &proc_strict_mode, + .maxlen = sizeof(int), + .mode = table->mode, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }; + int ret; + + if (!write) + proc_strict_mode = vrf_strict_mode(vmap); + + ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); + + if (write && ret == 0) + ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode); + + return ret; +} + +static const struct ctl_table vrf_table[] = { + { + .procname = "strict_mode", + .data = NULL, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = vrf_shared_table_handler, + /* set by the vrf_netns_init */ + .extra1 = NULL, + }, + { }, +}; + /* Initialize per network namespace state */ static int __net_init vrf_netns_init(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); + struct ctl_table *table; + int res; nn_vrf->add_fib_rules = true; vrf_map_init(&nn_vrf->vmap); + table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + /* init the extra1 parameter with the reference to current netns */ + table[0].extra1 = net; + + nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table); + if (!nn_vrf->ctl_hdr) { + res = -ENOMEM; + goto free_table; + } + return 0; + +free_table: + kfree(table); + + return res; +} + +static void __net_exit vrf_netns_exit(struct net *net) +{ + struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); + struct ctl_table *table; + + table = nn_vrf->ctl_hdr->ctl_table_arg; + unregister_net_sysctl_table(nn_vrf->ctl_hdr); + kfree(table); } static struct pernet_operations vrf_net_ops __net_initdata = { .init = vrf_netns_init, + .exit = vrf_netns_exit, .id = &vrf_net_id, .size = sizeof(struct netns_vrf), }; From a59a8ffd4a1bc413c1e0169cf8a31cf9b4237264 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:46 +0200 Subject: [PATCH 0129/2056] vrf: add l3mdev registration for table to VRF device lookup During the initialization phase of the VRF module, the callback for table to VRF device lookup is registered in l3mdev. Signed-off-by: Andrea Mayer Signed-off-by: David S. 
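With the callback registered, other kernel code can resolve a table id to its VRF device. The snippet below is a hypothetical consumer, not part of the patch: find_vrf_by_table() is an invented name, while l3mdev_ifindex_lookup_by_table_id() and dev_get_by_index() are the real interfaces it would use. The lookup only succeeds when strict mode is enabled, since otherwise a table may map to several VRFs and the call returns -EPERM.

#include <linux/netdevice.h>
#include <net/l3mdev.h>

/* Hypothetical consumer: return the VRF netdevice bound to @table_id,
 * or NULL. The caller owns a reference and must dev_put() it.
 */
static struct net_device *find_vrf_by_table(struct net *net, u32 table_id)
{
	int ifindex;

	ifindex = l3mdev_ifindex_lookup_by_table_id(L3MDEV_TYPE_VRF, net,
						    table_id);
	if (ifindex < 0)	/* -EPERM: strict mode off, -ENODEV: no VRF */
		return NULL;

	return dev_get_by_index(net, ifindex);
}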
Miller --- drivers/net/vrf.c | 59 +++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index c53e57354d2c..46599606ff10 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -183,6 +183,19 @@ static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev) return netns_vrf_map(dev_net(dev)); } +static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me) +{ + struct list_head *me_head = &me->vrf_list; + struct net_vrf *vrf; + + if (list_empty(me_head)) + return -ENODEV; + + vrf = list_first_entry(me_head, struct net_vrf, me_list); + + return vrf->ifindex; +} + static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags) { struct vrf_map_elem *me; @@ -384,6 +397,34 @@ static void vrf_map_unregister_dev(struct net_device *dev) vrf_map_unlock(vmap); } +/* return the vrf device index associated with the table_id */ +static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id) +{ + struct vrf_map *vmap = netns_vrf_map(net); + struct vrf_map_elem *me; + int ifindex; + + vrf_map_lock(vmap); + + if (!vmap->strict_mode) { + ifindex = -EPERM; + goto unlock; + } + + me = vrf_map_lookup_elem(vmap, table_id); + if (!me) { + ifindex = -ENODEV; + goto unlock; + } + + ifindex = vrf_map_elem_get_vrf_ifindex(me); + +unlock: + vrf_map_unlock(vmap); + + return ifindex; +} + /* by default VRF devices do not have a qdisc and are expected * to be created with only a single queue. */ @@ -1847,14 +1888,24 @@ static int __init vrf_init_module(void) if (rc < 0) goto error; + rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF, + vrf_ifindex_lookup_by_table_id); + if (rc < 0) + goto unreg_pernet; + rc = rtnl_link_register(&vrf_link_ops); - if (rc < 0) { - unregister_pernet_subsys(&vrf_net_ops); - goto error; - } + if (rc < 0) + goto table_lookup_unreg; return 0; +table_lookup_unreg: + l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF, + vrf_ifindex_lookup_by_table_id); + +unreg_pernet: + unregister_pernet_subsys(&vrf_net_ops); + error: unregister_netdevice_notifier(&vrf_notifier_block); return rc; From 8735e6eaa43899d20a1a54b40e79bfa6b324b107 Mon Sep 17 00:00:00 2001 From: Andrea Mayer Date: Sat, 20 Jun 2020 00:54:47 +0200 Subject: [PATCH 0130/2056] selftests: add selftest for the VRF strict mode The new strict mode functionality is tested in different configurations and on different network namespaces. Signed-off-by: Andrea Mayer Signed-off-by: David S. Miller --- .../selftests/net/vrf_strict_mode_test.sh | 390 ++++++++++++++++++ 1 file changed, 390 insertions(+) create mode 100755 tools/testing/selftests/net/vrf_strict_mode_test.sh diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh new file mode 100755 index 000000000000..5274f4a1fba1 --- /dev/null +++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh @@ -0,0 +1,390 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test is designed for testing the new VRF strict_mode functionality. + +ret=0 + +# identifies the "init" network namespace which is often called root network +# namespace. 
+INIT_NETNS_NAME="init" + +PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no} + +log_test() +{ + local rc=$1 + local expected=$2 + local msg="$3" + + if [ ${rc} -eq ${expected} ]; then + nsuccess=$((nsuccess+1)) + printf "\n TEST: %-60s [ OK ]\n" "${msg}" + else + ret=1 + nfail=$((nfail+1)) + printf "\n TEST: %-60s [FAIL]\n" "${msg}" + if [ "${PAUSE_ON_FAIL}" = "yes" ]; then + echo + echo "hit enter to continue, 'q' to quit" + read a + [ "$a" = "q" ] && exit 1 + fi + fi +} + +print_log_test_results() +{ + if [ "$TESTS" != "none" ]; then + printf "\nTests passed: %3d\n" ${nsuccess} + printf "Tests failed: %3d\n" ${nfail} + fi +} + +log_section() +{ + echo + echo "################################################################################" + echo "TEST SECTION: $*" + echo "################################################################################" +} + +ip_expand_args() +{ + local nsname=$1 + local nsarg="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + nsarg="-netns ${nsname}" + fi + + echo "${nsarg}" +} + +vrf_count() +{ + local nsname=$1 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} -o link show type vrf | wc -l +} + +count_vrf_by_table_id() +{ + local nsname=$1 + local tableid=$2 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} -d -o link show type vrf | grep "table ${tableid}" | wc -l +} + +add_vrf() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link add ${vrfname} type vrf table ${vrftable} &>/dev/null +} + +add_vrf_and_check() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local cnt + local rc + + add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$? + + cnt=$(count_vrf_by_table_id ${nsname} ${vrftable}) + + log_test ${rc} 0 "${nsname}: add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}" +} + +add_vrf_and_check_fail() +{ + local nsname=$1 + local vrfname=$2 + local vrftable=$3 + local cnt + local rc + + add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$? + + cnt=$(count_vrf_by_table_id ${nsname} ${vrftable}) + + log_test ${rc} 2 "${nsname}: CANNOT add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}" +} + +del_vrf_and_check() +{ + local nsname=$1 + local vrfname=$2 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link del ${vrfname} + log_test $? 0 "${nsname}: remove vrf ${vrfname}" +} + +config_vrf_and_check() +{ + local nsname=$1 + local addr=$2 + local vrfname=$3 + local nsarg="$(ip_expand_args ${nsname})" + + ip ${nsarg} link set dev ${vrfname} up && \ + ip ${nsarg} addr add ${addr} dev ${vrfname} + log_test $? 0 "${nsname}: vrf ${vrfname} up, addr ${addr}" +} + +read_strict_mode() +{ + local nsname=$1 + local rval + local rc=0 + local nsexec="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + # a custom network namespace is provided + nsexec="ip netns exec ${nsname}" + fi + + rval="$(${nsexec} bash -c "cat /proc/sys/net/vrf/strict_mode" | \ + grep -E "^[0-1]$")" &> /dev/null + if [ $? 
-ne 0 ]; then + # set errors + rval=255 + rc=1 + fi + + # on success, rval can be only 0 or 1; on error, rval is equal to 255 + echo ${rval} + return ${rc} +} + +read_strict_mode_compare_and_check() +{ + local nsname=$1 + local expected=$2 + local res + + res="$(read_strict_mode ${nsname})" + log_test ${res} ${expected} "${nsname}: check strict_mode=${res}" +} + +set_strict_mode() +{ + local nsname=$1 + local val=$2 + local nsexec="" + + if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then + # a custom network namespace is provided + nsexec="ip netns exec ${nsname}" + fi + + ${nsexec} bash -c "echo ${val} >/proc/sys/net/vrf/strict_mode" &>/dev/null +} + +enable_strict_mode() +{ + local nsname=$1 + + set_strict_mode ${nsname} 1 +} + +disable_strict_mode() +{ + local nsname=$1 + + set_strict_mode ${nsname} 0 +} + +disable_strict_mode_and_check() +{ + local nsname=$1 + + disable_strict_mode ${nsname} + log_test $? 0 "${nsname}: disable strict_mode (=0)" +} + +enable_strict_mode_and_check() +{ + local nsname=$1 + + enable_strict_mode ${nsname} + log_test $? 0 "${nsname}: enable strict_mode (=1)" +} + +enable_strict_mode_and_check_fail() +{ + local nsname=$1 + + enable_strict_mode ${nsname} + log_test $? 1 "${nsname}: CANNOT enable strict_mode" +} + +strict_mode_check_default() +{ + local nsname=$1 + local strictmode + local vrfcnt + + vrfcnt=$(vrf_count ${nsname}) + strictmode=$(read_strict_mode ${nsname}) + log_test ${strictmode} 0 "${nsname}: strict_mode=0 by default, ${vrfcnt} vrfs" +} + +setup() +{ + modprobe vrf + + ip netns add testns + ip netns exec testns ip link set lo up +} + +cleanup() +{ + ip netns del testns 2>/dev/null + + ip link del vrf100 2>/dev/null + ip link del vrf101 2>/dev/null + ip link del vrf102 2>/dev/null + + echo 0 >/proc/sys/net/vrf/strict_mode 2>/dev/null +} + +vrf_strict_mode_tests_init() +{ + vrf_strict_mode_check_support init + + strict_mode_check_default init + + add_vrf_and_check init vrf100 100 + config_vrf_and_check init 172.16.100.1/24 vrf100 + + enable_strict_mode_and_check init + + add_vrf_and_check_fail init vrf101 100 + + disable_strict_mode_and_check init + + add_vrf_and_check init vrf101 100 + config_vrf_and_check init 172.16.101.1/24 vrf101 + + enable_strict_mode_and_check_fail init + + del_vrf_and_check init vrf101 + + enable_strict_mode_and_check init + + add_vrf_and_check init vrf102 102 + config_vrf_and_check init 172.16.102.1/24 vrf102 + + # the strict_modle is enabled in the init +} + +vrf_strict_mode_tests_testns() +{ + vrf_strict_mode_check_support testns + + strict_mode_check_default testns + + enable_strict_mode_and_check testns + + add_vrf_and_check testns vrf100 100 + config_vrf_and_check testns 10.0.100.1/24 vrf100 + + add_vrf_and_check_fail testns vrf101 100 + + add_vrf_and_check_fail testns vrf102 100 + + add_vrf_and_check testns vrf200 200 + + disable_strict_mode_and_check testns + + add_vrf_and_check testns vrf101 100 + + add_vrf_and_check testns vrf102 100 + + #the strict_mode is disabled in the testns +} + +vrf_strict_mode_tests_mix() +{ + read_strict_mode_compare_and_check init 1 + + read_strict_mode_compare_and_check testns 0 + + del_vrf_and_check testns vrf101 + + del_vrf_and_check testns vrf102 + + disable_strict_mode_and_check init + + enable_strict_mode_and_check testns + + enable_strict_mode_and_check init + enable_strict_mode_and_check init + + disable_strict_mode_and_check testns + disable_strict_mode_and_check testns + + read_strict_mode_compare_and_check init 1 + + read_strict_mode_compare_and_check testns 0 +} + 
+vrf_strict_mode_tests() +{ + log_section "VRF strict_mode test on init network namespace" + vrf_strict_mode_tests_init + + log_section "VRF strict_mode test on testns network namespace" + vrf_strict_mode_tests_testns + + log_section "VRF strict_mode test mixing init and testns network namespaces" + vrf_strict_mode_tests_mix +} + +vrf_strict_mode_check_support() +{ + local nsname=$1 + local output + local rc + + output="$(lsmod | grep '^vrf' | awk '{print $1}')" + if [ -z "${output}" ]; then + modinfo vrf || return $? + fi + + # we do not care about the value of the strict_mode; we only check if + # the strict_mode parameter is available or not. + read_strict_mode ${nsname} &>/dev/null; rc=$? + log_test ${rc} 0 "${nsname}: net.vrf.strict_mode is available" + + return ${rc} +} + +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit 0 +fi + +if [ ! -x "$(command -v ip)" ]; then + echo "SKIP: Could not run test without ip tool" + exit 0 +fi + +cleanup &> /dev/null + +setup +vrf_strict_mode_tests +cleanup + +print_log_test_results + +exit $ret From 3ab4ceb6e9639e4e42d473e46ae7976c24187876 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:36 +0300 Subject: [PATCH 0131/2056] net: dsa: felix: make vcap is2 keys and actions static Get rid of some sparse warnings. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 1dd9e348152d..2067776773f7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -557,7 +557,7 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = { { .offset = 0x111, .name = "drop_green_prio_7", }, }; -struct vcap_field vsc9959_vcap_is2_keys[] = { +static struct vcap_field vsc9959_vcap_is2_keys[] = { /* Common: 41 bits */ [VCAP_IS2_TYPE] = { 0, 4}, [VCAP_IS2_HK_FIRST] = { 4, 1}, @@ -637,7 +637,7 @@ struct vcap_field vsc9959_vcap_is2_keys[] = { [VCAP_IS2_HK_OAM_IS_Y1731] = {182, 1}, }; -struct vcap_field vsc9959_vcap_is2_actions[] = { +static struct vcap_field vsc9959_vcap_is2_actions[] = { [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1}, [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, From 7eb5c96a7cae01f500b584d21f6db3abf1aad33e Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:37 +0300 Subject: [PATCH 0132/2056] net: mscc: ocelot: use plain int when interacting with TCAM tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sparse is rightfully complaining about the fact that: warning: comparison of unsigned expression < 0 is always false [-Wtype-limits] 26 | __builtin_constant_p((l) > (h)), (l) > (h), 0))) | ^ note: in expansion of macro ‘GENMASK_INPUT_CHECK’ 39 | (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l)) | ^~~~~~~~~~~~~~~~~~~ note: in expansion of macro ‘GENMASK’ 127 | mask = GENMASK(width, 0); | ^~~~~~~ So replace the variables that go into GENMASK with plain, signed integer types. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot_ace.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c index dfd82a3baab2..17b642e4d291 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ b/drivers/net/ethernet/mscc/ocelot_ace.c @@ -119,7 +119,8 @@ static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data) static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - u32 action_words, i, width, mask; + u32 action_words, mask; + int i, width; /* Encode action type */ width = vcap_is2->action_type_width; @@ -141,7 +142,8 @@ static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data) static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - u32 action_words, i, width; + u32 action_words; + int i, width; action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH); @@ -161,8 +163,8 @@ static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data) static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - u32 i, col, offset, count, cnt, base; - u32 width = vcap_is2->tg_width; + int i, col, offset, count, cnt, base; + int width = vcap_is2->tg_width; count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4); col = (ix % 2); From ff4b0bc62353d6c1fc3b347189464f85f6fa5643 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:38 +0300 Subject: [PATCH 0133/2056] net: mscc: ocelot: access EtherType using __be16 Get rid of sparse "cast to restricted __be16" warnings. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
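For context on the warning being silenced here, a small hedged sketch of how the endian annotations are meant to be used (illustrative structure and function names, not the ocelot code): storage that holds network-order data is accessed through __be16 pointers, and every conversion goes through htons()/ntohs(), which is what sparse verifies when the kernel is built with make C=1.

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_key {
	u8 value[2];	/* raw match bytes, stored in network order */
};

/* Cast to __be16 and convert explicitly. Mixing plain u16 with the
 * __be16 values produced/consumed by htons()/ntohs() is what makes
 * sparse emit the "restricted __be16" warnings mentioned above.
 */
static void example_set_proto(struct example_key *key, u16 proto)
{
	*(__be16 *)key->value = htons(proto);
}

static u16 example_get_proto(const struct example_key *key)
{
	return ntohs(*(__be16 *)key->value);
}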
Miller --- drivers/net/ethernet/mscc/ocelot_ace.c | 4 ++-- drivers/net/ethernet/mscc/ocelot_flower.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c index 17b642e4d291..1dd881340067 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ b/drivers/net/ethernet/mscc/ocelot_ace.c @@ -746,8 +746,8 @@ static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) if (ace->type != OCELOT_ACE_TYPE_ETYPE) return false; - proto = ntohs(*(u16 *)ace->frame.etype.etype.value); - mask = ntohs(*(u16 *)ace->frame.etype.etype.mask); + proto = ntohs(*(__be16 *)ace->frame.etype.etype.value); + mask = ntohs(*(__be16 *)ace->frame.etype.etype.mask); /* ETH_P_ALL match, so all protocols below are included */ if (mask == 0) diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index c90bafbd651f..99338d27aec0 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -176,8 +176,8 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, if (proto < ETH_P_802_3_MIN) return -EOPNOTSUPP; ace->type = OCELOT_ACE_TYPE_ETYPE; - *(u16 *)ace->frame.etype.etype.value = htons(proto); - *(u16 *)ace->frame.etype.etype.mask = 0xffff; + *(__be16 *)ace->frame.etype.etype.value = htons(proto); + *(__be16 *)ace->frame.etype.etype.mask = htons(0xffff); } /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */ From 589aa6e7c9de322d47eb33a5cee8cc38838319e6 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:39 +0300 Subject: [PATCH 0134/2056] net: mscc: ocelot: rename ocelot_board.c to ocelot_vsc7514.c To follow the model of felix and seville where we have one platform-specific file, rename this file to the actual SoC it serves. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/Makefile | 2 +- drivers/net/ethernet/mscc/{ocelot_board.c => ocelot_vsc7514.c} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename drivers/net/ethernet/mscc/{ocelot_board.c => ocelot_vsc7514.c} (100%) diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 91b33b55054e..ad97a5cca6f9 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -2,4 +2,4 @@ obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o mscc_ocelot_common-y := ocelot.o ocelot_io.o mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o -obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_vsc7514.o diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c similarity index 100% rename from drivers/net/ethernet/mscc/ocelot_board.c rename to drivers/net/ethernet/mscc/ocelot_vsc7514.c From 56583862b87b70dded31b1768efe3d47d6066887 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:40 +0300 Subject: [PATCH 0135/2056] net: mscc: ocelot: rename module to mscc_ocelot mscc_ocelot is a slightly better name for a module than ocelot_board or ocelot_vsc7514 is, so let's use that. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index ad97a5cca6f9..53572bb76ccd 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -2,4 +2,5 @@ obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o mscc_ocelot_common-y := ocelot.o ocelot_io.o mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o -obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_vsc7514.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += mscc_ocelot.o +mscc_ocelot-y := ocelot_vsc7514.o From f4d0323bae4ed24d9f9f5bba3d27def06354fee7 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:41 +0300 Subject: [PATCH 0136/2056] net: mscc: ocelot: convert MSCC_OCELOT_SWITCH into a library Hide the CONFIG_MSCC_OCELOT_SWITCH option from users. It is meant to be only a hardware library which is selected by the drivers that use it (ocelot, felix). Since it is "selected" from Kconfig, all its dependencies are manually transferred to the driver that selects it. This is because "select" in Kconfig language is a bit of a mess, and doesn't handle dependencies of selected options quite right. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/Kconfig | 4 +++- drivers/net/ethernet/mscc/Kconfig | 18 ++++++++++-------- drivers/net/ethernet/mscc/Makefile | 13 ++++++++++--- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index a5b7cca03d09..3d3c2a6fb0c0 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -4,7 +4,9 @@ config NET_DSA_MSCC_FELIX depends on NET_DSA && PCI depends on NET_VENDOR_MICROSEMI depends on NET_VENDOR_FREESCALE - select MSCC_OCELOT_SWITCH + depends on HAS_IOMEM + depends on REGMAP_MMIO + select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT select FSL_ENETC_MDIO help diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index bcec0587cf61..24db927e8b30 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -11,20 +11,22 @@ config NET_VENDOR_MICROSEMI if NET_VENDOR_MICROSEMI -config MSCC_OCELOT_SWITCH - tristate "Ocelot switch driver" - depends on NET_SWITCHDEV - depends on HAS_IOMEM - select PHYLIB - select REGMAP_MMIO +# Users should depend on NET_SWITCHDEV, HAS_IOMEM, PHYLIB and REGMAP_MMIO +config MSCC_OCELOT_SWITCH_LIB + tristate help - This driver supports the Ocelot network switch device. + This is a hardware support library for Ocelot network switches. It is + used by switchdev as well as by DSA drivers. config MSCC_OCELOT_SWITCH_OCELOT tristate "Ocelot switch driver on Ocelot" - depends on MSCC_OCELOT_SWITCH + depends on NET_SWITCHDEV depends on GENERIC_PHY + depends on REGMAP_MMIO + depends on HAS_IOMEM + depends on PHYLIB depends on OF_NET + select MSCC_OCELOT_SWITCH_LIB help This driver supports the Ocelot network switch device as present on the Ocelot SoCs. 
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 53572bb76ccd..77222e47d63f 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -1,6 +1,13 @@ # SPDX-License-Identifier: (GPL-2.0 OR MIT) -obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o -mscc_ocelot_common-y := ocelot.o ocelot_io.o -mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o +mscc_ocelot_switch_lib-y := \ + ocelot.o \ + ocelot_io.o \ + ocelot_regs.o \ + ocelot_tc.o \ + ocelot_police.o \ + ocelot_ace.o \ + ocelot_flower.o \ + ocelot_ptp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += mscc_ocelot.o mscc_ocelot-y := ocelot_vsc7514.o From 14addfb6356b738269d36eb3204cc4fedd09760a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:42 +0300 Subject: [PATCH 0137/2056] net: mscc: ocelot: rename MSCC_OCELOT_SWITCH_OCELOT to MSCC_OCELOT_SWITCH Putting 'ocelot' in the config's name twice just to say that 'it's the ocelot driver running on the ocelot SoC' is a bit confusing. Instead, it's just the ocelot driver. Now that we've renamed the previous symbol that was holding the MSCC_OCELOT_SWITCH_OCELOT into *_LIB (because that's what it is), we're free to use this name for the driver. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/Kconfig | 6 +++--- drivers/net/ethernet/mscc/Makefile | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 24db927e8b30..3cfd1b629886 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -18,8 +18,8 @@ config MSCC_OCELOT_SWITCH_LIB This is a hardware support library for Ocelot network switches. It is used by switchdev as well as by DSA drivers. -config MSCC_OCELOT_SWITCH_OCELOT - tristate "Ocelot switch driver on Ocelot" +config MSCC_OCELOT_SWITCH + tristate "Ocelot switch driver" depends on NET_SWITCHDEV depends on GENERIC_PHY depends on REGMAP_MMIO @@ -29,6 +29,6 @@ config MSCC_OCELOT_SWITCH_OCELOT select MSCC_OCELOT_SWITCH_LIB help This driver supports the Ocelot network switch device as present on - the Ocelot SoCs. + the Ocelot SoCs (VSC7514). endif # NET_VENDOR_MICROSEMI diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 77222e47d63f..c6d372b6dc3f 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -9,5 +9,5 @@ mscc_ocelot_switch_lib-y := \ ocelot_ace.o \ ocelot_flower.o \ ocelot_ptp.o -obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += mscc_ocelot.o +obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o mscc_ocelot-y := ocelot_vsc7514.o From d9feb9049973332de0242a08248e069113bf5761 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:43 +0300 Subject: [PATCH 0138/2056] net: mscc: ocelot: move ocelot_regs.c into ocelot_vsc7514.c ocelot_regs.c actually shouldn't be part of the common library. It describes the register map of the VSC7514 switch. The way ocelot switches work, they'll have highly optimized register maps, so another SoC will likely have the same registers but laid out completely different in memory (so there's little room for reusing this structure). So move it to ocelot_vsc7514.c instead. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/Makefile | 1 - drivers/net/ethernet/mscc/ocelot.h | 1 - drivers/net/ethernet/mscc/ocelot_regs.c | 450 --------------------- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 442 ++++++++++++++++++++ 4 files changed, 442 insertions(+), 452 deletions(-) delete mode 100644 drivers/net/ethernet/mscc/ocelot_regs.c diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index c6d372b6dc3f..5d0b6c1da3a0 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -3,7 +3,6 @@ obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o mscc_ocelot_switch_lib-y := \ ocelot.o \ ocelot_io.o \ - ocelot_regs.o \ ocelot_tc.o \ ocelot_police.o \ ocelot_ace.o \ diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index f0a15aa187f2..a834747ec9a3 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -66,7 +66,6 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); #define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) #define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) -int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops); int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, struct phy_device *phy); diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c deleted file mode 100644 index 81d81ff75646..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_regs.c +++ /dev/null @@ -1,450 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR MIT) -/* - * Microsemi Ocelot Switch driver - * - * Copyright (c) 2017 Microsemi Corporation - */ -#include "ocelot.h" -#include - -static const u32 ocelot_ana_regmap[] = { - REG(ANA_ADVLEARN, 0x009000), - REG(ANA_VLANMASK, 0x009004), - REG(ANA_PORT_B_DOMAIN, 0x009008), - REG(ANA_ANAGEFIL, 0x00900c), - REG(ANA_ANEVENTS, 0x009010), - REG(ANA_STORMLIMIT_BURST, 0x009014), - REG(ANA_STORMLIMIT_CFG, 0x009018), - REG(ANA_ISOLATED_PORTS, 0x009028), - REG(ANA_COMMUNITY_PORTS, 0x00902c), - REG(ANA_AUTOAGE, 0x009030), - REG(ANA_MACTOPTIONS, 0x009034), - REG(ANA_LEARNDISC, 0x009038), - REG(ANA_AGENCTRL, 0x00903c), - REG(ANA_MIRRORPORTS, 0x009040), - REG(ANA_EMIRRORPORTS, 0x009044), - REG(ANA_FLOODING, 0x009048), - REG(ANA_FLOODING_IPMC, 0x00904c), - REG(ANA_SFLOW_CFG, 0x009050), - REG(ANA_PORT_MODE, 0x009080), - REG(ANA_PGID_PGID, 0x008c00), - REG(ANA_TABLES_ANMOVED, 0x008b30), - REG(ANA_TABLES_MACHDATA, 0x008b34), - REG(ANA_TABLES_MACLDATA, 0x008b38), - REG(ANA_TABLES_MACACCESS, 0x008b3c), - REG(ANA_TABLES_MACTINDX, 0x008b40), - REG(ANA_TABLES_VLANACCESS, 0x008b44), - REG(ANA_TABLES_VLANTIDX, 0x008b48), - REG(ANA_TABLES_ISDXACCESS, 0x008b4c), - REG(ANA_TABLES_ISDXTIDX, 0x008b50), - REG(ANA_TABLES_ENTRYLIM, 0x008b00), - REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), - REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), - REG(ANA_MSTI_STATE, 0x008e00), - REG(ANA_PORT_VLAN_CFG, 0x007000), - REG(ANA_PORT_DROP_CFG, 0x007004), - REG(ANA_PORT_QOS_CFG, 0x007008), - REG(ANA_PORT_VCAP_CFG, 0x00700c), - REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), - REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), - REG(ANA_PORT_PCP_DEI_MAP, 0x007020), - REG(ANA_PORT_CPU_FWD_CFG, 0x007060), - REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), - REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), - REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), - REG(ANA_PORT_PORT_CFG, 0x007070), - REG(ANA_PORT_POL_CFG, 0x007074), - 
REG(ANA_PORT_PTP_CFG, 0x007078), - REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), - REG(ANA_OAM_UPM_LM_CNT, 0x007c00), - REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), - REG(ANA_PFC_PFC_CFG, 0x008800), - REG(ANA_PFC_PFC_TIMER, 0x008804), - REG(ANA_IPT_OAM_MEP_CFG, 0x008000), - REG(ANA_IPT_IPT, 0x008004), - REG(ANA_PPT_PPT, 0x008ac0), - REG(ANA_FID_MAP_FID_MAP, 0x000000), - REG(ANA_AGGR_CFG, 0x0090b4), - REG(ANA_CPUQ_CFG, 0x0090b8), - REG(ANA_CPUQ_CFG2, 0x0090bc), - REG(ANA_CPUQ_8021_CFG, 0x0090c0), - REG(ANA_DSCP_CFG, 0x009100), - REG(ANA_DSCP_REWR_CFG, 0x009200), - REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), - REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), - REG(ANA_VRAP_CFG, 0x009280), - REG(ANA_VRAP_HDR_DATA, 0x009284), - REG(ANA_VRAP_HDR_MASK, 0x009288), - REG(ANA_DISCARD_CFG, 0x00928c), - REG(ANA_FID_CFG, 0x009290), - REG(ANA_POL_PIR_CFG, 0x004000), - REG(ANA_POL_CIR_CFG, 0x004004), - REG(ANA_POL_MODE_CFG, 0x004008), - REG(ANA_POL_PIR_STATE, 0x00400c), - REG(ANA_POL_CIR_STATE, 0x004010), - REG(ANA_POL_STATE, 0x004014), - REG(ANA_POL_FLOWC, 0x008b80), - REG(ANA_POL_HYST, 0x008bec), - REG(ANA_POL_MISC_CFG, 0x008bf0), -}; - -static const u32 ocelot_qs_regmap[] = { - REG(QS_XTR_GRP_CFG, 0x000000), - REG(QS_XTR_RD, 0x000008), - REG(QS_XTR_FRM_PRUNING, 0x000010), - REG(QS_XTR_FLUSH, 0x000018), - REG(QS_XTR_DATA_PRESENT, 0x00001c), - REG(QS_XTR_CFG, 0x000020), - REG(QS_INJ_GRP_CFG, 0x000024), - REG(QS_INJ_WR, 0x00002c), - REG(QS_INJ_CTRL, 0x000034), - REG(QS_INJ_STATUS, 0x00003c), - REG(QS_INJ_ERR, 0x000040), - REG(QS_INH_DBG, 0x000048), -}; - -static const u32 ocelot_qsys_regmap[] = { - REG(QSYS_PORT_MODE, 0x011200), - REG(QSYS_SWITCH_PORT_MODE, 0x011234), - REG(QSYS_STAT_CNT_CFG, 0x011264), - REG(QSYS_EEE_CFG, 0x011268), - REG(QSYS_EEE_THRES, 0x011294), - REG(QSYS_IGR_NO_SHARING, 0x011298), - REG(QSYS_EGR_NO_SHARING, 0x01129c), - REG(QSYS_SW_STATUS, 0x0112a0), - REG(QSYS_EXT_CPU_CFG, 0x0112d0), - REG(QSYS_PAD_CFG, 0x0112d4), - REG(QSYS_CPU_GROUP_MAP, 0x0112d8), - REG(QSYS_QMAP, 0x0112dc), - REG(QSYS_ISDX_SGRP, 0x011400), - REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), - REG(QSYS_TFRM_MISC, 0x011310), - REG(QSYS_TFRM_PORT_DLY, 0x011314), - REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), - REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), - REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), - REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), - REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), - REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), - REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), - REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), - REG(QSYS_RED_PROFILE, 0x011338), - REG(QSYS_RES_QOS_MODE, 0x011378), - REG(QSYS_RES_CFG, 0x012000), - REG(QSYS_RES_STAT, 0x012004), - REG(QSYS_EGR_DROP_MODE, 0x01137c), - REG(QSYS_EQ_CTRL, 0x011380), - REG(QSYS_EVENTS_CORE, 0x011384), - REG(QSYS_CIR_CFG, 0x000000), - REG(QSYS_EIR_CFG, 0x000004), - REG(QSYS_SE_CFG, 0x000008), - REG(QSYS_SE_DWRR_CFG, 0x00000c), - REG(QSYS_SE_CONNECT, 0x00003c), - REG(QSYS_SE_DLB_SENSE, 0x000040), - REG(QSYS_CIR_STATE, 0x000044), - REG(QSYS_EIR_STATE, 0x000048), - REG(QSYS_SE_STATE, 0x00004c), - REG(QSYS_HSCH_MISC_CFG, 0x011388), -}; - -static const u32 ocelot_rew_regmap[] = { - REG(REW_PORT_VLAN_CFG, 0x000000), - REG(REW_TAG_CFG, 0x000004), - REG(REW_PORT_CFG, 0x000008), - REG(REW_DSCP_CFG, 0x00000c), - REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), - REG(REW_PTP_CFG, 0x000050), - REG(REW_PTP_DLY1_CFG, 0x000054), - REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), - REG(REW_DSCP_REMAP_CFG, 0x000790), - REG(REW_STAT_CFG, 0x000890), - REG(REW_PPT, 0x000680), -}; - -static const u32 ocelot_sys_regmap[] = { - REG(SYS_COUNT_RX_OCTETS, 0x000000), - REG(SYS_COUNT_RX_UNICAST, 
0x000004), - REG(SYS_COUNT_RX_MULTICAST, 0x000008), - REG(SYS_COUNT_RX_BROADCAST, 0x00000c), - REG(SYS_COUNT_RX_SHORTS, 0x000010), - REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), - REG(SYS_COUNT_RX_JABBERS, 0x000018), - REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), - REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), - REG(SYS_COUNT_RX_64, 0x000024), - REG(SYS_COUNT_RX_65_127, 0x000028), - REG(SYS_COUNT_RX_128_255, 0x00002c), - REG(SYS_COUNT_RX_256_1023, 0x000030), - REG(SYS_COUNT_RX_1024_1526, 0x000034), - REG(SYS_COUNT_RX_1527_MAX, 0x000038), - REG(SYS_COUNT_RX_PAUSE, 0x00003c), - REG(SYS_COUNT_RX_CONTROL, 0x000040), - REG(SYS_COUNT_RX_LONGS, 0x000044), - REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), - REG(SYS_COUNT_TX_OCTETS, 0x000100), - REG(SYS_COUNT_TX_UNICAST, 0x000104), - REG(SYS_COUNT_TX_MULTICAST, 0x000108), - REG(SYS_COUNT_TX_BROADCAST, 0x00010c), - REG(SYS_COUNT_TX_COLLISION, 0x000110), - REG(SYS_COUNT_TX_DROPS, 0x000114), - REG(SYS_COUNT_TX_PAUSE, 0x000118), - REG(SYS_COUNT_TX_64, 0x00011c), - REG(SYS_COUNT_TX_65_127, 0x000120), - REG(SYS_COUNT_TX_128_511, 0x000124), - REG(SYS_COUNT_TX_512_1023, 0x000128), - REG(SYS_COUNT_TX_1024_1526, 0x00012c), - REG(SYS_COUNT_TX_1527_MAX, 0x000130), - REG(SYS_COUNT_TX_AGING, 0x000170), - REG(SYS_RESET_CFG, 0x000508), - REG(SYS_CMID, 0x00050c), - REG(SYS_VLAN_ETYPE_CFG, 0x000510), - REG(SYS_PORT_MODE, 0x000514), - REG(SYS_FRONT_PORT_MODE, 0x000548), - REG(SYS_FRM_AGING, 0x000574), - REG(SYS_STAT_CFG, 0x000578), - REG(SYS_SW_STATUS, 0x00057c), - REG(SYS_MISC_CFG, 0x0005ac), - REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), - REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), - REG(SYS_CM_ADDR, 0x000500), - REG(SYS_CM_DATA, 0x000504), - REG(SYS_PAUSE_CFG, 0x000608), - REG(SYS_PAUSE_TOT_CFG, 0x000638), - REG(SYS_ATOP, 0x00063c), - REG(SYS_ATOP_TOT_CFG, 0x00066c), - REG(SYS_MAC_FC_CFG, 0x000670), - REG(SYS_MMGT, 0x00069c), - REG(SYS_MMGT_FAST, 0x0006a0), - REG(SYS_EVENTS_DIF, 0x0006a4), - REG(SYS_EVENTS_CORE, 0x0006b4), - REG(SYS_CNT, 0x000000), - REG(SYS_PTP_STATUS, 0x0006b8), - REG(SYS_PTP_TXSTAMP, 0x0006bc), - REG(SYS_PTP_NXT, 0x0006c0), - REG(SYS_PTP_CFG, 0x0006c4), -}; - -static const u32 ocelot_s2_regmap[] = { - REG(S2_CORE_UPDATE_CTRL, 0x000000), - REG(S2_CORE_MV_CFG, 0x000004), - REG(S2_CACHE_ENTRY_DAT, 0x000008), - REG(S2_CACHE_MASK_DAT, 0x000108), - REG(S2_CACHE_ACTION_DAT, 0x000208), - REG(S2_CACHE_CNT_DAT, 0x000308), - REG(S2_CACHE_TG_DAT, 0x000388), -}; - -static const u32 ocelot_ptp_regmap[] = { - REG(PTP_PIN_CFG, 0x000000), - REG(PTP_PIN_TOD_SEC_MSB, 0x000004), - REG(PTP_PIN_TOD_SEC_LSB, 0x000008), - REG(PTP_PIN_TOD_NSEC, 0x00000c), - REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), - REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), - REG(PTP_CFG_MISC, 0x0000a0), - REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), - REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), -}; - -static const u32 *ocelot_regmap[] = { - [ANA] = ocelot_ana_regmap, - [QS] = ocelot_qs_regmap, - [QSYS] = ocelot_qsys_regmap, - [REW] = ocelot_rew_regmap, - [SYS] = ocelot_sys_regmap, - [S2] = ocelot_s2_regmap, - [PTP] = ocelot_ptp_regmap, -}; - -static const struct reg_field ocelot_regfields[] = { - [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), - [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), - [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), - [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26), - [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25), - [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), - [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23), - [ANA_ANEVENTS_STORM_DROP] = 
REG_FIELD(ANA_ANEVENTS, 22, 22), - [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), - [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), - [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), - [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), - [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), - [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), - [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), - [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14), - [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), - [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), - [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), - [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), - [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), - [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), - [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), - [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), - [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), - [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), - [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), - [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), - [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), - [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), - [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18), - [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11), - [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9), - [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20), - [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19), - [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7), - [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3), - [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0), - [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), - [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), - [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), -}; - -static const struct ocelot_stat_layout ocelot_stats_layout[] = { - { .name = "rx_octets", .offset = 0x00, }, - { .name = "rx_unicast", .offset = 0x01, }, - { .name = "rx_multicast", .offset = 0x02, }, - { .name = "rx_broadcast", .offset = 0x03, }, - { .name = "rx_shorts", .offset = 0x04, }, - { .name = "rx_fragments", .offset = 0x05, }, - { .name = "rx_jabbers", .offset = 0x06, }, - { .name = "rx_crc_align_errs", .offset = 0x07, }, - { .name = "rx_sym_errs", .offset = 0x08, }, - { .name = "rx_frames_below_65_octets", .offset = 0x09, }, - { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, }, - { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, }, - { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, }, - { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, }, - { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, }, - { .name = "rx_frames_over_1526_octets", .offset = 0x0F, }, - { .name = "rx_pause", .offset = 0x10, }, - { .name = "rx_control", .offset = 0x11, }, - { .name = "rx_longs", .offset = 0x12, }, - { .name = "rx_classified_drops", .offset = 0x13, }, - { .name = "rx_red_prio_0", .offset = 0x14, }, - { .name = "rx_red_prio_1", .offset = 0x15, }, - { .name = "rx_red_prio_2", .offset = 0x16, }, - { .name = "rx_red_prio_3", .offset = 0x17, }, 
- { .name = "rx_red_prio_4", .offset = 0x18, }, - { .name = "rx_red_prio_5", .offset = 0x19, }, - { .name = "rx_red_prio_6", .offset = 0x1A, }, - { .name = "rx_red_prio_7", .offset = 0x1B, }, - { .name = "rx_yellow_prio_0", .offset = 0x1C, }, - { .name = "rx_yellow_prio_1", .offset = 0x1D, }, - { .name = "rx_yellow_prio_2", .offset = 0x1E, }, - { .name = "rx_yellow_prio_3", .offset = 0x1F, }, - { .name = "rx_yellow_prio_4", .offset = 0x20, }, - { .name = "rx_yellow_prio_5", .offset = 0x21, }, - { .name = "rx_yellow_prio_6", .offset = 0x22, }, - { .name = "rx_yellow_prio_7", .offset = 0x23, }, - { .name = "rx_green_prio_0", .offset = 0x24, }, - { .name = "rx_green_prio_1", .offset = 0x25, }, - { .name = "rx_green_prio_2", .offset = 0x26, }, - { .name = "rx_green_prio_3", .offset = 0x27, }, - { .name = "rx_green_prio_4", .offset = 0x28, }, - { .name = "rx_green_prio_5", .offset = 0x29, }, - { .name = "rx_green_prio_6", .offset = 0x2A, }, - { .name = "rx_green_prio_7", .offset = 0x2B, }, - { .name = "tx_octets", .offset = 0x40, }, - { .name = "tx_unicast", .offset = 0x41, }, - { .name = "tx_multicast", .offset = 0x42, }, - { .name = "tx_broadcast", .offset = 0x43, }, - { .name = "tx_collision", .offset = 0x44, }, - { .name = "tx_drops", .offset = 0x45, }, - { .name = "tx_pause", .offset = 0x46, }, - { .name = "tx_frames_below_65_octets", .offset = 0x47, }, - { .name = "tx_frames_65_to_127_octets", .offset = 0x48, }, - { .name = "tx_frames_128_255_octets", .offset = 0x49, }, - { .name = "tx_frames_256_511_octets", .offset = 0x4A, }, - { .name = "tx_frames_512_1023_octets", .offset = 0x4B, }, - { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, }, - { .name = "tx_frames_over_1526_octets", .offset = 0x4D, }, - { .name = "tx_yellow_prio_0", .offset = 0x4E, }, - { .name = "tx_yellow_prio_1", .offset = 0x4F, }, - { .name = "tx_yellow_prio_2", .offset = 0x50, }, - { .name = "tx_yellow_prio_3", .offset = 0x51, }, - { .name = "tx_yellow_prio_4", .offset = 0x52, }, - { .name = "tx_yellow_prio_5", .offset = 0x53, }, - { .name = "tx_yellow_prio_6", .offset = 0x54, }, - { .name = "tx_yellow_prio_7", .offset = 0x55, }, - { .name = "tx_green_prio_0", .offset = 0x56, }, - { .name = "tx_green_prio_1", .offset = 0x57, }, - { .name = "tx_green_prio_2", .offset = 0x58, }, - { .name = "tx_green_prio_3", .offset = 0x59, }, - { .name = "tx_green_prio_4", .offset = 0x5A, }, - { .name = "tx_green_prio_5", .offset = 0x5B, }, - { .name = "tx_green_prio_6", .offset = 0x5C, }, - { .name = "tx_green_prio_7", .offset = 0x5D, }, - { .name = "tx_aged", .offset = 0x5E, }, - { .name = "drop_local", .offset = 0x80, }, - { .name = "drop_tail", .offset = 0x81, }, - { .name = "drop_yellow_prio_0", .offset = 0x82, }, - { .name = "drop_yellow_prio_1", .offset = 0x83, }, - { .name = "drop_yellow_prio_2", .offset = 0x84, }, - { .name = "drop_yellow_prio_3", .offset = 0x85, }, - { .name = "drop_yellow_prio_4", .offset = 0x86, }, - { .name = "drop_yellow_prio_5", .offset = 0x87, }, - { .name = "drop_yellow_prio_6", .offset = 0x88, }, - { .name = "drop_yellow_prio_7", .offset = 0x89, }, - { .name = "drop_green_prio_0", .offset = 0x8A, }, - { .name = "drop_green_prio_1", .offset = 0x8B, }, - { .name = "drop_green_prio_2", .offset = 0x8C, }, - { .name = "drop_green_prio_3", .offset = 0x8D, }, - { .name = "drop_green_prio_4", .offset = 0x8E, }, - { .name = "drop_green_prio_5", .offset = 0x8F, }, - { .name = "drop_green_prio_6", .offset = 0x90, }, - { .name = "drop_green_prio_7", .offset = 0x91, }, -}; - -static void 
ocelot_pll5_init(struct ocelot *ocelot) -{ - /* Configure PLL5. This will need a proper CCF driver - * The values are coming from the VTSS API for Ocelot - */ - regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, - HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | - HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8)); - regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, - HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | - HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | - HSIO_PLL5G_CFG0_ENA_BIAS | - HSIO_PLL5G_CFG0_ENA_VCO_BUF | - HSIO_PLL5G_CFG0_ENA_CP1 | - HSIO_PLL5G_CFG0_SELCPI(2) | - HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) | - HSIO_PLL5G_CFG0_SELBGV820(4) | - HSIO_PLL5G_CFG0_DIV4 | - HSIO_PLL5G_CFG0_ENA_CLKTREE | - HSIO_PLL5G_CFG0_ENA_LANE); - regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, - HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | - HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | - HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | - HSIO_PLL5G_CFG2_ENA_AMPCTRL | - HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | - HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); -} - -int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) -{ - int ret; - - ocelot->map = ocelot_regmap; - ocelot->stats_layout = ocelot_stats_layout; - ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); - ocelot->shared_queue_sz = 224 * 1024; - ocelot->num_mact_rows = 1024; - ocelot->ops = ops; - - ret = ocelot_regfields_init(ocelot, ocelot_regfields); - if (ret) - return ret; - - ocelot_pll5_init(ocelot); - - eth_random_addr(ocelot->base_mac); - ocelot->base_mac[5] &= 0xf0; - - return 0; -} -EXPORT_SYMBOL(ocelot_chip_init); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 4a15d2ff8b70..43716e8dc0ac 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -15,6 +15,7 @@ #include #include +#include #include "ocelot.h" #define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0)) @@ -23,6 +24,447 @@ #define VSC7514_VCAP_IS2_ACTION_WIDTH 99 #define VSC7514_VCAP_PORT_CNT 11 +static const u32 ocelot_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x009000), + REG(ANA_VLANMASK, 0x009004), + REG(ANA_PORT_B_DOMAIN, 0x009008), + REG(ANA_ANAGEFIL, 0x00900c), + REG(ANA_ANEVENTS, 0x009010), + REG(ANA_STORMLIMIT_BURST, 0x009014), + REG(ANA_STORMLIMIT_CFG, 0x009018), + REG(ANA_ISOLATED_PORTS, 0x009028), + REG(ANA_COMMUNITY_PORTS, 0x00902c), + REG(ANA_AUTOAGE, 0x009030), + REG(ANA_MACTOPTIONS, 0x009034), + REG(ANA_LEARNDISC, 0x009038), + REG(ANA_AGENCTRL, 0x00903c), + REG(ANA_MIRRORPORTS, 0x009040), + REG(ANA_EMIRRORPORTS, 0x009044), + REG(ANA_FLOODING, 0x009048), + REG(ANA_FLOODING_IPMC, 0x00904c), + REG(ANA_SFLOW_CFG, 0x009050), + REG(ANA_PORT_MODE, 0x009080), + REG(ANA_PGID_PGID, 0x008c00), + REG(ANA_TABLES_ANMOVED, 0x008b30), + REG(ANA_TABLES_MACHDATA, 0x008b34), + REG(ANA_TABLES_MACLDATA, 0x008b38), + REG(ANA_TABLES_MACACCESS, 0x008b3c), + REG(ANA_TABLES_MACTINDX, 0x008b40), + REG(ANA_TABLES_VLANACCESS, 0x008b44), + REG(ANA_TABLES_VLANTIDX, 0x008b48), + REG(ANA_TABLES_ISDXACCESS, 0x008b4c), + REG(ANA_TABLES_ISDXTIDX, 0x008b50), + REG(ANA_TABLES_ENTRYLIM, 0x008b00), + REG(ANA_TABLES_PTP_ID_HIGH, 0x008b54), + REG(ANA_TABLES_PTP_ID_LOW, 0x008b58), + REG(ANA_MSTI_STATE, 0x008e00), + REG(ANA_PORT_VLAN_CFG, 0x007000), + REG(ANA_PORT_DROP_CFG, 0x007004), + REG(ANA_PORT_QOS_CFG, 0x007008), + REG(ANA_PORT_VCAP_CFG, 0x00700c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x007010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00701c), + REG(ANA_PORT_PCP_DEI_MAP, 0x007020), + REG(ANA_PORT_CPU_FWD_CFG, 0x007060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x007064), + 
REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x007068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00706c), + REG(ANA_PORT_PORT_CFG, 0x007070), + REG(ANA_PORT_POL_CFG, 0x007074), + REG(ANA_PORT_PTP_CFG, 0x007078), + REG(ANA_PORT_PTP_DLY1_CFG, 0x00707c), + REG(ANA_OAM_UPM_LM_CNT, 0x007c00), + REG(ANA_PORT_PTP_DLY2_CFG, 0x007080), + REG(ANA_PFC_PFC_CFG, 0x008800), + REG(ANA_PFC_PFC_TIMER, 0x008804), + REG(ANA_IPT_OAM_MEP_CFG, 0x008000), + REG(ANA_IPT_IPT, 0x008004), + REG(ANA_PPT_PPT, 0x008ac0), + REG(ANA_FID_MAP_FID_MAP, 0x000000), + REG(ANA_AGGR_CFG, 0x0090b4), + REG(ANA_CPUQ_CFG, 0x0090b8), + REG(ANA_CPUQ_CFG2, 0x0090bc), + REG(ANA_CPUQ_8021_CFG, 0x0090c0), + REG(ANA_DSCP_CFG, 0x009100), + REG(ANA_DSCP_REWR_CFG, 0x009200), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x009240), + REG(ANA_VCAP_RNG_VAL_CFG, 0x009260), + REG(ANA_VRAP_CFG, 0x009280), + REG(ANA_VRAP_HDR_DATA, 0x009284), + REG(ANA_VRAP_HDR_MASK, 0x009288), + REG(ANA_DISCARD_CFG, 0x00928c), + REG(ANA_FID_CFG, 0x009290), + REG(ANA_POL_PIR_CFG, 0x004000), + REG(ANA_POL_CIR_CFG, 0x004004), + REG(ANA_POL_MODE_CFG, 0x004008), + REG(ANA_POL_PIR_STATE, 0x00400c), + REG(ANA_POL_CIR_STATE, 0x004010), + REG(ANA_POL_STATE, 0x004014), + REG(ANA_POL_FLOWC, 0x008b80), + REG(ANA_POL_HYST, 0x008bec), + REG(ANA_POL_MISC_CFG, 0x008bf0), +}; + +static const u32 ocelot_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG(QS_INH_DBG, 0x000048), +}; + +static const u32 ocelot_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x011200), + REG(QSYS_SWITCH_PORT_MODE, 0x011234), + REG(QSYS_STAT_CNT_CFG, 0x011264), + REG(QSYS_EEE_CFG, 0x011268), + REG(QSYS_EEE_THRES, 0x011294), + REG(QSYS_IGR_NO_SHARING, 0x011298), + REG(QSYS_EGR_NO_SHARING, 0x01129c), + REG(QSYS_SW_STATUS, 0x0112a0), + REG(QSYS_EXT_CPU_CFG, 0x0112d0), + REG(QSYS_PAD_CFG, 0x0112d4), + REG(QSYS_CPU_GROUP_MAP, 0x0112d8), + REG(QSYS_QMAP, 0x0112dc), + REG(QSYS_ISDX_SGRP, 0x011400), + REG(QSYS_TIMED_FRAME_ENTRY, 0x014000), + REG(QSYS_TFRM_MISC, 0x011310), + REG(QSYS_TFRM_PORT_DLY, 0x011314), + REG(QSYS_TFRM_TIMER_CFG_1, 0x011318), + REG(QSYS_TFRM_TIMER_CFG_2, 0x01131c), + REG(QSYS_TFRM_TIMER_CFG_3, 0x011320), + REG(QSYS_TFRM_TIMER_CFG_4, 0x011324), + REG(QSYS_TFRM_TIMER_CFG_5, 0x011328), + REG(QSYS_TFRM_TIMER_CFG_6, 0x01132c), + REG(QSYS_TFRM_TIMER_CFG_7, 0x011330), + REG(QSYS_TFRM_TIMER_CFG_8, 0x011334), + REG(QSYS_RED_PROFILE, 0x011338), + REG(QSYS_RES_QOS_MODE, 0x011378), + REG(QSYS_RES_CFG, 0x012000), + REG(QSYS_RES_STAT, 0x012004), + REG(QSYS_EGR_DROP_MODE, 0x01137c), + REG(QSYS_EQ_CTRL, 0x011380), + REG(QSYS_EVENTS_CORE, 0x011384), + REG(QSYS_CIR_CFG, 0x000000), + REG(QSYS_EIR_CFG, 0x000004), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG(QSYS_SE_CONNECT, 0x00003c), + REG(QSYS_SE_DLB_SENSE, 0x000040), + REG(QSYS_CIR_STATE, 0x000044), + REG(QSYS_EIR_STATE, 0x000048), + REG(QSYS_SE_STATE, 0x00004c), + REG(QSYS_HSCH_MISC_CFG, 0x011388), +}; + +static const u32 ocelot_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG(REW_PTP_CFG, 0x000050), + REG(REW_PTP_DLY1_CFG, 0x000054), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000690), + REG(REW_DSCP_REMAP_CFG, 0x000790), + 
REG(REW_STAT_CFG, 0x000890), + REG(REW_PPT, 0x000680), +}; + +static const u32 ocelot_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_UNICAST, 0x000004), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_BROADCAST, 0x00000c), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_CRC_ALIGN_ERRS, 0x00001c), + REG(SYS_COUNT_RX_SYM_ERRS, 0x000020), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_PAUSE, 0x00003c), + REG(SYS_COUNT_RX_CONTROL, 0x000040), + REG(SYS_COUNT_RX_LONGS, 0x000044), + REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_UNICAST, 0x000104), + REG(SYS_COUNT_TX_MULTICAST, 0x000108), + REG(SYS_COUNT_TX_BROADCAST, 0x00010c), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_PAUSE, 0x000118), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000170), + REG(SYS_RESET_CFG, 0x000508), + REG(SYS_CMID, 0x00050c), + REG(SYS_VLAN_ETYPE_CFG, 0x000510), + REG(SYS_PORT_MODE, 0x000514), + REG(SYS_FRONT_PORT_MODE, 0x000548), + REG(SYS_FRM_AGING, 0x000574), + REG(SYS_STAT_CFG, 0x000578), + REG(SYS_SW_STATUS, 0x00057c), + REG(SYS_MISC_CFG, 0x0005ac), + REG(SYS_REW_MAC_HIGH_CFG, 0x0005b0), + REG(SYS_REW_MAC_LOW_CFG, 0x0005dc), + REG(SYS_CM_ADDR, 0x000500), + REG(SYS_CM_DATA, 0x000504), + REG(SYS_PAUSE_CFG, 0x000608), + REG(SYS_PAUSE_TOT_CFG, 0x000638), + REG(SYS_ATOP, 0x00063c), + REG(SYS_ATOP_TOT_CFG, 0x00066c), + REG(SYS_MAC_FC_CFG, 0x000670), + REG(SYS_MMGT, 0x00069c), + REG(SYS_MMGT_FAST, 0x0006a0), + REG(SYS_EVENTS_DIF, 0x0006a4), + REG(SYS_EVENTS_CORE, 0x0006b4), + REG(SYS_CNT, 0x000000), + REG(SYS_PTP_STATUS, 0x0006b8), + REG(SYS_PTP_TXSTAMP, 0x0006bc), + REG(SYS_PTP_NXT, 0x0006c0), + REG(SYS_PTP_CFG, 0x0006c4), +}; + +static const u32 ocelot_s2_regmap[] = { + REG(S2_CORE_UPDATE_CTRL, 0x000000), + REG(S2_CORE_MV_CFG, 0x000004), + REG(S2_CACHE_ENTRY_DAT, 0x000008), + REG(S2_CACHE_MASK_DAT, 0x000108), + REG(S2_CACHE_ACTION_DAT, 0x000208), + REG(S2_CACHE_CNT_DAT, 0x000308), + REG(S2_CACHE_TG_DAT, 0x000388), +}; + +static const u32 ocelot_ptp_regmap[] = { + REG(PTP_PIN_CFG, 0x000000), + REG(PTP_PIN_TOD_SEC_MSB, 0x000004), + REG(PTP_PIN_TOD_SEC_LSB, 0x000008), + REG(PTP_PIN_TOD_NSEC, 0x00000c), + REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014), + REG(PTP_PIN_WF_LOW_PERIOD, 0x000018), + REG(PTP_CFG_MISC, 0x0000a0), + REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4), + REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), +}; + +static const u32 *ocelot_regmap[] = { + [ANA] = ocelot_ana_regmap, + [QS] = ocelot_qs_regmap, + [QSYS] = ocelot_qsys_regmap, + [REW] = ocelot_rew_regmap, + [SYS] = ocelot_sys_regmap, + [S2] = ocelot_s2_regmap, + [PTP] = ocelot_ptp_regmap, +}; + +static const struct reg_field ocelot_regfields[] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), + [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), + [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26), + [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 
25, 25), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9), + [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20), + [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19), + [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3), + [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), +}; + +static const struct ocelot_stat_layout ocelot_stats_layout[] = { + { .name = "rx_octets", .offset = 0x00, }, + { .name = "rx_unicast", .offset = 0x01, }, + { .name = "rx_multicast", .offset = 0x02, }, + { .name = "rx_broadcast", .offset = 0x03, }, + { .name = "rx_shorts", .offset = 0x04, }, + { .name = "rx_fragments", .offset = 0x05, }, + { .name = "rx_jabbers", .offset = 0x06, }, + { .name = "rx_crc_align_errs", .offset = 0x07, }, + { .name = "rx_sym_errs", .offset = 0x08, }, + { .name = "rx_frames_below_65_octets", .offset = 0x09, }, + { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, }, + { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, }, + { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, }, + { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, }, + { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, }, + { .name = "rx_frames_over_1526_octets", .offset = 0x0F, }, + { .name = "rx_pause", .offset = 0x10, }, + { .name = "rx_control", .offset = 0x11, }, + { .name = "rx_longs", .offset = 0x12, }, + { .name = "rx_classified_drops", .offset = 0x13, }, + { .name = "rx_red_prio_0", 
.offset = 0x14, }, + { .name = "rx_red_prio_1", .offset = 0x15, }, + { .name = "rx_red_prio_2", .offset = 0x16, }, + { .name = "rx_red_prio_3", .offset = 0x17, }, + { .name = "rx_red_prio_4", .offset = 0x18, }, + { .name = "rx_red_prio_5", .offset = 0x19, }, + { .name = "rx_red_prio_6", .offset = 0x1A, }, + { .name = "rx_red_prio_7", .offset = 0x1B, }, + { .name = "rx_yellow_prio_0", .offset = 0x1C, }, + { .name = "rx_yellow_prio_1", .offset = 0x1D, }, + { .name = "rx_yellow_prio_2", .offset = 0x1E, }, + { .name = "rx_yellow_prio_3", .offset = 0x1F, }, + { .name = "rx_yellow_prio_4", .offset = 0x20, }, + { .name = "rx_yellow_prio_5", .offset = 0x21, }, + { .name = "rx_yellow_prio_6", .offset = 0x22, }, + { .name = "rx_yellow_prio_7", .offset = 0x23, }, + { .name = "rx_green_prio_0", .offset = 0x24, }, + { .name = "rx_green_prio_1", .offset = 0x25, }, + { .name = "rx_green_prio_2", .offset = 0x26, }, + { .name = "rx_green_prio_3", .offset = 0x27, }, + { .name = "rx_green_prio_4", .offset = 0x28, }, + { .name = "rx_green_prio_5", .offset = 0x29, }, + { .name = "rx_green_prio_6", .offset = 0x2A, }, + { .name = "rx_green_prio_7", .offset = 0x2B, }, + { .name = "tx_octets", .offset = 0x40, }, + { .name = "tx_unicast", .offset = 0x41, }, + { .name = "tx_multicast", .offset = 0x42, }, + { .name = "tx_broadcast", .offset = 0x43, }, + { .name = "tx_collision", .offset = 0x44, }, + { .name = "tx_drops", .offset = 0x45, }, + { .name = "tx_pause", .offset = 0x46, }, + { .name = "tx_frames_below_65_octets", .offset = 0x47, }, + { .name = "tx_frames_65_to_127_octets", .offset = 0x48, }, + { .name = "tx_frames_128_255_octets", .offset = 0x49, }, + { .name = "tx_frames_256_511_octets", .offset = 0x4A, }, + { .name = "tx_frames_512_1023_octets", .offset = 0x4B, }, + { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, }, + { .name = "tx_frames_over_1526_octets", .offset = 0x4D, }, + { .name = "tx_yellow_prio_0", .offset = 0x4E, }, + { .name = "tx_yellow_prio_1", .offset = 0x4F, }, + { .name = "tx_yellow_prio_2", .offset = 0x50, }, + { .name = "tx_yellow_prio_3", .offset = 0x51, }, + { .name = "tx_yellow_prio_4", .offset = 0x52, }, + { .name = "tx_yellow_prio_5", .offset = 0x53, }, + { .name = "tx_yellow_prio_6", .offset = 0x54, }, + { .name = "tx_yellow_prio_7", .offset = 0x55, }, + { .name = "tx_green_prio_0", .offset = 0x56, }, + { .name = "tx_green_prio_1", .offset = 0x57, }, + { .name = "tx_green_prio_2", .offset = 0x58, }, + { .name = "tx_green_prio_3", .offset = 0x59, }, + { .name = "tx_green_prio_4", .offset = 0x5A, }, + { .name = "tx_green_prio_5", .offset = 0x5B, }, + { .name = "tx_green_prio_6", .offset = 0x5C, }, + { .name = "tx_green_prio_7", .offset = 0x5D, }, + { .name = "tx_aged", .offset = 0x5E, }, + { .name = "drop_local", .offset = 0x80, }, + { .name = "drop_tail", .offset = 0x81, }, + { .name = "drop_yellow_prio_0", .offset = 0x82, }, + { .name = "drop_yellow_prio_1", .offset = 0x83, }, + { .name = "drop_yellow_prio_2", .offset = 0x84, }, + { .name = "drop_yellow_prio_3", .offset = 0x85, }, + { .name = "drop_yellow_prio_4", .offset = 0x86, }, + { .name = "drop_yellow_prio_5", .offset = 0x87, }, + { .name = "drop_yellow_prio_6", .offset = 0x88, }, + { .name = "drop_yellow_prio_7", .offset = 0x89, }, + { .name = "drop_green_prio_0", .offset = 0x8A, }, + { .name = "drop_green_prio_1", .offset = 0x8B, }, + { .name = "drop_green_prio_2", .offset = 0x8C, }, + { .name = "drop_green_prio_3", .offset = 0x8D, }, + { .name = "drop_green_prio_4", .offset = 0x8E, }, + { .name =
"drop_green_prio_5", .offset = 0x8F, }, + { .name = "drop_green_prio_6", .offset = 0x90, }, + { .name = "drop_green_prio_7", .offset = 0x91, }, +}; + +static void ocelot_pll5_init(struct ocelot *ocelot) +{ + /* Configure PLL5. This will need a proper CCF driver + * The values are coming from the VTSS API for Ocelot + */ + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4, + HSIO_PLL5G_CFG4_IB_CTRL(0x7600) | + HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8)); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0, + HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) | + HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) | + HSIO_PLL5G_CFG0_ENA_BIAS | + HSIO_PLL5G_CFG0_ENA_VCO_BUF | + HSIO_PLL5G_CFG0_ENA_CP1 | + HSIO_PLL5G_CFG0_SELCPI(2) | + HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) | + HSIO_PLL5G_CFG0_SELBGV820(4) | + HSIO_PLL5G_CFG0_DIV4 | + HSIO_PLL5G_CFG0_ENA_CLKTREE | + HSIO_PLL5G_CFG0_ENA_LANE); + regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2, + HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET | + HSIO_PLL5G_CFG2_EN_RESET_OVERRUN | + HSIO_PLL5G_CFG2_GAIN_TEST(0x8) | + HSIO_PLL5G_CFG2_ENA_AMPCTRL | + HSIO_PLL5G_CFG2_PWD_AMPCTRL_N | + HSIO_PLL5G_CFG2_AMPC_SEL(0x10)); +} + +static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops) +{ + int ret; + + ocelot->map = ocelot_regmap; + ocelot->stats_layout = ocelot_stats_layout; + ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout); + ocelot->shared_queue_sz = 224 * 1024; + ocelot->num_mact_rows = 1024; + ocelot->ops = ops; + + ret = ocelot_regfields_init(ocelot, ocelot_regfields); + if (ret) + return ret; + + ocelot_pll5_init(ocelot); + + eth_random_addr(ocelot->base_mac); + ocelot->base_mac[5] &= 0xf0; + + return 0; +} + static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info) { u8 llen, wlen; From 9c90eea310f80460b81a7afe27064c7f0200d1d1 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:44 +0300 Subject: [PATCH 0139/2056] net: mscc: ocelot: move net_device related functions to ocelot_net.c The ocelot hardware library shouldn't contain too much net_device specific code, since it is shared with DSA which abstracts that structure away. So move as much of this code as possible into the mscc_ocelot driver and outside of the common library. We're making an exception for MDB and LAG code. That is not yet exported to DSA, but when it will be, most of the code that's already in ocelot.c will remain there. So, there's no point in moving code to ocelot_net.c just to move it back later. We could have moved all net_device code to ocelot_vsc7514.c directly, but let's operate under the assumption that if a new switchdev ocelot driver gets added, it'll define its SoC-specific stuff in a new ocelot_vsc*.c file and it'll reuse the rest of the code. Signed-off-by: Vladimir Oltean Signed-off-by: David S.
Miller --- drivers/net/ethernet/mscc/Makefile | 5 +- drivers/net/ethernet/mscc/ocelot.c | 909 +----------------- drivers/net/ethernet/mscc/ocelot.h | 45 +- drivers/net/ethernet/mscc/ocelot_ace.c | 24 + drivers/net/ethernet/mscc/ocelot_flower.c | 22 - drivers/net/ethernet/mscc/ocelot_net.c | 1031 +++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot_police.c | 49 +- drivers/net/ethernet/mscc/ocelot_police.h | 24 + drivers/net/ethernet/mscc/ocelot_tc.c | 179 ---- drivers/net/ethernet/mscc/ocelot_tc.h | 22 - 10 files changed, 1151 insertions(+), 1159 deletions(-) create mode 100644 drivers/net/ethernet/mscc/ocelot_net.c delete mode 100644 drivers/net/ethernet/mscc/ocelot_tc.c delete mode 100644 drivers/net/ethernet/mscc/ocelot_tc.h diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 5d0b6c1da3a0..7ab3bc25ed27 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -3,10 +3,11 @@ obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o mscc_ocelot_switch_lib-y := \ ocelot.o \ ocelot_io.o \ - ocelot_tc.o \ ocelot_police.o \ ocelot_ace.o \ ocelot_flower.o \ ocelot_ptp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o -mscc_ocelot-y := ocelot_vsc7514.o +mscc_ocelot-y := \ + ocelot_vsc7514.o \ + ocelot_net.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 9cfe1fd98c30..5c2b5a2e8608 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -4,42 +4,13 @@ * * Copyright (c) 2017 Microsemi Corporation */ -#include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - #include "ocelot.h" #include "ocelot_ace.h" #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 -/* MAC table entry types. - * ENTRYTYPE_NORMAL is subject to aging. - * ENTRYTYPE_LOCKED is not subject to aging. - * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. - * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. 
- */ -enum macaccess_entry_type { - ENTRYTYPE_NORMAL = 0, - ENTRYTYPE_LOCKED, - ENTRYTYPE_MACv4, - ENTRYTYPE_MACv6, -}; - struct ocelot_mact_entry { u8 mac[ETH_ALEN]; u16 vid; @@ -84,10 +55,9 @@ static void ocelot_mact_select(struct ocelot *ocelot, } -static int ocelot_mact_learn(struct ocelot *ocelot, int port, - const unsigned char mac[ETH_ALEN], - unsigned int vid, - enum macaccess_entry_type type) +int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type) { ocelot_mact_select(ocelot, mac, vid); @@ -100,10 +70,10 @@ static int ocelot_mact_learn(struct ocelot *ocelot, int port, return ocelot_mact_wait_for_completion(ocelot); } +EXPORT_SYMBOL(ocelot_mact_learn); -static int ocelot_mact_forget(struct ocelot *ocelot, - const unsigned char mac[ETH_ALEN], - unsigned int vid) +int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], unsigned int vid) { ocelot_mact_select(ocelot, mac, vid); @@ -114,6 +84,7 @@ static int ocelot_mact_forget(struct ocelot *ocelot, return ocelot_mact_wait_for_completion(ocelot); } +EXPORT_SYMBOL(ocelot_mact_forget); static void ocelot_mact_init(struct ocelot *ocelot) { @@ -168,20 +139,6 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) return ocelot_vlant_wait_for_completion(ocelot); } -static void ocelot_vlan_mode(struct ocelot *ocelot, int port, - netdev_features_t features) -{ - u32 val; - - /* Filtering */ - val = ocelot_read(ocelot, ANA_VLANMASK); - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - val |= BIT(port); - else - val &= ~BIT(port); - ocelot_write(ocelot, val, ANA_VLANMASK); -} - static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port, u16 vid) { @@ -295,26 +252,6 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, } EXPORT_SYMBOL(ocelot_vlan_add); -static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, - bool untagged) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - int ret; - - ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged); - if (ret) - return ret; - - /* Add the port MAC address to with the right VLAN information */ - ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, - ENTRYTYPE_LOCKED); - - return 0; -} - int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -338,30 +275,6 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) } EXPORT_SYMBOL(ocelot_vlan_del); -static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - int ret; - - /* 8021q removes VID 0 on module unload for all interfaces - * with VLAN filtering feature. We need to keep it to receive - * untagged traffic. 
- */ - if (vid == 0) - return 0; - - ret = ocelot_vlan_del(ocelot, port, vid); - if (ret) - return ret; - - /* Del the port MAC address to with the right VLAN information */ - ocelot_mact_forget(ocelot, dev->dev_addr, vid); - - return 0; -} - static void ocelot_vlan_init(struct ocelot *ocelot) { u16 port, vid; @@ -492,15 +405,6 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_adjust_link); -static void ocelot_port_adjust_link(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - ocelot_adjust_link(ocelot, port, dev->phydev); -} - void ocelot_port_enable(struct ocelot *ocelot, int port, struct phy_device *phy) { @@ -514,40 +418,6 @@ void ocelot_port_enable(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_port_enable); -static int ocelot_port_open(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - int err; - - if (priv->serdes) { - err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, "Could not set mode of SerDes\n"); - return err; - } - } - - err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, "Could not attach to PHY\n"); - return err; - } - - dev->phydev = priv->phy; - - phy_attached_info(priv->phy); - phy_start(priv->phy); - - ocelot_port_enable(ocelot, port, priv->phy); - - return 0; -} - void ocelot_port_disable(struct ocelot *ocelot, int port) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -558,41 +428,6 @@ void ocelot_port_disable(struct ocelot *ocelot, int port) } EXPORT_SYMBOL(ocelot_port_disable); -static int ocelot_port_stop(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - phy_disconnect(priv->phy); - - dev->phydev = NULL; - - ocelot_port_disable(ocelot, port); - - return 0; -} - -/* Generate the IFH for frame injection - * - * The IFH is a 128bit-value - * bit 127: bypass the analyzer processing - * bit 56-67: destination mask - * bit 28-29: pop_cnt: 3 disables all rewriting of the frame - * bit 20-27: cpu extraction queue mask - * bit 16: tag type 0: C-tag, 1: S-tag - * bit 0-11: VID - */ -static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) -{ - ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21); - ifh[1] = (0xf00 & info->port) >> 8; - ifh[2] = (0xff & info->port) << 24; - ifh[3] = (info->tag_type << 16) | info->vid; - - return 0; -} - int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, struct sk_buff *skb) { @@ -611,77 +446,6 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, } EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb); -static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct skb_shared_info *shinfo = skb_shinfo(skb); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - u32 val, ifh[OCELOT_TAG_LEN / 4]; - struct frame_info info = {}; - u8 grp = 0; /* Send everything on CPU group 0 */ - unsigned int i, count, last; - int port = priv->chip_port; - - val = ocelot_read(ocelot, QS_INJ_STATUS); - if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || - (val & 
QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))) - return NETDEV_TX_BUSY; - - ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | - QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); - - info.port = BIT(port); - info.tag_type = IFH_TAG_TYPE_C; - info.vid = skb_vlan_tag_get(skb); - - /* Check if timestamping is needed */ - if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { - info.rew_op = ocelot_port->ptp_cmd; - if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) - info.rew_op |= (ocelot_port->ts_id % 4) << 3; - } - - ocelot_gen_ifh(ifh, &info); - - for (i = 0; i < OCELOT_TAG_LEN / 4; i++) - ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]), - QS_INJ_WR, grp); - - count = (skb->len + 3) / 4; - last = skb->len % 4; - for (i = 0; i < count; i++) { - ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); - } - - /* Add padding */ - while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { - ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); - i++; - } - - /* Indicate EOF and valid bytes in last word */ - ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | - QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) | - QS_INJ_CTRL_EOF, - QS_INJ_CTRL, grp); - - /* Add dummy CRC */ - ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); - skb_tx_timestamp(skb); - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) { - ocelot_port->ts_id++; - return NETDEV_TX_OK; - } - - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - static void ocelot_get_hwtimestamp(struct ocelot *ocelot, struct timespec64 *ts) { @@ -767,113 +531,6 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) } EXPORT_SYMBOL(ocelot_get_txtstamp); -static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - - return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid); -} - -static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - - return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid, - ENTRYTYPE_LOCKED); -} - -static void ocelot_set_rx_mode(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - u32 val; - int i; - - /* This doesn't handle promiscuous mode because the bridge core is - * setting IFF_PROMISC on all slave interfaces and all frames would be - * forwarded to the CPU port. - */ - val = GENMASK(ocelot->num_phys_ports - 1, 0); - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) - ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); - - __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); -} - -static int ocelot_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - int port = priv->chip_port; - int ret; - - ret = snprintf(buf, len, "p%d", port); - if (ret >= len) - return -EINVAL; - - return 0; -} - -static int ocelot_port_set_mac_address(struct net_device *dev, void *p) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - const struct sockaddr *addr = p; - - /* Learn the new net device MAC address in the mac table. 
*/ - ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid, - ENTRYTYPE_LOCKED); - /* Then forget the previous one. */ - ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid); - - ether_addr_copy(dev->dev_addr, addr->sa_data); - return 0; -} - -static void ocelot_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - /* Configure the port to read the stats from */ - ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), - SYS_STAT_CFG); - - /* Get Rx stats */ - stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS); - stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) + - ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) + - ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) + - ocelot_read(ocelot, SYS_COUNT_RX_LONGS) + - ocelot_read(ocelot, SYS_COUNT_RX_64) + - ocelot_read(ocelot, SYS_COUNT_RX_65_127) + - ocelot_read(ocelot, SYS_COUNT_RX_128_255) + - ocelot_read(ocelot, SYS_COUNT_RX_256_1023) + - ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) + - ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX); - stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST); - stats->rx_dropped = dev->stats.rx_dropped; - - /* Get Tx stats */ - stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS); - stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) + - ocelot_read(ocelot, SYS_COUNT_TX_65_127) + - ocelot_read(ocelot, SYS_COUNT_TX_128_511) + - ocelot_read(ocelot, SYS_COUNT_TX_512_1023) + - ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) + - ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX); - stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) + - ocelot_read(ocelot, SYS_COUNT_TX_AGING); - stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION); -} - int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid) { @@ -897,19 +554,6 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_add); -static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, - u16 vid, u16 flags, - struct netlink_ext_ack *extack) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - return ocelot_fdb_add(ocelot, port, addr, vid); -} - int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid) { @@ -917,26 +561,8 @@ int ocelot_fdb_del(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_del); -static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - return ocelot_fdb_del(ocelot, port, addr, vid); -} - -struct ocelot_dump_ctx { - struct net_device *dev; - struct sk_buff *skb; - struct netlink_callback *cb; - int idx; -}; - -static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, - bool is_static, void *data) +int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data) { struct ocelot_dump_ctx *dump = data; u32 portid = NETLINK_CB(dump->cb->skb).portid; @@ -977,6 +603,7 @@ static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, nlmsg_cancel(dump->skb, nlh); return -EMSGSIZE; } +EXPORT_SYMBOL(ocelot_port_fdb_do_dump); static int ocelot_mact_read(struct 
ocelot *ocelot, int port, int row, int col, struct ocelot_mact_entry *entry) @@ -1058,74 +685,6 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_fdb_dump); -static int ocelot_port_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, int *idx) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - struct ocelot_dump_ctx dump = { - .dev = dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - int port = priv->chip_port; - int ret; - - ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump); - - *idx = dump.idx; - - return ret; -} - -static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, - u16 vid) -{ - return ocelot_vlan_vid_add(dev, vid, false, false); -} - -static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, - u16 vid) -{ - return ocelot_vlan_vid_del(dev, vid); -} - -static int ocelot_set_features(struct net_device *dev, - netdev_features_t features) -{ - netdev_features_t changed = dev->features ^ features; - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && - priv->tc.offload_cnt) { - netdev_err(dev, - "Cannot disable HW TC offload while offloads active\n"); - return -EBUSY; - } - - if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) - ocelot_vlan_mode(ocelot, port, features); - - return 0; -} - -static int ocelot_get_port_parent_id(struct net_device *dev, - struct netdev_phys_item_id *ppid) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - - ppid->id_len = sizeof(ocelot->base_mac); - memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); - - return 0; -} - int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config, @@ -1198,46 +757,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) } EXPORT_SYMBOL(ocelot_hwstamp_set); -static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - /* If the attached PHY device isn't capable of timestamping operations, - * use our own (when possible). 
- */ - if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) { - switch (cmd) { - case SIOCSHWTSTAMP: - return ocelot_hwstamp_set(ocelot, port, ifr); - case SIOCGHWTSTAMP: - return ocelot_hwstamp_get(ocelot, port, ifr); - } - } - - return phy_mii_ioctl(dev->phydev, ifr, cmd); -} - -static const struct net_device_ops ocelot_port_netdev_ops = { - .ndo_open = ocelot_port_open, - .ndo_stop = ocelot_port_stop, - .ndo_start_xmit = ocelot_port_xmit, - .ndo_set_rx_mode = ocelot_set_rx_mode, - .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, - .ndo_set_mac_address = ocelot_port_set_mac_address, - .ndo_get_stats64 = ocelot_get_stats64, - .ndo_fdb_add = ocelot_port_fdb_add, - .ndo_fdb_del = ocelot_port_fdb_del, - .ndo_fdb_dump = ocelot_port_fdb_dump, - .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, - .ndo_set_features = ocelot_set_features, - .ndo_get_port_parent_id = ocelot_get_port_parent_id, - .ndo_setup_tc = ocelot_setup_tc, - .ndo_do_ioctl = ocelot_ioctl, -}; - void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) { int i; @@ -1251,16 +770,6 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data) } EXPORT_SYMBOL(ocelot_get_strings); -static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, - u8 *data) -{ - struct ocelot_port_private *priv = netdev_priv(netdev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - ocelot_get_strings(ocelot, port, sset, data); -} - static void ocelot_update_stats(struct ocelot *ocelot) { int i, j; @@ -1314,17 +823,6 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data) } EXPORT_SYMBOL(ocelot_get_ethtool_stats); -static void ocelot_port_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, - u64 *data) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - ocelot_get_ethtool_stats(ocelot, port, data); -} - int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) { if (sset != ETH_SS_STATS) @@ -1334,15 +832,6 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset) } EXPORT_SYMBOL(ocelot_get_sset_count); -static int ocelot_port_get_sset_count(struct net_device *dev, int sset) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - return ocelot_get_sset_count(ocelot, port, sset); -} - int ocelot_get_ts_info(struct ocelot *ocelot, int port, struct ethtool_ts_info *info) { @@ -1368,28 +857,6 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_get_ts_info); -static int ocelot_port_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - if (!ocelot->ptp) - return ethtool_op_get_ts_info(dev, info); - - return ocelot_get_ts_info(ocelot, port, info); -} - -static const struct ethtool_ops ocelot_ethtool_ops = { - .get_strings = ocelot_port_get_strings, - .get_ethtool_stats = ocelot_port_get_ethtool_stats, - .get_sset_count = ocelot_port_get_sset_count, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .get_ts_info = ocelot_port_get_ts_info, -}; - void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) { u32 port_cfg; @@ -1445,16 +912,6 @@ void 
ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state) } EXPORT_SYMBOL(ocelot_bridge_stp_state_set); -static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, - struct switchdev_trans *trans, - u8 state) -{ - if (switchdev_trans_ph_prepare(trans)) - return; - - ocelot_bridge_stp_state_set(ocelot, port, state); -} - void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) { unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000); @@ -1469,95 +926,6 @@ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs) } EXPORT_SYMBOL(ocelot_set_ageing_time); -static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port, - unsigned long ageing_clock_t) -{ - unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); - u32 ageing_time = jiffies_to_msecs(ageing_jiffies); - - ocelot_set_ageing_time(ocelot, ageing_time); -} - -static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc) -{ - u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | - ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; - u32 val = 0; - - if (mc) - val = cpu_fwd_mcast; - - ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast, - ANA_PORT_CPU_FWD_CFG, port); -} - -static int ocelot_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr, - struct switchdev_trans *trans) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - ocelot_port_attr_stp_state_set(ocelot, port, trans, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - ocelot_port_vlan_filtering(ocelot, port, - attr->u.vlan_filtering); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: - ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int ocelot_port_obj_add_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) -{ - int ret; - u16 vid; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - ret = ocelot_vlan_vid_add(dev, vid, - vlan->flags & BRIDGE_VLAN_INFO_PVID, - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); - if (ret) - return ret; - } - - return 0; -} - -static int ocelot_port_vlan_del_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan) -{ - int ret; - u16 vid; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - ret = ocelot_vlan_vid_del(dev, vid); - - if (ret) - return ret; - } - - return 0; -} - static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, const unsigned char *addr, u16 vid) @@ -1572,9 +940,9 @@ static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, return NULL; } -static int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) +int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; @@ -1616,9 +984,10 @@ static int ocelot_port_obj_add_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); 
} +EXPORT_SYMBOL(ocelot_port_obj_add_mdb); -static int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb) +int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot_port *ocelot_port = &priv->port; @@ -1653,50 +1022,7 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } - -static int ocelot_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct switchdev_trans *trans, - struct netlink_ext_ack *extack) -{ - int ret = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - ret = ocelot_port_obj_add_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj), - trans); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), - trans); - break; - default: - return -EOPNOTSUPP; - } - - return ret; -} - -static int ocelot_port_obj_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - int ret = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - ret = ocelot_port_vlan_del_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - default: - return -EOPNOTSUPP; - } - - return ret; -} +EXPORT_SYMBOL(ocelot_port_obj_del_mdb); int ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge) @@ -1788,8 +1114,8 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag) } } -static int ocelot_port_lag_join(struct ocelot *ocelot, int port, - struct net_device *bond) +int ocelot_port_lag_join(struct ocelot *ocelot, int port, + struct net_device *bond) { struct net_device *ndev; u32 bond_mask = 0; @@ -1826,9 +1152,10 @@ static int ocelot_port_lag_join(struct ocelot *ocelot, int port, return 0; } +EXPORT_SYMBOL(ocelot_port_lag_join); -static void ocelot_port_lag_leave(struct ocelot *ocelot, int port, - struct net_device *bond) +void ocelot_port_lag_leave(struct ocelot *ocelot, int port, + struct net_device *bond) { u32 port_cfg; int i; @@ -1856,151 +1183,7 @@ static void ocelot_port_lag_leave(struct ocelot *ocelot, int port, ocelot_set_aggr_pgids(ocelot); } - -/* Checks if the net_device instance given to us originate from our driver. 
*/ -static bool ocelot_netdevice_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &ocelot_port_netdev_ops; -} - -static int ocelot_netdevice_port_event(struct net_device *dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - int err = 0; - - switch (event) { - case NETDEV_CHANGEUPPER: - if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) { - err = ocelot_port_bridge_join(ocelot, port, - info->upper_dev); - } else { - err = ocelot_port_bridge_leave(ocelot, port, - info->upper_dev); - } - } - if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) - err = ocelot_port_lag_join(ocelot, port, - info->upper_dev); - else - ocelot_port_lag_leave(ocelot, port, - info->upper_dev); - } - break; - default: - break; - } - - return err; -} - -static int ocelot_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct netdev_notifier_changeupper_info *info = ptr; - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - int ret = 0; - - if (!ocelot_netdevice_dev_check(dev)) - return 0; - - if (event == NETDEV_PRECHANGEUPPER && - netif_is_lag_master(info->upper_dev)) { - struct netdev_lag_upper_info *lag_upper_info = info->upper_info; - struct netlink_ext_ack *extack; - - if (lag_upper_info && - lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { - extack = netdev_notifier_info_to_extack(&info->info); - NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); - - ret = -EINVAL; - goto notify; - } - } - - if (netif_is_lag_master(dev)) { - struct net_device *slave; - struct list_head *iter; - - netdev_for_each_lower_dev(dev, slave, iter) { - ret = ocelot_netdevice_port_event(slave, event, info); - if (ret) - goto notify; - } - } else { - ret = ocelot_netdevice_port_event(dev, event, info); - } - -notify: - return notifier_from_errno(ret); -} - -struct notifier_block ocelot_netdevice_nb __read_mostly = { - .notifier_call = ocelot_netdevice_event, -}; -EXPORT_SYMBOL(ocelot_netdevice_nb); - -static int ocelot_switchdev_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - int err; - - switch (event) { - case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - ocelot_netdevice_dev_check, - ocelot_port_attr_set); - return notifier_from_errno(err); - } - - return NOTIFY_DONE; -} - -struct notifier_block ocelot_switchdev_nb __read_mostly = { - .notifier_call = ocelot_switchdev_event, -}; -EXPORT_SYMBOL(ocelot_switchdev_nb); - -static int ocelot_switchdev_blocking_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - int err; - - switch (event) { - /* Blocking events. 
*/ - case SWITCHDEV_PORT_OBJ_ADD: - err = switchdev_handle_port_obj_add(dev, ptr, - ocelot_netdevice_dev_check, - ocelot_port_obj_add); - return notifier_from_errno(err); - case SWITCHDEV_PORT_OBJ_DEL: - err = switchdev_handle_port_obj_del(dev, ptr, - ocelot_netdevice_dev_check, - ocelot_port_obj_del); - return notifier_from_errno(err); - case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - ocelot_netdevice_dev_check, - ocelot_port_attr_set); - return notifier_from_errno(err); - } - - return NOTIFY_DONE; -} - -struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { - .notifier_call = ocelot_switchdev_blocking_event, -}; -EXPORT_SYMBOL(ocelot_switchdev_blocking_nb); +EXPORT_SYMBOL(ocelot_port_lag_leave); /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu. * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG. @@ -2109,52 +1292,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port) } EXPORT_SYMBOL(ocelot_init_port); -int ocelot_probe_port(struct ocelot *ocelot, u8 port, - void __iomem *regs, - struct phy_device *phy) -{ - struct ocelot_port_private *priv; - struct ocelot_port *ocelot_port; - struct net_device *dev; - int err; - - dev = alloc_etherdev(sizeof(struct ocelot_port_private)); - if (!dev) - return -ENOMEM; - SET_NETDEV_DEV(dev, ocelot->dev); - priv = netdev_priv(dev); - priv->dev = dev; - priv->phy = phy; - priv->chip_port = port; - ocelot_port = &priv->port; - ocelot_port->ocelot = ocelot; - ocelot_port->regs = regs; - ocelot->ports[port] = ocelot_port; - - dev->netdev_ops = &ocelot_port_netdev_ops; - dev->ethtool_ops = &ocelot_ethtool_ops; - - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS | - NETIF_F_HW_TC; - dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; - - memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); - dev->dev_addr[ETH_ALEN - 1] += port; - ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, - ENTRYTYPE_LOCKED); - - ocelot_init_port(ocelot, port); - - err = register_netdev(dev); - if (err) { - dev_err(ocelot->dev, "register_netdev failed\n"); - free_netdev(dev); - } - - return err; -} -EXPORT_SYMBOL(ocelot_probe_port); - /* Configure and enable the CPU port module, which is a set of queues. * If @npi contains a valid port index, the CPU port module is connected * to the Node Processor Interface (NPI). This is the mode through which diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index a834747ec9a3..0c23734a87be 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -25,7 +25,6 @@ #include #include "ocelot_rew.h" #include "ocelot_qs.h" -#include "ocelot_tc.h" #define OCELOT_BUFFER_CELL_SZ 60 @@ -49,6 +48,13 @@ struct ocelot_multicast { u16 ports; }; +struct ocelot_port_tc { + bool block_shared; + unsigned long offload_cnt; + + unsigned long police_id; +}; + struct ocelot_port_private { struct ocelot_port port; struct net_device *dev; @@ -60,6 +66,43 @@ struct ocelot_port_private { struct ocelot_port_tc tc; }; +struct ocelot_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +/* MAC table entry types. + * ENTRYTYPE_NORMAL is subject to aging. + * ENTRYTYPE_LOCKED is not subject to aging. + * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast. + * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast. 
+ */ +enum macaccess_entry_type { + ENTRYTYPE_NORMAL = 0, + ENTRYTYPE_LOCKED, + ENTRYTYPE_MACv4, + ENTRYTYPE_MACv6, +}; + +int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data); +int ocelot_mact_learn(struct ocelot *ocelot, int port, + const unsigned char mac[ETH_ALEN], + unsigned int vid, enum macaccess_entry_type type); +int ocelot_mact_forget(struct ocelot *ocelot, + const unsigned char mac[ETH_ALEN], unsigned int vid); +int ocelot_port_lag_join(struct ocelot *ocelot, int port, + struct net_device *bond); +void ocelot_port_lag_leave(struct ocelot *ocelot, int port, + struct net_device *bond); +int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb); +int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans); + u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_ace.c index 1dd881340067..dbfb2666e211 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ b/drivers/net/ethernet/mscc/ocelot_ace.c @@ -843,6 +843,30 @@ int ocelot_ace_rule_offload_add(struct ocelot *ocelot, return 0; } +int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) +{ + struct qos_policer_conf pp = { 0 }; + + if (!pol) + return -EINVAL; + + pp.mode = MSCC_QOS_RATE_MODE_DATA; + pp.pir = pol->rate; + pp.pbs = pol->burst; + + return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); +} + +int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) +{ + struct qos_policer_conf pp = { 0 }; + + pp.mode = MSCC_QOS_RATE_MODE_DISABLED; + + return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); +} + static void ocelot_ace_police_del(struct ocelot *ocelot, struct ocelot_acl_block *block, u32 ix) diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index 99338d27aec0..ad4e8e0d62a4 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -249,25 +249,3 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, return 0; } EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats); - -int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, - struct flow_cls_offload *f, - bool ingress) -{ - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - if (!ingress) - return -EOPNOTSUPP; - - switch (f->command) { - case FLOW_CLS_REPLACE: - return ocelot_cls_flower_replace(ocelot, port, f, ingress); - case FLOW_CLS_DESTROY: - return ocelot_cls_flower_destroy(ocelot, port, f, ingress); - case FLOW_CLS_STATS: - return ocelot_cls_flower_stats(ocelot, port, f, ingress); - default: - return -EOPNOTSUPP; - } -} diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c new file mode 100644 index 000000000000..1ce444dff983 --- /dev/null +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -0,0 +1,1031 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Microsemi Ocelot Switch driver + * + * Copyright (c) 2017, 2019 Microsemi Corporation + */ + +#include +#include "ocelot.h" +#include "ocelot_ace.h" + +int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, + struct flow_cls_offload *f, + bool ingress) +{ + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + if (!ingress) + return -EOPNOTSUPP; + + switch (f->command) { + case 
FLOW_CLS_REPLACE: + return ocelot_cls_flower_replace(ocelot, port, f, ingress); + case FLOW_CLS_DESTROY: + return ocelot_cls_flower_destroy(ocelot, port, f, ingress); + case FLOW_CLS_STATS: + return ocelot_cls_flower_stats(ocelot, port, f, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, + struct tc_cls_matchall_offload *f, + bool ingress) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_policer pol = { 0 }; + struct flow_action_entry *action; + int port = priv->chip_port; + int err; + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported"); + return -EOPNOTSUPP; + } + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + if (!flow_offload_has_one_action(&f->rule->action)) { + NL_SET_ERR_MSG_MOD(extack, + "Only one action is supported"); + return -EOPNOTSUPP; + } + + if (priv->tc.block_shared) { + NL_SET_ERR_MSG_MOD(extack, + "Rate limit is not supported on shared blocks"); + return -EOPNOTSUPP; + } + + action = &f->rule->action.entries[0]; + + if (action->id != FLOW_ACTION_POLICE) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); + return -EOPNOTSUPP; + } + + if (priv->tc.police_id && priv->tc.police_id != f->cookie) { + NL_SET_ERR_MSG_MOD(extack, + "Only one policer per port is supported"); + return -EEXIST; + } + + pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; + pol.burst = (u32)div_u64(action->police.rate_bytes_ps * + PSCHED_NS2TICKS(action->police.burst), + PSCHED_TICKS_PER_SEC); + + err = ocelot_port_policer_add(ocelot, port, &pol); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Could not add policer"); + return err; + } + + priv->tc.police_id = f->cookie; + priv->tc.offload_cnt++; + return 0; + case TC_CLSMATCHALL_DESTROY: + if (priv->tc.police_id != f->cookie) + return -ENOENT; + + err = ocelot_port_policer_del(ocelot, port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Could not delete policer"); + return err; + } + priv->tc.police_id = 0; + priv->tc.offload_cnt--; + return 0; + case TC_CLSMATCHALL_STATS: + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv, bool ingress) +{ + struct ocelot_port_private *priv = cb_priv; + + if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return ocelot_setup_tc_cls_matchall(priv, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return ocelot_setup_tc_cls_flower(priv, type_data, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return ocelot_setup_tc_block_cb(type, type_data, + cb_priv, true); +} + +static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type, + void *type_data, + void *cb_priv) +{ + return ocelot_setup_tc_block_cb(type, type_data, + cb_priv, false); +} + +static LIST_HEAD(ocelot_block_cb_list); + +static int ocelot_setup_tc_block(struct ocelot_port_private *priv, + struct flow_block_offload *f) +{ + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { + cb = ocelot_setup_tc_block_cb_ig; + priv->tc.block_shared = f->block_shared; + } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { + cb = ocelot_setup_tc_block_cb_eg; + } else { + return -EOPNOTSUPP; + } + + f->driver_block_list = 
&ocelot_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list)) + return -EBUSY; + + block_cb = flow_block_cb_alloc(cb, priv, priv, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, f->driver_block_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, priv); + if (!block_cb) + return -ENOENT; + + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + switch (type) { + case TC_SETUP_BLOCK: + return ocelot_setup_tc_block(priv, type_data); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static void ocelot_port_adjust_link(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_adjust_link(ocelot, port, dev->phydev); +} + +static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, + bool untagged) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int ret; + + ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged); + if (ret) + return ret; + + /* Add the port MAC address to with the right VLAN information */ + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid, + ENTRYTYPE_LOCKED); + + return 0; +} + +static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + int ret; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. 
+ */ + if (vid == 0) + return 0; + + ret = ocelot_vlan_del(ocelot, port, vid); + if (ret) + return ret; + + /* Del the port MAC address to with the right VLAN information */ + ocelot_mact_forget(ocelot, dev->dev_addr, vid); + + return 0; +} + +static int ocelot_port_open(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int err; + + if (priv->serdes) { + err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET, + ocelot_port->phy_mode); + if (err) { + netdev_err(dev, "Could not set mode of SerDes\n"); + return err; + } + } + + err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link, + ocelot_port->phy_mode); + if (err) { + netdev_err(dev, "Could not attach to PHY\n"); + return err; + } + + dev->phydev = priv->phy; + + phy_attached_info(priv->phy); + phy_start(priv->phy); + + ocelot_port_enable(ocelot, port, priv->phy); + + return 0; +} + +static int ocelot_port_stop(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + phy_disconnect(priv->phy); + + dev->phydev = NULL; + + ocelot_port_disable(ocelot, port); + + return 0; +} + +/* Generate the IFH for frame injection + * + * The IFH is a 128bit-value + * bit 127: bypass the analyzer processing + * bit 56-67: destination mask + * bit 28-29: pop_cnt: 3 disables all rewriting of the frame + * bit 20-27: cpu extraction queue mask + * bit 16: tag type 0: C-tag, 1: S-tag + * bit 0-11: VID + */ +static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info) +{ + ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21); + ifh[1] = (0xf00 & info->port) >> 8; + ifh[2] = (0xff & info->port) << 24; + ifh[3] = (info->tag_type << 16) | info->vid; + + return 0; +} + +static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct skb_shared_info *shinfo = skb_shinfo(skb); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + u32 val, ifh[OCELOT_TAG_LEN / 4]; + struct frame_info info = {}; + u8 grp = 0; /* Send everything on CPU group 0 */ + unsigned int i, count, last; + int port = priv->chip_port; + + val = ocelot_read(ocelot, QS_INJ_STATUS); + if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || + (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))) + return NETDEV_TX_BUSY; + + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp); + + info.port = BIT(port); + info.tag_type = IFH_TAG_TYPE_C; + info.vid = skb_vlan_tag_get(skb); + + /* Check if timestamping is needed */ + if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { + info.rew_op = ocelot_port->ptp_cmd; + if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + info.rew_op |= (ocelot_port->ts_id % 4) << 3; + } + + ocelot_gen_ifh(ifh, &info); + + for (i = 0; i < OCELOT_TAG_LEN / 4; i++) + ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]), + QS_INJ_WR, grp); + + count = (skb->len + 3) / 4; + last = skb->len % 4; + for (i = 0; i < count; i++) + ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp); + + /* Add padding */ + while (i < (OCELOT_BUFFER_CELL_SZ / 4)) { + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + i++; + } + + /* Indicate EOF and valid bytes in last word */ + ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) | + QS_INJ_CTRL_VLD_BYTES(skb->len < 
OCELOT_BUFFER_CELL_SZ ? 0 : last) | + QS_INJ_CTRL_EOF, + QS_INJ_CTRL, grp); + + /* Add dummy CRC */ + ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp); + skb_tx_timestamp(skb); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) { + ocelot_port->ts_id++; + return NETDEV_TX_OK; + } + + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + + return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid); +} + +static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + + return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid, + ENTRYTYPE_LOCKED); +} + +static void ocelot_set_rx_mode(struct net_device *dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + u32 val; + int i; + + /* This doesn't handle promiscuous mode because the bridge core is + * setting IFF_PROMISC on all slave interfaces and all frames would be + * forwarded to the CPU port. + */ + val = GENMASK(ocelot->num_phys_ports - 1, 0); + for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); + + __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); +} + +static int ocelot_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + int port = priv->chip_port; + int ret; + + ret = snprintf(buf, len, "p%d", port); + if (ret >= len) + return -EINVAL; + + return 0; +} + +static int ocelot_port_set_mac_address(struct net_device *dev, void *p) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + const struct sockaddr *addr = p; + + /* Learn the new net device MAC address in the mac table. */ + ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid, + ENTRYTYPE_LOCKED); + /* Then forget the previous one. 
*/ + ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid); + + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; +} + +static void ocelot_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + /* Configure the port to read the stats from */ + ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), + SYS_STAT_CFG); + + /* Get Rx stats */ + stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS); + stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) + + ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) + + ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) + + ocelot_read(ocelot, SYS_COUNT_RX_LONGS) + + ocelot_read(ocelot, SYS_COUNT_RX_64) + + ocelot_read(ocelot, SYS_COUNT_RX_65_127) + + ocelot_read(ocelot, SYS_COUNT_RX_128_255) + + ocelot_read(ocelot, SYS_COUNT_RX_256_1023) + + ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX); + stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST); + stats->rx_dropped = dev->stats.rx_dropped; + + /* Get Tx stats */ + stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS); + stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) + + ocelot_read(ocelot, SYS_COUNT_TX_65_127) + + ocelot_read(ocelot, SYS_COUNT_TX_128_511) + + ocelot_read(ocelot, SYS_COUNT_TX_512_1023) + + ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) + + ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX); + stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) + + ocelot_read(ocelot, SYS_COUNT_TX_AGING); + stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION); +} + +static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u16 flags, + struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return ocelot_fdb_add(ocelot, port, addr, vid); +} + +static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return ocelot_fdb_del(ocelot, port, addr, vid); +} + +static int ocelot_port_fdb_dump(struct sk_buff *skb, + struct netlink_callback *cb, + struct net_device *dev, + struct net_device *filter_dev, int *idx) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + struct ocelot_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int port = priv->chip_port; + int ret; + + ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump); + + *idx = dump.idx; + + return ret; +} + +static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_add(dev, vid, false, false); +} + +static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + return ocelot_vlan_vid_del(dev, vid); +} + +static void ocelot_vlan_mode(struct ocelot *ocelot, int port, + netdev_features_t features) +{ + u32 val; + + /* Filtering */ + val = ocelot_read(ocelot, ANA_VLANMASK); + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + val |= BIT(port); + else + val &= ~BIT(port); + ocelot_write(ocelot, val, ANA_VLANMASK); +} + +static int ocelot_set_features(struct 
net_device *dev, + netdev_features_t features) +{ + netdev_features_t changed = dev->features ^ features; + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && + priv->tc.offload_cnt) { + netdev_err(dev, + "Cannot disable HW TC offload while offloads active\n"); + return -EBUSY; + } + + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) + ocelot_vlan_mode(ocelot, port, features); + + return 0; +} + +static int ocelot_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + + ppid->id_len = sizeof(ocelot->base_mac); + memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len); + + return 0; +} + +static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + /* If the attached PHY device isn't capable of timestamping operations, + * use our own (when possible). + */ + if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) { + switch (cmd) { + case SIOCSHWTSTAMP: + return ocelot_hwstamp_set(ocelot, port, ifr); + case SIOCGHWTSTAMP: + return ocelot_hwstamp_get(ocelot, port, ifr); + } + } + + return phy_mii_ioctl(dev->phydev, ifr, cmd); +} + +static const struct net_device_ops ocelot_port_netdev_ops = { + .ndo_open = ocelot_port_open, + .ndo_stop = ocelot_port_stop, + .ndo_start_xmit = ocelot_port_xmit, + .ndo_set_rx_mode = ocelot_set_rx_mode, + .ndo_get_phys_port_name = ocelot_port_get_phys_port_name, + .ndo_set_mac_address = ocelot_port_set_mac_address, + .ndo_get_stats64 = ocelot_get_stats64, + .ndo_fdb_add = ocelot_port_fdb_add, + .ndo_fdb_del = ocelot_port_fdb_del, + .ndo_fdb_dump = ocelot_port_fdb_dump, + .ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, + .ndo_set_features = ocelot_set_features, + .ndo_get_port_parent_id = ocelot_get_port_parent_id, + .ndo_setup_tc = ocelot_setup_tc, + .ndo_do_ioctl = ocelot_ioctl, +}; + +static void ocelot_port_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + struct ocelot_port_private *priv = netdev_priv(netdev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_get_strings(ocelot, port, sset, data); +} + +static void ocelot_port_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_get_ethtool_stats(ocelot, port, data); +} + +static int ocelot_port_get_sset_count(struct net_device *dev, int sset) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + return ocelot_get_sset_count(ocelot, port, sset); +} + +static int ocelot_port_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + if (!ocelot->ptp) + return ethtool_op_get_ts_info(dev, info); + + return ocelot_get_ts_info(ocelot, port, info); +} + +static const struct ethtool_ops ocelot_ethtool_ops = { + .get_strings = ocelot_port_get_strings, + .get_ethtool_stats = ocelot_port_get_ethtool_stats, + 
.get_sset_count = ocelot_port_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_ts_info = ocelot_port_get_ts_info, +}; + +static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port, + struct switchdev_trans *trans, + u8 state) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + + ocelot_bridge_stp_state_set(ocelot, port, state); +} + +static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port, + unsigned long ageing_clock_t) +{ + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); + + ocelot_set_ageing_time(ocelot, ageing_time); +} + +static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc) +{ + u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA | + ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA; + u32 val = 0; + + if (mc) + val = cpu_fwd_mcast; + + ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast, + ANA_PORT_CPU_FWD_CFG, port); +} + +static int ocelot_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ocelot_port_attr_stp_state_set(ocelot, port, trans, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + ocelot_port_vlan_filtering(ocelot, port, + attr->u.vlan_filtering); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: + ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int ocelot_port_obj_add_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_add(dev, vid, + vlan->flags & BRIDGE_VLAN_INFO_PVID, + vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_vlan_del_vlan(struct net_device *dev, + const struct switchdev_obj_port_vlan *vlan) +{ + int ret; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + ret = ocelot_vlan_vid_del(dev, vid); + + if (ret) + return ret; + } + + return 0; +} + +static int ocelot_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + int ret = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_obj_add_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj), + trans); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +static int ocelot_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + int ret = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + ret = ocelot_port_vlan_del_vlan(dev, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + default: + return -EOPNOTSUPP; + } + + return ret; +} + +/* 
Checks if the net_device instance given to us originate from our driver. */ +static bool ocelot_netdevice_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &ocelot_port_netdev_ops; +} + +static int ocelot_netdevice_port_event(struct net_device *dev, + unsigned long event, + struct netdev_notifier_changeupper_info *info) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + int err = 0; + + switch (event) { + case NETDEV_CHANGEUPPER: + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) { + err = ocelot_port_bridge_join(ocelot, port, + info->upper_dev); + } else { + err = ocelot_port_bridge_leave(ocelot, port, + info->upper_dev); + } + } + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) + err = ocelot_port_lag_join(ocelot, port, + info->upper_dev); + else + ocelot_port_lag_leave(ocelot, port, + info->upper_dev); + } + break; + default: + break; + } + + return err; +} + +static int ocelot_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int ret = 0; + + if (!ocelot_netdevice_dev_check(dev)) + return 0; + + if (event == NETDEV_PRECHANGEUPPER && + netif_is_lag_master(info->upper_dev)) { + struct netdev_lag_upper_info *lag_upper_info = info->upper_info; + struct netlink_ext_ack *extack; + + if (lag_upper_info && + lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + extack = netdev_notifier_info_to_extack(&info->info); + NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); + + ret = -EINVAL; + goto notify; + } + } + + if (netif_is_lag_master(dev)) { + struct net_device *slave; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, slave, iter) { + ret = ocelot_netdevice_port_event(slave, event, info); + if (ret) + goto notify; + } + } else { + ret = ocelot_netdevice_port_event(dev, event, info); + } + +notify: + return notifier_from_errno(ret); +} + +struct notifier_block ocelot_netdevice_nb __read_mostly = { + .notifier_call = ocelot_netdevice_event, +}; +EXPORT_SYMBOL(ocelot_netdevice_nb); + +static int ocelot_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_nb __read_mostly = { + .notifier_call = ocelot_switchdev_event, +}; +EXPORT_SYMBOL(ocelot_switchdev_nb); + +static int ocelot_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + /* Blocking events. 
*/ + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { + .notifier_call = ocelot_switchdev_blocking_event, +}; +EXPORT_SYMBOL(ocelot_switchdev_blocking_nb); + +int ocelot_probe_port(struct ocelot *ocelot, u8 port, + void __iomem *regs, + struct phy_device *phy) +{ + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct net_device *dev; + int err; + + dev = alloc_etherdev(sizeof(struct ocelot_port_private)); + if (!dev) + return -ENOMEM; + SET_NETDEV_DEV(dev, ocelot->dev); + priv = netdev_priv(dev); + priv->dev = dev; + priv->phy = phy; + priv->chip_port = port; + ocelot_port = &priv->port; + ocelot_port->ocelot = ocelot; + ocelot_port->regs = regs; + ocelot->ports[port] = ocelot_port; + + dev->netdev_ops = &ocelot_port_netdev_ops; + dev->ethtool_ops = &ocelot_ethtool_ops; + + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS | + NETIF_F_HW_TC; + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + + memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN); + dev->dev_addr[ETH_ALEN - 1] += port; + ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid, + ENTRYTYPE_LOCKED); + + ocelot_init_port(ocelot, port); + + err = register_netdev(dev); + if (err) { + dev_err(ocelot->dev, "register_netdev failed\n"); + free_netdev(dev); + } + + return err; +} +EXPORT_SYMBOL(ocelot_probe_port); diff --git a/drivers/net/ethernet/mscc/ocelot_police.c b/drivers/net/ethernet/mscc/ocelot_police.c index 2e1d8e187332..6f5068c1041a 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.c +++ b/drivers/net/ethernet/mscc/ocelot_police.c @@ -7,16 +7,6 @@ #include #include "ocelot_police.h" -enum mscc_qos_rate_mode { - MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */ - MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */ - MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. IPG */ - MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */ - __MSCC_QOS_RATE_MODE_END, - NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END, - MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1, -}; - /* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */ #define POL_MODE_LINERATE 0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */ #define POL_MODE_DATARATE 1 /* Excl IPG. 
Unit: 33 1/3 kbps, 4096 bytes */ @@ -30,19 +20,8 @@ enum mscc_qos_rate_mode { /* Default policer order */ #define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */ -struct qos_policer_conf { - enum mscc_qos_rate_mode mode; - bool dlb; /* Enable DLB (dual leaky bucket mode */ - bool cf; /* Coupling flag (ignored in SLB mode) */ - u32 cir; /* CIR in kbps/fps (ignored in SLB mode) */ - u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */ - u32 pir; /* PIR in kbps/fps */ - u32 pbs; /* PBS in bytes/frames */ - u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */ -}; - -static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, - struct qos_policer_conf *conf) +int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, + struct qos_policer_conf *conf) { u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE; u32 cir = 0, cbs = 0, pir = 0, pbs = 0; @@ -228,27 +207,3 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port) return 0; } EXPORT_SYMBOL(ocelot_port_policer_del); - -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) -{ - struct qos_policer_conf pp = { 0 }; - - if (!pol) - return -EINVAL; - - pp.mode = MSCC_QOS_RATE_MODE_DATA; - pp.pir = pol->rate; - pp.pbs = pol->burst; - - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); -} - -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) -{ - struct qos_policer_conf pp = { 0 }; - - pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); -} diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 792abd28010a..79d18442aa9b 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -9,6 +9,30 @@ #include "ocelot.h" +enum mscc_qos_rate_mode { + MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */ + MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */ + MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. 
IPG */ + MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */ + __MSCC_QOS_RATE_MODE_END, + NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END, + MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1, +}; + +struct qos_policer_conf { + enum mscc_qos_rate_mode mode; + bool dlb; /* Enable DLB (dual leaky bucket mode */ + bool cf; /* Coupling flag (ignored in SLB mode) */ + u32 cir; /* CIR in kbps/fps (ignored in SLB mode) */ + u32 cbs; /* CBS in bytes/frames (ignored in SLB mode) */ + u32 pir; /* PIR in kbps/fps */ + u32 pbs; /* PBS in bytes/frames */ + u8 ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */ +}; + +int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, + struct qos_policer_conf *conf); + int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, struct ocelot_policer *pol); diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c deleted file mode 100644 index b7baf7624e18..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_tc.c +++ /dev/null @@ -1,179 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0 OR MIT) -/* Microsemi Ocelot Switch TC driver - * - * Copyright (c) 2019 Microsemi Corporation - */ - -#include -#include "ocelot_tc.h" -#include "ocelot_ace.h" -#include - -static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, - struct tc_cls_matchall_offload *f, - bool ingress) -{ - struct netlink_ext_ack *extack = f->common.extack; - struct ocelot *ocelot = priv->port.ocelot; - struct ocelot_policer pol = { 0 }; - struct flow_action_entry *action; - int port = priv->chip_port; - int err; - - if (!ingress) { - NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported"); - return -EOPNOTSUPP; - } - - switch (f->command) { - case TC_CLSMATCHALL_REPLACE: - if (!flow_offload_has_one_action(&f->rule->action)) { - NL_SET_ERR_MSG_MOD(extack, - "Only one action is supported"); - return -EOPNOTSUPP; - } - - if (priv->tc.block_shared) { - NL_SET_ERR_MSG_MOD(extack, - "Rate limit is not supported on shared blocks"); - return -EOPNOTSUPP; - } - - action = &f->rule->action.entries[0]; - - if (action->id != FLOW_ACTION_POLICE) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); - return -EOPNOTSUPP; - } - - if (priv->tc.police_id && priv->tc.police_id != f->cookie) { - NL_SET_ERR_MSG_MOD(extack, - "Only one policer per port is supported"); - return -EEXIST; - } - - pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; - pol.burst = (u32)div_u64(action->police.rate_bytes_ps * - PSCHED_NS2TICKS(action->police.burst), - PSCHED_TICKS_PER_SEC); - - err = ocelot_port_policer_add(ocelot, port, &pol); - if (err) { - NL_SET_ERR_MSG_MOD(extack, "Could not add policer"); - return err; - } - - priv->tc.police_id = f->cookie; - priv->tc.offload_cnt++; - return 0; - case TC_CLSMATCHALL_DESTROY: - if (priv->tc.police_id != f->cookie) - return -ENOENT; - - err = ocelot_port_policer_del(ocelot, port); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "Could not delete policer"); - return err; - } - priv->tc.police_id = 0; - priv->tc.offload_cnt--; - return 0; - case TC_CLSMATCHALL_STATS: /* fall through */ - default: - return -EOPNOTSUPP; - } -} - -static int ocelot_setup_tc_block_cb(enum tc_setup_type type, - void *type_data, - void *cb_priv, bool ingress) -{ - struct ocelot_port_private *priv = cb_priv; - - if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSMATCHALL: - return ocelot_setup_tc_cls_matchall(priv, type_data, ingress); - case 
TC_SETUP_CLSFLOWER: - return ocelot_setup_tc_cls_flower(priv, type_data, ingress); - default: - return -EOPNOTSUPP; - } -} - -static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type, - void *type_data, - void *cb_priv) -{ - return ocelot_setup_tc_block_cb(type, type_data, - cb_priv, true); -} - -static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type, - void *type_data, - void *cb_priv) -{ - return ocelot_setup_tc_block_cb(type, type_data, - cb_priv, false); -} - -static LIST_HEAD(ocelot_block_cb_list); - -static int ocelot_setup_tc_block(struct ocelot_port_private *priv, - struct flow_block_offload *f) -{ - struct flow_block_cb *block_cb; - flow_setup_cb_t *cb; - - if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) { - cb = ocelot_setup_tc_block_cb_ig; - priv->tc.block_shared = f->block_shared; - } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) { - cb = ocelot_setup_tc_block_cb_eg; - } else { - return -EOPNOTSUPP; - } - - f->driver_block_list = &ocelot_block_cb_list; - - switch (f->command) { - case FLOW_BLOCK_BIND: - if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list)) - return -EBUSY; - - block_cb = flow_block_cb_alloc(cb, priv, priv, NULL); - if (IS_ERR(block_cb)) - return PTR_ERR(block_cb); - - flow_block_cb_add(block_cb, f); - list_add_tail(&block_cb->driver_list, f->driver_block_list); - return 0; - case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f->block, cb, priv); - if (!block_cb) - return -ENOENT; - - flow_block_cb_remove(block_cb, f); - list_del(&block_cb->driver_list); - return 0; - default: - return -EOPNOTSUPP; - } -} - -int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - - switch (type) { - case TC_SETUP_BLOCK: - return ocelot_setup_tc_block(priv, type_data); - default: - return -EOPNOTSUPP; - } - return 0; -} diff --git a/drivers/net/ethernet/mscc/ocelot_tc.h b/drivers/net/ethernet/mscc/ocelot_tc.h deleted file mode 100644 index 61757c2250a6..000000000000 --- a/drivers/net/ethernet/mscc/ocelot_tc.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ -/* Microsemi Ocelot Switch driver - * - * Copyright (c) 2019 Microsemi Corporation - */ - -#ifndef _MSCC_OCELOT_TC_H_ -#define _MSCC_OCELOT_TC_H_ - -#include - -struct ocelot_port_tc { - bool block_shared; - unsigned long offload_cnt; - - unsigned long police_id; -}; - -int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data); - -#endif /* _MSCC_OCELOT_TC_H_ */ From 3c83654f246b980cc65f72f5c7b1501470082ede Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:45 +0300 Subject: [PATCH 0140/2056] net: mscc: ocelot: rename ocelot_ace.{c, h} to ocelot_vcap.{c,h} Access Control Lists (and their respective Access Control Entries) are specifically entries in the VCAP IS2, the security enforcement block, according to the documentation. Let's rename the files that deal with generic operations on the VCAP TCAM, so that VCAP IS1 and ES0 can reuse the same code without confusion. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/Makefile | 2 +- drivers/net/ethernet/mscc/ocelot.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 2 +- drivers/net/ethernet/mscc/ocelot_net.c | 2 +- drivers/net/ethernet/mscc/{ocelot_ace.c => ocelot_vcap.c} | 2 +- drivers/net/ethernet/mscc/{ocelot_ace.h => ocelot_vcap.h} | 0 6 files changed, 5 insertions(+), 5 deletions(-) rename drivers/net/ethernet/mscc/{ocelot_ace.c => ocelot_vcap.c} (99%) rename drivers/net/ethernet/mscc/{ocelot_ace.h => ocelot_vcap.h} (100%) diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile index 7ab3bc25ed27..58f94c3d80f9 100644 --- a/drivers/net/ethernet/mscc/Makefile +++ b/drivers/net/ethernet/mscc/Makefile @@ -4,7 +4,7 @@ mscc_ocelot_switch_lib-y := \ ocelot.o \ ocelot_io.o \ ocelot_police.o \ - ocelot_ace.o \ + ocelot_vcap.o \ ocelot_flower.o \ ocelot_ptp.o obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 5c2b5a2e8608..d4ad7ffe6f6e 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -6,7 +6,7 @@ */ #include #include "ocelot.h" -#include "ocelot_ace.h" +#include "ocelot_vcap.h" #define TABLE_UPDATE_SLEEP_US 10 #define TABLE_UPDATE_TIMEOUT_US 100000 diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index ad4e8e0d62a4..d57d6948ebf2 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -6,7 +6,7 @@ #include #include -#include "ocelot_ace.h" +#include "ocelot_vcap.h" static int ocelot_flower_parse_action(struct flow_cls_offload *f, struct ocelot_ace_rule *ace) diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 1ce444dff983..80cb1873e9d9 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -6,7 +6,7 @@ #include #include "ocelot.h" -#include "ocelot_ace.h" +#include "ocelot_vcap.h" int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, struct flow_cls_offload *f, diff --git a/drivers/net/ethernet/mscc/ocelot_ace.c b/drivers/net/ethernet/mscc/ocelot_vcap.c similarity index 99% rename from drivers/net/ethernet/mscc/ocelot_ace.c rename to drivers/net/ethernet/mscc/ocelot_vcap.c index dbfb2666e211..33b5b015e8a7 100644 --- a/drivers/net/ethernet/mscc/ocelot_ace.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -8,7 +8,7 @@ #include #include "ocelot_police.h" -#include "ocelot_ace.h" +#include "ocelot_vcap.h" #include "ocelot_s2.h" #define OCELOT_POLICER_DISCARD 0x17f diff --git a/drivers/net/ethernet/mscc/ocelot_ace.h b/drivers/net/ethernet/mscc/ocelot_vcap.h similarity index 100% rename from drivers/net/ethernet/mscc/ocelot_ace.h rename to drivers/net/ethernet/mscc/ocelot_vcap.h From aae4e500e106d2ce48d5bdb21210e36efc7460cb Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:46 +0300 Subject: [PATCH 0141/2056] net: mscc: ocelot: generalize the "ACE/ACL" names Access Control Lists (and their respective Access Control Entries) are specifically entries in the VCAP IS2, the security enforcement block, according to the documentation. Let's rename the structures and functions to something more generic, so that VCAP IS1 structures (which would otherwise have to be called Ingress Classification Entries) can reuse the same code without confusion. 
Some renaming that was done: struct ocelot_ace_rule -> struct ocelot_vcap_filter struct ocelot_acl_block -> struct ocelot_vcap_block enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_vlan -> struct ocelot_vcap_key_vlan enum ocelot_ace_action -> enum ocelot_vcap_action struct ocelot_ace_stats -> struct ocelot_vcap_stats enum ocelot_ace_type -> enum ocelot_vcap_key_type struct ocelot_ace_frame_* -> struct ocelot_vcap_key_* No functional change is intended. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 2 +- drivers/net/ethernet/mscc/ocelot_flower.c | 122 ++++----- drivers/net/ethernet/mscc/ocelot_police.h | 6 +- drivers/net/ethernet/mscc/ocelot_vcap.c | 286 +++++++++++----------- drivers/net/ethernet/mscc/ocelot_vcap.h | 88 +++---- include/soc/mscc/ocelot.h | 4 +- 6 files changed, 257 insertions(+), 251 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index d4ad7ffe6f6e..52b180280d2f 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1392,7 +1392,7 @@ int ocelot_init(struct ocelot *ocelot) INIT_LIST_HEAD(&ocelot->multicast); ocelot_mact_init(ocelot); ocelot_vlan_init(ocelot); - ocelot_ace_init(ocelot); + ocelot_vcap_init(ocelot); for (port = 0; port < ocelot->num_phys_ports; port++) { /* Clear all counters (5 groups) */ diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index d57d6948ebf2..f2a85b06a6e7 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -9,7 +9,7 @@ #include "ocelot_vcap.h" static int ocelot_flower_parse_action(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct flow_action_entry *a; s64 burst; @@ -26,17 +26,17 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, flow_action_for_each(i, a, &f->rule->action) { switch (a->id) { case FLOW_ACTION_DROP: - ace->action = OCELOT_ACL_ACTION_DROP; + filter->action = OCELOT_VCAP_ACTION_DROP; break; case FLOW_ACTION_TRAP: - ace->action = OCELOT_ACL_ACTION_TRAP; + filter->action = OCELOT_VCAP_ACTION_TRAP; break; case FLOW_ACTION_POLICE: - ace->action = OCELOT_ACL_ACTION_POLICE; + filter->action = OCELOT_VCAP_ACTION_POLICE; rate = a->police.rate_bytes_ps; - ace->pol.rate = div_u64(rate, 1000) * 8; + filter->pol.rate = div_u64(rate, 1000) * 8; burst = rate * PSCHED_NS2TICKS(a->police.burst); - ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); + filter->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); break; default: return -EOPNOTSUPP; @@ -47,7 +47,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, } static int ocelot_flower_parse(struct flow_cls_offload *f, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_dissector *dissector = rule->match.dissector; @@ -88,14 +88,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, return -EOPNOTSUPP; flow_rule_match_eth_addrs(rule, &match); - ace->type = OCELOT_ACE_TYPE_ETYPE; - ether_addr_copy(ace->frame.etype.dmac.value, + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + ether_addr_copy(filter->key.etype.dmac.value, match.key->dst); - ether_addr_copy(ace->frame.etype.smac.value, + ether_addr_copy(filter->key.etype.smac.value, match.key->src); - ether_addr_copy(ace->frame.etype.dmac.mask, + ether_addr_copy(filter->key.etype.dmac.mask, 
match.mask->dst); - ether_addr_copy(ace->frame.etype.smac.mask, + ether_addr_copy(filter->key.etype.smac.mask, match.mask->src); goto finished_key_parsing; } @@ -105,18 +105,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, flow_rule_match_basic(rule, &match); if (ntohs(match.key->n_proto) == ETH_P_IP) { - ace->type = OCELOT_ACE_TYPE_IPV4; - ace->frame.ipv4.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV4; + filter->key.ipv4.proto.value[0] = match.key->ip_proto; - ace->frame.ipv4.proto.mask[0] = + filter->key.ipv4.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } if (ntohs(match.key->n_proto) == ETH_P_IPV6) { - ace->type = OCELOT_ACE_TYPE_IPV6; - ace->frame.ipv6.proto.value[0] = + filter->key_type = OCELOT_VCAP_KEY_IPV6; + filter->key.ipv6.proto.value[0] = match.key->ip_proto; - ace->frame.ipv6.proto.mask[0] = + filter->key.ipv6.proto.mask[0] = match.mask->ip_proto; match_protocol = false; } @@ -128,16 +128,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, u8 *tmp; flow_rule_match_ipv4_addrs(rule, &match); - tmp = &ace->frame.ipv4.sip.value.addr[0]; + tmp = &filter->key.ipv4.sip.value.addr[0]; memcpy(tmp, &match.key->src, 4); - tmp = &ace->frame.ipv4.sip.mask.addr[0]; + tmp = &filter->key.ipv4.sip.mask.addr[0]; memcpy(tmp, &match.mask->src, 4); - tmp = &ace->frame.ipv4.dip.value.addr[0]; + tmp = &filter->key.ipv4.dip.value.addr[0]; memcpy(tmp, &match.key->dst, 4); - tmp = &ace->frame.ipv4.dip.mask.addr[0]; + tmp = &filter->key.ipv4.dip.mask.addr[0]; memcpy(tmp, &match.mask->dst, 4); match_protocol = false; } @@ -151,10 +151,10 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - ace->frame.ipv4.sport.value = ntohs(match.key->src); - ace->frame.ipv4.sport.mask = ntohs(match.mask->src); - ace->frame.ipv4.dport.value = ntohs(match.key->dst); - ace->frame.ipv4.dport.mask = ntohs(match.mask->dst); + filter->key.ipv4.sport.value = ntohs(match.key->src); + filter->key.ipv4.sport.mask = ntohs(match.mask->src); + filter->key.ipv4.dport.value = ntohs(match.key->dst); + filter->key.ipv4.dport.mask = ntohs(match.mask->dst); match_protocol = false; } @@ -162,11 +162,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); - ace->type = OCELOT_ACE_TYPE_ANY; - ace->vlan.vid.value = match.key->vlan_id; - ace->vlan.vid.mask = match.mask->vlan_id; - ace->vlan.pcp.value[0] = match.key->vlan_priority; - ace->vlan.pcp.mask[0] = match.mask->vlan_priority; + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->vlan.vid.value = match.key->vlan_id; + filter->vlan.vid.mask = match.mask->vlan_id; + filter->vlan.pcp.value[0] = match.key->vlan_priority; + filter->vlan.pcp.mask[0] = match.mask->vlan_priority; match_protocol = false; } @@ -175,76 +175,76 @@ static int ocelot_flower_parse(struct flow_cls_offload *f, /* TODO: support SNAP, LLC etc */ if (proto < ETH_P_802_3_MIN) return -EOPNOTSUPP; - ace->type = OCELOT_ACE_TYPE_ETYPE; - *(__be16 *)ace->frame.etype.etype.value = htons(proto); - *(__be16 *)ace->frame.etype.etype.mask = htons(0xffff); + filter->key_type = OCELOT_VCAP_KEY_ETYPE; + *(__be16 *)filter->key.etype.etype.value = htons(proto); + *(__be16 *)filter->key.etype.etype.mask = htons(0xffff); } - /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */ + /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */ - ace->prio = f->common.prio; - ace->id = f->cookie; - return 
ocelot_flower_parse_action(f, ace); + filter->prio = f->common.prio; + filter->id = f->cookie; + return ocelot_flower_parse_action(f, filter); } -static -struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port, - struct flow_cls_offload *f) +static struct ocelot_vcap_filter +*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, + struct flow_cls_offload *f) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; - ace = kzalloc(sizeof(*ace), GFP_KERNEL); - if (!ace) + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) return NULL; - ace->ingress_port_mask = BIT(port); - return ace; + filter->ingress_port_mask = BIT(port); + return filter; } int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int ret; - ace = ocelot_ace_rule_create(ocelot, port, f); - if (!ace) + filter = ocelot_vcap_filter_create(ocelot, port, f); + if (!filter) return -ENOMEM; - ret = ocelot_flower_parse(f, ace); + ret = ocelot_flower_parse(f, filter); if (ret) { - kfree(ace); + kfree(filter); return ret; } - return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack); + return ocelot_vcap_filter_add(ocelot, filter, f->common.extack); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace); int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; - ace.prio = f->common.prio; - ace.id = f->cookie; + filter.prio = f->common.prio; + filter.id = f->cookie; - return ocelot_ace_rule_offload_del(ocelot, &ace); + return ocelot_vcap_filter_del(ocelot, &filter); } EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress) { - struct ocelot_ace_rule ace; + struct ocelot_vcap_filter filter; int ret; - ace.prio = f->common.prio; - ace.id = f->cookie; - ret = ocelot_ace_rule_stats_update(ocelot, &ace); + filter.prio = f->common.prio; + filter.id = f->cookie; + ret = ocelot_vcap_filter_stats_update(ocelot, &filter); if (ret) return ret; - flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0, 0x0, + flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0, FLOW_ACTION_HW_STATS_IMMEDIATE); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index 79d18442aa9b..be6f2286a5cd 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -33,9 +33,9 @@ struct qos_policer_conf { int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf); -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol); +int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol); -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix); +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix); #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 33b5b015e8a7..8597034fd3b7 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -302,10 +302,10 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, - struct ocelot_ace_rule *ace) + struct 
ocelot_vcap_filter *filter) { - switch (ace->action) { - case OCELOT_ACL_ACTION_DROP: + switch (filter->action) { + case OCELOT_VCAP_ACTION_DROP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); @@ -314,7 +314,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; - case OCELOT_ACL_ACTION_TRAP: + case OCELOT_VCAP_ACTION_TRAP: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0); @@ -322,12 +322,12 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1); break; - case OCELOT_ACL_ACTION_POLICE: + case OCELOT_VCAP_ACTION_POLICE: vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1); vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, - ace->pol_ix); + filter->pol_ix); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0); vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0); break; @@ -335,11 +335,11 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data, } static void is2_entry_set(struct ocelot *ocelot, int ix, - struct ocelot_ace_rule *ace) + struct ocelot_vcap_filter *filter) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; + struct ocelot_vcap_key_vlan *tag = &filter->vlan; u32 val, msk, type, type_mask = 0xf, i, count; - struct ocelot_ace_vlan *tag = &ace->vlan; struct ocelot_vcap_u64 payload; struct vcap_data data; int row = (ix / 2); @@ -355,19 +355,19 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, data.tg_sw = VCAP_TG_HALF; is2_data_get(ocelot, &data, ix); data.tg = (data.tg & ~data.tg_mask); - if (ace->prio != 0) + if (filter->prio != 0) data.tg |= data.tg_value; data.type = IS2_ACTION_TYPE_NORMAL; vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0); vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0, - ~ace->ingress_port_mask); + ~filter->ingress_port_mask); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH, OCELOT_VCAP_BIT_ANY); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc); - vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc); + vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged); vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID, tag->vid.value, tag->vid.mask); @@ -375,9 +375,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, tag->pcp.value[0], tag->pcp.mask[0]); vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei); - switch (ace->type) { - case OCELOT_ACE_TYPE_ETYPE: { - struct ocelot_ace_frame_etype *etype = &ace->frame.etype; + switch (filter->key_type) { + case OCELOT_VCAP_KEY_ETYPE: { + struct ocelot_vcap_key_etype *etype = &filter->key.etype; type = IS2_TYPE_ETYPE; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -398,8 +398,8 @@ static void is2_entry_set(struct ocelot *ocelot, 
int ix, etype->data.value, etype->data.mask); break; } - case OCELOT_ACE_TYPE_LLC: { - struct ocelot_ace_frame_llc *llc = &ace->frame.llc; + case OCELOT_VCAP_KEY_LLC: { + struct ocelot_vcap_key_llc *llc = &filter->key.llc; type = IS2_TYPE_LLC; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -414,8 +414,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, payload.value, payload.mask); break; } - case OCELOT_ACE_TYPE_SNAP: { - struct ocelot_ace_frame_snap *snap = &ace->frame.snap; + case OCELOT_VCAP_KEY_SNAP: { + struct ocelot_vcap_key_snap *snap = &filter->key.snap; type = IS2_TYPE_SNAP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC, @@ -423,12 +423,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC, snap->smac.value, snap->smac.mask); vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP, - ace->frame.snap.snap.value, - ace->frame.snap.snap.mask); + filter->key.snap.snap.value, + filter->key.snap.snap.mask); break; } - case OCELOT_ACE_TYPE_ARP: { - struct ocelot_ace_frame_arp *arp = &ace->frame.arp; + case OCELOT_VCAP_KEY_ARP: { + struct ocelot_vcap_key_arp *arp = &filter->key.arp; type = IS2_TYPE_ARP; vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC, @@ -469,20 +469,20 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, 0, 0); break; } - case OCELOT_ACE_TYPE_IPV4: - case OCELOT_ACE_TYPE_IPV6: { + case OCELOT_VCAP_KEY_IPV4: + case OCELOT_VCAP_KEY_IPV6: { enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp; enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg; enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh; - struct ocelot_ace_frame_ipv4 *ipv4 = NULL; - struct ocelot_ace_frame_ipv6 *ipv6 = NULL; + struct ocelot_vcap_key_ipv4 *ipv4 = NULL; + struct ocelot_vcap_key_ipv6 *ipv6 = NULL; struct ocelot_vcap_udp_tcp *sport, *dport; struct ocelot_vcap_ipv4 sip, dip; struct ocelot_vcap_u8 proto, ds; struct ocelot_vcap_u48 *ip_data; - if (ace->type == OCELOT_ACE_TYPE_IPV4) { - ipv4 = &ace->frame.ipv4; + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) { + ipv4 = &filter->key.ipv4; ttl = ipv4->ttl; fragment = ipv4->fragment; options = ipv4->options; @@ -503,7 +503,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, sport_eq_dport = ipv4->sport_eq_dport; seq_zero = ipv4->seq_zero; } else { - ipv6 = &ace->frame.ipv6; + ipv6 = &filter->key.ipv6; ttl = ipv6->ttl; fragment = OCELOT_VCAP_BIT_ANY; options = OCELOT_VCAP_BIT_ANY; @@ -607,7 +607,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } break; } - case OCELOT_ACE_TYPE_ANY: + case OCELOT_VCAP_KEY_ANY: default: type = 0; type_mask = 0; @@ -623,9 +623,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, } vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask); - is2_action_set(ocelot, &data, ace); + is2_action_set(ocelot, &data, filter); vcap_data_set(data.counter, data.counter_offset, - vcap_is2->counter_width, ace->stats.pkts); + vcap_is2->counter_width, filter->stats.pkts); /* Write row */ vcap_entry2cache(ocelot, &data); @@ -633,7 +633,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix, vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL); } -static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule, +static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter, int ix) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; @@ -648,55 +648,56 @@ static void is2_entry_get(struct ocelot *ocelot, struct 
ocelot_ace_rule *rule, cnt = vcap_data_get(data.counter, data.counter_offset, vcap_is2->counter_width); - rule->stats.pkts = cnt; + filter->stats.pkts = cnt; } -static void ocelot_ace_rule_add(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *n; - if (rule->action == OCELOT_ACL_ACTION_POLICE) { + if (filter->action == OCELOT_VCAP_ACTION_POLICE) { block->pol_lpr--; - rule->pol_ix = block->pol_lpr; - ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol); + filter->pol_ix = block->pol_lpr; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol); } block->count++; if (list_empty(&block->rules)) { - list_add(&rule->list, &block->rules); + list_add(&filter->list, &block->rules); return; } list_for_each_safe(pos, n, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (rule->prio < tmp->prio) + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (filter->prio < tmp->prio) break; } - list_add(&rule->list, pos->prev); + list_add(&filter->list, pos->prev); } -static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int index = -1; list_for_each_entry(tmp, &block->rules, list) { ++index; - if (rule->id == tmp->id) + if (filter->id == tmp->id) break; } return index; } -static struct ocelot_ace_rule* -ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index) +static struct ocelot_vcap_filter* +ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block, + int index) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; int i = 0; list_for_each_entry(tmp, &block->rules, list) { @@ -739,15 +740,16 @@ static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port, ANA_PORT_VCAP_S2_CFG, port); } -static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter) { u16 proto, mask; - if (ace->type != OCELOT_ACE_TYPE_ETYPE) + if (filter->key_type != OCELOT_VCAP_KEY_ETYPE) return false; - proto = ntohs(*(__be16 *)ace->frame.etype.etype.value); - mask = ntohs(*(__be16 *)ace->frame.etype.etype.mask); + proto = ntohs(*(__be16 *)filter->key.etype.etype.value); + mask = ntohs(*(__be16 *)filter->key.etype.etype.mask); /* ETH_P_ALL match, so all protocols below are included */ if (mask == 0) @@ -762,49 +764,51 @@ static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace) return false; } -static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace) +static bool +ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter) { - if (ace->type == OCELOT_ACE_TYPE_SNAP) + if (filter->key_type == OCELOT_VCAP_KEY_SNAP) return true; - if (ace->type == OCELOT_ACE_TYPE_ARP) + if (filter->key_type == OCELOT_VCAP_KEY_ARP) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV4) + if (filter->key_type == OCELOT_VCAP_KEY_IPV4) return true; - if (ace->type == OCELOT_ACE_TYPE_IPV6) + if (filter->key_type == OCELOT_VCAP_KEY_IPV6) return true; return false; } -static bool ocelot_exclusive_mac_etype_ace_rules(struct 
ocelot *ocelot, - struct ocelot_ace_rule *ace) +static bool +ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; unsigned long port; int i; - if (ocelot_ace_is_problematic_mac_etype(ace)) { + if (ocelot_vcap_is_problematic_mac_etype(filter)) { /* Search for any non-MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_non_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_non_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, true); - } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) { + } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) { /* Search for any MAC_ETYPE rules on the port */ for (i = 0; i < block->count; i++) { - tmp = ocelot_ace_rule_get_rule_index(block, i); - if (tmp->ingress_port_mask & ace->ingress_port_mask && - ocelot_ace_is_problematic_mac_etype(tmp)) + tmp = ocelot_vcap_block_find_filter(block, i); + if (tmp->ingress_port_mask & filter->ingress_port_mask && + ocelot_vcap_is_problematic_mac_etype(tmp)) return false; } - for_each_set_bit(port, &ace->ingress_port_mask, + for_each_set_bit(port, &filter->ingress_port_mask, ocelot->num_phys_ports) ocelot_match_all_as_mac_etype(ocelot, port, false); } @@ -812,39 +816,40 @@ static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot, return true; } -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack) +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter, + struct netlink_ext_ack *extack) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; int i, index; - if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) { + if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) { NL_SET_ERR_MSG_MOD(extack, "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules"); return -EBUSY; } - /* Add rule to the linked list */ - ocelot_ace_rule_add(ocelot, block, rule); + /* Add filter to the linked list */ + ocelot_vcap_filter_add_to_block(ocelot, block, filter); - /* Get the index of the inserted rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Get the index of the inserted filter */ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Move down the rules to make place for the new rule */ + /* Move down the rules to make place for the new filter */ for (i = block->count - 1; i > index; i--) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now insert the new rule */ - is2_entry_set(ocelot, index, rule); + /* Now insert the new filter */ + is2_entry_set(ocelot, index, filter); return 0; } -int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) +int ocelot_vcap_policer_add(struct ocelot *ocelot, 
u32 pol_ix, + struct ocelot_policer *pol) { struct qos_policer_conf pp = { 0 }; @@ -858,7 +863,7 @@ int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix, return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) +int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) { struct qos_policer_conf pp = { 0 }; @@ -867,44 +872,44 @@ int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix) return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); } -static void ocelot_ace_police_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - u32 ix) +static void ocelot_vcap_police_del(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + u32 ix) { - struct ocelot_ace_rule *ace; + struct ocelot_vcap_filter *filter; int index = -1; if (ix < block->pol_lpr) return; - list_for_each_entry(ace, &block->rules, list) { + list_for_each_entry(filter, &block->rules, list) { index++; - if (ace->action == OCELOT_ACL_ACTION_POLICE && - ace->pol_ix < ix) { - ace->pol_ix += 1; - ocelot_ace_policer_add(ocelot, ace->pol_ix, - &ace->pol); - is2_entry_set(ocelot, index, ace); + if (filter->action == OCELOT_VCAP_ACTION_POLICE && + filter->pol_ix < ix) { + filter->pol_ix += 1; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, + &filter->pol); + is2_entry_set(ocelot, index, filter); } } - ocelot_ace_policer_del(ocelot, block->pol_lpr); + ocelot_vcap_policer_del(ocelot, block->pol_lpr); block->pol_lpr++; } -static void ocelot_ace_rule_del(struct ocelot *ocelot, - struct ocelot_acl_block *block, - struct ocelot_ace_rule *rule) +static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + struct ocelot_vcap_filter *filter) { - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_filter *tmp; struct list_head *pos, *q; list_for_each_safe(pos, q, &block->rules) { - tmp = list_entry(pos, struct ocelot_ace_rule, list); - if (tmp->id == rule->id) { - if (tmp->action == OCELOT_ACL_ACTION_POLICE) - ocelot_ace_police_del(ocelot, block, - tmp->pol_ix); + tmp = list_entry(pos, struct ocelot_vcap_filter, list); + if (tmp->id == filter->id) { + if (tmp->action == OCELOT_VCAP_ACTION_POLICE) + ocelot_vcap_police_del(ocelot, block, + tmp->pol_ix); list_del(pos); kfree(tmp); @@ -914,56 +919,57 @@ static void ocelot_ace_rule_del(struct ocelot *ocelot, block->count--; } -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule del_ace; - struct ocelot_ace_rule *ace; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter del_filter; int i, index; - memset(&del_ace, 0, sizeof(del_ace)); + memset(&del_filter, 0, sizeof(del_filter)); - /* Gets index of the rule */ - index = ocelot_ace_rule_get_index_id(block, rule); + /* Gets index of the filter */ + index = ocelot_vcap_block_get_filter_index(block, filter); - /* Delete rule */ - ocelot_ace_rule_del(ocelot, block, rule); + /* Delete filter */ + ocelot_vcap_block_remove_filter(ocelot, block, filter); - /* Move up all the blocks over the deleted rule */ + /* Move up all the blocks over the deleted filter */ for (i = index; i < block->count; i++) { - ace = ocelot_ace_rule_get_rule_index(block, i); - is2_entry_set(ocelot, i, ace); + struct ocelot_vcap_filter *tmp; + + tmp = ocelot_vcap_block_find_filter(block, i); + is2_entry_set(ocelot, i, tmp); } - /* Now 
delete the last rule, because it is duplicated */ - is2_entry_set(ocelot, block->count, &del_ace); + /* Now delete the last filter, because it is duplicated */ + is2_entry_set(ocelot, block->count, &del_filter); return 0; } -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule) +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *filter) { - struct ocelot_acl_block *block = &ocelot->acl_block; - struct ocelot_ace_rule *tmp; + struct ocelot_vcap_block *block = &ocelot->block; + struct ocelot_vcap_filter *tmp; int index; - index = ocelot_ace_rule_get_index_id(block, rule); - is2_entry_get(ocelot, rule, index); + index = ocelot_vcap_block_get_filter_index(block, filter); + is2_entry_get(ocelot, filter, index); /* After we get the result we need to clear the counters */ - tmp = ocelot_ace_rule_get_rule_index(block, index); + tmp = ocelot_vcap_block_find_filter(block, index); tmp->stats.pkts = 0; is2_entry_set(ocelot, index, tmp); return 0; } -int ocelot_ace_init(struct ocelot *ocelot) +int ocelot_vcap_init(struct ocelot *ocelot) { const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2]; - struct ocelot_acl_block *block = &ocelot->acl_block; + struct ocelot_vcap_block *block = &ocelot->block; struct vcap_data data; memset(&data, 0, sizeof(data)); @@ -994,7 +1000,7 @@ int ocelot_ace_init(struct ocelot *ocelot) block->pol_lpr = OCELOT_POLICER_DISCARD - 1; - INIT_LIST_HEAD(&ocelot->acl_block.rules); + INIT_LIST_HEAD(&ocelot->block.rules); return 0; } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h index 099e177f2617..0dfbfc011b2e 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.h +++ b/drivers/net/ethernet/mscc/ocelot_vcap.h @@ -3,8 +3,8 @@ * Copyright (c) 2019 Microsemi Corporation */ -#ifndef _MSCC_OCELOT_ACE_H_ -#define _MSCC_OCELOT_ACE_H_ +#ifndef _MSCC_OCELOT_VCAP_H_ +#define _MSCC_OCELOT_VCAP_H_ #include "ocelot.h" #include "ocelot_police.h" @@ -76,31 +76,31 @@ struct ocelot_vcap_udp_tcp { u16 mask; }; -enum ocelot_ace_type { - OCELOT_ACE_TYPE_ANY, - OCELOT_ACE_TYPE_ETYPE, - OCELOT_ACE_TYPE_LLC, - OCELOT_ACE_TYPE_SNAP, - OCELOT_ACE_TYPE_ARP, - OCELOT_ACE_TYPE_IPV4, - OCELOT_ACE_TYPE_IPV6 +enum ocelot_vcap_key_type { + OCELOT_VCAP_KEY_ANY, + OCELOT_VCAP_KEY_ETYPE, + OCELOT_VCAP_KEY_LLC, + OCELOT_VCAP_KEY_SNAP, + OCELOT_VCAP_KEY_ARP, + OCELOT_VCAP_KEY_IPV4, + OCELOT_VCAP_KEY_IPV6 }; -struct ocelot_ace_vlan { +struct ocelot_vcap_key_vlan { struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */ struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ enum ocelot_vcap_bit dei; /* DEI */ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ }; -struct ocelot_ace_frame_etype { +struct ocelot_vcap_key_etype { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; struct ocelot_vcap_u16 etype; struct ocelot_vcap_u16 data; /* MAC data */ }; -struct ocelot_ace_frame_llc { +struct ocelot_vcap_key_llc { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -108,7 +108,7 @@ struct ocelot_ace_frame_llc { struct ocelot_vcap_u32 llc; }; -struct ocelot_ace_frame_snap { +struct ocelot_vcap_key_snap { struct ocelot_vcap_u48 dmac; struct ocelot_vcap_u48 smac; @@ -116,7 +116,7 @@ struct ocelot_ace_frame_snap { struct ocelot_vcap_u40 snap; }; -struct ocelot_ace_frame_arp { +struct ocelot_vcap_key_arp { struct ocelot_vcap_u48 smac; enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */ enum ocelot_vcap_bit req; /* Opcode request/reply */ @@ -133,7 +133,7 @@ struct ocelot_ace_frame_arp { struct 
ocelot_vcap_ipv4 dip; /* Target IP address */ }; -struct ocelot_ace_frame_ipv4 { +struct ocelot_vcap_key_ipv4 { enum ocelot_vcap_bit ttl; /* TTL zero */ enum ocelot_vcap_bit fragment; /* Fragment */ enum ocelot_vcap_bit options; /* Header options */ @@ -155,7 +155,7 @@ struct ocelot_ace_frame_ipv4 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -struct ocelot_ace_frame_ipv6 { +struct ocelot_vcap_key_ipv6 { struct ocelot_vcap_u8 proto; /* IPv6 protocol */ struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */ enum ocelot_vcap_bit ttl; /* TTL zero */ @@ -174,58 +174,58 @@ struct ocelot_ace_frame_ipv6 { enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */ }; -enum ocelot_ace_action { - OCELOT_ACL_ACTION_DROP, - OCELOT_ACL_ACTION_TRAP, - OCELOT_ACL_ACTION_POLICE, +enum ocelot_vcap_action { + OCELOT_VCAP_ACTION_DROP, + OCELOT_VCAP_ACTION_TRAP, + OCELOT_VCAP_ACTION_POLICE, }; -struct ocelot_ace_stats { +struct ocelot_vcap_stats { u64 bytes; u64 pkts; u64 used; }; -struct ocelot_ace_rule { +struct ocelot_vcap_filter { struct list_head list; u16 prio; u32 id; - enum ocelot_ace_action action; - struct ocelot_ace_stats stats; + enum ocelot_vcap_action action; + struct ocelot_vcap_stats stats; unsigned long ingress_port_mask; enum ocelot_vcap_bit dmac_mc; enum ocelot_vcap_bit dmac_bc; - struct ocelot_ace_vlan vlan; + struct ocelot_vcap_key_vlan vlan; - enum ocelot_ace_type type; + enum ocelot_vcap_key_type key_type; union { - /* ocelot_ACE_TYPE_ANY: No specific fields */ - struct ocelot_ace_frame_etype etype; - struct ocelot_ace_frame_llc llc; - struct ocelot_ace_frame_snap snap; - struct ocelot_ace_frame_arp arp; - struct ocelot_ace_frame_ipv4 ipv4; - struct ocelot_ace_frame_ipv6 ipv6; - } frame; + /* OCELOT_VCAP_KEY_ANY: No specific fields */ + struct ocelot_vcap_key_etype etype; + struct ocelot_vcap_key_llc llc; + struct ocelot_vcap_key_snap snap; + struct ocelot_vcap_key_arp arp; + struct ocelot_vcap_key_ipv4 ipv4; + struct ocelot_vcap_key_ipv6 ipv6; + } key; struct ocelot_policer pol; u32 pol_ix; }; -int ocelot_ace_rule_offload_add(struct ocelot *ocelot, - struct ocelot_ace_rule *rule, - struct netlink_ext_ack *extack); -int ocelot_ace_rule_offload_del(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); -int ocelot_ace_rule_stats_update(struct ocelot *ocelot, - struct ocelot_ace_rule *rule); +int ocelot_vcap_filter_add(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule, + struct netlink_ext_ack *extack); +int ocelot_vcap_filter_del(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); +int ocelot_vcap_filter_stats_update(struct ocelot *ocelot, + struct ocelot_vcap_filter *rule); -int ocelot_ace_init(struct ocelot *ocelot); +int ocelot_vcap_init(struct ocelot *ocelot); int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv, struct flow_cls_offload *f, bool ingress); -#endif /* _MSCC_OCELOT_ACE_H_ */ +#endif /* _MSCC_OCELOT_VCAP_H_ */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 4953e9994df3..fa2c3904049e 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -470,7 +470,7 @@ struct ocelot_ops { int (*reset)(struct ocelot *ocelot); }; -struct ocelot_acl_block { +struct ocelot_vcap_block { struct list_head rules; int count; int pol_lpr; @@ -535,7 +535,7 @@ struct ocelot { struct list_head multicast; - struct ocelot_acl_block acl_block; + struct ocelot_vcap_block block; const struct vcap_field *vcap_is2_keys; const struct vcap_field *vcap_is2_actions; From 
c73b0ad36ea316e71b5b599228c9d5906045c235 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 18:43:47 +0300 Subject: [PATCH 0142/2056] net: mscc: ocelot: unexpose ocelot_vcap_policer_{add,del} Remove the function prototypes from ocelot_police.h and make these functions static. We need to move them above their callers. Note that moving the implementations to ocelot_police.c is not trivially possible due to dependency on is2_entry_set() which is static to ocelot_vcap.c. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_police.h | 5 -- drivers/net/ethernet/mscc/ocelot_vcap.c | 96 +++++++++++------------ 2 files changed, 45 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_police.h b/drivers/net/ethernet/mscc/ocelot_police.h index be6f2286a5cd..7adb05f71999 100644 --- a/drivers/net/ethernet/mscc/ocelot_police.h +++ b/drivers/net/ethernet/mscc/ocelot_police.h @@ -33,9 +33,4 @@ struct qos_policer_conf { int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix, struct qos_policer_conf *conf); -int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol); - -int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix); - #endif /* _MSCC_OCELOT_POLICE_H_ */ diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 8597034fd3b7..3ef620faf995 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -651,6 +651,49 @@ static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filt filter->stats.pkts = cnt; } +static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, + struct ocelot_policer *pol) +{ + struct qos_policer_conf pp = { 0 }; + + if (!pol) + return -EINVAL; + + pp.mode = MSCC_QOS_RATE_MODE_DATA; + pp.pir = pol->rate; + pp.pbs = pol->burst; + + return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); +} + +static void ocelot_vcap_policer_del(struct ocelot *ocelot, + struct ocelot_vcap_block *block, + u32 pol_ix) +{ + struct ocelot_vcap_filter *filter; + struct qos_policer_conf pp = {0}; + int index = -1; + + if (pol_ix < block->pol_lpr) + return; + + list_for_each_entry(filter, &block->rules, list) { + index++; + if (filter->action == OCELOT_VCAP_ACTION_POLICE && + filter->pol_ix < pol_ix) { + filter->pol_ix += 1; + ocelot_vcap_policer_add(ocelot, filter->pol_ix, + &filter->pol); + is2_entry_set(ocelot, index, filter); + } + } + + pp.mode = MSCC_QOS_RATE_MODE_DISABLED; + qos_policer_conf_set(ocelot, 0, pol_ix, &pp); + + block->pol_lpr++; +} + static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot, struct ocelot_vcap_block *block, struct ocelot_vcap_filter *filter) @@ -848,55 +891,6 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot, return 0; } -int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix, - struct ocelot_policer *pol) -{ - struct qos_policer_conf pp = { 0 }; - - if (!pol) - return -EINVAL; - - pp.mode = MSCC_QOS_RATE_MODE_DATA; - pp.pir = pol->rate; - pp.pbs = pol->burst; - - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); -} - -int ocelot_vcap_policer_del(struct ocelot *ocelot, u32 pol_ix) -{ - struct qos_policer_conf pp = { 0 }; - - pp.mode = MSCC_QOS_RATE_MODE_DISABLED; - - return qos_policer_conf_set(ocelot, 0, pol_ix, &pp); -} - -static void ocelot_vcap_police_del(struct ocelot *ocelot, - struct ocelot_vcap_block *block, - u32 ix) -{ - struct ocelot_vcap_filter *filter; - int index = -1; - - if (ix < 
block->pol_lpr) - return; - - list_for_each_entry(filter, &block->rules, list) { - index++; - if (filter->action == OCELOT_VCAP_ACTION_POLICE && - filter->pol_ix < ix) { - filter->pol_ix += 1; - ocelot_vcap_policer_add(ocelot, filter->pol_ix, - &filter->pol); - is2_entry_set(ocelot, index, filter); - } - } - - ocelot_vcap_policer_del(ocelot, block->pol_lpr); - block->pol_lpr++; -} - static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, struct ocelot_vcap_block *block, struct ocelot_vcap_filter *filter) @@ -908,8 +902,8 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot, tmp = list_entry(pos, struct ocelot_vcap_filter, list); if (tmp->id == filter->id) { if (tmp->action == OCELOT_VCAP_ACTION_POLICE) - ocelot_vcap_police_del(ocelot, block, - tmp->pol_ix); + ocelot_vcap_policer_del(ocelot, block, + tmp->pol_ix); list_del(pos); kfree(tmp); From 8eaf8d99409009b7ab7853f3ac603928458c2022 Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Thu, 18 Jun 2020 16:36:31 -0400 Subject: [PATCH 0143/2056] Remove redundant condition in qdisc_graft parent cannot be NULL here since its in the else part of the if (parent == NULL) condition. Remove the extra check on parent pointer. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- net/sched/sch_api.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 9a3449b56bd6..11ebba60da3b 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1093,8 +1093,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent, int err; /* Only support running class lockless if parent is lockless */ - if (new && (new->flags & TCQ_F_NOLOCK) && - parent && !(parent->flags & TCQ_F_NOLOCK)) + if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK)) qdisc_clear_nolock(new); if (!cops || !cops->graft) From 78e57f152c001eed0321ba4413a07c9e33e753e6 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 18 Jun 2020 14:22:15 -0700 Subject: [PATCH 0144/2056] net: Avoid overwriting valid skb->napi_id This will be useful to allow busy poll for tunneled traffic. In case of busy poll for sessions over tunnels, the underlying physical device's queues need to be polled. Tunnels schedule NAPI either via netif_rx() for backlog queue or schedule the gro_cell_poll(). netif_rx() propagates the valid skb->napi_id to the socket. OTOH, gro_cell_poll() stamps the skb->napi_id again by calling skb_mark_napi_id() with the tunnel NAPI which is not a busy poll candidate. This was preventing tunneled traffic to use busy poll. A valid NAPI ID in the skb indicates it was already marked for busy poll by a NAPI driver and hence needs to be copied into the socket. Signed-off-by: Amritha Nambiar Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/busy_poll.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index 86e028388bad..b001fa91c14e 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -114,7 +114,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb, struct napi_struct *napi) { #ifdef CONFIG_NET_RX_BUSY_POLL - skb->napi_id = napi->napi_id; + /* If the skb was already marked with a valid NAPI ID, avoid overwriting + * it. 
+ */ + if (skb->napi_id < MIN_NAPI_ID) + skb->napi_id = napi->napi_id; #endif } From 902053f17dbeff125926bb07e781f6e63f3efe09 Mon Sep 17 00:00:00 2001 From: Tao Ren Date: Thu, 18 Jun 2020 15:04:44 -0700 Subject: [PATCH 0145/2056] of: mdio: preserve phy dev_flags in of_phy_connect() Replace assignment "=" with OR "|=" for "phy->dev_flags" so "dev_flags" configured in phy probe() function can be preserved. The idea is similar to commit e7312efbd5de ("net: phy: modify assignment to OR for dev_flags in phy_attach_direct"). Signed-off-by: Tao Ren Reviewed-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a04afe79529c..f5c46c72f4d3 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -380,7 +380,7 @@ struct phy_device *of_phy_connect(struct net_device *dev, if (!phy) return NULL; - phy->dev_flags = flags; + phy->dev_flags |= flags; ret = phy_connect_direct(dev, phy, hndlr, iface); From 05e22e8395058745bd0312bc488b522197852aff Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:34 -0700 Subject: [PATCH 0146/2056] tcp: remove indirect calls for icsk->icsk_af_ops->queue_xmit Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/ip.h | 6 +----- include/net/tcp.h | 1 + net/ipv4/ip_output.c | 6 ++++++ net/ipv4/tcp_output.c | 7 ++++++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 04ebe7bf54c6..862c9545833a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -231,11 +231,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct ipcm_cookie *ipc, struct rtable **rtp, struct inet_cork *cork, unsigned int flags); -static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, - struct flowi *fl) -{ - return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); -} +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl); static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) { diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..e5d7e0b09924 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 090d3097ee15..d946356187ed 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -539,6 +539,12 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, } EXPORT_SYMBOL(__ip_queue_xmit); +int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl) +{ + return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); +} +EXPORT_SYMBOL(ip_queue_xmit); + static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) { to->pkt_type = from->pkt_type; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index a50e1990a845..be1bd37185d8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1064,6 +1064,9 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); } +INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); + /* This routine actually 
transmits TCP packets queued in by * tcp_do_sendmsg(). This is used by both the initial * transmission and possible later retransmissions. @@ -1235,7 +1238,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tcp_add_tx_delay(skb, tp); - err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl); + err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit, + inet6_csk_xmit, ip_queue_xmit, + sk, skb, &inet->cork.fl); if (unlikely(err > 0)) { tcp_enter_cwr(sk); From dd2e0b86fc4ee146ac8f3275833d0187efeb950a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 19 Jun 2020 12:12:35 -0700 Subject: [PATCH 0147/2056] tcp: remove indirect calls for icsk->icsk_af_ops->send_check Mitigate RETPOLINE costs in __tcp_transmit_skb() by using INDIRECT_CALL_INET() wrapper. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/ip6_checksum.h | 9 --------- include/net/tcp.h | 3 +++ net/ipv4/tcp_output.c | 5 ++++- net/ipv6/tcp_ipv6.c | 7 +++++++ 4 files changed, 14 insertions(+), 10 deletions(-) diff --git a/include/net/ip6_checksum.h b/include/net/ip6_checksum.h index 27ec612cd4a4..b3f4eaa88672 100644 --- a/include/net/ip6_checksum.h +++ b/include/net/ip6_checksum.h @@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb) th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); } -#if IS_ENABLED(CONFIG_IPV6) -static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) -{ - struct ipv6_pinfo *np = inet6_sk(sk); - - __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); -} -#endif - static inline __sum16 udp_v6_check(int len, const struct in6_addr *saddr, const struct in6_addr *daddr, diff --git a/include/net/tcp.h b/include/net/tcp.h index e5d7e0b09924..cd9cc348dbf9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,6 +932,9 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) #endif return 0; } + +INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); + #endif static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index be1bd37185d8..04b70fe31fa2 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1066,6 +1066,7 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb, INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)); +INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)); /* This routine actually transmits TCP packets queued in by * tcp_do_sendmsg(). 
This is used by both the initial @@ -1210,7 +1211,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, } #endif - icsk->icsk_af_ops->send_check(sk, skb); + INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check, + tcp_v6_send_check, tcp_v4_send_check, + sk, skb); if (likely(tcb->tcp_flags & TCPHDR_ACK)) tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index f67d45ff00b4..4502db706f75 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1811,6 +1811,13 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_destructor = tcp_twsk_destructor, }; +INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + + __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr); +} + const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, From 8bf1539515920810b86cf1783dc433b1a25d31df Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Fri, 19 Jun 2020 15:24:13 -0400 Subject: [PATCH 0148/2056] Remove redundant skb null check Remove the redundant null check for skb. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- net/sched/act_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 4c4466f18801..063d8aaf2900 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1473,7 +1473,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_ROOT_MAX + 1]; - u32 portid = skb ? NETLINK_CB(skb).portid : 0; + u32 portid = NETLINK_CB(skb).portid; int ret = 0, ovr = 0; if ((n->nlmsg_type != RTM_GETACTION) && From a9a3320227f649435885d95d24e21912dc539928 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 20 Jun 2020 10:21:26 +0100 Subject: [PATCH 0149/2056] net: mvpp2: add port support helpers The mvpp2 code has tests scattered amongst the code to determine whether the port supports the XLG, and whether the port supports RGMII mode. Rather than having these tests scattered, provide a couple of helper functions, so that future additions can ensure that they get these tests correct. Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 43 ++++++++++++------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e9f287568026..e98be8372780 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1114,6 +1114,17 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) } } +/* Only GOP port 0 has an XLG MAC */ +static bool mvpp2_port_supports_xlg(struct mvpp2_port *port) +{ + return port->gop_id == 0; +} + +static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port) +{ + return !(port->priv->hw_version == MVPP22 && port->gop_id == 0); +} + /* Port configuration routines */ static bool mvpp2_is_xlg(phy_interface_t interface) { @@ -1194,7 +1205,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port) case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - if (port->gop_id == 0) + if (!mvpp2_port_supports_rgmii(port)) goto invalid_conf; mvpp22_gop_init_rgmii(port); break; @@ -1204,7 +1215,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port) mvpp22_gop_init_sgmii(port); break; case PHY_INTERFACE_MODE_10GBASER: - if (port->gop_id != 0) + if (!mvpp2_port_supports_xlg(port)) goto invalid_conf; mvpp22_gop_init_10gkr(port); break; @@ -1246,7 +1257,7 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port) writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK); } - if (port->gop_id == 0) { + if (mvpp2_port_supports_xlg(port)) { /* Enable the XLG/GIG irqs for this port */ val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); if (mvpp2_is_xlg(port->phy_interface)) @@ -1261,7 +1272,7 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port) { u32 val; - if (port->gop_id == 0) { + if (mvpp2_port_supports_xlg(port)) { val = readl(port->base + MVPP22_XLG_EXT_INT_MASK); val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG | MVPP22_XLG_EXT_INT_MASK_GIG); @@ -1290,7 +1301,7 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port) writel(val, port->base + MVPP22_GMAC_INT_MASK); } - if (port->gop_id == 0) { + if (mvpp2_port_supports_xlg(port)) { val = readl(port->base + MVPP22_XLG_INT_MASK); val |= MVPP22_XLG_INT_MASK_LINK; writel(val, port->base + MVPP22_XLG_INT_MASK); @@ -1328,8 +1339,8 @@ static void mvpp2_port_enable(struct mvpp2_port *port) { u32 val; - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val |= MVPP22_XLG_CTRL0_PORT_EN; val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS; @@ -1346,8 +1357,8 @@ static void mvpp2_port_disable(struct mvpp2_port *port) { u32 val; - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_CTRL0_REG); val &= ~MVPP22_XLG_CTRL0_PORT_EN; writel(val, port->base + MVPP22_XLG_CTRL0_REG); @@ -2740,7 +2751,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id) mvpp22_gop_mask_irq(port); - if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) { + if (mvpp2_port_supports_xlg(port) && + mvpp2_is_xlg(port->phy_interface)) { val = readl(port->base + MVPP22_XLG_INT_STAT); if (val & MVPP22_XLG_INT_STAT_LINK) { event = true; @@ -3430,8 +3442,7 @@ static void mvpp22_mode_reconfigure(struct 
mvpp2_port *port) mvpp22_pcs_reset_deassert(port); - /* Only GOP port 0 has an XLG MAC */ - if (port->gop_id == 0) { + if (mvpp2_port_supports_xlg(port)) { ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG); ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; @@ -3443,7 +3454,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port) writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG); } - if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) + if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface)) mvpp2_xlg_max_rx_size_set(port); else mvpp2_gmac_max_rx_size_set(port); @@ -4768,14 +4779,14 @@ static void mvpp2_phylink_validate(struct phylink_config *config, switch (state->interface) { case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_XAUI: - if (port->gop_id != 0) + if (!mvpp2_port_supports_xlg(port)) goto empty_set; break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: - if (port->priv->hw_version == MVPP22 && port->gop_id == 0) + if (!mvpp2_port_supports_rgmii(port)) goto empty_set; break; default: @@ -4791,7 +4802,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config, case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_XAUI: case PHY_INTERFACE_MODE_NA: - if (port->gop_id == 0) { + if (mvpp2_port_supports_xlg(port)) { phylink_set(mask, 10000baseT_Full); phylink_set(mask, 10000baseCR_Full); phylink_set(mask, 10000baseSR_Full); From 6c2b49eb96716e91f202756bfbd3f5fea3b2b172 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 20 Jun 2020 10:21:32 +0100 Subject: [PATCH 0150/2056] net: mvpp2: add mvpp2_phylink_to_port() helper Add a helper to convert the struct phylink_config pointer passed in from phylink to the driver's internal struct mvpp2_port. Signed-off-by: Russell King Signed-off-by: David S.
Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e98be8372780..e182d7040b70 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4767,12 +4767,16 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, eth_hw_addr_random(dev); } +static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config) +{ + return container_of(config, struct mvpp2_port, phylink_config); +} + static void mvpp2_phylink_validate(struct phylink_config *config, unsigned long *supported, struct phylink_link_state *state) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; /* Invalid combinations */ @@ -4913,8 +4917,7 @@ static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port, static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); if (port->priv->hw_version == MVPP22 && port->gop_id == 0) { u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG); @@ -4931,8 +4934,7 @@ static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config, static void mvpp2_mac_an_restart(struct phylink_config *config) { - struct mvpp2_port *port = container_of(config, struct mvpp2_port, - phylink_config); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN, @@ -5105,13 +5107,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); bool change_interface = port->phy_interface != state->interface; /* Check for invalid configuration */ if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) { - netdev_err(dev, "Invalid mode on %s\n", dev->name); + netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name); return; } @@ -5151,8 +5152,7 @@ static void mvpp2_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; if (mvpp2_is_xlg(interface)) { @@ -5199,14 +5199,13 @@ static void mvpp2_mac_link_up(struct phylink_config *config, mvpp2_egress_enable(port); mvpp2_ingress_enable(port); - netif_tx_wake_all_queues(dev); + netif_tx_wake_all_queues(port->dev); } static void mvpp2_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct net_device *dev = to_net_dev(config->dev); - struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_port *port = mvpp2_phylink_to_port(config); u32 val; if (!phylink_autoneg_inband(mode)) { @@ -5223,7 +5222,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config, } } - netif_tx_stop_all_queues(dev); + 
netif_tx_stop_all_queues(port->dev); mvpp2_egress_disable(port); mvpp2_ingress_disable(port); From bd45f644a8fd65fc2dd26b4817cb4b1ff68e393d Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 20 Jun 2020 10:21:37 +0100 Subject: [PATCH 0151/2056] net: mvpp2: add register modification helper Add a helper to read-modify-write a register, and use it in the phylink helpers. Signed-off-by: Russell King Signed-off-by: David S. Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 84 ++++++++++--------- 1 file changed, 44 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index e182d7040b70..06e160a3bea9 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1132,6 +1132,17 @@ static bool mvpp2_is_xlg(phy_interface_t interface) interface == PHY_INTERFACE_MODE_XAUI; } +static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set) +{ + u32 old, val; + + old = val = readl(ptr); + val &= ~mask; + val |= set; + if (old != val) + writel(val, ptr); +} + static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) { struct mvpp2 *priv = port->priv; @@ -4946,38 +4957,29 @@ static void mvpp2_mac_an_restart(struct phylink_config *config) static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, const struct phylink_link_state *state) { - u32 old_ctrl0, ctrl0; - u32 old_ctrl4, ctrl4; - - old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG); - old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG); - - ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS; + u32 val; + val = MVPP22_XLG_CTRL0_MAC_RESET_DIS; if (state->pause & MLO_PAUSE_TX) - ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; - else - ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; if (state->pause & MLO_PAUSE_RX) - ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - else - ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | - MVPP22_XLG_CTRL4_EN_IDLE_CHECK); - ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_MAC_RESET_DIS | + MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | + MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); + mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, + MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | + MVPP22_XLG_CTRL4_EN_IDLE_CHECK | + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC, + MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC); - if (old_ctrl0 != ctrl0) - writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG); - if (old_ctrl4 != ctrl4) - writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG); - - if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) { - while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) & - MVPP22_XLG_CTRL0_MAC_RESET_DIS)) - continue; - } + /* Wait for reset to deassert */ + do { + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS)); } static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode, @@ -5157,19 +5159,14 @@ static void mvpp2_mac_link_up(struct phylink_config *config, if (mvpp2_is_xlg(interface)) { if (!phylink_autoneg_inband(mode)) { - val = readl(port->base + MVPP22_XLG_CTRL0_REG); - val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; - val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS; - writel(val, port->base + MVPP22_XLG_CTRL0_REG); + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, + MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | + 
MVPP22_XLG_CTRL0_FORCE_LINK_PASS, + MVPP22_XLG_CTRL0_FORCE_LINK_PASS); } } else { if (!phylink_autoneg_inband(mode)) { - val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); - val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | - MVPP2_GMAC_CONFIG_MII_SPEED | - MVPP2_GMAC_CONFIG_GMII_SPEED | - MVPP2_GMAC_CONFIG_FULL_DUPLEX); - val |= MVPP2_GMAC_FORCE_LINK_PASS; + val = MVPP2_GMAC_FORCE_LINK_PASS; if (speed == SPEED_1000 || speed == SPEED_2500) val |= MVPP2_GMAC_CONFIG_GMII_SPEED; @@ -5179,20 +5176,27 @@ static void mvpp2_mac_link_up(struct phylink_config *config, if (duplex == DUPLEX_FULL) val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; - writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG, + MVPP2_GMAC_FORCE_LINK_DOWN | + MVPP2_GMAC_FORCE_LINK_PASS | + MVPP2_GMAC_CONFIG_MII_SPEED | + MVPP2_GMAC_CONFIG_GMII_SPEED | + MVPP2_GMAC_CONFIG_FULL_DUPLEX, val); } /* We can always update the flow control enable bits; * these will only be effective if flow control AN * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled. */ - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN); + val = 0; if (tx_pause) val |= MVPP22_CTRL4_TX_FC_EN; if (rx_pause) val |= MVPP22_CTRL4_RX_FC_EN; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); + + mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG, + MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN, + val); } mvpp2_port_enable(port); From 63d78cc9766d5ee18d2e2d82c2642fa414827a77 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 20 Jun 2020 10:21:42 +0100 Subject: [PATCH 0152/2056] net: mvpp2: set xlg flow control in mvpp2_mac_link_up() Set the flow control settings in mvpp2_mac_link_up() for 10G links just as we do for 1G and slower links. This is now the preferred location. Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 06e160a3bea9..212fc3b54310 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4959,17 +4959,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode, { u32 val; - val = MVPP22_XLG_CTRL0_MAC_RESET_DIS; - if (state->pause & MLO_PAUSE_TX) - val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; - - if (state->pause & MLO_PAUSE_RX) - val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; - mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, - MVPP22_XLG_CTRL0_MAC_RESET_DIS | - MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | - MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); + MVPP22_XLG_CTRL0_MAC_RESET_DIS, + MVPP22_XLG_CTRL0_MAC_RESET_DIS); mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG, MVPP22_XLG_CTRL4_MACMODSELECT_GMAC | MVPP22_XLG_CTRL4_EN_IDLE_CHECK | @@ -5159,10 +5151,17 @@ static void mvpp2_mac_link_up(struct phylink_config *config, if (mvpp2_is_xlg(interface)) { if (!phylink_autoneg_inband(mode)) { + val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS; + if (tx_pause) + val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN; + if (rx_pause) + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG, MVPP22_XLG_CTRL0_FORCE_LINK_DOWN | - MVPP22_XLG_CTRL0_FORCE_LINK_PASS, - MVPP22_XLG_CTRL0_FORCE_LINK_PASS); + MVPP22_XLG_CTRL0_FORCE_LINK_PASS | + MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN | + MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val); } } else { if (!phylink_autoneg_inband(mode)) { From 3ca33e3fb4f919b66a72145a87bfeada079e750d Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Fri, 19 Jun 2020 17:10:24 -0700 Subject: [PATCH 0153/2056] Bluetooth: Add hci_dev_lock to get/set device flags Adding hci_dev_lock since hci_conn_params_(lookup|add) require this lock. 
Suggested-by: Miao-chen Chou Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- net/bluetooth/mgmt.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 2a732cab1dc9..5e9b9728eeac 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3895,6 +3895,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n", &cp->addr.bdaddr, cp->addr.type); + hci_dev_lock(hdev); + if (cp->addr.type == BDADDR_BREDR) { br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &cp->addr.bdaddr, @@ -3921,6 +3923,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, status = MGMT_STATUS_SUCCESS; done: + hci_dev_unlock(hdev); + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status, &rp, sizeof(rp)); } @@ -3959,6 +3963,8 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, goto done; } + hci_dev_lock(hdev); + if (cp->addr.type == BDADDR_BREDR) { br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist, &cp->addr.bdaddr, @@ -3985,6 +3991,8 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data, } done: + hci_dev_unlock(hdev); + if (status == MGMT_STATUS_SUCCESS) device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, supported_flags, current_flags); From 5cbd3ebde859bd43bd0584c146060638b1a3abb4 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 22 Jun 2020 13:30:28 +0000 Subject: [PATCH 0154/2056] Bluetooth: use configured params for ext adv MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the extended advertisement feature is enabled, a hardcoded min and max interval of 0x8000 is used. This patch fixes this issue by using the configured min/max value. 
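As a side note on the 3-octet encoding this relies on (an illustrative sketch with a hypothetical function name, mirroring the hci_cpu_to_le24() helper added below): a configured value of 0x0122 (290 slots of 0.625 ms, i.e. 181.25 ms) is stored little-endian as the bytes 22 01 00, matching the trace that follows.

static void example_encode_adv_interval(u8 dst[3], u32 interval)
{
	/* little-endian 24-bit layout, same as hci_cpu_to_le24() */
	dst[0] = interval & 0xff;		/* 0x22 for interval = 0x0122 */
	dst[1] = (interval & 0xff00) >> 8;	/* 0x01 */
	dst[2] = (interval & 0xff0000) >> 16;	/* 0x00 */
}
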
This was validated by setting min/max in main.conf and making sure the right setting is applied: < HCI Command: LE Set Extended Advertising Parameters (0x08|0x0036) plen 25 #93 [hci0] 10.953011 … Min advertising interval: 181.250 msec (0x0122) Max advertising interval: 181.250 msec (0x0122) … Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Daniel Winkler Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 8 ++++++++ net/bluetooth/hci_request.c | 7 +++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 16ab6ce87883..1f18f71363e9 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2516,4 +2516,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) #define hci_iso_data_len(h) ((h) & 0x3fff) #define hci_iso_data_flags(h) ((h) >> 14) +/* le24 support */ +static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3]) +{ + dst[0] = val & 0xff; + dst[1] = (val & 0xff00) >> 8; + dst[2] = (val & 0xff0000) >> 16; +} + #endif /* __HCI_H */ diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 29decd7e8051..86ae4b953a01 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1799,8 +1799,6 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) int err; struct adv_info *adv_instance; bool secondary_adv; - /* In ext adv set param interval is 3 octets */ - const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; if (instance > 0) { adv_instance = hci_find_adv_instance(hdev, instance); @@ -1833,8 +1831,9 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) memset(&cp, 0, sizeof(cp)); - memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); - memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); + /* In ext adv set param interval is 3 octets */ + hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); + hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); From 8746f135bb01872ff412d408ea1aa9ebd328c1f5 Mon Sep 17 00:00:00 2001 From: Luiz Augusto von Dentz Date: Wed, 20 May 2020 14:20:14 -0700 Subject: [PATCH 0155/2056] Bluetooth: Disconnect if E0 is used for Level 4 E0 is not allowed with Level 4: BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C page 1319: '128-bit equivalent strength for link and encryption keys required using FIPS approved algorithms (E0 not allowed, SAFER+ not allowed, and P-192 not allowed; encryption key not shortened' SC enabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x0b 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) Secure Connections (Host Support) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with AES-CCM (0x02) SC disabled: > HCI Event: Read Remote Extended Features (0x23) plen 13 Status: Success (0x00) Handle: 256 Page: 1/2 Features: 0x03 0x00 0x00 0x00 0x00 0x00 0x00 0x00 Secure Simple Pairing (Host Support) LE Supported (Host) > HCI Event: Encryption Change (0x08) plen 4 Status: Success (0x00) Handle: 256 Encryption: Enabled with E0 (0x01) [May 8 20:23] Bluetooth: hci0: Invalid security: expect AES but E0 was used < HCI Command: Disconnect (0x01|0x0006) plen 3 Handle: 256 Reason: Authentication Failure (0x05) Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Marcel 
Holtmann --- include/net/bluetooth/hci_core.h | 10 ++++++---- net/bluetooth/hci_conn.c | 17 +++++++++++++++++ net/bluetooth/hci_event.c | 20 ++++++++------------ 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 77d29341b064..836dc997ff94 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1481,11 +1481,13 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) else encrypt = 0x01; - if (conn->sec_level == BT_SECURITY_SDP) - conn->sec_level = BT_SECURITY_LOW; + if (!status) { + if (conn->sec_level == BT_SECURITY_SDP) + conn->sec_level = BT_SECURITY_LOW; - if (conn->pending_sec_level > conn->sec_level) - conn->sec_level = conn->pending_sec_level; + if (conn->pending_sec_level > conn->sec_level) + conn->sec_level = conn->pending_sec_level; + } mutex_lock(&hci_cb_list_lock); list_for_each_entry(cb, &hci_cb_list, list) { diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 47f3a45d7dcb..8805d68e65f2 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1322,6 +1322,23 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 0; } + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + if (hci_conn_ssp_enabled(conn) && !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) return 0; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e08d4dd9a24e..e060fc9ebb18 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3065,27 +3065,23 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + if (ev->status && conn->state == BT_CONNECTED) { if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); hci_conn_drop(conn); goto unlock; } - /* In Secure Connections Only mode, do not allow any connections - * that are not encrypted with AES-CCM using a P-256 authenticated - * combination key. - */ - if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && - (!test_bit(HCI_CONN_AES_CCM, &conn->flags) || - conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) { - hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_drop(conn); - goto unlock; - } - /* Try reading the encryption key size for encrypted ACL links */ if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { struct hci_cp_read_enc_key_size cp; From a2d0d62f4d9e3546134ba08e102ca7decd4ed836 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:41 -0700 Subject: [PATCH 0156/2056] bpf: Switch btf_parse_vmlinux to btf_find_by_name_kind btf_parse_vmlinux() implements manual search for struct bpf_ctx_convert since at the time of implementing btf_find_by_name_kind() was not available. 
Later btf_find_by_name_kind() was introduced in 27ae7997a661 ("bpf: Introduce BPF_PROG_TYPE_STRUCT_OPS"). It provides similar search functionality and can be leveraged in btf_parse_vmlinux(). Do it. Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/6e12d5c3e8a3d552925913ef73a695dd1bb27800.1592600985.git.rdna@fb.com --- kernel/bpf/btf.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 58c9af1d4808..3eb804618a53 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3591,7 +3591,7 @@ struct btf *btf_parse_vmlinux(void) struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; struct btf *btf = NULL; - int err, i; + int err, btf_id; env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); if (!env) @@ -3625,24 +3625,13 @@ struct btf *btf_parse_vmlinux(void) goto errout; /* find struct bpf_ctx_convert for type checking later */ - for (i = 1; i <= btf->nr_types; i++) { - const struct btf_type *t; - const char *tname; - - t = btf_type_by_id(btf, i); - if (!__btf_type_is_struct(t)) - continue; - tname = __btf_name_by_offset(btf, t->name_off); - if (!strcmp(tname, "bpf_ctx_convert")) { - /* btf_parse_vmlinux() runs under bpf_verifier_lock */ - bpf_ctx_convert.t = t; - break; - } - } - if (i > btf->nr_types) { - err = -ENOENT; + btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT); + if (btf_id < 0) { + err = btf_id; goto errout; } + /* btf_parse_vmlinux() runs under bpf_verifier_lock */ + bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); bpf_struct_ops_init(btf, log); From 032a6b3565489a26d6841eefa1fc29d95fc80c66 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:42 -0700 Subject: [PATCH 0157/2056] bpf: Rename bpf_htab to bpf_shtab in sock_map There are two different `struct bpf_htab` in bpf code in the following files: - kernel/bpf/hashtab.c - net/core/sock_map.c It makes it impossible to find proper btf_id by name = "bpf_htab" and kind = BTF_KIND_STRUCT what is needed to support access to map ptr so that bpf program can access `struct bpf_htab` fields. To make it possible one of the struct-s should be renamed, sock_map.c looks like a better candidate for rename since it's specialized version of hashtab. Rename it to bpf_shtab ("sh" stands for Sock Hash). 
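As an aside, the same (name, kind) lookup is available from user space through libbpf, which makes the collision problem easy to see: a lookup can only return one id per name and kind. A rough sketch, assuming a kernel built with CONFIG_DEBUG_INFO_BTF=y and a reasonably recent libbpf (error handling trimmed; libbpf's NULL-on-error convention assumed):

#include <stdio.h>
#include <bpf/btf.h>

int main(void)
{
	struct btf *btf;
	int id;

	btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	if (!btf) {
		perror("btf__parse");
		return 1;
	}

	/* After the rename, "bpf_shtab" resolves to the sock_map.c struct and
	 * "bpf_htab" unambiguously to the hashtab.c one.
	 */
	id = btf__find_by_name_kind(btf, "bpf_shtab", BTF_KIND_STRUCT);
	printf("bpf_shtab btf_id = %d\n", id);

	btf__free(btf);
	return 0;
}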
Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/c006a639e03c64ca50fc87c4bb627e0bfba90f4e.1592600985.git.rdna@fb.com --- net/core/sock_map.c | 82 ++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 41 deletions(-) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 4059f94e9bb5..2b884f2d562a 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -655,7 +655,7 @@ const struct bpf_map_ops sock_map_ops = { .map_check_btf = map_check_no_btf, }; -struct bpf_htab_elem { +struct bpf_shtab_elem { struct rcu_head rcu; u32 hash; struct sock *sk; @@ -663,14 +663,14 @@ struct bpf_htab_elem { u8 key[]; }; -struct bpf_htab_bucket { +struct bpf_shtab_bucket { struct hlist_head head; raw_spinlock_t lock; }; -struct bpf_htab { +struct bpf_shtab { struct bpf_map map; - struct bpf_htab_bucket *buckets; + struct bpf_shtab_bucket *buckets; u32 buckets_num; u32 elem_size; struct sk_psock_progs progs; @@ -682,17 +682,17 @@ static inline u32 sock_hash_bucket_hash(const void *key, u32 len) return jhash(key, len, 0); } -static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab, - u32 hash) +static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab, + u32 hash) { return &htab->buckets[hash & (htab->buckets_num - 1)]; } -static struct bpf_htab_elem * +static struct bpf_shtab_elem * sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, u32 key_size) { - struct bpf_htab_elem *elem; + struct bpf_shtab_elem *elem; hlist_for_each_entry_rcu(elem, head, node) { if (elem->hash == hash && @@ -705,10 +705,10 @@ sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key, static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; - struct bpf_htab_bucket *bucket; - struct bpf_htab_elem *elem; + struct bpf_shtab_bucket *bucket; + struct bpf_shtab_elem *elem; WARN_ON_ONCE(!rcu_read_lock_held()); @@ -719,8 +719,8 @@ static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) return elem ? 
elem->sk : NULL; } -static void sock_hash_free_elem(struct bpf_htab *htab, - struct bpf_htab_elem *elem) +static void sock_hash_free_elem(struct bpf_shtab *htab, + struct bpf_shtab_elem *elem) { atomic_dec(&htab->count); kfree_rcu(elem, rcu); @@ -729,9 +729,9 @@ static void sock_hash_free_elem(struct bpf_htab *htab, static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, void *link_raw) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_elem *elem_probe, *elem = link_raw; - struct bpf_htab_bucket *bucket; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_elem *elem_probe, *elem = link_raw; + struct bpf_shtab_bucket *bucket; WARN_ON_ONCE(!rcu_read_lock_held()); bucket = sock_hash_select_bucket(htab, elem->hash); @@ -753,10 +753,10 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, static int sock_hash_delete_elem(struct bpf_map *map, void *key) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 hash, key_size = map->key_size; - struct bpf_htab_bucket *bucket; - struct bpf_htab_elem *elem; + struct bpf_shtab_bucket *bucket; + struct bpf_shtab_elem *elem; int ret = -ENOENT; hash = sock_hash_bucket_hash(key, key_size); @@ -774,12 +774,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key) return ret; } -static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab, - void *key, u32 key_size, - u32 hash, struct sock *sk, - struct bpf_htab_elem *old) +static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab, + void *key, u32 key_size, + u32 hash, struct sock *sk, + struct bpf_shtab_elem *old) { - struct bpf_htab_elem *new; + struct bpf_shtab_elem *new; if (atomic_inc_return(&htab->count) > htab->map.max_entries) { if (!old) { @@ -803,10 +803,10 @@ static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab, static int sock_hash_update_common(struct bpf_map *map, void *key, struct sock *sk, u64 flags) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); u32 key_size = map->key_size, hash; - struct bpf_htab_elem *elem, *elem_new; - struct bpf_htab_bucket *bucket; + struct bpf_shtab_elem *elem, *elem_new; + struct bpf_shtab_bucket *bucket; struct sk_psock_link *link; struct sk_psock *psock; int ret; @@ -916,8 +916,8 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key, static int sock_hash_get_next_key(struct bpf_map *map, void *key, void *key_next) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_elem *elem, *elem_next; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_elem *elem, *elem_next; u32 hash, key_size = map->key_size; struct hlist_head *head; int i = 0; @@ -931,7 +931,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key, goto find_first_elem; elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)), - struct bpf_htab_elem, node); + struct bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; @@ -943,7 +943,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key, for (; i < htab->buckets_num; i++) { head = &sock_hash_select_bucket(htab, i)->head; elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), - struct bpf_htab_elem, node); + struct 
bpf_shtab_elem, node); if (elem_next) { memcpy(key_next, elem_next->key, key_size); return 0; @@ -955,7 +955,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key, static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) { - struct bpf_htab *htab; + struct bpf_shtab *htab; int i, err; u64 cost; @@ -977,15 +977,15 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) bpf_map_init_from_attr(&htab->map, attr); htab->buckets_num = roundup_pow_of_two(htab->map.max_entries); - htab->elem_size = sizeof(struct bpf_htab_elem) + + htab->elem_size = sizeof(struct bpf_shtab_elem) + round_up(htab->map.key_size, 8); if (htab->buckets_num == 0 || - htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) { + htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) { err = -EINVAL; goto free_htab; } - cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) + + cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) + (u64) htab->elem_size * htab->map.max_entries; if (cost >= U32_MAX - PAGE_SIZE) { err = -EINVAL; @@ -996,7 +996,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) goto free_htab; htab->buckets = bpf_map_area_alloc(htab->buckets_num * - sizeof(struct bpf_htab_bucket), + sizeof(struct bpf_shtab_bucket), htab->map.numa_node); if (!htab->buckets) { bpf_map_charge_finish(&htab->map.memory); @@ -1017,10 +1017,10 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr) static void sock_hash_free(struct bpf_map *map) { - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - struct bpf_htab_bucket *bucket; + struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map); + struct bpf_shtab_bucket *bucket; struct hlist_head unlink_list; - struct bpf_htab_elem *elem; + struct bpf_shtab_elem *elem; struct hlist_node *node; int i; @@ -1096,7 +1096,7 @@ static void *sock_hash_lookup(struct bpf_map *map, void *key) static void sock_hash_release_progs(struct bpf_map *map) { - psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs); + psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs); } BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops, @@ -1194,7 +1194,7 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) case BPF_MAP_TYPE_SOCKMAP: return &container_of(map, struct bpf_stab, map)->progs; case BPF_MAP_TYPE_SOCKHASH: - return &container_of(map, struct bpf_htab, map)->progs; + return &container_of(map, struct bpf_shtab, map)->progs; default: break; } From 41c48f3a98231738c5ce79f6f2aa6e40ba924d18 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:43 -0700 Subject: [PATCH 0158/2056] bpf: Support access to bpf map fields There are multiple use-cases when it's convenient to have access to bpf map fields, both `struct bpf_map` and map type specific struct-s such as `struct bpf_array`, `struct bpf_htab`, etc. For example while working with sock arrays it can be necessary to calculate the key based on map->max_entries (some_hash % max_entries). Currently this is solved by communicating max_entries via "out-of-band" channel, e.g. via additional map with known key to get info about target map. That works, but is not very convenient and error-prone while working with many maps. In other cases necessary data is dynamic (i.e. unknown at loading time) and it's impossible to get it at all. For example while working with a hash table it can be convenient to know how much capacity is already used (bpf_htab.count.counter for BPF_F_NO_PREALLOC case). 
At the same time kernel knows this info and can provide it to bpf program. Fill this gap by adding support to access bpf map fields from bpf program for both `struct bpf_map` and map type specific fields. Support is implemented via btf_struct_access() so that a user can define their own `struct bpf_map` or map type specific struct in their program with only necessary fields and preserve_access_index attribute, cast a map to this struct and use a field. For example: struct bpf_map { __u32 max_entries; } __attribute__((preserve_access_index)); struct bpf_array { struct bpf_map map; __u32 elem_size; } __attribute__((preserve_access_index)); struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 4); __type(key, __u32); __type(value, __u32); } m_array SEC(".maps"); SEC("cgroup_skb/egress") int cg_skb(void *ctx) { struct bpf_array *array = (struct bpf_array *)&m_array; struct bpf_map *map = (struct bpf_map *)&m_array; /* .. use map->max_entries or array->map.max_entries .. */ } Similarly to other btf_struct_access() use-cases (e.g. struct tcp_sock in net/ipv4/bpf_tcp_ca.c) the patch allows access to any fields of corresponding struct. Only reading from map fields is supported. For btf_struct_access() to work there should be a way to know btf id of a struct that corresponds to a map type. To get btf id there should be a way to get a stringified name of map-specific struct, such as "bpf_array", "bpf_htab", etc for a map type. Two new fields are added to `struct bpf_map_ops` to handle it: * .map_btf_name keeps a btf name of a struct returned by map_alloc(); * .map_btf_id is used to cache btf id of that struct. To make btf ids calculation cheaper they're calculated once while preparing btf_vmlinux and cached same way as it's done for btf_id field of `struct bpf_func_proto` While calculating btf ids, struct names are NOT checked for collision. Collisions will be checked as a part of the work to prepare btf ids used in verifier in compile time that should land soon. The only known collision for `struct bpf_htab` (kernel/bpf/hashtab.c vs net/core/sock_map.c) was fixed earlier. Both new fields .map_btf_name and .map_btf_id must be set for a map type for the feature to work. If neither is set for a map type, verifier will return ENOTSUPP on a try to access map_ptr of corresponding type. If just one of them set, it's verifier misconfiguration. Only `struct bpf_array` for BPF_MAP_TYPE_ARRAY and `struct bpf_htab` for BPF_MAP_TYPE_HASH are supported by this patch. Other map types will be supported separately. The feature is available only for CONFIG_DEBUG_INFO_BTF=y and gated by perfmon_capable() so that unpriv programs won't have access to bpf map fields. 
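To make the hash-map capacity use-case above concrete, a hedged BPF-side sketch of reading bpf_htab.count through a partial struct mirror (the field set follows the selftests added later in this series; the map name m_hash, the size 128, the threshold logic and the program section are illustrative; loading needs CONFIG_DEBUG_INFO_BTF=y and CAP_PERFMON as described above):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Partial mirrors of the kernel structs; only the fields read here,
 * relocated at load time via preserve_access_index.
 */
struct bpf_map {
	__u32 max_entries;
} __attribute__((preserve_access_index));

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;			/* current number of elements */
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* count is meaningful here */
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	struct bpf_htab *htab = (struct bpf_htab *)&m_hash;

	/* Only insert while the map still has room. */
	if (htab->count.counter < htab->map.max_entries) {
		__u32 key = 0, val = 1;

		bpf_map_update_elem(&m_hash, &key, &val, BPF_ANY);
	}

	return 1;
}

char _license[] SEC("license") = "GPL";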
Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/6479686a0cd1e9067993df57b4c3eef0e276fec9.1592600985.git.rdna@fb.com --- include/linux/bpf.h | 9 ++ include/linux/bpf_verifier.h | 1 + kernel/bpf/arraymap.c | 3 + kernel/bpf/btf.c | 40 +++++++++ kernel/bpf/hashtab.c | 3 + kernel/bpf/verifier.c | 84 +++++++++++++++++-- .../selftests/bpf/verifier/map_ptr_mixing.c | 2 +- 7 files changed, 132 insertions(+), 10 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 07052d44bca1..1e1501ee53ce 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -92,6 +92,10 @@ struct bpf_map_ops { int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, struct poll_table_struct *pts); + + /* BTF name and id of struct allocated by map_alloc */ + const char * const map_btf_name; + int *map_btf_id; }; struct bpf_map_memory { @@ -1109,6 +1113,11 @@ static inline bool bpf_allow_ptr_leaks(void) return perfmon_capable(); } +static inline bool bpf_allow_ptr_to_map_access(void) +{ + return perfmon_capable(); +} + static inline bool bpf_bypass_spec_v1(void) { return perfmon_capable(); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index ca08db4ffb5f..53c7bd568c5d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -379,6 +379,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool allow_ptr_to_map_access; bool bpf_capable; bool bypass_spec_v1; bool bypass_spec_v4; diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 11584618e861..e7caa48812fb 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -494,6 +494,7 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) vma->vm_pgoff + pgoff); } +static int array_map_btf_id; const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -510,6 +511,8 @@ const struct bpf_map_ops array_map_ops = { .map_check_btf = array_map_check_btf, .map_lookup_batch = generic_map_lookup_batch, .map_update_batch = generic_map_update_batch, + .map_btf_name = "bpf_array", + .map_btf_id = &array_map_btf_id, }; const struct bpf_map_ops percpu_array_map_ops = { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 3eb804618a53..e377d1981730 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3571,6 +3571,41 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf, return ctx_type; } +static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = { +#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) +#define BPF_LINK_TYPE(_id, _name) +#define BPF_MAP_TYPE(_id, _ops) \ + [_id] = &_ops, +#include +#undef BPF_PROG_TYPE +#undef BPF_LINK_TYPE +#undef BPF_MAP_TYPE +}; + +static int btf_vmlinux_map_ids_init(const struct btf *btf, + struct bpf_verifier_log *log) +{ + const struct bpf_map_ops *ops; + int i, btf_id; + + for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) { + ops = btf_vmlinux_map_ops[i]; + if (!ops || (!ops->map_btf_name && !ops->map_btf_id)) + continue; + if (!ops->map_btf_name || !ops->map_btf_id) { + bpf_log(log, "map type %d is misconfigured\n", i); + return -EINVAL; + } + btf_id = btf_find_by_name_kind(btf, ops->map_btf_name, + BTF_KIND_STRUCT); + if (btf_id < 0) + return btf_id; + *ops->map_btf_id = btf_id; + } + + 
return 0; +} + static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, struct btf *btf, const struct btf_type *t, @@ -3633,6 +3668,11 @@ struct btf *btf_parse_vmlinux(void) /* btf_parse_vmlinux() runs under bpf_verifier_lock */ bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); + /* find bpf map structs for map_ptr access checking */ + err = btf_vmlinux_map_ids_init(btf, log); + if (err < 0) + goto errout; + bpf_struct_ops_init(btf, log); btf_verifier_env_free(env); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index b4b288a3c3c9..2c5999e02060 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1614,6 +1614,7 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, true, false); } +static int htab_map_btf_id; const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1625,6 +1626,8 @@ const struct bpf_map_ops htab_map_ops = { .map_gen_lookup = htab_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, BATCH_OPS(htab), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_map_btf_id, }; const struct bpf_map_ops htab_lru_map_ops = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a1857c4ffaaf..7460f967cb75 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1351,6 +1351,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env, __mark_reg_not_init(env, regs + regno); } +static void mark_btf_ld_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, u32 regno, + enum bpf_reg_type reg_type, u32 btf_id) +{ + if (reg_type == SCALAR_VALUE) { + mark_reg_unknown(env, regs, regno); + return; + } + mark_reg_known_zero(env, regs, regno); + regs[regno].type = PTR_TO_BTF_ID; + regs[regno].btf_id = btf_id; +} + #define DEF_NOT_SUBREG (0) static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) @@ -3182,19 +3195,68 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env, if (ret < 0) return ret; - if (atype == BPF_READ && value_regno >= 0) { - if (ret == SCALAR_VALUE) { - mark_reg_unknown(env, regs, value_regno); - return 0; - } - mark_reg_known_zero(env, regs, value_regno); - regs[value_regno].type = PTR_TO_BTF_ID; - regs[value_regno].btf_id = btf_id; - } + if (atype == BPF_READ && value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); return 0; } +static int check_ptr_to_map_access(struct bpf_verifier_env *env, + struct bpf_reg_state *regs, + int regno, int off, int size, + enum bpf_access_type atype, + int value_regno) +{ + struct bpf_reg_state *reg = regs + regno; + struct bpf_map *map = reg->map_ptr; + const struct btf_type *t; + const char *tname; + u32 btf_id; + int ret; + + if (!btf_vmlinux) { + verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n"); + return -ENOTSUPP; + } + + if (!map->ops->map_btf_id || !*map->ops->map_btf_id) { + verbose(env, "map_ptr access not supported for map type %d\n", + map->map_type); + return -ENOTSUPP; + } + + t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id); + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + + if (!env->allow_ptr_to_map_access) { + verbose(env, + "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n", + tname); + return -EPERM; + } + + if (off < 0) { + verbose(env, "R%d is %s invalid negative access: off=%d\n", + regno, tname, off); + return -EACCES; + } + + if (atype != BPF_READ) { + verbose(env, "only read from %s is supported\n", tname); + return -EACCES; + } + + ret = btf_struct_access(&env->log, t, 
off, size, atype, &btf_id); + if (ret < 0) + return ret; + + if (value_regno >= 0) + mark_btf_ld_reg(env, regs, value_regno, ret, btf_id); + + return 0; +} + + /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory @@ -3363,6 +3425,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == PTR_TO_BTF_ID) { err = check_ptr_to_btf_access(env, regs, regno, off, size, t, value_regno); + } else if (reg->type == CONST_PTR_TO_MAP) { + err = check_ptr_to_map_access(env, regs, regno, off, size, t, + value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -10951,6 +11016,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->strict_alignment = false; env->allow_ptr_leaks = bpf_allow_ptr_leaks(); + env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access(); env->bypass_spec_v1 = bpf_bypass_spec_v1(); env->bypass_spec_v4 = bpf_bypass_spec_v4(); env->bpf_capable = bpf_capable(); diff --git a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c index cd26ee6b7b1d..1f2b8c4cb26d 100644 --- a/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c +++ b/tools/testing/selftests/bpf/verifier/map_ptr_mixing.c @@ -56,7 +56,7 @@ .fixup_map_in_map = { 16 }, .fixup_map_array_48b = { 13 }, .result = REJECT, - .errstr = "R0 invalid mem access 'map_ptr'", + .errstr = "only read from bpf_array is supported", }, { "cond: two branches returning different map pointers for lookup (tail, tail)", From 2872e9ac33a4440173418147351ed4f93177e763 Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:44 -0700 Subject: [PATCH 0159/2056] bpf: Set map_btf_{name, id} for all map types Set map_btf_name and map_btf_id for all map types so that map fields can be accessed by bpf programs. 
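The change is mechanical: each ops table gains the same two fields plus a static slot that caches the id resolved from vmlinux BTF. Condensed from the hunks below (devmap shown; the elided callbacks are unchanged, so this is a pattern excerpt rather than a complete table):

static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
	.map_alloc	= dev_map_alloc,
	.map_free	= dev_map_free,
	/* ... remaining callbacks unchanged ... */
	.map_btf_name	= "bpf_dtab",		/* struct returned by map_alloc() */
	.map_btf_id	= &dev_map_btf_id,	/* filled in while parsing vmlinux BTF */
};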
Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/a825f808f22af52b018dbe82f1c7d29dab5fc978.1592600985.git.rdna@fb.com --- kernel/bpf/arraymap.c | 15 +++++++++++++++ kernel/bpf/bpf_struct_ops.c | 3 +++ kernel/bpf/cpumap.c | 3 +++ kernel/bpf/devmap.c | 6 ++++++ kernel/bpf/hashtab.c | 12 ++++++++++++ kernel/bpf/local_storage.c | 3 +++ kernel/bpf/lpm_trie.c | 3 +++ kernel/bpf/queue_stack_maps.c | 6 ++++++ kernel/bpf/reuseport_array.c | 3 +++ kernel/bpf/ringbuf.c | 3 +++ kernel/bpf/stackmap.c | 3 +++ net/core/bpf_sk_storage.c | 3 +++ net/core/sock_map.c | 6 ++++++ net/xdp/xskmap.c | 3 +++ 14 files changed, 72 insertions(+) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index e7caa48812fb..ec5cd11032aa 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -515,6 +515,7 @@ const struct bpf_map_ops array_map_ops = { .map_btf_id = &array_map_btf_id, }; +static int percpu_array_map_btf_id; const struct bpf_map_ops percpu_array_map_ops = { .map_alloc_check = array_map_alloc_check, .map_alloc = array_map_alloc, @@ -525,6 +526,8 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_delete_elem = array_map_delete_elem, .map_seq_show_elem = percpu_array_map_seq_show_elem, .map_check_btf = array_map_check_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &percpu_array_map_btf_id, }; static int fd_array_map_alloc_check(union bpf_attr *attr) @@ -871,6 +874,7 @@ static void prog_array_map_free(struct bpf_map *map) fd_array_map_free(map); } +static int prog_array_map_btf_id; const struct bpf_map_ops prog_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = prog_array_map_alloc, @@ -886,6 +890,8 @@ const struct bpf_map_ops prog_array_map_ops = { .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, .map_release_uref = prog_array_map_clear, .map_seq_show_elem = prog_array_map_seq_show_elem, + .map_btf_name = "bpf_array", + .map_btf_id = &prog_array_map_btf_id, }; static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, @@ -964,6 +970,7 @@ static void perf_event_fd_array_release(struct bpf_map *map, rcu_read_unlock(); } +static int perf_event_array_map_btf_id; const struct bpf_map_ops perf_event_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -975,6 +982,8 @@ const struct bpf_map_ops perf_event_array_map_ops = { .map_fd_put_ptr = perf_event_fd_array_put_ptr, .map_release = perf_event_fd_array_release, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &perf_event_array_map_btf_id, }; #ifdef CONFIG_CGROUPS @@ -997,6 +1006,7 @@ static void cgroup_fd_array_free(struct bpf_map *map) fd_array_map_free(map); } +static int cgroup_array_map_btf_id; const struct bpf_map_ops cgroup_array_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_map_alloc, @@ -1007,6 +1017,8 @@ const struct bpf_map_ops cgroup_array_map_ops = { .map_fd_get_ptr = cgroup_fd_array_get_ptr, .map_fd_put_ptr = cgroup_fd_array_put_ptr, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &cgroup_array_map_btf_id, }; #endif @@ -1080,6 +1092,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map, return insn - insn_buf; } +static int array_of_maps_map_btf_id; const struct bpf_map_ops array_of_maps_map_ops = { .map_alloc_check = fd_array_map_alloc_check, .map_alloc = array_of_map_alloc, @@ -1092,4 +1105,6 @@ const struct bpf_map_ops array_of_maps_map_ops 
= { .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = array_of_map_gen_lookup, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_array", + .map_btf_id = &array_of_maps_map_btf_id, }; diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index c6b0decaa46a..969c5d47f81f 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -611,6 +611,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr) return map; } +static int bpf_struct_ops_map_btf_id; const struct bpf_map_ops bpf_struct_ops_map_ops = { .map_alloc_check = bpf_struct_ops_map_alloc_check, .map_alloc = bpf_struct_ops_map_alloc, @@ -620,6 +621,8 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = { .map_delete_elem = bpf_struct_ops_map_delete_elem, .map_update_elem = bpf_struct_ops_map_update_elem, .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem, + .map_btf_name = "bpf_struct_ops_map", + .map_btf_id = &bpf_struct_ops_map_btf_id, }; /* "const void *" because some subsystem is diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 27595fc6da56..bd8658055c16 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -543,6 +543,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +static int cpu_map_btf_id; const struct bpf_map_ops cpu_map_ops = { .map_alloc = cpu_map_alloc, .map_free = cpu_map_free, @@ -551,6 +552,8 @@ const struct bpf_map_ops cpu_map_ops = { .map_lookup_elem = cpu_map_lookup_elem, .map_get_next_key = cpu_map_get_next_key, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_cpu_map", + .map_btf_id = &cpu_map_btf_id, }; static int bq_flush_to_queue(struct xdp_bulk_queue *bq) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 0cbb72cdaf63..58acc46861ef 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -747,6 +747,7 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, map, key, value, map_flags); } +static int dev_map_btf_id; const struct bpf_map_ops dev_map_ops = { .map_alloc = dev_map_alloc, .map_free = dev_map_free, @@ -755,8 +756,11 @@ const struct bpf_map_ops dev_map_ops = { .map_update_elem = dev_map_update_elem, .map_delete_elem = dev_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_dtab", + .map_btf_id = &dev_map_btf_id, }; +static int dev_map_hash_map_btf_id; const struct bpf_map_ops dev_map_hash_ops = { .map_alloc = dev_map_alloc, .map_free = dev_map_free, @@ -765,6 +769,8 @@ const struct bpf_map_ops dev_map_hash_ops = { .map_update_elem = dev_map_hash_update_elem, .map_delete_elem = dev_map_hash_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_dtab", + .map_btf_id = &dev_map_hash_map_btf_id, }; static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab, diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 2c5999e02060..acd06081d81d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1630,6 +1630,7 @@ const struct bpf_map_ops htab_map_ops = { .map_btf_id = &htab_map_btf_id, }; +static int htab_lru_map_btf_id; const struct bpf_map_ops htab_lru_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1642,6 +1643,8 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_gen_lookup = htab_lru_map_gen_lookup, .map_seq_show_elem = htab_map_seq_show_elem, BATCH_OPS(htab_lru), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_lru_map_btf_id, }; /* Called from eBPF program */ @@ -1746,6 +1749,7 @@ static void 
htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, rcu_read_unlock(); } +static int htab_percpu_map_btf_id; const struct bpf_map_ops htab_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1756,8 +1760,11 @@ const struct bpf_map_ops htab_percpu_map_ops = { .map_delete_elem = htab_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, BATCH_OPS(htab_percpu), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_percpu_map_btf_id, }; +static int htab_lru_percpu_map_btf_id; const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_alloc_check = htab_map_alloc_check, .map_alloc = htab_map_alloc, @@ -1768,6 +1775,8 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { .map_delete_elem = htab_lru_map_delete_elem, .map_seq_show_elem = htab_percpu_map_seq_show_elem, BATCH_OPS(htab_lru_percpu), + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_lru_percpu_map_btf_id, }; static int fd_htab_map_alloc_check(union bpf_attr *attr) @@ -1890,6 +1899,7 @@ static void htab_of_map_free(struct bpf_map *map) fd_htab_map_free(map); } +static int htab_of_maps_map_btf_id; const struct bpf_map_ops htab_of_maps_map_ops = { .map_alloc_check = fd_htab_map_alloc_check, .map_alloc = htab_of_map_alloc, @@ -1902,4 +1912,6 @@ const struct bpf_map_ops htab_of_maps_map_ops = { .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, .map_gen_lookup = htab_of_map_gen_lookup, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_htab", + .map_btf_id = &htab_of_maps_map_btf_id, }; diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 33d01866bcc2..51bd5a8cb01b 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -409,6 +409,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, rcu_read_unlock(); } +static int cgroup_storage_map_btf_id; const struct bpf_map_ops cgroup_storage_map_ops = { .map_alloc = cgroup_storage_map_alloc, .map_free = cgroup_storage_map_free, @@ -418,6 +419,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = { .map_delete_elem = cgroup_storage_delete_elem, .map_check_btf = cgroup_storage_check_btf, .map_seq_show_elem = cgroup_storage_seq_show_elem, + .map_btf_name = "bpf_cgroup_storage_map", + .map_btf_id = &cgroup_storage_map_btf_id, }; int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map) diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index c8cc4e4cf98d..1abd4e3f906d 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -735,6 +735,7 @@ static int trie_check_btf(const struct bpf_map *map, -EINVAL : 0; } +static int trie_map_btf_id; const struct bpf_map_ops trie_map_ops = { .map_alloc = trie_alloc, .map_free = trie_free, @@ -743,4 +744,6 @@ const struct bpf_map_ops trie_map_ops = { .map_update_elem = trie_update_elem, .map_delete_elem = trie_delete_elem, .map_check_btf = trie_check_btf, + .map_btf_name = "lpm_trie", + .map_btf_id = &trie_map_btf_id, }; diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 05c8e043b9d2..80c66a6d7c54 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -262,6 +262,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key, return -EINVAL; } +static int queue_map_btf_id; const struct bpf_map_ops queue_map_ops = { .map_alloc_check = queue_stack_map_alloc_check, .map_alloc = queue_stack_map_alloc, @@ -273,8 +274,11 @@ const struct bpf_map_ops queue_map_ops = { .map_pop_elem = queue_map_pop_elem, .map_peek_elem = 
queue_map_peek_elem, .map_get_next_key = queue_stack_map_get_next_key, + .map_btf_name = "bpf_queue_stack", + .map_btf_id = &queue_map_btf_id, }; +static int stack_map_btf_id; const struct bpf_map_ops stack_map_ops = { .map_alloc_check = queue_stack_map_alloc_check, .map_alloc = queue_stack_map_alloc, @@ -286,4 +290,6 @@ const struct bpf_map_ops stack_map_ops = { .map_pop_elem = stack_map_pop_elem, .map_peek_elem = stack_map_peek_elem, .map_get_next_key = queue_stack_map_get_next_key, + .map_btf_name = "bpf_queue_stack", + .map_btf_id = &stack_map_btf_id, }; diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index 21cde24386db..a09922f656e4 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -345,6 +345,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key, return 0; } +static int reuseport_array_map_btf_id; const struct bpf_map_ops reuseport_array_ops = { .map_alloc_check = reuseport_array_alloc_check, .map_alloc = reuseport_array_alloc, @@ -352,4 +353,6 @@ const struct bpf_map_ops reuseport_array_ops = { .map_lookup_elem = reuseport_array_lookup_elem, .map_get_next_key = reuseport_array_get_next_key, .map_delete_elem = reuseport_array_delete_elem, + .map_btf_name = "reuseport_array", + .map_btf_id = &reuseport_array_map_btf_id, }; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index 180414bb0d3e..dbf37aff4827 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -294,6 +294,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp, return 0; } +static int ringbuf_map_btf_id; const struct bpf_map_ops ringbuf_map_ops = { .map_alloc = ringbuf_map_alloc, .map_free = ringbuf_map_free, @@ -303,6 +304,8 @@ const struct bpf_map_ops ringbuf_map_ops = { .map_update_elem = ringbuf_map_update_elem, .map_delete_elem = ringbuf_map_delete_elem, .map_get_next_key = ringbuf_map_get_next_key, + .map_btf_name = "bpf_ringbuf_map", + .map_btf_id = &ringbuf_map_btf_id, }; /* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself, diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 599488f25e40..27dc9b1b08a5 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -613,6 +613,7 @@ static void stack_map_free(struct bpf_map *map) put_callchain_buffers(); } +static int stack_trace_map_btf_id; const struct bpf_map_ops stack_trace_map_ops = { .map_alloc = stack_map_alloc, .map_free = stack_map_free, @@ -621,6 +622,8 @@ const struct bpf_map_ops stack_trace_map_ops = { .map_update_elem = stack_map_update_elem, .map_delete_elem = stack_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_stack_map", + .map_btf_id = &stack_trace_map_btf_id, }; static int __init stack_map_init(void) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 1dae4b543243..6f921c4ddc2c 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -919,6 +919,7 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) return -ENOENT; } +static int sk_storage_map_btf_id; const struct bpf_map_ops sk_storage_map_ops = { .map_alloc_check = bpf_sk_storage_map_alloc_check, .map_alloc = bpf_sk_storage_map_alloc, @@ -928,6 +929,8 @@ const struct bpf_map_ops sk_storage_map_ops = { .map_update_elem = bpf_fd_sk_storage_update_elem, .map_delete_elem = bpf_fd_sk_storage_delete_elem, .map_check_btf = bpf_sk_storage_map_check_btf, + .map_btf_name = "bpf_sk_storage_map", + .map_btf_id = &sk_storage_map_btf_id, }; const struct 
bpf_func_proto bpf_sk_storage_get_proto = { diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 2b884f2d562a..4c1123c749bb 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -643,6 +643,7 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = { .arg4_type = ARG_ANYTHING, }; +static int sock_map_btf_id; const struct bpf_map_ops sock_map_ops = { .map_alloc = sock_map_alloc, .map_free = sock_map_free, @@ -653,6 +654,8 @@ const struct bpf_map_ops sock_map_ops = { .map_lookup_elem = sock_map_lookup, .map_release_uref = sock_map_release_progs, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_stab", + .map_btf_id = &sock_map_btf_id, }; struct bpf_shtab_elem { @@ -1176,6 +1179,7 @@ const struct bpf_func_proto bpf_msg_redirect_hash_proto = { .arg4_type = ARG_ANYTHING, }; +static int sock_hash_map_btf_id; const struct bpf_map_ops sock_hash_ops = { .map_alloc = sock_hash_alloc, .map_free = sock_hash_free, @@ -1186,6 +1190,8 @@ const struct bpf_map_ops sock_hash_ops = { .map_lookup_elem_sys_only = sock_hash_lookup_sys, .map_release_uref = sock_hash_release_progs, .map_check_btf = map_check_no_btf, + .map_btf_name = "bpf_shtab", + .map_btf_id = &sock_hash_map_btf_id, }; static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c index 1dc7208c71ba..8367adbbe9df 100644 --- a/net/xdp/xskmap.c +++ b/net/xdp/xskmap.c @@ -254,6 +254,7 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, spin_unlock_bh(&map->lock); } +static int xsk_map_btf_id; const struct bpf_map_ops xsk_map_ops = { .map_alloc = xsk_map_alloc, .map_free = xsk_map_free, @@ -264,4 +265,6 @@ const struct bpf_map_ops xsk_map_ops = { .map_update_elem = xsk_map_update_elem, .map_delete_elem = xsk_map_delete_elem, .map_check_btf = map_check_no_btf, + .map_btf_name = "xsk_map", + .map_btf_id = &xsk_map_btf_id, }; From b1b53d413f16c6b5078edb127e660e67332e4d2f Mon Sep 17 00:00:00 2001 From: Andrey Ignatov Date: Fri, 19 Jun 2020 14:11:45 -0700 Subject: [PATCH 0160/2056] selftests/bpf: Test access to bpf map pointer Add selftests to test access to map pointers from bpf program for all map types except struct_ops (that one would need additional work). verifier test focuses mostly on scenarios that must be rejected. prog_tests test focuses on accessing multiple fields both scalar and a nested struct from bpf program and verifies that those fields have expected values. 
Signed-off-by: Andrey Ignatov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/139a6a17f8016491e39347849b951525335c6eb4.1592600985.git.rdna@fb.com --- .../selftests/bpf/prog_tests/map_ptr.c | 32 + .../selftests/bpf/progs/map_ptr_kern.c | 686 ++++++++++++++++++ .../testing/selftests/bpf/verifier/map_ptr.c | 62 ++ 3 files changed, 780 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/map_ptr.c create mode 100644 tools/testing/selftests/bpf/progs/map_ptr_kern.c create mode 100644 tools/testing/selftests/bpf/verifier/map_ptr.c diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c new file mode 100644 index 000000000000..c230a573c373 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/map_ptr.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include +#include + +#include "map_ptr_kern.skel.h" + +void test_map_ptr(void) +{ + struct map_ptr_kern *skel; + __u32 duration = 0, retval; + char buf[128]; + int err; + + skel = map_ptr_kern__open_and_load(); + if (CHECK(!skel, "skel_open_load", "open_load failed\n")) + return; + + err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4, + sizeof(pkt_v4), buf, NULL, &retval, NULL); + + if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno)) + goto cleanup; + + if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval, + skel->bss->g_map_type, skel->bss->g_line)) + goto cleanup; + +cleanup: + map_ptr_kern__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c new file mode 100644 index 000000000000..473665cac67e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +#include +#include + +#define LOOP_BOUND 0xf +#define MAX_ENTRIES 8 +#define HALF_ENTRIES (MAX_ENTRIES >> 1) + +_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND"); + +enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC; +__u32 g_line = 0; + +#define VERIFY_TYPE(type, func) ({ \ + g_map_type = type; \ + if (!func()) \ + return 0; \ +}) + + +#define VERIFY(expr) ({ \ + g_line = __LINE__; \ + if (!(expr)) \ + return 0; \ +}) + +struct bpf_map_memory { + __u32 pages; +} __attribute__((preserve_access_index)); + +struct bpf_map { + enum bpf_map_type map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 id; + struct bpf_map_memory memory; +} __attribute__((preserve_access_index)); + +static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size, + __u32 value_size, __u32 max_entries) +{ + VERIFY(map->map_type == g_map_type); + VERIFY(map->key_size == key_size); + VERIFY(map->value_size == value_size); + VERIFY(map->max_entries == max_entries); + VERIFY(map->id > 0); + VERIFY(map->memory.pages > 0); + + return 1; +} + +static inline int check_bpf_map_ptr(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(indirect->map_type == direct->map_type); + VERIFY(indirect->key_size == direct->key_size); + VERIFY(indirect->value_size == direct->value_size); + VERIFY(indirect->max_entries == direct->max_entries); + VERIFY(indirect->id == direct->id); + VERIFY(indirect->memory.pages == direct->memory.pages); + + return 1; +} + +static inline int check(struct bpf_map *indirect, struct bpf_map *direct, + __u32 key_size, __u32 value_size, 
__u32 max_entries) +{ + VERIFY(check_bpf_map_ptr(indirect, direct)); + VERIFY(check_bpf_map_fields(indirect, key_size, value_size, + max_entries)); + return 1; +} + +static inline int check_default(struct bpf_map *indirect, + struct bpf_map *direct) +{ + VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32), + MAX_ENTRIES)); + return 1; +} + +typedef struct { + int counter; +} atomic_t; + +struct bpf_htab { + struct bpf_map map; + atomic_t count; + __u32 n_buckets; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */ + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_hash SEC(".maps"); + +static inline int check_hash(void) +{ + struct bpf_htab *hash = (struct bpf_htab *)&m_hash; + struct bpf_map *map = (struct bpf_map *)&m_hash; + int i; + + VERIFY(check_default(&hash->map, map)); + + VERIFY(hash->n_buckets == MAX_ENTRIES); + VERIFY(hash->elem_size == 64); + + VERIFY(hash->count.counter == 0); + for (i = 0; i < HALF_ENTRIES; ++i) { + const __u32 key = i; + const __u32 val = 1; + + if (bpf_map_update_elem(hash, &key, &val, 0)) + return 0; + } + VERIFY(hash->count.counter == HALF_ENTRIES); + + return 1; +} + +struct bpf_array { + struct bpf_map map; + __u32 elem_size; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_array SEC(".maps"); + +static inline int check_array(void) +{ + struct bpf_array *array = (struct bpf_array *)&m_array; + struct bpf_map *map = (struct bpf_map *)&m_array; + int i, n_lookups = 0, n_keys = 0; + + VERIFY(check_default(&array->map, map)); + + VERIFY(array->elem_size == 8); + + for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) { + const __u32 key = i; + __u32 *val = bpf_map_lookup_elem(array, &key); + + ++n_lookups; + if (val) + ++n_keys; + } + + VERIFY(n_lookups == MAX_ENTRIES); + VERIFY(n_keys == MAX_ENTRIES); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_prog_array SEC(".maps"); + +static inline int check_prog_array(void) +{ + struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array; + struct bpf_map *map = (struct bpf_map *)&m_prog_array; + + VERIFY(check_default(&prog_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_perf_event_array SEC(".maps"); + +static inline int check_perf_event_array(void) +{ + struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array; + struct bpf_map *map = (struct bpf_map *)&m_perf_event_array; + + VERIFY(check_default(&perf_event_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_hash SEC(".maps"); + +static inline int check_percpu_hash(void) +{ + struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_percpu_hash; + + VERIFY(check_default(&percpu_hash->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_percpu_array SEC(".maps"); + +static inline int 
check_percpu_array(void) +{ + struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array; + struct bpf_map *map = (struct bpf_map *)&m_percpu_array; + + VERIFY(check_default(&percpu_array->map, map)); + + return 1; +} + +struct bpf_stack_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u64); +} m_stack_trace SEC(".maps"); + +static inline int check_stack_trace(void) +{ + struct bpf_stack_map *stack_trace = + (struct bpf_stack_map *)&m_stack_trace; + struct bpf_map *map = (struct bpf_map *)&m_stack_trace; + + VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64), + MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cgroup_array SEC(".maps"); + +static inline int check_cgroup_array(void) +{ + struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_array; + + VERIFY(check_default(&cgroup_array->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_hash SEC(".maps"); + +static inline int check_lru_hash(void) +{ + struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_hash; + + VERIFY(check_default(&lru_hash->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_lru_percpu_hash SEC(".maps"); + +static inline int check_lru_percpu_hash(void) +{ + struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash; + struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash; + + VERIFY(check_default(&lru_percpu_hash->map, map)); + + return 1; +} + +struct lpm_trie { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct lpm_key { + struct bpf_lpm_trie_key trie_key; + __u32 data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __uint(max_entries, MAX_ENTRIES); + __type(key, struct lpm_key); + __type(value, __u32); +} m_lpm_trie SEC(".maps"); + +static inline int check_lpm_trie(void) +{ + struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie; + struct bpf_map *map = (struct bpf_map *)&m_lpm_trie; + + VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32), + MAX_ENTRIES)); + + return 1; +} + +struct inner_map { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); +} inner_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); + __array(values, struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u32); + }); +} m_array_of_maps SEC(".maps") = { + .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 }, +}; + +static inline int check_array_of_maps(void) +{ + struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_array_of_maps; + + VERIFY(check_default(&array_of_maps->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, MAX_ENTRIES); + 
__type(key, __u32); + __type(value, __u32); + __array(values, struct inner_map); +} m_hash_of_maps SEC(".maps") = { + .values = { + [2] = &inner_map, + }, +}; + +static inline int check_hash_of_maps(void) +{ + struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps; + struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps; + + VERIFY(check_default(&hash_of_maps->map, map)); + + return 1; +} + +struct bpf_dtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap SEC(".maps"); + +static inline int check_devmap(void) +{ + struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap; + struct bpf_map *map = (struct bpf_map *)&m_devmap; + + VERIFY(check_default(&devmap->map, map)); + + return 1; +} + +struct bpf_stab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockmap SEC(".maps"); + +static inline int check_sockmap(void) +{ + struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap; + struct bpf_map *map = (struct bpf_map *)&m_sockmap; + + VERIFY(check_default(&sockmap->map, map)); + + return 1; +} + +struct bpf_cpu_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_cpumap SEC(".maps"); + +static inline int check_cpumap(void) +{ + struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap; + struct bpf_map *map = (struct bpf_map *)&m_cpumap; + + VERIFY(check_default(&cpumap->map, map)); + + return 1; +} + +struct xsk_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_XSKMAP); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_xskmap SEC(".maps"); + +static inline int check_xskmap(void) +{ + struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap; + struct bpf_map *map = (struct bpf_map *)&m_xskmap; + + VERIFY(check_default(&xskmap->map, map)); + + return 1; +} + +struct bpf_shtab { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SOCKHASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_sockhash SEC(".maps"); + +static inline int check_sockhash(void) +{ + struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash; + struct bpf_map *map = (struct bpf_map *)&m_sockhash; + + VERIFY(check_default(&sockhash->map, map)); + + return 1; +} + +struct bpf_cgroup_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_cgroup_storage SEC(".maps"); + +static inline int check_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *cgroup_storage = + (struct bpf_cgroup_storage_map *)&m_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage; + + VERIFY(check(&cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct reuseport_array { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + 
__type(value, __u32); +} m_reuseport_sockarray SEC(".maps"); + +static inline int check_reuseport_sockarray(void) +{ + struct reuseport_array *reuseport_sockarray = + (struct reuseport_array *)&m_reuseport_sockarray; + struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray; + + VERIFY(check_default(&reuseport_sockarray->map, map)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} m_percpu_cgroup_storage SEC(".maps"); + +static inline int check_percpu_cgroup_storage(void) +{ + struct bpf_cgroup_storage_map *percpu_cgroup_storage = + (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage; + struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage; + + VERIFY(check(&percpu_cgroup_storage->map, map, + sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0)); + + return 1; +} + +struct bpf_queue_stack { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_queue SEC(".maps"); + +static inline int check_queue(void) +{ + struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue; + struct bpf_map *map = (struct bpf_map *)&m_queue; + + VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_STACK); + __uint(max_entries, MAX_ENTRIES); + __type(value, __u32); +} m_stack SEC(".maps"); + +static inline int check_stack(void) +{ + struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack; + struct bpf_map *map = (struct bpf_map *)&m_stack; + + VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES)); + + return 1; +} + +struct bpf_sk_storage_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, __u32); + __type(value, __u32); +} m_sk_storage SEC(".maps"); + +static inline int check_sk_storage(void) +{ + struct bpf_sk_storage_map *sk_storage = + (struct bpf_sk_storage_map *)&m_sk_storage; + struct bpf_map *map = (struct bpf_map *)&m_sk_storage; + + VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0)); + + return 1; +} + +struct { + __uint(type, BPF_MAP_TYPE_DEVMAP_HASH); + __uint(max_entries, MAX_ENTRIES); + __type(key, __u32); + __type(value, __u32); +} m_devmap_hash SEC(".maps"); + +static inline int check_devmap_hash(void) +{ + struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash; + struct bpf_map *map = (struct bpf_map *)&m_devmap_hash; + + VERIFY(check_default(&devmap_hash->map, map)); + + return 1; +} + +struct bpf_ringbuf_map { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 12); +} m_ringbuf SEC(".maps"); + +static inline int check_ringbuf(void) +{ + struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf; + struct bpf_map *map = (struct bpf_map *)&m_ringbuf; + + VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12)); + + return 1; +} + +SEC("cgroup_skb/egress") +int cg_skb(void *ctx) +{ + VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array); + VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, 
check_percpu_array); + VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash); + VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie); + VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap); + VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap); + VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap); + VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash); + VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, + check_reuseport_sockarray); + VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, + check_percpu_cgroup_storage); + VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue); + VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack); + VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage); + VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash); + VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf); + + return 1; +} + +__u32 _version SEC("version") = 1; +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c new file mode 100644 index 000000000000..b52209db8250 --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/map_ptr.c @@ -0,0 +1,62 @@ +{ + "bpf_map_ptr: read with negative offset rejected", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "R1 is bpf_array invalid negative access: off=-8", +}, +{ + "bpf_map_ptr: write rejected", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 3 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "only read from bpf_array is supported", +}, +{ + "bpf_map_ptr: read non-existent field rejected", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = REJECT, + .errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4", +}, +{ + "bpf_map_ptr: read ops field accepted", + .insns = { + BPF_MOV64_IMM(BPF_REG_6, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 1 }, + .result_unpriv = REJECT, + .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN", + .result = ACCEPT, + .retval = 1, +}, From 1bdb6c9a1c43fdf9b83b2331dfc6229bd2e71d9b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 20 
Jun 2020 23:21:12 -0700 Subject: [PATCH 0161/2056] libbpf: Add a bunch of attribute getters/setters for map definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a bunch of getters for various aspects of a BPF map. Some of these attributes (e.g., key_size, value_size, type, etc.) are available right now in struct bpf_map_def, but this patch adds getters that allow fetching them individually. The bpf_map_def approach isn't very scalable once ABI stability requirements are taken into account. It's much easier to extend libbpf and add support for new features when each aspect of a BPF map has a separate getter/setter. Getters follow the common naming convention of not explicitly having "get" in their name: bpf_map__type() returns the map type, bpf_map__key_size() returns the key_size. Setters, though, explicitly have "set" in their name: bpf_map__set_type(), bpf_map__set_key_size(). This patch ensures we now have a getter and a setter for the following map attributes: - type; - max_entries; - map_flags; - numa_node; - key_size; - value_size; - ifindex. bpf_map__resize() enforces an unnecessary restriction of max_entries > 0. It is unnecessary because libbpf actually supports zero max_entries in some cases (e.g., for a PERF_EVENT_ARRAY map) and treats it specially at map creation time. To allow setting max_entries=0, a new bpf_map__set_max_entries() setter is added. bpf_map__resize()'s behavior is preserved for backwards-compatibility reasons. A map ifindex getter is added as well; there is a setter already, but no corresponding getter, so fix this asymmetry too. bpf_map__set_ifindex() itself is converted from a void function into an error-returning one, similar to the other setters. The only error returned right now is -EBUSY, if the BPF map is already loaded and has a corresponding FD. One attribute that previously could not be fetched, set, or even specified declaratively is numa_node. This patch fixes this gap by adding a programmatic getter/setter as well as support for a numa_node field in BTF-defined maps.
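A minimal usage sketch of the new getters/setters (illustrative only: the object file name "test_map.bpf.o" and the surrounding scaffolding are hypothetical, the map name m_hash simply mirrors the selftest map above, and only the bpf_map__* accessors added here plus long-standing bpf_object__* calls are assumed):

        #include <stdio.h>
        #include <bpf/libbpf.h>

        int main(void)
        {
                struct bpf_object *obj;
                struct bpf_map *map;

                obj = bpf_object__open_file("test_map.bpf.o", NULL);
                if (libbpf_get_error(obj))
                        return 1;

                map = bpf_object__find_map_by_name(obj, "m_hash");

                /* setters only work before load; afterwards they return -EBUSY */
                bpf_map__set_max_entries(map, 1024);
                /* numa_node is honored by the kernel when BPF_F_NUMA_NODE is set */
                bpf_map__set_map_flags(map, bpf_map__map_flags(map) | BPF_F_NUMA_NODE);
                bpf_map__set_numa_node(map, 0);

                if (bpf_object__load(obj))
                        return 1;

                /* read the attributes back via the new getters */
                printf("type=%d key=%u value=%u max_entries=%u flags=%u\n",
                       bpf_map__type(map), bpf_map__key_size(map),
                       bpf_map__value_size(map), bpf_map__max_entries(map),
                       bpf_map__map_flags(map));

                bpf_object__close(obj);
                return 0;
        }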
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/20200621062112.3006313-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 102 ++++++++++++++++++++++++++++++++++++--- tools/lib/bpf/libbpf.h | 30 ++++++++++-- tools/lib/bpf/libbpf.map | 14 ++++++ 3 files changed, 135 insertions(+), 11 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 477c679ed945..259a6360475f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -310,6 +310,7 @@ struct bpf_map { int map_ifindex; int inner_map_fd; struct bpf_map_def def; + __u32 numa_node; __u32 btf_var_idx; __u32 btf_key_type_id; __u32 btf_value_type_id; @@ -1957,6 +1958,10 @@ static int parse_btf_map_def(struct bpf_object *obj, return -EINVAL; pr_debug("map '%s': found map_flags = %u.\n", map->name, map->def.map_flags); + } else if (strcmp(name, "numa_node") == 0) { + if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node)) + return -EINVAL; + pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node); } else if (strcmp(name, "key_size") == 0) { __u32 sz; @@ -3222,18 +3227,25 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) return err; } +__u32 bpf_map__max_entries(const struct bpf_map *map) +{ + return map->def.max_entries; +} + +int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.max_entries = max_entries; + return 0; +} + int bpf_map__resize(struct bpf_map *map, __u32 max_entries) { if (!map || !max_entries) return -EINVAL; - /* If map already created, its attributes can't be changed. */ - if (map->fd >= 0) - return -EBUSY; - - map->def.max_entries = max_entries; - - return 0; + return bpf_map__set_max_entries(map, max_entries); } static int @@ -3603,6 +3615,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.map_flags = def->map_flags; create_attr.key_size = def->key_size; create_attr.value_size = def->value_size; + create_attr.numa_node = map->numa_node; if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { int nr_cpus; @@ -7088,6 +7101,71 @@ const char *bpf_map__name(const struct bpf_map *map) return map ? map->name : NULL; } +enum bpf_map_type bpf_map__type(const struct bpf_map *map) +{ + return map->def.type; +} + +int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.type = type; + return 0; +} + +__u32 bpf_map__map_flags(const struct bpf_map *map) +{ + return map->def.map_flags; +} + +int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.map_flags = flags; + return 0; +} + +__u32 bpf_map__numa_node(const struct bpf_map *map) +{ + return map->numa_node; +} + +int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node) +{ + if (map->fd >= 0) + return -EBUSY; + map->numa_node = numa_node; + return 0; +} + +__u32 bpf_map__key_size(const struct bpf_map *map) +{ + return map->def.key_size; +} + +int bpf_map__set_key_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.key_size = size; + return 0; +} + +__u32 bpf_map__value_size(const struct bpf_map *map) +{ + return map->def.value_size; +} + +int bpf_map__set_value_size(struct bpf_map *map, __u32 size) +{ + if (map->fd >= 0) + return -EBUSY; + map->def.value_size = size; + return 0; +} + __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) { return map ? 
map->btf_key_type_id : 0; @@ -7140,9 +7218,17 @@ bool bpf_map__is_internal(const struct bpf_map *map) return map->libbpf_type != LIBBPF_MAP_UNSPEC; } -void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +__u32 bpf_map__ifindex(const struct bpf_map *map) { + return map->map_ifindex; +} + +int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) +{ + if (map->fd >= 0) + return -EBUSY; map->map_ifindex = ifindex; + return 0; } int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 334437af3014..fdd279fb1866 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -418,11 +418,38 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); LIBBPF_API struct bpf_map * bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); +/* get/set map FD */ LIBBPF_API int bpf_map__fd(const struct bpf_map *map); +LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); +/* get map definition */ LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +/* get map name */ LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); +/* get/set map type */ +LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); +/* get/set map size (max_entries) */ +LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); +/* get/set map flags */ +LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags); +/* get/set map NUMA node */ +LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node); +/* get/set map key size */ +LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size); +/* get/set map value size */ +LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size); +/* get map key/value BTF type IDs */ LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); +/* get/set map if_index */ +LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, @@ -430,11 +457,8 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size); -LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); -LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); -LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_pinned(const struct 
bpf_map *map); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c914347f5065..9914e0db4859 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -272,4 +272,18 @@ LIBBPF_0.0.9 { } LIBBPF_0.0.8; LIBBPF_0.1.0 { + global: + bpf_map__ifindex; + bpf_map__key_size; + bpf_map__map_flags; + bpf_map__max_entries; + bpf_map__numa_node; + bpf_map__set_key_size; + bpf_map__set_map_flags; + bpf_map__set_max_entries; + bpf_map__set_numa_node; + bpf_map__set_type; + bpf_map__set_value_size; + bpf_map__type; + bpf_map__value_size; } LIBBPF_0.0.9; From a829eb0d5dc5415bef380cf53e09a0123e716951 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:47 +0000 Subject: [PATCH 0162/2056] net/devlink: Prepare devlink port functions to fill extack Prepare devlink port related functions to optionally fill up the extack information which will be used in subsequent patch by port function attribute(s). Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/devlink.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 2cafbc808b09..05197631d52a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -566,7 +566,8 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, enum devlink_command cmd, u32 portid, - u32 seq, int flags) + u32 seq, int flags, + struct netlink_ext_ack *extack) { void *hdr; @@ -634,7 +635,8 @@ static void devlink_port_notify(struct devlink_port *devlink_port, if (!msg) return; - err = devlink_nl_port_fill(msg, devlink, devlink_port, cmd, 0, 0, 0); + err = devlink_nl_port_fill(msg, devlink, devlink_port, cmd, 0, 0, 0, + NULL); if (err) { nlmsg_free(msg); return; @@ -708,7 +710,8 @@ static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb, err = devlink_nl_port_fill(msg, devlink, devlink_port, DEVLINK_CMD_PORT_NEW, - info->snd_portid, info->snd_seq, 0); + info->snd_portid, info->snd_seq, 0, + info->extack); if (err) { nlmsg_free(msg); return err; @@ -740,7 +743,8 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg, DEVLINK_CMD_NEW, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - NLM_F_MULTI); + NLM_F_MULTI, + cb->extack); if (err) { mutex_unlock(&devlink->lock); goto out; From 2a916ecc405686c1d86f632281bc06aa75ebae4e Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:48 +0000 Subject: [PATCH 0163/2056] net/devlink: Support querying hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Enable users to query port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:66 $ devlink port show pci/0000:06:00.0/2 -jp { "port": { "pci/0000:06:00.0/2": { "type": "eth", "netdev": "enp6s0pf0vf1", "flavour": "pcivf", "pfnum": 0, "vfnum": 1, "function": { "hw_addr": "00:11:22:33:44:66" } } } } Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 12 ++++++++++ include/uapi/linux/devlink.h | 10 ++++++++ net/core/devlink.c | 45 ++++++++++++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 1df6dfec26c2..56fc9cdb189d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1107,6 +1107,18 @@ struct devlink_ops { int (*trap_policer_counter_get)(struct devlink *devlink, const struct devlink_trap_policer *policer, u64 *p_drops); + /** + * @port_function_hw_addr_get: Port function's hardware address get function. + * + * Should be used by device drivers to report the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. + * + * Note: @extack can be NULL when port notifier queries the port function. + */ + int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 08563e6a424d..07d0af8f5923 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -451,6 +451,8 @@ enum devlink_attr { DEVLINK_ATTR_TRAP_POLICER_RATE, /* u64 */ DEVLINK_ATTR_TRAP_POLICER_BURST, /* u64 */ + DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, @@ -497,4 +499,12 @@ enum devlink_resource_unit { DEVLINK_RESOURCE_UNIT_ENTRY, }; +enum devlink_port_function_attr { + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC, + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */ + + __DEVLINK_PORT_FUNCTION_ATTR_MAX, + DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1 +}; + #endif /* _UAPI_LINUX_DEVLINK_H_ */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 05197631d52a..b6848b607e9c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -563,6 +563,49 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, return 0; } +static int +devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = port->devlink; + const struct devlink_ops *ops; + struct nlattr *function_attr; + bool empty_nest = true; + int err = 0; + + function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION); + if (!function_attr) + return -EMSGSIZE; + + ops = devlink->ops; + if (ops->port_function_hw_addr_get) { + int uninitialized_var(hw_addr_len); + u8 hw_addr[MAX_ADDR_LEN]; + + err = ops->port_function_hw_addr_get(devlink, port, hw_addr, &hw_addr_len, extack); + if (err == -EOPNOTSUPP) { + /* Port function attributes are optional for a port. If port doesn't + * support function attribute, returning -EOPNOTSUPP is not an error. 
+ */ + err = 0; + goto out; + } else if (err) { + goto out; + } + err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr); + if (err) + goto out; + empty_nest = false; + } + +out: + if (err || empty_nest) + nla_nest_cancel(msg, function_attr); + else + nla_nest_end(msg, function_attr); + return err; +} + static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, struct devlink_port *devlink_port, enum devlink_command cmd, u32 portid, @@ -608,6 +651,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, spin_unlock_bh(&devlink_port->type_lock); if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; + if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack)) + goto nla_put_failure; genlmsg_end(msg, hdr); return 0; From a1e8ae907c8d67f57432190bb742802a76516b00 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:49 +0000 Subject: [PATCH 0164/2056] net/devlink: Support setting hardware address of port function PCI PF and VF devlink port can manage the function represented by a devlink port. Allow users to set port function's hardware address. Example of a PCI VF port which supports a port function: $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:00:00:00:00:00 $ devlink port function set pci/0000:06:00.0/2 hw_addr 00:11:22:33:44:55 $ devlink port show pci/0000:06:00.0/2 pci/0000:06:00.0/2: type eth netdev enp6s0pf0vf1 flavour pcivf pfnum 0 vfnum 1 function: hw_addr 00:11:22:33:44:55 Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 10 ++++++ net/core/devlink.c | 76 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index 56fc9cdb189d..7007f93585a5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1119,6 +1119,16 @@ struct devlink_ops { int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); + /** + * @port_function_hw_addr_set: Port function's hardware address set function. + * + * Should be used by device drivers to set the hardware address of a function managed + * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port + * function handling for a particular port. 
+ */ + int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); }; static inline void *devlink_priv(struct devlink *devlink) diff --git a/net/core/devlink.c b/net/core/devlink.c index b6848b607e9c..baa45eca6b5a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -85,6 +85,10 @@ EXPORT_SYMBOL(devlink_dpipe_header_ipv6); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg); EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr); +static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = { + [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY }, +}; + static LIST_HEAD(devlink_list); /* devlink_mutex @@ -827,6 +831,67 @@ static int devlink_port_type_set(struct devlink *devlink, return -EOPNOTSUPP; } +static int +devlink_port_function_hw_addr_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + const struct devlink_ops *ops; + const u8 *hw_addr; + int hw_addr_len; + int err; + + hw_addr = nla_data(attr); + hw_addr_len = nla_len(attr); + if (hw_addr_len > MAX_ADDR_LEN) { + NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long"); + return -EINVAL; + } + if (port->type == DEVLINK_PORT_TYPE_ETH) { + if (hw_addr_len != ETH_ALEN) { + NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device"); + return -EINVAL; + } + if (!is_unicast_ether_addr(hw_addr)) { + NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported"); + return -EINVAL; + } + } + + ops = devlink->ops; + if (!ops->port_function_hw_addr_set) { + NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes"); + return -EOPNOTSUPP; + } + + err = ops->port_function_hw_addr_set(devlink, port, hw_addr, hw_addr_len, extack); + if (err) + return err; + + devlink_port_notify(port, DEVLINK_CMD_PORT_NEW); + return 0; +} + +static int +devlink_port_function_set(struct devlink *devlink, struct devlink_port *port, + const struct nlattr *attr, struct netlink_ext_ack *extack) +{ + struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr, + devlink_function_nl_policy, extack); + if (err < 0) { + NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes"); + return err; + } + + attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR]; + if (attr) + err = devlink_port_function_hw_addr_set(devlink, port, attr, extack); + + return err; +} + static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info) { @@ -842,6 +907,16 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, if (err) return err; } + + if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) { + struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION]; + struct netlink_ext_ack *extack = info->extack; + + err = devlink_port_function_set(devlink, devlink_port, attr, extack); + if (err) + return err; + } + return 0; } @@ -6758,6 +6833,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 }, [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 }, [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 }, + [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED }, }; static const struct genl_ops devlink_nl_ops[] = { From fa997825ebeca820f4001a9e6d285345d3a535ba Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:50 +0000 Subject: [PATCH 0165/2056] net/mlx5: 
Constify mac address pointer Since none of the functions need to modify the input mac address, constify them. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 +- include/linux/mlx5/vport.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 1116ab9bea6c..d6a585a143dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1127,7 +1127,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); } -static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) +static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) { ((u8 *)node_guid)[7] = mac[0]; ((u8 *)node_guid)[6] = mac[1]; @@ -1779,7 +1779,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) /* Vport Administration */ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]) + u16 vport, const u8 *mac) { struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); u64 node_guid; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b3..165a23efc608 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -311,7 +311,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs); void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf); void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, u8 mac[ETH_ALEN]); + u16 vport, const u8 *mac); int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state); int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index c107d92dc118..88cdb9bb4c4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -173,7 +173,7 @@ int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr) EXPORT_SYMBOL_GPL(mlx5_query_mac_address); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, - u16 vport, u8 *addr) + u16 vport, const u8 *addr) { void *in; int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 8170da1e9f70..4db87bcfce7b 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -75,7 +75,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, - u16 vport, u8 *addr); + u16 vport, const u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, From bd93975353d534175c03a6bc8928a2443a7d8d34 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:51 +0000 Subject: [PATCH 0166/2056] net/mlx5: 
E-switch, Introduce and use eswitch support check helper Introduce an helper routine to get esw from a devlink device and use it at eswitch callbacks and in subsequent patch. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 23 +++++++ .../net/ethernet/mellanox/mlx5/core/eswitch.h | 1 + .../mellanox/mlx5/core/eswitch_offloads.c | 66 ++++++++----------- 3 files changed, 50 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d6a585a143dc..9f04fd10cb1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -63,6 +63,29 @@ struct vport_addr { static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw); static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw); +static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) +{ + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -EOPNOTSUPP; + + if (!MLX5_ESWITCH_MANAGER(dev)) + return -EPERM; + + return 0; +} + +struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + int err; + + err = mlx5_eswitch_check(dev); + if (err) + return ERR_PTR(err); + + return dev->priv.eswitch; +} + struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 165a23efc608..dde5a36fee9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -634,6 +634,7 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); for ((vport) = (nvfs); \ (vport) >= (esw)->first_host_vport; (vport)--) +struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink); struct mlx5_vport *__must_check mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 060354bb211a..74a2b76c7c07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2279,17 +2279,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) -{ - if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return -EOPNOTSUPP; - - if(!MLX5_ESWITCH_MANAGER(dev)) - return -EPERM; - - return 0; -} - static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) { /* devlink commands in NONE eswitch mode are currently supported only @@ -2302,14 +2291,13 @@ static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw) int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; u16 cur_mlx5_mode, mlx5_mode = 0; + struct mlx5_eswitch *esw; int err; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; @@ -2338,16 +2326,15 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, int 
mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw; int err; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); mutex_lock(&esw->mode_lock); - err = eswitch_devlink_esw_mode_check(dev->priv.eswitch); + err = eswitch_devlink_esw_mode_check(esw); if (err) goto unlock; @@ -2361,13 +2348,13 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; int err, vport, num_vport; + struct mlx5_eswitch *esw; u8 mlx5_mode; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); mutex_lock(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); @@ -2424,13 +2411,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw; int err; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); mutex_lock(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); @@ -2448,12 +2434,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, struct netlink_ext_ack *extack) { struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw; int err; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); mutex_lock(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); @@ -2508,13 +2494,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap) { - struct mlx5_core_dev *dev = devlink_priv(devlink); - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw; int err; - err = mlx5_eswitch_check(dev); - if (err) - return err; + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + mutex_lock(&esw->mode_lock); err = eswitch_devlink_esw_mode_check(esw); From 443bf36eb543238cbd6399d658839e2e17c8dab2 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:52 +0000 Subject: [PATCH 0167/2056] net/mlx5: Move helper to eswitch layer To use port number to port index conversion at eswitch level, move it to eswitch header. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8 +------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7 +++++++ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 006807e04eda..20ff8526d212 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1181,12 +1181,6 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev, mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport); } -static unsigned int -vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num) -{ - return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num; -} - static int register_devlink_port(struct mlx5_core_dev *dev, struct mlx5e_rep_priv *rpriv) { @@ -1200,7 +1194,7 @@ static int register_devlink_port(struct mlx5_core_dev *dev, return 0; mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); - dl_port_index = vport_to_devlink_port_index(dev, rep->vport); + dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport); pfnum = PCI_FUNC(dev->pdev->devfn); if (rep->vport == MLX5_VPORT_UPLINK) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index dde5a36fee9d..8f537183e977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -565,6 +565,13 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, return index; } +static inline unsigned int +mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, + u16 vport_num) +{ + return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num; +} + /* TODO: This mlx5e_tc function shouldn't be called by eswitch */ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); From f099fde16db3d2594a54ba8c94ce9fa3557aa3e1 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:53 +0000 Subject: [PATCH 0168/2056] net/mlx5: E-switch, Support querying port function mac address Support querying mac address of the eswitch devlink port function. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/devlink.c | 1 + .../net/ethernet/mellanox/mlx5/core/eswitch.c | 43 +++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/eswitch.h | 11 +++++ 3 files changed, 55 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index a99fe4b02b9b..3177d2458fa5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -113,6 +113,7 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get, .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set, .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, + .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get, #endif .flash_update = mlx5_devlink_flash_update, .info_get = mlx5_devlink_info_get, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 9f04fd10cb1e..999e51656e16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1845,6 +1845,49 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, return err; } +static bool +is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num) +{ + return vport_num == MLX5_VPORT_PF || + mlx5_eswitch_is_vf_vport(esw, vport_num); +} + +int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, + struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + int err = -EOPNOTSUPP; + u16 vport_num; + + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + if (!is_port_function_supported(esw, vport_num)) + return -EOPNOTSUPP; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid port"); + return PTR_ERR(vport); + } + + mutex_lock(&esw->state_lock); + if (vport->enabled) { + ether_addr_copy(hw_addr, vport->info.mac); + *hw_addr_len = ETH_ALEN; + err = 0; + } else { + NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); + } + mutex_unlock(&esw->state_lock); + return err; +} + int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 8f537183e977..19cd0af7afda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -450,6 +450,11 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap); +int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, + struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack); + void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, @@ -572,6 +577,12 @@ mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev, return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num; } +static inline u16 +mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index) +{ + return dl_port_index & 0xffff; +} + /* TODO: This mlx5e_tc 
function shouldn't be called by eswitch */ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); From 1094795ce49d75e99b0f8853dfd5e622a5743732 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:54 +0000 Subject: [PATCH 0169/2056] net/mlx5: Split mac address setting function for using state_lock Refactor mac address setting function to let caller hold the necessary state_lock mutex, so that subsequent patch and use this helper routine. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx5/core/eswitch.c | 38 ++++++++++++------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 999e51656e16..2c08411e34ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1801,46 +1801,56 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) } /* Vport Administration */ -int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, - u16 vport, const u8 *mac) +static int +mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, const u8 *mac) { - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + u16 vport_num = evport->vport; u64 node_guid; int err = 0; - if (IS_ERR(evport)) - return PTR_ERR(evport); if (is_multicast_ether_addr(mac)) return -EINVAL; - mutex_lock(&esw->state_lock); - if (evport->info.spoofchk && !is_valid_ether_addr(mac)) mlx5_core_warn(esw->dev, "Set invalid MAC while spoofchk is on, vport(%d)\n", - vport); + vport_num); - err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); + err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac); if (err) { mlx5_core_warn(esw->dev, "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n", - vport, err); - goto unlock; + vport_num, err); + return err; } node_guid_gen_from_mac(&node_guid, mac); - err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); + err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid); if (err) mlx5_core_warn(esw->dev, "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", - vport, err); + vport_num, err); ether_addr_copy(evport->info.mac, mac); evport->info.node_guid = node_guid; if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) err = esw_acl_ingress_lgcy_setup(esw, evport); -unlock: + return err; +} + +int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, + u16 vport, const u8 *mac) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err = 0; + + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = mlx5_esw_set_vport_mac_locked(esw, evport, mac); mutex_unlock(&esw->state_lock); return err; } From 330077d14de12df5697ef192a88b11cc2166cd47 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 19 Jun 2020 03:32:55 +0000 Subject: [PATCH 0170/2056] net/mlx5: E-switch, Supporting setting devlink port function mac address Enable user to set mac address of the PCI PF and VF port function. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/devlink.c | 1 + .../net/ethernet/mellanox/mlx5/core/eswitch.c | 36 +++++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/eswitch.h | 4 +++ 3 files changed, 41 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index 3177d2458fa5..c709e9a385f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -114,6 +114,7 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set, .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get, + .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set, #endif .flash_update = mlx5_devlink_flash_update, .info_get = mlx5_devlink_info_get, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2c08411e34ee..c656c9f081c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1898,6 +1898,42 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, return err; } +int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, + struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct mlx5_eswitch *esw; + struct mlx5_vport *vport; + int err = -EOPNOTSUPP; + u16 vport_num; + + esw = mlx5_devlink_eswitch_get(devlink); + if (IS_ERR(esw)) { + NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); + return PTR_ERR(esw); + } + + vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index); + if (!is_port_function_supported(esw, vport_num)) { + NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr"); + return -EINVAL; + } + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid port"); + return PTR_ERR(vport); + } + + mutex_lock(&esw->state_lock); + if (vport->enabled) + err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr); + else + NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled"); + mutex_unlock(&esw->state_lock); + return err; +} + int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 19cd0af7afda..67e09902bd88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -454,6 +454,10 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); +int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, + struct devlink_port *port, + const u8 *hw_addr, int hw_addr_len, + struct netlink_ext_ack *extack); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); From 272c2330adc9c68284cb0066719160c24bfe605f Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:52 -0400 Subject: [PATCH 0171/2056] xfrm: bail early on slave pass over skb This is prep work for initial support of bonding hardware encryption pass-through support. The bonding driver will fill in the slave_dev pointer, and we use that to know not to skb_push() again on a given skb that was already processed on the bond device. 
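The core of this is the early return added to validate_xmit_xfrm() (see the hunk below): when the xfrm state is offloaded to x->xso.dev but the skb is currently being transmitted on the device recorded in x->xso.slave_dev, the packet has already been through xfrm processing on the bond master, so it is handed back untouched:

        /* This skb was already validated on the master dev */
        if ((x->xso.dev != dev) && (x->xso.slave_dev == dev))
                return skb;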
CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- include/net/xfrm.h | 1 + net/xfrm/xfrm_device.c | 34 +++++++++++++++++----------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 094fe682f5d7..e20b2b27ec48 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,6 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; + struct net_device *slave_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index f50d1f97cf8e..b8918fc5248b 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -106,6 +106,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur struct sk_buff *skb2, *nskb, *pskb = NULL; netdev_features_t esp_features = features; struct xfrm_offload *xo = xfrm_offload(skb); + struct net_device *dev = skb->dev; struct sec_path *sp; if (!xo) @@ -119,6 +120,10 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; + /* This skb was already validated on the master dev */ + if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + return skb; + local_irq_save(flags); sd = this_cpu_ptr(&softnet_data); err = !skb_queue_empty(&sd->xfrm_backlog); @@ -129,25 +134,20 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur return skb; } - if (skb_is_gso(skb)) { - struct net_device *dev = skb->dev; + if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) { + struct sk_buff *segs; - if (unlikely(x->xso.dev != dev)) { - struct sk_buff *segs; + /* Packet got rerouted, fixup features and segment it. */ + esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP); - /* Packet got rerouted, fixup features and segment it. */ - esp_features = esp_features & ~(NETIF_F_HW_ESP - | NETIF_F_GSO_ESP); - - segs = skb_gso_segment(skb, esp_features); - if (IS_ERR(segs)) { - kfree_skb(skb); - atomic_long_inc(&dev->tx_dropped); - return NULL; - } else { - consume_skb(skb); - skb = segs; - } + segs = skb_gso_segment(skb, esp_features); + if (IS_ERR(segs)) { + kfree_skb(skb); + atomic_long_inc(&dev->tx_dropped); + return NULL; + } else { + consume_skb(skb); + skb = segs; } } From 0dea9ea97e4615f7ed2cc129d4caaa6c8102d349 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:53 -0400 Subject: [PATCH 0172/2056] ixgbe_ipsec: become aware of when running as a bonding slave Slave devices in a bond doing hardware encryption also need to be aware that they're slaves, so we operate on the slave instead of the bonding master to do the actual hardware encryption offload bits. CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Acked-by: Jeff Kirsher Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- .../net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 39 +++++++++++++++---- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 113f6087c7c9..26b0a58a064d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -432,6 +432,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, char *alg_name = NULL; int key_len; + if (xs->xso.slave_dev) + dev = xs->xso.slave_dev; + if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; @@ -478,8 +481,8 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) { struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_adapter *adapter; + struct ixgbe_hw *hw; u32 mfval, manc, reg; int num_filters = 4; bool manc_ipv4; @@ -497,6 +500,12 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) #define BMCIP_V6 0x3 #define BMCIP_MASK 0x3 + if (xs->xso.slave_dev) + dev = xs->xso.slave_dev; + + adapter = netdev_priv(dev); + hw = &adapter->hw; + manc = IXGBE_READ_REG(hw, IXGBE_MANC); manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); @@ -561,14 +570,21 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_adapter *adapter; + struct ixgbe_ipsec *ipsec; + struct ixgbe_hw *hw; int checked, match, first; u16 sa_idx; int ret; int i; + if (xs->xso.slave_dev) + dev = xs->xso.slave_dev; + + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + hw = &adapter->hw; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", xs->id.proto); @@ -746,12 +762,19 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_ipsec *ipsec = adapter->ipsec; - struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_adapter *adapter; + struct ixgbe_ipsec *ipsec; + struct ixgbe_hw *hw; u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; + if (xs->xso.slave_dev) + dev = xs->xso.slave_dev; + + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + hw = &adapter->hw; + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa *rsa; u8 ipi; From bf3a058de5728a23237b1649bedba668c2bf3c79 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:54 -0400 Subject: [PATCH 0173/2056] mlx5: become aware of when running as a bonding slave I've been unable to get my hands on suitable supported hardware to date, but I believe this ought to be all that is needed to enable the mlx5 driver to also work with bonding active-backup crypto offload passthru. CC: Boris Pismenny CC: Saeed Mahameed CC: Leon Romanovsky CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 92eb3bad4acd..72ad6664bd73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -210,6 +210,9 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) struct net_device *netdev = x->xso.dev; struct mlx5e_priv *priv; + if (x->xso.slave_dev) + netdev = x->xso.slave_dev; + priv = netdev_priv(netdev); if (x->props.aalgo != SADB_AALG_NONE) { @@ -291,6 +294,9 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) unsigned int sa_handle; int err; + if (x->xso.slave_dev) + netdev = x->xso.slave_dev; + priv = netdev_priv(netdev); err = mlx5e_xfrm_validate_state(x); From 18cb261afd7bf50134e5ccacc5ec91ea16efadd4 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Fri, 19 Jun 2020 10:31:55 -0400 Subject: [PATCH 0174/2056] bonding: support hardware encryption offload to slaves Currently, this support is limited to active-backup mode, as I'm not sure about the feasilibity of mapping an xfrm_state's offload handle to multiple hardware devices simultaneously, and we rely on being able to pass some hints to both the xfrm and NIC driver about whether or not they're operating on a slave device. I've tested this atop an Intel x520 device (ixgbe) using libreswan in transport mode, succesfully achieving ~4.3Gbps throughput with netperf (more or less identical to throughput on a bare NIC in this system), as well as successful failover and recovery mid-netperf. v2: just use CONFIG_XFRM_OFFLOAD for wrapping, isolate more code with it CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 127 +++++++++++++++++++++++++++++++- include/net/bonding.h | 3 + 2 files changed, 128 insertions(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 004919aea5fb..90939ccf2a94 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -278,8 +279,6 @@ const char *bond_mode_name(int mode) return names[mode]; } -/*---------------------------------- VLAN -----------------------------------*/ - /** * bond_dev_queue_xmit - Prepare skb for xmit. * @@ -302,6 +301,8 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, return dev_queue_xmit(skb); } +/*---------------------------------- VLAN -----------------------------------*/ + /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid, * We don't protect the slave list iteration with a lock because: * a. 
This operation is performed in IOCTL context, @@ -372,6 +373,84 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, return 0; } +/*---------------------------------- XFRM -----------------------------------*/ + +#ifdef CONFIG_XFRM_OFFLOAD +/** + * bond_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int bond_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + xs->xso.slave_dev = slave->dev; + bond->xs = xs; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + return -EINVAL; + } + + return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); +} + +/** + * bond_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void bond_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *slave = rtnl_dereference(bond->curr_active_slave); + + if (!slave) + return; + + xs->xso.slave_dev = slave->dev; + + if (!(slave->dev->xfrmdev_ops + && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); + return; + } + + slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +} + +/** + * bond_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + struct net_device *bond_dev = xs->xso.dev; + struct bonding *bond = netdev_priv(bond_dev); + struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); + struct net_device *slave_dev = curr_active->dev; + + if (!(slave_dev->xfrmdev_ops + && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { + slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); + return false; + } + + xs->xso.slave_dev = slave_dev; + return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +} + +static const struct xfrmdev_ops bond_xfrmdev_ops = { + .xdo_dev_state_add = bond_ipsec_add_sa, + .xdo_dev_state_delete = bond_ipsec_del_sa, + .xdo_dev_offload_ok = bond_ipsec_offload_ok, +}; +#endif /* CONFIG_XFRM_OFFLOAD */ + /*------------------------------- Link status -------------------------------*/ /* Set the carrier state for the master according to the state of its @@ -879,6 +958,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; if (new_active) { +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) && bond->xs) + bond_ipsec_del_sa(bond->xs); +#endif /* CONFIG_XFRM_OFFLOAD */ + new_active->last_link_up = jiffies; if (new_active->link == BOND_LINK_BACK) { @@ -941,6 +1025,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } +#ifdef CONFIG_XFRM_OFFLOAD + if (old_active && bond->xs) { + xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); + bond_ipsec_add_sa(bond->xs); + } +#endif /* CONFIG_XFRM_OFFLOAD */ + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) { bond->send_peer_notif--; @@ -1127,15 +1218,24 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define 
BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) +#ifdef CONFIG_XFRM_OFFLOAD +#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) +#endif /* CONFIG_XFRM_OFFLOAD */ + #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_ALL_TSO) + static void bond_compute_features(struct bonding *bond) { unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; netdev_features_t vlan_features = BOND_VLAN_FEATURES; netdev_features_t enc_features = BOND_ENC_FEATURES; +#ifdef CONFIG_XFRM_OFFLOAD + netdev_features_t xfrm_features = BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ netdev_features_t mpls_features = BOND_MPLS_FEATURES; struct net_device *bond_dev = bond->dev; struct list_head *iter; @@ -1157,6 +1257,12 @@ static void bond_compute_features(struct bonding *bond) slave->dev->hw_enc_features, BOND_ENC_FEATURES); +#ifdef CONFIG_XFRM_OFFLOAD + xfrm_features = netdev_increment_features(xfrm_features, + slave->dev->hw_enc_features, + BOND_XFRM_FEATURES); +#endif /* CONFIG_XFRM_OFFLOAD */ + mpls_features = netdev_increment_features(mpls_features, slave->dev->mpls_features, BOND_MPLS_FEATURES); @@ -1176,6 +1282,9 @@ static void bond_compute_features(struct bonding *bond) NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + bond_dev->hw_enc_features |= xfrm_features; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->mpls_features = mpls_features; bond_dev->gso_max_segs = gso_max_segs; netif_set_gso_max_size(bond_dev, gso_max_size); @@ -1464,6 +1573,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n"); } + if (slave_dev->features & NETIF_F_HW_ESP) + slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n"); + /* Old ifenslave binaries are no longer supported. 
These can * be identified with moderate accuracy by the state of the slave: * the current ifenslave will set the interface down prior to @@ -4540,6 +4652,13 @@ void bond_setup(struct net_device *bond_dev) bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); +#ifdef CONFIG_XFRM_OFFLOAD + /* set up xfrm device ops (only supported in active-backup right now) */ + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + bond->xs = NULL; +#endif /* CONFIG_XFRM_OFFLOAD */ + /* don't acquire bond device's netif_tx_lock when transmitting */ bond_dev->features |= NETIF_F_LLTX; @@ -4558,6 +4677,10 @@ void bond_setup(struct net_device *bond_dev) NETIF_F_HW_VLAN_CTAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; +#ifdef CONFIG_XFRM_OFFLOAD + if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + bond_dev->hw_features |= BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; } diff --git a/include/net/bonding.h b/include/net/bonding.h index aa854a9c01e2..a00e1764e9b1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -238,6 +238,9 @@ struct bonding { struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; +#ifdef CONFIG_XFRM_OFFLOAD + struct xfrm_state *xs; +#endif /* CONFIG_XFRM_OFFLOAD */ }; #define bond_slave_get_rcu(dev) \ From b3591c2a366167b5ae985dc3c88a55db3d770cb8 Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 20 Jun 2020 11:30:32 +0100 Subject: [PATCH 0175/2056] net: dsa: qca8k: Switch to PHYLINK instead of PHYLIB Update the driver to use the new PHYLINK callbacks, removing the legacy adjust_link callback. Signed-off-by: Jonathan McDowell Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 310 +++++++++++++++++++++++++++------------- 1 file changed, 212 insertions(+), 98 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index d2b5ab403e06..63b84789f16b 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -418,55 +419,6 @@ qca8k_mib_init(struct qca8k_priv *priv) mutex_unlock(&priv->reg_mutex); } -static int -qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) -{ - u32 reg, val; - - switch (port) { - case 0: - reg = QCA8K_REG_PORT0_PAD_CTRL; - break; - case 6: - reg = QCA8K_REG_PORT6_PAD_CTRL; - break; - default: - pr_err("Can't set PAD_CTRL on port %d\n", port); - return -EINVAL; - } - - /* Configure a port to be directly connected to an external - * PHY or MAC. - */ - switch (mode) { - case PHY_INTERFACE_MODE_RGMII: - /* RGMII mode means no delay so don't enable the delay */ - val = QCA8K_PORT_PAD_RGMII_EN; - qca8k_write(priv, reg, val); - break; - case PHY_INTERFACE_MODE_RGMII_ID: - /* RGMII_ID needs internal delay. 
This is enabled through - * PORT5_PAD_CTRL for all ports, rather than individual port - * registers - */ - qca8k_write(priv, reg, - QCA8K_PORT_PAD_RGMII_EN | - QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | - QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); - qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, - QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); - break; - case PHY_INTERFACE_MODE_SGMII: - qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); - break; - default: - pr_err("xMII mode %d not supported\n", mode); - return -EINVAL; - } - - return 0; -} - static void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) { @@ -639,9 +591,7 @@ static int qca8k_setup(struct dsa_switch *ds) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA; int ret, i; - u32 mask; /* Make sure that port 0 is the cpu port */ if (!dsa_is_cpu_port(ds, 0)) { @@ -661,24 +611,9 @@ qca8k_setup(struct dsa_switch *ds) if (ret) return ret; - /* Initialize CPU port pad mode (xMII type, delays...) */ - ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode); - if (ret) { - pr_err("Can't find phy-mode for master device\n"); - return ret; - } - ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode); - if (ret < 0) - return ret; - - /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ - mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | - QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX; - qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); + /* Enable CPU Port */ qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); - qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); - priv->port_sts[QCA8K_CPU_PORT].enabled = 1; /* Enable MIB counters */ qca8k_mib_init(priv); @@ -693,10 +628,9 @@ qca8k_setup(struct dsa_switch *ds) qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i), QCA8K_PORT_LOOKUP_MEMBER, 0); - /* Disable MAC by default on all user ports */ + /* Disable MAC by default on all ports */ for (i = 1; i < QCA8K_NUM_PORTS; i++) - if (dsa_is_user_port(ds, i)) - qca8k_port_set_status(priv, i, 0); + qca8k_port_set_status(priv, i, 0); /* Forward all unknown frames to CPU port for Linux processing */ qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1, @@ -743,44 +677,222 @@ qca8k_setup(struct dsa_switch *ds) } static void -qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) +qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, + const struct phylink_link_state *state) { struct qca8k_priv *priv = ds->priv; u32 reg; - /* Force fixed-link setting for CPU port, skip others. 
*/ - if (!phy_is_pseudo_fixed_link(phy)) - return; + switch (port) { + case 0: /* 1st CPU port */ + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && + state->interface != PHY_INTERFACE_MODE_SGMII) + return; - /* Set port speed */ - switch (phy->speed) { - case 10: - reg = QCA8K_PORT_STATUS_SPEED_10; + reg = QCA8K_REG_PORT0_PAD_CTRL; break; - case 100: - reg = QCA8K_PORT_STATUS_SPEED_100; - break; - case 1000: - reg = QCA8K_PORT_STATUS_SPEED_1000; + case 1: + case 2: + case 3: + case 4: + case 5: + /* Internal PHY, nothing to do */ + return; + case 6: /* 2nd CPU port / external PHY */ + if (state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && + state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_1000BASEX) + return; + + reg = QCA8K_REG_PORT6_PAD_CTRL; break; default: - dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", - port, phy->speed); + dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port); return; } - /* Set duplex mode */ - if (phy->duplex == DUPLEX_FULL) - reg |= QCA8K_PORT_STATUS_DUPLEX; + if (port != 6 && phylink_autoneg_inband(mode)) { + dev_err(ds->dev, "%s: in-band negotiation unsupported\n", + __func__); + return; + } - /* Force flow control */ - if (dsa_is_cpu_port(ds, port)) - reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; + switch (state->interface) { + case PHY_INTERFACE_MODE_RGMII: + /* RGMII mode means no delay so don't enable the delay */ + qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN); + break; + case PHY_INTERFACE_MODE_RGMII_ID: + /* RGMII_ID needs internal delay. This is enabled through + * PORT5_PAD_CTRL for all ports, rather than individual port + * registers + */ + qca8k_write(priv, reg, + QCA8K_PORT_PAD_RGMII_EN | + QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) | + QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY)); + qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL, + QCA8K_PORT_PAD_RGMII_RX_DELAY_EN); + break; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_1000BASEX: + /* Enable SGMII on the port */ + qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); + break; + default: + dev_err(ds->dev, "xMII mode %s not supported for port %d\n", + phy_modes(state->interface), port); + return; + } +} + +static void +qca8k_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + switch (port) { + case 0: /* 1st CPU port */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && + state->interface != PHY_INTERFACE_MODE_SGMII) + goto unsupported; + break; + case 1: + case 2: + case 3: + case 4: + case 5: + /* Internal PHY */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_GMII) + goto unsupported; + break; + case 6: /* 2nd CPU port / external PHY */ + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_RGMII && + state->interface != PHY_INTERFACE_MODE_RGMII_ID && + state->interface != PHY_INTERFACE_MODE_SGMII && + state->interface != PHY_INTERFACE_MODE_1000BASEX) + goto unsupported; + break; + default: +unsupported: + linkmode_zero(supported); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 
10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + phylink_set(mask, 1000baseX_Full); + + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + linkmode_and(supported, supported, mask); + linkmode_and(state->advertising, state->advertising, mask); +} + +static int +qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port)); + + state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); + state->an_complete = state->link; + state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); + state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : + DUPLEX_HALF; + + switch (reg & QCA8K_PORT_STATUS_SPEED) { + case QCA8K_PORT_STATUS_SPEED_10: + state->speed = SPEED_10; + break; + case QCA8K_PORT_STATUS_SPEED_100: + state->speed = SPEED_100; + break; + case QCA8K_PORT_STATUS_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause = MLO_PAUSE_NONE; + if (reg & QCA8K_PORT_STATUS_RXFLOW) + state->pause |= MLO_PAUSE_RX; + if (reg & QCA8K_PORT_STATUS_TXFLOW) + state->pause |= MLO_PAUSE_TX; + + return 1; +} + +static void +qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface) +{ + struct qca8k_priv *priv = ds->priv; - /* Force link down before changing MAC options */ qca8k_port_set_status(priv, port, 0); +} + +static void +qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface, struct phy_device *phydev, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct qca8k_priv *priv = ds->priv; + u32 reg; + + if (phylink_autoneg_inband(mode)) { + reg = QCA8K_PORT_STATUS_LINK_AUTO; + } else { + switch (speed) { + case SPEED_10: + reg = QCA8K_PORT_STATUS_SPEED_10; + break; + case SPEED_100: + reg = QCA8K_PORT_STATUS_SPEED_100; + break; + case SPEED_1000: + reg = QCA8K_PORT_STATUS_SPEED_1000; + break; + default: + reg = QCA8K_PORT_STATUS_LINK_AUTO; + break; + } + + if (duplex == DUPLEX_FULL) + reg |= QCA8K_PORT_STATUS_DUPLEX; + + if (rx_pause || dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_RXFLOW; + + if (tx_pause || dsa_is_cpu_port(ds, port)) + reg |= QCA8K_PORT_STATUS_TXFLOW; + } + + reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; + qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); - qca8k_port_set_status(priv, port, 1); } static void @@ -937,13 +1049,11 @@ qca8k_port_enable(struct dsa_switch *ds, int port, { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - if (!dsa_is_user_port(ds, port)) - return 0; - qca8k_port_set_status(priv, port, 1); priv->port_sts[port].enabled = 1; - phy_support_asym_pause(phy); + if (dsa_is_user_port(ds, port)) + phy_support_asym_pause(phy); return 0; } @@ -1026,7 +1136,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, static const struct dsa_switch_ops qca8k_switch_ops = { .get_tag_protocol = qca8k_get_tag_protocol, .setup = qca8k_setup, - .adjust_link = qca8k_adjust_link, .get_strings = qca8k_get_strings, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, @@ -1040,6 +1149,11 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, + .phylink_validate = 
qca8k_phylink_validate, + .phylink_mac_link_state = qca8k_phylink_mac_link_state, + .phylink_mac_config = qca8k_phylink_mac_config, + .phylink_mac_link_down = qca8k_phylink_mac_link_down, + .phylink_mac_link_up = qca8k_phylink_mac_link_up, }; static int From f6dadd559886f8cc12d68e98ad77d1da08bfe978 Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 20 Jun 2020 11:31:05 +0100 Subject: [PATCH 0176/2056] net: dsa: qca8k: Improve SGMII interface handling This patch improves the handling of the SGMII interface on the QCA8K devices. Previously the driver did no configuration of the port, even if it was selected. We now configure it up in the appropriate PHY/MAC/Base-X mode depending on what phylink tells us we are connected to and ensure it is enabled. Tested with a device where the CPU connection is RGMII (i.e. the common current use case) + one where the CPU connection is SGMII. I don't have any devices where the SGMII interface is brought out to something other than the CPU. Signed-off-by: Jonathan McDowell Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 33 ++++++++++++++++++++++++++++++++- drivers/net/dsa/qca8k.h | 13 +++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 63b84789f16b..11d1c290d90f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -673,6 +673,9 @@ qca8k_setup(struct dsa_switch *ds) /* Flush the FDB table */ qca8k_fdb_flush(priv); + /* We don't have interrupts for link changes, so we need to poll */ + ds->pcs_poll = true; + return 0; } @@ -681,7 +684,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { struct qca8k_priv *priv = ds->priv; - u32 reg; + u32 reg, val; switch (port) { case 0: /* 1st CPU port */ @@ -740,6 +743,34 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode, case PHY_INTERFACE_MODE_1000BASEX: /* Enable SGMII on the port */ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN); + + /* Enable/disable SerDes auto-negotiation as necessary */ + val = qca8k_read(priv, QCA8K_REG_PWS); + if (phylink_autoneg_inband(mode)) + val &= ~QCA8K_PWS_SERDES_AEN_DIS; + else + val |= QCA8K_PWS_SERDES_AEN_DIS; + qca8k_write(priv, QCA8K_REG_PWS, val); + + /* Configure the SGMII parameters */ + val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL); + + val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX | + QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD; + + if (dsa_is_cpu_port(ds, port)) { + /* CPU port, we're talking to the CPU MAC, be a PHY */ + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_PHY; + } else if (state->interface == PHY_INTERFACE_MODE_SGMII) { + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_MAC; + } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) { + val &= ~QCA8K_SGMII_MODE_CTRL_MASK; + val |= QCA8K_SGMII_MODE_CTRL_BASEX; + } + + qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val); break; default: dev_err(ds->dev, "xMII mode %s not supported for port %d\n", diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 42d6ea24eb14..10ef2bca2cde 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -36,6 +36,8 @@ #define QCA8K_MAX_DELAY 3 #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24) #define QCA8K_PORT_PAD_SGMII_EN BIT(7) +#define QCA8K_REG_PWS 0x010 +#define QCA8K_PWS_SERDES_AEN_DIS BIT(7) #define QCA8K_REG_MODULE_EN 0x030 #define QCA8K_MODULE_EN_MIB BIT(0) #define QCA8K_REG_MIB 0x034 @@ -69,6 +71,7 @@ #define 
QCA8K_PORT_STATUS_LINK_UP BIT(8) #define QCA8K_PORT_STATUS_LINK_AUTO BIT(9) #define QCA8K_PORT_STATUS_LINK_PAUSE BIT(10) +#define QCA8K_PORT_STATUS_FLOW_AUTO BIT(12) #define QCA8K_REG_PORT_HDR_CTRL(_i) (0x9c + (_i * 4)) #define QCA8K_PORT_HDR_CTRL_RX_MASK GENMASK(3, 2) #define QCA8K_PORT_HDR_CTRL_RX_S 2 @@ -77,6 +80,16 @@ #define QCA8K_PORT_HDR_CTRL_ALL 2 #define QCA8K_PORT_HDR_CTRL_MGMT 1 #define QCA8K_PORT_HDR_CTRL_NONE 0 +#define QCA8K_REG_SGMII_CTRL 0x0e0 +#define QCA8K_SGMII_EN_PLL BIT(1) +#define QCA8K_SGMII_EN_RX BIT(2) +#define QCA8K_SGMII_EN_TX BIT(3) +#define QCA8K_SGMII_EN_SD BIT(4) +#define QCA8K_SGMII_CLK125M_DELAY BIT(7) +#define QCA8K_SGMII_MODE_CTRL_MASK (BIT(22) | BIT(23)) +#define QCA8K_SGMII_MODE_CTRL_BASEX (0 << 22) +#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22) +#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22) /* EEE control registers */ #define QCA8K_REG_EEE_CTRL 0x100 From a997b33701745523981d715fbf80cbc000dc229b Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 20 Jun 2020 11:31:16 +0100 Subject: [PATCH 0177/2056] net: dsa: qca8k: Minor comment spelling fix Signed-off-by: Jonathan McDowell Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 11d1c290d90f..4acad5fa0c84 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -647,7 +647,7 @@ qca8k_setup(struct dsa_switch *ds) QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds)); } - /* Invividual user ports get connected to CPU port only */ + /* Individual user ports get connected to CPU port only */ if (dsa_is_user_port(ds, i)) { int shift = 16 * (i % 2); From be3fb56d6ad163947f541f9e6210fa37fb3b5e6d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 20:18:30 +0300 Subject: [PATCH 0178/2056] net: dsa: sja1105: remove empty structures from config table ops Sparse is complaining and giving the following warning message: 'Using plain integer as NULL pointer'. This is not what's going on, instead {0} is used as a zero initializer for the structure members, to indicate that the particular chip revision does not support those particular config tables. But since the config tables are declared globally, the unpopulated elements are zero-initialized anyway. So, to make sparse shut up, let's remove the zero initializers. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- .../net/dsa/sja1105/sja1105_dynamic_config.c | 21 ---------------- .../net/dsa/sja1105/sja1105_static_config.c | 24 ------------------- 2 files changed, 45 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 4471eeccc293..331593ace215 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -639,8 +639,6 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr, /* SJA1105E/T: First generation */ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { - [BLK_IDX_SCHEDULE] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_VL_LOOKUP] = { .entry_packing = sja1105et_vl_lookup_entry_packing, .cmd_packing = sja1105_vl_lookup_cmd_packing, @@ -649,8 +647,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD, .addr = 0x35, }, - [BLK_IDX_VL_POLICING] = {0}, - [BLK_IDX_VL_FORWARDING] = {0}, [BLK_IDX_L2_LOOKUP] = { .entry_packing = sja1105et_dyn_l2_lookup_entry_packing, .cmd_packing = sja1105et_l2_lookup_cmd_packing, @@ -667,7 +663,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD, .addr = 0x20, }, - [BLK_IDX_L2_POLICING] = {0}, [BLK_IDX_VLAN_LOOKUP] = { .entry_packing = sja1105_vlan_lookup_entry_packing, .cmd_packing = sja1105_vlan_lookup_cmd_packing, @@ -692,9 +687,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD, .addr = 0x36, }, - [BLK_IDX_SCHEDULE_PARAMS] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, - [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .entry_packing = sja1105et_l2_lookup_params_entry_packing, .cmd_packing = sja1105et_l2_lookup_params_cmd_packing, @@ -703,8 +695,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, .addr = 0x38, }, - [BLK_IDX_L2_FORWARDING_PARAMS] = {0}, - [BLK_IDX_AVB_PARAMS] = {0}, [BLK_IDX_GENERAL_PARAMS] = { .entry_packing = sja1105et_general_params_entry_packing, .cmd_packing = sja1105et_general_params_cmd_packing, @@ -729,13 +719,10 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD, .addr = 0x2c, }, - [BLK_IDX_XMII_PARAMS] = {0}, }; /* SJA1105P/Q/R/S: Second generation */ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { - [BLK_IDX_SCHEDULE] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, [BLK_IDX_VL_LOOKUP] = { .entry_packing = sja1105_vl_lookup_entry_packing, .cmd_packing = sja1105_vl_lookup_cmd_packing, @@ -744,8 +731,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD, .addr = 0x47, }, - [BLK_IDX_VL_POLICING] = {0}, - [BLK_IDX_VL_FORWARDING] = {0}, [BLK_IDX_L2_LOOKUP] = { .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing, .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing, @@ -762,7 +747,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD, .addr = 0x24, }, - [BLK_IDX_L2_POLICING] = {0}, [BLK_IDX_VLAN_LOOKUP] = { .entry_packing = sja1105_vlan_lookup_entry_packing, .cmd_packing = sja1105_vlan_lookup_cmd_packing, @@ -787,9 +771,6 @@ struct sja1105_dynamic_table_ops 
sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD, .addr = 0x4B, }, - [BLK_IDX_SCHEDULE_PARAMS] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, - [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing, .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing, @@ -798,7 +779,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, .addr = 0x54, }, - [BLK_IDX_L2_FORWARDING_PARAMS] = {0}, [BLK_IDX_AVB_PARAMS] = { .entry_packing = sja1105pqrs_avb_params_entry_packing, .cmd_packing = sja1105pqrs_avb_params_cmd_packing, @@ -831,7 +811,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD, .addr = 0x32, }, - [BLK_IDX_XMII_PARAMS] = {0}, }; /* Provides read access to the settings through the dynamic interface diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index ff3fe471efc2..fb44c0a72285 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -839,11 +839,6 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config) /* SJA1105E: First generation, no TTEthernet */ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { - [BLK_IDX_SCHEDULE] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, - [BLK_IDX_VL_LOOKUP] = {0}, - [BLK_IDX_VL_POLICING] = {0}, - [BLK_IDX_VL_FORWARDING] = {0}, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105et_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -874,9 +869,6 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, - [BLK_IDX_SCHEDULE_PARAMS] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, - [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105et_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -1035,11 +1027,6 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { /* SJA1105P: Second generation, no TTEthernet, no SGMII */ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { - [BLK_IDX_SCHEDULE] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, - [BLK_IDX_VL_LOOKUP] = {0}, - [BLK_IDX_VL_POLICING] = {0}, - [BLK_IDX_VL_FORWARDING] = {0}, [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -1070,9 +1057,6 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, - [BLK_IDX_SCHEDULE_PARAMS] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, - [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), @@ -1231,11 +1215,6 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { /* SJA1105R: Second generation, no TTEthernet, SGMII */ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { - [BLK_IDX_SCHEDULE] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0}, - [BLK_IDX_VL_LOOKUP] = {0}, - [BLK_IDX_VL_POLICING] = {0}, - [BLK_IDX_VL_FORWARDING] = {0}, 
[BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -1266,9 +1245,6 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY, .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT, }, - [BLK_IDX_SCHEDULE_PARAMS] = {0}, - [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0}, - [BLK_IDX_VL_FORWARDING_PARAMS] = {0}, [BLK_IDX_L2_LOOKUP_PARAMS] = { .packing = sja1105pqrs_l2_lookup_params_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry), From 718e44b6ea1c6abbd7ffc62cc16af5feca5ac528 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 20:18:31 +0300 Subject: [PATCH 0179/2056] net: dsa: sja1105: make config table operation structures constant The per-chip instantiations of struct sja1105_table_ops and struct sja1105_dynamic_table_ops can be made constant, so do that. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 4 ++-- drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 4 ++-- drivers/net/dsa/sja1105/sja1105_static_config.c | 12 ++++++------ drivers/net/dsa/sja1105/sja1105_static_config.h | 12 ++++++------ 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c index 331593ace215..75247f342124 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c @@ -638,7 +638,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr, #define OP_SEARCH BIT(3) /* SJA1105E/T: First generation */ -struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { +const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { [BLK_IDX_VL_LOOKUP] = { .entry_packing = sja1105et_vl_lookup_entry_packing, .cmd_packing = sja1105_vl_lookup_cmd_packing, @@ -722,7 +722,7 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = { }; /* SJA1105P/Q/R/S: Second generation */ -struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { +const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = { [BLK_IDX_VL_LOOKUP] = { .entry_packing = sja1105_vl_lookup_entry_packing, .cmd_packing = sja1105_vl_lookup_cmd_packing, diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h index 1fc0d13dc623..28d4eb5efb8b 100644 --- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h +++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h @@ -34,7 +34,7 @@ struct sja1105_mgmt_entry { u64 index; }; -extern struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN]; -extern struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN]; +extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN]; +extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN]; #endif diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c index fb44c0a72285..139b7b4fbd0d 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.c +++ b/drivers/net/dsa/sja1105/sja1105_static_config.c @@ -838,7 +838,7 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config) /* Compatibility matrices */ /* SJA1105E: First generation, no TTEthernet */ -struct sja1105_table_ops 
sja1105e_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { [BLK_IDX_L2_LOOKUP] = { .packing = sja1105et_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -908,7 +908,7 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = { }; /* SJA1105T: First generation, TTEthernet */ -struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { [BLK_IDX_SCHEDULE] = { .packing = sja1105_schedule_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), @@ -1026,7 +1026,7 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = { }; /* SJA1105P: Second generation, no TTEthernet, no SGMII */ -struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -1096,7 +1096,7 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = { }; /* SJA1105Q: Second generation, TTEthernet, no SGMII */ -struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { [BLK_IDX_SCHEDULE] = { .packing = sja1105_schedule_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), @@ -1214,7 +1214,7 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = { }; /* SJA1105R: Second generation, no TTEthernet, SGMII */ -struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { [BLK_IDX_L2_LOOKUP] = { .packing = sja1105pqrs_l2_lookup_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry), @@ -1284,7 +1284,7 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = { }; /* SJA1105S: Second generation, TTEthernet, SGMII */ -struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = { +const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = { [BLK_IDX_SCHEDULE] = { .packing = sja1105_schedule_entry_packing, .unpacked_entry_size = sizeof(struct sja1105_schedule_entry), diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h index ee0f10062763..bc7606899289 100644 --- a/drivers/net/dsa/sja1105/sja1105_static_config.h +++ b/drivers/net/dsa/sja1105/sja1105_static_config.h @@ -381,12 +381,12 @@ struct sja1105_static_config { struct sja1105_table tables[BLK_IDX_MAX]; }; -extern struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX]; -extern struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX]; -extern struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX]; -extern struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX]; -extern struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX]; -extern struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX]; +extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX]; size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op); void From 13c832a41df2af7f7c435ab2dac611099b88d97a 
Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 20 Jun 2020 20:18:32 +0300 Subject: [PATCH 0180/2056] net: dsa: sja1105: make the instantiations of struct sja1105_info constant Since struct sja1105_private only holds a const pointer to one of these structures based on device tree compatible string, the structures themselves can be made const. Also add an empty line between each structure definition, to appease checkpatch. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105.h | 12 ++++++------ drivers/net/dsa/sja1105/sja1105_spi.c | 17 +++++++++++------ 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h index 29ed21687295..ba70b40a9a95 100644 --- a/drivers/net/dsa/sja1105/sja1105.h +++ b/drivers/net/dsa/sja1105/sja1105.h @@ -262,12 +262,12 @@ int sja1105_static_config_upload(struct sja1105_private *priv); int sja1105_inhibit_tx(const struct sja1105_private *priv, unsigned long port_bitmap, bool tx_inhibited); -extern struct sja1105_info sja1105e_info; -extern struct sja1105_info sja1105t_info; -extern struct sja1105_info sja1105p_info; -extern struct sja1105_info sja1105q_info; -extern struct sja1105_info sja1105r_info; -extern struct sja1105_info sja1105s_info; +extern const struct sja1105_info sja1105e_info; +extern const struct sja1105_info sja1105t_info; +extern const struct sja1105_info sja1105p_info; +extern const struct sja1105_info sja1105q_info; +extern const struct sja1105_info sja1105r_info; +extern const struct sja1105_info sja1105s_info; /* From sja1105_clocking.c */ diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c index bb52b9c841b2..704dcf1d1c01 100644 --- a/drivers/net/dsa/sja1105/sja1105_spi.c +++ b/drivers/net/dsa/sja1105/sja1105_spi.c @@ -507,7 +507,7 @@ static struct sja1105_regs sja1105pqrs_regs = { .ptpsyncts = 0x1F, }; -struct sja1105_info sja1105e_info = { +const struct sja1105_info sja1105e_info = { .device_id = SJA1105E_DEVICE_ID, .part_no = SJA1105ET_PART_NO, .static_ops = sja1105e_table_ops, @@ -523,7 +523,8 @@ struct sja1105_info sja1105e_info = { .regs = &sja1105et_regs, .name = "SJA1105E", }; -struct sja1105_info sja1105t_info = { + +const struct sja1105_info sja1105t_info = { .device_id = SJA1105T_DEVICE_ID, .part_no = SJA1105ET_PART_NO, .static_ops = sja1105t_table_ops, @@ -539,7 +540,8 @@ struct sja1105_info sja1105t_info = { .regs = &sja1105et_regs, .name = "SJA1105T", }; -struct sja1105_info sja1105p_info = { + +const struct sja1105_info sja1105p_info = { .device_id = SJA1105PR_DEVICE_ID, .part_no = SJA1105P_PART_NO, .static_ops = sja1105p_table_ops, @@ -556,7 +558,8 @@ struct sja1105_info sja1105p_info = { .regs = &sja1105pqrs_regs, .name = "SJA1105P", }; -struct sja1105_info sja1105q_info = { + +const struct sja1105_info sja1105q_info = { .device_id = SJA1105QS_DEVICE_ID, .part_no = SJA1105Q_PART_NO, .static_ops = sja1105q_table_ops, @@ -573,7 +576,8 @@ struct sja1105_info sja1105q_info = { .regs = &sja1105pqrs_regs, .name = "SJA1105Q", }; -struct sja1105_info sja1105r_info = { + +const struct sja1105_info sja1105r_info = { .device_id = SJA1105PR_DEVICE_ID, .part_no = SJA1105R_PART_NO, .static_ops = sja1105r_table_ops, @@ -590,7 +594,8 @@ struct sja1105_info sja1105r_info = { .regs = &sja1105pqrs_regs, .name = "SJA1105R", }; -struct sja1105_info sja1105s_info = { + +const struct sja1105_info sja1105s_info = { .device_id = SJA1105QS_DEVICE_ID, .part_no = SJA1105S_PART_NO, 
.static_ops = sja1105s_table_ops, From b5872cd0e823e4cb50b3a75cd9522167eeb676a2 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sat, 20 Jun 2020 22:01:56 +0530 Subject: [PATCH 0181/2056] devlink: Add support for board.serial_number to info_get cb. Board serial number is a serial number, often available in PCI *Vital Product Data*. Also, update devlink-info.rst documentation file. Cc: Jiri Pirko Cc: Jakub Kicinski Signed-off-by: Vasundhara Volam Reviewed-by: Michael Chan Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/devlink/devlink-info.rst | 12 +++++------- include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 8 ++++++++ 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/Documentation/networking/devlink/devlink-info.rst b/Documentation/networking/devlink/devlink-info.rst index 3fe11401b838..7572bf6de5c1 100644 --- a/Documentation/networking/devlink/devlink-info.rst +++ b/Documentation/networking/devlink/devlink-info.rst @@ -44,9 +44,11 @@ versions is generally discouraged - here, and via any other Linux API. reported for two ports of the same device or on two hosts of a multi-host device should be identical. - .. note:: ``devlink-info`` API should be extended with a new field - if devices want to report board/product serial number (often - reported in PCI *Vital Product Data* capability). + * - ``board.serial_number`` + - Board serial number of the device. + + This is usually the serial number of the board, often available in + PCI *Vital Product Data*. * - ``fixed`` - Group for hardware identifiers, and versions of components @@ -201,10 +203,6 @@ Future work The following extensions could be useful: - - product serial number - NIC boards often get labeled with a board serial - number rather than ASIC serial number; it'd be useful to add board serial - numbers to the API if they can be retrieved from the device; - - on-disk firmware file names - drivers list the file names of firmware they may need to load onto devices via the ``MODULE_FIRMWARE()`` macro. These, however, are per module, rather than per device. 
It'd be useful to list diff --git a/include/net/devlink.h b/include/net/devlink.h index 7007f93585a5..428f55f8197c 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1284,6 +1284,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn); int devlink_info_driver_name_put(struct devlink_info_req *req, const char *name); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn); int devlink_info_version_fixed_put(struct devlink_info_req *req, const char *version_name, const char *version_value); diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 07d0af8f5923..87c83a82991b 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -453,6 +453,8 @@ enum devlink_attr { DEVLINK_ATTR_PORT_FUNCTION, /* nested */ + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index baa45eca6b5a..455998a57671 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -4502,6 +4502,14 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn) } EXPORT_SYMBOL_GPL(devlink_info_serial_number_put); +int devlink_info_board_serial_number_put(struct devlink_info_req *req, + const char *bsn) +{ + return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, + bsn); +} +EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put); + static int devlink_info_version_put(struct devlink_info_req *req, int attr, const char *version_name, const char *version_value) From 9bf88b9fc8a4a8dd38992a7a065e459c645c9545 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Sat, 20 Jun 2020 22:01:57 +0530 Subject: [PATCH 0182/2056] bnxt_en: Add board.serial_number field to info_get cb Add board.serial_number field info to info_get cb via devlink, if driver can fetch the information from the device. Cc: Jiri Pirko Cc: Jakub Kicinski Signed-off-by: Vasundhara Volam Reviewed-by: Michael Chan Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a812beb46325..2bd610fafc58 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -411,6 +411,12 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, return rc; } + if (strlen(bp->board_serialno)) { + rc = devlink_info_board_serial_number_put(req, bp->board_serialno); + if (rc) + return rc; + } + sprintf(buf, "%X", bp->chip_num); rc = devlink_info_version_fixed_put(req, DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf); From 3efdb92426bf479aa87d38a55b66c81d2a60f41b Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sat, 20 Jun 2020 21:26:40 +0200 Subject: [PATCH 0183/2056] dt-bindings: net: dwmac-meson: Add a compatible string for G12A onwards Amlogic Meson G12A, G12B and SM1 have the same (at least as far as we know at the time of writing) PRG_ETHERNET glue register implementation. This implementation however is slightly different from AXG as it now has an undocument "auto cali idx val" register in PRG_ETH1[17:16] which seems to be related to RGMII Ethernet. Add a compatible string for G12A and newer so the new registers can be used. Signed-off-by: Martin Blumenstingl Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml index 64c20c92c07d..85fefe3a0444 100644 --- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml +++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml @@ -22,6 +22,7 @@ select: - amlogic,meson8m2-dwmac - amlogic,meson-gxbb-dwmac - amlogic,meson-axg-dwmac + - amlogic,meson-g12a-dwmac required: - compatible @@ -36,6 +37,7 @@ allOf: - amlogic,meson8m2-dwmac - amlogic,meson-gxbb-dwmac - amlogic,meson-axg-dwmac + - amlogic,meson-g12a-dwmac then: properties: @@ -95,6 +97,7 @@ properties: - amlogic,meson8m2-dwmac - amlogic,meson-gxbb-dwmac - amlogic,meson-axg-dwmac + - amlogic,meson-g12a-dwmac contains: enum: - snps,dwmac-3.70a From a4f63342d03d2dfb61d9e52f30d084d0780aaddd Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Sat, 20 Jun 2020 21:26:41 +0200 Subject: [PATCH 0184/2056] net: stmmac: dwmac-meson8b: add a compatible string for G12A SoCs Amlogic Meson G12A, G12B and SM1 have the same (at least as far as we know at the time of writing) PRG_ETHERNET glue register implementation. This implementation however is slightly different from AXG as it now has an undocument "auto cali idx val" register in PRG_ETH1[17:16] which seems to be related to RGMII Ethernet. Add a new compatible string for G12A SoCs so the logic for this new register can be implemented in the future. Signed-off-by: Martin Blumenstingl Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 234e8b6816ce..544bc621146c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -491,6 +491,10 @@ static const struct of_device_id meson8b_dwmac_match[] = { .compatible = "amlogic,meson-axg-dwmac", .data = &meson_axg_dwmac_data, }, + { + .compatible = "amlogic,meson-g12a-dwmac", + .data = &meson_axg_dwmac_data, + }, { } }; MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); From bd869245a3dcca1c8a77dfade7d3dc69a68365df Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:35:42 +0200 Subject: [PATCH 0185/2056] net: core: try to runtime-resume detached device in __dev_open A netdevice may be marked as detached because the parent is runtime-suspended and not accessible whilst interface or link is down. An example are PCI network devices that go into PCI D3hot, see e.g. __igc_shutdown() or rtl8169_net_suspend(). If netdevice is down and marked as detached we can only open it if we runtime-resume it before __dev_open() calls netif_device_present(). Therefore, if netdevice is detached, try to runtime-resume the parent and only return with an error if it's still detached. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- net/core/dev.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index cea7fc1716ca..c5ec4d50acd1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -143,6 +143,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -1492,8 +1493,13 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) ASSERT_RTNL(); - if (!netif_device_present(dev)) - return -ENODEV; + if (!netif_device_present(dev)) { + /* may be detached because parent is runtime-suspended */ + if (dev->dev.parent) + pm_runtime_resume(dev->dev.parent); + if (!netif_device_present(dev)) + return -ENODEV; + } /* Block netpoll from trying to do any rx path servicing. * If we don't do this there is a chance ndo_poll_controller From 476c4f5de3689a39a097ad20120ca5653a52dad4 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:36:26 +0200 Subject: [PATCH 0186/2056] r8169: mark device as not present when in PCI D3 Mark the netdevice as detached whenever we go into PCI D3hot. This allows to remove some checks e.g. from ethtool ops because dev_ethtool() checks for netif_device_present() in the beginning. In this context move waking up the queue out of rtl_reset_work() because in cases where netif_device_attach() is called afterwards the queue should be woken up by the latter function only. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 25 +++++++++++++---------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a3c4187d918b..ec724e69931d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3977,10 +3977,9 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) static void rtl_reset_work(struct rtl8169_private *tp) { - struct net_device *dev = tp->dev; int i; - netif_stop_queue(dev); + netif_stop_queue(tp->dev); rtl8169_cleanup(tp, false); @@ -3989,7 +3988,6 @@ static void rtl_reset_work(struct rtl8169_private *tp) napi_enable(&tp->napi); rtl_hw_start(tp); - netif_wake_queue(dev); } static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue) @@ -4574,8 +4572,10 @@ static void rtl_task(struct work_struct *work) !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) goto out_unlock; - if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) + if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) { rtl_reset_work(tp); + netif_wake_queue(tp->dev); + } out_unlock: rtl_unlock_work(tp); } @@ -4820,11 +4820,10 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) static void rtl8169_net_suspend(struct rtl8169_private *tp) { - if (!netif_running(tp->dev)) - return; - netif_device_detach(tp->dev); - rtl8169_down(tp); + + if (netif_running(tp->dev)) + rtl8169_down(tp); } #ifdef CONFIG_PM @@ -4840,8 +4839,6 @@ static int __maybe_unused rtl8169_suspend(struct device *device) static void __rtl8169_resume(struct rtl8169_private *tp) { - netif_device_attach(tp->dev); - rtl_pll_power_up(tp); rtl8169_init_phy(tp); @@ -4863,6 +4860,8 @@ static int __maybe_unused rtl8169_resume(struct device *device) if (netif_running(tp->dev)) __rtl8169_resume(tp); + netif_device_attach(tp->dev); + return 0; } @@ -4870,8 +4869,10 @@ static int rtl8169_runtime_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - if 
(!tp->TxDescArray) + if (!tp->TxDescArray) { + netif_device_detach(tp->dev); return 0; + } rtl_lock_work(tp); __rtl8169_set_wol(tp, WAKE_PHY); @@ -4895,6 +4896,8 @@ static int rtl8169_runtime_resume(struct device *device) if (tp->TxDescArray) __rtl8169_resume(tp); + netif_device_attach(tp->dev); + return 0; } From ec2f204bddb5f9b7819507b9b5df5ca6197ce912 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:37:01 +0200 Subject: [PATCH 0187/2056] r8169: remove no longer needed checks for device being runtime-active Because the netdevice is marked as detached now when parent is not accessible we can remove quite some checks. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 54 +++-------------------- 1 file changed, 6 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index ec724e69931d..81a1cea0989d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1422,24 +1422,17 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); if (wol->wolopts & ~WAKE_ANY) return -EINVAL; - pm_runtime_get_noresume(d); - rtl_lock_work(tp); tp->saved_wolopts = wol->wolopts; - - if (pm_runtime_active(d)) - __rtl8169_set_wol(tp, tp->saved_wolopts); + __rtl8169_set_wol(tp, tp->saved_wolopts); rtl_unlock_work(tp); - pm_runtime_put_noidle(d); - return 0; } @@ -1657,17 +1650,10 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - struct rtl8169_counters *counters = tp->counters; + struct rtl8169_counters *counters; - ASSERT_RTNL(); - - pm_runtime_get_noresume(d); - - if (pm_runtime_active(d)) - rtl8169_update_counters(tp); - - pm_runtime_put_noidle(d); + counters = tp->counters; + rtl8169_update_counters(tp); data[0] = le64_to_cpu(counters->tx_packets); data[1] = le64_to_cpu(counters->rx_packets); @@ -1899,48 +1885,26 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); - int ret; if (!rtl_supports_eee(tp)) return -EOPNOTSUPP; - pm_runtime_get_noresume(d); - - if (!pm_runtime_active(d)) { - ret = -EOPNOTSUPP; - } else { - ret = phy_ethtool_get_eee(tp->phydev, data); - } - - pm_runtime_put_noidle(d); - - return ret; + return phy_ethtool_get_eee(tp->phydev, data); } static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); int ret; if (!rtl_supports_eee(tp)) return -EOPNOTSUPP; - pm_runtime_get_noresume(d); - - if (!pm_runtime_active(d)) { - ret = -EOPNOTSUPP; - goto out; - } - ret = phy_ethtool_set_eee(tp->phydev, data); if (!ret) tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV); -out: - pm_runtime_put_noidle(d); return ret; } @@ -2219,19 +2183,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) static int rtl_set_mac_address(struct net_device *dev, void *p) { struct rtl8169_private *tp = netdev_priv(dev); - struct device *d = tp_to_dev(tp); int ret; ret = 
eth_mac_addr(dev, p); if (ret) return ret; - pm_runtime_get_noresume(d); - - if (pm_runtime_active(d)) - rtl_rar_set(tp, dev->dev_addr); - - pm_runtime_put_noidle(d); + rtl_rar_set(tp, dev->dev_addr); return 0; } From 567ca57faa6266da931679fd4c4eb9863855f804 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:37:50 +0200 Subject: [PATCH 0188/2056] r8169: add rtl8169_up Factor out bringing device up to a new function rtl8169_up(), similar to rtl8169_down() for bringing the device down. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 48 ++++++++--------------- 1 file changed, 16 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 81a1cea0989d..4d3e7db94bdd 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4613,6 +4613,19 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl_unlock_work(tp); } +static void rtl8169_up(struct rtl8169_private *tp) +{ + rtl_lock_work(tp); + rtl_pll_power_up(tp); + rtl8169_init_phy(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_reset_work(tp); + + phy_start(tp->phydev); + rtl_unlock_work(tp); +} + static int rtl8169_close(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); @@ -4688,25 +4701,10 @@ static int rtl_open(struct net_device *dev) if (retval) goto err_free_irq; - rtl_lock_work(tp); - - set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); - - napi_enable(&tp->napi); - - rtl8169_init_phy(tp); - - rtl_pll_power_up(tp); - - rtl_hw_start(tp); - + rtl8169_up(tp); rtl8169_init_counter_offsets(tp); - - phy_start(tp->phydev); netif_start_queue(dev); - rtl_unlock_work(tp); - pm_runtime_put_sync(&pdev->dev); out: return retval; @@ -4795,20 +4793,6 @@ static int __maybe_unused rtl8169_suspend(struct device *device) return 0; } -static void __rtl8169_resume(struct rtl8169_private *tp) -{ - rtl_pll_power_up(tp); - rtl8169_init_phy(tp); - - phy_start(tp->phydev); - - rtl_lock_work(tp); - napi_enable(&tp->napi); - set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); - rtl_reset_work(tp); - rtl_unlock_work(tp); -} - static int __maybe_unused rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); @@ -4816,7 +4800,7 @@ static int __maybe_unused rtl8169_resume(struct device *device) rtl_rar_set(tp, tp->dev->dev_addr); if (netif_running(tp->dev)) - __rtl8169_resume(tp); + rtl8169_up(tp); netif_device_attach(tp->dev); @@ -4852,7 +4836,7 @@ static int rtl8169_runtime_resume(struct device *device) rtl_unlock_work(tp); if (tp->TxDescArray) - __rtl8169_resume(tp); + rtl8169_up(tp); netif_device_attach(tp->dev); From abe5fc42f9ce942c96d50bf6b44886b70d5759ec Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:38:24 +0200 Subject: [PATCH 0189/2056] r8169: use RTNL to protect critical sections Most relevant ops (open, close, ethtool ops) are protected with RTNL lock by net core. Make sure that such ops can't be interrupted by e.g. (runtime-)suspending by taking the RTNL lock in suspend ops and the PCI error handler. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
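The idea here, and in the follow-up patch that drops the private mutex, is that the ndo/ethtool paths already run under RTNL, so once the suspend paths take RTNL as well, everything that tears down or reconfigures the hardware is serialized by a single lock. A standalone sketch with an ordinary mutex standing in for RTNL (illustration only, not driver code; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stands in for rtnl_lock() */
static int hw_up = 1;

static void driver_down(void) { hw_up = 0; }    /* rtl8169_down() analogue */

static void *ndo_stop(void *arg)                /* net core already holds RTNL here */
{
        (void)arg;
        pthread_mutex_lock(&rtnl);
        if (hw_up)
                driver_down();
        pthread_mutex_unlock(&rtnl);
        return NULL;
}

static void *runtime_suspend(void *arg)         /* after this patch: takes RTNL itself */
{
        (void)arg;
        pthread_mutex_lock(&rtnl);
        if (hw_up)
                driver_down();
        pthread_mutex_unlock(&rtnl);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, ndo_stop, NULL);
        pthread_create(&b, NULL, runtime_suspend, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("hw_up=%d\n", hw_up);
        return 0;
}

Because both paths contend on the same lock, the teardown sequence can never run concurrently with an open/close or ethtool operation, which is what makes the driver's own wk.mutex redundant in the next patch.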
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4d3e7db94bdd..5a6d53fe8f86 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4524,6 +4524,7 @@ static void rtl_task(struct work_struct *work) struct rtl8169_private *tp = container_of(work, struct rtl8169_private, wk.work); + rtnl_lock(); rtl_lock_work(tp); if (!netif_running(tp->dev) || @@ -4536,6 +4537,7 @@ static void rtl_task(struct work_struct *work) } out_unlock: rtl_unlock_work(tp); + rtnl_unlock(); } static int rtl8169_poll(struct napi_struct *napi, int budget) @@ -4788,7 +4790,9 @@ static int __maybe_unused rtl8169_suspend(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); + rtnl_lock(); rtl8169_net_suspend(tp); + rtnl_unlock(); return 0; } @@ -4816,11 +4820,13 @@ static int rtl8169_runtime_suspend(struct device *device) return 0; } + rtnl_lock(); rtl_lock_work(tp); __rtl8169_set_wol(tp, WAKE_PHY); rtl_unlock_work(tp); rtl8169_net_suspend(tp); + rtnl_unlock(); return 0; } @@ -4882,7 +4888,9 @@ static void rtl_shutdown(struct pci_dev *pdev) { struct rtl8169_private *tp = pci_get_drvdata(pdev); + rtnl_lock(); rtl8169_net_suspend(tp); + rtnl_unlock(); /* Restore original MAC address */ rtl_rar_set(tp, tp->dev->perm_addr); From 06a14ab852fbd33b237b1bbe6515e9cbd8e3800e Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:38:55 +0200 Subject: [PATCH 0190/2056] r8169: remove driver-specific mutex Now that the critical sections are protected with RTNL lock, we don't need a separate mutex any longer. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 45 ----------------------- 1 file changed, 45 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 5a6d53fe8f86..154488202f0f 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -611,7 +611,6 @@ struct rtl8169_private { struct { DECLARE_BITMAP(flags, RTL_FLAG_MAX); - struct mutex mutex; struct work_struct work; } wk; @@ -663,16 +662,6 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp) return &tp->pci_dev->dev; } -static void rtl_lock_work(struct rtl8169_private *tp) -{ - mutex_lock(&tp->wk.mutex); -} - -static void rtl_unlock_work(struct rtl8169_private *tp) -{ - mutex_unlock(&tp->wk.mutex); -} - static void rtl_lock_config_regs(struct rtl8169_private *tp) { RTL_W8(tp, Cfg9346, Cfg9346_Lock); @@ -1348,10 +1337,8 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); - rtl_lock_work(tp); wol->supported = WAKE_ANY; wol->wolopts = tp->saved_wolopts; - rtl_unlock_work(tp); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1426,13 +1413,9 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~WAKE_ANY) return -EINVAL; - rtl_lock_work(tp); - tp->saved_wolopts = wol->wolopts; __rtl8169_set_wol(tp, tp->saved_wolopts); - rtl_unlock_work(tp); - return 0; } @@ -1495,8 +1478,6 @@ static int rtl8169_set_features(struct net_device *dev, { struct rtl8169_private *tp = netdev_priv(dev); - rtl_lock_work(tp); - rtl_set_rx_config_features(tp, features); if (features & NETIF_F_RXCSUM) @@ -1514,8 +1495,6 @@ static int rtl8169_set_features(struct net_device *dev, RTL_W16(tp, CPlusCmd, tp->cp_cmd); rtl_pci_commit(tp); - rtl_unlock_work(tp); - return 0; } @@ -1541,10 +1520,8 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, u32 *dw = p; int i; - rtl_lock_work(tp); for (i = 0; i < R8169_REGS_SIZE; i += 4) memcpy_fromio(dw++, data++, 4); - rtl_unlock_work(tp); } static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { @@ -1860,8 +1837,6 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale); w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units); - rtl_lock_work(tp); - RTL_W16(tp, IntrMitigate, w); /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */ @@ -1877,8 +1852,6 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) RTL_W16(tp, CPlusCmd, tp->cp_cmd); rtl_pci_commit(tp); - rtl_unlock_work(tp); - return 0; } @@ -2162,8 +2135,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp) static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) { - rtl_lock_work(tp); - rtl_unlock_config_regs(tp); RTL_W32(tp, MAC4, addr[4] | addr[5] << 8); @@ -2176,8 +2147,6 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) rtl_rar_exgmac_set(tp, addr); rtl_lock_config_regs(tp); - - rtl_unlock_work(tp); } static int rtl_set_mac_address(struct net_device *dev, void *p) @@ -4525,7 +4494,6 @@ static void rtl_task(struct work_struct *work) container_of(work, struct rtl8169_private, wk.work); rtnl_lock(); - rtl_lock_work(tp); if (!netif_running(tp->dev) || !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) @@ -4536,7 +4504,6 @@ static void rtl_task(struct work_struct *work) netif_wake_queue(tp->dev); } out_unlock: - 
rtl_unlock_work(tp); rtnl_unlock(); } @@ -4599,8 +4566,6 @@ static int r8169_phy_connect(struct rtl8169_private *tp) static void rtl8169_down(struct rtl8169_private *tp) { - rtl_lock_work(tp); - /* Clear all task flags */ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); @@ -4611,13 +4576,10 @@ static void rtl8169_down(struct rtl8169_private *tp) rtl8169_cleanup(tp, true); rtl_pll_power_down(tp); - - rtl_unlock_work(tp); } static void rtl8169_up(struct rtl8169_private *tp) { - rtl_lock_work(tp); rtl_pll_power_up(tp); rtl8169_init_phy(tp); napi_enable(&tp->napi); @@ -4625,7 +4587,6 @@ static void rtl8169_up(struct rtl8169_private *tp) rtl_reset_work(tp); phy_start(tp->phydev); - rtl_unlock_work(tp); } static int rtl8169_close(struct net_device *dev) @@ -4821,10 +4782,7 @@ static int rtl8169_runtime_suspend(struct device *device) } rtnl_lock(); - rtl_lock_work(tp); __rtl8169_set_wol(tp, WAKE_PHY); - rtl_unlock_work(tp); - rtl8169_net_suspend(tp); rtnl_unlock(); @@ -4837,9 +4795,7 @@ static int rtl8169_runtime_resume(struct device *device) rtl_rar_set(tp, tp->dev->dev_addr); - rtl_lock_work(tp); __rtl8169_set_wol(tp, tp->saved_wolopts); - rtl_unlock_work(tp); if (tp->TxDescArray) rtl8169_up(tp); @@ -5296,7 +5252,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return rc; } - mutex_init(&tp->wk.mutex); INIT_WORK(&tp->wk.work, rtl_task); u64_stats_init(&tp->rx_stats.syncp); u64_stats_init(&tp->tx_stats.syncp); From 288302dab34ece4993ea3dd011299f406b78f237 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sat, 20 Jun 2020 22:39:35 +0200 Subject: [PATCH 0191/2056] r8169: improve rtl8169_runtime_resume Simplify rtl8169_runtime_resume() by calling rtl8169_resume(). Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169_main.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 154488202f0f..7e2a62a9ed61 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4758,13 +4758,13 @@ static int __maybe_unused rtl8169_suspend(struct device *device) return 0; } -static int __maybe_unused rtl8169_resume(struct device *device) +static int rtl8169_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); rtl_rar_set(tp, tp->dev->dev_addr); - if (netif_running(tp->dev)) + if (tp->TxDescArray) rtl8169_up(tp); netif_device_attach(tp->dev); @@ -4793,16 +4793,9 @@ static int rtl8169_runtime_resume(struct device *device) { struct rtl8169_private *tp = dev_get_drvdata(device); - rtl_rar_set(tp, tp->dev->dev_addr); - __rtl8169_set_wol(tp, tp->saved_wolopts); - if (tp->TxDescArray) - rtl8169_up(tp); - - netif_device_attach(tp->dev); - - return 0; + return rtl8169_resume(device); } static int rtl8169_runtime_idle(struct device *device) From ef0f9545cbf160591df2ff245f539bdcff692992 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:50 +0300 Subject: [PATCH 0192/2056] net: phy: marvell: use a single style for referencing functions The kernel in general does not use &func referencing format. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
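The reason this sweep is purely cosmetic: in an initializer a function designator decays to a pointer to the function, so "&fn" and "fn" yield the same value. A self-contained demonstration (the struct and callback are stand-ins, not the phylib definitions):

#include <stdio.h>

static int example_resume(void) { return 0; }   /* stand-in for a resume callback */

struct example_phy_driver {
        int (*resume)(void);
};

int main(void)
{
        struct example_phy_driver with_amp    = { .resume = &example_resume };
        struct example_phy_driver without_amp = { .resume = example_resume };

        printf("%d\n", with_amp.resume == without_amp.resume);   /* prints 1 */
        return 0;
}

Dropping the ampersand therefore changes nothing in the generated code; it only aligns the driver tables with the style used elsewhere in the kernel.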
Miller --- drivers/net/phy/marvell.c | 222 +++++++++++++++++++------------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index c9ecf3c8c3fd..ee9c352f67ab 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2625,12 +2625,12 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1101", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &marvell_config_init, - .config_aneg = &m88e1101_config_aneg, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = marvell_config_init, + .config_aneg = m88e1101_config_aneg, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2643,12 +2643,12 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1112", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1111_config_init, - .config_aneg = &marvell_config_aneg, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1111_config_init, + .config_aneg = marvell_config_aneg, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2663,13 +2663,13 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1111", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1111_config_init, - .config_aneg = &marvell_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1111_config_init, + .config_aneg = marvell_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2684,12 +2684,12 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1118", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1118_config_init, - .config_aneg = &m88e1118_config_aneg, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1118_config_init, + .config_aneg = m88e1118_config_aneg, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2701,15 +2701,15 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", /* PHY_GBIT_FEATURES */ - .probe = &m88e1121_probe, - .config_init = &marvell_config_init, - .config_aneg = &m88e1121_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = 
&m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .probe = m88e1121_probe, + .config_init = marvell_config_init, + .config_aneg = m88e1121_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2724,16 +2724,16 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1318S", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1318_config_init, - .config_aneg = &m88e1318_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .get_wol = &m88e1318_get_wol, - .set_wol = &m88e1318_set_wol, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1318_config_init, + .config_aneg = m88e1318_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .get_wol = m88e1318_get_wol, + .set_wol = m88e1318_set_wol, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2746,13 +2746,13 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1145", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1145_config_init, - .config_aneg = &m88e1101_config_aneg, - .read_status = &genphy_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1145_config_init, + .config_aneg = m88e1101_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2767,12 +2767,12 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1149R", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1149_config_init, - .config_aneg = &m88e1118_config_aneg, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1149_config_init, + .config_aneg = m88e1118_config_aneg, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2785,12 +2785,12 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1240", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1111_config_init, - .config_aneg = &marvell_config_aneg, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1111_config_init, + .config_aneg = marvell_config_aneg, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, 
.write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2803,11 +2803,11 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1116R", /* PHY_GBIT_FEATURES */ .probe = marvell_probe, - .config_init = &m88e1116r_config_init, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e1116r_config_init, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2822,17 +2822,17 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1510", .features = PHY_GBIT_FIBRE_FEATURES, .flags = PHY_POLL_CABLE_TEST, - .probe = &m88e1510_probe, - .config_init = &m88e1510_config_init, - .config_aneg = &m88e1510_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .get_wol = &m88e1318_get_wol, - .set_wol = &m88e1318_set_wol, - .resume = &marvell_resume, - .suspend = &marvell_suspend, + .probe = m88e1510_probe, + .config_init = m88e1510_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .get_wol = m88e1318_get_wol, + .set_wol = m88e1318_set_wol, + .resume = marvell_resume, + .suspend = marvell_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2852,14 +2852,14 @@ static struct phy_driver marvell_drivers[] = { /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .probe = m88e1510_probe, - .config_init = &marvell_config_init, - .config_aneg = &m88e1510_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2878,14 +2878,14 @@ static struct phy_driver marvell_drivers[] = { .probe = m88e1510_probe, /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, - .config_init = &marvell_config_init, - .config_aneg = &m88e1510_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2903,14 +2903,14 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E3016", /* 
PHY_BASIC_FEATURES */ .probe = marvell_probe, - .config_init = &m88e3016_config_init, - .aneg_done = &marvell_aneg_done, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = m88e3016_config_init, + .aneg_done = marvell_aneg_done, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, @@ -2924,14 +2924,14 @@ static struct phy_driver marvell_drivers[] = { /* PHY_GBIT_FEATURES */ .flags = PHY_POLL_CABLE_TEST, .probe = m88e6390_probe, - .config_init = &marvell_config_init, - .config_aneg = &m88e6390_config_aneg, - .read_status = &marvell_read_status, - .ack_interrupt = &marvell_ack_interrupt, - .config_intr = &marvell_config_intr, - .did_interrupt = &m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .config_init = marvell_config_init, + .config_aneg = m88e6390_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, .read_page = marvell_read_page, .write_page = marvell_write_page, .get_sset_count = marvell_get_sset_count, From a602ea86e9f0d82f5c7ba1d3f7487d4097380b96 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:51 +0300 Subject: [PATCH 0193/2056] net: phy: marvell: Add Marvell 88E1340S support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
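One table entry per part is enough because the MDIO/PHY core compares IDs under the driver's mask, so the low revision nibble is ignored. A standalone sketch of that comparison; MARVELL_PHY_ID_MASK is 0xfffffff0 in include/linux/marvell_phy.h, and the helper name here is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MARVELL_PHY_ID_88E1340S 0x01410dc0
#define MARVELL_PHY_ID_MASK     0xfffffff0

static bool phy_id_matches(uint32_t hw_id, uint32_t id, uint32_t mask)
{
        return (hw_id & mask) == (id & mask);
}

int main(void)
{
        /* a different silicon revision (low nibble) still matches the new entry */
        printf("%d\n", phy_id_matches(0x01410dc3, MARVELL_PHY_ID_88E1340S,
                                      MARVELL_PHY_ID_MASK));
        /* the 88E1548P ID added in the next patch (0x01410ec0) does not */
        printf("%d\n", phy_id_matches(0x01410ec0, MARVELL_PHY_ID_88E1340S,
                                      MARVELL_PHY_ID_MASK));
        return 0;
}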
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index ee9c352f67ab..0842deb33085 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2943,6 +2943,28 @@ static struct phy_driver marvell_drivers[] = { .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start, .cable_test_get_status = marvell_vct7_cable_test_get_status, }, + { + .phy_id = MARVELL_PHY_ID_88E1340S, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1340S", + .probe = m88e1510_probe, + /* PHY_GBIT_FEATURES */ + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2963,6 +2985,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index af6b11d4d673..c4390e9cbf15 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -15,6 +15,7 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1340S 0x01410dc0 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 From f59babf95ef969a18744082ee16e4dfd17743c0b Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Sun, 21 Jun 2020 10:59:52 +0300 Subject: [PATCH 0194/2056] net: phy: marvell: Add Marvell 88E1548P support Add support for this new phy ID. Signed-off-by: Maxim Kochetkov Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 23 +++++++++++++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 24 insertions(+) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0842deb33085..bb86ac0bd092 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2965,6 +2965,28 @@ static struct phy_driver marvell_drivers[] = { .get_tunable = m88e1540_get_tunable, .set_tunable = m88e1540_set_tunable, }, + { + .phy_id = MARVELL_PHY_ID_88E1548P, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1548P", + .probe = m88e1510_probe, + .features = PHY_GBIT_FIBRE_FEATURES, + .config_init = marvell_config_init, + .config_aneg = m88e1510_config_aneg, + .read_status = marvell_read_status, + .ack_interrupt = marvell_ack_interrupt, + .config_intr = marvell_config_intr, + .did_interrupt = m88e1121_did_interrupt, + .resume = genphy_resume, + .suspend = genphy_suspend, + .read_page = marvell_read_page, + .write_page = marvell_write_page, + .get_sset_count = marvell_get_sset_count, + .get_strings = marvell_get_strings, + .get_stats = marvell_get_stats, + .get_tunable = m88e1540_get_tunable, + .set_tunable = m88e1540_set_tunable, + }, }; module_phy_driver(marvell_drivers); @@ -2986,6 +3008,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK }, { } }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index c4390e9cbf15..ff7b7607c8cf 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -20,6 +20,7 @@ #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E1545 0x01410ea0 +#define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 From 3cc9a15a0bb1bde93d5430facda3f3e07c2d3d87 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 21 Jun 2020 11:34:33 +0300 Subject: [PATCH 0195/2056] mlxsw: spectrum: Split handling of pedit mangle by chip type Certain ACL actions are only available on some Spectrum revisions. In particular, L4_PORT_ACTION is not available on Spectrum-1. Introduce a new ops struct intended to hold these differences, mlxsw_sp_rulei_ops. Prime it with a sole member, act_mangle_field, meant for handling of pedit mangles. Create two ops structures, one for Spectrum-1, the other for Spectrum-2 and above. Add callbacks for act_mangle_field and dispatch to the common handler. Invoke mlxsw_sp_rulei_ops.act_mangle_field from the field mangler instead of calling the common handler directly. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
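A standalone sketch of the dispatch scheme described above (and of how the later L4_PORT patch slots into it): a per-ASIC ops structure whose act_mangle_field callback runs the common handler first and only interprets -EOPNOTSUPP in a generation-specific way. Names loosely mirror the driver, but this is an illustration, not the mlxsw code:

#include <errno.h>
#include <stdio.h>

enum mangle_field { FIELD_DSCP, FIELD_ECN, FIELD_SPORT, FIELD_DPORT };

/* common handler: fields every Spectrum generation can rewrite */
static int common_mangle(enum mangle_field field, unsigned int val)
{
        switch (field) {
        case FIELD_DSCP:
        case FIELD_ECN:
                printf("common: field %d <- %u\n", field, val);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

struct rulei_ops {
        int (*act_mangle_field)(enum mangle_field field, unsigned int val);
};

/* Spectrum-1 analogue: anything the common handler rejects stays unsupported */
static int sp1_mangle(enum mangle_field field, unsigned int val)
{
        int err = common_mangle(field, val);

        if (err == -EOPNOTSUPP)
                fprintf(stderr, "sp1: unsupported mangle field %d\n", field);
        return err;
}

/* Spectrum-2 analogue: additionally maps L4 port rewrites to a dedicated action */
static int sp2_mangle(enum mangle_field field, unsigned int val)
{
        int err = common_mangle(field, val);

        if (err != -EOPNOTSUPP)
                return err;
        if (field == FIELD_SPORT || field == FIELD_DPORT) {
                printf("sp2: l4 port field %d <- %u\n", field, val);
                return 0;
        }
        fprintf(stderr, "sp2: unsupported mangle field %d\n", field);
        return err;
}

static const struct rulei_ops sp1_ops = { .act_mangle_field = sp1_mangle };
static const struct rulei_ops sp2_ops = { .act_mangle_field = sp2_mangle };

int main(void)
{
        const struct rulei_ops *ops = &sp2_ops; /* chosen once at probe time */

        printf("sp2 dport -> %d\n", ops->act_mangle_field(FIELD_DPORT, 8080));
        printf("sp1 dport -> %d\n", sp1_ops.act_mangle_field(FIELD_DPORT, 8080));
        return 0;
}

Keeping the -EOPNOTSUPP decision in the per-ASIC callback is what lets Spectrum-2 grow new fields, such as the TCP/UDP port rewrite added later in this series, without touching the common table walk.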
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 3 ++ .../net/ethernet/mellanox/mlxsw/spectrum.h | 13 +++++ .../ethernet/mellanox/mlxsw/spectrum_acl.c | 51 ++++++++++++++++--- 3 files changed, 60 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 55af877763ed..7d7ebd99f09e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4594,6 +4594,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; + mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; @@ -4621,6 +4622,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; @@ -4644,6 +4646,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; + mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6e87457dd635..17dd16d82f87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -120,6 +120,7 @@ struct mlxsw_sp_kvdl; struct mlxsw_sp_nve; struct mlxsw_sp_kvdl_ops; struct mlxsw_sp_mr_tcam_ops; +struct mlxsw_sp_acl_rulei_ops; struct mlxsw_sp_acl_tcam_ops; struct mlxsw_sp_nve_ops; struct mlxsw_sp_sb_vals; @@ -164,6 +165,7 @@ struct mlxsw_sp { const struct mlxsw_afa_ops *afa_ops; const struct mlxsw_afk_ops *afk_ops; const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops; + const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops; const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops; const struct mlxsw_sp_nve_ops **nve_ops_arr; const struct mlxsw_sp_rif_ops **rif_ops_arr; @@ -856,6 +858,17 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp); u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val); +struct mlxsw_sp_acl_mangle_action; + +struct mlxsw_sp_acl_rulei_ops { + int (*act_mangle_field)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_acl_mangle_action *mact, u32 val, + struct netlink_ext_ack *extack); +}; + +extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops; +extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops; + /* spectrum_acl_tcam.c */ struct mlxsw_sp_acl_tcam; struct mlxsw_sp_acl_tcam_region; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 47da9ee0045d..cadcec6dbe19 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -563,11 +563,39 @@ 
mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp, case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN: return mlxsw_afa_block_append_qos_ecn(rulei->act_block, val, extack); + default: + return -EOPNOTSUPP; } +} - /* We shouldn't have gotten a match in the first place! */ - WARN_ONCE(1, "Unhandled mangle field"); - return -EINVAL; +static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_acl_mangle_action *mact, + u32 val, struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack); + if (err != -EOPNOTSUPP) + return err; + + NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field"); + return err; +} + +static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_acl_mangle_action *mact, + u32 val, struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack); + if (err != -EOPNOTSUPP) + return err; + + NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field"); + return err; } int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp, @@ -576,6 +604,7 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp, u32 offset, u32 mask, u32 val, struct netlink_ext_ack *extack) { + const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops; struct mlxsw_sp_acl_mangle_action *mact; size_t i; @@ -585,13 +614,13 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp, mact->offset == offset && mact->mask == mask) { val >>= mact->shift; - return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, - rulei, mact, - val, extack); + return acl_rulei_ops->act_mangle_field(mlxsw_sp, + rulei, mact, + val, extack); } } - NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field"); + NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field"); return -EINVAL; } @@ -930,3 +959,11 @@ int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val) return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp, &acl->tcam, val); } + +struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = { + .act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field, +}; + +struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = { + .act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field, +}; From faad0525c0f4062d4edcf0eb0663ed9685fc38dc Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 21 Jun 2020 11:34:34 +0300 Subject: [PATCH 0196/2056] mlxsw: core_acl_flex_actions: Add L4_PORT_ACTION Add fields related to L4_PORT_ACTION, which is used for changing of TCP and UDP port numbers. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index c3d04319ff44..a0bf0b86e25b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -1684,3 +1684,29 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter); + +/* L4 Port Action + * -------------- + * The L4_PORT_ACTION is used for modifying the sport and dport fields of the packet, e.g. for NAT. 
+ * If (the L4 is TCP) or if (the L4 is UDP and checksum field!=0) then the L4 checksum is updated. + */ + +#define MLXSW_AFA_L4PORT_CODE 0x12 +#define MLXSW_AFA_L4PORT_SIZE 1 + +enum mlxsw_afa_l4port_s_d { + /* configure src_l4_port */ + MLXSW_AFA_L4PORT_S_D_SRC, + /* configure dst_l4_port */ + MLXSW_AFA_L4PORT_S_D_DST, +}; + +/* afa_l4port_s_d + * Source or destination. + */ +MLXSW_ITEM32(afa, l4port, s_d, 0x00, 31, 1); + +/* afa_l4port_l4_port + * Number of port to change to. + */ +MLXSW_ITEM32(afa, l4port, l4_port, 0x08, 0, 16); From ce10d7d4ad08396055a38d78b87a144307fc14a9 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 21 Jun 2020 11:34:35 +0300 Subject: [PATCH 0197/2056] mlxsw: spectrum_acl: Support FLOW_ACTION_MANGLE for TCP, UDP ports Spectrum-2 supports an ACL action L4_PORT, which allows TCP and UDP source and destination port number change. Offload suitable mangles to this action. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 25 +++++++++++++++++++ .../mellanox/mlxsw/core_acl_flex_actions.h | 2 ++ .../ethernet/mellanox/mlxsw/spectrum_acl.c | 24 ++++++++++++++++++ 3 files changed, 51 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index a0bf0b86e25b..30a7d5afdec7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -1710,3 +1710,28 @@ MLXSW_ITEM32(afa, l4port, s_d, 0x00, 31, 1); * Number of port to change to. */ MLXSW_ITEM32(afa, l4port, l4_port, 0x08, 0, 16); + +static void mlxsw_afa_l4port_pack(char *payload, enum mlxsw_afa_l4port_s_d s_d, u16 l4_port) +{ + mlxsw_afa_l4port_s_d_set(payload, s_d); + mlxsw_afa_l4port_l4_port_set(payload, l4_port); +} + +int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port, + struct netlink_ext_ack *extack) +{ + enum mlxsw_afa_l4port_s_d s_d = is_dport ? 
MLXSW_AFA_L4PORT_S_D_DST : + MLXSW_AFA_L4PORT_S_D_SRC; + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_L4PORT_CODE, + MLXSW_AFA_L4PORT_SIZE); + + if (IS_ERR(act)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append L4_PORT action"); + return PTR_ERR(act); + } + + mlxsw_afa_l4port_pack(act, s_d, l4_port); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_l4port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 8c2705e16ef7..a72350399bcf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -82,5 +82,7 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid, int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, u16 expected_irif, u16 min_mtu, bool rmid_valid, u32 kvdl_index); +int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port, + struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index cadcec6dbe19..a671156a1428 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -508,6 +508,8 @@ enum mlxsw_sp_acl_mangle_field { MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD, MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP, MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN, + MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT, + MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT, }; struct mlxsw_sp_acl_mangle_action { @@ -538,13 +540,26 @@ struct mlxsw_sp_acl_mangle_action { MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6, \ _offset, _mask, _shift, _field) +#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \ + MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field) + +#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \ + MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field) + static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = { MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD), MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP), MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN), + MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD), MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP), MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN), + + MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT), + MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0, IP_DPORT), + + MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT), + MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0, IP_DPORT), }; static int @@ -594,6 +609,15 @@ static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp, if (err != -EOPNOTSUPP) return err; + switch (mact->field) { + case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT: + return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack); + case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT: + return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack); + default: + break; + } + NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field"); return err; } From 13bd5d025602bb5374f54e144dec577b74718493 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sun, 21 Jun 2020 11:34:36 +0300 Subject: [PATCH 0198/2056] selftests: forwarding: Add a test for pedit munge tcp, udp sport, dport Add a test that checks that pedit adjusts port 
numbers of tcp and udp packets. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../selftests/net/forwarding/pedit_l4port.sh | 198 ++++++++++++++++++ 1 file changed, 198 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/pedit_l4port.sh diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh new file mode 100755 index 000000000000..5f20d289ee43 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/pedit_l4port.sh @@ -0,0 +1,198 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# This test sends traffic from H1 to H2. Either on ingress of $swp1, or on egress of $swp2, the +# traffic is acted upon by a pedit action. An ingress filter installed on $h2 verifies that the +# packet looks like expected. +# +# +----------------------+ +----------------------+ +# | H1 | | H2 | +# | + $h1 | | $h2 + | +# | | 192.0.2.1/28 | | 192.0.2.2/28 | | +# +----|-----------------+ +----------------|-----+ +# | | +# +----|----------------------------------------------------------------|-----+ +# | SW | | | +# | +-|----------------------------------------------------------------|-+ | +# | | + $swp1 BR $swp2 + | | +# | +--------------------------------------------------------------------+ | +# +---------------------------------------------------------------------------+ + +ALL_TESTS=" + ping_ipv4 + test_udp_sport + test_udp_dport + test_tcp_sport + test_tcp_dport +" + +NUM_NETIFS=4 +source lib.sh +source tc_common.sh + +: ${HIT_TIMEOUT:=2000} # ms + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h1_destroy() +{ + simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64 + tc qdisc add dev $h2 clsact +} + +h2_destroy() +{ + tc qdisc del dev $h2 clsact + simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64 +} + +switch_create() +{ + ip link add name br1 up type bridge vlan_filtering 1 + ip link set dev $swp1 master br1 + ip link set dev $swp1 up + ip link set dev $swp2 master br1 + ip link set dev $swp2 up + + tc qdisc add dev $swp1 clsact + tc qdisc add dev $swp2 clsact +} + +switch_destroy() +{ + tc qdisc del dev $swp2 clsact + tc qdisc del dev $swp1 clsact + + ip link set dev $swp2 nomaster + ip link set dev $swp1 nomaster + ip link del dev br1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + swp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + h2mac=$(mac_get $h2) + + vrf_prepare + h1_create + h2_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h2_destroy + h1_destroy + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.2 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:1::2 +} + +do_test_pedit_l4port_one() +{ + local pedit_locus=$1; shift + local pedit_prot=$1; shift + local pedit_action=$1; shift + local match_prot=$1; shift + local match_flower=$1; shift + local mz_flags=$1; shift + local saddr=$1; shift + local daddr=$1; shift + + tc filter add $pedit_locus handle 101 pref 1 \ + flower action pedit ex munge $pedit_action + tc filter add dev $h2 ingress handle 101 pref 1 prot $match_prot \ + flower skip_hw $match_flower action pass + + RET=0 + + $MZ $mz_flags $h1 -c 10 -d 20msec -p 100 \ + -a own -b $h2mac -q -t $pedit_prot sp=54321,dp=12345 + + local pkts + pkts=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= 10" \ + tc_rule_handle_stats_get "dev $h2 ingress" 101) + check_err $? "Expected to get 10 packets, but got $pkts." 
+ + pkts=$(tc_rule_handle_stats_get "$pedit_locus" 101) + ((pkts >= 10)) + check_err $? "Expected to get 10 packets on pedit rule, but got $pkts." + + log_test "$pedit_locus pedit $pedit_action" + + tc filter del dev $h2 ingress pref 1 + tc filter del $pedit_locus pref 1 +} + +do_test_pedit_l4port() +{ + local locus=$1; shift + local prot=$1; shift + local pedit_port=$1; shift + local flower_port=$1; shift + local port + + for port in 1 11111 65535; do + do_test_pedit_l4port_one "$locus" "$prot" \ + "$prot $pedit_port set $port" \ + ip "ip_proto $prot $flower_port $port" \ + "-A 192.0.2.1 -B 192.0.2.2" + done +} + +test_udp_sport() +{ + do_test_pedit_l4port "dev $swp1 ingress" udp sport src_port + do_test_pedit_l4port "dev $swp2 egress" udp sport src_port +} + +test_udp_dport() +{ + do_test_pedit_l4port "dev $swp1 ingress" udp dport dst_port + do_test_pedit_l4port "dev $swp2 egress" udp dport dst_port +} + +test_tcp_sport() +{ + do_test_pedit_l4port "dev $swp1 ingress" tcp sport src_port + do_test_pedit_l4port "dev $swp2 egress" tcp sport src_port +} + +test_tcp_dport() +{ + do_test_pedit_l4port "dev $swp1 ingress" tcp dport dst_port + do_test_pedit_l4port "dev $swp2 egress" tcp dport dst_port +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From 2e33efe32e019328916ce653dc1265d637261993 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:55 -0700 Subject: [PATCH 0199/2056] libbpf: Generalize libbpf externs support Switch existing Kconfig externs to be just one of few possible kinds of more generic externs. This refactoring is in preparation for ksymbol extern support, added in the follow up patch. There are no functional changes intended. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hao Luo Link: https://lore.kernel.org/bpf/20200619231703.738941-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 344 ++++++++++++++++++++++++----------------- 1 file changed, 205 insertions(+), 139 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 259a6360475f..ffccb5af32a5 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -330,24 +330,35 @@ struct bpf_map { enum extern_type { EXT_UNKNOWN, - EXT_CHAR, - EXT_BOOL, - EXT_INT, - EXT_TRISTATE, - EXT_CHAR_ARR, + EXT_KCFG, +}; + +enum kcfg_type { + KCFG_UNKNOWN, + KCFG_CHAR, + KCFG_BOOL, + KCFG_INT, + KCFG_TRISTATE, + KCFG_CHAR_ARR, }; struct extern_desc { - const char *name; + enum extern_type type; int sym_idx; int btf_id; - enum extern_type type; - int sz; - int align; - int data_off; - bool is_signed; - bool is_weak; + int sec_btf_id; + const char *name; bool is_set; + bool is_weak; + union { + struct { + enum kcfg_type type; + int sz; + int align; + int data_off; + bool is_signed; + } kcfg; + }; }; static LIST_HEAD(bpf_objects_list); @@ -1424,19 +1435,19 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, return NULL; } -static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, - char value) +static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, + char value) { - switch (ext->type) { - case EXT_BOOL: + switch (ext->kcfg.type) { + case KCFG_BOOL: if (value == 'm') { - pr_warn("extern %s=%c should be tristate or char\n", + pr_warn("extern (kcfg) %s=%c should be tristate or char\n", ext->name, value); return -EINVAL; } *(bool *)ext_val = value == 'y' ? 
true : false; break; - case EXT_TRISTATE: + case KCFG_TRISTATE: if (value == 'y') *(enum libbpf_tristate *)ext_val = TRI_YES; else if (value == 'm') @@ -1444,14 +1455,14 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, else /* value == 'n' */ *(enum libbpf_tristate *)ext_val = TRI_NO; break; - case EXT_CHAR: + case KCFG_CHAR: *(char *)ext_val = value; break; - case EXT_UNKNOWN: - case EXT_INT: - case EXT_CHAR_ARR: + case KCFG_UNKNOWN: + case KCFG_INT: + case KCFG_CHAR_ARR: default: - pr_warn("extern %s=%c should be bool, tristate, or char\n", + pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", ext->name, value); return -EINVAL; } @@ -1459,29 +1470,29 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val, return 0; } -static int set_ext_value_str(struct extern_desc *ext, char *ext_val, - const char *value) +static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, + const char *value) { size_t len; - if (ext->type != EXT_CHAR_ARR) { - pr_warn("extern %s=%s should char array\n", ext->name, value); + if (ext->kcfg.type != KCFG_CHAR_ARR) { + pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); return -EINVAL; } len = strlen(value); if (value[len - 1] != '"') { - pr_warn("extern '%s': invalid string config '%s'\n", + pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; } /* strip quotes */ len -= 2; - if (len >= ext->sz) { - pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", - ext->name, value, len, ext->sz - 1); - len = ext->sz - 1; + if (len >= ext->kcfg.sz) { + pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", + ext->name, value, len, ext->kcfg.sz - 1); + len = ext->kcfg.sz - 1; } memcpy(ext_val, value + 1, len); ext_val[len] = '\0'; @@ -1508,11 +1519,11 @@ static int parse_u64(const char *value, __u64 *res) return 0; } -static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v) +static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) { - int bit_sz = ext->sz * 8; + int bit_sz = ext->kcfg.sz * 8; - if (ext->sz == 8) + if (ext->kcfg.sz == 8) return true; /* Validate that value stored in u64 fits in integer of `ext->sz` @@ -1527,26 +1538,26 @@ static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v) * For unsigned target integer, check that all the (64 - Y) bits are * zero. 
*/ - if (ext->is_signed) + if (ext->kcfg.is_signed) return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); else return (v >> bit_sz) == 0; } -static int set_ext_value_num(struct extern_desc *ext, void *ext_val, - __u64 value) +static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, + __u64 value) { - if (ext->type != EXT_INT && ext->type != EXT_CHAR) { - pr_warn("extern %s=%llu should be integer\n", + if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { + pr_warn("extern (kcfg) %s=%llu should be integer\n", ext->name, (unsigned long long)value); return -EINVAL; } - if (!is_ext_value_in_range(ext, value)) { - pr_warn("extern %s=%llu value doesn't fit in %d bytes\n", - ext->name, (unsigned long long)value, ext->sz); + if (!is_kcfg_value_in_range(ext, value)) { + pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", + ext->name, (unsigned long long)value, ext->kcfg.sz); return -ERANGE; } - switch (ext->sz) { + switch (ext->kcfg.sz) { case 1: *(__u8 *)ext_val = value; break; case 2: *(__u16 *)ext_val = value; break; case 4: *(__u32 *)ext_val = value; break; @@ -1592,30 +1603,30 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj, if (!ext || ext->is_set) return 0; - ext_val = data + ext->data_off; + ext_val = data + ext->kcfg.data_off; value = sep + 1; switch (*value) { case 'y': case 'n': case 'm': - err = set_ext_value_tri(ext, ext_val, *value); + err = set_kcfg_value_tri(ext, ext_val, *value); break; case '"': - err = set_ext_value_str(ext, ext_val, value); + err = set_kcfg_value_str(ext, ext_val, value); break; default: /* assume integer */ err = parse_u64(value, &num); if (err) { - pr_warn("extern %s=%s should be integer\n", + pr_warn("extern (kcfg) %s=%s should be integer\n", ext->name, value); return err; } - err = set_ext_value_num(ext, ext_val, num); + err = set_kcfg_value_num(ext, ext_val, num); break; } if (err) return err; - pr_debug("extern %s=%s\n", ext->name, value); + pr_debug("extern (kcfg) %s=%s\n", ext->name, value); return 0; } @@ -1686,16 +1697,20 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj, static int bpf_object__init_kconfig_map(struct bpf_object *obj) { - struct extern_desc *last_ext; + struct extern_desc *last_ext = NULL, *ext; size_t map_sz; - int err; + int i, err; - if (obj->nr_extern == 0) + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type == EXT_KCFG) + last_ext = ext; + } + + if (!last_ext) return 0; - last_ext = &obj->externs[obj->nr_extern - 1]; - map_sz = last_ext->data_off + last_ext->sz; - + map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, obj->efile.symbols_shndx, NULL, map_sz); @@ -2714,8 +2729,33 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name) return -ENOENT; } -static enum extern_type find_extern_type(const struct btf *btf, int id, - bool *is_signed) +static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { + const struct btf_var_secinfo *vs; + const struct btf_type *t; + int i, j, n; + + if (!btf) + return -ESRCH; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (!btf_is_datasec(t)) + continue; + + vs = btf_var_secinfos(t); + for (j = 0; j < btf_vlen(t); j++, vs++) { + if (vs->type == ext_btf_id) + return i; + } + } + + return -ENOENT; +} + +static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, + bool *is_signed) { const struct btf_type *t; const char *name; @@ -2730,29 +2770,29 
@@ static enum extern_type find_extern_type(const struct btf *btf, int id, int enc = btf_int_encoding(t); if (enc & BTF_INT_BOOL) - return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN; + return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; if (is_signed) *is_signed = enc & BTF_INT_SIGNED; if (t->size == 1) - return EXT_CHAR; + return KCFG_CHAR; if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) - return EXT_UNKNOWN; - return EXT_INT; + return KCFG_UNKNOWN; + return KCFG_INT; } case BTF_KIND_ENUM: if (t->size != 4) - return EXT_UNKNOWN; + return KCFG_UNKNOWN; if (strcmp(name, "libbpf_tristate")) - return EXT_UNKNOWN; - return EXT_TRISTATE; + return KCFG_UNKNOWN; + return KCFG_TRISTATE; case BTF_KIND_ARRAY: if (btf_array(t)->nelems == 0) - return EXT_UNKNOWN; - if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR) - return EXT_UNKNOWN; - return EXT_CHAR_ARR; + return KCFG_UNKNOWN; + if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) + return KCFG_UNKNOWN; + return KCFG_CHAR_ARR; default: - return EXT_UNKNOWN; + return KCFG_UNKNOWN; } } @@ -2761,23 +2801,29 @@ static int cmp_externs(const void *_a, const void *_b) const struct extern_desc *a = _a; const struct extern_desc *b = _b; - /* descending order by alignment requirements */ - if (a->align != b->align) - return a->align > b->align ? -1 : 1; - /* ascending order by size, within same alignment class */ - if (a->sz != b->sz) - return a->sz < b->sz ? -1 : 1; + if (a->type != b->type) + return a->type < b->type ? -1 : 1; + + if (a->type == EXT_KCFG) { + /* descending order by alignment requirements */ + if (a->kcfg.align != b->kcfg.align) + return a->kcfg.align > b->kcfg.align ? -1 : 1; + /* ascending order by size, within same alignment class */ + if (a->kcfg.sz != b->kcfg.sz) + return a->kcfg.sz < b->kcfg.sz ? 
-1 : 1; + } + /* resolve ties by name */ return strcmp(a->name, b->name); } static int bpf_object__collect_externs(struct bpf_object *obj) { + struct btf_type *sec, *kcfg_sec = NULL; const struct btf_type *t; struct extern_desc *ext; - int i, n, off, btf_id; - struct btf_type *sec; - const char *ext_name; + int i, n, off; + const char *ext_name, *sec_name; Elf_Scn *scn; GElf_Shdr sh; @@ -2823,22 +2869,39 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ext->name = btf__name_by_offset(obj->btf, t->name_off); ext->sym_idx = i; ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK; - ext->sz = btf__resolve_size(obj->btf, t->type); - if (ext->sz <= 0) { - pr_warn("failed to resolve size of extern '%s': %d\n", - ext_name, ext->sz); - return ext->sz; + + ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id); + if (ext->sec_btf_id <= 0) { + pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n", + ext_name, ext->btf_id, ext->sec_btf_id); + return ext->sec_btf_id; } - ext->align = btf__align_of(obj->btf, t->type); - if (ext->align <= 0) { - pr_warn("failed to determine alignment of extern '%s': %d\n", - ext_name, ext->align); - return -EINVAL; - } - ext->type = find_extern_type(obj->btf, t->type, - &ext->is_signed); - if (ext->type == EXT_UNKNOWN) { - pr_warn("extern '%s' type is unsupported\n", ext_name); + sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id); + sec_name = btf__name_by_offset(obj->btf, sec->name_off); + + if (strcmp(sec_name, KCONFIG_SEC) == 0) { + kcfg_sec = sec; + ext->type = EXT_KCFG; + ext->kcfg.sz = btf__resolve_size(obj->btf, t->type); + if (ext->kcfg.sz <= 0) { + pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.sz); + return ext->kcfg.sz; + } + ext->kcfg.align = btf__align_of(obj->btf, t->type); + if (ext->kcfg.align <= 0) { + pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n", + ext_name, ext->kcfg.align); + return -EINVAL; + } + ext->kcfg.type = find_kcfg_type(obj->btf, t->type, + &ext->kcfg.is_signed); + if (ext->kcfg.type == KCFG_UNKNOWN) { + pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); + return -ENOTSUP; + } + } else { + pr_warn("unrecognized extern section '%s'\n", sec_name); return -ENOTSUP; } } @@ -2847,42 +2910,40 @@ static int bpf_object__collect_externs(struct bpf_object *obj) if (!obj->nr_extern) return 0; - /* sort externs by (alignment, size, name) and calculate their offsets - * within a map */ + /* sort externs by type, for kcfg ones also by (align, size, name) */ qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); - off = 0; - for (i = 0; i < obj->nr_extern; i++) { - ext = &obj->externs[i]; - ext->data_off = roundup(off, ext->align); - off = ext->data_off + ext->sz; - pr_debug("extern #%d: symbol %d, off %u, name %s\n", - i, ext->sym_idx, ext->data_off, ext->name); - } - btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC); - if (btf_id <= 0) { - pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC); - return -ESRCH; - } + if (kcfg_sec) { + sec = kcfg_sec; + /* for kcfg externs calculate their offsets within a .kconfig map */ + off = 0; + for (i = 0; i < obj->nr_extern; i++) { + ext = &obj->externs[i]; + if (ext->type != EXT_KCFG) + continue; - sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id); - sec->size = off; - n = btf_vlen(sec); - for (i = 0; i < n; i++) { - struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; - - t = btf__type_by_id(obj->btf, vs->type); - ext_name = btf__name_by_offset(obj->btf, t->name_off); - ext 
= find_extern_by_name(obj, ext_name); - if (!ext) { - pr_warn("failed to find extern definition for BTF var '%s'\n", - ext_name); - return -ESRCH; + ext->kcfg.data_off = roundup(off, ext->kcfg.align); + off = ext->kcfg.data_off + ext->kcfg.sz; + pr_debug("extern #%d (kcfg): symbol %d, off %u, name %s\n", + i, ext->sym_idx, ext->kcfg.data_off, ext->name); } - vs->offset = ext->data_off; - btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; - } + sec->size = off; + n = btf_vlen(sec); + for (i = 0; i < n; i++) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + t = btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, t->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vs->offset = ext->kcfg.data_off; + } + } return 0; } @@ -3012,11 +3073,11 @@ static int bpf_program__record_reloc(struct bpf_program *prog, sym_idx); return -LIBBPF_ERRNO__RELOC; } - pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n", - i, ext->name, ext->sym_idx, ext->data_off, insn_idx); + pr_debug("found extern #%d '%s' (sym %d) for insn %u\n", + i, ext->name, ext->sym_idx, insn_idx); reloc_desc->type = RELO_EXTERN; reloc_desc->insn_idx = insn_idx; - reloc_desc->sym_off = ext->data_off; + reloc_desc->sym_off = i; /* sym_off stores extern index */ return 0; } @@ -4941,6 +5002,7 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) for (i = 0; i < prog->nr_reloc; i++) { struct reloc_desc *relo = &prog->reloc_desc[i]; struct bpf_insn *insn = &prog->insns[relo->insn_idx]; + struct extern_desc *ext; if (relo->insn_idx + 1 >= (int)prog->insns_cnt) { pr_warn("relocation out of range: '%s'\n", @@ -4959,9 +5021,10 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) insn[0].imm = obj->maps[relo->map_idx].fd; break; case RELO_EXTERN: + ext = &obj->externs[relo->sym_off]; insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; - insn[1].imm = relo->sym_off; + insn[1].imm = ext->kcfg.data_off; break; case RELO_CALL: err = bpf_program__reloc_text(prog, obj, relo); @@ -5585,30 +5648,33 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, { bool need_config = false; struct extern_desc *ext; + void *kcfg_data = NULL; int err, i; - void *data; if (obj->nr_extern == 0) return 0; - data = obj->maps[obj->kconfig_map_idx].mmaped; + if (obj->kconfig_map_idx >= 0) + kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; - if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { - void *ext_val = data + ext->data_off; + if (ext->type == EXT_KCFG && + strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) { + void *ext_val = kcfg_data + ext->kcfg.data_off; __u32 kver = get_kernel_version(); if (!kver) { pr_warn("failed to get kernel version\n"); return -EINVAL; } - err = set_ext_value_num(ext, ext_val, kver); + err = set_kcfg_value_num(ext, ext_val, kver); if (err) return err; - pr_debug("extern %s=0x%x\n", ext->name, kver); - } else if (strncmp(ext->name, "CONFIG_", 7) == 0) { + pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver); + } else if (ext->type == EXT_KCFG && + strncmp(ext->name, "CONFIG_", 7) == 0) { need_config = true; } else { pr_warn("unrecognized extern '%s'\n", ext->name); @@ -5616,20 +5682,20 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, } } if (need_config && 
extra_kconfig) { - err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data); + err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data); if (err) return -EINVAL; need_config = false; for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; - if (!ext->is_set) { + if (ext->type == EXT_KCFG && !ext->is_set) { need_config = true; break; } } } if (need_config) { - err = bpf_object__read_kconfig_file(obj, data); + err = bpf_object__read_kconfig_file(obj, kcfg_data); if (err) return -EINVAL; } From 1c0c7074fefd769f62dda155e881ca90c9e3e75a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:56 -0700 Subject: [PATCH 0200/2056] libbpf: Add support for extracting kernel symbol addresses Add support for another special kind of extern in BPF code, in addition to the existing Kconfig externs: kernel symbol externs. Such externs allow BPF code to "know" a kernel symbol's address and either use it for comparisons with kernel data structures (e.g., struct file's f_op pointer, to distinguish different kinds of file), or, with the help of bpf_probe_read_kernel(), to follow pointers and read data from global variables. Kernel symbol addresses are found through /proc/kallsyms, which should be present in the system. Currently, such kernel symbol variables are typeless: they have to be defined as `extern const void` and the only operation you can do with them (in C code) is to take their address. Such externs should reside in a special section, '.ksyms'; the bpf_helpers.h header provides the __ksym macro for this. Strong vs. weak semantics stay the same as with Kconfig externs. If a symbol is not found in /proc/kallsyms, this is a failure for a strong (non-weak) extern, while a weak extern defaults to 0. If the same symbol is defined multiple times in /proc/kallsyms, it is an error if any of the associated addresses differ; in that case the address is ambiguous, so libbpf errs on the side of caution rather than confusing the user with a randomly chosen address. In the future, once the kernel is extended with BTF information for variables, such ksym externs will be supported in a typed version, which will allow a BPF program to read the variable's contents directly, similarly to how it's done for fentry/fexit input arguments.
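As a rough usage sketch (not part of this patch's diff; it mirrors the selftest added later in this series, and the strong symbol bpf_link_fops and the weak hypothetical_missing_symbol are only illustrations), a BPF program declares typeless externs in the '.ksyms' section via __ksym and only takes their addresses:

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of __ksym usage; assumes libbpf's bpf_helpers.h from this series. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u64 out__bpf_link_fops = -1;
__u64 out__missing = -1;

/* strong extern: object load fails if the symbol is absent from /proc/kallsyms */
extern const void bpf_link_fops __ksym;
/* weak extern: resolves to 0 if the symbol is not found */
extern const void hypothetical_missing_symbol __ksym __weak;

SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
	/* taking the address is the only valid operation on a typeless ksym extern */
	out__bpf_link_fops = (__u64)&bpf_link_fops;
	out__missing = (__u64)&hypothetical_missing_symbol;
	return 0;
}

char _license[] SEC("license") = "GPL";

At load time libbpf resolves these addresses by scanning /proc/kallsyms, as implemented in bpf_object__read_kallsyms_file() in the diff below.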
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hao Luo Link: https://lore.kernel.org/bpf/20200619231703.738941-3-andriin@fb.com --- tools/lib/bpf/bpf_helpers.h | 1 + tools/lib/bpf/btf.h | 5 ++ tools/lib/bpf/libbpf.c | 144 ++++++++++++++++++++++++++++++++++-- 3 files changed, 144 insertions(+), 6 deletions(-) diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index f67dce2af802..a510d8ed716f 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -75,5 +75,6 @@ enum libbpf_tristate { }; #define __kconfig __attribute__((section(".kconfig"))) +#define __ksym __attribute__((section(".ksyms"))) #endif diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 70c1b7ec2bd0..06cd1731c154 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -168,6 +168,11 @@ static inline bool btf_kflag(const struct btf_type *t) return BTF_INFO_KFLAG(t->info); } +static inline bool btf_is_void(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_UNKN; +} + static inline bool btf_is_int(const struct btf_type *t) { return btf_kind(t) == BTF_KIND_INT; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ffccb5af32a5..18461deb1b19 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -285,6 +285,7 @@ struct bpf_struct_ops { #define BSS_SEC ".bss" #define RODATA_SEC ".rodata" #define KCONFIG_SEC ".kconfig" +#define KSYMS_SEC ".ksyms" #define STRUCT_OPS_SEC ".struct_ops" enum libbpf_map_type { @@ -331,6 +332,7 @@ struct bpf_map { enum extern_type { EXT_UNKNOWN, EXT_KCFG, + EXT_KSYM, }; enum kcfg_type { @@ -358,6 +360,9 @@ struct extern_desc { int data_off; bool is_signed; } kcfg; + struct { + unsigned long long addr; + } ksym; }; }; @@ -2817,9 +2822,25 @@ static int cmp_externs(const void *_a, const void *_b) return strcmp(a->name, b->name); } +static int find_int_btf_id(const struct btf *btf) +{ + const struct btf_type *t; + int i, n; + + n = btf__get_nr_types(btf); + for (i = 1; i <= n; i++) { + t = btf__type_by_id(btf, i); + + if (btf_is_int(t) && btf_int_bits(t) == 32) + return i; + } + + return 0; +} + static int bpf_object__collect_externs(struct bpf_object *obj) { - struct btf_type *sec, *kcfg_sec = NULL; + struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL; const struct btf_type *t; struct extern_desc *ext; int i, n, off; @@ -2900,6 +2921,17 @@ static int bpf_object__collect_externs(struct bpf_object *obj) pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name); return -ENOTSUP; } + } else if (strcmp(sec_name, KSYMS_SEC) == 0) { + const struct btf_type *vt; + + ksym_sec = sec; + ext->type = EXT_KSYM; + + vt = skip_mods_and_typedefs(obj->btf, t->type, NULL); + if (!btf_is_void(vt)) { + pr_warn("extern (ksym) '%s' is not typeless (void)\n", ext_name); + return -ENOTSUP; + } } else { pr_warn("unrecognized extern section '%s'\n", sec_name); return -ENOTSUP; @@ -2913,6 +2945,46 @@ static int bpf_object__collect_externs(struct bpf_object *obj) /* sort externs by type, for kcfg ones also by (align, size, name) */ qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs); + /* for .ksyms section, we need to turn all externs into allocated + * variables in BTF to pass kernel verification; we do this by + * pretending that each extern is a 8-byte variable + */ + if (ksym_sec) { + /* find existing 4-byte integer type in BTF to use for fake + * extern variables in DATASEC + */ + int int_btf_id = find_int_btf_id(obj->btf); + + for (i = 0; i < obj->nr_extern; i++) { + ext = 
&obj->externs[i]; + if (ext->type != EXT_KSYM) + continue; + pr_debug("extern (ksym) #%d: symbol %d, name %s\n", + i, ext->sym_idx, ext->name); + } + + sec = ksym_sec; + n = btf_vlen(sec); + for (i = 0, off = 0; i < n; i++, off += sizeof(int)) { + struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i; + struct btf_type *vt; + + vt = (void *)btf__type_by_id(obj->btf, vs->type); + ext_name = btf__name_by_offset(obj->btf, vt->name_off); + ext = find_extern_by_name(obj, ext_name); + if (!ext) { + pr_warn("failed to find extern definition for BTF var '%s'\n", + ext_name); + return -ESRCH; + } + btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED; + vt->type = int_btf_id; + vs->offset = off; + vs->size = sizeof(int); + } + sec->size = off; + } + if (kcfg_sec) { sec = kcfg_sec; /* for kcfg externs calculate their offsets within a .kconfig map */ @@ -2924,7 +2996,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) ext->kcfg.data_off = roundup(off, ext->kcfg.align); off = ext->kcfg.data_off + ext->kcfg.sz; - pr_debug("extern #%d (kcfg): symbol %d, off %u, name %s\n", + pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n", i, ext->sym_idx, ext->kcfg.data_off, ext->name); } sec->size = off; @@ -5022,9 +5094,14 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) break; case RELO_EXTERN: ext = &obj->externs[relo->sym_off]; - insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; - insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; - insn[1].imm = ext->kcfg.data_off; + if (ext->type == EXT_KCFG) { + insn[0].src_reg = BPF_PSEUDO_MAP_VALUE; + insn[0].imm = obj->maps[obj->kconfig_map_idx].fd; + insn[1].imm = ext->kcfg.data_off; + } else /* EXT_KSYM */ { + insn[0].imm = (__u32)ext->ksym.addr; + insn[1].imm = ext->ksym.addr >> 32; + } break; case RELO_CALL: err = bpf_program__reloc_text(prog, obj, relo); @@ -5643,10 +5720,58 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) return 0; } +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + char sym_type, sym_name[500]; + unsigned long long sym_addr; + struct extern_desc *ext; + int ret, err = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (!f) { + err = -errno; + pr_warn("failed to open /proc/kallsyms: %d\n", err); + return err; + } + + while (true) { + ret = fscanf(f, "%llx %c %499s%*[^\n]\n", + &sym_addr, &sym_type, sym_name); + if (ret == EOF && feof(f)) + break; + if (ret != 3) { + pr_warn("failed to read kallasyms entry: %d\n", ret); + err = -EINVAL; + goto out; + } + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + continue; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); + err = -EINVAL; + goto out; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + } + +out: + fclose(f); + return err; +} + static int bpf_object__resolve_externs(struct bpf_object *obj, const char *extra_kconfig) { - bool need_config = false; + bool need_config = false, need_kallsyms = false; struct extern_desc *ext; void *kcfg_data = NULL; int err, i; @@ -5676,6 +5801,8 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, } else if (ext->type == EXT_KCFG && strncmp(ext->name, "CONFIG_", 7) == 0) { need_config = true; + } else if (ext->type == EXT_KSYM) { + need_kallsyms = true; } else { pr_warn("unrecognized extern '%s'\n", ext->name); return -EINVAL; @@ -5699,6 
+5826,11 @@ static int bpf_object__resolve_externs(struct bpf_object *obj, if (err) return -EINVAL; } + if (need_kallsyms) { + err = bpf_object__read_kallsyms_file(obj); + if (err) + return -EINVAL; + } for (i = 0; i < obj->nr_extern; i++) { ext = &obj->externs[i]; From b7ddfab20a6af3a0e366000eada63adf6a7683e7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:57 -0700 Subject: [PATCH 0201/2056] selftests/bpf: Add __ksym extern selftest Validate libbpf is able to handle weak and strong kernel symbol externs in BPF code correctly. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Hao Luo Link: https://lore.kernel.org/bpf/20200619231703.738941-4-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/ksyms.c | 71 +++++++++++++++++++ .../testing/selftests/bpf/progs/test_ksyms.c | 32 +++++++++ 2 files changed, 103 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/ksyms.c create mode 100644 tools/testing/selftests/bpf/progs/test_ksyms.c diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c new file mode 100644 index 000000000000..e3d6777226a8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include +#include "test_ksyms.skel.h" +#include + +static int duration; + +static __u64 kallsyms_find(const char *sym) +{ + char type, name[500]; + __u64 addr, res = 0; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (CHECK(!f, "kallsyms_fopen", "failed to open: %d\n", errno)) + return 0; + + while (fscanf(f, "%llx %c %499s%*[^\n]\n", &addr, &type, name) > 0) { + if (strcmp(name, sym) == 0) { + res = addr; + goto out; + } + } + + CHECK(false, "not_found", "symbol %s not found\n", sym); +out: + fclose(f); + return res; +} + +void test_ksyms(void) +{ + __u64 link_fops_addr = kallsyms_find("bpf_link_fops"); + const char *btf_path = "/sys/kernel/btf/vmlinux"; + struct test_ksyms *skel; + struct test_ksyms__data *data; + struct stat st; + __u64 btf_size; + int err; + + if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno)) + return; + btf_size = st.st_size; + + skel = test_ksyms__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n")) + return; + + err = test_ksyms__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + /* trigger tracepoint */ + usleep(1); + + data = skel->data; + CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops", + "got 0x%llx, exp 0x%llx\n", + data->out__bpf_link_fops, link_fops_addr); + CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1", + "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0); + CHECK(data->out__btf_size != btf_size, "btf_size", + "got %llu, exp %llu\n", data->out__btf_size, btf_size); + CHECK(data->out__per_cpu_start != 0, "__per_cpu_start", + "got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0); + +cleanup: + test_ksyms__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_ksyms.c b/tools/testing/selftests/bpf/progs/test_ksyms.c new file mode 100644 index 000000000000..6c9cbb5a3bdf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_ksyms.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ + +#include +#include +#include + +__u64 out__bpf_link_fops = -1; +__u64 out__bpf_link_fops1 = -1; +__u64 out__btf_size = -1; +__u64 out__per_cpu_start = -1; + 
+extern const void bpf_link_fops __ksym; +extern const void __start_BTF __ksym; +extern const void __stop_BTF __ksym; +extern const void __per_cpu_start __ksym; +/* non-existing symbol, weak, default to zero */ +extern const void bpf_link_fops1 __ksym __weak; + +SEC("raw_tp/sys_enter") +int handler(const void *ctx) +{ + out__bpf_link_fops = (__u64)&bpf_link_fops; + out__btf_size = (__u64)(&__stop_BTF - &__start_BTF); + out__per_cpu_start = (__u64)&__per_cpu_start; + + out__bpf_link_fops1 = (__u64)&bpf_link_fops1; + + return 0; +} + +char _license[] SEC("license") = "GPL"; From a479b8ce4ed1457f814be6f67a8447a9af38f235 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:58 -0700 Subject: [PATCH 0202/2056] tools/bpftool: Move map/prog parsing logic into common Move functions that parse map and prog by id/tag/name/etc outside of map.c/prog.c, respectively. These functions are used outside of those files and are generic enough to be in common. This also makes heavy-weight map.c and prog.c more decoupled from the rest of bpftool files and facilitates more lightweight bootstrap bpftool variant. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200619231703.738941-5-andriin@fb.com --- tools/bpf/bpftool/common.c | 308 +++++++++++++++++++++++++++++++++++++ tools/bpf/bpftool/main.h | 2 + tools/bpf/bpftool/map.c | 156 ------------------- tools/bpf/bpftool/prog.c | 152 ------------------ 4 files changed, 310 insertions(+), 308 deletions(-) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index c47bdc65de8e..6c864c3683fc 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -581,3 +581,311 @@ print_all_levels(__maybe_unused enum libbpf_print_level level, { return vfprintf(stderr, format, args); } + +static int prog_fd_by_nametag(void *nametag, int **fds, bool tag) +{ + unsigned int id = 0; + int fd, nb_fds = 0; + void *tmp; + int err; + + while (true) { + struct bpf_prog_info info = {}; + __u32 len = sizeof(info); + + err = bpf_prog_get_next_id(id, &id); + if (err) { + if (errno != ENOENT) { + p_err("%s", strerror(errno)); + goto err_close_fds; + } + return nb_fds; + } + + fd = bpf_prog_get_fd_by_id(id); + if (fd < 0) { + p_err("can't get prog by id (%u): %s", + id, strerror(errno)); + goto err_close_fds; + } + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + p_err("can't get prog info (%u): %s", + id, strerror(errno)); + goto err_close_fd; + } + + if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) || + (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) { + close(fd); + continue; + } + + if (nb_fds > 0) { + tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); + if (!tmp) { + p_err("failed to realloc"); + goto err_close_fd; + } + *fds = tmp; + } + (*fds)[nb_fds++] = fd; + } + +err_close_fd: + close(fd); +err_close_fds: + while (--nb_fds >= 0) + close((*fds)[nb_fds]); + return -1; +} + +int prog_parse_fds(int *argc, char ***argv, int **fds) +{ + if (is_prefix(**argv, "id")) { + unsigned int id; + char *endptr; + + NEXT_ARGP(); + + id = strtoul(**argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as ID", **argv); + return -1; + } + NEXT_ARGP(); + + (*fds)[0] = bpf_prog_get_fd_by_id(id); + if ((*fds)[0] < 0) { + p_err("get by id (%u): %s", id, strerror(errno)); + return -1; + } + return 1; + } else if (is_prefix(**argv, "tag")) { + unsigned char tag[BPF_TAG_SIZE]; + + NEXT_ARGP(); + + if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 
2, + tag + 3, tag + 4, tag + 5, tag + 6, tag + 7) + != BPF_TAG_SIZE) { + p_err("can't parse tag"); + return -1; + } + NEXT_ARGP(); + + return prog_fd_by_nametag(tag, fds, true); + } else if (is_prefix(**argv, "name")) { + char *name; + + NEXT_ARGP(); + + name = **argv; + if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { + p_err("can't parse name"); + return -1; + } + NEXT_ARGP(); + + return prog_fd_by_nametag(name, fds, false); + } else if (is_prefix(**argv, "pinned")) { + char *path; + + NEXT_ARGP(); + + path = **argv; + NEXT_ARGP(); + + (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG); + if ((*fds)[0] < 0) + return -1; + return 1; + } + + p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv); + return -1; +} + +int prog_parse_fd(int *argc, char ***argv) +{ + int *fds = NULL; + int nb_fds, fd; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = prog_parse_fds(argc, argv, &fds); + if (nb_fds != 1) { + if (nb_fds > 1) { + p_err("several programs match this handle"); + while (nb_fds--) + close(fds[nb_fds]); + } + fd = -1; + goto exit_free; + } + + fd = fds[0]; +exit_free: + free(fds); + return fd; +} + +static int map_fd_by_name(char *name, int **fds) +{ + unsigned int id = 0; + int fd, nb_fds = 0; + void *tmp; + int err; + + while (true) { + struct bpf_map_info info = {}; + __u32 len = sizeof(info); + + err = bpf_map_get_next_id(id, &id); + if (err) { + if (errno != ENOENT) { + p_err("%s", strerror(errno)); + goto err_close_fds; + } + return nb_fds; + } + + fd = bpf_map_get_fd_by_id(id); + if (fd < 0) { + p_err("can't get map by id (%u): %s", + id, strerror(errno)); + goto err_close_fds; + } + + err = bpf_obj_get_info_by_fd(fd, &info, &len); + if (err) { + p_err("can't get map info (%u): %s", + id, strerror(errno)); + goto err_close_fd; + } + + if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) { + close(fd); + continue; + } + + if (nb_fds > 0) { + tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); + if (!tmp) { + p_err("failed to realloc"); + goto err_close_fd; + } + *fds = tmp; + } + (*fds)[nb_fds++] = fd; + } + +err_close_fd: + close(fd); +err_close_fds: + while (--nb_fds >= 0) + close((*fds)[nb_fds]); + return -1; +} + +int map_parse_fds(int *argc, char ***argv, int **fds) +{ + if (is_prefix(**argv, "id")) { + unsigned int id; + char *endptr; + + NEXT_ARGP(); + + id = strtoul(**argv, &endptr, 0); + if (*endptr) { + p_err("can't parse %s as ID", **argv); + return -1; + } + NEXT_ARGP(); + + (*fds)[0] = bpf_map_get_fd_by_id(id); + if ((*fds)[0] < 0) { + p_err("get map by id (%u): %s", id, strerror(errno)); + return -1; + } + return 1; + } else if (is_prefix(**argv, "name")) { + char *name; + + NEXT_ARGP(); + + name = **argv; + if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { + p_err("can't parse name"); + return -1; + } + NEXT_ARGP(); + + return map_fd_by_name(name, fds); + } else if (is_prefix(**argv, "pinned")) { + char *path; + + NEXT_ARGP(); + + path = **argv; + NEXT_ARGP(); + + (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP); + if ((*fds)[0] < 0) + return -1; + return 1; + } + + p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv); + return -1; +} + +int map_parse_fd(int *argc, char ***argv) +{ + int *fds = NULL; + int nb_fds, fd; + + fds = malloc(sizeof(int)); + if (!fds) { + p_err("mem alloc failed"); + return -1; + } + nb_fds = map_parse_fds(argc, argv, &fds); + if (nb_fds != 1) { + if (nb_fds > 1) { + p_err("several maps match this handle"); + while (nb_fds--) + close(fds[nb_fds]); + } + fd = -1; + goto exit_free; + } 
+ + fd = fds[0]; +exit_free: + free(fds); + return fd; +} + +int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) +{ + int err; + int fd; + + fd = map_parse_fd(argc, argv); + if (fd < 0) + return -1; + + err = bpf_obj_get_info_by_fd(fd, info, info_len); + if (err) { + p_err("can't get map info: %s", strerror(errno)); + close(fd); + return err; + } + + return fd; +} diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 5cdf0bc049bd..4338ab9d86d4 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -210,7 +210,9 @@ int do_iter(int argc, char **argv); int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what); int prog_parse_fd(int *argc, char ***argv); +int prog_parse_fds(int *argc, char ***argv, int **fds); int map_parse_fd(int *argc, char ***argv); +int map_parse_fds(int *argc, char ***argv, int **fds); int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); struct bpf_prog_linfo; diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index c5fac8068ba1..b9eee19b094c 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -92,162 +92,6 @@ static void *alloc_value(struct bpf_map_info *info) return malloc(info->value_size); } -static int map_fd_by_name(char *name, int **fds) -{ - unsigned int id = 0; - int fd, nb_fds = 0; - void *tmp; - int err; - - while (true) { - struct bpf_map_info info = {}; - __u32 len = sizeof(info); - - err = bpf_map_get_next_id(id, &id); - if (err) { - if (errno != ENOENT) { - p_err("%s", strerror(errno)); - goto err_close_fds; - } - return nb_fds; - } - - fd = bpf_map_get_fd_by_id(id); - if (fd < 0) { - p_err("can't get map by id (%u): %s", - id, strerror(errno)); - goto err_close_fds; - } - - err = bpf_obj_get_info_by_fd(fd, &info, &len); - if (err) { - p_err("can't get map info (%u): %s", - id, strerror(errno)); - goto err_close_fd; - } - - if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) { - close(fd); - continue; - } - - if (nb_fds > 0) { - tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); - if (!tmp) { - p_err("failed to realloc"); - goto err_close_fd; - } - *fds = tmp; - } - (*fds)[nb_fds++] = fd; - } - -err_close_fd: - close(fd); -err_close_fds: - while (--nb_fds >= 0) - close((*fds)[nb_fds]); - return -1; -} - -static int map_parse_fds(int *argc, char ***argv, int **fds) -{ - if (is_prefix(**argv, "id")) { - unsigned int id; - char *endptr; - - NEXT_ARGP(); - - id = strtoul(**argv, &endptr, 0); - if (*endptr) { - p_err("can't parse %s as ID", **argv); - return -1; - } - NEXT_ARGP(); - - (*fds)[0] = bpf_map_get_fd_by_id(id); - if ((*fds)[0] < 0) { - p_err("get map by id (%u): %s", id, strerror(errno)); - return -1; - } - return 1; - } else if (is_prefix(**argv, "name")) { - char *name; - - NEXT_ARGP(); - - name = **argv; - if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { - p_err("can't parse name"); - return -1; - } - NEXT_ARGP(); - - return map_fd_by_name(name, fds); - } else if (is_prefix(**argv, "pinned")) { - char *path; - - NEXT_ARGP(); - - path = **argv; - NEXT_ARGP(); - - (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP); - if ((*fds)[0] < 0) - return -1; - return 1; - } - - p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv); - return -1; -} - -int map_parse_fd(int *argc, char ***argv) -{ - int *fds = NULL; - int nb_fds, fd; - - fds = malloc(sizeof(int)); - if (!fds) { - p_err("mem alloc failed"); - return -1; - } - nb_fds = map_parse_fds(argc, argv, &fds); - if (nb_fds != 1) { - if (nb_fds > 1) { - p_err("several maps match 
this handle"); - while (nb_fds--) - close(fds[nb_fds]); - } - fd = -1; - goto exit_free; - } - - fd = fds[0]; -exit_free: - free(fds); - return fd; -} - -int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len) -{ - int err; - int fd; - - fd = map_parse_fd(argc, argv); - if (fd < 0) - return -1; - - err = bpf_obj_get_info_by_fd(fd, info, info_len); - if (err) { - p_err("can't get map info: %s", strerror(errno)); - close(fd); - return err; - } - - return fd; -} - static int do_dump_btf(const struct btf_dumper *d, struct bpf_map_info *map_info, void *key, void *value) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index a5eff83496f2..53d47610ff58 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -86,158 +86,6 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) strftime(buf, size, "%FT%T%z", &load_tm); } -static int prog_fd_by_nametag(void *nametag, int **fds, bool tag) -{ - unsigned int id = 0; - int fd, nb_fds = 0; - void *tmp; - int err; - - while (true) { - struct bpf_prog_info info = {}; - __u32 len = sizeof(info); - - err = bpf_prog_get_next_id(id, &id); - if (err) { - if (errno != ENOENT) { - p_err("%s", strerror(errno)); - goto err_close_fds; - } - return nb_fds; - } - - fd = bpf_prog_get_fd_by_id(id); - if (fd < 0) { - p_err("can't get prog by id (%u): %s", - id, strerror(errno)); - goto err_close_fds; - } - - err = bpf_obj_get_info_by_fd(fd, &info, &len); - if (err) { - p_err("can't get prog info (%u): %s", - id, strerror(errno)); - goto err_close_fd; - } - - if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) || - (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) { - close(fd); - continue; - } - - if (nb_fds > 0) { - tmp = realloc(*fds, (nb_fds + 1) * sizeof(int)); - if (!tmp) { - p_err("failed to realloc"); - goto err_close_fd; - } - *fds = tmp; - } - (*fds)[nb_fds++] = fd; - } - -err_close_fd: - close(fd); -err_close_fds: - while (--nb_fds >= 0) - close((*fds)[nb_fds]); - return -1; -} - -static int prog_parse_fds(int *argc, char ***argv, int **fds) -{ - if (is_prefix(**argv, "id")) { - unsigned int id; - char *endptr; - - NEXT_ARGP(); - - id = strtoul(**argv, &endptr, 0); - if (*endptr) { - p_err("can't parse %s as ID", **argv); - return -1; - } - NEXT_ARGP(); - - (*fds)[0] = bpf_prog_get_fd_by_id(id); - if ((*fds)[0] < 0) { - p_err("get by id (%u): %s", id, strerror(errno)); - return -1; - } - return 1; - } else if (is_prefix(**argv, "tag")) { - unsigned char tag[BPF_TAG_SIZE]; - - NEXT_ARGP(); - - if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2, - tag + 3, tag + 4, tag + 5, tag + 6, tag + 7) - != BPF_TAG_SIZE) { - p_err("can't parse tag"); - return -1; - } - NEXT_ARGP(); - - return prog_fd_by_nametag(tag, fds, true); - } else if (is_prefix(**argv, "name")) { - char *name; - - NEXT_ARGP(); - - name = **argv; - if (strlen(name) > BPF_OBJ_NAME_LEN - 1) { - p_err("can't parse name"); - return -1; - } - NEXT_ARGP(); - - return prog_fd_by_nametag(name, fds, false); - } else if (is_prefix(**argv, "pinned")) { - char *path; - - NEXT_ARGP(); - - path = **argv; - NEXT_ARGP(); - - (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG); - if ((*fds)[0] < 0) - return -1; - return 1; - } - - p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv); - return -1; -} - -int prog_parse_fd(int *argc, char ***argv) -{ - int *fds = NULL; - int nb_fds, fd; - - fds = malloc(sizeof(int)); - if (!fds) { - p_err("mem alloc failed"); - return -1; - } - nb_fds = prog_parse_fds(argc, argv, &fds); - 
if (nb_fds != 1) { - if (nb_fds > 1) { - p_err("several programs match this handle"); - while (nb_fds--) - close(fds[nb_fds]); - } - fd = -1; - goto exit_free; - } - - fd = fds[0]; -exit_free: - free(fds); - return fd; -} - static void show_prog_maps(int fd, __u32 num_maps) { struct bpf_prog_info info = {}; From 16e9b187aba60f9014f11a7e95b878850b6c95e5 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:16:59 -0700 Subject: [PATCH 0203/2056] tools/bpftool: Minimize bootstrap bpftool Build minimal "bootstrap mode" bpftool to enable skeleton (and, later, vmlinux.h generation), instead of building almost complete, but slightly different (w/o skeletons, etc) bpftool to bootstrap complete bpftool build. Current approach doesn't scale well (engineering-wise) when adding more BPF programs to bpftool and other complicated functionality, as it requires constant adjusting of the code to work in both bootstrapped mode and normal mode. So it's better to build only minimal bpftool version that supports only BPF skeleton code generation and BTF-to-C conversion. Thankfully, this is quite easy to accomplish due to internal modularity of bpftool commands. This will also allow to keep adding new functionality to bpftool in general, without the need to care about bootstrap mode for those new parts of bpftool. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200619231703.738941-6-andriin@fb.com --- tools/bpf/bpftool/.gitignore | 2 +- tools/bpf/bpftool/Makefile | 30 +++++++++++++----------------- tools/bpf/bpftool/main.c | 11 +++++++++-- tools/bpf/bpftool/main.h | 27 +++++++++++++++------------ 4 files changed, 38 insertions(+), 32 deletions(-) diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index 26cde83e1ca3..ce721adf3161 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only *.d -/_bpftool +/bpftool-bootstrap /bpftool bpftool*.8 bpf-helpers.* diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 9e85f101be85..eec2da4d45d2 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -116,40 +116,36 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT SRCS += $(BFD_SRCS) endif +BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstrap) + +BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o -_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o -ifeq ($(feature-clang-bpf-global-var),1) - __OBJS = $(OBJS) -else - __OBJS = $(_OBJS) +ifneq ($(feature-clang-bpf-global-var),1) + CFLAGS += -DBPFTOOL_WITHOUT_SKELETONS endif -$(OUTPUT)_prog.o: prog.c - $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $< - -$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF) - $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS) - skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF) $(QUIET_CLANG)$(CLANG) \ -I$(srctree)/tools/include/uapi/ \ -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \ -g -O2 -target bpf -c $< -o $@ -profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o - $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@ +profiler.skel.h: $(BPFTOOL_BOOTSTRAP) skeleton/profiler.bpf.o + $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton skeleton/profiler.bpf.o > $@ $(OUTPUT)prog.o: prog.c profiler.skel.h - $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< 
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< $(OUTPUT)feature.o: | zdep -$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF) - $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS) +$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) $(LIBS) + +$(OUTPUT)bpftool: $(OBJS) $(LIBBPF) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS) $(OUTPUT)%.o: %.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< @@ -157,7 +153,7 @@ $(OUTPUT)%.o: %.c clean: $(LIBBPF)-clean $(call QUIET_CLEAN, bpftool) $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d - $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o + $(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) profiler.skel.h skeleton/profiler.bpf.o $(Q)$(RM) -r -- $(OUTPUT)libbpf/ $(call QUIET_CLEAN, core-gen) $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 46bd716a9d86..bf4d7487552a 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -92,9 +92,16 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv, if (argc < 1 && cmds[0].func) return cmds[0].func(argc, argv); - for (i = 0; cmds[i].func; i++) - if (is_prefix(*argv, cmds[i].cmd)) + for (i = 0; cmds[i].cmd; i++) { + if (is_prefix(*argv, cmds[i].cmd)) { + if (!cmds[i].func) { + p_err("command '%s' is not supported in bootstrap mode", + cmds[i].cmd); + return -1; + } return cmds[i].func(argc - 1, argv + 1); + } + } help(argc - 1, argv + 1); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 4338ab9d86d4..aad7be74e8a7 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -194,19 +194,22 @@ int mount_bpffs_for_pin(const char *name); int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***)); int do_pin_fd(int fd, const char *name); -int do_prog(int argc, char **arg); -int do_map(int argc, char **arg); -int do_link(int argc, char **arg); -int do_event_pipe(int argc, char **argv); -int do_cgroup(int argc, char **arg); -int do_perf(int argc, char **arg); -int do_net(int argc, char **arg); -int do_tracelog(int argc, char **arg); -int do_feature(int argc, char **argv); -int do_btf(int argc, char **argv); +/* commands available in bootstrap mode */ int do_gen(int argc, char **argv); -int do_struct_ops(int argc, char **argv); -int do_iter(int argc, char **argv); +int do_btf(int argc, char **argv); + +/* non-bootstrap only commands */ +int do_prog(int argc, char **arg) __weak; +int do_map(int argc, char **arg) __weak; +int do_link(int argc, char **arg) __weak; +int do_event_pipe(int argc, char **argv) __weak; +int do_cgroup(int argc, char **arg) __weak; +int do_perf(int argc, char **arg) __weak; +int do_net(int argc, char **arg) __weak; +int do_tracelog(int argc, char **arg) __weak; +int do_feature(int argc, char **argv) __weak; +int do_struct_ops(int argc, char **argv) __weak; +int do_iter(int argc, char **argv) __weak; int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what); int prog_parse_fd(int *argc, char ***argv); From 05aca6da3b5ab3c5c6003dbbefc9580d9a6a308b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:17:00 -0700 Subject: [PATCH 0204/2056] tools/bpftool: Generalize BPF skeleton support and generate vmlinux.h Adapt Makefile to support BPF skeleton generation beyond single profiler.bpf.c case. Also add vmlinux.h generation and switch profiler.bpf.c to use it. 
clang-bpf-global-var feature is extended and renamed to clang-bpf-co-re to check for support of preserve_access_index attribute, which, together with BTF for global variables, is the minimum requirement for modern BPF programs. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200619231703.738941-7-andriin@fb.com --- tools/bpf/bpftool/.gitignore | 3 +- tools/bpf/bpftool/Makefile | 42 ++++++++++++----- tools/bpf/bpftool/skeleton/profiler.bpf.c | 3 +- tools/bpf/bpftool/skeleton/profiler.h | 46 ------------------- tools/build/feature/Makefile | 4 +- tools/build/feature/test-clang-bpf-co-re.c | 9 ++++ .../build/feature/test-clang-bpf-global-var.c | 4 -- 7 files changed, 45 insertions(+), 66 deletions(-) delete mode 100644 tools/bpf/bpftool/skeleton/profiler.h create mode 100644 tools/build/feature/test-clang-bpf-co-re.c delete mode 100644 tools/build/feature/test-clang-bpf-global-var.c diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore index ce721adf3161..3e601bcfd461 100644 --- a/tools/bpf/bpftool/.gitignore +++ b/tools/bpf/bpftool/.gitignore @@ -7,4 +7,5 @@ bpf-helpers.* FEATURE-DUMP.bpftool feature libbpf -profiler.skel.h +/*.skel.h +/vmlinux.h diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index eec2da4d45d2..bdb6e38c6c5c 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -42,6 +42,7 @@ CFLAGS += -O2 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS)) CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ + -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/kernel/bpf/ \ -I$(srctree)/tools/include \ -I$(srctree)/tools/include/uapi \ @@ -61,9 +62,9 @@ CLANG ?= clang FEATURE_USER = .bpftool FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib libcap \ - clang-bpf-global-var + clang-bpf-co-re FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \ - clang-bpf-global-var + clang-bpf-co-re check_feat := 1 NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall @@ -121,20 +122,38 @@ BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstr BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o -ifneq ($(feature-clang-bpf-global-var),1) - CFLAGS += -DBPFTOOL_WITHOUT_SKELETONS -endif +VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \ + $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ + ../../../vmlinux \ + /sys/kernel/btf/vmlinux \ + /boot/vmlinux-$(shell uname -r) +VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) -skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF) +ifneq ($(VMLINUX_BTF),) +ifeq ($(feature-clang-bpf-co-re),1) + +BUILD_BPF_SKELS := 1 + +$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP) + $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@ + +$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) $(QUIET_CLANG)$(CLANG) \ + -I$(if $(OUTPUT),$(OUTPUT),.) 
\ -I$(srctree)/tools/include/uapi/ \ - -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \ + -I$(LIBBPF_PATH) \ + -I$(srctree)/tools/lib \ -g -O2 -target bpf -c $< -o $@ -profiler.skel.h: $(BPFTOOL_BOOTSTRAP) skeleton/profiler.bpf.o - $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton skeleton/profiler.bpf.o > $@ +$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) + $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@ -$(OUTPUT)prog.o: prog.c profiler.skel.h +$(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h + +endif +endif + +CFLAGS += $(if BUILD_BPF_SKELS,,-DBPFTOOL_WITHOUT_SKELETONS) $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< @@ -153,7 +172,7 @@ $(OUTPUT)%.o: %.c clean: $(LIBBPF)-clean $(call QUIET_CLEAN, bpftool) $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d - $(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) profiler.skel.h skeleton/profiler.bpf.o + $(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h $(Q)$(RM) -r -- $(OUTPUT)libbpf/ $(call QUIET_CLEAN, core-gen) $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool @@ -188,6 +207,7 @@ FORCE: zdep: @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi +.SECONDARY: .PHONY: all FORCE clean install uninstall zdep .PHONY: doc doc-clean doc-install doc-uninstall .DEFAULT_GOAL := all diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c index c9d196ddb670..4e3512f700c0 100644 --- a/tools/bpf/bpftool/skeleton/profiler.bpf.c +++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook -#include "profiler.h" -#include +#include #include #include diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h deleted file mode 100644 index 1f767e9510f7..000000000000 --- a/tools/bpf/bpftool/skeleton/profiler.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ -#ifndef __PROFILER_H -#define __PROFILER_H - -/* useful typedefs from vmlinux.h */ - -typedef signed char __s8; -typedef unsigned char __u8; -typedef short int __s16; -typedef short unsigned int __u16; -typedef int __s32; -typedef unsigned int __u32; -typedef long long int __s64; -typedef long long unsigned int __u64; - -typedef __s8 s8; -typedef __u8 u8; -typedef __s16 s16; -typedef __u16 u16; -typedef __s32 s32; -typedef __u32 u32; -typedef __s64 s64; -typedef __u64 u64; - -enum { - false = 0, - true = 1, -}; - -#ifdef __CHECKER__ -#define __bitwise__ __attribute__((bitwise)) -#else -#define __bitwise__ -#endif - -typedef __u16 __bitwise__ __le16; -typedef __u16 __bitwise__ __be16; -typedef __u32 __bitwise__ __le32; -typedef __u32 __bitwise__ __be32; -typedef __u64 __bitwise__ __le64; -typedef __u64 __bitwise__ __be64; - -typedef __u16 __bitwise__ __sum16; -typedef __u32 __bitwise__ __wsum; - -#endif /* __PROFILER_H */ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index b1f0321180f5..88371f7f0369 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -68,7 +68,7 @@ FILES= \ test-llvm-version.bin \ test-libaio.bin \ test-libzstd.bin \ - test-clang-bpf-global-var.bin \ + test-clang-bpf-co-re.bin \ test-file-handle.bin \ test-libpfm4.bin @@ -325,7 +325,7 @@ $(OUTPUT)test-libaio.bin: $(OUTPUT)test-libzstd.bin: $(BUILD) -lzstd -$(OUTPUT)test-clang-bpf-global-var.bin: +$(OUTPUT)test-clang-bpf-co-re.bin: $(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \ grep BTF_KIND_VAR 
diff --git a/tools/build/feature/test-clang-bpf-co-re.c b/tools/build/feature/test-clang-bpf-co-re.c new file mode 100644 index 000000000000..cb5265bfdd83 --- /dev/null +++ b/tools/build/feature/test-clang-bpf-co-re.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook + +struct test { + int a; + int b; +} __attribute__((preserve_access_index)); + +volatile struct test global_value_for_test = {}; diff --git a/tools/build/feature/test-clang-bpf-global-var.c b/tools/build/feature/test-clang-bpf-global-var.c deleted file mode 100644 index 221f1481d52e..000000000000 --- a/tools/build/feature/test-clang-bpf-global-var.c +++ /dev/null @@ -1,4 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// Copyright (c) 2020 Facebook - -volatile int global_value_for_test = 1; From bd9bedf84b87289b9a87eebfe7917e54373e99f9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:17:01 -0700 Subject: [PATCH 0205/2056] libbpf: Wrap source argument of BPF_CORE_READ macro in parentheses Wrap source argument of BPF_CORE_READ family of macros into parentheses to allow uses like this: BPF_CORE_READ((struct cast_struct *)src, a, b, c); Fixes: 7db3822ab991 ("libbpf: Add BPF_CORE_READ/BPF_CORE_READ_INTO helpers") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200619231703.738941-8-andriin@fb.com --- tools/lib/bpf/bpf_core_read.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h index 7009dc90e012..eae5cccff761 100644 --- a/tools/lib/bpf/bpf_core_read.h +++ b/tools/lib/bpf/bpf_core_read.h @@ -217,7 +217,7 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ_INTO(dst, src, a, ...) \ ({ \ - ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \ + ___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__) \ }) /* @@ -227,7 +227,7 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \ ({ \ - ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \ + ___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\ }) /* @@ -254,8 +254,8 @@ enum bpf_field_info_kind { */ #define BPF_CORE_READ(src, a, ...) \ ({ \ - ___type(src, a, ##__VA_ARGS__) __r; \ - BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \ + ___type((src), a, ##__VA_ARGS__) __r; \ + BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \ __r; \ }) From d53dee3fe0138610fcce5721bae9414377c41ec3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 19 Jun 2020 16:17:02 -0700 Subject: [PATCH 0206/2056] tools/bpftool: Show info for processes holding BPF map/prog/link/btf FDs Add bpf_iter-based way to find all the processes that hold open FDs against BPF object (map, prog, link, btf). bpftool always attempts to discover this, but will silently give up if kernel doesn't yet support bpf_iter BPF programs. Process name and PID are emitted for each process (task group). 
Sample output for each of 4 BPF objects: $ sudo ./bpftool prog show 2694: cgroup_device tag 8c42dee26e8cd4c2 gpl loaded_at 2020-06-16T15:34:32-0700 uid 0 xlated 648B jited 409B memlock 4096B pids systemd(1) 2907: cgroup_skb name egress tag 9ad187367cf2b9e8 gpl loaded_at 2020-06-16T18:06:54-0700 uid 0 xlated 48B jited 59B memlock 4096B map_ids 2436 btf_id 1202 pids test_progs(2238417), test_progs(2238445) $ sudo ./bpftool map show 2436: array name test_cgr.bss flags 0x400 key 4B value 8B max_entries 1 memlock 8192B btf_id 1202 pids test_progs(2238417), test_progs(2238445) 2445: array name pid_iter.rodata flags 0x480 key 4B value 4B max_entries 1 memlock 8192B btf_id 1214 frozen pids bpftool(2239612) $ sudo ./bpftool link show 61: cgroup prog 2908 cgroup_id 375301 attach_type egress pids test_progs(2238417), test_progs(2238445) 62: cgroup prog 2908 cgroup_id 375344 attach_type egress pids test_progs(2238417), test_progs(2238445) $ sudo ./bpftool btf show 1202: size 1527B prog_ids 2908,2907 map_ids 2436 pids test_progs(2238417), test_progs(2238445) 1242: size 34684B pids bpftool(2258892) Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200619231703.738941-9-andriin@fb.com --- tools/bpf/bpftool/Makefile | 2 + tools/bpf/bpftool/btf.c | 6 + tools/bpf/bpftool/link.c | 7 + tools/bpf/bpftool/main.c | 1 + tools/bpf/bpftool/main.h | 27 +++ tools/bpf/bpftool/map.c | 7 + tools/bpf/bpftool/pids.c | 229 ++++++++++++++++++++++ tools/bpf/bpftool/prog.c | 7 + tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 80 ++++++++ tools/bpf/bpftool/skeleton/pid_iter.h | 12 ++ 10 files changed, 378 insertions(+) create mode 100644 tools/bpf/bpftool/pids.c create mode 100644 tools/bpf/bpftool/skeleton/pid_iter.bpf.c create mode 100644 tools/bpf/bpftool/skeleton/pid_iter.h diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index bdb6e38c6c5c..06f436e8191a 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -150,6 +150,8 @@ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) $(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h +$(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h + endif endif diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index faac8189b285..fc9bc7a23db6 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -809,6 +809,7 @@ show_btf_plain(struct bpf_btf_info *info, int fd, printf("%s%u", n++ == 0 ? 
" map_ids " : ",", obj->obj_id); } + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); printf("\n"); } @@ -841,6 +842,9 @@ show_btf_json(struct bpf_btf_info *info, int fd, jsonw_uint(json_wtr, obj->obj_id); } jsonw_end_array(json_wtr); /* map_ids */ + + emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */ + jsonw_end_object(json_wtr); /* btf object */ } @@ -893,6 +897,7 @@ static int do_show(int argc, char **argv) close(fd); return err; } + build_obj_refs_table(&refs_table, BPF_OBJ_BTF); if (fd >= 0) { err = show_btf(fd, &btf_prog_table, &btf_map_table); @@ -939,6 +944,7 @@ static int do_show(int argc, char **argv) exit_free: delete_btf_table(&btf_prog_table); delete_btf_table(&btf_map_table); + delete_obj_refs_table(&refs_table); return err; } diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index fca57ee8fafe..7329f3134283 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -143,6 +143,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) } jsonw_end_array(json_wtr); } + + emit_obj_refs_json(&refs_table, info->id, json_wtr); + jsonw_end_object(json_wtr); return 0; @@ -212,6 +215,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) printf("\n\tpinned %s", obj->path); } } + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); printf("\n"); @@ -257,6 +261,7 @@ static int do_show(int argc, char **argv) if (show_pinned) build_pinned_obj_table(&link_table, BPF_OBJ_LINK); + build_obj_refs_table(&refs_table, BPF_OBJ_LINK); if (argc == 2) { fd = link_parse_fd(&argc, &argv); @@ -296,6 +301,8 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); + delete_obj_refs_table(&refs_table); + return errno == ENOENT ? 0 : -1; } diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index bf4d7487552a..4a191fcbeb82 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -31,6 +31,7 @@ bool relaxed_maps; struct pinned_obj_table prog_table; struct pinned_obj_table map_table; struct pinned_obj_table link_table; +struct obj_refs_table refs_table; static void __noreturn clean_and_exit(int i) { diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index aad7be74e8a7..ce26271e5f0c 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -127,11 +127,13 @@ static const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { extern const char * const map_type_name[]; extern const size_t map_type_name_size; +/* keep in sync with the definition in skeleton/pid_iter.bpf.c */ enum bpf_obj_type { BPF_OBJ_UNKNOWN, BPF_OBJ_PROG, BPF_OBJ_MAP, BPF_OBJ_LINK, + BPF_OBJ_BTF, }; extern const char *bin_name; @@ -139,12 +141,14 @@ extern const char *bin_name; extern json_writer_t *json_wtr; extern bool json_output; extern bool show_pinned; +extern bool show_pids; extern bool block_mount; extern bool verifier_logs; extern bool relaxed_maps; extern struct pinned_obj_table prog_table; extern struct pinned_obj_table map_table; extern struct pinned_obj_table link_table; +extern struct obj_refs_table refs_table; void __printf(1, 2) p_err(const char *fmt, ...); void __printf(1, 2) p_info(const char *fmt, ...); @@ -168,12 +172,35 @@ struct pinned_obj { struct hlist_node hash; }; +struct obj_refs_table { + DECLARE_HASHTABLE(table, 16); +}; + +struct obj_ref { + int pid; + char comm[16]; +}; + +struct obj_refs { + struct hlist_node node; + __u32 id; + int ref_cnt; + struct obj_ref *refs; +}; + struct btf; struct bpf_line_info; int 
build_pinned_obj_table(struct pinned_obj_table *table, enum bpf_obj_type type); void delete_pinned_obj_table(struct pinned_obj_table *tab); +__weak int build_obj_refs_table(struct obj_refs_table *table, + enum bpf_obj_type type); +__weak void delete_obj_refs_table(struct obj_refs_table *table); +__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, + json_writer_t *json_wtr); +__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, + const char *prefix); void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode); diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index b9eee19b094c..0a6a5d82d380 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -509,6 +509,8 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) jsonw_end_array(json_wtr); } + emit_obj_refs_json(&refs_table, info->id, json_wtr); + jsonw_end_object(json_wtr); return 0; @@ -596,6 +598,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) if (frozen) printf("%sfrozen", info->btf_id ? " " : ""); + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + printf("\n"); return 0; } @@ -654,6 +658,7 @@ static int do_show(int argc, char **argv) if (show_pinned) build_pinned_obj_table(&map_table, BPF_OBJ_MAP); + build_obj_refs_table(&refs_table, BPF_OBJ_MAP); if (argc == 2) return do_show_subset(argc, argv); @@ -697,6 +702,8 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); + delete_obj_refs_table(&refs_table); + return errno == ENOENT ? 0 : -1; } diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c new file mode 100644 index 000000000000..3474a91743ff --- /dev/null +++ b/tools/bpf/bpftool/pids.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2020 Facebook */ +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "skeleton/pid_iter.h" + +#ifdef BPFTOOL_WITHOUT_SKELETONS + +int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +{ + p_err("bpftool built without PID iterator support"); + return -ENOTSUP; +} +void delete_obj_refs_table(struct obj_refs_table *table) {} + +#else /* BPFTOOL_WITHOUT_SKELETONS */ + +#include "pid_iter.skel.h" + +static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e) +{ + struct obj_refs *refs; + struct obj_ref *ref; + void *tmp; + int i; + + hash_for_each_possible(table->table, refs, node, e->id) { + if (refs->id != e->id) + continue; + + for (i = 0; i < refs->ref_cnt; i++) { + if (refs->refs[i].pid == e->pid) + return; + } + + tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref)); + if (!tmp) { + p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + refs->refs = tmp; + ref = &refs->refs[refs->ref_cnt]; + ref->pid = e->pid; + memcpy(ref->comm, e->comm, sizeof(ref->comm)); + refs->ref_cnt++; + + return; + } + + /* new ref */ + refs = calloc(1, sizeof(*refs)); + if (!refs) { + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + + refs->id = e->id; + refs->refs = malloc(sizeof(*refs->refs)); + if (!refs->refs) { + free(refs); + p_err("failed to alloc memory for ID %u, PID %d, COMM %s...", + e->id, e->pid, e->comm); + return; + } + ref = &refs->refs[0]; + ref->pid = e->pid; + memcpy(ref->comm, e->comm, sizeof(ref->comm)); + refs->ref_cnt = 1; 
+ hash_add(table->table, &refs->node, e->id); +} + +static int __printf(2, 0) +libbpf_print_none(__maybe_unused enum libbpf_print_level level, + __maybe_unused const char *format, + __maybe_unused va_list args) +{ + return 0; +} + +int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) +{ + char buf[4096]; + struct pid_iter_bpf *skel; + struct pid_iter_entry *e; + int err, ret, fd = -1, i; + libbpf_print_fn_t default_print; + + hash_init(table->table); + set_max_rlimit(); + + skel = pid_iter_bpf__open(); + if (!skel) { + p_err("failed to open PID iterator skeleton"); + return -1; + } + + skel->rodata->obj_type = type; + + /* we don't want output polluted with libbpf errors if bpf_iter is not + * supported + */ + default_print = libbpf_set_print(libbpf_print_none); + err = pid_iter_bpf__load(skel); + libbpf_set_print(default_print); + if (err) { + /* too bad, kernel doesn't support BPF iterators yet */ + err = 0; + goto out; + } + err = pid_iter_bpf__attach(skel); + if (err) { + /* if we loaded above successfully, attach has to succeed */ + p_err("failed to attach PID iterator: %d", err); + goto out; + } + + fd = bpf_iter_create(bpf_link__fd(skel->links.iter)); + if (fd < 0) { + err = -errno; + p_err("failed to create PID iterator session: %d", err); + goto out; + } + + while (true) { + ret = read(fd, buf, sizeof(buf)); + if (ret < 0) { + err = -errno; + p_err("failed to read PID iterator output: %d", err); + goto out; + } + if (ret == 0) + break; + if (ret % sizeof(*e)) { + err = -EINVAL; + p_err("invalid PID iterator output format"); + goto out; + } + ret /= sizeof(*e); + + e = (void *)buf; + for (i = 0; i < ret; i++, e++) { + add_ref(table, e); + } + } + err = 0; +out: + if (fd >= 0) + close(fd); + pid_iter_bpf__destroy(skel); + return err; +} + +void delete_obj_refs_table(struct obj_refs_table *table) +{ + struct obj_refs *refs; + struct hlist_node *tmp; + unsigned int bkt; + + hash_for_each_safe(table->table, bkt, tmp, refs, node) { + hash_del(&refs->node); + free(refs->refs); + free(refs); + } +} + +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_wtr) +{ + struct obj_refs *refs; + struct obj_ref *ref; + int i; + + if (hash_empty(table->table)) + return; + + hash_for_each_possible(table->table, refs, node, id) { + if (refs->id != id) + continue; + if (refs->ref_cnt == 0) + break; + + jsonw_name(json_wtr, "pids"); + jsonw_start_array(json_wtr); + for (i = 0; i < refs->ref_cnt; i++) { + ref = &refs->refs[i]; + jsonw_start_object(json_wtr); + jsonw_int_field(json_wtr, "pid", ref->pid); + jsonw_string_field(json_wtr, "comm", ref->comm); + jsonw_end_object(json_wtr); + } + jsonw_end_array(json_wtr); + break; + } +} + +void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) +{ + struct obj_refs *refs; + struct obj_ref *ref; + int i; + + if (hash_empty(table->table)) + return; + + hash_for_each_possible(table->table, refs, node, id) { + if (refs->id != id) + continue; + if (refs->ref_cnt == 0) + break; + + printf("%s", prefix); + for (i = 0; i < refs->ref_cnt; i++) { + ref = &refs->refs[i]; + printf("%s%s(%d)", i == 0 ? 
"" : ", ", ref->comm, ref->pid); + } + break; + } +} + + +#endif diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 53d47610ff58..e21fa8ad2efa 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -190,6 +190,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) jsonw_end_array(json_wtr); } + emit_obj_refs_json(&refs_table, info->id, json_wtr); + jsonw_end_object(json_wtr); } @@ -256,6 +258,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) if (info->btf_id) printf("\n\tbtf_id %d", info->btf_id); + emit_obj_refs_plain(&refs_table, info->id, "\n\tpids "); + printf("\n"); } @@ -321,6 +325,7 @@ static int do_show(int argc, char **argv) if (show_pinned) build_pinned_obj_table(&prog_table, BPF_OBJ_PROG); + build_obj_refs_table(&refs_table, BPF_OBJ_PROG); if (argc == 2) return do_show_subset(argc, argv); @@ -362,6 +367,8 @@ static int do_show(int argc, char **argv) if (json_output) jsonw_end_array(json_wtr); + delete_obj_refs_table(&refs_table); + return err; } diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c new file mode 100644 index 000000000000..8468a608911e --- /dev/null +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (c) 2020 Facebook */ +#include +#include +#include +#include +#include "pid_iter.h" + +/* keep in sync with the definition in main.h */ +enum bpf_obj_type { + BPF_OBJ_UNKNOWN, + BPF_OBJ_PROG, + BPF_OBJ_MAP, + BPF_OBJ_LINK, + BPF_OBJ_BTF, +}; + +extern const void bpf_link_fops __ksym; +extern const void bpf_map_fops __ksym; +extern const void bpf_prog_fops __ksym; +extern const void btf_fops __ksym; + +const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN; + +static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type) +{ + switch (type) { + case BPF_OBJ_PROG: + return BPF_CORE_READ((struct bpf_prog *)ent, aux, id); + case BPF_OBJ_MAP: + return BPF_CORE_READ((struct bpf_map *)ent, id); + case BPF_OBJ_BTF: + return BPF_CORE_READ((struct btf *)ent, id); + case BPF_OBJ_LINK: + return BPF_CORE_READ((struct bpf_link *)ent, id); + default: + return 0; + } +} + +SEC("iter/task_file") +int iter(struct bpf_iter__task_file *ctx) +{ + struct file *file = ctx->file; + struct task_struct *task = ctx->task; + struct pid_iter_entry e; + const void *fops; + + if (!file || !task) + return 0; + + switch (obj_type) { + case BPF_OBJ_PROG: + fops = &bpf_prog_fops; + break; + case BPF_OBJ_MAP: + fops = &bpf_map_fops; + break; + case BPF_OBJ_BTF: + fops = &btf_fops; + break; + case BPF_OBJ_LINK: + fops = &bpf_link_fops; + break; + default: + return 0; + } + + if (file->f_op != fops) + return 0; + + e.pid = task->tgid; + e.id = get_obj_id(file->private_data, obj_type); + bpf_probe_read(&e.comm, sizeof(e.comm), task->group_leader->comm); + bpf_seq_write(ctx->meta->seq, &e, sizeof(e)); + + return 0; +} + +char LICENSE[] SEC("license") = "Dual BSD/GPL"; diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h new file mode 100644 index 000000000000..5692cf257adb --- /dev/null +++ b/tools/bpf/bpftool/skeleton/pid_iter.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2020 Facebook */ +#ifndef __PID_ITER_H +#define __PID_ITER_H + +struct pid_iter_entry { + __u32 id; + int pid; + char comm[16]; +}; + +#endif From 075c776658190681d2bf9997306f871d6c8a9b36 Mon Sep 17 00:00:00 2001 From: Andrii 
Nakryiko Date: Fri, 19 Jun 2020 16:17:03 -0700 Subject: [PATCH 0207/2056] tools/bpftool: Add documentation and sample output for process info Add statements about bpftool being able to discover process info, holding reference to BPF map, prog, link, or BTF. Show example output as well. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200619231703.738941-10-andriin@fb.com --- tools/bpf/bpftool/Documentation/bpftool-btf.rst | 5 +++++ tools/bpf/bpftool/Documentation/bpftool-link.rst | 13 ++++++++++++- tools/bpf/bpftool/Documentation/bpftool-map.rst | 8 +++++++- tools/bpf/bpftool/Documentation/bpftool-prog.rst | 11 +++++++++++ 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index ce3a724f50c1..896f4c6c2870 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -36,6 +36,11 @@ DESCRIPTION otherwise list all BTF objects currently loaded on the system. + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BTF + objects. On such kernels bpftool will automatically emit this + information as well. + **bpftool btf dump** *BTF_SRC* Dump BTF entries from a given *BTF_SRC*. diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst index 0e43d7b06c11..38b0949a185b 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-link.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst @@ -37,6 +37,11 @@ DESCRIPTION zero or more named attributes, some of which depend on type of link. + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + links. On such kernels bpftool will automatically emit this + information as well. + **bpftool link pin** *LINK* *FILE* Pin link *LINK* as *FILE*. @@ -82,6 +87,7 @@ EXAMPLES 10: cgroup prog 25 cgroup_id 614 attach_type egress + pids test_progs(223) **# bpftool --json --pretty link show** @@ -91,7 +97,12 @@ EXAMPLES "type": "cgroup", "prog_id": 25, "cgroup_id": 614, - "attach_type": "egress" + "attach_type": "egress", + "pids": [{ + "pid": 223, + "comm": "test_progs" + } + ] } ] diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst index 31101643e57c..5bc2123e9944 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-map.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst @@ -62,6 +62,11 @@ DESCRIPTION Output will start with map ID followed by map type and zero or more named attributes (depending on kernel version). + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + maps. On such kernels bpftool will automatically emit this + information as well. + **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*] Create a new map with given parameters and pin it to *bpffs* as *FILE*. 
@@ -180,7 +185,8 @@ EXAMPLES :: 10: hash name some_map flags 0x0 - key 4B value 8B max_entries 2048 memlock 167936B + key 4B value 8B max_entries 2048 memlock 167936B + pids systemd(1) The following three commands are equivalent: diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 2b254959d488..412ea3d9bf7f 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -75,6 +75,11 @@ DESCRIPTION program run. Activation or deactivation of the feature is performed via the **kernel.bpf_stats_enabled** sysctl knob. + Since Linux 5.8 bpftool is able to discover information about + processes that hold open file descriptors (FDs) against BPF + programs. On such kernels bpftool will automatically emit this + information as well. + **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }] Dump eBPF instructions of the programs from the kernel. By default, eBPF will be disassembled and printed to standard @@ -243,6 +248,7 @@ EXAMPLES 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10 loaded_at 2017-09-29T20:11:00+0000 uid 0 xlated 528B jited 370B memlock 4096B map_ids 10 + pids systemd(1) **# bpftool --json --pretty prog show** @@ -262,6 +268,11 @@ EXAMPLES "bytes_jited": 370, "bytes_memlock": 4096, "map_ids": [10 + ], + "pids": [{ + "pid": 1, + "comm": "systemd" + } ] } ] From 0897ecf7532577bda3dbcb043ce046a96948889d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:45:59 +0300 Subject: [PATCH 0208/2056] net: mscc: ocelot: fix encoding destination ports into multicast IPv4 address The ocelot hardware designers have made some hacks to support multicast IPv4 and IPv6 addresses. Normally, the MAC table matches on MAC addresses and the destination ports are selected through the DEST_IDX field of the respective MAC table entry. The DEST_IDX points to a Port Group ID (PGID) which contains the bit mask of ports that frames should be forwarded to. But there aren't a lot of PGIDs (only 80 or so) and there are clearly many more IP multicast addresses than that, so it doesn't scale to use this PGID mechanism, so something else was done. Since the first portion of the MAC address is known, the hack they did was to use a single PGID for _flooding_ unknown IPv4 multicast (PGID_MCIPV4 == 62), but for known IP multicast, embed the destination ports into the first 3 bytes of the MAC address recorded in the MAC table. The VSC7514 datasheet explains it like this: 3.9.1.5 IPv4 Multicast Entries MAC table entries with the ENTRY_TYPE = 2 settings are interpreted as IPv4 multicast entries. IPv4 multicasts entries match IPv4 frames, which are classified to the specified VID, and which have DMAC = 0x01005Exxxxxx, where xxxxxx is the lower 24 bits of the MAC address in the entry. Instead of a lookup in the destination mask table (PGID), the destination set is programmed as part of the entry MAC address. This is shown in the following table. Table 78: IPv4 Multicast Destination Mask Destination Ports Record Bit Field --------------------------------------------- Ports 10-0 MAC[34-24] Example: All IPv4 multicast frames in VLAN 12 with MAC 01005E112233 are to be forwarded to ports 3, 8, and 9. This is done by inserting the following entry in the MAC table entry: VALID = 1 VID = 12 MAC = 0x000308112233 ENTRY_TYPE = 2 DEST_IDX = 0 But this procedure is not at all what's going on in the driver. 
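To make the datasheet example above concrete (a sketch for illustration, not part of the patch): forwarding to ports 3, 8 and 9 gives a port mask of BIT(3) | BIT(8) | BIT(9) = 0x0308, and per Table 78 that mask occupies MAC bytes 1 and 2 of the entry, so 01:00:5E:11:22:33 is recorded as 00:03:08:11:22:33.

	/* illustration only: VSC7514 Table 78 encoding of the destination port mask */
	u16 ports = BIT(3) | BIT(8) | BIT(9);	/* 0x0308 */
	addr[0] = 0;			/* top byte is always zero for IPv4 entries */
	addr[1] = ports >> 8;		/* 0x03 */
	addr[2] = ports & 0xff;		/* 0x08 */
	/* addr[3..5] keep 0x11 0x22 0x33, the low 24 bits of the group address */

This is exactly the byte layout the hunks below switch to.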
In fact, the code that embeds the ports into the MAC address looks like it hasn't actually been tested. This patch applies the procedure described in the datasheet. Since there are many other fixes to be made around multicast forwarding until it works properly, there is no real reason for this patch to be backported to stable trees, or considered a real fix of something that should have worked. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 52b180280d2f..922c3e855c3a 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -973,14 +973,14 @@ int ocelot_port_obj_add_mdb(struct net_device *dev, addr[0] = 0; if (!new) { - addr[2] = mc->ports << 0; - addr[1] = mc->ports << 8; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; ocelot_mact_forget(ocelot, addr, vid); } mc->ports |= BIT(port); - addr[2] = mc->ports << 0; - addr[1] = mc->ports << 8; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } @@ -1005,9 +1005,9 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, return -ENOENT; memcpy(addr, mc->addr, ETH_ALEN); - addr[2] = mc->ports << 0; - addr[1] = mc->ports << 8; addr[0] = 0; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; ocelot_mact_forget(ocelot, addr, vid); mc->ports &= ~BIT(port); @@ -1017,8 +1017,8 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, return 0; } - addr[2] = mc->ports << 0; - addr[1] = mc->ports << 8; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } From 471beb11c4ecdefd1d8502861c5e151fd642dc6e Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:00 +0300 Subject: [PATCH 0209/2056] net: mscc: ocelot: make the NPI port a proper target for FDB and MDB When used in DSA mode (as seen in Felix), the DEST_IDX in the MAC table should point to the PGID for the CPU port (PGID_CPU) and not for the Ethernet port where the CPU queues are redirected to (also known as Node Processor Interface - NPI). Because for Felix this distinction shouldn't really matter (from DSA perspective, the NPI port _is_ the CPU port), make the ocelot library act upon the CPU port when NPI mode is enabled. This has no effect for the mscc_ocelot driver for VSC7514, because that does not use NPI (and ocelot->npi is -1). Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 922c3e855c3a..4aadb65a6af8 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -535,6 +535,10 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int pgid = port; + + if (port == ocelot->npi) + pgid = PGID_CPU; if (!vid) { if (!ocelot_port->vlan_aware) @@ -550,7 +554,7 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port, return -EINVAL; } - return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED); + return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED); } EXPORT_SYMBOL(ocelot_fdb_add); @@ -953,6 +957,9 @@ int ocelot_port_obj_add_mdb(struct net_device *dev, u16 vid = mdb->vid; bool new = false; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + if (!vid) vid = ocelot_port->pvid; @@ -997,6 +1004,9 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, int port = priv->chip_port; u16 vid = mdb->vid; + if (port == ocelot->npi) + port = ocelot->num_phys_ports; + if (!vid) vid = ocelot_port->pvid; From 209edf95da63a0ad19750769f473f4ea1553d21d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:01 +0300 Subject: [PATCH 0210/2056] net: dsa: felix: call port mdb operations from ocelot This adds the mdb hooks in felix and exports the mdb functions from ocelot. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 26 ++++++++++++++++++++++++++ drivers/net/ethernet/mscc/ocelot.c | 23 ++++++++--------------- drivers/net/ethernet/mscc/ocelot.h | 5 ----- drivers/net/ethernet/mscc/ocelot_net.c | 26 ++++++++++++++++++++++++++ include/soc/mscc/ocelot.h | 4 ++++ 5 files changed, 64 insertions(+), 20 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 66648986e6e3..25046777c993 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -59,6 +59,29 @@ static int felix_fdb_del(struct dsa_switch *ds, int port, return ocelot_fdb_del(ocelot, port, addr, vid); } +/* This callback needs to be present */ +static int felix_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + return 0; +} + +static void felix_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int felix_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot *ocelot = ds->priv; + + return ocelot_port_mdb_del(ocelot, port, mdb); +} + static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { @@ -771,6 +794,9 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, + .port_mdb_prepare = felix_mdb_prepare, + .port_mdb_add = felix_mdb_add, + .port_mdb_del = felix_mdb_del, .port_bridge_join = felix_bridge_join, .port_bridge_leave = felix_bridge_leave, .port_stp_state_set = felix_bridge_stp_state_set, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4aadb65a6af8..468eaf5916e5 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -944,16 
+944,12 @@ static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, return NULL; } -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans) +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; bool new = false; @@ -991,17 +987,14 @@ int ocelot_port_obj_add_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } -EXPORT_SYMBOL(ocelot_port_obj_add_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_add); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb) +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb) { - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; + struct ocelot_port *ocelot_port = ocelot->ports[port]; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; - int port = priv->chip_port; u16 vid = mdb->vid; if (port == ocelot->npi) @@ -1032,7 +1025,7 @@ int ocelot_port_obj_del_mdb(struct net_device *dev, return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); } -EXPORT_SYMBOL(ocelot_port_obj_del_mdb); +EXPORT_SYMBOL(ocelot_port_mdb_del); int ocelot_port_bridge_join(struct ocelot *ocelot, int port, struct net_device *bridge) diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 0c23734a87be..be4a41646e5e 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -97,11 +97,6 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond); void ocelot_port_lag_leave(struct ocelot *ocelot, int port, struct net_device *bond); -int ocelot_port_obj_del_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb); -int ocelot_port_obj_add_mdb(struct net_device *dev, - const struct switchdev_obj_port_mdb *mdb, - struct switchdev_trans *trans); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 80cb1873e9d9..1bad146a0105 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -795,6 +795,32 @@ static int ocelot_port_vlan_del_vlan(struct net_device *dev, return 0; } +static int ocelot_port_obj_add_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb, + struct switchdev_trans *trans) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return ocelot_port_mdb_add(ocelot, port, mdb); +} + +static int ocelot_port_obj_del_mdb(struct net_device *dev, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return 
ocelot_port_mdb_del(ocelot, port, mdb); +} + static int ocelot_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index fa2c3904049e..80415b63ccfa 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -641,5 +641,9 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); +int ocelot_port_mdb_add(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); +int ocelot_port_mdb_del(struct ocelot *ocelot, int port, + const struct switchdev_obj_port_mdb *mdb); #endif From 96b029b004942ecdb50e40d3e45cdc8a3aec9135 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:02 +0300 Subject: [PATCH 0211/2056] net: mscc: ocelot: introduce macros for iterating over PGIDs The current iterators are impossible to understand at first glance without switching back and forth between the definitions and their actual use in the for loops. So introduce some convenience names to help readability. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 8 ++++---- drivers/net/ethernet/mscc/ocelot_net.c | 2 +- include/soc/mscc/ocelot.h | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 468eaf5916e5..b6254c20f2f0 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1064,10 +1064,10 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) int i, port, lag; /* Reset destination and aggregation PGIDS */ - for (port = 0; port < ocelot->num_phys_ports; port++) + for_each_unicast_dest_pgid(ocelot, port) ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port); - for (i = PGID_AGGR; i < PGID_SRC; i++) + for_each_aggr_pgid(ocelot, i) ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), ANA_PGID_PGID, i); @@ -1089,7 +1089,7 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot) aggr_count++; } - for (i = PGID_AGGR; i < PGID_SRC; i++) { + for_each_aggr_pgid(ocelot, i) { u32 ac; ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i); @@ -1451,7 +1451,7 @@ int ocelot_init(struct ocelot *ocelot) } /* Allow broadcast MAC frames. */ - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) { + for_each_nonreserved_multicast_dest_pgid(ocelot, i) { u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0)); ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 1bad146a0105..702b42543fb7 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -422,7 +422,7 @@ static void ocelot_set_rx_mode(struct net_device *dev) * forwarded to the CPU port. 
*/ val = GENMASK(ocelot->num_phys_ports - 1, 0); - for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) + for_each_nonreserved_multicast_dest_pgid(ocelot, i) ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i); __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync); diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 80415b63ccfa..e050f8121ba2 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -65,6 +65,21 @@ #define PGID_MCIPV4 62 #define PGID_MCIPV6 63 +#define for_each_unicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = 0; \ + (pgid) < (ocelot)->num_phys_ports; \ + (pgid)++) + +#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \ + for ((pgid) = (ocelot)->num_phys_ports + 1; \ + (pgid) < PGID_CPU; \ + (pgid)++) + +#define for_each_aggr_pgid(ocelot, pgid) \ + for ((pgid) = PGID_AGGR; \ + (pgid) < PGID_SRC; \ + (pgid)++) + /* Aggregation PGIDs, one per Link Aggregation Code */ #define PGID_AGGR 64 From 9403c158b8722ada99f5dd7b3717c264879aefa8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 21 Jun 2020 14:46:03 +0300 Subject: [PATCH 0212/2056] net: mscc: ocelot: support IPv4, IPv6 and plain Ethernet mdb entries The current procedure for installing a multicast address is hardcoded for IPv4. But, in the ocelot hardware, there are 3 different procedures for IPv4, IPv6 and for regular L2 multicast. For IPv6 (33-33-xx-xx-xx-xx), it's the same as for IPv4 (01-00-5e-xx-xx-xx), except that the destination port mask is stuffed into first 2 bytes of the MAC address except into first 3 bytes. For plain Ethernet multicast, there's no port-in-address stuffing going on, instead the DEST_IDX (pointer to PGID) is used there, just as for unicast. So we have to use one of the nonreserved multicast PGIDs that the hardware has allocated for this purpose. This patch classifies the type of multicast address based on its first bytes, then redirects to one of the 3 different hardware procedures. Note that this gives us a really better way of redirecting PTP frames sent at 01-1b-19-00-00-00 to the CPU. Previously, Yangbo Lu tried to add a trapping rule for PTP EtherType but got a lot of pushback: https://patchwork.ozlabs.org/project/netdev/patch/20190813025214.18601-5-yangbo.lu@nxp.com/ But right now, that isn't needed at all. The application stack (ptp4l) does this for the PTP multicast addresses it's interested in (which are configurable, and include 01-1b-19-00-00-00): memset(&mreq, 0, sizeof(mreq)); mreq.mr_ifindex = index; mreq.mr_type = PACKET_MR_MULTICAST; mreq.mr_alen = MAC_LEN; memcpy(mreq.mr_address, addr1, MAC_LEN); err1 = setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq)); Into the kernel, this translates into a dev_mc_add on the switch network interfaces, and our drivers know that it means they should translate it into a host MDB address (make the CPU port be the destination). Previously, this was broken because all mdb addresses were treated as IPv4 (which 01-1b-19-00-00-00 obviously is not). Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 94 +++++++++++++++++++++++++----- drivers/net/ethernet/mscc/ocelot.h | 1 + 2 files changed, 80 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index b6254c20f2f0..e815aad8d85e 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -944,10 +944,68 @@ static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot, return NULL; } +static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr) +{ + if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e) + return ENTRYTYPE_MACv4; + if (addr[0] == 0x33 && addr[1] == 0x33) + return ENTRYTYPE_MACv6; + return ENTRYTYPE_NORMAL; +} + +static int ocelot_mdb_get_pgid(struct ocelot *ocelot, + enum macaccess_entry_type entry_type) +{ + int pgid; + + /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and + * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the + * destination mask table (PGID), the destination set is programmed as + * part of the entry MAC address.", and the DEST_IDX is set to 0. + */ + if (entry_type == ENTRYTYPE_MACv4 || + entry_type == ENTRYTYPE_MACv6) + return 0; + + for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) { + struct ocelot_multicast *mc; + bool used = false; + + list_for_each_entry(mc, &ocelot->multicast, list) { + if (mc->pgid == pgid) { + used = true; + break; + } + } + + if (!used) + return pgid; + } + + return -1; +} + +static void ocelot_encode_ports_to_mdb(unsigned char *addr, + struct ocelot_multicast *mc, + enum macaccess_entry_type entry_type) +{ + memcpy(addr, mc->addr, ETH_ALEN); + + if (entry_type == ENTRYTYPE_MACv4) { + addr[0] = 0; + addr[1] = mc->ports >> 8; + addr[2] = mc->ports & 0xff; + } else if (entry_type == ENTRYTYPE_MACv6) { + addr[0] = mc->ports >> 8; + addr[1] = mc->ports & 0xff; + } +} + int ocelot_port_mdb_add(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + enum macaccess_entry_type entry_type; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; u16 vid = mdb->vid; @@ -959,33 +1017,40 @@ int ocelot_port_mdb_add(struct ocelot *ocelot, int port, if (!vid) vid = ocelot_port->pvid; + entry_type = ocelot_classify_mdb(mdb->addr); + mc = ocelot_multicast_get(ocelot, mdb->addr, vid); if (!mc) { + int pgid = ocelot_mdb_get_pgid(ocelot, entry_type); + + if (pgid < 0) { + dev_err(ocelot->dev, + "No more PGIDs available for mdb %pM vid %d\n", + mdb->addr, vid); + return -ENOSPC; + } + mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL); if (!mc) return -ENOMEM; memcpy(mc->addr, mdb->addr, ETH_ALEN); mc->vid = vid; + mc->pgid = pgid; list_add_tail(&mc->list, &ocelot->multicast); new = true; } - memcpy(addr, mc->addr, ETH_ALEN); - addr[0] = 0; - if (!new) { - addr[1] = mc->ports >> 8; - addr[2] = mc->ports & 0xff; + ocelot_encode_ports_to_mdb(addr, mc, entry_type); ocelot_mact_forget(ocelot, addr, vid); } mc->ports |= BIT(port); - addr[1] = mc->ports >> 8; - addr[2] = mc->ports & 0xff; + ocelot_encode_ports_to_mdb(addr, mc, entry_type); - return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); + return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type); } EXPORT_SYMBOL(ocelot_port_mdb_add); @@ -993,6 +1058,7 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, const struct switchdev_obj_port_mdb *mdb) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + enum macaccess_entry_type 
entry_type; unsigned char addr[ETH_ALEN]; struct ocelot_multicast *mc; u16 vid = mdb->vid; @@ -1007,10 +1073,9 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, if (!mc) return -ENOENT; - memcpy(addr, mc->addr, ETH_ALEN); - addr[0] = 0; - addr[1] = mc->ports >> 8; - addr[2] = mc->ports & 0xff; + entry_type = ocelot_classify_mdb(mdb->addr); + + ocelot_encode_ports_to_mdb(addr, mc, entry_type); ocelot_mact_forget(ocelot, addr, vid); mc->ports &= ~BIT(port); @@ -1020,10 +1085,9 @@ int ocelot_port_mdb_del(struct ocelot *ocelot, int port, return 0; } - addr[1] = mc->ports >> 8; - addr[2] = mc->ports & 0xff; + ocelot_encode_ports_to_mdb(addr, mc, entry_type); - return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4); + return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type); } EXPORT_SYMBOL(ocelot_port_mdb_del); diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index be4a41646e5e..394362e23c47 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -46,6 +46,7 @@ struct ocelot_multicast { unsigned char addr[ETH_ALEN]; u16 vid; u16 ports; + int pgid; }; struct ocelot_port_tc { From 75674e31593ba3b18459816d9b52b3d87dbfadc0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 21 Jun 2020 15:36:39 +0100 Subject: [PATCH 0213/2056] net: mtk_eth_soc: use resolved link config in mac_link_up() Convert the mtk_eth_soc driver to use the finalised link parameters in mac_link_up() rather than the parameters in mac_config(). Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 45 ++++++++++++--------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f6a1f8666f95..20db302d31ce 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -344,29 +344,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode, /* Setup gmac */ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); mcr_new = mcr_cur; - mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | - MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | - MAC_MCR_FORCE_RX_FC); mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK; - switch (state->speed) { - case SPEED_2500: - case SPEED_1000: - mcr_new |= MAC_MCR_SPEED_1000; - break; - case SPEED_100: - mcr_new |= MAC_MCR_SPEED_100; - break; - } - if (state->duplex == DUPLEX_FULL) { - mcr_new |= MAC_MCR_FORCE_DPX; - if (state->pause & MLO_PAUSE_TX) - mcr_new |= MAC_MCR_FORCE_TX_FC; - if (state->pause & MLO_PAUSE_RX) - mcr_new |= MAC_MCR_FORCE_RX_FC; - } - /* Only update control register when needed! 
*/ if (mcr_new != mcr_cur) mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); @@ -443,6 +423,31 @@ static void mtk_mac_link_up(struct phylink_config *config, phylink_config); u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | + MAC_MCR_FORCE_RX_FC); + + /* Configure speed */ + switch (speed) { + case SPEED_2500: + case SPEED_1000: + mcr |= MAC_MCR_SPEED_1000; + break; + case SPEED_100: + mcr |= MAC_MCR_SPEED_100; + break; + } + + /* Configure duplex */ + if (duplex == DUPLEX_FULL) + mcr |= MAC_MCR_FORCE_DPX; + + /* Configure pause modes - phylink will avoid these for half duplex */ + if (tx_pause) + mcr |= MAC_MCR_FORCE_TX_FC; + if (rx_pause) + mcr |= MAC_MCR_FORCE_RX_FC; + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); } From c5efcf17bf84b20dd76ca56de6dd99a40a89c4e3 Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Sun, 21 Jun 2020 22:24:30 -0400 Subject: [PATCH 0214/2056] tcindex_change: Remove redundant null check arg cannot be NULL since its already being dereferenced before. Remove the redundant NULL check. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- net/sched/cls_tcindex.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 61e95029c18f..78bec347b8b6 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -533,7 +533,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb, pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p," "p %p,r %p,*arg %p\n", - tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL); + tp, handle, tca, arg, opt, p, r, *arg); if (!opt) return 0; From 29cb9868fb69582da3205cf4a5eb5dc8f740ed33 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 23 Jun 2020 13:43:06 +1000 Subject: [PATCH 0215/2056] net/core/devlink.c: remove new uninitialized_var() usage Signed-off-by: Stephen Rothwell Signed-off-by: David S. Miller --- net/core/devlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 455998a57671..6ae36808c152 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -583,7 +583,7 @@ devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *por ops = devlink->ops; if (ops->port_function_hw_addr_get) { - int uninitialized_var(hw_addr_len); + int hw_addr_len; u8 hw_addr[MAX_ADDR_LEN]; err = ops->port_function_hw_addr_get(devlink, port, hw_addr, &hw_addr_len, extack); From 071a02046c262f633ef8d9064cf36fd6def6d0a5 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 22 Jun 2020 17:53:04 +0300 Subject: [PATCH 0216/2056] net: atlantic: A2: half duplex support This patch adds support for 10M/100M/1G half duplex rates, which are supported by A2 in additional to full duplex rates supported by A1. Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_common.h | 13 ++-- .../net/ethernet/aquantia/atlantic/aq_hw.h | 8 ++- .../net/ethernet/aquantia/atlantic/aq_nic.c | 60 ++++++++++++++----- .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 1 + .../atlantic/hw_atl/hw_atl_utils_fw2x.c | 1 + .../aquantia/atlantic/hw_atl2/hw_atl2.c | 5 +- .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 5 ++ 7 files changed, 70 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 52ad9433cabc..1587528ca3f6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -58,11 +58,14 @@ #define AQ_NIC_RATE_1G BIT(4) #define AQ_NIC_RATE_100M BIT(5) #define AQ_NIC_RATE_10M BIT(6) +#define AQ_NIC_RATE_1G_HALF BIT(7) +#define AQ_NIC_RATE_100M_HALF BIT(8) +#define AQ_NIC_RATE_10M_HALF BIT(9) -#define AQ_NIC_RATE_EEE_10G BIT(7) -#define AQ_NIC_RATE_EEE_5G BIT(8) -#define AQ_NIC_RATE_EEE_2G5 BIT(9) -#define AQ_NIC_RATE_EEE_1G BIT(10) -#define AQ_NIC_RATE_EEE_100M BIT(11) +#define AQ_NIC_RATE_EEE_10G BIT(10) +#define AQ_NIC_RATE_EEE_5G BIT(11) +#define AQ_NIC_RATE_EEE_2G5 BIT(12) +#define AQ_NIC_RATE_EEE_1G BIT(13) +#define AQ_NIC_RATE_EEE_100M BIT(14) #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index ed5b465bc664..1408a522eff1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_hw.h: Declaration of abstract interface for NIC hardware specific @@ -69,6 +70,7 @@ struct aq_hw_caps_s { struct aq_hw_link_status_s { unsigned int mbps; + bool full_duplex; }; struct aq_stats_s { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 4435c6374f7e..49528fcdc947 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -939,8 +939,11 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, cmd->base.port = PORT_FIBRE; else cmd->base.port = PORT_TP; - /* This driver supports only 10G capable adapters, so DUPLEX_FULL */ - cmd->base.duplex = DUPLEX_FULL; + + cmd->base.duplex = DUPLEX_UNKNOWN; + if (self->link_status.mbps) + cmd->base.duplex = self->link_status.full_duplex ? 
+ DUPLEX_FULL : DUPLEX_HALF; cmd->base.autoneg = self->aq_nic_cfg.is_autoneg; ethtool_link_ksettings_zero_link_mode(cmd, supported); @@ -961,14 +964,26 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full); + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M) ethtool_link_ksettings_add_link_mode(cmd, supported, 100baseT_Full); + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M) ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full); + if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + if (self->aq_nic_cfg.aq_hw_caps->flow_control) { ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); @@ -988,30 +1003,42 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, if (self->aq_nic_cfg.is_autoneg) ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G) ethtool_link_ksettings_add_link_mode(cmd, advertising, 5000baseT_Full); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5) ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M) + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M) ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX) ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); @@ -1031,27 +1058,32 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, int aq_nic_set_link_ksettings(struct aq_nic_s *self, const struct ethtool_link_ksettings *cmd) { - u32 speed = 0U; + int fduplex = (cmd->base.duplex == DUPLEX_FULL); + u32 speed = cmd->base.speed; u32 rate = 0U; int err = 0; + if (!fduplex && speed > SPEED_1000) { + err = -EINVAL; + goto err_exit; + } + if (cmd->base.autoneg == AUTONEG_ENABLE) { rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk; self->aq_nic_cfg.is_autoneg = true; } else { - speed = cmd->base.speed; - switch (speed) { case SPEED_10: - rate = AQ_NIC_RATE_10M; + rate = fduplex ? 
AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF; break; case SPEED_100: - rate = AQ_NIC_RATE_100M; + rate = fduplex ? AQ_NIC_RATE_100M + : AQ_NIC_RATE_100M_HALF; break; case SPEED_1000: - rate = AQ_NIC_RATE_1G; + rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF; break; case SPEED_2500: diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 73c0f41df8d8..1d9dee4951f9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -704,6 +704,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) return -EBUSY; } } + link_status->full_duplex = true; return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index eeedd8c90067..013676cd38e4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -274,6 +274,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) } else { link_status->mbps = 0; } + link_status->full_duplex = true; return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 8df9d4ef36f0..239d077e21d7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -64,8 +64,11 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { AQ_NIC_RATE_5G | AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | + AQ_NIC_RATE_1G_HALF | AQ_NIC_RATE_100M | - AQ_NIC_RATE_10M, + AQ_NIC_RATE_100M_HALF | + AQ_NIC_RATE_10M | + AQ_NIC_RATE_10M_HALF, }; static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index 0ffc33bd67d0..d64dfae8803e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -135,6 +135,10 @@ static void a2_link_speed_mask2fw(u32 speed, link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G); link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M); link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M); + + link_options->rate_1G_hd = !!(speed & AQ_NIC_RATE_1G_HALF); + link_options->rate_100M_hd = !!(speed & AQ_NIC_RATE_100M_HALF); + link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF); } static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed) @@ -202,6 +206,7 @@ static int aq_a2_fw_update_link_status(struct aq_hw_s *self) default: self->aq_link_status.mbps = 0; } + self->aq_link_status.full_duplex = link_status.duplex; return 0; } From e61b28686bae30b824b690c75d011a61f90c52dd Mon Sep 17 00:00:00 2001 From: Nikita Danilov Date: Mon, 22 Jun 2020 17:53:05 +0300 Subject: [PATCH 0217/2056] net: atlantic: remove baseX usage This patch removes 2.5G baseX wrong usage/reporting, since it shouldn't have been mixed with baseT. Signed-off-by: Nikita Danilov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 743d3b13b39d..ffcdda70265b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -618,9 +618,6 @@ static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed) if (speed & AQ_NIC_RATE_EEE_10G) rate |= SUPPORTED_10000baseT_Full; - if (speed & AQ_NIC_RATE_EEE_2G5) - rate |= SUPPORTED_2500baseX_Full; - if (speed & AQ_NIC_RATE_EEE_1G) rate |= SUPPORTED_1000baseT_Full; From ce6a690ccc99a7ece8b061d88d9457ddb556a749 Mon Sep 17 00:00:00 2001 From: Nikita Danilov Date: Mon, 22 Jun 2020 17:53:06 +0300 Subject: [PATCH 0218/2056] net: atlantic: A2: EEE support This patch adds EEE support on A2. Signed-off-by: Nikita Danilov Co-developed-by: Igor Russkikh Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../ethernet/aquantia/atlantic/aq_common.h | 5 ++ .../ethernet/aquantia/atlantic/aq_ethtool.c | 11 +-- .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 80 +++++++++++++++++++ 3 files changed, 91 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index 1587528ca3f6..23b2d390fcdd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -67,5 +67,10 @@ #define AQ_NIC_RATE_EEE_2G5 BIT(12) #define AQ_NIC_RATE_EEE_1G BIT(13) #define AQ_NIC_RATE_EEE_100M BIT(14) +#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\ + AQ_NIC_RATE_EEE_5G |\ + AQ_NIC_RATE_EEE_2G5 |\ + AQ_NIC_RATE_EEE_1G |\ + AQ_NIC_RATE_EEE_100M) #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index ffcdda70265b..8225187eeef2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_ethtool.c: Definition of ethertool related functions. 
*/ @@ -611,7 +612,7 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev, return 0; } -static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed) +static u32 eee_mask_to_ethtool_mask(u32 speed) { u32 rate = 0; @@ -653,7 +654,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) eee->eee_enabled = !!eee->advertised; eee->tx_lpi_enabled = eee->eee_enabled; - if (eee->advertised & eee->lp_advertised) + if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK) eee->eee_active = true; return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index d64dfae8803e..9216517f6e65 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -7,6 +7,7 @@ #include "aq_hw.h" #include "aq_hw_utils.h" +#include "aq_nic.h" #include "hw_atl/hw_atl_llh.h" #include "hw_atl2_utils.h" #include "hw_atl2_llh.h" @@ -141,6 +142,42 @@ static void a2_link_speed_mask2fw(u32 speed, link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF); } +static u32 a2_fw_dev_to_eee_mask(struct device_link_caps_s *device_link_caps) +{ + u32 rate = 0; + + if (device_link_caps->eee_10G) + rate |= AQ_NIC_RATE_EEE_10G; + if (device_link_caps->eee_5G) + rate |= AQ_NIC_RATE_EEE_5G; + if (device_link_caps->eee_2P5G) + rate |= AQ_NIC_RATE_EEE_2G5; + if (device_link_caps->eee_1G) + rate |= AQ_NIC_RATE_EEE_1G; + if (device_link_caps->eee_100M) + rate |= AQ_NIC_RATE_EEE_100M; + + return rate; +} + +static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) +{ + u32 rate = 0; + + if (lkp_link_caps->eee_10G) + rate |= AQ_NIC_RATE_EEE_10G; + if (lkp_link_caps->eee_5G) + rate |= AQ_NIC_RATE_EEE_5G; + if (lkp_link_caps->eee_2P5G) + rate |= AQ_NIC_RATE_EEE_2G5; + if (lkp_link_caps->eee_1G) + rate |= AQ_NIC_RATE_EEE_1G; + if (lkp_link_caps->eee_100M) + rate |= AQ_NIC_RATE_EEE_100M; + + return rate; +} + static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed) { struct link_options_s link_options; @@ -153,6 +190,17 @@ static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed) return hw_atl2_shared_buffer_finish_ack(self); } +static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self, + struct link_options_s *link_options, + u32 eee_speeds) +{ + link_options->eee_10G = !!(eee_speeds & AQ_NIC_RATE_EEE_10G); + link_options->eee_5G = !!(eee_speeds & AQ_NIC_RATE_EEE_5G); + link_options->eee_2P5G = !!(eee_speeds & AQ_NIC_RATE_EEE_2G5); + link_options->eee_1G = !!(eee_speeds & AQ_NIC_RATE_EEE_1G); + link_options->eee_100M = !!(eee_speeds & AQ_NIC_RATE_EEE_100M); +} + static int aq_a2_fw_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state) { @@ -163,6 +211,8 @@ static int aq_a2_fw_set_state(struct aq_hw_s *self, switch (state) { case MPI_INIT: link_options.link_up = 1U; + aq_a2_fw_upd_eee_rate_bits(self, &link_options, + self->aq_nic_cfg->eee_speeds); break; case MPI_DEINIT: link_options.link_up = 0U; @@ -265,6 +315,34 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self) return 0; } +static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + aq_a2_fw_upd_eee_rate_bits(self, &link_options, speed); + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static int aq_a2_fw_get_eee_rate(struct aq_hw_s 
*self, u32 *rate, + u32 *supported_rates) +{ + struct device_link_caps_s device_link_caps; + struct lkp_link_caps_s lkp_link_caps; + + hw_atl2_shared_buffer_read(self, device_link_caps, device_link_caps); + hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps); + + *supported_rates = a2_fw_dev_to_eee_mask(&device_link_caps); + *rate = a2_fw_lkp_to_mask(&lkp_link_caps); + + return 0; +} + static int aq_a2_fw_renegotiate(struct aq_hw_s *self) { struct link_options_s link_options; @@ -322,4 +400,6 @@ const struct aq_fw_ops aq_a2_fw_ops = { .set_state = aq_a2_fw_set_state, .update_link_status = aq_a2_fw_update_link_status, .update_stats = aq_a2_fw_update_stats, + .set_eee_rate = aq_a2_fw_set_eee_rate, + .get_eee_rate = aq_a2_fw_get_eee_rate, }; From 3e168de529b14edd13c6842ff7bd415f25672db8 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 22 Jun 2020 17:53:07 +0300 Subject: [PATCH 0219/2056] net: atlantic: A2: flow control support This patch adds flow control support on A2. Co-developed-by: Dmitry Bogdanov Signed-off-by: Dmitry Bogdanov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 2 +- .../aquantia/atlantic/hw_atl/hw_atl_b0.h | 2 ++ .../aquantia/atlantic/hw_atl2/hw_atl2.c | 3 ++ .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 36 +++++++++++++++++++ 4 files changed, 42 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 14d79f70cad7..8ed6fd845969 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -108,7 +108,7 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self) return err; } -static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) +int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc) { hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index 30f468f2084d..bd9a6fb005c9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -62,6 +62,8 @@ void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self); int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr); +int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc); + int hw_atl_b0_hw_start(struct aq_hw_s *self); int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index 239d077e21d7..c306c26e802b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -181,6 +181,8 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self) threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); + + hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); } /* QoS 802.1p priority -> TC mapping */ @@ -841,4 +843,5 @@ const struct aq_hw_ops hw_atl2_ops = { .hw_get_hw_stats = hw_atl2_utils_get_hw_stats, .hw_get_fw_version = hw_atl2_utils_get_fw_version, .hw_set_offload = hw_atl_b0_hw_offload_set, + .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c 
index 9216517f6e65..0edcc0253b2e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -190,6 +190,15 @@ static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed) return hw_atl2_shared_buffer_finish_ack(self); } +static void aq_a2_fw_set_mpi_flow_control(struct aq_hw_s *self, + struct link_options_s *link_options) +{ + u32 flow_control = self->aq_nic_cfg->fc.req; + + link_options->pause_rx = !!(flow_control & AQ_NIC_FC_RX); + link_options->pause_tx = !!(flow_control & AQ_NIC_FC_TX); +} + static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self, struct link_options_s *link_options, u32 eee_speeds) @@ -213,6 +222,7 @@ static int aq_a2_fw_set_state(struct aq_hw_s *self, link_options.link_up = 1U; aq_a2_fw_upd_eee_rate_bits(self, &link_options, self->aq_nic_cfg->eee_speeds); + aq_a2_fw_set_mpi_flow_control(self, &link_options); break; case MPI_DEINIT: link_options.link_up = 0U; @@ -363,6 +373,30 @@ static int aq_a2_fw_renegotiate(struct aq_hw_s *self) return err; } +static int aq_a2_fw_set_flow_control(struct aq_hw_s *self) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + aq_a2_fw_set_mpi_flow_control(self, &link_options); + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode) +{ + struct link_status_s link_status; + + hw_atl2_shared_buffer_read(self, link_status, link_status); + + *fcmode = ((link_status.pause_rx) ? AQ_NIC_FC_RX : 0) | + ((link_status.pause_tx) ? AQ_NIC_FC_TX : 0); + return 0; +} + u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) { struct version_s version; @@ -402,4 +436,6 @@ const struct aq_fw_ops aq_a2_fw_ops = { .update_stats = aq_a2_fw_update_stats, .set_eee_rate = aq_a2_fw_set_eee_rate, .get_eee_rate = aq_a2_fw_get_eee_rate, + .set_flow_control = aq_a2_fw_set_flow_control, + .get_flow_control = aq_a2_fw_get_flow_control, }; From 2b53b04de3b185ada35155e668a24a68f6a753ba Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Mon, 22 Jun 2020 17:53:08 +0300 Subject: [PATCH 0220/2056] net: atlantic: A2: report link partner capabilities This patch adds link partner capabilities reporting support on A2. In particular, the following capabilities are available for reporting: * link rate; * EEE; * flow control. Signed-off-by: Dmitry Bogdanov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../net/ethernet/aquantia/atlantic/aq_hw.h | 2 + .../net/ethernet/aquantia/atlantic/aq_nic.c | 49 +++++++++++++++++++ .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 30 ++++++++++++ 3 files changed, 81 insertions(+) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 1408a522eff1..f2663ad22209 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -71,6 +71,8 @@ struct aq_hw_caps_s { struct aq_hw_link_status_s { unsigned int mbps; bool full_duplex; + u32 lp_link_speed_msk; + u32 lp_flow_control; }; struct aq_stats_s { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 49528fcdc947..647b22d89b1a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -935,6 +935,8 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self) void aq_nic_get_link_ksettings(struct aq_nic_s *self, struct ethtool_link_ksettings *cmd) { + u32 lp_link_speed_msk; + if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE) cmd->base.port = PORT_FIBRE; else @@ -1053,6 +1055,53 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self, ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); else ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk; + + if (lp_link_speed_msk & AQ_NIC_RATE_10G) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 10000baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_5G) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 5000baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_2G5) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 2500baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_1G) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 1000baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 1000baseT_Half); + + if (lp_link_speed_msk & AQ_NIC_RATE_100M) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 100baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 100baseT_Half); + + if (lp_link_speed_msk & AQ_NIC_RATE_10M) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 10baseT_Full); + + if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + 10baseT_Half); + + if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + Pause); + if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^ + !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)) + ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, + Asym_Pause); } int aq_nic_set_link_ksettings(struct aq_nic_s *self, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index 0edcc0253b2e..c5d1a1404042 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -164,6 +164,27 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) { u32 rate = 0; + if (lkp_link_caps->rate_10G) + 
rate |= AQ_NIC_RATE_10G; + if (lkp_link_caps->rate_5G) + rate |= AQ_NIC_RATE_5G; + if (lkp_link_caps->rate_N5G) + rate |= AQ_NIC_RATE_5GSR; + if (lkp_link_caps->rate_2P5G) + rate |= AQ_NIC_RATE_2G5; + if (lkp_link_caps->rate_1G) + rate |= AQ_NIC_RATE_1G; + if (lkp_link_caps->rate_1G_hd) + rate |= AQ_NIC_RATE_1G_HALF; + if (lkp_link_caps->rate_100M) + rate |= AQ_NIC_RATE_100M; + if (lkp_link_caps->rate_100M_hd) + rate |= AQ_NIC_RATE_100M_HALF; + if (lkp_link_caps->rate_10M) + rate |= AQ_NIC_RATE_10M; + if (lkp_link_caps->rate_10M_hd) + rate |= AQ_NIC_RATE_10M_HALF; + if (lkp_link_caps->eee_10G) rate |= AQ_NIC_RATE_EEE_10G; if (lkp_link_caps->eee_5G) @@ -240,6 +261,7 @@ static int aq_a2_fw_set_state(struct aq_hw_s *self, static int aq_a2_fw_update_link_status(struct aq_hw_s *self) { + struct lkp_link_caps_s lkp_link_caps; struct link_status_s link_status; hw_atl2_shared_buffer_read(self, link_status, link_status); @@ -268,6 +290,14 @@ static int aq_a2_fw_update_link_status(struct aq_hw_s *self) } self->aq_link_status.full_duplex = link_status.duplex; + hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps); + + self->aq_link_status.lp_link_speed_msk = + a2_fw_lkp_to_mask(&lkp_link_caps); + self->aq_link_status.lp_flow_control = + ((lkp_link_caps.pause_rx) ? AQ_NIC_FC_RX : 0) | + ((lkp_link_caps.pause_tx) ? AQ_NIC_FC_TX : 0); + return 0; } From ecab78703f3b87b3e21160719b08819c7cc0f4e5 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Mon, 22 Jun 2020 17:53:09 +0300 Subject: [PATCH 0221/2056] net: atlantic: A2: phy loopback support This patch adds the phy loopback support on A2. Signed-off-by: Dmitry Bogdanov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 5 ++-- .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 2 +- .../aquantia/atlantic/hw_atl/hw_atl_b0.h | 1 + .../aquantia/atlantic/hw_atl2/hw_atl2.c | 1 + .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 23 +++++++++++++++++++ 5 files changed, 29 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 8225187eeef2..e53ba7bfaf61 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -836,6 +836,7 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags) struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; u32 priv_flags; + int ret = 0; cfg = aq_nic_get_cfg(aq_nic); priv_flags = cfg->priv_flags; @@ -857,10 +858,10 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags) dev_open(ndev, NULL); } } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) { - aq_nic_set_loopback(aq_nic); + ret = aq_nic_set_loopback(aq_nic); } - return 0; + return ret; } const struct ethtool_ops aq_ethtool_ops = { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 8ed6fd845969..b023c3324a59 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1556,7 +1556,7 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) return aq_hw_err_from_flags(self); } -static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable) +int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable) { switch (mode) { case AQ_HW_LOOPBACK_DMA_SYS: diff --git 
a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h index bd9a6fb005c9..66d158900141 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h @@ -63,6 +63,7 @@ void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self); int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr); int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc); +int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable); int hw_atl_b0_hw_start(struct aq_hw_s *self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index c306c26e802b..c65e6daad0e5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -843,5 +843,6 @@ const struct aq_hw_ops hw_atl2_ops = { .hw_get_hw_stats = hw_atl2_utils_get_hw_stats, .hw_get_fw_version = hw_atl2_utils_get_fw_version, .hw_set_offload = hw_atl_b0_hw_offload_set, + .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index c5d1a1404042..3a9352190816 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -427,6 +427,28 @@ static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode) return 0; } +static int aq_a2_fw_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + switch (mode) { + case AQ_HW_LOOPBACK_PHYINT_SYS: + link_options.internal_loopback = enable; + break; + case AQ_HW_LOOPBACK_PHYEXT_SYS: + link_options.external_loopback = enable; + break; + default: + return -EINVAL; + } + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) { struct version_s version; @@ -468,4 +490,5 @@ const struct aq_fw_ops aq_a2_fw_ops = { .get_eee_rate = aq_a2_fw_get_eee_rate, .set_flow_control = aq_a2_fw_set_flow_control, .get_flow_control = aq_a2_fw_get_flow_control, + .set_phyloopback = aq_a2_fw_set_phyloopback, }; From f9c70bdc279b191da8d60777c627702c06e4a37d Mon Sep 17 00:00:00 2001 From: Lihong Kou Date: Tue, 23 Jun 2020 20:28:41 +0800 Subject: [PATCH 0222/2056] Bluetooth: add a mutex lock to avoid UAF in do_enale_set In the case we set or free the global value listen_chan in different threads, we can encounter the UAF problems because the method is not protected by any lock, add one to avoid this bug. 
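The race is a classic one: two asynchronous contexts (the debugfs write handler and the do_enable_set work item) both close, put and replace the global listen_chan without any serialization, so one context can free the channel the other is still dereferencing. The fix in the diff further below simply wraps both sequences in a mutex. As a rough user-space analogue of the pattern -- illustrative only, with invented names (shared_chan, chan_create) and a pthread mutex standing in for the kernel one -- the protected close/replace sequence looks like this:

/*
 * Illustrative user-space analogue of the listen_chan race (not kernel code).
 * Two workers both tear down and replace a shared object; without set_lock,
 * one worker could free the object while the other still uses it (UAF).
 * Build: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct chan { int id; };

static struct chan *shared_chan;	/* plays the role of listen_chan */
static pthread_mutex_t set_lock = PTHREAD_MUTEX_INITIALIZER;

static struct chan *chan_create(int id)
{
	struct chan *c = malloc(sizeof(*c));

	if (c)
		c->id = id;
	return c;
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&set_lock);
		if (shared_chan) {		/* "close" and drop the old channel */
			free(shared_chan);
			shared_chan = NULL;
		}
		shared_chan = chan_create(i);	/* install a replacement */
		pthread_mutex_unlock(&set_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("final channel id: %d\n", shared_chan ? shared_chan->id : -1);
	free(shared_chan);
	return 0;
}

The point is that the whole close-free-reassign sequence, not just the pointer write, has to sit inside the critical section. The syzkaller KASAN report that exposed the unprotected variant follows.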
BUG: KASAN: use-after-free in l2cap_chan_close+0x48/0x990 net/bluetooth/l2cap_core.c:730 Read of size 8 at addr ffff888096950000 by task kworker/1:102/2868 CPU: 1 PID: 2868 Comm: kworker/1:102 Not tainted 5.5.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: events do_enable_set Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1fb/0x318 lib/dump_stack.c:118 print_address_description+0x74/0x5c0 mm/kasan/report.c:374 __kasan_report+0x149/0x1c0 mm/kasan/report.c:506 kasan_report+0x26/0x50 mm/kasan/common.c:641 __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 l2cap_chan_close+0x48/0x990 net/bluetooth/l2cap_core.c:730 do_enable_set+0x660/0x900 net/bluetooth/6lowpan.c:1074 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Allocated by task 2870: save_stack mm/kasan/common.c:72 [inline] set_track mm/kasan/common.c:80 [inline] __kasan_kmalloc+0x118/0x1c0 mm/kasan/common.c:515 kasan_kmalloc+0x9/0x10 mm/kasan/common.c:529 kmem_cache_alloc_trace+0x221/0x2f0 mm/slab.c:3551 kmalloc include/linux/slab.h:555 [inline] kzalloc include/linux/slab.h:669 [inline] l2cap_chan_create+0x50/0x320 net/bluetooth/l2cap_core.c:446 chan_create net/bluetooth/6lowpan.c:640 [inline] bt_6lowpan_listen net/bluetooth/6lowpan.c:959 [inline] do_enable_set+0x6a4/0x900 net/bluetooth/6lowpan.c:1078 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 Freed by task 2870: save_stack mm/kasan/common.c:72 [inline] set_track mm/kasan/common.c:80 [inline] kasan_set_free_info mm/kasan/common.c:337 [inline] __kasan_slab_free+0x12e/0x1e0 mm/kasan/common.c:476 kasan_slab_free+0xe/0x10 mm/kasan/common.c:485 __cache_free mm/slab.c:3426 [inline] kfree+0x10d/0x220 mm/slab.c:3757 l2cap_chan_destroy net/bluetooth/l2cap_core.c:484 [inline] kref_put include/linux/kref.h:65 [inline] l2cap_chan_put+0x170/0x190 net/bluetooth/l2cap_core.c:498 do_enable_set+0x66c/0x900 net/bluetooth/6lowpan.c:1075 process_one_work+0x7f5/0x10f0 kernel/workqueue.c:2264 worker_thread+0xbbc/0x1630 kernel/workqueue.c:2410 kthread+0x332/0x350 kernel/kthread.c:255 ret_from_fork+0x24/0x30 arch/x86/entry/entry_64.S:352 The buggy address belongs to the object at ffff888096950000 which belongs to the cache kmalloc-2k of size 2048 The buggy address is located 0 bytes inside of 2048-byte region [ffff888096950000, ffff888096950800) The buggy address belongs to the page: page:ffffea00025a5400 refcount:1 mapcount:0 mapping:ffff8880aa400e00 index:0x0 flags: 0xfffe0000000200(slab) raw: 00fffe0000000200 ffffea00027d1548 ffffea0002397808 ffff8880aa400e00 raw: 0000000000000000 ffff888096950000 0000000100000001 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88809694ff00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ffff88809694ff80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 >ffff888096950000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff888096950080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888096950100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Reported-by: syzbot+96414aa0033c363d8458@syzkaller.appspotmail.com Signed-off-by: Lihong Kou 
Signed-off-by: Marcel Holtmann --- net/bluetooth/6lowpan.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index bb55d92691b0..cff4944d5b66 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -50,6 +50,7 @@ static bool enable_6lowpan; /* We are listening incoming connections via this channel */ static struct l2cap_chan *listen_chan; +static DEFINE_MUTEX(set_lock); struct lowpan_peer { struct list_head list; @@ -1078,12 +1079,14 @@ static void do_enable_set(struct work_struct *work) enable_6lowpan = set_enable->flag; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); } listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); kfree(set_enable); } @@ -1135,11 +1138,13 @@ static ssize_t lowpan_control_write(struct file *fp, if (ret == -EINVAL) return ret; + mutex_lock(&set_lock); if (listen_chan) { l2cap_chan_close(listen_chan, 0); l2cap_chan_put(listen_chan); listen_chan = NULL; } + mutex_unlock(&set_lock); if (conn) { struct lowpan_peer *peer; From f645125711c80f9651e4a57403d799070c6ad13b Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 19 Jun 2020 19:52:01 +0000 Subject: [PATCH 0223/2056] Bluetooth: btusb: fix up firmware download sequence Data RAM on the device have to be powered on before starting to download the firmware. Fixes: a1c49c434e15 ("Bluetooth: btusb: Add protocol support for MediaTek MT7668U USB devices") Co-developed-by: Mark Chen Signed-off-by: Mark Chen Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index c7cc8e594166..e42fdd625eb0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -2934,7 +2934,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -2942,6 +2942,20 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + return err; + } + fw_ptr = fw->data; fw_size = fw->size; From 737cd06072a72e8984e41af8e5919338d0c5bf2b Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Fri, 19 Jun 2020 19:52:02 +0000 Subject: [PATCH 0224/2056] Bluetooth: btmtksdio: fix up firmware download sequence Data RAM on the device have to be powered on before starting to download the firmware. 
Fixes: 9aebfd4a2200 ("Bluetooth: mediatek: add support for MediaTek MT7663S and MT7668S SDIO devices") Co-developed-by: Mark Chen Signed-off-by: Mark Chen Signed-off-by: Sean Wang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmtksdio.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 519788c442ca..11494cd2a982 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -685,7 +685,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) const u8 *fw_ptr; size_t fw_size; int err, dlen; - u8 flag; + u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { @@ -693,6 +693,20 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname) return err; } + /* Power on data RAM the firmware relies on. */ + param = 1; + wmt_params.op = MTK_WMT_FUNC_CTRL; + wmt_params.flag = 3; + wmt_params.dlen = sizeof(param); + wmt_params.data = ¶m; + wmt_params.status = NULL; + + err = mtk_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); + return err; + } + fw_ptr = fw->data; fw_size = fw->size; From d63cc24933c774ea464090af1998a7b63f11c166 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:42:09 +0300 Subject: [PATCH 0225/2056] net/mlx5: Export resource dump interface Export some of the resource dump API. mlx5_ib driver will use it in downstream patches. Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- .../mellanox/mlx5/core/diag/rsc_dump.c | 3 ++ .../mellanox/mlx5/core/diag/rsc_dump.h | 33 +------------ include/linux/mlx5/rsc_dump.h | 48 +++++++++++++++++++ 3 files changed, 52 insertions(+), 32 deletions(-) create mode 100644 include/linux/mlx5/rsc_dump.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 17ab7efe693d..10218c2324cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -130,11 +130,13 @@ struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, cmd->mem_size = key->size; return cmd; } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_create); void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd) { kfree(cmd); } +EXPORT_SYMBOL(mlx5_rsc_dump_cmd_destroy); int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, struct page *page, int *size) @@ -155,6 +157,7 @@ int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, return more_dump; } +EXPORT_SYMBOL(mlx5_rsc_dump_next); #define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h index 148270073e71..64c4956db6d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h @@ -4,41 +4,10 @@ #ifndef __MLX5_RSC_DUMP_H #define __MLX5_RSC_DUMP_H +#include #include #include "mlx5_core.h" -enum mlx5_sgmt_type { - MLX5_SGMT_TYPE_HW_CQPC, - MLX5_SGMT_TYPE_HW_SQPC, - MLX5_SGMT_TYPE_HW_RQPC, - MLX5_SGMT_TYPE_FULL_SRQC, - MLX5_SGMT_TYPE_FULL_CQC, - MLX5_SGMT_TYPE_FULL_EQC, - MLX5_SGMT_TYPE_FULL_QPC, - MLX5_SGMT_TYPE_SND_BUFF, - MLX5_SGMT_TYPE_RCV_BUFF, - MLX5_SGMT_TYPE_SRQ_BUFF, - MLX5_SGMT_TYPE_CQ_BUFF, - MLX5_SGMT_TYPE_EQ_BUFF, - 
MLX5_SGMT_TYPE_SX_SLICE, - MLX5_SGMT_TYPE_SX_SLICE_ALL, - MLX5_SGMT_TYPE_RDB, - MLX5_SGMT_TYPE_RX_SLICE_ALL, - MLX5_SGMT_TYPE_MENU, - MLX5_SGMT_TYPE_TERMINATE, - - MLX5_SGMT_TYPE_NUM, /* Keep last */ -}; - -struct mlx5_rsc_key { - enum mlx5_sgmt_type rsc; - int index1; - int index2; - int num_of_obj1; - int num_of_obj2; - int size; -}; - #define MLX5_RSC_DUMP_ALL 0xFFFF struct mlx5_rsc_dump_cmd; struct mlx5_rsc_dump; diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h new file mode 100644 index 000000000000..87415fa754fe --- /dev/null +++ b/include/linux/mlx5/rsc_dump.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies inc. */ + +#include + +#ifndef __MLX5_RSC_DUMP +#define __MLX5_RSC_DUMP + +enum mlx5_sgmt_type { + MLX5_SGMT_TYPE_HW_CQPC, + MLX5_SGMT_TYPE_HW_SQPC, + MLX5_SGMT_TYPE_HW_RQPC, + MLX5_SGMT_TYPE_FULL_SRQC, + MLX5_SGMT_TYPE_FULL_CQC, + MLX5_SGMT_TYPE_FULL_EQC, + MLX5_SGMT_TYPE_FULL_QPC, + MLX5_SGMT_TYPE_SND_BUFF, + MLX5_SGMT_TYPE_RCV_BUFF, + MLX5_SGMT_TYPE_SRQ_BUFF, + MLX5_SGMT_TYPE_CQ_BUFF, + MLX5_SGMT_TYPE_EQ_BUFF, + MLX5_SGMT_TYPE_SX_SLICE, + MLX5_SGMT_TYPE_SX_SLICE_ALL, + MLX5_SGMT_TYPE_RDB, + MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_MENU, + MLX5_SGMT_TYPE_TERMINATE, + + MLX5_SGMT_TYPE_NUM, /* Keep last */ +}; + +struct mlx5_rsc_key { + enum mlx5_sgmt_type rsc; + int index1; + int index2; + int num_of_obj1; + int num_of_obj2; + int size; +}; + +struct mlx5_rsc_dump_cmd; + +struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, + struct mlx5_rsc_key *key); +void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd); +int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, + struct page *page, int *size); +#endif /* __MLX5_RSC_DUMP */ From 608ca553c9a2008908120e0e45b1cfc4aefcfd49 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 8 Apr 2020 12:36:20 +0300 Subject: [PATCH 0226/2056] net/mlx5: Add support in query QP, CQ and MKEY segments Introduce new resource dump segments - PRM_QUERY_QP, PRM_QUERY_CQ and PRM_QUERY_MKEY. These segments contains the resource dump in PRM query format. 
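Taken together with the interface exported in the previous patch, these segments give an out-of-module consumer everything it needs to pull a single object in PRM query format: build an mlx5_rsc_key, create a dump command, and iterate mlx5_rsc_dump_next() page by page. The kernel-context sketch below is illustrative only: dump_one_qp is an invented name, the use of index1 to carry the QP number is an assumption, and the ERR_PTR and "positive return means more data pending" conventions are taken from the existing mlx5e health-reporter users of this API.

/*
 * Hedged sketch, not part of the series: how a consumer such as mlx5_ib
 * might dump one QP in PRM query format through the exported interface.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/rsc_dump.h>

static int dump_one_qp(struct mlx5_core_dev *dev, u32 qpn)
{
	struct mlx5_rsc_key key = {
		.rsc	= MLX5_SGMT_TYPE_PRM_QUERY_QP,	/* new segment type */
		.index1	= qpn,				/* assumed QPN field */
	};
	struct mlx5_rsc_dump_cmd *cmd;
	struct page *page;
	int size, err;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	cmd = mlx5_rsc_dump_cmd_create(dev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		/* > 0: more pages pending, 0: done, < 0: error */
		err = mlx5_rsc_dump_next(dev, cmd, page, &size);
		if (err < 0)
			break;
		print_hex_dump_bytes("qp dump: ", DUMP_PREFIX_OFFSET,
				     page_address(page), size);
	} while (err > 0);

	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err < 0 ? err : 0;
}

Keeping the page allocation on the caller's side is what lets a consumer stream arbitrarily large dumps one page at a time instead of sizing a single buffer up front.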
Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 3 +++ include/linux/mlx5/rsc_dump.h | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c index 10218c2324cc..4924a5658853 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c @@ -23,6 +23,9 @@ static const char *const mlx5_rsc_sgmt_name[] = { MLX5_SGMT_STR_ASSING(SX_SLICE_ALL), MLX5_SGMT_STR_ASSING(RDB), MLX5_SGMT_STR_ASSING(RX_SLICE_ALL), + MLX5_SGMT_STR_ASSING(PRM_QUERY_QP), + MLX5_SGMT_STR_ASSING(PRM_QUERY_CQ), + MLX5_SGMT_STR_ASSING(PRM_QUERY_MKEY), }; struct mlx5_rsc_dump { diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h index 87415fa754fe..d11c0b228620 100644 --- a/include/linux/mlx5/rsc_dump.h +++ b/include/linux/mlx5/rsc_dump.h @@ -23,6 +23,9 @@ enum mlx5_sgmt_type { MLX5_SGMT_TYPE_SX_SLICE_ALL, MLX5_SGMT_TYPE_RDB, MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_PRM_QUERY_QP, + MLX5_SGMT_TYPE_PRM_QUERY_CQ, + MLX5_SGMT_TYPE_PRM_QUERY_MKEY, MLX5_SGMT_TYPE_MENU, MLX5_SGMT_TYPE_TERMINATE, From 79a28ddd18e9c653f13f60dfabee15c024e64b9b Mon Sep 17 00:00:00 2001 From: Alexandre Cassen Date: Tue, 23 Jun 2020 10:33:45 +0200 Subject: [PATCH 0227/2056] rtnetlink: add keepalived rtm_protocol Keepalived can set global static ip routes or virtual ip routes dynamically following VRRP protocol states. Using a dedicated rtm_protocol will help keeping track of it. Changes in v2: - fix tab/space indenting Signed-off-by: Alexandre Cassen Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 45 +++++++++++++++++----------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 073e71ef6bdd..879e64950a0a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -257,12 +257,12 @@ enum { /* rtm_protocol */ -#define RTPROT_UNSPEC 0 -#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; - not used by current IPv4 */ -#define RTPROT_KERNEL 2 /* Route installed by kernel */ -#define RTPROT_BOOT 3 /* Route installed during boot */ -#define RTPROT_STATIC 4 /* Route installed by administrator */ +#define RTPROT_UNSPEC 0 +#define RTPROT_REDIRECT 1 /* Route installed by ICMP redirects; + not used by current IPv4 */ +#define RTPROT_KERNEL 2 /* Route installed by kernel */ +#define RTPROT_BOOT 3 /* Route installed during boot */ +#define RTPROT_STATIC 4 /* Route installed by administrator */ /* Values of protocol >= RTPROT_STATIC are not interpreted by kernel; they are just passed from user and back as is. @@ -271,22 +271,23 @@ enum { avoid conflicts. 
*/ -#define RTPROT_GATED 8 /* Apparently, GateD */ -#define RTPROT_RA 9 /* RDISC/ND router advertisements */ -#define RTPROT_MRT 10 /* Merit MRT */ -#define RTPROT_ZEBRA 11 /* Zebra */ -#define RTPROT_BIRD 12 /* BIRD */ -#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ -#define RTPROT_XORP 14 /* XORP */ -#define RTPROT_NTK 15 /* Netsukuku */ -#define RTPROT_DHCP 16 /* DHCP client */ -#define RTPROT_MROUTED 17 /* Multicast daemon */ -#define RTPROT_BABEL 42 /* Babel daemon */ -#define RTPROT_BGP 186 /* BGP Routes */ -#define RTPROT_ISIS 187 /* ISIS Routes */ -#define RTPROT_OSPF 188 /* OSPF Routes */ -#define RTPROT_RIP 189 /* RIP Routes */ -#define RTPROT_EIGRP 192 /* EIGRP Routes */ +#define RTPROT_GATED 8 /* Apparently, GateD */ +#define RTPROT_RA 9 /* RDISC/ND router advertisements */ +#define RTPROT_MRT 10 /* Merit MRT */ +#define RTPROT_ZEBRA 11 /* Zebra */ +#define RTPROT_BIRD 12 /* BIRD */ +#define RTPROT_DNROUTED 13 /* DECnet routing daemon */ +#define RTPROT_XORP 14 /* XORP */ +#define RTPROT_NTK 15 /* Netsukuku */ +#define RTPROT_DHCP 16 /* DHCP client */ +#define RTPROT_MROUTED 17 /* Multicast daemon */ +#define RTPROT_KEEPALIVED 18 /* Keepalived daemon */ +#define RTPROT_BABEL 42 /* Babel daemon */ +#define RTPROT_BGP 186 /* BGP Routes */ +#define RTPROT_ISIS 187 /* ISIS Routes */ +#define RTPROT_OSPF 188 /* OSPF Routes */ +#define RTPROT_RIP 189 /* RIP Routes */ +#define RTPROT_EIGRP 192 /* EIGRP Routes */ /* rtm_scope From 13fdc4193c2f9325cfa11bccd305e9ad40226f27 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Tue, 23 Jun 2020 23:13:01 +0900 Subject: [PATCH 0228/2056] mlxsw: spectrum_dcb: Fix a spelling typo in spectrum_dcb.c This patch fixes a spelling typo in spectrum_dcb.c Signed-off-by: Masanari Iida Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 49a72a8f1f57..f8e3d635b9e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -138,7 +138,7 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc, ets->prio_tc); if (err) - netdev_warn(dev, "Failed to remove ununsed PGs\n"); + netdev_warn(dev, "Failed to remove unused PGs\n"); return 0; From bdb7b79b4ce864a724250e1d35948c46f135de36 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 22 Jun 2020 20:22:21 -0700 Subject: [PATCH 0229/2056] bpf: Switch most helper return values from 32-bit int to 64-bit long Switch most of BPF helper definitions from returning int to long. These definitions are coming from comments in BPF UAPI header and are used to generate bpf_helper_defs.h (under libbpf) to be later included and used from BPF programs. In actual in-kernel implementation, all the helpers are defined as returning u64, but due to some historical reasons, most of them are actually defined as returning int in UAPI (usually, to return 0 on success, and negative value on error). This actually causes Clang to quite often generate sub-optimal code, because compiler believes that return value is 32-bit, and in a lot of cases has to be up-converted (usually with a pair of 32-bit bit shifts) to 64-bit values, before they can be used further in BPF code. 
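The correctness half of the argument, spelled out further below, comes down to two's complement: for the value ranges these helpers actually produce -- negative errnos and non-negative sizes with a 32-bit absolute value -- truncating a 64-bit register to 32 bits and widening it again is lossless. A small host-side C program, illustrative only and neither part of the patch nor BPF bytecode, makes that concrete:

/*
 * Host-side illustration (plain C, not BPF): values within the helpers'
 * 32-bit absolute-value contract survive a 64 -> 32 -> 64 bit round trip,
 * which is what makes the int -> long redefinition safe for callers that
 * keep casting the result to int/u32.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int64_t roundtrip_signed(int64_t v)
{
	return (int64_t)(int32_t)v;	/* truncate to 32 bits, sign-extend back */
}

static uint64_t roundtrip_unsigned(uint64_t v)
{
	return (uint64_t)(uint32_t)v;	/* truncate to 32 bits, zero-extend back */
}

int main(void)
{
	int64_t err = -14;	/* e.g. -EFAULT from a failed probe read */
	int64_t len = 256;	/* e.g. bytes copied by a *_str() helper */

	assert(roundtrip_signed(err) == err);
	assert(roundtrip_signed(len) == len);
	assert(roundtrip_unsigned((uint64_t)len) == (uint64_t)len);

	/* Values outside the 32-bit range would not survive, but the
	 * helpers' contract rules that case out. */
	assert(roundtrip_signed(INT64_C(1) << 40) != (INT64_C(1) << 40));

	printf("errno %lld and length %lld round-trip through 32 bits intact\n",
	       (long long)err, (long long)len);
	return 0;
}

All assertions hold on the two's complement targets BPF runs on, independent of ALU32 vs no-ALU32 code generation.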
Besides just "polluting" the code, these 32-bit shifts quite often cause problems for cases in which the return value matters. This is especially the case for the family of bpf_probe_read_str() functions. There are a few other similar helpers (e.g., bpf_read_branch_records()) in which the return value is used by BPF program logic to record variable-length data and process it. For such cases, BPF program logic carefully manages offsets within some array or map to read variable-length data. For such uses, it's crucial for the BPF verifier to track the possible range of register values to prove that all the accesses happen within the given memory bounds. Those extraneous zero-extending bit shifts, inserted by Clang (and quite often interleaved with other code, which makes the issues even more challenging and sometimes requires employing extra per-variable compiler barriers), throw off the verifier logic and make it mark registers as having an unknown variable offset. We'll study this pattern a bit later below.

Another common pattern is to check the return value of a BPF helper for a non-zero state to detect error conditions and attempt alternative actions in such a case. Even in this simple and straightforward case, the 32-bit vs BPF's native 64-bit mismatch quite often leads to sub-optimal and unnecessary extra code. We'll look at this pattern as well.

Clang's BPF target supports two modes of code generation: ALU32, in which it is capable of using the lower 32-bit parts of registers, and no-ALU32, in which only full 64-bit registers are used. ALU32 mode somewhat mitigates the problems described above, but not in all cases.

This patch switches all the cases in which BPF helpers return 0 or a negative error from returning int to returning long. It is shown below that such a change in definition leads to equivalent or better code. No-ALU32 mode benefits more, but ALU32 mode doesn't degrade and in places still gets improved code generation.

Another class of cases switched from int to long is the bpf_probe_read_str()-like helpers, which encode the successful case as non-negative values while still returning a negative value for errors. In all such cases, correctness is preserved thanks to the two's complement encoding of negative values and the fact that all helpers return values with a 32-bit absolute value. Two's complement ensures that for negative values the upper 32 bits are all ones and, when truncated, leave a valid negative 32-bit value with the same value. Non-negative values have the upper 32 bits set to zero and similarly preserve their value when the high 32 bits are truncated. This means that simply casting to int/u32 is correct and efficient (and in ALU32 mode doesn't require any extra shifts).

To minimize the chances of regressions, two code patterns were investigated, as mentioned above. For both patterns, BPF assembly was analyzed in ALU32/NO-ALU32 compiler modes, both with the current 32-bit int return type and the new 64-bit long return type.

Case 1. Variable-length data reading and concatenation. This is quite a ubiquitous pattern in tracing/monitoring applications: reading data like a process's environment variables, file path, etc. In such cases, many pieces of string-like variable-length data are read into a single big buffer, and at the end of the process only the part of the array containing actual data is sent to user-space for further processing. This case is tested in the test_varlen.c selftest (in the next patch).
Code flow is roughly as follows: void *payload = &sample->payload; u64 len; len = bpf_probe_read_kernel_str(payload, MAX_SZ1, &source_data1); if (len <= MAX_SZ1) { payload += len; sample->len1 = len; } len = bpf_probe_read_kernel_str(payload, MAX_SZ2, &source_data2); if (len <= MAX_SZ2) { payload += len; sample->len2 = len; } /* and so on */ sample->total_len = payload - &sample->payload; /* send over, e.g., perf buffer */ There could be two variations with slightly different code generated: when len is 64-bit integer and when it is 32-bit integer. Both variations were analysed. BPF assembly instructions between two successive invocations of bpf_probe_read_kernel_str() were used to check code regressions. Results are below, followed by short analysis. Left side is using helpers with int return type, the right one is after the switch to long. ALU32 + INT ALU32 + LONG =========== ============ 64-BIT (13 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +9 18: if r0 > 256 goto +6 19: w1 = w0 19: r1 = 0 ll 20: r1 <<= 32 21: *(u64 *)(r1 + 0) = r0 21: r1 s>>= 32 22: r6 = 0 ll 22: r2 = 0 ll 24: r6 += r0 24: *(u64 *)(r2 + 0) = r1 00000000000000c8 : 25: r6 = 0 ll 25: r1 = r6 27: r6 += r1 26: w2 = 256 00000000000000e0 : 27: r3 = 0 ll 28: r1 = r6 29: call 115 29: w2 = 256 30: r3 = 0 ll 32: call 115 32-BIT (11 insns): 32-BIT (12 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: if w0 > 256 goto +7 18: if w0 > 256 goto +8 19: r1 = 0 ll 19: r1 = 0 ll 21: *(u32 *)(r1 + 0) = r0 21: *(u32 *)(r1 + 0) = r0 22: w1 = w0 22: r0 <<= 32 23: r6 = 0 ll 23: r0 >>= 32 25: r6 += r1 24: r6 = 0 ll 00000000000000d0 : 26: r6 += r0 26: r1 = r6 00000000000000d8 : 27: w2 = 256 27: r1 = r6 28: r3 = 0 ll 28: w2 = 256 30: call 115 29: r3 = 0 ll 31: call 115 In ALU32 mode, the variant using 64-bit length variable clearly wins and avoids unnecessary zero-extension bit shifts. In practice, this is even more important and good, because BPF code won't need to do extra checks to "prove" that payload/len are within good bounds. 32-bit len is one instruction longer. Clang decided to do 64-to-32 casting with two bit shifts, instead of equivalent `w1 = w0` assignment. The former uses extra register. The latter might potentially lose some range information, but not for 32-bit value. So in this case, verifier infers that r0 is [0, 256] after check at 18:, and shifting 32 bits left/right keeps that range intact. We should probably look into Clang's logic and see why it chooses bitshifts over sub-register assignments for this. 
NO-ALU32 + INT NO-ALU32 + LONG ============== =============== 64-BIT (14 insns): 64-BIT (10 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r0 <<= 32 18: if r0 > 256 goto +6 19: r1 = r0 19: r1 = 0 ll 20: r1 >>= 32 21: *(u64 *)(r1 + 0) = r0 21: if r1 > 256 goto +7 22: r6 = 0 ll 22: r0 s>>= 32 24: r6 += r0 23: r1 = 0 ll 00000000000000c8 : 25: *(u64 *)(r1 + 0) = r0 25: r1 = r6 26: r6 = 0 ll 26: r2 = 256 28: r6 += r0 27: r3 = 0 ll 00000000000000e8 : 29: call 115 29: r1 = r6 30: r2 = 256 31: r3 = 0 ll 33: call 115 32-BIT (13 insns): 32-BIT (13 insns): ------------------------------------ ------------------------------------ 17: call 115 17: call 115 18: r1 = r0 18: r1 = r0 19: r1 <<= 32 19: r1 <<= 32 20: r1 >>= 32 20: r1 >>= 32 21: if r1 > 256 goto +6 21: if r1 > 256 goto +6 22: r2 = 0 ll 22: r2 = 0 ll 24: *(u32 *)(r2 + 0) = r0 24: *(u32 *)(r2 + 0) = r0 25: r6 = 0 ll 25: r6 = 0 ll 27: r6 += r1 27: r6 += r1 00000000000000e0 : 00000000000000e0 : 28: r1 = r6 28: r1 = r6 29: r2 = 256 29: r2 = 256 30: r3 = 0 ll 30: r3 = 0 ll 32: call 115 32: call 115 In NO-ALU32 mode, for the case of 64-bit len variable, Clang generates much superior code, as expected, eliminating unnecessary bit shifts. For 32-bit len, code is identical. So overall, only ALU-32 32-bit len case is more-or-less equivalent and the difference stems from internal Clang decision, rather than compiler lacking enough information about types. Case 2. Let's look at the simpler case of checking return result of BPF helper for errors. The code is very simple: long bla; if (bpf_probe_read_kenerl(&bla, sizeof(bla), 0)) return 1; else return 0; ALU32 + CHECK (9 insns) ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: w2 = 8 2: w2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: w1 = w0 5: r1 = r0 6: w0 = 1 6: w0 = 1 7: if w1 != 0 goto +1 7: if r1 != 0 goto +1 8: w0 = 0 8: w0 = 0 0000000000000048 : 0000000000000048 : 9: exit 9: exit Almost identical code, the only difference is the use of full register assignment (r1 = r0) vs half-registers (w1 = w0) in instruction #5. On 32-bit architectures, new BPF assembly might be slightly less optimal, in theory. But one can argue that's not a big issue, given that use of full registers is still prevalent (e.g., for parameter passing). NO-ALU32 + CHECK (11 insns) NO-ALU32 + CHECK (9 insns) ==================================== ==================================== 0: r1 = r10 0: r1 = r10 1: r1 += -8 1: r1 += -8 2: r2 = 8 2: r2 = 8 3: r3 = 0 3: r3 = 0 4: call 113 4: call 113 5: r1 = r0 5: r1 = r0 6: r1 <<= 32 6: r0 = 1 7: r1 >>= 32 7: if r1 != 0 goto +1 8: r0 = 1 8: r0 = 0 9: if r1 != 0 goto +1 0000000000000048 : 10: r0 = 0 9: exit 0000000000000058 : 11: exit NO-ALU32 is a clear improvement, getting rid of unnecessary zero-extension bit shifts. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200623032224.4020118-1-andriin@fb.com --- include/uapi/linux/bpf.h | 192 ++++++++++++++++----------------- tools/include/uapi/linux/bpf.h | 192 ++++++++++++++++----------------- 2 files changed, 192 insertions(+), 192 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. 
* - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. 
* - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. * - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. 
* - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. 
* - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. 
* - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. @@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. 
* - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. 
* If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. @@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. 
This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. 
Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. 
* - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. * - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. 
* Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. * - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 19684813faae..be0efee49093 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -653,7 +653,7 @@ union bpf_attr { * Map value associated to *key*, or **NULL** if no entry was * found. * - * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) + * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) * Description * Add or update the value of the entry associated to *key* in * *map* with *value*. *flags* is one of: @@ -671,13 +671,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. 
* - * int bpf_map_delete_elem(struct bpf_map *map, const void *key) + * long bpf_map_delete_elem(struct bpf_map *map, const void *key) * Description * Delete entry with *key* from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. @@ -695,7 +695,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) + * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...) * Description * This helper is a "printk()-like" facility for debugging. It * prints a message defined by format *fmt* (of size *fmt_size*) @@ -775,7 +775,7 @@ union bpf_attr { * Return * The SMP id of the processor running the program. * - * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) + * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. *flags* are a combination of @@ -792,7 +792,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) + * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) * Description * Recompute the layer 3 (e.g. IP) checksum for the packet * associated to *skb*. Computation is incremental, so the helper @@ -817,7 +817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) + * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) * Description * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the * packet associated to *skb*. Computation is incremental, so the @@ -849,7 +849,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) + * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) * Description * This special helper is used to trigger a "tail call", or in * other words, to jump into another eBPF program. The same stack @@ -880,7 +880,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) + * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) * Description * Clone and redirect the packet associated to *skb* to another * net device of index *ifindex*. Both ingress and egress @@ -916,7 +916,7 @@ union bpf_attr { * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. * - * int bpf_get_current_comm(void *buf, u32 size_of_buf) + * long bpf_get_current_comm(void *buf, u32 size_of_buf) * Description * Copy the **comm** attribute of the current task into *buf* of * *size_of_buf*. The **comm** attribute contains the name of @@ -953,7 +953,7 @@ union bpf_attr { * Return * The classid, or 0 for the default unconfigured classid. 
* - * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) + * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) * Description * Push a *vlan_tci* (VLAN tag control information) of protocol * *vlan_proto* to the packet associated to *skb*, then update @@ -969,7 +969,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_vlan_pop(struct sk_buff *skb) + * long bpf_skb_vlan_pop(struct sk_buff *skb) * Description * Pop a VLAN header from the packet associated to *skb*. * @@ -981,7 +981,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Get tunnel metadata. This helper takes a pointer *key* to an * empty **struct bpf_tunnel_key** of **size**, that will be @@ -1032,7 +1032,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) + * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) * Description * Populate tunnel metadata for packet associated to *skb.* The * tunnel metadata is set to the contents of *key*, of *size*. The @@ -1098,7 +1098,7 @@ union bpf_attr { * The value of the perf event counter read from the map, or a * negative error code in case of failure. * - * int bpf_redirect(u32 ifindex, u64 flags) + * long bpf_redirect(u32 ifindex, u64 flags) * Description * Redirect the packet to another net device of index *ifindex*. * This helper is somewhat similar to **bpf_clone_redirect**\ @@ -1145,7 +1145,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1190,7 +1190,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) + * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len) * Description * This helper was provided as an easy way to load data from a * packet. It can be used to load *len* bytes from *offset* from @@ -1207,7 +1207,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) + * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1276,7 +1276,7 @@ union bpf_attr { * The checksum result, or a negative error code in case of * failure. 
* - * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Retrieve tunnel options metadata for the packet associated to * *skb*, and store the raw tunnel option data to the buffer *opt* @@ -1294,7 +1294,7 @@ union bpf_attr { * Return * The size of the option data retrieved. * - * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) + * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size) * Description * Set tunnel options metadata for the packet associated to *skb* * to the option data contained in the raw buffer *opt* of *size*. @@ -1304,7 +1304,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) + * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) * Description * Change the protocol of the *skb* to *proto*. Currently * supported are transition from IPv4 to IPv6, and from IPv6 to @@ -1331,7 +1331,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_change_type(struct sk_buff *skb, u32 type) + * long bpf_skb_change_type(struct sk_buff *skb, u32 type) * Description * Change the packet type for the packet associated to *skb*. This * comes down to setting *skb*\ **->pkt_type** to *type*, except @@ -1358,7 +1358,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) + * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) * Description * Check whether *skb* is a descendant of the cgroup2 held by * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. @@ -1389,7 +1389,7 @@ union bpf_attr { * Return * A pointer to the current task struct. * - * int bpf_probe_write_user(void *dst, const void *src, u32 len) + * long bpf_probe_write_user(void *dst, const void *src, u32 len) * Description * Attempt in a safe way to write *len* bytes from the buffer * *src* to *dst* in memory. It only works for threads that are in @@ -1408,7 +1408,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) + * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) * Description * Check whether the probe is being run is the context of a given * subset of the cgroup2 hierarchy. The cgroup2 to test is held by @@ -1420,7 +1420,7 @@ union bpf_attr { * * 1, if the *skb* task does not belong to the cgroup2. * * A negative error code, if an error occurred. * - * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) * Description * Resize (trim or grow) the packet associated to *skb* to the * new *len*. The *flags* are reserved for future usage, and must @@ -1444,7 +1444,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) + * long bpf_skb_pull_data(struct sk_buff *skb, u32 len) * Description * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes @@ -1500,7 +1500,7 @@ union bpf_attr { * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. 
* - * int bpf_get_numa_node_id(void) + * long bpf_get_numa_node_id(void) * Description * Return the id of the current NUMA node. The primary use case * for this helper is the selection of sockets for the local NUMA @@ -1511,7 +1511,7 @@ union bpf_attr { * Return * The id of current NUMA node. * - * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) + * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) * Description * Grows headroom of packet associated to *skb* and adjusts the * offset of the MAC header accordingly, adding *len* bytes of @@ -1532,7 +1532,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that * it is possible to use a negative value for *delta*. This helper @@ -1547,7 +1547,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for @@ -1595,14 +1595,14 @@ union bpf_attr { * is returned (note that **overflowuid** might also be the actual * UID value for the socket). * - * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) + * long bpf_set_hash(struct sk_buff *skb, u32 hash) * Description * Set the full hash for *skb* (set the field *skb*\ **->hash**) * to value *hash*. * Return * 0 * - * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1630,7 +1630,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) + * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) * Description * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. @@ -1676,7 +1676,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) + * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the endpoint referenced by *map* at * index *key*. Depending on its type, this *map* can contain @@ -1697,7 +1697,7 @@ union bpf_attr { * **XDP_REDIRECT** on success, or the value of the two lower bits * of the *flags* argument on error. * - * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) + * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description * Redirect the packet to the socket referenced by *map* (of type * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and @@ -1708,7 +1708,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. 
* - * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a *map* referencing sockets. The * *skops* is used as a new value for the entry associated to @@ -1727,7 +1727,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) * Description * Adjust the address pointed by *xdp_md*\ **->data_meta** by * *delta* (which can be positive or negative). Note that this @@ -1756,7 +1756,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) * Description * Read the value of a perf event counter, and store it into *buf* * of size *buf_size*. This helper relies on a *map* of type @@ -1806,7 +1806,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) + * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) * Description * For en eBPF program attached to a perf event, retrieve the * value of the event counter associated to *ctx* and store it in @@ -1817,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) + * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1842,7 +1842,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_regs *regs, u64 rc) + * long bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. @@ -1867,7 +1867,7 @@ union bpf_attr { * Return * 0 * - * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) + * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) * Description * Attempt to set the value of the **bpf_sock_ops_cb_flags** field * for the full TCP socket associated to *bpf_sock_ops* to @@ -1911,7 +1911,7 @@ union bpf_attr { * be set is returned (which comes down to 0 if all bits were set * as required). * - * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) + * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -1925,7 +1925,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. 
* - * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, apply the verdict of the eBPF program to * the next *bytes* (number of bytes) of message *msg*. @@ -1959,7 +1959,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) + * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) * Description * For socket policies, prevent the execution of the verdict eBPF * program for message *msg* until *bytes* (byte number) have been @@ -1977,7 +1977,7 @@ union bpf_attr { * Return * 0 * - * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) + * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) * Description * For socket policies, pull in non-linear data from user space * for *msg* and set pointers *msg*\ **->data** and *msg*\ @@ -2008,7 +2008,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) + * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) * Description * Bind the socket associated to *ctx* to the address pointed by * *addr*, of length *addr_len*. This allows for making outgoing @@ -2026,7 +2026,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) + * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is * possible to both shrink and grow the packet tail. @@ -2040,7 +2040,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) + * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) * Description * Retrieve the XFRM state (IP transform framework, see also * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. @@ -2056,7 +2056,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) + * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags) * Description * Return a user or a kernel stack in bpf program provided buffer. * To achieve this, the helper needs *ctx*, which is a pointer @@ -2089,7 +2089,7 @@ union bpf_attr { * A non-negative value equal to or less than *size* on success, * or a negative error in case of failure. * - * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) + * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description * This helper is similar to **bpf_skb_load_bytes**\ () in that * it provides an easy way to load *len* bytes from *offset* @@ -2111,7 +2111,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) + * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) * Description * Do FIB lookup in kernel tables using parameters in *params*. 
* If lookup is successful and result shows packet is to be @@ -2142,7 +2142,7 @@ union bpf_attr { * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the * packet is not forwarded or needs assist from full stack * - * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) + * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) * Description * Add an entry to, or update a sockhash *map* referencing sockets. * The *skops* is used as a new value for the entry associated to @@ -2161,7 +2161,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) + * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * socket level. If the message *msg* is allowed to pass (i.e. if @@ -2175,7 +2175,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) * Description * This helper is used in programs implementing policies at the * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. @@ -2189,7 +2189,7 @@ union bpf_attr { * Return * **SK_PASS** on success, or **SK_DROP** on error. * - * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) + * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) * Description * Encapsulate the packet associated to *skb* within a Layer 3 * protocol header. This header is provided in the buffer at @@ -2226,7 +2226,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) + * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) * Description * Store *len* bytes from address *from* into the packet * associated to *skb*, at *offset*. Only the flags, tag and TLVs @@ -2241,7 +2241,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) + * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) * Description * Adjust the size allocated to TLVs in the outermost IPv6 * Segment Routing Header contained in the packet associated to @@ -2257,7 +2257,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) + * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) * Description * Apply an IPv6 Segment Routing action of type *action* to the * packet associated to *skb*. Each action takes a parameter @@ -2286,7 +2286,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_repeat(void *ctx) + * long bpf_rc_repeat(void *ctx) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded repeat key message. 
This delays @@ -2305,7 +2305,7 @@ union bpf_attr { * Return * 0 * - * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) + * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded key press with *scancode*, @@ -2370,7 +2370,7 @@ union bpf_attr { * Return * A pointer to the local storage area. * - * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) * Description * Select a **SO_REUSEPORT** socket from a * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. @@ -2471,7 +2471,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_sk_release(struct bpf_sock *sock) + * long bpf_sk_release(struct bpf_sock *sock) * Description * Release the reference held by *sock*. *sock* must be a * non-**NULL** pointer that was returned from @@ -2479,7 +2479,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) + * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) * Description * Push an element *value* in *map*. *flags* is one of: * @@ -2489,19 +2489,19 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_pop_elem(struct bpf_map *map, void *value) + * long bpf_map_pop_elem(struct bpf_map *map, void *value) * Description * Pop an element from *map*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_map_peek_elem(struct bpf_map *map, void *value) + * long bpf_map_peek_elem(struct bpf_map *map, void *value) * Description * Get an element from *map* without removing it. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * For socket policies, insert *len* bytes into *msg* at offset * *start*. @@ -2517,7 +2517,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) + * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags) * Description * Will remove *len* bytes from a *msg* starting at byte *start*. * This may result in **ENOMEM** errors under certain situations if @@ -2529,7 +2529,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) + * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) * Description * This helper is used in programs implementing IR decoding, to * report a successfully decoded pointer movement. @@ -2543,7 +2543,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_lock(struct bpf_spin_lock *lock) + * long bpf_spin_lock(struct bpf_spin_lock *lock) * Description * Acquire a spinlock represented by the pointer *lock*, which is * stored as part of a value of a map. 
Taking the lock allows to @@ -2591,7 +2591,7 @@ union bpf_attr { * Return * 0 * - * int bpf_spin_unlock(struct bpf_spin_lock *lock) + * long bpf_spin_unlock(struct bpf_spin_lock *lock) * Description * Release the *lock* previously locked by a call to * **bpf_spin_lock**\ (\ *lock*\ ). @@ -2614,7 +2614,7 @@ union bpf_attr { * A **struct bpf_tcp_sock** pointer on success, or **NULL** in * case of failure. * - * int bpf_skb_ecn_set_ce(struct sk_buff *skb) + * long bpf_skb_ecn_set_ce(struct sk_buff *skb) * Description * Set ECN (Explicit Congestion Notification) field of IP header * to **CE** (Congestion Encountered) if current value is **ECT** @@ -2651,7 +2651,7 @@ union bpf_attr { * result is from *reuse*\ **->socks**\ [] using the hash of the * tuple. * - * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) * Description * Check whether *iph* and *th* contain a valid SYN cookie ACK for * the listening socket in *sk*. @@ -2666,7 +2666,7 @@ union bpf_attr { * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. * - * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) + * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) * Description * Get name of sysctl in /proc/sys/ and copy it into provided by * program buffer *buf* of size *buf_len*. @@ -2682,7 +2682,7 @@ union bpf_attr { * **-E2BIG** if the buffer wasn't big enough (*buf* will contain * truncated name in this case). * - * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get current value of sysctl as it is presented in /proc/sys * (incl. newline, etc), and copy it as a string into provided @@ -2701,7 +2701,7 @@ union bpf_attr { * **-EINVAL** if current value was unavailable, e.g. because * sysctl is uninitialized and read returns -EIO for it. * - * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) + * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) * Description * Get new value being written by user space to sysctl (before * the actual write happens) and copy it as a string into @@ -2718,7 +2718,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) + * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) * Description * Override new value being written by user space to sysctl with * value provided by program in buffer *buf* of size *buf_len*. @@ -2735,7 +2735,7 @@ union bpf_attr { * * **-EINVAL** if sysctl is being read. * - * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) + * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to a long integer according to the given base @@ -2759,7 +2759,7 @@ union bpf_attr { * * **-ERANGE** if resulting value was out of range. 
* - * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) + * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) * Description * Convert the initial part of the string from buffer *buf* of * size *buf_len* to an unsigned long integer according to the @@ -2810,7 +2810,7 @@ union bpf_attr { * **NULL** if not found or there was an error in adding * a new bpf-local-storage. * - * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) + * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) * Description * Delete a bpf-local-storage from a *sk*. * Return @@ -2818,7 +2818,7 @@ union bpf_attr { * * **-ENOENT** if the bpf-local-storage cannot be found. * - * int bpf_send_signal(u32 sig) + * long bpf_send_signal(u32 sig) * Description * Send signal *sig* to the process of the current task. * The signal may be delivered to any of this process's threads. @@ -2859,7 +2859,7 @@ union bpf_attr { * * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 * - * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -2883,21 +2883,21 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from user space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) * Description * Safely attempt to read *size* bytes from kernel space address * *unsafe_ptr* and store the data in *dst*. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe user address * *unsafe_ptr* to *dst*. The *size* should include the @@ -2941,7 +2941,7 @@ union bpf_attr { * including the trailing NUL character. On error, a negative * value. * - * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. @@ -2949,14 +2949,14 @@ union bpf_attr { * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * - * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) + * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. * - * int bpf_send_signal_thread(u32 sig) + * long bpf_send_signal_thread(u32 sig) * Description * Send signal *sig* to the thread corresponding to the current task. 
* Return @@ -2976,7 +2976,7 @@ union bpf_attr { * Return * The 64 bit jiffies * - * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) + * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the * branch records (**struct perf_branch_entry**) associated to *ctx* @@ -2995,7 +2995,7 @@ union bpf_attr { * * **-ENOENT** if architecture does not support branch records. * - * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) + * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size) * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. @@ -3007,7 +3007,7 @@ union bpf_attr { * * **-ENOENT** if pidns does not exists for the current task. * - * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -3062,7 +3062,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) + * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, @@ -3097,7 +3097,7 @@ union bpf_attr { * Return * Current *ktime*. * - * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) * Description * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print * out the format string. @@ -3126,7 +3126,7 @@ union bpf_attr { * * **-EOVERFLOW** if an overflow happened: The same object will be tried again. * - * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * long bpf_seq_write(struct seq_file *m, const void *data, u32 len) * Description * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. * The *m* represents the seq_file. The *data* and *len* represent the @@ -3221,7 +3221,7 @@ union bpf_attr { * Return * Requested value, or 0, if flags are not recognized. * - * int bpf_csum_level(struct sk_buff *skb, u64 level) + * long bpf_csum_level(struct sk_buff *skb, u64 level) * Description * Change the skbs checksum level by one layer up or down, or * reset it entirely to none in order to have the stack perform From 5e85c6bb8e74bd9daa4f5815da373d4ac2cb1a35 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 22 Jun 2020 20:22:22 -0700 Subject: [PATCH 0230/2056] selftests/bpf: Add variable-length data concatenation pattern test Add selftest that validates variable-length data reading and concatentation with one big shared data array. This is a common pattern in production use for monitoring and tracing applications, that potentially can read a lot of data, but overall read much less. Such pattern allows to determine precisely what amount of data needs to be sent over perfbuf/ringbuf and maximize efficiency. 
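
To make the pattern concrete, a minimal producer-side sketch might look like the following. The BPF_MAP_TYPE_RINGBUF map rb, the src1/src2 source buffers and the program name are illustrative assumptions, not part of this patch; only MAX_LEN and the read-and-advance loop mirror the selftest added below. The point is the last step: exactly total bytes are submitted, not sizeof(scratch).

/* Illustrative sketch only: concatenate two variable-length strings into one
 * scratch buffer, then ship exactly 'total' bytes through a ring buffer.
 * (A real program would also filter by pid, as the selftest below does.)
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#define MAX_LEN 256

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

char src1[MAX_LEN] = {};
char src2[MAX_LEN] = {};
char scratch[2 * MAX_LEN] = {};

SEC("raw_tp/sys_enter")
int send_varlen(void *ctx)
{
	void *p = scratch;
	long n, total;

	n = bpf_probe_read_kernel_str(p, MAX_LEN, src1);
	if (n < 0)
		return 0;
	p += n;

	n = bpf_probe_read_kernel_str(p, MAX_LEN, src2);
	if (n < 0)
		return 0;
	p += n;

	total = p - (void *)scratch;
	/* Bounds are already tracked by the verifier; submit only the bytes
	 * actually gathered instead of the full buffer.
	 */
	if (total > 0 && total <= sizeof(scratch))
		bpf_ringbuf_output(&rb, scratch, total, 0);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";

The selftests that follow exercise the same read-and-advance loop but validate the gathered contents from user space rather than shipping them through a ring buffer.
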
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200623032224.4020118-2-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/varlen.c | 56 +++++++++++ .../testing/selftests/bpf/progs/test_varlen.c | 96 +++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/varlen.c create mode 100644 tools/testing/selftests/bpf/progs/test_varlen.c diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c new file mode 100644 index 000000000000..7533565e096d --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include +#include +#include "test_varlen.skel.h" + +#define CHECK_VAL(got, exp) \ + CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \ + (long)(got), (long)(exp)) + +void test_varlen(void) +{ + int duration = 0, err; + struct test_varlen* skel; + struct test_varlen__bss *bss; + struct test_varlen__data *data; + const char str1[] = "Hello, "; + const char str2[] = "World!"; + const char exp_str[] = "Hello, \0World!\0"; + const int size1 = sizeof(str1); + const int size2 = sizeof(str2); + + skel = test_varlen__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + data = skel->data; + + err = test_varlen__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + bss->test_pid = getpid(); + + /* trigger everything */ + memcpy(bss->buf_in1, str1, size1); + memcpy(bss->buf_in2, str2, size2); + bss->capture = true; + usleep(1); + bss->capture = false; + + CHECK_VAL(bss->payload1_len1, size1); + CHECK_VAL(bss->payload1_len2, size2); + CHECK_VAL(bss->total1, size1 + size2); + CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check", + "doesn't match!"); + + CHECK_VAL(data->payload2_len1, size1); + CHECK_VAL(data->payload2_len2, size2); + CHECK_VAL(data->total2, size1 + size2); + CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", + "doesn't match!"); +cleanup: + test_varlen__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c new file mode 100644 index 000000000000..09691852debf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_varlen.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include +#include +#include + +#define MAX_LEN 256 + +char buf_in1[MAX_LEN] = {}; +char buf_in2[MAX_LEN] = {}; + +int test_pid = 0; +bool capture = false; + +/* .bss */ +long payload1_len1 = 0; +long payload1_len2 = 0; +long total1 = 0; +char payload1[MAX_LEN + MAX_LEN] = {}; + +/* .data */ +int payload2_len1 = -1; +int payload2_len2 = -1; +int total2 = -1; +char payload2[MAX_LEN + MAX_LEN] = { 1 }; + +SEC("raw_tp/sys_enter") +int handler64(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload1; + u64 len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len <= MAX_LEN) { + payload += len; + payload1_len1 = len; + } + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len <= MAX_LEN) { + payload += len; + payload1_len2 = len; + } + + total1 = payload - (void *)payload1; + + return 0; +} + +SEC("tp_btf/sys_enter") 
+int handler32(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload2; + u32 len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len <= MAX_LEN) { + payload += len; + payload2_len1 = len; + } + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len <= MAX_LEN) { + payload += len; + payload2_len2 = len; + } + + total2 = payload - (void *)payload2; + + return 0; +} + +SEC("tp_btf/sys_exit") +int handler_exit(void *regs) +{ + long bla; + + if (bpf_probe_read_kernel(&bla, sizeof(bla), 0)) + return 1; + else + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; From 2fde1747c986cac28fa66d0cffd7577db042640b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 22 Jun 2020 20:22:23 -0700 Subject: [PATCH 0231/2056] selftests/bpf: Add variable-length data concat pattern less than test Extend original variable-length tests with a case to catch a common existing pattern of testing for < 0 for errors. Note because verifier also tracks upper bounds and we know it can not be greater than MAX_LEN here we can skip upper bound check. In ALU64 enabled compilation converting from long->int return types in probe helpers results in extra instruction pattern, <<= 32, s >>= 32. The trade-off is the non-ALU64 case works. If you really care about every extra insn (XDP case?) then you probably should be using original int type. In addition adding a sext insn to bpf might help the verifier in the general case to avoid these types of tricks. Signed-off-by: John Fastabend Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200623032224.4020118-3-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/varlen.c | 12 ++++ .../testing/selftests/bpf/progs/test_varlen.c | 70 +++++++++++++++++-- 2 files changed, 78 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c index 7533565e096d..c75525eab02c 100644 --- a/tools/testing/selftests/bpf/prog_tests/varlen.c +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -51,6 +51,18 @@ void test_varlen(void) CHECK_VAL(data->total2, size1 + size2); CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", "doesn't match!"); + + CHECK_VAL(data->payload3_len1, size1); + CHECK_VAL(data->payload3_len2, size2); + CHECK_VAL(data->total3, size1 + size2); + CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", + "doesn't match!"); + + CHECK_VAL(data->payload4_len1, size1); + CHECK_VAL(data->payload4_len2, size2); + CHECK_VAL(data->total4, size1 + size2); + CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check", + "doesn't match!"); cleanup: test_varlen__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c index 09691852debf..cd4b72c55dfe 100644 --- a/tools/testing/selftests/bpf/progs/test_varlen.c +++ b/tools/testing/selftests/bpf/progs/test_varlen.c @@ -26,8 +26,18 @@ int payload2_len2 = -1; int total2 = -1; char payload2[MAX_LEN + MAX_LEN] = { 1 }; +int payload3_len1 = -1; +int payload3_len2 = -1; +int total3= -1; +char payload3[MAX_LEN + MAX_LEN] = { 1 }; + +int payload4_len1 = -1; +int payload4_len2 = -1; +int total4= -1; +char payload4[MAX_LEN + MAX_LEN] = { 1 }; + SEC("raw_tp/sys_enter") -int handler64(void *regs) +int handler64_unsigned(void *regs) { int pid = 
bpf_get_current_pid_tgid() >> 32; void *payload = payload1; @@ -54,8 +64,34 @@ int handler64(void *regs) return 0; } -SEC("tp_btf/sys_enter") -int handler32(void *regs) +SEC("raw_tp/sys_exit") +int handler64_signed(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload3; + long len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len >= 0) { + payload += len; + payload3_len1 = len; + } + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len >= 0) { + payload += len; + payload3_len2 = len; + } + total3 = payload - (void *)payload3; + + return 0; +} + +SEC("tp/raw_syscalls/sys_enter") +int handler32_unsigned(void *regs) { int pid = bpf_get_current_pid_tgid() >> 32; void *payload = payload2; @@ -82,7 +118,33 @@ int handler32(void *regs) return 0; } -SEC("tp_btf/sys_exit") +SEC("tp/raw_syscalls/sys_exit") +int handler32_signed(void *regs) +{ + int pid = bpf_get_current_pid_tgid() >> 32; + void *payload = payload4; + int len; + + /* ignore irrelevant invocations */ + if (test_pid != pid || !capture) + return 0; + + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]); + if (len >= 0) { + payload += len; + payload4_len1 = len; + } + len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]); + if (len >= 0) { + payload += len; + payload4_len2 = len; + } + total4 = payload - (void *)payload4; + + return 0; +} + +SEC("tp/syscalls/sys_exit_getpid") int handler_exit(void *regs) { long bla; From 9d9d8cc21e3827b89e414f990016836290de3038 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 23 Jun 2020 12:37:10 +0200 Subject: [PATCH 0232/2056] tools, bpftool: Correctly evaluate $(BUILD_BPF_SKELS) in Makefile Currently, if the clang-bpf-co-re feature is not available, the build fails with e.g. CC prog.o prog.c:1462:10: fatal error: profiler.skel.h: No such file or directory 1462 | #include "profiler.skel.h" | ^~~~~~~~~~~~~~~~~ This is due to the fact that the BPFTOOL_WITHOUT_SKELETONS macro is not defined, despite BUILD_BPF_SKELS not being set. Fix this by correctly evaluating $(BUILD_BPF_SKELS) when deciding on whether to add -DBPFTOOL_WITHOUT_SKELETONS to CFLAGS. Fixes: 05aca6da3b5a ("tools/bpftool: Generalize BPF skeleton support and generate vmlinux.h") Signed-off-by: Tobias Klauser Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200623103710.10370-1-tklauser@distanz.ch --- tools/bpf/bpftool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 06f436e8191a..8c6563e56ffc 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -155,7 +155,7 @@ $(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h endif endif -CFLAGS += $(if BUILD_BPF_SKELS,,-DBPFTOOL_WITHOUT_SKELETONS) +CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS) $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $< From e678e9ddea96590888b034e2a7ad1128bf1ea1ea Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:31 -0700 Subject: [PATCH 0233/2056] indirect_call_wrapper: extend indirect wrapper to support up to 4 calls There are many places where 2 annotations are not enough. This patch adds INDIRECT_CALL_3 and INDIRECT_CALL_4 to cover such cases. Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/linux/indirect_call_wrapper.h | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index 00d7e8e919c6..54c02c84906a 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -23,6 +23,16 @@ likely(f == f2) ? f2(__VA_ARGS__) : \ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ }) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE @@ -30,6 +40,8 @@ #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static #endif From 55cced4f813bece01f96256d96f283b9210d19ee Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Tue, 23 Jun 2020 09:42:32 -0700 Subject: [PATCH 0234/2056] ipv6: fib6: avoid indirect calls from fib6_rule_lookup It was reported that a considerable amount of cycles were spent on the expensive indirect calls on fib6_rule_lookup. This patch introduces an inline helper called pol_route_func that uses the indirect_call_wrappers to avoid the indirect calls. This patch saves around 50ns per call. Performance was measured on the receiver by checking the amount of syncookies that server was able to generate under a synflood load. Traffic was generated using trafgen[1] which was pushing around 1Mpps on a single queue. Receiver was using only one rx queue which help to create a bottle neck and make the experiment rx-bounded. These are the syncookies generated over 10s from the different runs: Whithout the patch: TcpExtSyncookiesSent 3553749 0.0 TcpExtSyncookiesSent 3550895 0.0 TcpExtSyncookiesSent 3553845 0.0 TcpExtSyncookiesSent 3541050 0.0 TcpExtSyncookiesSent 3539921 0.0 TcpExtSyncookiesSent 3557659 0.0 TcpExtSyncookiesSent 3526812 0.0 TcpExtSyncookiesSent 3536121 0.0 TcpExtSyncookiesSent 3529963 0.0 TcpExtSyncookiesSent 3536319 0.0 With the patch: TcpExtSyncookiesSent 3611786 0.0 TcpExtSyncookiesSent 3596682 0.0 TcpExtSyncookiesSent 3606878 0.0 TcpExtSyncookiesSent 3599564 0.0 TcpExtSyncookiesSent 3601304 0.0 TcpExtSyncookiesSent 3609249 0.0 TcpExtSyncookiesSent 3617437 0.0 TcpExtSyncookiesSent 3608765 0.0 TcpExtSyncookiesSent 3620205 0.0 TcpExtSyncookiesSent 3601895 0.0 Without the patch the average is 354263 pkt/s or 2822 ns/pkt and with the patch the average is 360738 pkt/s or 2772 ns/pkt which gives an estimate of 50 ns per packet. [1] http://netsniff-ng.org/ Changelog since v1: - Change ordering in the ICW (Paolo Abeni) Cc: Luigi Rizzo Cc: Paolo Abeni Reported-by: Eric Dumazet Signed-off-by: Brian Vazquez Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/net/ip6_fib.h | 36 ++++++++++++++++++++++++++++++++++++ net/ipv6/fib6_rules.c | 9 ++++++--- net/ipv6/ip6_fib.c | 3 ++- net/ipv6/route.c | 8 ++++---- 4 files changed, 48 insertions(+), 8 deletions(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 3f615a29766e..cc8356fd927f 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -19,6 +19,7 @@ #include #include #include +#include #ifdef CONFIG_IPV6_MULTIPLE_TABLES #define FIB6_TABLE_HASHSZ 256 @@ -552,6 +553,41 @@ struct bpf_iter__ipv6_route { }; #endif +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags)); +static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup, + struct net *net, + struct fib6_table *table, + struct flowi6 *fl6, + const struct sk_buff *skb, + int flags) +{ + return INDIRECT_CALL_4(lookup, + ip6_pol_route_output, + ip6_pol_route_input, + ip6_pol_route_lookup, + __ip6_route_redirect, + net, table, fl6, skb, flags); +} + #ifdef CONFIG_IPV6_MULTIPLE_TABLES static inline bool fib6_has_custom_rules(const struct net *net) { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index fafe556d21e0..6053ef851555 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -111,11 +111,13 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, } else { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_local_tbl, fl6, skb, flags); if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb, flags); if (rt->dst.error != -EAGAIN) return &rt->dst; ip6_rt_put_flags(rt, flags); @@ -226,7 +228,8 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp, goto out; } - rt = lookup(net, table, flp6, arg->lookup_data, flags); + rt = pol_lookup_func(lookup, + net, table, flp6, arg->lookup_data, flags); if (rt != net->ipv6.ip6_null_entry) { err = fib6_rule_saddr(net, rule, flags, flp6, ip6_dst_idev(&rt->dst)->dev); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 49ee89bbcba0..25a90f3f705c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -314,7 +314,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, { struct rt6_info *rt; - rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags); + rt = pol_lookup_func(lookup, + net, net->ipv6.fib6_main_tbl, fl6, skb, flags); if (rt->dst.error == -EAGAIN) { ip6_rt_put_flags(rt, flags); rt = net->ipv6.ip6_null_entry; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 82cbb46a2a4f..5852039ca9cf 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1207,7 +1207,7 @@ static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res) return nrt; } 
-static struct rt6_info *ip6_pol_route_lookup(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2274,7 +2274,7 @@ struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, } EXPORT_SYMBOL_GPL(ip6_pol_route); -static struct rt6_info *ip6_pol_route_input(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2465,7 +2465,7 @@ void ip6_route_input(struct sk_buff *skb) &fl6, skb, flags)); } -static struct rt6_info *ip6_pol_route_output(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, @@ -2912,7 +2912,7 @@ struct ip6rd_flowi { struct in6_addr gateway; }; -static struct rt6_info *__ip6_route_redirect(struct net *net, +INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net, struct fib6_table *table, struct flowi6 *fl6, const struct sk_buff *skb, From 69c8a8c543949dbfaa86da830bd5c00a546b5b47 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 23 Jun 2020 22:13:45 +0300 Subject: [PATCH 0235/2056] mlxsw: Bump firmware version to XX.2007.1168 This version comes with fixes to the following problems, among others: - Wrong shaper configuration on Spectrum-1 - Bogus temperature reading on Spectrum-2 - Problems in setting egress buffer size after MTU change on Spectrum-2 Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 7d7ebd99f09e..371c1ae0afb4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -45,8 +45,8 @@ #include "../mlxfw/mlxfw.h" #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 2000 -#define MLXSW_SP1_FWREV_SUBMINOR 2714 +#define MLXSW_SP1_FWREV_MINOR 2007 +#define MLXSW_SP1_FWREV_SUBMINOR 1168 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -62,8 +62,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2" #define MLXSW_SP2_FWREV_MAJOR 29 -#define MLXSW_SP2_FWREV_MINOR 2000 -#define MLXSW_SP2_FWREV_SUBMINOR 2714 +#define MLXSW_SP2_FWREV_MINOR 2007 +#define MLXSW_SP2_FWREV_SUBMINOR 1168 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { .major = MLXSW_SP2_FWREV_MAJOR, From 34639fa3832f8a30560581ee65b00a2bc315e467 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 23 Jun 2020 22:13:46 +0300 Subject: [PATCH 0236/2056] mlxsw: Enforce firmware version for Spectrum-3 In a fashion similar to the other Spectrum systems, enforce a specific firmware version for Spectrum-3 so that the driver and firmware are always in sync with regards to new features. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 371c1ae0afb4..f600e5c6b60d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -76,6 +76,21 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = { "." __stringify(MLXSW_SP2_FWREV_MINOR) \ "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2" +#define MLXSW_SP3_FWREV_MAJOR 30 +#define MLXSW_SP3_FWREV_MINOR 2007 +#define MLXSW_SP3_FWREV_SUBMINOR 1168 + +static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { + .major = MLXSW_SP3_FWREV_MAJOR, + .minor = MLXSW_SP3_FWREV_MINOR, + .subminor = MLXSW_SP3_FWREV_SUBMINOR, +}; + +#define MLXSW_SP3_FW_FILENAME \ + "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \ + "." __stringify(MLXSW_SP3_FWREV_MINOR) \ + "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2" + static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; @@ -4642,6 +4657,8 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev; + mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME; mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops; mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops; mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops; @@ -6333,3 +6350,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table); MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table); MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME); MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME); +MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME); From bdfd2d1fa79acd03e18d1683419572f3682b39fd Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 23 Jun 2020 16:40:01 -0400 Subject: [PATCH 0237/2056] bonding/xfrm: use real_dev instead of slave_dev Rather than requiring every hw crypto capable NIC driver to do a check for slave_dev being set, set real_dev in the xfrm layer and xso init time, and then override it in the bonding driver as needed. Then NIC drivers can always use real_dev, and at the same time, we eliminate the use of a variable name that probably shouldn't have been used in the first place, particularly given recent current events. CC: Boris Pismenny CC: Saeed Mahameed CC: Leon Romanovsky CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org Suggested-by: Saeed Mahameed Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 6 +-- .../net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 47 +++++-------------- .../mellanox/mlx5/core/en_accel/ipsec.c | 10 +--- include/net/xfrm.h | 2 +- net/xfrm/xfrm_device.c | 5 +- 5 files changed, 21 insertions(+), 49 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 90939ccf2a94..4ef99efc37f6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -386,7 +386,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave = rtnl_dereference(bond->curr_active_slave); - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; bond->xs = xs; if (!(slave->dev->xfrmdev_ops @@ -411,7 +411,7 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) if (!slave) return; - xs->xso.slave_dev = slave->dev; + xs->xso.real_dev = slave->dev; if (!(slave->dev->xfrmdev_ops && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { @@ -440,7 +440,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) return false; } - xs->xso.slave_dev = slave_dev; + xs->xso.real_dev = slave_dev; return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index 26b0a58a064d..6516980965a2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -427,14 +427,11 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - if (!xs->aead) { netdev_err(dev, "Unsupported IPsec algorithm\n"); return -EINVAL; @@ -480,9 +477,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; u32 mfval, manc, reg; int num_filters = 4; bool manc_ipv4; @@ -500,12 +497,6 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) #define BMCIP_V6 0x3 #define BMCIP_MASK 0x3 - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - hw = &adapter->hw; - manc = IXGBE_READ_REG(hw, IXGBE_MANC); manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER); mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL); @@ -569,22 +560,15 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs) **/ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; int checked, match, first; u16 sa_idx; int ret; int i; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n", 
xs->id.proto); @@ -761,20 +745,13 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbe_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbe_adapter *adapter; - struct ixgbe_ipsec *ipsec; - struct ixgbe_hw *hw; + struct net_device *dev = xs->xso.real_dev; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct ixgbe_hw *hw = &adapter->hw; u32 zerobuf[4] = {0, 0, 0, 0}; u16 sa_idx; - if (xs->xso.slave_dev) - dev = xs->xso.slave_dev; - - adapter = netdev_priv(dev); - ipsec = adapter->ipsec; - hw = &adapter->hw; - if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { struct rx_sa *rsa; u8 ipi; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 72ad6664bd73..bc55c82b55ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -207,12 +207,9 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) { - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5e_priv *priv; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); if (x->props.aalgo != SADB_AALG_NONE) { @@ -288,15 +285,12 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; - struct net_device *netdev = x->xso.dev; + struct net_device *netdev = x->xso.real_dev; struct mlx5_accel_esp_xfrm_attrs attrs; struct mlx5e_priv *priv; unsigned int sa_handle; int err; - if (x->xso.slave_dev) - netdev = x->xso.slave_dev; - priv = netdev_priv(netdev); err = mlx5e_xfrm_validate_state(x); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e20b2b27ec48..e648c9e6c919 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -127,7 +127,7 @@ struct xfrm_state_walk { struct xfrm_state_offload { struct net_device *dev; - struct net_device *slave_dev; + struct net_device *real_dev; unsigned long offload_handle; unsigned int num_exthdrs; u8 flags; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index b8918fc5248b..7b64bb64c822 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -120,8 +120,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND) return skb; - /* This skb was already validated on the master dev */ - if ((x->xso.dev != dev) && (x->xso.slave_dev == dev)) + /* This skb was already validated on the upper/virtual dev */ + if ((x->xso.dev != dev) && (x->xso.real_dev == dev)) return skb; local_irq_save(flags); @@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, } xso->dev = dev; + xso->real_dev = dev; xso->num_exthdrs = 1; xso->flags = xuo->flags; From 4640338c36af88b74550b995f425921729f0b59f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 23 Jun 2020 23:04:42 +0200 Subject: [PATCH 0238/2056] r8169: rename RTL8125 to RTL8125A Realtek added new members to the RTL8125 chip family, therefore rename RTL8125 to RTL8125a. Then we use the same chip naming as in the r8125 vendor driver. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 24 +++++++++---------- .../net/ethernet/realtek/r8169_phy_config.c | 18 +++++++------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7e2a62a9ed61..c4136740db44 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -145,8 +145,8 @@ static const struct { [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" }, [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, - [RTL_GIGA_MAC_VER_60] = {"RTL8125" }, - [RTL_GIGA_MAC_VER_61] = {"RTL8125", FIRMWARE_8125A_3}, + [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, + [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -2066,7 +2066,7 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp) rtl_eri_set_bits(tp, 0x1b0, 0x0003); } -static void rtl8125_config_eee_mac(struct rtl8169_private *tp) +static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) { r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); @@ -3531,15 +3531,15 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); - rtl8125_config_eee_mac(tp); + rtl8125a_config_eee_mac(tp); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); udelay(10); } -static void rtl_hw_start_8125_1(struct rtl8169_private *tp) +static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) { - static const struct ephy_info e_info_8125_1[] = { + static const struct ephy_info e_info_8125a_1[] = { { 0x01, 0xffff, 0xa812 }, { 0x09, 0xffff, 0x520c }, { 0x04, 0xffff, 0xd000 }, @@ -3571,14 +3571,14 @@ static void rtl_hw_start_8125_1(struct rtl8169_private *tp) /* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8125_1); + rtl_ephy_init(tp, e_info_8125a_1); rtl_hw_start_8125_common(tp); } -static void rtl_hw_start_8125_2(struct rtl8169_private *tp) +static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) { - static const struct ephy_info e_info_8125_2[] = { + static const struct ephy_info e_info_8125a_2[] = { { 0x04, 0xffff, 0xd000 }, { 0x0a, 0xffff, 0x8653 }, { 0x23, 0xffff, 0xab66 }, @@ -3598,7 +3598,7 @@ static void rtl_hw_start_8125_2(struct rtl8169_private *tp) /* disable aspm and clock request before access ephy */ rtl_hw_aspm_clkreq_enable(tp, false); - rtl_ephy_init(tp, e_info_8125_2); + rtl_ephy_init(tp, e_info_8125a_2); rtl_hw_start_8125_common(tp); } @@ -3652,8 +3652,8 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2, [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3, [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, - [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1, - [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2, + [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, + [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, }; if (hw_configs[tp->mac_version]) diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index b73f7d023e99..0cf4893e5624 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -89,7 +89,7 @@ static void rtl8168h_config_eee_phy(struct phy_device *phydev) phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080); } -static void 
rtl8125_config_eee_phy(struct phy_device *phydev) +static void rtl8125a_config_eee_phy(struct phy_device *phydev) { rtl8168h_config_eee_phy(phydev); @@ -1140,8 +1140,8 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } -static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) +static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) { phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084); phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010); @@ -1175,11 +1175,11 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000); rtl8168g_enable_gphy_10m(phydev); - rtl8125_config_eee_phy(phydev); + rtl8125a_config_eee_phy(phydev); } -static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp, - struct phy_device *phydev) +static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) { int i; @@ -1240,7 +1240,7 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp, phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000); rtl8168g_enable_gphy_10m(phydev); - rtl8125_config_eee_phy(phydev); + rtl8125a_config_eee_phy(phydev); } void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, @@ -1300,8 +1300,8 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config, [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config, [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, - [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config, - [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config, + [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, + [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, }; if (phy_configs[ver]) From 243600ee660531d2b5b5ef3faab90c5f8ff4c2b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:11 -0700 Subject: [PATCH 0239/2056] tcp: add declarations to avoid warnings Remove these errors: net/ipv6/tcp_ipv6.c:1550:29: warning: symbol 'tcp_v6_rcv' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1770:30: warning: symbol 'tcp_v6_early_demux' was not declared. Should it be static? net/ipv6/tcp_ipv6.c:1550:29: warning: no previous prototype for 'tcp_v6_rcv' [-Wmissing-prototypes] 1550 | INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) | ^~~~~~~~~~ net/ipv6/tcp_ipv6.c:1770:30: warning: no previous prototype for 'tcp_v6_early_demux' [-Wmissing-prototypes] 1770 | INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb) | ^~~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/net/tcp.h b/include/net/tcp.h index cd9cc348dbf9..a8c36fa886a4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -934,6 +934,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) } INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); #endif From b03d2142bea8cf7407a0a668ce8f5f115bd226c4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:12 -0700 Subject: [PATCH 0240/2056] tcp: move ipv6_specific declaration to remove a warning ipv6_specific should be declared in tcp include files, not mptcp. 
This removes the following warning : CHECK net/ipv6/tcp_ipv6.c net/ipv6/tcp_ipv6.c:78:42: warning: symbol 'ipv6_specific' was not declared. Should it be static? Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/mptcp/protocol.h | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index a8c36fa886a4..e6920ae0765c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -933,6 +933,8 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb) return 0; } +extern const struct inet_connection_sock_af_ops ipv6_specific; + INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index db56535dfc29..d4294b6d23e4 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -355,9 +355,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, } extern const struct inet_connection_sock_af_ops ipv4_specific; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) -extern const struct inet_connection_sock_af_ops ipv6_specific; -#endif void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) From 9b9e2f250e3e6f59ad07e6d03838c27a100e0042 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:13 -0700 Subject: [PATCH 0241/2056] tcp: move ipv4_specific to tcp include file Declare ipv4_specific once, in tcp.h were it belongs. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ include/net/transp_v6.h | 3 --- net/mptcp/protocol.h | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index e6920ae0765c..b0f0f93c681c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -907,6 +907,8 @@ static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) TCP_SKB_CB(skb)->bpf.sk_redir = NULL; } +extern const struct inet_connection_sock_af_ops ipv4_specific; + #if IS_ENABLED(CONFIG_IPV6) /* This is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[] diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index a8f6020f1196..da06613c9603 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -56,9 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006) -/* address family specific functions */ -extern const struct inet_connection_sock_af_ops ipv4_specific; - void inet6_destroy_sock(struct sock *sk); #define IPV6_SEQ_DGRAM_HEADER \ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d4294b6d23e4..06661781c9af 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -354,8 +354,6 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops; } -extern const struct inet_connection_sock_af_ops ipv4_specific; - void mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) int mptcp_proto_v6_init(void); From 5521d95e076238f1615cf1cdb135f318ef798b49 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:14 -0700 Subject: [PATCH 0242/2056] net: move tcp gro declarations to net/tcp.h This patch removes following (C=1 W=1) warnings for CONFIG_RETPOLINE=y : net/ipv4/tcp_offload.c:306:16: warning: symbol 'tcp4_gro_receive' was not declared. Should it be static? 
net/ipv4/tcp_offload.c:306:17: warning: no previous prototype for 'tcp4_gro_receive' [-Wmissing-prototypes] net/ipv4/tcp_offload.c:319:29: warning: symbol 'tcp4_gro_complete' was not declared. Should it be static? net/ipv4/tcp_offload.c:319:29: warning: no previous prototype for 'tcp4_gro_complete' [-Wmissing-prototypes] CHECK net/ipv6/tcpv6_offload.c net/ipv6/tcpv6_offload.c:16:16: warning: symbol 'tcp6_gro_receive' was not declared. Should it be static? net/ipv6/tcpv6_offload.c:29:29: warning: symbol 'tcp6_gro_complete' was not declared. Should it be static? CC net/ipv6/tcpv6_offload.o net/ipv6/tcpv6_offload.c:16:17: warning: no previous prototype for 'tcp6_gro_receive' [-Wmissing-prototypes] 16 | struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/tcpv6_offload.c:29:29: warning: no previous prototype for 'tcp6_gro_complete' [-Wmissing-prototypes] 29 | INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 4 ++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index b0f0f93c681c..27f848ab3995 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1957,6 +1957,10 @@ void tcp_v4_destroy_sock(struct sock *sk); struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features); struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb); +INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); +INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)); int tcp_gro_complete(struct sk_buff *skb); void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 02aa5cb3a4fd..d8dbff1dd1fa 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) @@ -1608,7 +1606,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 7fbb44736a34..78eec5b42385 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -177,8 +178,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, 
@@ -319,7 +318,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { From 6db693285cd109e566c553694002dea769612b24 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 23 Jun 2020 15:31:15 -0700 Subject: [PATCH 0243/2056] udp: move gro declarations to net/udp.h This removes following warnings : CC net/ipv4/udp_offload.o net/ipv4/udp_offload.c:504:17: warning: no previous prototype for 'udp4_gro_receive' [-Wmissing-prototypes] 504 | struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv4/udp_offload.c:584:29: warning: no previous prototype for 'udp4_gro_complete' [-Wmissing-prototypes] 584 | INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ CHECK net/ipv6/udp_offload.c net/ipv6/udp_offload.c:115:16: warning: symbol 'udp6_gro_receive' was not declared. Should it be static? net/ipv6/udp_offload.c:148:29: warning: symbol 'udp6_gro_complete' was not declared. Should it be static? CC net/ipv6/udp_offload.o net/ipv6/udp_offload.c:115:17: warning: no previous prototype for 'udp6_gro_receive' [-Wmissing-prototypes] 115 | struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb) | ^~~~~~~~~~~~~~~~ net/ipv6/udp_offload.c:148:29: warning: no previous prototype for 'udp6_gro_complete' [-Wmissing-prototypes] 148 | INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff) | ^~~~~~~~~~~~~~~~~ Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/udp.h | 7 +++++++ net/ipv4/af_inet.c | 3 --- net/ipv6/ip6_offload.c | 4 +--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..5a2d677432f0 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -27,6 +27,7 @@ #include #include #include +#include /** * struct udp_skb_cb - UDP(-Lite) private variables @@ -166,6 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport, __be16 dport); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int)); +INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, + struct sk_buff *)); +INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb, struct udphdr *uh, struct sock *sk); int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index d8dbff1dd1fa..ea6ed6d487ed 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1432,8 +1432,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb, return inet_gso_segment(skb, features); } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *, - struct sk_buff *)); struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb) { const struct net_offload *ops; @@ -1606,7 +1604,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) return -EINVAL; } -INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, 
int)); int inet_gro_complete(struct sk_buff *skb, int nhoff) { __be16 newlen = htons(skb->len - nhoff); diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 78eec5b42385..a80f90bf3ae7 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "ip6_offload.h" @@ -178,8 +179,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph, return len; } -INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *, - struct sk_buff *)); INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head, struct sk_buff *skb) { @@ -318,7 +317,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head, return inet_gro_receive(head, skb); } -INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int)); INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff) { const struct net_offload *ops; From 5777cbba79ab041ccc08252abf6ea6bbd1f4c285 Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Tue, 23 Jun 2020 20:55:45 -0400 Subject: [PATCH 0244/2056] xirc2ps_cs: remove dev null check from do_reset(). dev cannot be NULL here since its already being accessed before. Remove the redundant null check. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 480ab7251515..3e3883ad88b0 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1473,7 +1473,7 @@ do_reset(struct net_device *dev, int full) unsigned int ioaddr = dev->base_addr; unsigned value; - pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full); + pr_debug("%s: do_reset(%p,%d)\n", dev->name, dev, full); hardreset(dev); PutByte(XIRCREG_CR, SoftReset); /* set */ From 6f3934576853a4fa60dea74ac8822f0f016ef9e8 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 22 Jun 2020 18:07:41 -0500 Subject: [PATCH 0245/2056] net: ipv6: Use struct_size() helper and kcalloc() Make use of the struct_size() helper instead of an open-coded version in order to avoid any potential type mistakes. Also, remove unnecessary function ipv6_rpl_srh_alloc_size() and replace kzalloc() with kcalloc(), which has a 2-factor argument form for multiplication. This code was detected with the help of Coccinelle and, audited and fixed manually. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- include/net/rpl.h | 6 ------ net/ipv6/exthdrs.c | 2 +- net/ipv6/rpl_iptunnel.c | 3 +-- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/include/net/rpl.h b/include/net/rpl.h index dceff60e8baf..308ef0a05cae 100644 --- a/include/net/rpl.h +++ b/include/net/rpl.h @@ -26,12 +26,6 @@ static inline void rpl_exit(void) {} /* Worst decompression memory usage ipv6 address (16) + pad 7 */ #define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7) -static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n) -{ - return sizeof(struct ipv6_rpl_sr_hdr) + - ((n + 1) * sizeof(struct in6_addr)); -} - size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri, unsigned char cmpre); diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 5a8bbcdcaf2b..e9b366994475 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -580,7 +580,7 @@ static int ipv6_rpl_srh_rcv(struct sk_buff *skb) hdr->segments_left--; i = n - hdr->segments_left; - buf = kzalloc(ipv6_rpl_srh_alloc_size(n + 1) * 2, GFP_ATOMIC); + buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC); if (unlikely(!buf)) { kfree_skb(skb); return -1; diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index c3ececd7cfc1..5fdf3ebb953f 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -136,8 +136,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, oldhdr = ipv6_hdr(skb); - buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2, - GFP_ATOMIC); + buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC); if (!buf) return -ENOMEM; From 0cc55e694e851921667dc80972a86feb1ca331ef Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Mon, 22 Jun 2020 22:50:39 -0400 Subject: [PATCH 0246/2056] dcb_doit: remove redundant skb check skb cannot be NULL here since its already being accessed before: sock_net(skb->sk). Remove the redundant null check. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- net/dcb/dcbnl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d2a4553bcf39..84dde5a2066e 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1736,7 +1736,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct net_device *netdev; struct dcbmsg *dcb = nlmsg_data(nlh); struct nlattr *tb[DCB_ATTR_MAX + 1]; - u32 portid = skb ? NETLINK_CB(skb).portid : 0; + u32 portid = NETLINK_CB(skb).portid; int ret = -EINVAL; struct sk_buff *reply_skb; struct nlmsghdr *reply_nlh = NULL; From f9215d6bb53ae1bd2268369d77b3f0855a27ba47 Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Mon, 22 Jun 2020 23:41:19 -0400 Subject: [PATCH 0247/2056] dn_route_rcv: remove redundant dev null check dev cannot be NULL here since its already being accessed before. Remove the redundant null check. Signed-off-by: Gaurav Singh Signed-off-by: David S. Miller --- net/decnet/dn_route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 06b9983325cc..9eb7e4b62d9b 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -670,7 +670,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type if (decnet_debug_level & 1) printk(KERN_DEBUG "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n", - (int)flags, (dev) ? 
dev->name : "???", len, skb->len, + (int)flags, dev->name, len, skb->len, padlen); if (flags & DN_RT_PKT_CNTL) { From a86688fbef1b48395822e90d5273e8abcac95a6c Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 16:43:58 +0530 Subject: [PATCH 0248/2056] pcnet32: Convert to generic power management Remove legacy PM callbacks and use generic operations. With legacy code, drivers were responsible for handling PCI PM operations like pci_save_state(). In generic code, all these are handled by PCI core. The generic suspend() and resume() are called at the same point the legacy ones were called. Thus, it does not affect the normal functioning of the driver. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 07e8211eea51..d32f54d760e7 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -2913,30 +2913,27 @@ static void pcnet32_watchdog(struct timer_list *t) mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); } -static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state) +static int pcnet32_pm_suspend(struct device *device_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device_d); if (netif_running(dev)) { netif_device_detach(dev); pcnet32_close(dev); } - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; } -static int pcnet32_pm_resume(struct pci_dev *pdev) +static int pcnet32_pm_resume(struct device *device_d) { - struct net_device *dev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); + struct net_device *dev = dev_get_drvdata(device_d); if (netif_running(dev)) { pcnet32_open(dev); netif_device_attach(dev); } + return 0; } @@ -2957,13 +2954,16 @@ static void pcnet32_remove_one(struct pci_dev *pdev) } } +static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume); + static struct pci_driver pcnet32_driver = { .name = DRV_NAME, .probe = pcnet32_probe_pci, .remove = pcnet32_remove_one, .id_table = pcnet32_pci_tbl, - .suspend = pcnet32_pm_suspend, - .resume = pcnet32_pm_resume, + .driver = { + .pm = &pcnet32_pm_ops, + }, }; /* An additional parameter that may be passed in... */ From 2caf751fe080887eab4c94e8a2060e6bebe92e73 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 16:43:59 +0530 Subject: [PATCH 0249/2056] amd8111e: Convert to generic power management Drivers should not save device registers and/or change the power state of the device. As per the generic PM approach, these are handled by PCI core. The driver should support dev_pm_ops callbacks and bind them to pci_driver. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 30 +++++++++++------------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 7a1286f8e983..c6591b33abcc 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1580,9 +1580,10 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue) if(!err) netif_wake_queue(dev); } -static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state) + +static int amd8111e_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); struct amd8111e_priv *lp = netdev_priv(dev); if (!netif_running(dev)) @@ -1609,34 +1610,24 @@ static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state) if(lp->options & OPTION_WAKE_PHY_ENABLE) amd8111e_enable_link_change(lp); - pci_enable_wake(pci_dev, PCI_D3hot, 1); - pci_enable_wake(pci_dev, PCI_D3cold, 1); + device_set_wakeup_enable(dev_d, 1); } else{ - pci_enable_wake(pci_dev, PCI_D3hot, 0); - pci_enable_wake(pci_dev, PCI_D3cold, 0); + device_set_wakeup_enable(dev_d, 0); } - pci_save_state(pci_dev); - pci_set_power_state(pci_dev, PCI_D3hot); - return 0; } -static int amd8111e_resume(struct pci_dev *pci_dev) + +static int amd8111e_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); struct amd8111e_priv *lp = netdev_priv(dev); if (!netif_running(dev)) return 0; - pci_set_power_state(pci_dev, PCI_D0); - pci_restore_state(pci_dev); - - pci_enable_wake(pci_dev, PCI_D3hot, 0); - pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */ - netif_device_attach(dev); spin_lock_irq(&lp->lock); @@ -1918,13 +1909,14 @@ static const struct pci_device_id amd8111e_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl); +static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume); + static struct pci_driver amd8111e_driver = { .name = MODULE_NAME, .id_table = amd8111e_pci_tbl, .probe = amd8111e_probe_one, .remove = amd8111e_remove_one, - .suspend = amd8111e_suspend, - .resume = amd8111e_resume + .driver.pm = &amd8111e_pm_ops }; module_pci_driver(amd8111e_driver); From c6f0fb5dfea02024ed8d45784a4f2f231aba9c65 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 16:44:00 +0530 Subject: [PATCH 0250/2056] amd-xgbe: Convert to generic power management Use dev_pm_ops structure to call generic suspend() and resume() callbacks. Drivers should avoid saving device register and/or change power states using PCI helper functions. With the generic approach, all these are handled by PCI core. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 7b86240ecd5f..90cb55eb5466 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -421,10 +421,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev) xgbe_free_pdata(pdata); } -#ifdef CONFIG_PM -static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused xgbe_pci_suspend(struct device *dev) { - struct xgbe_prv_data *pdata = pci_get_drvdata(pdev); + struct xgbe_prv_data *pdata = dev_get_drvdata(dev); struct net_device *netdev = pdata->netdev; int ret = 0; @@ -438,9 +437,9 @@ static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state) return ret; } -static int xgbe_pci_resume(struct pci_dev *pdev) +static int __maybe_unused xgbe_pci_resume(struct device *dev) { - struct xgbe_prv_data *pdata = pci_get_drvdata(pdev); + struct xgbe_prv_data *pdata = dev_get_drvdata(dev); struct net_device *netdev = pdata->netdev; int ret = 0; @@ -460,7 +459,6 @@ static int xgbe_pci_resume(struct pci_dev *pdev) return ret; } -#endif /* CONFIG_PM */ static const struct xgbe_version_data xgbe_v2a = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, @@ -502,15 +500,16 @@ static const struct pci_device_id xgbe_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, xgbe_pci_table); +static SIMPLE_DEV_PM_OPS(xgbe_pci_pm_ops, xgbe_pci_suspend, xgbe_pci_resume); + static struct pci_driver xgbe_driver = { .name = XGBE_DRV_NAME, .id_table = xgbe_pci_table, .probe = xgbe_pci_probe, .remove = xgbe_pci_remove, -#ifdef CONFIG_PM - .suspend = xgbe_pci_suspend, - .resume = xgbe_pci_resume, -#endif + .driver = { + .pm = &xgbe_pci_pm_ops, + } }; int xgbe_pci_init(void) From f906d0f9cd43adfce79a84fa0d48702ea7b887d3 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 17:12:24 +0530 Subject: [PATCH 0251/2056] tulip: dmfe: use generic power management With legacy PM hooks, it was the responsibility of a driver to manage PCI states and also the device's power state. The generic approach is to let the PCI core handle the work. The legacy suspend() and resume() were making use of pci_read/write_config_dword() to enable/disable wol. Driver editing configuration registers of a device is not recommended. Thus replace them all with device_wakeup_enable/disable(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/dmfe.c | 49 +++++---------------------- 1 file changed, 9 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index c1884fc9ad32..c3b4abff48b5 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -2081,14 +2081,11 @@ static const struct pci_device_id dmfe_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl); - -#ifdef CONFIG_PM -static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int __maybe_unused dmfe_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; - u32 tmp; /* Disable upper layer interface */ netif_device_detach(dev); @@ -2105,63 +2102,35 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) dmfe_free_rxbuffer(db); /* Enable WOL */ - pci_read_config_dword(pci_dev, 0x40, &tmp); - tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET); - - if (db->wol_mode & WAKE_PHY) - tmp |= DMFE_WOL_LINKCHANGE; - if (db->wol_mode & WAKE_MAGIC) - tmp |= DMFE_WOL_MAGICPACKET; - - pci_write_config_dword(pci_dev, 0x40, tmp); - - pci_enable_wake(pci_dev, PCI_D3hot, 1); - pci_enable_wake(pci_dev, PCI_D3cold, 1); - - /* Power down device*/ - pci_save_state(pci_dev); - pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); + device_wakeup_enable(dev_d); return 0; } -static int dmfe_resume(struct pci_dev *pci_dev) +static int __maybe_unused dmfe_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); - u32 tmp; - - pci_set_power_state(pci_dev, PCI_D0); - pci_restore_state(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); /* Re-initialize DM910X board */ dmfe_init_dm910x(dev); /* Disable WOL */ - pci_read_config_dword(pci_dev, 0x40, &tmp); - - tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET); - pci_write_config_dword(pci_dev, 0x40, tmp); - - pci_enable_wake(pci_dev, PCI_D3hot, 0); - pci_enable_wake(pci_dev, PCI_D3cold, 0); + device_wakeup_disable(dev_d); /* Restart upper layer interface */ netif_device_attach(dev); return 0; } -#else -#define dmfe_suspend NULL -#define dmfe_resume NULL -#endif + +static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume); static struct pci_driver dmfe_driver = { .name = "dmfe", .id_table = dmfe_pci_tbl, .probe = dmfe_init_one, .remove = dmfe_remove_one, - .suspend = dmfe_suspend, - .resume = dmfe_resume + .driver.pm = &dmfe_pm_ops, }; MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw"); From fc9aebfbdb7ea9c854e4efe9f1f0389fef814c44 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 17:12:25 +0530 Subject: [PATCH 0252/2056] tulip: windbond-840: use generic power management With stable support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. Earlier, .resume() was invoking pci_enable_device(). Drivers should not call PCI legacy helper functions, hence, it was removed. This should not change the behavior of the device as this function is called by PCI core if somehow pm_ops is not able to bind with the driver, else, required tasks are managed by the core itself. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/winbond-840.c | 26 ++++++-------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 4d5e4fa53023..5dcc66f60144 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -1530,8 +1530,6 @@ static void w840_remove1(struct pci_dev *pdev) } } -#ifdef CONFIG_PM - /* * suspend/resume synchronization: * - open, close, do_ioctl: @@ -1555,9 +1553,9 @@ static void w840_remove1(struct pci_dev *pdev) * Detach must occur under spin_unlock_irq(), interrupts from a detached * device would cause an irq storm. */ -static int w840_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused w840_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base_addr; @@ -1590,21 +1588,15 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int w840_resume (struct pci_dev *pdev) +static int __maybe_unused w840_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); - int retval = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; /* device not suspended */ if (netif_running(dev)) { - if ((retval = pci_enable_device(pdev))) { - dev_err(&dev->dev, - "pci_enable_device failed in resume\n"); - goto out; - } spin_lock_irq(&np->lock); iowrite32(1, np->base_addr+PCIBusCfg); ioread32(np->base_addr+PCIBusCfg); @@ -1622,19 +1614,17 @@ static int w840_resume (struct pci_dev *pdev) } out: rtnl_unlock(); - return retval; + return 0; } -#endif + +static SIMPLE_DEV_PM_OPS(w840_pm_ops, w840_suspend, w840_resume); static struct pci_driver w840_driver = { .name = DRV_NAME, .id_table = w840_pci_tbl, .probe = w840_probe1, .remove = w840_remove1, -#ifdef CONFIG_PM - .suspend = w840_suspend, - .resume = w840_resume, -#endif + .driver.pm = &w840_pm_ops, }; static int __init w840_init(void) From 8cfa989ae3f2e208bccd4007b82c02e2f3863014 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 17:12:26 +0530 Subject: [PATCH 0253/2056] tulip: de2104x: use generic power management With the support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. Earlier, .suspend() and .resume() were invoking pci_disable_device() and pci_enable_device() respectively to manage the device's power state. With generic PM, it is no longer needed. The driver is expected to just implement driver-specific operations and leave power transitions to PCI core. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/de2104x.c | 25 ++++++++---------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 592454f444ce..cb116b530f5e 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -2105,11 +2105,10 @@ static void de_remove_one(struct pci_dev *pdev) free_netdev(dev); } -#ifdef CONFIG_PM - -static int de_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused de_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct pci_dev *pdev = to_pci_dev(dev_d); + struct net_device *dev = pci_get_drvdata(pdev); struct de_private *de = netdev_priv(dev); rtnl_lock(); @@ -2136,7 +2135,6 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state) de_clean_rings(de); de_adapter_sleep(de); - pci_disable_device(pdev); } else { netif_device_detach(dev); } @@ -2144,21 +2142,17 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int de_resume (struct pci_dev *pdev) +static int __maybe_unused de_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct pci_dev *pdev = to_pci_dev(dev_d); + struct net_device *dev = pci_get_drvdata(pdev); struct de_private *de = netdev_priv(dev); - int retval = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; if (!netif_running(dev)) goto out_attach; - if ((retval = pci_enable_device(pdev))) { - netdev_err(dev, "pci_enable_device failed in resume\n"); - goto out; - } pci_set_master(pdev); de_init_rings(de); de_init_hw(de); @@ -2169,17 +2163,14 @@ static int de_resume (struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume); static struct pci_driver de_driver = { .name = DRV_NAME, .id_table = de_pci_tbl, .probe = de_init_one, .remove = de_remove_one, -#ifdef CONFIG_PM - .suspend = de_suspend, - .resume = de_resume, -#endif + .driver.pm = &de_pm_ops, }; static int __init de_init (void) From 77eb16e9b28754183ffa74ae5dac6e6e6e92c93c Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 17:12:27 +0530 Subject: [PATCH 0254/2056] tulip: tulip_core: use generic power management With the support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. Earlier, .suspend() and .resume() were invoking pci_disable_device() and pci_enable_device() respectively to manage the device's power state. driver also invoked pci_save/restore_state() and pci_set_power_sitate(). With generic PM, it is no longer needed. The driver is expected to just implement driver-specific operations and leave power transitions to PCI core. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/tulip_core.c | 51 +++++---------------- 1 file changed, 12 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 15efc294f513..9db23527275a 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1803,13 +1803,9 @@ static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) } } -#ifdef CONFIG_PM - - -static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused tulip_suspend(struct device *dev_d) { - pci_power_t pstate; - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct tulip_private *tp = netdev_priv(dev); if (!dev) @@ -1825,45 +1821,27 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) free_irq(tp->pdev->irq, dev); save_state: - pci_save_state(pdev); - pci_disable_device(pdev); - pstate = pci_choose_state(pdev, state); - if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { - int rc; - - tulip_set_wolopts(pdev, tp->wolinfo.wolopts); - rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); - if (rc) - pr_err("pci_enable_wake failed (%d)\n", rc); - } - pci_set_power_state(pdev, pstate); + tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts); + device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts); return 0; } - -static int tulip_resume(struct pci_dev *pdev) +static int __maybe_unused tulip_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct pci_dev *pdev = to_pci_dev(dev_d); + struct net_device *dev = dev_get_drvdata(dev_d); struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; - int retval; unsigned int tmp; + int retval = 0; if (!dev) return -EINVAL; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - if (!netif_running(dev)) return 0; - if ((retval = pci_enable_device(pdev))) { - pr_err("pci_enable_device failed in resume\n"); - return retval; - } - retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); if (retval) { @@ -1872,8 +1850,7 @@ static int tulip_resume(struct pci_dev *pdev) } if (tp->flags & COMET_PM) { - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); + device_set_wakeup_enable(dev_d, 0); /* Clear the PMES flag */ tmp = ioread32(ioaddr + CSR20); @@ -1891,9 +1868,6 @@ static int tulip_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - - static void tulip_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); @@ -1937,15 +1911,14 @@ static void poll_tulip (struct net_device *dev) } #endif +static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume); + static struct pci_driver tulip_driver = { .name = DRV_NAME, .id_table = tulip_pci_tbl, .probe = tulip_init_one, .remove = tulip_remove_one, -#ifdef CONFIG_PM - .suspend = tulip_suspend, - .resume = tulip_resume, -#endif /* CONFIG_PM */ + .driver.pm = &tulip_pm_ops, }; From 6c3cb945ed79f4a9a1eede1889e165f326dcf6e9 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 22 Jun 2020 17:12:28 +0530 Subject: [PATCH 0255/2056] tulip: uli526x: use generic power management With the support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. 
Legacy PM involves usage of PCI helper functions like pci_enable_wake() which is no longer recommended. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/uli526x.c | 48 ++++-------------------- 1 file changed, 8 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index f726436b1985..f942399f0f32 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -1163,65 +1163,41 @@ static void uli526x_dynamic_reset(struct net_device *dev) netif_wake_queue(dev); } - -#ifdef CONFIG_PM - /* * Suspend the interface. */ -static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused uli526x_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - pci_power_t power_state; - int err; + struct net_device *dev = dev_get_drvdata(dev_d); ULI526X_DBUG(0, "uli526x_suspend", 0); - pci_save_state(pdev); - if (!netif_running(dev)) return 0; netif_device_detach(dev); uli526x_reset_prepare(dev); - power_state = pci_choose_state(pdev, state); - pci_enable_wake(pdev, power_state, 0); - err = pci_set_power_state(pdev, power_state); - if (err) { - netif_device_attach(dev); - /* Re-initialize ULI526X board */ - uli526x_init(dev); - /* Restart upper layer interface */ - netif_wake_queue(dev); - } + device_set_wakeup_enable(dev_d, 0); - return err; + return 0; } /* * Resume the interface. */ -static int uli526x_resume(struct pci_dev *pdev) +static int __maybe_unused uli526x_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - int err; + struct net_device *dev = dev_get_drvdata(dev_d); ULI526X_DBUG(0, "uli526x_resume", 0); - pci_restore_state(pdev); if (!netif_running(dev)) return 0; - err = pci_set_power_state(pdev, PCI_D0); - if (err) { - netdev_warn(dev, "Could not put device into D0\n"); - return err; - } - netif_device_attach(dev); /* Re-initialize ULI526X board */ uli526x_init(dev); @@ -1231,14 +1207,6 @@ static int uli526x_resume(struct pci_dev *pdev) return 0; } -#else /* !CONFIG_PM */ - -#define uli526x_suspend NULL -#define uli526x_resume NULL - -#endif /* !CONFIG_PM */ - - /* * free all allocated rx buffer */ @@ -1761,14 +1729,14 @@ static const struct pci_device_id uli526x_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl); +static SIMPLE_DEV_PM_OPS(uli526x_pm_ops, uli526x_suspend, uli526x_resume); static struct pci_driver uli526x_driver = { .name = "uli526x", .id_table = uli526x_pci_tbl, .probe = uli526x_init_one, .remove = uli526x_remove_one, - .suspend = uli526x_suspend, - .resume = uli526x_resume, + .driver.pm = &uli526x_pm_ops, }; MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw"); From 0cc8fecf041d3e5285380da62cc6662bdc942d8c Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 22 Jun 2020 20:35:32 +0530 Subject: [PATCH 0256/2056] net: phy: Allow mdio buses to auto-probe c45 devices The mdiobus_scan logic is currently hardcoded to only work with c22 devices. This works fairly well in most cases, but its possible that a c45 device doesn't respond despite being a standard phy. If the parent hardware is capable, it makes sense to scan for c22 devices before falling back to c45. As we want this to reflect the capabilities of the STA, lets add a field to the mii_bus structure to represent the capability. That way devices can opt into the extended scanning. 
Existing users should continue to default to c22 only scanning as long as they are zero'ing the structure before use. Signed-off-by: Jeremy Linton Signed-off-by: Calvin Johnson Signed-off-by: David S. Miller --- drivers/net/phy/mdio_bus.c | 18 ++++++++++++++++-- include/linux/phy.h | 8 ++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6ceee82b2839..ab9233c558d8 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -739,10 +739,24 @@ EXPORT_SYMBOL(mdiobus_free); */ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { - struct phy_device *phydev; + struct phy_device *phydev = ERR_PTR(-ENODEV); int err; - phydev = get_phy_device(bus, addr, false); + switch (bus->probe_capabilities) { + case MDIOBUS_NO_CAP: + case MDIOBUS_C22: + phydev = get_phy_device(bus, addr, false); + break; + case MDIOBUS_C45: + phydev = get_phy_device(bus, addr, true); + break; + case MDIOBUS_C22_C45: + phydev = get_phy_device(bus, addr, false); + if (IS_ERR(phydev)) + phydev = get_phy_device(bus, addr, true); + break; + } + if (IS_ERR(phydev)) return phydev; diff --git a/include/linux/phy.h b/include/linux/phy.h index 9248dd2ce4ca..7860d56c6bf5 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -298,6 +298,14 @@ struct mii_bus { /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; + /* bus capabilities, used for probing */ + enum { + MDIOBUS_NO_CAP = 0, + MDIOBUS_C22, + MDIOBUS_C45, + MDIOBUS_C22_C45, + } probe_capabilities; + /* protect access to the shared element */ struct mutex shared_lock; From 229f4bb47512ece7b8ca0bd1a097fee30886b9b8 Mon Sep 17 00:00:00 2001 From: Calvin Johnson Date: Mon, 22 Jun 2020 20:35:33 +0530 Subject: [PATCH 0257/2056] net/fsl: acpize xgmac_mdio Add ACPI support for xgmac MDIO bus registration while maintaining the existing DT support. The function mdiobus_register() inside of_mdiobus_register(), brings up all the PHYs on the mdio bus and attach them to the bus. Signed-off-by: Jeremy Linton Signed-off-by: Calvin Johnson Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/xgmac_mdio.c | 32 ++++++++++++++------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index c82c85ef5fb3..b4ed5f837975 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -245,14 +245,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; - struct resource res; + struct resource *res; struct mdio_fsl_priv *priv; int ret; - ret = of_address_to_resource(np, 0, &res); - if (ret) { + /* In DPAA-1, MDIO is one of the many FMan sub-devices. The FMan + * defines a register space that spans a large area, covering all the + * subdevice areas. Therefore, MDIO cannot claim exclusive access to + * this register area. 
+ */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { dev_err(&pdev->dev, "could not obtain address\n"); - return ret; + return -EINVAL; } bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); @@ -263,21 +268,21 @@ static int xgmac_mdio_probe(struct platform_device *pdev) bus->read = xgmac_mdio_read; bus->write = xgmac_mdio_write; bus->parent = &pdev->dev; - snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start); /* Set the PHY base address */ priv = bus->priv; - priv->mdio_base = of_iomap(np, 0); + priv->mdio_base = ioremap(res->start, resource_size(res)); if (!priv->mdio_base) { ret = -ENOMEM; goto err_ioremap; } - priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, - "little-endian"); + priv->is_little_endian = device_property_read_bool(&pdev->dev, + "little-endian"); - priv->has_a011043 = of_property_read_bool(pdev->dev.of_node, - "fsl,erratum-a011043"); + priv->has_a011043 = device_property_read_bool(&pdev->dev, + "fsl,erratum-a011043"); ret = of_mdiobus_register(bus, np); if (ret) { @@ -320,10 +325,17 @@ static const struct of_device_id xgmac_mdio_match[] = { }; MODULE_DEVICE_TABLE(of, xgmac_mdio_match); +static const struct acpi_device_id xgmac_acpi_match[] = { + { "NXP0006" }, + { } +}; +MODULE_DEVICE_TABLE(acpi, xgmac_acpi_match); + static struct platform_driver xgmac_mdio_driver = { .driver = { .name = "fsl-fman_xmdio", .of_match_table = xgmac_mdio_match, + .acpi_match_table = xgmac_acpi_match, }, .probe = xgmac_mdio_probe, .remove = xgmac_mdio_remove, From 0f183fd151c8c7e215385edfbdc5112d743434dc Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Mon, 22 Jun 2020 20:35:34 +0530 Subject: [PATCH 0258/2056] net/fsl: enable extended scanning in xgmac_mdio Since we know the xgmac hardware always has a c45 compliant bus, let's try scanning for c22 capable PHYs first. If we fail to find any, then it will fall back to c45 automatically. Signed-off-by: Jeremy Linton Signed-off-by: Calvin Johnson Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/xgmac_mdio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index b4ed5f837975..98be51d8b08c 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -268,6 +268,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) bus->read = xgmac_mdio_read; bus->write = xgmac_mdio_write; bus->parent = &pdev->dev; + bus->probe_capabilities = MDIOBUS_C22_C45; snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start); /* Set the PHY base address */ From 547030c8e663520348557a5c7ed1eacf72e6bf86 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 22 Jun 2020 19:30:22 +0300 Subject: [PATCH 0259/2056] net: thunderbolt: Add comment clarifying prtcstns flags ThunderboltIP protocol currently has two flags from which we only support and set match frags ID. The first flag is reserved for full E2E flow control. Add a comment that clarifies them. Suggested-by: Yehezkel Bernat Signed-off-by: Mika Westerberg Reviewed-by: Yehezkel Bernat Signed-off-by: David S. 
Miller --- drivers/net/thunderbolt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index dacb4f680fd4..c2e44083032c 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1335,6 +1335,10 @@ static int __init tbnet_init(void) tb_property_add_immediate(tbnet_dir, "prtcid", 1); tb_property_add_immediate(tbnet_dir, "prtcvers", 1); tb_property_add_immediate(tbnet_dir, "prtcrevs", 1); + /* Currently only announce support for match frags ID (bit 1). Bit 0 + * is reserved for full E2E flow control which we do not support at + * the moment. + */ tb_property_add_immediate(tbnet_dir, "prtcstns", TBNET_MATCH_FRAGS_ID); From 0558c396040734bc1d361919566a581fd41aa539 Mon Sep 17 00:00:00 2001 From: tannerlove Date: Mon, 22 Jun 2020 13:43:24 -0400 Subject: [PATCH 0260/2056] selftests/net: plug rxtimestamp test into kselftest framework Run rxtimestamp as part of TEST_PROGS. Analogous to other tests, add new rxtimestamp.sh wrapper script, so that the test runs isolated from background traffic in a private network namespace. Also ignore failures of test case #6 by default. This case verifies that a receive timestamp is not reported if timestamp reporting is enabled for a socket, but generation is disabled. Receive timestamp generation has to be enabled globally, as no associated socket is known yet. A background process that enables rx timestamp generation therefore causes a false positive. Ntpd is one example that does. Add a "--strict" option to cause failure in the event that any test case fails, including test #6. This is useful for environments that are known to not have such background processes. Tested: make -C tools/testing/selftests TARGETS="net" run_tests Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/Makefile | 1 + tools/testing/selftests/net/rxtimestamp.c | 11 +++++++++-- tools/testing/selftests/net/rxtimestamp.sh | 4 ++++ 3 files changed, 14 insertions(+), 2 deletions(-) create mode 100755 tools/testing/selftests/net/rxtimestamp.sh diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 895ec992b2f1..bfacb960450f 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -17,6 +17,7 @@ TEST_PROGS += route_localnet.sh TEST_PROGS += reuseaddr_ports_exhausted.sh TEST_PROGS += txtimestamp.sh TEST_PROGS += vrf-xfrm-tests.sh +TEST_PROGS += rxtimestamp.sh TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index 422e7761254d..d4ea86a13e52 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -44,6 +44,7 @@ struct test_case { struct options sockopt; struct tstamps expected; bool enabled; + bool warn_on_fail; }; struct sof_flag { @@ -89,7 +90,7 @@ static struct test_case test_cases[] = { }, { { so_timestamping: SOF_TIMESTAMPING_SOFTWARE }, - {} + warn_on_fail : true }, { { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE @@ -115,6 +116,7 @@ static struct option long_options[] = { { "tcp", no_argument, 0, 't' }, { "udp", no_argument, 0, 'u' }, { "ip", no_argument, 0, 'i' }, + { "strict", no_argument, 0, 'S' }, { NULL, 0, NULL, 0 }, }; @@ -327,6 +329,7 @@ int main(int argc, char **argv) { bool all_protocols = true; bool all_tests = true; + bool strict = false; int arg_index = 0; int failures = 0; int s, t; @@ -363,6 +366,9 @@ int main(int argc, char **argv) all_protocols = false; socket_types[0].enabled = true; break; + case 'S': + strict = true; + break; default: error(1, 0, "Failed to parse parameters."); } @@ -379,7 +385,8 @@ int main(int argc, char **argv) printf("Starting testcase %d...\n", t); if (run_test_case(socket_types[s], test_cases[t])) { - failures++; + if (strict || !test_cases[t].warn_on_fail) + failures++; printf("FAILURE in test case "); print_test_case(&test_cases[t]); } diff --git a/tools/testing/selftests/net/rxtimestamp.sh b/tools/testing/selftests/net/rxtimestamp.sh new file mode 100755 index 000000000000..91631e88bf46 --- /dev/null +++ b/tools/testing/selftests/net/rxtimestamp.sh @@ -0,0 +1,4 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +./in_netns.sh ./rxtimestamp $@ From 5fff701c838e0e9afeac942d2fe20879d2b260aa Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Wed, 24 Jun 2020 02:03:22 +0530 Subject: [PATCH 0261/2056] cxgb4: always sync access when flashing PHY firmware Access to on-chip memory for flashing PHY firmware must always be synchronized. So, ensure the callers take on-chip memory lock. Also fixes following sparse warning: sge.c:1641:26: warning: context imbalance in 't4_load_phy_fw' - different lock contexts for basic block Fixes: 01b6961410b7 ("cxgb4: Add PHY firmware support for T420-BT cards") Fixes: 4ee339e1e92a ("cxgb4: add support to flash PHY image") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 +-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 5 +++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5 +++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 14 +++----------- 4 files changed, 10 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 466a61ba23ce..d811df1b93d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1813,8 +1813,7 @@ int t4_get_pfres(struct adapter *adapter); int t4_read_flash(struct adapter *adapter, unsigned int addr, unsigned int nwords, u32 *data, int byte_oriented); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); -int t4_load_phy_fw(struct adapter *adap, - int win, spinlock_t *lock, +int t4_load_phy_fw(struct adapter *adap, int win, int (*phy_fw_version)(const u8 *, size_t), const u8 *phy_fw_data, size_t phy_fw_size); int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 3dd28e5856ba..37d86af44074 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1305,8 +1305,9 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev, return ret; } - ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, - NULL, data, size); + spin_lock_bh(&adap->win0_lock); + ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size); + spin_unlock_bh(&adap->win0_lock); if (ret) dev_err(adap->pdev_dev, "Failed to load PHY FW\n"); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 7423980bc49a..87505a0d906a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4146,9 +4146,10 @@ static int adap_init0_phy(struct adapter *adap) /* Load PHY Firmware onto adapter. */ - ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, - phy_info->phy_fw_version, + spin_lock_bh(&adap->win0_lock); + ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version, (u8 *)phyf->data, phyf->size); + spin_unlock_bh(&adap->win0_lock); if (ret < 0) dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", -ret); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 9d557f3cd3aa..8f2b7c9aa384 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3752,7 +3752,6 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * t4_load_phy_fw - download port PHY firmware * @adap: the adapter * @win: the PCI-E Memory Window index to use for t4_memory_rw() - * @win_lock: the lock to use to guard the memory copy * @phy_fw_version: function to check PHY firmware versions * @phy_fw_data: the PHY firmware image to write * @phy_fw_size: image size @@ -3761,9 +3760,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * @phy_fw_version is supplied, then it will be used to determine if * it's necessary to perform the transfer by comparing the version * of any existing adapter PHY firmware with that of the passed in - * PHY firmware image. If @win_lock is non-NULL then it will be used - * around the call to t4_memory_rw() which transfers the PHY firmware - * to the adapter. + * PHY firmware image. 
* * A negative error number will be returned if an error occurs. If * version number support is available and there's no need to upgrade @@ -3775,14 +3772,13 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver) * contents. Thus, loading PHY firmware on such adapters must happen * after any FW_RESET_CMDs ... */ -int t4_load_phy_fw(struct adapter *adap, - int win, spinlock_t *win_lock, +int t4_load_phy_fw(struct adapter *adap, int win, int (*phy_fw_version)(const u8 *, size_t), const u8 *phy_fw_data, size_t phy_fw_size) { + int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; unsigned long mtype = 0, maddr = 0; u32 param, val; - int cur_phy_fw_ver = 0, new_phy_fw_vers = 0; int ret; /* If we have version number support, then check to see if the adapter @@ -3822,13 +3818,9 @@ int t4_load_phy_fw(struct adapter *adap, /* Copy the supplied PHY Firmware image to the adapter memory location * allocated by the adapter firmware. */ - if (win_lock) - spin_lock_bh(win_lock); ret = t4_memory_rw(adap, win, mtype, maddr, phy_fw_size, (__be32 *)phy_fw_data, T4_MEMORY_WRITE); - if (win_lock) - spin_unlock_bh(win_lock); if (ret) return ret; From f35d2117e28dc381613aa3ebf3755bf9dc4080a1 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Wed, 24 Jun 2020 02:03:23 +0530 Subject: [PATCH 0262/2056] cxgb4: move device dump arrays in header to C file Move all arrays related to device dump in header file to C file. Also, move the function that shares the arrays to the same C file. Fixes following warnings reported by make W=1 in several places: cudbg_entity.h:513:18: warning: 't6_hma_ireg_array' defined but not used [-Wunused-const-variable=] 513 | static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { Fixes: a7975a2f9a79 ("cxgb4: collect register dump") Fixes: 17b332f48074 ("cxgb4: add support to read serial flash") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. 
Miller --- .../net/ethernet/chelsio/cxgb4/cudbg_entity.h | 161 ------- .../net/ethernet/chelsio/cxgb4/cudbg_lib.c | 406 ++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cudbg_lib.h | 2 + .../net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 248 +---------- 4 files changed, 410 insertions(+), 407 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index dcab94cc2dee..876f90e5795e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -350,167 +350,6 @@ struct cudbg_qdesc_info { #define IREG_NUM_ELEM 4 -static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { - {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ - {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ - {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ - {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ - {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ - {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ - {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ - {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ - {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ - {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ - {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */ - {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ -}; - -static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { - {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ - {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ - {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ - {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ - {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ - {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ - {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ - {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ - {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ - {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ - {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ -}; - -static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { - {0x7e18, 0x7e1c, 0x0, 12} -}; - -static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { - {0x7e18, 0x7e1c, 0x0, 12} -}; - -static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { - {0x7e50, 0x7e54, 0x0, 13}, - {0x7e50, 0x7e54, 0x10, 6}, - {0x7e50, 0x7e54, 0x18, 21}, - {0x7e50, 0x7e54, 0x30, 32}, - {0x7e50, 0x7e54, 0x50, 22}, - {0x7e50, 0x7e54, 0x68, 12} -}; - -static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { - {0x7e50, 0x7e54, 0x0, 13}, - {0x7e50, 0x7e54, 0x10, 6}, - {0x7e50, 0x7e54, 0x18, 8}, - {0x7e50, 0x7e54, 0x20, 13}, - {0x7e50, 0x7e54, 0x30, 16}, - {0x7e50, 0x7e54, 0x40, 16}, - {0x7e50, 0x7e54, 0x50, 16}, - {0x7e50, 0x7e54, 0x60, 6}, - {0x7e50, 0x7e54, 0x68, 4} -}; - -static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { - {0x10cc, 0x10d0, 0x0, 16}, - {0x10cc, 0x10d4, 0x0, 16}, -}; - -static const u32 t6_sge_qbase_index_array[] = { - /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ - 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, -}; - -static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { - {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ - {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ - {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ -}; - -static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { - {0x5a10, 0x5a18, 0x00, 0x20}, /* 
t5_pcie_cdbg_regs_00_to_20 */ - {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ -}; - -static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { - {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */ - {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ -}; - -static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { - {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ - {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ -}; - #define CUDBG_NUM_PCIE_CONFIG_REGS 0x61 -static const u32 t5_pcie_config_array[][2] = { - {0x0, 0x34}, - {0x3c, 0x40}, - {0x50, 0x64}, - {0x70, 0x80}, - {0x94, 0xa0}, - {0xb0, 0xb8}, - {0xd0, 0xd4}, - {0x100, 0x128}, - {0x140, 0x148}, - {0x150, 0x164}, - {0x170, 0x178}, - {0x180, 0x194}, - {0x1a0, 0x1b8}, - {0x1c0, 0x208}, -}; - -static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { - {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ - {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ - {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ -}; - -static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { - {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ - {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ -}; - -static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { - {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ - {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ - {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ - {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ - {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ - {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ - {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ - {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ - {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ - {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ - {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ -}; - -static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { - {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ - {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ - {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ - {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ - {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ - {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ - {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ - {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ - {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ - {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ - {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ - {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ - {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ -}; - -static const 
u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { - {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ -}; #endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index cd9494c5ff37..9960e9d206fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -15,6 +15,412 @@ #include "cudbg_lib.h" #include "cudbg_zlib.h" +static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ + {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ + {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ + {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ +}; + +static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ + {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ + {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ + {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ + {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ +}; + +static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 21}, + {0x7e50, 0x7e54, 0x30, 32}, + {0x7e50, 0x7e54, 0x50, 22}, + {0x7e50, 0x7e54, 0x68, 12} +}; + +static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 8}, + {0x7e50, 0x7e54, 0x20, 13}, + {0x7e50, 0x7e54, 0x30, 16}, + {0x7e50, 0x7e54, 0x40, 16}, + {0x7e50, 0x7e54, 0x50, 16}, + {0x7e50, 0x7e54, 0x60, 6}, + {0x7e50, 0x7e54, 0x68, 4} +}; + +static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { + {0x10cc, 0x10d0, 0x0, 16}, + {0x10cc, 0x10d4, 0x0, 16}, +}; + +static const u32 t6_sge_qbase_index_array[] = { + /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */ + 0x1250, 0x1240, 0x1244, 0x1248, 0x124c, +}; + +static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { + {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ + {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ + {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ +}; + +static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { + {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */ + {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ +}; + +static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { + {0x8FD0, 0x8FD4, 0x10000, 
0x20}, /* t5_pm_rx_regs_10000_to_10020 */ + {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ +}; + +static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { + {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ + {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ +}; + +static const u32 t5_pcie_config_array[][2] = { + {0x0, 0x34}, + {0x3c, 0x40}, + {0x50, 0x64}, + {0x70, 0x80}, + {0x94, 0xa0}, + {0xb0, 0xb8}, + {0xd0, 0xd4}, + {0x100, 0x128}, + {0x140, 0x148}, + {0x150, 0x164}, + {0x170, 0x178}, + {0x180, 0x194}, + {0x1a0, 0x1b8}, + {0x1c0, 0x208}, +}; + +static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ + {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ + {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ +}; + +static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ + {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ +}; + +static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ + {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ + {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ + {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ + {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ + {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ + {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ + {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ + {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ + {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ +}; + +static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { + {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ +}; + +static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { + {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ +}; + +u32 cudbg_get_entity_length(struct adapter *adap, u32 entity) +{ + struct cudbg_tcam tcam_region = { 0 }; + 
u32 value, n = 0, len = 0; + + switch (entity) { + case CUDBG_REG_DUMP: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T4: + len = T4_REGMAP_SIZE; + break; + case CHELSIO_T5: + case CHELSIO_T6: + len = T5_REGMAP_SIZE; + break; + default: + break; + } + break; + case CUDBG_DEV_LOG: + len = adap->params.devlog.size; + break; + case CUDBG_CIM_LA: + if (is_t6(adap->params.chip)) { + len = adap->params.cim_la_size / 10 + 1; + len *= 10 * sizeof(u32); + } else { + len = adap->params.cim_la_size / 8; + len *= 8 * sizeof(u32); + } + len += sizeof(u32); /* for reading CIM LA configuration */ + break; + case CUDBG_CIM_MA_LA: + len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); + break; + case CUDBG_CIM_QCFG: + len = sizeof(struct cudbg_cim_qcfg); + break; + case CUDBG_CIM_IBQ_TP0: + case CUDBG_CIM_IBQ_TP1: + case CUDBG_CIM_IBQ_ULP: + case CUDBG_CIM_IBQ_SGE0: + case CUDBG_CIM_IBQ_SGE1: + case CUDBG_CIM_IBQ_NCSI: + len = CIM_IBQ_SIZE * 4 * sizeof(u32); + break; + case CUDBG_CIM_OBQ_ULP0: + len = cudbg_cim_obq_size(adap, 0); + break; + case CUDBG_CIM_OBQ_ULP1: + len = cudbg_cim_obq_size(adap, 1); + break; + case CUDBG_CIM_OBQ_ULP2: + len = cudbg_cim_obq_size(adap, 2); + break; + case CUDBG_CIM_OBQ_ULP3: + len = cudbg_cim_obq_size(adap, 3); + break; + case CUDBG_CIM_OBQ_SGE: + len = cudbg_cim_obq_size(adap, 4); + break; + case CUDBG_CIM_OBQ_NCSI: + len = cudbg_cim_obq_size(adap, 5); + break; + case CUDBG_CIM_OBQ_RXQ0: + len = cudbg_cim_obq_size(adap, 6); + break; + case CUDBG_CIM_OBQ_RXQ1: + len = cudbg_cim_obq_size(adap, 7); + break; + case CUDBG_EDC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM0_BAR_A); + len = EDRAM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_EDC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM1_BAR_A); + len = EDRAM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_MC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); + len = EXT_MEM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_MC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EXT_MEM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_RSS: + len = t4_chip_rss_size(adap) * sizeof(u16); + break; + case CUDBG_RSS_VF_CONF: + len = adap->params.arch.vfcount * + sizeof(struct cudbg_rss_vf_conf); + break; + case CUDBG_PATH_MTU: + len = NMTUS * sizeof(u16); + break; + case CUDBG_PM_STATS: + len = sizeof(struct cudbg_pm_stats); + break; + case CUDBG_HW_SCHED: + len = sizeof(struct cudbg_hw_sched); + break; + case CUDBG_TP_INDIRECT: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + n = sizeof(t5_tp_pio_array) + + sizeof(t5_tp_tm_pio_array) + + sizeof(t5_tp_mib_index_array); + break; + case CHELSIO_T6: + n = sizeof(t6_tp_pio_array) + + sizeof(t6_tp_tm_pio_array) + + sizeof(t6_tp_mib_index_array); + break; + default: + break; + } + n = n / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; + case CUDBG_SGE_INDIRECT: + len = sizeof(struct ireg_buf) * 2 + + sizeof(struct sge_qbase_reg_field); + break; + case CUDBG_ULPRX_LA: + len = sizeof(struct cudbg_ulprx_la); + break; + case CUDBG_TP_LA: + len = sizeof(struct cudbg_tp_la) 
+ TPLA_SIZE * sizeof(u64); + break; + case CUDBG_MEMINFO: + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_meminfo); + break; + case CUDBG_CIM_PIF_LA: + len = sizeof(struct cudbg_cim_pif_la); + len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); + break; + case CUDBG_CLK: + len = sizeof(struct cudbg_clk_info); + break; + case CUDBG_PCIE_INDIRECT: + n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_PM_INDIRECT: + n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_TID_INFO: + len = sizeof(struct cudbg_tid_info_region_rev1); + break; + case CUDBG_PCIE_CONFIG: + len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; + break; + case CUDBG_DUMP_CONTEXT: + len = cudbg_dump_context_size(adap); + break; + case CUDBG_MPS_TCAM: + len = sizeof(struct cudbg_mps_tcam) * + adap->params.arch.mps_tcam_size; + break; + case CUDBG_VPD_DATA: + len = sizeof(struct cudbg_vpd_data); + break; + case CUDBG_LE_TCAM: + cudbg_fill_le_tcam_info(adap, &tcam_region); + len = sizeof(struct cudbg_tcam) + + sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + break; + case CUDBG_CCTRL: + len = sizeof(u16) * NMTUS * NCCTRL_WIN; + break; + case CUDBG_MA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_ma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + } + break; + case CUDBG_ULPTX_LA: + len = sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_ulptx_la); + break; + case CUDBG_UP_CIM_INDIRECT: + n = 0; + if (is_t5(adap->params.chip)) + n = sizeof(t5_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + else if (is_t6(adap->params.chip)) + n = sizeof(t6_up_cim_reg_array) / + ((IREG_NUM_ELEM + 1) * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; + case CUDBG_PBT_TABLE: + len = sizeof(struct cudbg_pbt_tables); + break; + case CUDBG_MBOX_LOG: + len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; + break; + case CUDBG_HMA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_hma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + } + break; + case CUDBG_HMA: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & HMA_MUX_F) { + /* In T6, there's no MC1. So, HMA shares MC1 + * address space. 
+ */ + value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); + len = EXT_MEM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_QDESC: + cudbg_fill_qdesc_num_and_size(adap, NULL, &len); + break; + default: + break; + } + + return len; +} + static int cudbg_do_compression(struct cudbg_init *pdbg_init, struct cudbg_buffer *pin_buff, struct cudbg_buffer *dbg_buff) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 0f488d52797b..d6d6cd298930 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -165,6 +165,8 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, int cudbg_collect_flash(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); + +u32 cudbg_get_entity_length(struct adapter *adap, u32 entity); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index d7afe0746878..77648e4ab4cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -70,250 +70,6 @@ static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = { { CUDBG_FLASH, cudbg_collect_flash }, }; -static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) -{ - struct cudbg_tcam tcam_region = { 0 }; - u32 value, n = 0, len = 0; - - switch (entity) { - case CUDBG_REG_DUMP: - switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { - case CHELSIO_T4: - len = T4_REGMAP_SIZE; - break; - case CHELSIO_T5: - case CHELSIO_T6: - len = T5_REGMAP_SIZE; - break; - default: - break; - } - break; - case CUDBG_DEV_LOG: - len = adap->params.devlog.size; - break; - case CUDBG_CIM_LA: - if (is_t6(adap->params.chip)) { - len = adap->params.cim_la_size / 10 + 1; - len *= 10 * sizeof(u32); - } else { - len = adap->params.cim_la_size / 8; - len *= 8 * sizeof(u32); - } - len += sizeof(u32); /* for reading CIM LA configuration */ - break; - case CUDBG_CIM_MA_LA: - len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); - break; - case CUDBG_CIM_QCFG: - len = sizeof(struct cudbg_cim_qcfg); - break; - case CUDBG_CIM_IBQ_TP0: - case CUDBG_CIM_IBQ_TP1: - case CUDBG_CIM_IBQ_ULP: - case CUDBG_CIM_IBQ_SGE0: - case CUDBG_CIM_IBQ_SGE1: - case CUDBG_CIM_IBQ_NCSI: - len = CIM_IBQ_SIZE * 4 * sizeof(u32); - break; - case CUDBG_CIM_OBQ_ULP0: - len = cudbg_cim_obq_size(adap, 0); - break; - case CUDBG_CIM_OBQ_ULP1: - len = cudbg_cim_obq_size(adap, 1); - break; - case CUDBG_CIM_OBQ_ULP2: - len = cudbg_cim_obq_size(adap, 2); - break; - case CUDBG_CIM_OBQ_ULP3: - len = cudbg_cim_obq_size(adap, 3); - break; - case CUDBG_CIM_OBQ_SGE: - len = cudbg_cim_obq_size(adap, 4); - break; - case CUDBG_CIM_OBQ_NCSI: - len = cudbg_cim_obq_size(adap, 5); - break; - case CUDBG_CIM_OBQ_RXQ0: - len = cudbg_cim_obq_size(adap, 6); - break; - case CUDBG_CIM_OBQ_RXQ1: - len = cudbg_cim_obq_size(adap, 7); - break; - case CUDBG_EDC0: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EDRAM0_ENABLE_F) { - value = t4_read_reg(adap, MA_EDRAM0_BAR_A); - len = EDRAM0_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_EDC1: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EDRAM1_ENABLE_F) { - value = t4_read_reg(adap, MA_EDRAM1_BAR_A); - len = EDRAM1_SIZE_G(value); - } - 
len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_MC0: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EXT_MEM0_ENABLE_F) { - value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); - len = EXT_MEM0_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_MC1: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & EXT_MEM1_ENABLE_F) { - value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); - len = EXT_MEM1_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_RSS: - len = t4_chip_rss_size(adap) * sizeof(u16); - break; - case CUDBG_RSS_VF_CONF: - len = adap->params.arch.vfcount * - sizeof(struct cudbg_rss_vf_conf); - break; - case CUDBG_PATH_MTU: - len = NMTUS * sizeof(u16); - break; - case CUDBG_PM_STATS: - len = sizeof(struct cudbg_pm_stats); - break; - case CUDBG_HW_SCHED: - len = sizeof(struct cudbg_hw_sched); - break; - case CUDBG_TP_INDIRECT: - switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { - case CHELSIO_T5: - n = sizeof(t5_tp_pio_array) + - sizeof(t5_tp_tm_pio_array) + - sizeof(t5_tp_mib_index_array); - break; - case CHELSIO_T6: - n = sizeof(t6_tp_pio_array) + - sizeof(t6_tp_tm_pio_array) + - sizeof(t6_tp_mib_index_array); - break; - default: - break; - } - n = n / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n; - break; - case CUDBG_SGE_INDIRECT: - len = sizeof(struct ireg_buf) * 2 + - sizeof(struct sge_qbase_reg_field); - break; - case CUDBG_ULPRX_LA: - len = sizeof(struct cudbg_ulprx_la); - break; - case CUDBG_TP_LA: - len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); - break; - case CUDBG_MEMINFO: - len = sizeof(struct cudbg_ver_hdr) + - sizeof(struct cudbg_meminfo); - break; - case CUDBG_CIM_PIF_LA: - len = sizeof(struct cudbg_cim_pif_la); - len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); - break; - case CUDBG_CLK: - len = sizeof(struct cudbg_clk_info); - break; - case CUDBG_PCIE_INDIRECT: - n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - break; - case CUDBG_PM_INDIRECT: - n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - break; - case CUDBG_TID_INFO: - len = sizeof(struct cudbg_tid_info_region_rev1); - break; - case CUDBG_PCIE_CONFIG: - len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS; - break; - case CUDBG_DUMP_CONTEXT: - len = cudbg_dump_context_size(adap); - break; - case CUDBG_MPS_TCAM: - len = sizeof(struct cudbg_mps_tcam) * - adap->params.arch.mps_tcam_size; - break; - case CUDBG_VPD_DATA: - len = sizeof(struct cudbg_vpd_data); - break; - case CUDBG_LE_TCAM: - cudbg_fill_le_tcam_info(adap, &tcam_region); - len = sizeof(struct cudbg_tcam) + - sizeof(struct cudbg_tid_data) * tcam_region.max_tid; - break; - case CUDBG_CCTRL: - len = sizeof(u16) * NMTUS * NCCTRL_WIN; - break; - case CUDBG_MA_INDIRECT: - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { - n = sizeof(t6_ma_ireg_array) / - (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n * 2; - } - break; - case CUDBG_ULPTX_LA: - len = sizeof(struct cudbg_ver_hdr) + - sizeof(struct cudbg_ulptx_la); - break; - case CUDBG_UP_CIM_INDIRECT: - n = 0; - if (is_t5(adap->params.chip)) - n = sizeof(t5_up_cim_reg_array) / - ((IREG_NUM_ELEM + 1) * sizeof(u32)); - else if (is_t6(adap->params.chip)) - n = sizeof(t6_up_cim_reg_array) / - ((IREG_NUM_ELEM + 1) * sizeof(u32)); - len = sizeof(struct ireg_buf) * n; - break; - case CUDBG_PBT_TABLE: - len = sizeof(struct cudbg_pbt_tables); - break; - case 
CUDBG_MBOX_LOG: - len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; - break; - case CUDBG_HMA_INDIRECT: - if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { - n = sizeof(t6_hma_ireg_array) / - (IREG_NUM_ELEM * sizeof(u32)); - len = sizeof(struct ireg_buf) * n; - } - break; - case CUDBG_HMA: - value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); - if (value & HMA_MUX_F) { - /* In T6, there's no MC1. So, HMA shares MC1 - * address space. - */ - value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); - len = EXT_MEM1_SIZE_G(value); - } - len = cudbg_mbytes_to_bytes(len); - break; - case CUDBG_QDESC: - cudbg_fill_qdesc_num_and_size(adap, NULL, &len); - break; - default: - break; - } - - return len; -} - u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) { u32 i, entity; @@ -323,14 +79,14 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) if (flag & CXGB4_ETH_DUMP_HW) { for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) { entity = cxgb4_collect_hw_dump[i].entity; - len += cxgb4_get_entity_length(adap, entity); + len += cudbg_get_entity_length(adap, entity); } } if (flag & CXGB4_ETH_DUMP_MEM) { for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) { entity = cxgb4_collect_mem_dump[i].entity; - len += cxgb4_get_entity_length(adap, entity); + len += cudbg_get_entity_length(adap, entity); } } From 428d2459cceb77357b81c242ca22462a6a904817 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Petr=20Van=C4=9Bk?= Date: Sat, 30 May 2020 14:39:12 +0200 Subject: [PATCH 0263/2056] xfrm: introduce oseq-may-wrap flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RFC 4303 in section 3.3.3 suggests to disable anti-replay for manually distributed ICVs in which case the sender does not need to monitor or reset the counter. However, the sender still increments the counter and when it reaches the maximum value, the counter rolls over back to zero. This patch introduces new extra_flag XFRM_SA_XFLAG_OSEQ_MAY_WRAP which allows sequence number to cycle in outbound packets if set. This flag is used only in legacy and bmp code, because esn should not be negotiated if anti-replay is disabled (see note in 3.3.3 section). 
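For context, a minimal sketch of the check this flag relaxes (not the kernel code itself; the helper name here is illustrative, the real changes are in the diff below): with XFRM_SA_XFLAG_OSEQ_MAY_WRAP set in props.extra_flags, a wrapped output sequence number is tolerated instead of being reported as an overflow.

static int oseq_overflow_check(struct xfrm_state *x, u32 *oseq)
{
	(*oseq)++;
	/* Legacy behaviour: a wrap counts as overflow and is audited. */
	if (unlikely(*oseq == 0) &&
	    !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP))
		return -EOVERFLOW;
	/* With the flag set, the counter simply cycles back to zero. */
	return 0;
}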
Signed-off-by: Petr Vaněk Acked-by: Christophe Gouault Signed-off-by: Steffen Klassert --- include/uapi/linux/xfrm.h | 1 + net/xfrm/xfrm_replay.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index ff7cfdc6cb44..ffc6a5391bb7 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -387,6 +387,7 @@ struct xfrm_usersa_info { }; #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +#define XFRM_SA_XFLAG_OSEQ_MAY_WRAP 2 struct xfrm_usersa_id { xfrm_address_t daddr; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 98943f8d01aa..c6a4338a0d08 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -89,7 +89,8 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(x->replay.oseq == 0)) { + if (unlikely(x->replay.oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { x->replay.oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -168,7 +169,8 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq; XFRM_SKB_CB(skb)->seq.output.hi = 0; - if (unlikely(replay_esn->oseq == 0)) { + if (unlikely(replay_esn->oseq == 0) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { replay_esn->oseq--; xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -572,7 +574,8 @@ static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *sk XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < x->replay.oseq)) { + if (unlikely(oseq < x->replay.oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; @@ -611,7 +614,8 @@ static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff XFRM_SKB_CB(skb)->seq.output.hi = 0; xo->seq.hi = 0; - if (unlikely(oseq < replay_esn->oseq)) { + if (unlikely(oseq < replay_esn->oseq) && + !(x->props.extra_flags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP)) { xfrm_audit_state_replay_overflow(x, skb); err = -EOVERFLOW; From 54b66c2255fadc8d78e88b5ffd99b19f7f754f5a Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Tue, 23 Jun 2020 22:36:00 +0100 Subject: [PATCH 0264/2056] tools, bpftool: Fix variable shadowing in emit_obj_refs_json() Building bpftool yields the following complaint: pids.c: In function 'emit_obj_refs_json': pids.c:175:80: warning: declaration of 'json_wtr' shadows a global declaration [-Wshadow] 175 | void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_wtr) | ~~~~~~~~~~~~~~~^~~~~~~~ In file included from pids.c:11: main.h:141:23: note: shadowed declaration is here 141 | extern json_writer_t *json_wtr; | ^~~~~~~~ Let's rename the variable. v2: - Rename the variable instead of calling the global json_wtr directly. 
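For readers less familiar with -Wshadow, a standalone illustration (not code from the tree) of the warning class fixed here: a function parameter that reuses the name of a file-scope variable hides it within the function body, which GCC reports exactly as quoted above.

extern int counter;		/* file-scope, like json_wtr in main.h */

static void bump(int counter)	/* -Wshadow: 'counter' shadows a global declaration */
{
	/* inside this scope, 'counter' can only mean the parameter */
}

Renaming the parameter, as done here with json_writer, removes the ambiguity without changing behaviour.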
Signed-off-by: Quentin Monnet Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200623213600.16643-1-quentin@isovalent.com --- tools/bpf/bpftool/pids.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 3474a91743ff..2709be4de2b1 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -172,7 +172,8 @@ void delete_obj_refs_table(struct obj_refs_table *table) } } -void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_wtr) +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, + json_writer_t *json_writer) { struct obj_refs *refs; struct obj_ref *ref; @@ -187,16 +188,16 @@ void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *j if (refs->ref_cnt == 0) break; - jsonw_name(json_wtr, "pids"); - jsonw_start_array(json_wtr); + jsonw_name(json_writer, "pids"); + jsonw_start_array(json_writer); for (i = 0; i < refs->ref_cnt; i++) { ref = &refs->refs[i]; - jsonw_start_object(json_wtr); - jsonw_int_field(json_wtr, "pid", ref->pid); - jsonw_string_field(json_wtr, "comm", ref->comm); - jsonw_end_object(json_wtr); + jsonw_start_object(json_writer); + jsonw_int_field(json_writer, "pid", ref->pid); + jsonw_string_field(json_writer, "comm", ref->comm); + jsonw_end_object(json_writer); } - jsonw_end_array(json_wtr); + jsonw_end_array(json_writer); break; } } From 135c783f4794fbdeace4a969dea6eabd27f8a501 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 23 Jun 2020 09:42:07 +0100 Subject: [PATCH 0265/2056] libbpf: Fix spelling mistake "kallasyms" -> "kallsyms" There is a spelling mistake in a pr_warn message. Fix it. Signed-off-by: Colin Ian King Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200623084207.149253-1-colin.king@canonical.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 18461deb1b19..deea27aadcef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5741,7 +5741,7 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) if (ret == EOF && feof(f)) break; if (ret != 3) { - pr_warn("failed to read kallasyms entry: %d\n", ret); + pr_warn("failed to read kallsyms entry: %d\n", ret); err = -EINVAL; goto out; } From 192b6638eea5d40c99964291671fc0371b858f6e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 23 Jun 2020 21:38:05 -0700 Subject: [PATCH 0266/2056] libbpf: Prevent loading vmlinux BTF twice Prevent loading/parsing vmlinux BTF twice in some cases: for CO-RE relocations and for BTF-aware hooks (tp_btf, fentry/fexit, etc). 
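The shape of the fix, condensed from the diff below (not a drop-in replacement): decide up front whether anything needs vmlinux BTF, load it exactly once into obj->btf_vmlinux, and let the CO-RE relocation path reuse that handle instead of parsing kernel BTF a second time.

	bool need_vmlinux_btf = false;

	/* CO-RE relocations need kernel BTF */
	if (obj->btf_ext && obj->btf_ext->field_reloc_info.len)
		need_vmlinux_btf = true;

	bpf_object__for_each_program(prog, obj) {
		if (libbpf_prog_needs_vmlinux_btf(prog)) {
			need_vmlinux_btf = true;
			break;
		}
	}

	if (need_vmlinux_btf)
		obj->btf_vmlinux = libbpf_find_kernel_btf();	/* single load */

	/* bpf_core_reloc_fields() then uses obj->btf_vmlinux as targ_btf and
	 * only btf__free()s a target BTF it parsed separately from a file.
	 */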
Fixes: a6ed02cac690 ("libbpf: Load btf_vmlinux only once per object.") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200624043805.1794620-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index deea27aadcef..6b4955d170ff 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2504,22 +2504,31 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog) static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) { + bool need_vmlinux_btf = false; struct bpf_program *prog; int err; + /* CO-RE relocations need kernel BTF */ + if (obj->btf_ext && obj->btf_ext->field_reloc_info.len) + need_vmlinux_btf = true; + bpf_object__for_each_program(prog, obj) { if (libbpf_prog_needs_vmlinux_btf(prog)) { - obj->btf_vmlinux = libbpf_find_kernel_btf(); - if (IS_ERR(obj->btf_vmlinux)) { - err = PTR_ERR(obj->btf_vmlinux); - pr_warn("Error loading vmlinux BTF: %d\n", err); - obj->btf_vmlinux = NULL; - return err; - } - return 0; + need_vmlinux_btf = true; + break; } } + if (!need_vmlinux_btf) + return 0; + + obj->btf_vmlinux = libbpf_find_kernel_btf(); + if (IS_ERR(obj->btf_vmlinux)) { + err = PTR_ERR(obj->btf_vmlinux); + pr_warn("Error loading vmlinux BTF: %d\n", err); + obj->btf_vmlinux = NULL; + return err; + } return 0; } @@ -4945,8 +4954,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) if (targ_btf_path) targ_btf = btf__parse_elf(targ_btf_path, NULL); else - targ_btf = libbpf_find_kernel_btf(); - if (IS_ERR(targ_btf)) { + targ_btf = obj->btf_vmlinux; + if (IS_ERR_OR_NULL(targ_btf)) { pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf)); return PTR_ERR(targ_btf); } @@ -4987,7 +4996,9 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path) } out: - btf__free(targ_btf); + /* obj->btf_vmlinux is freed at the end of object load phase */ + if (targ_btf != obj->btf_vmlinux) + btf__free(targ_btf); if (!IS_ERR_OR_NULL(cand_cache)) { hashmap__for_each_entry(cand_cache, entry, i) { bpf_core_free_cands(entry->value); From fea549b030152d5336dbd960b357a4d4b841a851 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 24 Jun 2020 11:10:59 -0700 Subject: [PATCH 0267/2056] selftests/bpf: Workaround for get_stack_rawtp test. 
./test_progs-no_alu32 -t get_stack_raw_tp fails due to: 52: (85) call bpf_get_stack#67 53: (bf) r8 = r0 54: (bf) r1 = r8 55: (67) r1 <<= 32 56: (c7) r1 s>>= 32 ; if (usize < 0) 57: (c5) if r1 s< 0x0 goto pc+26 R0=inv(id=0,smax_value=800) R1_w=inv(id=0,umax_value=800,var_off=(0x0; 0x3ff)) R6=ctx(id=0,off=0,imm=0) R7=map_value(id=0,off=0,ks=4,vs=1600,imm=0) R8_w=inv(id=0,smax_value=800) R9=inv800 ; ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0); 58: (1f) r9 -= r8 ; ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0); 59: (bf) r2 = r7 60: (0f) r2 += r1 regs=1 stack=0 before 52: (85) call bpf_get_stack#67 ; ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0); 61: (bf) r1 = r6 62: (bf) r3 = r9 63: (b7) r4 = 0 64: (85) call bpf_get_stack#67 R0=inv(id=0,smax_value=800) R1_w=ctx(id=0,off=0,imm=0) R2_w=map_value(id=0,off=0,ks=4,vs=1600,umax_value=800,var_off=(0x0; 0x3ff),s32_max_value=1023,u32_max_value=1023) R3_w=inv(id=0,umax_value=9223372036854776608) R3 unbounded memory access, use 'var &= const' or 'if (var < const)' In the C code: usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK); if (usize < 0) return 0; ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0); if (ksize < 0) return 0; We used to have problem with pointer arith in R2. Now it's a problem with two integers in R3. 'if (usize < 0)' is comparing R1 and makes it [0,800], but R8 stays [-inf,800]. Both registers represent the same 'usize' variable. Then R9 -= R8 is doing 800 - [-inf, 800] so the result of "max_len - usize" looks unbounded to the verifier while it's obvious in C code that "max_len - usize" should be [0, 800]. To workaround the problem convert ksize and usize variables from int to long. Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c index 29817a703984..b6a6eb279e54 100644 --- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c +++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c @@ -57,8 +57,9 @@ struct { SEC("raw_tracepoint/sys_enter") int bpf_prog1(void *ctx) { - int max_len, max_buildid_len, usize, ksize, total_size; + int max_len, max_buildid_len, total_size; struct stack_trace_t *data; + long usize, ksize; void *raw_data; __u32 key = 0; From dfde1d7dee9bfd095a4f16c9e0579a10f4092e81 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:50 +0300 Subject: [PATCH 0268/2056] sock: Move sock_valbool_flag to header This is preparation for usage in bpf_setsockopt. 
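With the helper visible from include/net/sock.h, any in-kernel setsockopt-style path can toggle a socket flag from an integer option value. The bpf_setsockopt patch later in this series uses it exactly this way for SO_KEEPALIVE (quoted here only as a usage example):

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;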
Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-1-zeil@yandex-team.ru --- include/net/sock.h | 9 +++++++++ net/core/sock.c | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index c53cc42b5ab9..8ba438b671d7 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -879,6 +879,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } +static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, + int valbool) +{ + if (valbool) + sock_set_flag(sk, bit); + else + sock_reset_flag(sk, bit); +} + static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); diff --git a/net/core/sock.c b/net/core/sock.c index 6c4acf1f0220..5ba4753bc04d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -695,15 +695,6 @@ static int sock_getbindtodevice(struct sock *sk, char __user *optval, return ret; } -static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit, - int valbool) -{ - if (valbool) - sock_set_flag(sk, bit); - else - sock_reset_flag(sk, bit); -} - bool sk_mc_loop(struct sock *sk) { if (dev_recursion_level()) From aad4a0a9513af962137c4842463d11ed491eec37 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:51 +0300 Subject: [PATCH 0269/2056] tcp: Expose tcp_sock_set_keepidle_locked This is preparation for usage in bpf_setsockopt. v2: - remove redundant EXPORT_SYMBOL (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200620153052.9439-2-zeil@yandex-team.ru --- include/linux/tcp.h | 1 + net/ipv4/tcp.c | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 9aac824c523c..3bdec31ce8f4 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -499,6 +499,7 @@ int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, void tcp_sock_set_cork(struct sock *sk, bool on); int tcp_sock_set_keepcnt(struct sock *sk, int val); +int tcp_sock_set_keepidle_locked(struct sock *sk, int val); int tcp_sock_set_keepidle(struct sock *sk, int val); int tcp_sock_set_keepintvl(struct sock *sk, int val); void tcp_sock_set_nodelay(struct sock *sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 810cc164f795..de36c91d32ea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2957,7 +2957,7 @@ void tcp_sock_set_user_timeout(struct sock *sk, u32 val) } EXPORT_SYMBOL(tcp_sock_set_user_timeout); -static int __tcp_sock_set_keepidle(struct sock *sk, int val) +int tcp_sock_set_keepidle_locked(struct sock *sk, int val) { struct tcp_sock *tp = tcp_sk(sk); @@ -2984,7 +2984,7 @@ int tcp_sock_set_keepidle(struct sock *sk, int val) int err; lock_sock(sk); - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); release_sock(sk); return err; } @@ -3183,7 +3183,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_KEEPIDLE: - err = __tcp_sock_set_keepidle(sk, val); + err = tcp_sock_set_keepidle_locked(sk, val); break; case TCP_KEEPINTVL: if (val < 1 || val > MAX_TCP_KEEPINTVL) From f9bcf96837f158db6ea982d15cd2c8161ca6bc23 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Sat, 20 Jun 2020 18:30:52 +0300 Subject: [PATCH 0270/2056] bpf: Add SO_KEEPALIVE and related options to bpf_setsockopt This patch adds support of 
SO_KEEPALIVE flag and TCP related options to bpf_setsockopt() routine. This is helpful if we want to enable or tune TCP keepalive for applications which don't do it in the userspace code. v3: - update kernel-doc in uapi (Nikita Vetoshkin ) v4: - update kernel-doc in tools too (Alexei Starovoitov) - add test to selftests (Alexei Starovoitov) Signed-off-by: Dmitry Yakunin Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200620153052.9439-3-zeil@yandex-team.ru --- include/uapi/linux/bpf.h | 7 ++-- net/core/filter.c | 36 ++++++++++++++++++- tools/include/uapi/linux/bpf.h | 7 ++-- .../selftests/bpf/progs/connect4_prog.c | 27 ++++++++++++++ 4 files changed, 72 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. * Return diff --git a/net/core/filter.c b/net/core/filter.c index 73395384afe2..c713b6b8938f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4289,10 +4289,10 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen, u32 flags) { char devname[IFNAMSIZ]; + int val, valbool; struct net *net; int ifindex; int ret = 0; - int val; if (!sk_fullsock(sk)) return -EINVAL; @@ -4303,6 +4303,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, if (optlen != sizeof(int) && optname != SO_BINDTODEVICE) return -EINVAL; val = *((int *)optval); + valbool = val ? 
1 : 0; /* Only some socketops are supported */ switch (optname) { @@ -4361,6 +4362,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, } ret = sock_bindtoindex(sk, ifindex, false); break; + case SO_KEEPALIVE: + if (sk->sk_prot->keepalive) + sk->sk_prot->keepalive(sk, valbool); + sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); + break; default: ret = -EINVAL; } @@ -4421,6 +4427,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, ret = tcp_set_congestion_control(sk, name, false, reinit, true); } else { + struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); if (optlen != sizeof(int)) @@ -4449,6 +4456,33 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname, else tp->save_syn = val; break; + case TCP_KEEPIDLE: + ret = tcp_sock_set_keepidle_locked(sk, val); + break; + case TCP_KEEPINTVL: + if (val < 1 || val > MAX_TCP_KEEPINTVL) + ret = -EINVAL; + else + tp->keepalive_intvl = val * HZ; + break; + case TCP_KEEPCNT: + if (val < 1 || val > MAX_TCP_KEEPCNT) + ret = -EINVAL; + else + tp->keepalive_probes = val; + break; + case TCP_SYNCNT: + if (val < 1 || val > MAX_TCP_SYNCNT) + ret = -EINVAL; + else + icsk->icsk_syn_retries = val; + break; + case TCP_USER_TIMEOUT: + if (val < 0) + ret = -EINVAL; + else + icsk->icsk_user_timeout = val; + break; default: ret = -EINVAL; } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d3923e6b860..d9737d51dd19 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1621,10 +1621,13 @@ union bpf_attr { * * * **SOL_SOCKET**, which supports the following *optname*\ s: * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, - * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. + * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**, + * **SO_BINDTODEVICE**, **SO_KEEPALIVE**. * * **IPPROTO_TCP**, which supports the following *optname*\ s: * **TCP_CONGESTION**, **TCP_BPF_IW**, - * **TCP_BPF_SNDCWND_CLAMP**. + * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**, + * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**, + * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**. * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. 
* Return diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index 1ab2c5eba86c..b1b2773c0b9d 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx) return 0; } +static __inline int set_keepalive(struct bpf_sock_addr *ctx) +{ + int zero = 0, one = 1; + + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one))) + return 1; + if (ctx->type == SOCK_STREAM) { + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one))) + return 1; + if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one))) + return 1; + } + if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero))) + return 1; + + return 0; +} + SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { @@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx) if (bind_to_device(ctx)) return 0; + if (set_keepalive(ctx)) + return 0; + if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM) return 0; else if (ctx->type == SOCK_STREAM) From 6933568aec92dd6432207baaf59378d01f55a14f Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 24 Jun 2020 11:11:44 -0700 Subject: [PATCH 0271/2056] Bluetooth: btusb: Reset port on cmd timeout QCA_ROME sometimes gets into a state where it is unresponsive to commands. Since it doesn't have support for a reset gpio, reset the usb port when this occurs instead. Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e42fdd625eb0..df46b2a34c18 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -573,6 +573,22 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev) gpiod_set_value_cansleep(reset_gpio, 0); } +static void btusb_qca_cmd_timeout(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + int err; + + if (++data->cmd_timeout_cnt < 5) + return; + + bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device."); + err = usb_autopm_get_interface(data->intf); + if (!err) + usb_queue_reset_device(data->intf); + else + bt_dev_err(hdev, "Failed usb_autopm_get_interface with %d", err); +} + static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -3964,6 +3980,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + hdev->cmd_timeout = btusb_qca_cmd_timeout; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); btusb_check_needs_reset_resume(intf); } From 34a68655a1414a34cc950b7fc5478a1b4f0db9b8 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 24 Jun 2020 11:24:30 -0700 Subject: [PATCH 0272/2056] Bluetooth: btusb: BTUSB_WAKEUP_DISABLE prevents wake When the BTUSB_WAKEUP_DISABLE flag is set, always return true for prevent wake. This tells the suspend notifier not to prepare the controller for reconnections during suspend. 
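Assembled from the context and added lines of the diff below (shown here only for readability), the resulting callback honours the driver-level flag first and only then falls back to the device's wakeup capability:

static bool btusb_prevent_wake(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);

	/* Never prepare the controller for reconnections during suspend
	 * when the driver has wakeup disabled for this device.
	 */
	if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
		return true;

	return !device_may_wakeup(&data->udev->dev);
}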
Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index df46b2a34c18..1ac6fd7c4534 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3743,6 +3743,9 @@ static bool btusb_prevent_wake(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); + if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags)) + return true; + return !device_may_wakeup(&data->udev->dev); } From 3a0377d993d7c62cbff623bce13eac077490f560 Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 24 Jun 2020 11:34:19 -0700 Subject: [PATCH 0273/2056] Bluetooth: Don't restart scanning if paused When restarting LE scanning, check if it's currently paused before enabling passive scanning. Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 86ae4b953a01..116207009dde 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -819,6 +819,11 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, { struct hci_dev *hdev = req->hdev; + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return; + } + /* Use ext scanning if set ext scan param and ext scan enable is * supported */ @@ -2657,6 +2662,11 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt) if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return 0; + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + hci_req_add_le_scan_disable(req); if (use_ext_scan(hdev)) { From 0ef44e5cab8dbf0a0327871b48fe7c8425d0d885 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:07 +0200 Subject: [PATCH 0274/2056] net: phy: add support for a common probe between shared PHYs Shared PHYs (PHYs in the same hardware package) may have shared registers and their drivers would usually need to share information. There is currently a way to have a shared (part of the) init, by using phy_package_init_once(). This patch extends the logic to share parts of the probe to allow sharing the initialization of locks or resources retrieval. Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/linux/phy.h | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/include/linux/phy.h b/include/linux/phy.h index 7860d56c6bf5..6fb8f302978d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -244,7 +244,8 @@ struct phy_package_shared { }; /* used as bit number in atomic bitops */ -#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_PROBE_DONE 1 /* * The Bus class for PHYs. 
Devices which provide access to @@ -1566,14 +1567,25 @@ static inline int __phy_package_write(struct phy_device *phydev, return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); } -static inline bool phy_package_init_once(struct phy_device *phydev) +static inline bool __phy_package_set_once(struct phy_device *phydev, + unsigned int b) { struct phy_package_shared *shared = phydev->shared; if (!shared) return false; - return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags); + return !test_and_set_bit(b, &shared->flags); +} + +static inline bool phy_package_init_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); +} + +static inline bool phy_package_probe_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); } extern struct bus_type mdio_bus_type; From c7cd2a6a7b084f3d57ce4d783685e37d16327f9f Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:08 +0200 Subject: [PATCH 0275/2056] net: phy: mscc: fix copyright and author information in MACsec All headers in the MSCC PHY driver have been copied and pasted from the original mscc.c file. However the information is not necessarily correct, as in the MACsec support. Fix this. Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_fc_buffer.h | 2 +- drivers/net/phy/mscc/mscc_mac.h | 2 +- drivers/net/phy/mscc/mscc_macsec.c | 6 +++--- drivers/net/phy/mscc/mscc_macsec.h | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_fc_buffer.h b/drivers/net/phy/mscc/mscc_fc_buffer.h index 3803e826c37d..399e803395a5 100644 --- a/drivers/net/phy/mscc/mscc_fc_buffer.h +++ b/drivers/net/phy/mscc/mscc_fc_buffer.h @@ -2,7 +2,7 @@ /* * Driver for Microsemi VSC85xx PHYs * - * Copyright (C) 2019 Microsemi Corporation + * Copyright (C) 2020 Microsemi Corporation */ #ifndef _MSCC_PHY_FC_BUFFER_H_ diff --git a/drivers/net/phy/mscc/mscc_mac.h b/drivers/net/phy/mscc/mscc_mac.h index 59b6837c60b3..8dd38dc6edbf 100644 --- a/drivers/net/phy/mscc/mscc_mac.h +++ b/drivers/net/phy/mscc/mscc_mac.h @@ -2,7 +2,7 @@ /* * Driver for Microsemi VSC85xx PHYs * - * Copyright (c) 2017 Microsemi Corporation + * Copyright (c) 2020 Microsemi Corporation */ #ifndef _MSCC_PHY_LINE_MAC_H_ diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index b4d3dc4068e2..c0eeb62cb940 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -1,10 +1,10 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* - * Driver for Microsemi VSC85xx PHYs + * Driver for Microsemi VSC85xx PHYs - MACsec support * - * Author: Nagaraju Lakkaraju + * Author: Antoine Tenart * License: Dual MIT/GPL - * Copyright (c) 2016 Microsemi Corporation + * Copyright (c) 2020 Microsemi Corporation */ #include diff --git a/drivers/net/phy/mscc/mscc_macsec.h b/drivers/net/phy/mscc/mscc_macsec.h index d751f2946b79..9c6d25e36de2 100644 --- a/drivers/net/phy/mscc/mscc_macsec.h +++ b/drivers/net/phy/mscc/mscc_macsec.h @@ -2,7 +2,7 @@ /* * Driver for Microsemi VSC85xx PHYs * - * Copyright (c) 2018 Microsemi Corporation + * Copyright (c) 2020 Microsemi Corporation */ #ifndef _MSCC_PHY_MACSEC_H_ From 6705b58daf844f219ca85231c69d019fbc30b992 Mon Sep 17 00:00:00 2001 From: Quentin Schulz Date: Tue, 23 Jun 2020 16:30:09 +0200 Subject: [PATCH 0276/2056] net: phy: mscc: remove the TR CLK disable magic value This patch adds a define for the 0x8000 magic 
value used to perform enable/disable actions on the "token ring clock". The patch is only cosmetic. Signed-off-by: Quentin Schulz Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc.h | 1 + drivers/net/phy/mscc/mscc_main.c | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index fbcee5fce7b2..756ec418f4f8 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -252,6 +252,7 @@ enum rgmii_clock_delay { /* Test page Registers */ #define MSCC_PHY_TEST_PAGE_5 5 #define MSCC_PHY_TEST_PAGE_8 8 +#define TR_CLK_DISABLE 0x8000 #define MSCC_PHY_TEST_PAGE_9 9 #define MSCC_PHY_TEST_PAGE_20 20 #define MSCC_PHY_TEST_PAGE_24 24 diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 5ddc44f87eaf..052a0def6e83 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -629,7 +629,7 @@ static int vsc8531_pre_init_seq_set(struct phy_device *phydev) if (rc < 0) return rc; rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST, - MSCC_PHY_TEST_PAGE_8, 0x8000, 0x8000); + MSCC_PHY_TEST_PAGE_8, TR_CLK_DISABLE, TR_CLK_DISABLE); if (rc < 0) return rc; @@ -1026,7 +1026,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev) phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20); reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); - reg |= 0x8000; + reg |= TR_CLK_DISABLE; phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); @@ -1046,7 +1046,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev) phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); - reg &= ~0x8000; + reg &= ~TR_CLK_DISABLE; phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); @@ -1196,7 +1196,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20); reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); - reg |= 0x8000; + reg |= TR_CLK_DISABLE; phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR); @@ -1225,7 +1225,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev) phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST); reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8); - reg &= ~0x8000; + reg &= ~TR_CLK_DISABLE; phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg); phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); From 4c8c5dc57ae8226ceb7e72fb7a5cfa8e743c0a2b Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:10 +0200 Subject: [PATCH 0277/2056] net: phy: mscc: take into account the 1588 block in MACsec init This patch takes in account the use of the 1588 block in the MACsec initialization, as a conditional configuration has to be done (when the 1588 block is used). Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/phy/mscc/mscc_macsec.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index c0eeb62cb940..713c62b1d1f0 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -285,7 +285,9 @@ static void vsc8584_macsec_mac_init(struct phy_device *phydev, MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA | MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA | (bank == HOST_MAC ? - MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0)); + MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0) | + (IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ? + MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(0x8) : 0)); val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG); val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC; From ab2bf933935710ac545f94e49d16b5eab01f846f Mon Sep 17 00:00:00 2001 From: Quentin Schulz Date: Tue, 23 Jun 2020 16:30:11 +0200 Subject: [PATCH 0278/2056] net: phy: mscc: 1588 block initialization This patch adds the first parts of the 1588 support in the MSCC PHY, with registers definition and the 1588 block initialization. Those PHYs are distributed in hardware packages containing multiple times the PHY. The VSC8584 for example is composed of 4 PHYs. With hardware packages, parts of the logic is usually common and one of the PHY has to be used for some parts of the initialization. Following this logic, the 1588 blocks of those PHYs are shared between two PHYs and accessing the registers has to be done using the "base" PHY of the group. This is handled thanks to helpers in the PTP code (and locks). We also need the MDIO bus lock while performing a single read or write to the 1588 registers as the read/write are composed of multiple MDIO transactions (and we don't want other threads updating the page). Co-developed-by: Antoine Tenart Signed-off-by: Quentin Schulz Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/Makefile | 4 + drivers/net/phy/mscc/mscc.h | 33 + drivers/net/phy/mscc/mscc_main.c | 30 +- drivers/net/phy/mscc/mscc_ptp.c | 1010 ++++++++++++++++++++++++++++++ drivers/net/phy/mscc/mscc_ptp.h | 477 ++++++++++++++ 5 files changed, 1552 insertions(+), 2 deletions(-) create mode 100644 drivers/net/phy/mscc/mscc_ptp.c create mode 100644 drivers/net/phy/mscc/mscc_ptp.h diff --git a/drivers/net/phy/mscc/Makefile b/drivers/net/phy/mscc/Makefile index 10af42cd9839..d8e22a4eeeff 100644 --- a/drivers/net/phy/mscc/Makefile +++ b/drivers/net/phy/mscc/Makefile @@ -8,3 +8,7 @@ mscc-objs := mscc_main.o ifdef CONFIG_MACSEC mscc-objs += mscc_macsec.o endif + +ifdef CONFIG_NETWORK_PHY_TIMESTAMPING +mscc-objs += mscc_ptp.o +endif diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index 756ec418f4f8..eabb6ab3c374 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -133,6 +133,7 @@ enum rgmii_clock_delay { * in the same package. */ #define MSCC_PHY_PAGE_EXTENDED_GPIO 0x0010 /* Extended reg - GPIO */ +#define MSCC_PHY_PAGE_1588 0x1588 /* PTP (1588) */ #define MSCC_PHY_PAGE_TEST 0x2a30 /* Test reg */ #define MSCC_PHY_PAGE_TR 0x52b5 /* Token ring registers */ @@ -373,6 +374,20 @@ struct vsc8531_private { unsigned long ingr_flows; unsigned long egr_flows; #endif + + bool input_clk_init; + struct vsc85xx_ptp *ptp; + + /* For multiple port PHYs; the MDIO address of the base PHY in the + * pair of two PHYs that share a 1588 engine. PHY0 and PHY2 are coupled. + * PHY1 and PHY3 as well. 
PHY0 and PHY1 are base PHYs for their + * respective pair. + */ + unsigned int ts_base_addr; + u8 ts_base_phy; + + /* ts_lock: used for per-PHY timestamping operations. */ + struct mutex ts_lock; }; #if IS_ENABLED(CONFIG_OF_MDIO) @@ -399,4 +414,22 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev) } #endif +#if IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) +void vsc85xx_link_change_notify(struct phy_device *phydev); +int vsc8584_ptp_init(struct phy_device *phydev); +int vsc8584_ptp_probe(struct phy_device *phydev); +#else +static inline void vsc85xx_link_change_notify(struct phy_device *phydev) +{ +} +static inline int vsc8584_ptp_init(struct phy_device *phydev) +{ + return 0; +} +static inline int vsc8584_ptp_probe(struct phy_device *phydev) +{ + return 0; +} +#endif + #endif /* _MSCC_PHY_H_ */ diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 052a0def6e83..ea004712540f 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1299,10 +1299,26 @@ static void vsc8584_get_base_addr(struct phy_device *phydev) __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); mutex_unlock(&phydev->mdio.bus->mdio_lock); - if (val & PHY_ADDR_REVERSED) + /* In the package, there are two pairs of PHYs (PHY0 + PHY2 and + * PHY1 + PHY3). The first PHY of each pair (PHY0 and PHY1) is + * the base PHY for timestamping operations. + */ + vsc8531->ts_base_addr = phydev->mdio.addr; + vsc8531->ts_base_phy = addr; + + if (val & PHY_ADDR_REVERSED) { vsc8531->base_addr = phydev->mdio.addr + addr; - else + if (addr > 1) { + vsc8531->ts_base_addr += 2; + vsc8531->ts_base_phy += 2; + } + } else { vsc8531->base_addr = phydev->mdio.addr - addr; + if (addr > 1) { + vsc8531->ts_base_addr -= 2; + vsc8531->ts_base_phy -= 2; + } + } vsc8531->addr = addr; } @@ -1418,6 +1434,10 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) return ret; + ret = vsc8584_ptp_init(phydev); + if (ret) + goto err; + phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1); @@ -1999,6 +2019,7 @@ static int vsc8584_probe(struct phy_device *phydev) u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY, VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY, VSC8531_DUPLEX_COLLISION}; + int ret; if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) { dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n"); @@ -2024,6 +2045,10 @@ static int vsc8584_probe(struct phy_device *phydev) if (!vsc8531->stats) return -ENOMEM; + ret = vsc8584_ptp_probe(phydev); + if (ret) + return ret; + return vsc85xx_dt_led_modes_get(phydev, default_mode); } @@ -2403,6 +2428,7 @@ static struct phy_driver vsc85xx_driver[] = { .get_sset_count = &vsc85xx_get_sset_count, .get_strings = &vsc85xx_get_strings, .get_stats = &vsc85xx_get_stats, + .link_change_notify = &vsc85xx_link_change_notify, } }; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c new file mode 100644 index 000000000000..73f1a4796435 --- /dev/null +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -0,0 +1,1010 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support + * + * Authors: Quentin Schulz & Antoine Tenart + * License: Dual MIT/GPL + * Copyright (c) 2020 Microsemi Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mscc.h" +#include "mscc_ptp.h" + +/* Two PHYs share the same 1588 
processor and it's to be entirely configured + * through the base PHY of this processor. + */ +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val) +{ + struct vsc8531_private *priv = phydev->priv; + + WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock)); + return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum, + val); +} + +/* phydev->bus->mdio_lock should be locked when using this function */ +static int phy_ts_base_read(struct phy_device *phydev, u32 regnum) +{ + struct vsc8531_private *priv = phydev->priv; + + WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock)); + return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum); +} + +enum ts_blk_hw { + INGRESS_ENGINE_0, + EGRESS_ENGINE_0, + INGRESS_ENGINE_1, + EGRESS_ENGINE_1, + INGRESS_ENGINE_2, + EGRESS_ENGINE_2, + PROCESSOR_0, + PROCESSOR_1, +}; + +enum ts_blk { + INGRESS, + EGRESS, + PROCESSOR, +}; + +static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk, + u16 addr) +{ + struct vsc8531_private *priv = phydev->priv; + bool base_port = phydev->mdio.addr == priv->ts_base_addr; + u32 val, cnt = 0; + enum ts_blk_hw blk_hw; + + switch (blk) { + case INGRESS: + blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1; + break; + case EGRESS: + blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1; + break; + case PROCESSOR: + blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1; + break; + } + + mutex_lock(&phydev->mdio.bus->mdio_lock); + + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588); + + phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE | + BIU_ADDR_READ | BIU_BLK_ID(blk_hw) | + BIU_CSR_ADDR(addr)); + + do { + val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL); + } while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX); + + val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB); + val <<= 16; + val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB); + + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + return val; +} + +static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk, + u16 addr, u32 val) +{ + struct vsc8531_private *priv = phydev->priv; + bool base_port = phydev->mdio.addr == priv->ts_base_addr; + u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16; + bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL || + addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK || + addr == MSCC_PHY_1588_VSC85XX_INT_MASK || + addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS || + addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) && + blk == PROCESSOR; + enum ts_blk_hw blk_hw; + + switch (blk) { + case INGRESS: + blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1; + break; + case EGRESS: + blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1; + break; + case PROCESSOR: + default: + blk_hw = base_port ? 
PROCESSOR_0 : PROCESSOR_1; + break; + } + + mutex_lock(&phydev->mdio.bus->mdio_lock); + + bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL); + + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588); + + if (!cond || (cond && upper)) + phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper); + + phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower); + + phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE | + BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) | + BIU_CSR_ADDR(addr)); + + do { + reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL); + } while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX); + + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); + + if (cond && upper) + phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass); + + mutex_unlock(&phydev->mdio.bus->mdio_lock); +} + +/* Pick bytes from PTP header */ +#define PTP_HEADER_TRNSP_MSG 26 +#define PTP_HEADER_DOMAIN_NUM 25 +#define PTP_HEADER_BYTE_8_31(x) (31 - (x)) +#define MAC_ADDRESS_BYTE(x) ((x) + (35 - ETH_ALEN + 1)) + +static int vsc85xx_ts_fsb_init(struct phy_device *phydev) +{ + u8 sig_sel[16] = {}; + signed char i, pos = 0; + + /* Seq ID is 2B long and starts at 30th byte */ + for (i = 1; i >= 0; i--) + sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i); + + /* DomainNum */ + sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM; + + /* MsgType */ + sig_sel[pos++] = PTP_HEADER_TRNSP_MSG; + + /* MAC address is 6B long */ + for (i = ETH_ALEN - 1; i >= 0; i--) + sig_sel[pos++] = MAC_ADDRESS_BYTE(i); + + /* Fill the last bytes of the signature to reach a 16B signature */ + for (; pos < ARRAY_SIZE(sig_sel); pos++) + sig_sel[pos] = PTP_HEADER_TRNSP_MSG; + + for (i = 0; i <= 2; i++) { + u32 val = 0; + + for (pos = i * 5 + 4; pos >= i * 5; pos--) + val = (val << 6) | sig_sel[pos]; + + vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i), + val); + } + + vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3), + sig_sel[15]); + + return 0; +} + +static const u32 vsc85xx_egr_latency[] = { + /* Copper Egress */ + 1272, /* 1000Mbps */ + 12516, /* 100Mbps */ + 125444, /* 10Mbps */ + /* Fiber Egress */ + 1277, /* 1000Mbps */ + 12537, /* 100Mbps */ +}; + +static const u32 vsc85xx_egr_latency_macsec[] = { + /* Copper Egress ON */ + 3496, /* 1000Mbps */ + 34760, /* 100Mbps */ + 347844, /* 10Mbps */ + /* Fiber Egress ON */ + 3502, /* 1000Mbps */ + 34780, /* 100Mbps */ +}; + +static const u32 vsc85xx_ingr_latency[] = { + /* Copper Ingress */ + 208, /* 1000Mbps */ + 304, /* 100Mbps */ + 2023, /* 10Mbps */ + /* Fiber Ingress */ + 98, /* 1000Mbps */ + 197, /* 100Mbps */ +}; + +static const u32 vsc85xx_ingr_latency_macsec[] = { + /* Copper Ingress */ + 2408, /* 1000Mbps */ + 22300, /* 100Mbps */ + 222009, /* 10Mbps */ + /* Fiber Ingress */ + 2299, /* 1000Mbps */ + 22192, /* 100Mbps */ +}; + +static void vsc85xx_ts_set_latencies(struct phy_device *phydev) +{ + u32 val, ingr_latency, egr_latency; + u8 idx; + + /* No need to set latencies of packets if the PHY is not connected */ + if (!phydev->link) + return; + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY, + STALL_EGR_LATENCY(phydev->speed)); + + switch (phydev->speed) { + case SPEED_100: + idx = 1; + break; + case SPEED_1000: + idx = 0; + break; + default: + idx = 2; + break; + } + + ingr_latency = IS_ENABLED(CONFIG_MACSEC) ? + vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx]; + egr_latency = IS_ENABLED(CONFIG_MACSEC) ? 
+ vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx]; + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY, + PTP_INGR_LOCAL_LATENCY(ingr_latency)); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_TSP_CTRL); + val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL, + val); + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY, + PTP_EGR_LOCAL_LATENCY(egr_latency)); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL); + val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val); +} + +static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk) +{ + u8 i; + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM, + IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2)); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM, + IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2)); + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0); + + for (i = 0; i < COMP_MAX_FLOWS; i++) { + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i), + IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i), + IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i), + ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i), + ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i), + MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1); + + if (i >= PTP_COMP_MAX_FLOWS) + continue; + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i), + 0); + } + + return 0; +} + +static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev) +{ + u32 val; + + val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT); + val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK; + val |= ANA_ETH1_NTX_PROT_SIG_OFF(0); + vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val); + + val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG); + val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK; + val |= ANA_FSB_ADDR_FROM_ETH1; + vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val); + + return 0; +} + +static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + bool base = phydev->mdio.addr == vsc8531->ts_base_addr; + enum vsc85xx_ptp_msg_type msgs[] = { + PTP_MSG_TYPE_SYNC, + 
PTP_MSG_TYPE_DELAY_REQ + }; + u32 val; + u8 i; + + for (i = 0; i < ARRAY_SIZE(msgs); i++) { + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), + base ? PTP_FLOW_VALID_CH0 : + PTP_FLOW_VALID_CH1); + + val = vsc85xx_ts_read_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i)); + val &= ~PTP_FLOW_DOMAIN_RANGE_ENA; + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val); + + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), + msgs[i] << 24); + + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_MASK_UPPER(i), + PTP_FLOW_MSG_TYPE_MASK); + } + + return 0; +} + +static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + bool base = phydev->mdio.addr == vsc8531->ts_base_addr; + u32 val; + + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID, + ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD)); + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), + base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0), + ANA_ETH1_FLOW_MATCH_VLAN_TAG2); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0); + + val = vsc85xx_ts_read_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_MATCH_MODE(0)); + val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK; + val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0), + val); + + return 0; +} + +static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + bool base = phydev->mdio.addr == vsc8531->ts_base_addr; + u32 val; + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER, + PTP_EV_PORT); + /* Match on dest port only, ignore src */ + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER, + 0xffff); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER, + 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0); + + val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0)); + val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK; + val |= base ? 
IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val); + + /* Match all IPs */ + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0), + 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0), + 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0), + 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0), + 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0); + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0); + + return 0; +} + +static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk, + u32 next_comp, u32 etype) +{ + u32 val; + + val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT); + val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK; + val |= next_comp; + vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val); + + val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) | + ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA; + vsc85xx_ts_write_csr(phydev, blk, + MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val); + + return 0; +} + +static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk, + u32 next_comp, u32 header) +{ + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, + ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) | + next_comp); + + return 0; +} + +static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk, u8 flow, enum ptp_cmd cmd) +{ + u32 val; + + /* Check non-zero reserved field */ + val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK; + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val); + + val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) | + PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) | + PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ? 
+ PTP_NOP : cmd); + if (cmd == PTP_SAVE_IN_TS_FIFO) + val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME; + else if (cmd == PTP_WRITE_NS) + val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE | + PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow), + val); + + if (cmd == PTP_WRITE_1588) + /* Rewrite timestamp directly in frame */ + val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) | + PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10); + else if (cmd == PTP_SAVE_IN_TS_FIFO) + /* no rewrite */ + val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) | + PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0); + else + /* Write in reserved field */ + val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) | + PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val); + + return 0; +} + +static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk, + bool one_step, bool enable) +{ + enum vsc85xx_ptp_msg_type msgs[] = { + PTP_MSG_TYPE_SYNC, + PTP_MSG_TYPE_DELAY_REQ + }; + u32 val; + u8 i; + + for (i = 0; i < ARRAY_SIZE(msgs); i++) { + if (blk == INGRESS) + vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i], + PTP_WRITE_NS); + else if (msgs[i] == PTP_MSG_TYPE_SYNC && one_step) + /* no need to know Sync t when sending in one_step */ + vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i], + PTP_WRITE_1588); + else + vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i], + PTP_SAVE_IN_TS_FIFO); + + val = vsc85xx_ts_read_csr(phydev, blk, + MSCC_ANA_PTP_FLOW_ENA(i)); + val &= ~PTP_FLOW_ENA; + if (enable) + val |= PTP_FLOW_ENA; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), + val); + } + + return 0; +} + +static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk, + bool enable) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST; + + if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) { + /* PTP over Ethernet multicast address for SYNC and DELAY msg */ + u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00}; + + val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR | + get_unaligned_be16(&ptp_multicast[4]); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), + get_unaligned_be32(ptp_multicast)); + } else { + val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST; + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val); + vsc85xx_ts_write_csr(phydev, blk, + MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0); + } + + val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0)); + val &= ~ETH1_FLOW_ENA; + if (enable) + val |= ETH1_FLOW_ENA; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val); + + return 0; +} + +static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk, + bool enable) +{ + u32 val; + + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE, + ANA_IP1_NXT_PROT_IPV4 | + ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4); + + /* Matching UDP protocol number */ + val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) | + ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) | + ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9); + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1, + val); + + /* End of IP protocol, start of next protocol (UDP) */ + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2, + ANA_IP1_NXT_PROT_OFFSET2(20)); + + val = vsc85xx_ts_read_csr(phydev, blk, + MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM); + 
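+	/* The offsets below assume an IPv4 header without options: the UDP header starts 20 bytes into the packet (the offset programmed just above) and its checksum field sits a further 6 bytes into the UDP header, hence the value 26. */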
val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK | + IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK); + val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2); + + val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE | + IP1_NXT_PROT_UDP_CHKSUM_CLEAR); + /* UDP checksum offset in IPv4 packet + * according to: https://tools.ietf.org/html/rfc768 + */ + val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM, + val); + + val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0)); + val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA); + val |= IP1_FLOW_MATCH_DEST_SRC_ADDR; + if (enable) + val |= IP1_FLOW_ENA; + vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val); + + return 0; +} + +static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr; + u8 eng_id = base ? 0 : 1; + u32 val; + + ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ANALYZER_MODE); + /* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */ + val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) | + PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id))); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE, + val); + + if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) { + vsc85xx_eth1_next_comp(phydev, INGRESS, + ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588); + vsc85xx_eth1_next_comp(phydev, EGRESS, + ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588); + } else { + vsc85xx_eth1_next_comp(phydev, INGRESS, + ANA_ETH1_NTX_PROT_IP_UDP_ACH_1, + ETH_P_IP); + vsc85xx_eth1_next_comp(phydev, EGRESS, + ANA_ETH1_NTX_PROT_IP_UDP_ACH_1, + ETH_P_IP); + /* Header length of IPv[4/6] + UDP */ + vsc85xx_ip1_next_comp(phydev, INGRESS, + ANA_ETH1_NTX_PROT_PTP_OAM, 28); + vsc85xx_ip1_next_comp(phydev, EGRESS, + ANA_ETH1_NTX_PROT_PTP_OAM, 28); + } + + vsc85xx_eth1_conf(phydev, INGRESS, + vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE); + vsc85xx_ip1_conf(phydev, INGRESS, + ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE); + vsc85xx_ptp_conf(phydev, INGRESS, one_step, + vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE); + + vsc85xx_eth1_conf(phydev, EGRESS, + vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF); + vsc85xx_ip1_conf(phydev, EGRESS, + ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF); + vsc85xx_ptp_conf(phydev, EGRESS, one_step, + vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF); + + val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)); + if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF) + val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)); + + val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)); + if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE) + val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)); + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE, + val); + + return 0; +} + +void vsc85xx_link_change_notify(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + mutex_lock(&priv->ts_lock); + vsc85xx_ts_set_latencies(phydev); + mutex_unlock(&priv->ts_lock); +} + +static void vsc85xx_ts_reset_fifo(struct phy_device *phydev) +{ + u32 val; + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_TS_FIFO_CTRL); + val |= PTP_EGR_TS_FIFO_RESET; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL, + val); + + val &= ~PTP_EGR_TS_FIFO_RESET; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL, + val); 
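+	/* The reset bit is raised and then cleared again so the egress timestamp FIFO is left empty and ready to fill. */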
+} + +static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + + if (vsc8531->ts_base_addr != phydev->mdio.addr) { + struct mdio_device *dev; + + dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr]; + phydev = container_of(dev, struct phy_device, mdio); + + return phydev->priv; + } + + return vsc8531; +} + +static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev); + + return vsc8531->input_clk_init; +} + +static void vsc8584_set_input_clk_configured(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev); + + vsc8531->input_clk_init = true; +} + +static int __vsc8584_init_ptp(struct phy_device *phydev) +{ + u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; + u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; + u32 val; + + if (!vsc8584_is_1588_input_clk_configured(phydev)) { + mutex_lock(&phydev->mdio.bus->mdio_lock); + + /* 1588_DIFF_INPUT_CLK configuration: Use an external clock for + * the LTC, as per 3.13.29 in the VSC8584 datasheet. + */ + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_1588); + phy_ts_base_write(phydev, 29, 0x7ae0); + phy_ts_base_write(phydev, 30, 0xb71c); + phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + + mutex_unlock(&phydev->mdio.bus->mdio_lock); + + vsc8584_set_input_clk_configured(phydev); + } + + /* Disable predictor before configuring the 1588 block */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_PREDICTOR); + val &= ~PTP_INGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR, + val); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_PREDICTOR); + val &= ~PTP_EGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR, + val); + + /* By default, the internal clock of fixed rate 250MHz is used */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL); + val &= ~PTP_LTC_CTRL_CLK_SEL_MASK; + val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE); + val &= ~PTP_LTC_SEQUENCE_A_MASK; + val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ); + val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB); + if (ltc_seq_e[PHC_CLK_250MHZ]) + val |= PTP_LTC_SEQ_ADD_SUB; + val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val); + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ, + PPS_WIDTH_ADJ); + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO, + IS_ENABLED(CONFIG_MACSEC) ? + PTP_INGR_DELAY_FIFO_DEPTH_MACSEC : + PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT); + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO, + IS_ENABLED(CONFIG_MACSEC) ? 
+ PTP_EGR_DELAY_FIFO_DEPTH_MACSEC : + PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT); + + /* Enable n-phase sampler for Viper Rev-B */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ACCUR_CFG_STATUS); + val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS | + PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS | + PTP_ACCUR_LOAD_SAVE_BYPASS); + val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE | + PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE | + PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE | + PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE | + PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ACCUR_CFG_STATUS); + val |= PTP_ACCUR_CALIB_TRIGG; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ACCUR_CFG_STATUS); + val &= ~PTP_ACCUR_CALIB_TRIGG; + val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE | + PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE | + PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE | + PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE | + PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ACCUR_CFG_STATUS); + val |= PTP_ACCUR_CALIB_TRIGG; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ACCUR_CFG_STATUS); + val &= ~PTP_ACCUR_CALIB_TRIGG; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS, + val); + + /* Do not access FIFO via SI */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_TSTAMP_FIFO_SI); + val &= ~PTP_TSTAMP_FIFO_SI_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_REWRITER_CTRL); + val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL, + val); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_REWRITER_CTRL); + val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL, + val); + + /* Put the flag that indicates the frame has been modified to bit 7 */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_REWRITER_CTRL); + val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL, + val); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_REWRITER_CTRL); + val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7); + val &= ~PTP_EGR_REWRITER_FLAG_VAL; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL, + val); + + /* 30bit mode for RX timestamp, only the nanoseconds are kept in + * reserved field. 
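+	 * Thirty bits are enough for a nanoseconds value (always below 10^9) and fit in the 32-bit reserved field of the PTP header.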
+ */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_TSP_CTRL); + val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL, + val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL); + val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_SERIAL_TOD_IFACE); + val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE, + val); + + vsc85xx_ts_fsb_init(phydev); + + /* Set the Egress timestamp FIFO configuration and status register */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_TS_FIFO_CTRL); + val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK); + /* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */ + val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL, + val); + + vsc85xx_ts_reset_fifo(phydev); + + val = PTP_IFACE_CTRL_CLK_ENA; + if (!IS_ENABLED(CONFIG_MACSEC)) + val |= PTP_IFACE_CTRL_GMII_PROT; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val); + + vsc85xx_ts_set_latencies(phydev); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL); + val |= PTP_IFACE_CTRL_EGR_BYPASS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val); + + vsc85xx_ts_disable_flows(phydev, EGRESS); + vsc85xx_ts_disable_flows(phydev, INGRESS); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_ANALYZER_MODE); + /* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */ + val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK | + PTP_ANALYZER_MODE_INGR_ENA_MASK | + PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK | + PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK); + /* Strict matching in flow (packets should match flows from the same + * index in all enabled comparators (except PTP)). + */ + val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) | + PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE, + val); + + /* Initialized for ingress and egress flows: + * - The Ethernet comparator. + * - The IP comparator. + * - The PTP comparator. 
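+	 * (They are programmed in 1588 engine 0 for the base PHY of the pair and in engine 1 for the other PHY.)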
+ */ + vsc85xx_eth_cmp1_init(phydev, INGRESS); + vsc85xx_ip_cmp1_init(phydev, INGRESS); + vsc85xx_ptp_cmp_init(phydev, INGRESS); + vsc85xx_eth_cmp1_init(phydev, EGRESS); + vsc85xx_ip_cmp1_init(phydev, EGRESS); + vsc85xx_ptp_cmp_init(phydev, EGRESS); + + vsc85xx_ts_eth_cmp1_sig(phydev); + + return 0; +} + +int vsc8584_ptp_init(struct phy_device *phydev) +{ + switch (phydev->phy_id & phydev->drv->phy_id_mask) { + case PHY_ID_VSC8575: + case PHY_ID_VSC8582: + case PHY_ID_VSC8584: + return __vsc8584_init_ptp(phydev); + } + + return 0; +} + +int vsc8584_ptp_probe(struct phy_device *phydev) +{ + struct vsc8531_private *vsc8531 = phydev->priv; + + vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp), + GFP_KERNEL); + if (!vsc8531->ptp) + return -ENOMEM; + + mutex_init(&vsc8531->ts_lock); + + vsc8531->ptp->phydev = phydev; + + return 0; +} diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h new file mode 100644 index 000000000000..3ea163af0f4f --- /dev/null +++ b/drivers/net/phy/mscc/mscc_ptp.h @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Driver for Microsemi VSC85xx PHYs + * + * Copyright (c) 2020 Microsemi Corporation + */ + +#ifndef _MSCC_PHY_PTP_H_ +#define _MSCC_PHY_PTP_H_ + +/* 1588 page Registers */ +#define MSCC_PHY_TS_BIU_ADDR_CNTL 16 +#define BIU_ADDR_EXE 0x8000 +#define BIU_ADDR_READ 0x4000 +#define BIU_ADDR_WRITE 0x0000 +#define BIU_BLK_ID(x) ((x) << 11) +#define BIU_CSR_ADDR(x) (x) +#define BIU_ADDR_CNT_MAX 8 + +#define MSCC_PHY_TS_CSR_DATA_LSB 17 +#define MSCC_PHY_TS_CSR_DATA_MSB 18 + +#define MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS 0x002d +#define MSCC_PHY_1588_VSC85XX_INT_STATUS 0x004d +#define VSC85XX_1588_INT_FIFO_ADD 0x0004 +#define VSC85XX_1588_INT_FIFO_OVERFLOW 0x0001 + +#define MSCC_PHY_1588_INGR_VSC85XX_INT_MASK 0x002e +#define MSCC_PHY_1588_VSC85XX_INT_MASK 0x004e +#define VSC85XX_1588_INT_MASK_MASK (VSC85XX_1588_INT_FIFO_ADD | \ + VSC85XX_1588_INT_FIFO_OVERFLOW) + +/* TS CSR addresses */ +#define MSCC_PHY_ANA_ETH1_NTX_PROT 0x0000 +#define ANA_ETH1_NTX_PROT_SIG_OFF_MASK GENMASK(20, 16) +#define ANA_ETH1_NTX_PROT_SIG_OFF(x) (((x) << 16) & ANA_ETH1_NTX_PROT_SIG_OFF_MASK) +#define ANA_ETH1_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0) +#define ANA_ETH1_NTX_PROT_PTP_OAM 0x0005 +#define ANA_ETH1_NTX_PROT_MPLS 0x0004 +#define ANA_ETH1_NTX_PROT_IP_UDP_ACH_2 0x0003 +#define ANA_ETH1_NTX_PROT_IP_UDP_ACH_1 0x0002 +#define ANA_ETH1_NTX_PROT_ETH2 0x0001 + +#define MSCC_PHY_PTP_IFACE_CTRL 0x0000 +#define PTP_IFACE_CTRL_CLK_ENA 0x0040 +#define PTP_IFACE_CTRL_INGR_BYPASS 0x0008 +#define PTP_IFACE_CTRL_EGR_BYPASS 0x0004 +#define PTP_IFACE_CTRL_MII_PROT 0x0003 +#define PTP_IFACE_CTRL_GMII_PROT 0x0002 +#define PTP_IFACE_CTRL_XGMII_64_PROT 0x0000 + +#define MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID 0x0001 +#define ANA_ETH1_NTX_PROT_VLAN_TPID_MASK GENMASK(31, 16) +#define ANA_ETH1_NTX_PROT_VLAN_TPID(x) (((x) << 16) & ANA_ETH1_NTX_PROT_VLAN_TPID_MASK) + +#define MSCC_PHY_PTP_ANALYZER_MODE 0x0001 +#define PTP_ANA_SPLIT_ENCAP_FLOW 0x1000000 +#define PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK GENMASK(22, 20) +#define PTP_ANA_EGR_ENCAP_FLOW_MODE(x) (((x) << 20) & PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK) +#define PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK GENMASK(18, 16) +#define PTP_ANA_INGR_ENCAP_FLOW_MODE(x) (((x) << 16) & PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK) +#define PTP_ANALYZER_MODE_EGR_ENA_MASK GENMASK(6, 4) +#define PTP_ANALYZER_MODE_EGR_ENA(x) (((x) << 4) & PTP_ANALYZER_MODE_EGR_ENA_MASK) +#define PTP_ANALYZER_MODE_INGR_ENA_MASK GENMASK(2, 0) +#define 
PTP_ANALYZER_MODE_INGR_ENA(x) ((x) & PTP_ANALYZER_MODE_INGR_ENA_MASK) + +#define MSCC_PHY_ANA_ETH1_NXT_PROT_TAG 0x0002 +#define ANA_ETH1_NXT_PROT_TAG_ENA 0x0001 + +#define MSCC_PHY_PTP_MODE_CTRL 0x0002 +#define PTP_MODE_CTRL_MODE_MASK GENMASK(2, 0) +#define PTP_MODE_CTRL_PKT_MODE 0x0004 + +#define MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH 0x0003 +#define ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA 0x10000 +#define ANA_ETH1_NXT_PROT_ETYPE_MATCH_MASK GENMASK(15, 0) +#define ANA_ETH1_NXT_PROT_ETYPE_MATCH(x) ((x) & ANA_ETH1_NXT_PROT_ETYPE_MATCH_MASK) + +#define MSCC_PHY_PTP_VERSION_CODE 0x0003 +#define PTP_IP_VERSION_MASK GENMASK(7, 0) +#define PTP_IP_VERSION_2_1 0x0021 + +#define MSCC_ANA_ETH1_FLOW_ENA(x) (0x0010 + ((x) << 4)) +#define ETH1_FLOW_ENA_CHANNEL_MASK_MASK GENMASK(9, 8) +#define ETH1_FLOW_ENA_CHANNEL_MASK(x) (((x) << 8) & ETH1_FLOW_ENA_CHANNEL_MASK_MASK) +#define ETH1_FLOW_VALID_CH1 ETH1_FLOW_ENA_CHANNEL_MASK(2) +#define ETH1_FLOW_VALID_CH0 ETH1_FLOW_ENA_CHANNEL_MASK(1) +#define ETH1_FLOW_ENA 0x0001 + +#define MSCC_ANA_ETH1_FLOW_MATCH_MODE(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 1) +#define ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK GENMASK(7, 6) +#define ANA_ETH1_FLOW_MATCH_VLAN_TAG(x) (((x) << 6) & ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK) +#define ANA_ETH1_FLOW_MATCH_VLAN_TAG2 0x0200 +#define ANA_ETH1_FLOW_MATCH_VLAN_VERIFY 0x0010 + +#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 2) + +#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3) +#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20) +#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000 +#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000 +#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16) +#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000 +#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC 0x010000 +#define ANA_ETH1_FLOW_ADDR_MATCH2_DEST 0x000000 + +#define MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 4) +#define MSCC_ANA_ETH1_FLOW_VLAN_TAG1(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 5) +#define MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 6) + +#define MSCC_PHY_PTP_LTC_CTRL 0x0010 +#define PTP_LTC_CTRL_CLK_SEL_MASK GENMASK(14, 12) +#define PTP_LTC_CTRL_CLK_SEL(x) (((x) << 12) & PTP_LTC_CTRL_CLK_SEL_MASK) +#define PTP_LTC_CTRL_CLK_SEL_INTERNAL_250 PTP_LTC_CTRL_CLK_SEL(5) +#define PTP_LTC_CTRL_AUTO_ADJ_UPDATE 0x0010 +#define PTP_LTC_CTRL_ADD_SUB_1NS_REQ 0x0008 +#define PTP_LTC_CTRL_ADD_1NS 0x0004 +#define PTP_LTC_CTRL_SAVE_ENA 0x0002 +#define PTP_LTC_CTRL_LOAD_ENA 0x0001 + +#define MSCC_PHY_PTP_LTC_LOAD_SEC_MSB 0x0011 +#define PTP_LTC_LOAD_SEC_MSB(x) (((x) & GENMASK_ULL(47, 32)) >> 32) + +#define MSCC_PHY_PTP_LTC_LOAD_SEC_LSB 0x0012 +#define PTP_LTC_LOAD_SEC_LSB(x) ((x) & GENMASK(31, 0)) + +#define MSCC_PHY_PTP_LTC_LOAD_NS 0x0013 +#define PTP_LTC_LOAD_NS(x) ((x) & GENMASK(31, 0)) + +#define MSCC_PHY_PTP_LTC_SAVED_SEC_MSB 0x0014 +#define MSCC_PHY_PTP_LTC_SAVED_SEC_LSB 0x0015 +#define MSCC_PHY_PTP_LTC_SAVED_NS 0x0016 + +#define MSCC_PHY_PTP_LTC_SEQUENCE 0x0017 +#define PTP_LTC_SEQUENCE_A_MASK GENMASK(3, 0) +#define PTP_LTC_SEQUENCE_A(x) ((x) & PTP_LTC_SEQUENCE_A_MASK) + +#define MSCC_PHY_PTP_LTC_SEQ 0x0018 +#define PTP_LTC_SEQ_ADD_SUB 0x80000 +#define PTP_LTC_SEQ_ERR_MASK GENMASK(18, 0) +#define PTP_LTC_SEQ_ERR(x) ((x) & PTP_LTC_SEQ_ERR_MASK) + +#define MSCC_PHY_PTP_LTC_AUTO_ADJ 0x001a +#define PTP_AUTO_ADJ_NS_ROLLOVER(x) ((x) & GENMASK(29, 0)) +#define PTP_AUTO_ADJ_ADD_SUB_1NS_MASK GENMASK(31, 30) +#define PTP_AUTO_ADJ_SUB_1NS 0x80000000 +#define PTP_AUTO_ADJ_ADD_1NS 
0x40000000 + +#define MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ 0x001b +#define PTP_LTC_1PPS_WIDTH_ADJ_MASK GENMASK(29, 0) + +#define MSCC_PHY_PTP_TSTAMP_FIFO_SI 0x0020 +#define PTP_TSTAMP_FIFO_SI_EN 0x0001 + +#define MSCC_PHY_PTP_INGR_PREDICTOR 0x0022 +#define PTP_INGR_PREDICTOR_EN 0x0001 + +#define MSCC_PHY_PTP_EGR_PREDICTOR 0x0026 +#define PTP_EGR_PREDICTOR_EN 0x0001 + +#define MSCC_PHY_PTP_INGR_TSP_CTRL 0x0035 +#define PHY_PTP_INGR_TSP_CTRL_FRACT_NS 0x0004 +#define PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS 0x0001 + +#define MSCC_PHY_PTP_INGR_LOCAL_LATENCY 0x0037 +#define PTP_INGR_LOCAL_LATENCY_MASK GENMASK(22, 0) +#define PTP_INGR_LOCAL_LATENCY(x) ((x) & PTP_INGR_LOCAL_LATENCY_MASK) + +#define MSCC_PHY_PTP_INGR_DELAY_FIFO 0x003a +#define PTP_INGR_DELAY_FIFO_DEPTH_MACSEC 0x0013 +#define PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT 0x000f + +#define MSCC_PHY_PTP_INGR_TS_FIFO(x) (0x005c + (x)) +#define PTP_INGR_TS_FIFO_EMPTY 0x80000000 + +#define MSCC_PHY_PTP_INGR_REWRITER_CTRL 0x0044 +#define PTP_INGR_REWRITER_REDUCE_PREAMBLE 0x0010 +#define PTP_INGR_REWRITER_FLAG_VAL 0x0008 +#define PTP_INGR_REWRITER_FLAG_BIT_OFF_M GENMASK(2, 0) +#define PTP_INGR_REWRITER_FLAG_BIT_OFF(x) ((x) & PTP_INGR_REWRITER_FLAG_BIT_OFF_M) + +#define MSCC_PHY_PTP_EGR_STALL_LATENCY 0x004f + +#define MSCC_PHY_PTP_EGR_TSP_CTRL 0x0055 +#define PHY_PTP_EGR_TSP_CTRL_FRACT_NS 0x0004 +#define PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS 0x0001 + +#define MSCC_PHY_PTP_EGR_LOCAL_LATENCY 0x0057 +#define PTP_EGR_LOCAL_LATENCY_MASK GENMASK(22, 0) +#define PTP_EGR_LOCAL_LATENCY(x) ((x) & PTP_EGR_LOCAL_LATENCY_MASK) + +#define MSCC_PHY_PTP_EGR_DELAY_FIFO 0x005a +#define PTP_EGR_DELAY_FIFO_DEPTH_MACSEC 0x0013 +#define PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT 0x000f + +#define MSCC_PHY_PTP_EGR_TS_FIFO_CTRL 0x005b +#define PTP_EGR_TS_FIFO_RESET 0x10000 +#define PTP_EGR_FIFO_LEVEL_LAST_READ_MASK GENMASK(15, 12) +#define PTP_EGR_FIFO_LEVEL_LAST_READ(x) (((x) & PTP_EGR_FIFO_LEVEL_LAST_READ_MASK) >> 12) +#define PTP_EGR_TS_FIFO_THRESH_MASK GENMASK(11, 8) +#define PTP_EGR_TS_FIFO_THRESH(x) (((x) << 8) & PTP_EGR_TS_FIFO_THRESH_MASK) +#define PTP_EGR_TS_FIFO_SIG_BYTES_MASK GENMASK(4, 0) +#define PTP_EGR_TS_FIFO_SIG_BYTES(x) ((x) & PTP_EGR_TS_FIFO_SIG_BYTES_MASK) + +#define MSCC_PHY_PTP_EGR_TS_FIFO(x) (0x005c + (x)) +#define PTP_EGR_TS_FIFO_EMPTY 0x80000000 +#define PTP_EGR_TS_FIFO_0_MASK GENMASK(15, 0) + +#define MSCC_PHY_PTP_EGR_REWRITER_CTRL 0x0064 +#define PTP_EGR_REWRITER_REDUCE_PREAMBLE 0x0010 +#define PTP_EGR_REWRITER_FLAG_VAL 0x0008 +#define PTP_EGR_REWRITER_FLAG_BIT_OFF_M GENMASK(2, 0) +#define PTP_EGR_REWRITER_FLAG_BIT_OFF(x) ((x) & PTP_EGR_REWRITER_FLAG_BIT_OFF_M) + +#define MSCC_PHY_PTP_SERIAL_TOD_IFACE 0x006e +#define PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR 0x0004 + +#define MSCC_PHY_PTP_LTC_OFFSET 0x0070 +#define PTP_LTC_OFFSET_ADJ BIT(31) +#define PTP_LTC_OFFSET_ADD BIT(30) +#define PTP_LTC_OFFSET_VAL(x) (x) + +#define MSCC_PHY_PTP_ACCUR_CFG_STATUS 0x0074 +#define PTP_ACCUR_PPS_OUT_CALIB_ERR 0x20000 +#define PTP_ACCUR_PPS_OUT_CALIB_DONE 0x10000 +#define PTP_ACCUR_PPS_IN_CALIB_ERR 0x4000 +#define PTP_ACCUR_PPS_IN_CALIB_DONE 0x2000 +#define PTP_ACCUR_EGR_SOF_CALIB_ERR 0x1000 +#define PTP_ACCUR_EGR_SOF_CALIB_DONE 0x0800 +#define PTP_ACCUR_INGR_SOF_CALIB_ERR 0x0400 +#define PTP_ACCUR_INGR_SOF_CALIB_DONE 0x0200 +#define PTP_ACCUR_LOAD_SAVE_CALIB_ERR 0x0100 +#define PTP_ACCUR_LOAD_SAVE_CALIB_DONE 0x0080 +#define PTP_ACCUR_CALIB_TRIGG 0x0040 +#define PTP_ACCUR_PPS_OUT_BYPASS 0x0010 +#define PTP_ACCUR_PPS_IN_BYPASS 0x0008 +#define PTP_ACCUR_EGR_SOF_BYPASS 0x0004 +#define 
PTP_ACCUR_INGR_SOF_BYPASS 0x0002 +#define PTP_ACCUR_LOAD_SAVE_BYPASS 0x0001 + +#define MSCC_PHY_ANA_ETH2_NTX_PROT 0x0090 +#define ANA_ETH2_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0) +#define ANA_ETH2_NTX_PROT_PTP_OAM 0x0005 +#define ANA_ETH2_NTX_PROT_MPLS 0x0004 +#define ANA_ETH2_NTX_PROT_IP_UDP_ACH_2 0x0003 +#define ANA_ETH2_NTX_PROT_IP_UDP_ACH_1 0x0002 +#define ANA_ETH2_NTX_PROT_ETH2 0x0001 + +#define MSCC_PHY_ANA_ETH2_NXT_PROT_ETYPE_MATCH 0x0003 +#define ANA_ETH2_NXT_PROT_ETYPE_MATCH_ENA 0x10000 +#define ANA_ETH2_NXT_PROT_ETYPE_MATCH_MASK GENMASK(15, 0) +#define ANA_ETH2_NXT_PROT_ETYPE_MATCH(x) ((x) & ANA_ETH2_NXT_PROT_ETYPE_MATCH_MASK) + +#define MSCC_ANA_ETH2_FLOW_ENA(x) (0x00a0 + ((x) << 4)) +#define ETH2_FLOW_ENA_CHANNEL_MASK_MASK GENMASK(9, 8) +#define ETH2_FLOW_ENA_CHANNEL_MASK(x) (((x) << 8) & ETH2_FLOW_ENA_CHANNEL_MASK_MASK) +#define ETH2_FLOW_VALID_CH1 ETH2_FLOW_ENA_CHANNEL_MASK(2) +#define ETH2_FLOW_VALID_CH0 ETH2_FLOW_ENA_CHANNEL_MASK(1) + +#define MSCC_PHY_ANA_MPLS_COMP_NXT_COMP 0x0120 +#define ANA_MPLS_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0) +#define ANA_MPLS_NTX_PROT_PTP_OAM 0x0005 +#define ANA_MPLS_NTX_PROT_MPLS 0x0004 +#define ANA_MPLS_NTX_PROT_IP_UDP_ACH_2 0x0003 +#define ANA_MPLS_NTX_PROT_IP_UDP_ACH_1 0x0002 +#define ANA_MPLS_NTX_PROT_ETH2 0x0001 + +#define MSCC_ANA_MPLS_FLOW_CTRL(x) (0x0130 + ((x) << 4)) +#define MPLS_FLOW_CTRL_CHANNEL_MASK_MASK GENMASK(25, 24) +#define MPLS_FLOW_CTRL_CHANNEL_MASK(x) (((x) << 24) & MPLS_FLOW_CTRL_CHANNEL_MASK_MASK) +#define MPLS_FLOW_VALID_CH1 MPLS_FLOW_CTRL_CHANNEL_MASK(2) +#define MPLS_FLOW_VALID_CH0 MPLS_FLOW_CTRL_CHANNEL_MASK(1) + +#define MSCC_ANA_IP1_NXT_PROT_NXT_COMP 0x01b0 +#define ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR_MASK GENMASK(15, 8) +#define ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(x) (((x) << 8) & ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR_MASK) +#define ANA_IP1_NXT_PROT_NXT_COMP_PTP_OAM 0x0005 +#define ANA_IP1_NXT_PROT_NXT_COMP_IP_UDP_ACH2 0x0003 + +#define MSCC_ANA_IP1_NXT_PROT_IP1_MODE 0x01b1 +#define ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4 0x0c00 +#define ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV6 0x0800 +#define ANA_IP1_NXT_PROT_IPV6 0x0001 +#define ANA_IP1_NXT_PROT_IPV4 0x0000 + +#define MSCC_ANA_IP1_NXT_PROT_IP_MATCH1 0x01b2 +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF_MASK GENMASK(20, 16) +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(x) (((x) << 16) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF_MASK) +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK_MASK GENMASK(15, 8) +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(x) (((x) << 15) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK_MASK) +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH_MASK GENMASK(7, 0) +#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(x) ((x) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH_MASK) + +#define MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER 0x01b3 +#define MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER 0x01b4 +#define MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER 0x01b5 +#define MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER 0x01b6 + +#define MSCC_ANA_IP1_NXT_PROT_OFFSET2 0x01b7 +#define ANA_IP1_NXT_PROT_OFFSET2_MASK GENMASK(6, 0) +#define ANA_IP1_NXT_PROT_OFFSET2(x) ((x) & ANA_IP1_NXT_PROT_OFFSET2_MASK) + +#define MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM 0x01b8 +#define IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK GENMASK(15, 8) +#define IP1_NXT_PROT_UDP_CHKSUM_OFF(x) (((x) << 8) & IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK) +#define IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK GENMASK(5, 4) +#define IP1_NXT_PROT_UDP_CHKSUM_WIDTH(x) (((x) << 4) & IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK) +#define IP1_NXT_PROT_UDP_CHKSUM_UPDATE 0x0002 +#define IP1_NXT_PROT_UDP_CHKSUM_CLEAR 0x0001 + +#define 
MSCC_ANA_IP1_FLOW_ENA(x) (0x01c0 + ((x) << 4)) +#define IP1_FLOW_MATCH_ADDR_MASK GENMASK(9, 8) +#define IP1_FLOW_MATCH_DEST_SRC_ADDR 0x0200 +#define IP1_FLOW_MATCH_DEST_ADDR 0x0100 +#define IP1_FLOW_MATCH_SRC_ADDR 0x0000 +#define IP1_FLOW_ENA_CHANNEL_MASK_MASK GENMASK(5, 4) +#define IP1_FLOW_ENA_CHANNEL_MASK(x) (((x) << 4) & IP1_FLOW_ENA_CHANNEL_MASK_MASK) +#define IP1_FLOW_VALID_CH1 IP1_FLOW_ENA_CHANNEL_MASK(2) +#define IP1_FLOW_VALID_CH0 IP1_FLOW_ENA_CHANNEL_MASK(1) +#define IP1_FLOW_ENA 0x0001 + +#define MSCC_ANA_OAM_PTP_FLOW_ENA(x) (0x1e0 + ((x) << 4)) +#define MSCC_ANA_OAM_PTP_FLOW_MATCH_LOWER(x) (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 2) +#define MSCC_ANA_OAM_PTP_FLOW_MASK_LOWER(x) (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 4) + +#define MSCC_ANA_OAM_PTP_FLOW_PTP_0_FIELD(x) (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 8) + +#define MSCC_ANA_IP1_FLOW_MATCH_UPPER(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 1) +#define MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 2) +#define MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 3) +#define MSCC_ANA_IP1_FLOW_MATCH_LOWER(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 4) +#define MSCC_ANA_IP1_FLOW_MASK_UPPER(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 5) +#define MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 6) +#define MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 7) +#define MSCC_ANA_IP1_FLOW_MASK_LOWER(x) (MSCC_ANA_IP1_FLOW_ENA(x) + 8) + +#define MSCC_ANA_IP2_NXT_PROT_NXT_COMP 0x0240 +#define ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR_MASK GENMASK(15, 8) +#define ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR(x) (((x) << 8) & ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR_MASK) +#define ANA_IP2_NXT_PROT_NXT_COMP_PTP_OAM 0x0005 +#define ANA_IP2_NXT_PROT_NXT_COMP_IP_UDP_ACH2 0x0003 + +#define MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM 0x0248 +#define IP2_NXT_PROT_UDP_CHKSUM_OFF_MASK GENMASK(15, 8) +#define IP2_NXT_PROT_UDP_CHKSUM_OFF(x) (((x) << 8) & IP2_NXT_PROT_UDP_CHKSUM_OFF_MASK) +#define IP2_NXT_PROT_UDP_CHKSUM_WIDTH_MASK GENMASK(5, 4) +#define IP2_NXT_PROT_UDP_CHKSUM_WIDTH(x) (((x) << 4) & IP2_NXT_PROT_UDP_CHKSUM_WIDTH_MASK) + +#define MSCC_ANA_IP2_FLOW_ENA(x) (0x0250 + ((x) << 4)) +#define IP2_FLOW_ENA_CHANNEL_MASK_MASK GENMASK(5, 4) +#define IP2_FLOW_ENA_CHANNEL_MASK(x) (((x) << 4) & IP2_FLOW_ENA_CHANNEL_MASK_MASK) +#define IP2_FLOW_VALID_CH1 IP2_FLOW_ENA_CHANNEL_MASK(2) +#define IP2_FLOW_VALID_CH0 IP2_FLOW_ENA_CHANNEL_MASK(1) + +#define MSCC_ANA_PTP_FLOW_ENA(x) (0x02d0 + ((x) << 4)) +#define PTP_FLOW_ENA_CHANNEL_MASK_MASK GENMASK(5, 4) +#define PTP_FLOW_ENA_CHANNEL_MASK(x) (((x) << 4) & PTP_FLOW_ENA_CHANNEL_MASK_MASK) +#define PTP_FLOW_VALID_CH1 PTP_FLOW_ENA_CHANNEL_MASK(2) +#define PTP_FLOW_VALID_CH0 PTP_FLOW_ENA_CHANNEL_MASK(1) +#define PTP_FLOW_ENA 0x0001 + +#define MSCC_ANA_PTP_FLOW_MATCH_UPPER(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 1) +#define PTP_FLOW_MSG_TYPE_MASK 0x0F000000 +#define PTP_FLOW_MSG_PDELAY_RESP 0x04000000 +#define PTP_FLOW_MSG_PDELAY_REQ 0x02000000 +#define PTP_FLOW_MSG_DELAY_REQ 0x01000000 +#define PTP_FLOW_MSG_SYNC 0x00000000 + +#define MSCC_ANA_PTP_FLOW_MATCH_LOWER(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 2) +#define MSCC_ANA_PTP_FLOW_MASK_UPPER(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 3) +#define MSCC_ANA_PTP_FLOW_MASK_LOWER(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 4) + +#define MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 5) +#define PTP_FLOW_DOMAIN_RANGE_ENA 0x0001 + +#define MSCC_ANA_PTP_FLOW_PTP_ACTION(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 6) +#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE 0x10000000 +#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET_MASK 
GENMASK(26, 24) +#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(x) (((x) << 24) & PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET_MASK) +#define PTP_FLOW_PTP_ACTION_PTP_CMD_MASK GENMASK(3, 0) +#define PTP_FLOW_PTP_ACTION_PTP_CMD(x) ((x) & PTP_FLOW_PTP_ACTION_PTP_CMD_MASK) +#define PTP_FLOW_PTP_ACTION_SUB_DELAY_ASYM 0x00200000 +#define PTP_FLOW_PTP_ACTION_ADD_DELAY_ASYM 0x00100000 +#define PTP_FLOW_PTP_ACTION_TIME_OFFSET_MASK GENMASK(15, 10) +#define PTP_FLOW_PTP_ACTION_TIME_OFFSET(x) (((x) << 10) & PTP_FLOW_PTP_ACTION_TIME_OFFSET_MASK) +#define PTP_FLOW_PTP_ACTION_CORR_OFFSET_MASK GENMASK(9, 5) +#define PTP_FLOW_PTP_ACTION_CORR_OFFSET(x) (((x) << 5) & PTP_FLOW_PTP_ACTION_CORR_OFFSET_MASK) +#define PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME 0x00000010 + +#define MSCC_ANA_PTP_FLOW_PTP_ACTION2(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 7) +#define PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET_MASK GENMASK(15, 8) +#define PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(x) (((x) << 8) & PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET_MASK) +#define PTP_FLOW_PTP_ACTION2_REWRITE_BYTES_MASK GENMASK(3, 0) +#define PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(x) ((x) & PTP_FLOW_PTP_ACTION2_REWRITE_BYTES_MASK) + +#define MSCC_ANA_PTP_FLOW_PTP_0_FIELD(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 8) +#define PTP_FLOW_PTP_0_FIELD_PTP_FRAME 0x8000 +#define PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK 0x4000 +#define PTP_FLOW_PTP_0_FIELD_OFFSET_MASK GENMASK(13, 8) +#define PTP_FLOW_PTP_0_FIELD_OFFSET(x) (((x) << 8) & PTP_FLOW_PTP_0_FIELD_OFFSET_MASK) +#define PTP_FLOW_PTP_0_FIELD_BYTES_MASK GENMASK(3, 0) +#define PTP_FLOW_PTP_0_FIELD_BYTES(x) ((x) & PTP_FLOW_PTP_0_FIELD_BYTES_MASK) + +#define MSCC_ANA_PTP_IP_CHKSUM_SEL 0x0330 +#define ANA_PTP_IP_CHKSUM_SEL_IP_COMP_2 0x0001 +#define ANA_PTP_IP_CHKSUM_SEL_IP_COMP_1 0x0000 + +#define MSCC_PHY_ANA_FSB_CFG 0x331 +#define ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK GENMASK(1, 0) +#define ANA_FSB_ADDR_FROM_IP2 0x0003 +#define ANA_FSB_ADDR_FROM_IP1 0x0002 +#define ANA_FSB_ADDR_FROM_ETH2 0x0001 +#define ANA_FSB_ADDR_FROM_ETH1 0x0000 + +#define MSCC_PHY_ANA_FSB_REG(x) (0x332 + (x)) + +#define COMP_MAX_FLOWS 8 +#define PTP_COMP_MAX_FLOWS 6 + +#define PPS_WIDTH_ADJ 0x1dcd6500 +#define STALL_EGR_LATENCY(x) (1536000 / (x)) + +/* PHC clock available frequencies. 
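+ * Only the fixed 250 MHz internal clock (PHC_CLK_250MHZ, 4 ns period) is selected by __vsc8584_init_ptp() in this driver.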
*/ +enum { + PHC_CLK_125MHZ, + PHC_CLK_156_25MHZ, + PHC_CLK_200MHZ, + PHC_CLK_250MHZ, + PHC_CLK_500MHZ, +}; + +enum ptp_cmd { + PTP_NOP = 0, + PTP_WRITE_1588 = 5, + PTP_WRITE_NS = 7, + PTP_SAVE_IN_TS_FIFO = 11, /* invalid when writing in reg */ +}; + +enum vsc85xx_ptp_msg_type { + PTP_MSG_TYPE_SYNC, + PTP_MSG_TYPE_DELAY_REQ, +}; + +struct vsc85xx_ptphdr { + u8 tsmt; /* transportSpecific | messageType */ + u8 ver; /* reserved0 | versionPTP */ + __be16 msglen; + u8 domain; + u8 rsrvd1; + __be16 flags; + __be64 correction; + __be32 rsrvd2; + __be64 clk_identity; + __be16 src_port_id; + __be16 seq_id; + u8 ctrl; + u8 log_interval; +} __attribute__((__packed__)); + +/* Represents an entry in the timestamping FIFO */ +struct vsc85xx_ts_fifo { + u32 ns; + u64 secs:48; + u8 sig[16]; +} __attribute__((__packed__)); + +struct vsc85xx_ptp { + struct phy_device *phydev; + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + struct sk_buff_head tx_queue; + enum hwtstamp_tx_types tx_type; + enum hwtstamp_rx_filters rx_filter; + u8 configured:1; +}; + +#endif /* _MSCC_PHY_PTP_H_ */ From 7d272e63e0979d38a6256108adbe462d621c26c5 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:12 +0200 Subject: [PATCH 0279/2056] net: phy: mscc: timestamping and PHC support This patch adds support for PHC and timestamping operations for the MSCC PHY. PTP 1-step and 2-step modes are supported, over Ethernet and UDP. To get and set the PHC time, a GPIO has to be used and changes are only retrieved or committed when on a rising edge. The same GPIO is shared by all PHYs, so the granularity of the lock protecting it has to be different from the ones protecting the 1588 registers (the VSC8584 PHY has 2 1588 blocks, and a single load/save pin). Co-developed-by: Quentin Schulz Signed-off-by: Quentin Schulz Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc.h | 31 +- drivers/net/phy/mscc/mscc_main.c | 21 +- drivers/net/phy/mscc/mscc_ptp.c | 582 +++++++++++++++++++++++++++++++ 3 files changed, 630 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h index eabb6ab3c374..9481bce94c2e 100644 --- a/drivers/net/phy/mscc/mscc.h +++ b/drivers/net/phy/mscc/mscc.h @@ -375,8 +375,12 @@ struct vsc8531_private { unsigned long egr_flows; #endif + struct mii_timestamper mii_ts; + bool input_clk_init; struct vsc85xx_ptp *ptp; + /* LOAD/SAVE GPIO pin, used for retrieving or setting time to the PHC. */ + struct gpio_desc *load_save; /* For multiple port PHYs; the MDIO address of the base PHY in the * pair of two PHYs that share a 1588 engine. PHY0 and PHY2 are coupled. @@ -386,8 +390,19 @@ struct vsc8531_private { unsigned int ts_base_addr; u8 ts_base_phy; - /* ts_lock: used for per-PHY timestamping operations. */ + /* ts_lock: used for per-PHY timestamping operations. + * phc_lock: used for per-PHY PHC opertations. + */ struct mutex ts_lock; + struct mutex phc_lock; +}; + +/* Shared structure between the PHYs of the same package. + * gpio_lock: used for PHC operations. Common for all PHYs as the load/save GPIO + * is shared. 
+ */ +struct vsc85xx_shared_private { + struct mutex gpio_lock; }; #if IS_ENABLED(CONFIG_OF_MDIO) @@ -416,20 +431,34 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev) #if IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) void vsc85xx_link_change_notify(struct phy_device *phydev); +void vsc8584_config_ts_intr(struct phy_device *phydev); int vsc8584_ptp_init(struct phy_device *phydev); +int vsc8584_ptp_probe_once(struct phy_device *phydev); int vsc8584_ptp_probe(struct phy_device *phydev); +irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev); #else static inline void vsc85xx_link_change_notify(struct phy_device *phydev) { } +static inline void vsc8584_config_ts_intr(struct phy_device *phydev) +{ +} static inline int vsc8584_ptp_init(struct phy_device *phydev) { return 0; } +static inline int vsc8584_ptp_probe_once(struct phy_device *phydev) +{ + return 0; +} static inline int vsc8584_ptp_probe(struct phy_device *phydev) { return 0; } +static inline irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev) +{ + return IRQ_NONE; +} #endif #endif /* _MSCC_PHY_H_ */ diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index ea004712540f..2a7082983c09 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1475,12 +1475,20 @@ static int vsc8584_config_init(struct phy_device *phydev) static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev) { + irqreturn_t ret; int irq_status; irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS); - if (irq_status < 0 || !(irq_status & MII_VSC85XX_INT_MASK_MASK)) + if (irq_status < 0) return IRQ_NONE; + /* Timestamping IRQ does not set a bit in the global INT_STATUS, so + * irq_status would be 0. + */ + ret = vsc8584_handle_ts_interrupt(phydev); + if (!(irq_status & MII_VSC85XX_INT_MASK_MASK)) + return ret; + if (irq_status & MII_VSC85XX_INT_MASK_EXT) vsc8584_handle_macsec_interrupt(phydev); @@ -1920,6 +1928,7 @@ static int vsc85xx_config_intr(struct phy_device *phydev) if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { vsc8584_config_macsec_intr(phydev); + vsc8584_config_ts_intr(phydev); rc = phy_write(phydev, MII_VSC85XX_INT_MASK, MII_VSC85XX_INT_MASK_MASK); @@ -2033,8 +2042,8 @@ static int vsc8584_probe(struct phy_device *phydev) phydev->priv = vsc8531; vsc8584_get_base_addr(phydev); - devm_phy_package_join(&phydev->mdio.dev, phydev, - vsc8531->base_addr, 0); + devm_phy_package_join(&phydev->mdio.dev, phydev, vsc8531->base_addr, + sizeof(struct vsc85xx_shared_private)); vsc8531->nleds = 4; vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES; @@ -2045,6 +2054,12 @@ static int vsc8584_probe(struct phy_device *phydev) if (!vsc8531->stats) return -ENOMEM; + if (phy_package_probe_once(phydev)) { + ret = vsc8584_ptp_probe_once(phydev); + if (ret) + return ret; + } + ret = vsc8584_ptp_probe(phydev); if (ret) return ret; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 73f1a4796435..17256d85cedf 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -357,6 +357,150 @@ static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev) return 0; } +static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb, + struct iphdr *iphdr, + struct udphdr *udphdr) +{ + if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP) + return NULL; + + return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN); +} + +static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb) +{ + 
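+	/* Locate the PTP header in an outgoing frame, for both PTP over Ethernet (ETH_P_1588) and PTP over UDP/IPv4. */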
struct ethhdr *ethhdr = eth_hdr(skb); + struct udphdr *udphdr; + struct iphdr *iphdr; + + if (ethhdr->h_proto == htons(ETH_P_1588)) + return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) + + skb_mac_header_len(skb)); + + if (ethhdr->h_proto != htons(ETH_P_IP)) + return NULL; + + iphdr = ip_hdr(skb); + udphdr = udp_hdr(skb); + + return get_ptp_header_l4(skb, iphdr, udphdr); +} + +static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb, + enum hwtstamp_rx_filters rx_filter) +{ + struct udphdr *udphdr; + struct iphdr *iphdr; + + if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) + return (struct vsc85xx_ptphdr *)skb->data; + + iphdr = (struct iphdr *)skb->data; + udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4); + + return get_ptp_header_l4(skb, iphdr, udphdr); +} + +static int get_sig(struct sk_buff *skb, u8 *sig) +{ + struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb); + struct ethhdr *ethhdr = eth_hdr(skb); + unsigned int i; + + if (!ptphdr) + return -EOPNOTSUPP; + + sig[0] = (__force u16)ptphdr->seq_id >> 8; + sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0); + sig[2] = ptphdr->domain; + sig[3] = ptphdr->tsmt & GENMASK(3, 0); + + memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN); + + /* Fill the last bytes of the signature to reach a 16B signature */ + for (i = 10; i < 16; i++) + sig[i] = ptphdr->tsmt & GENMASK(3, 0); + + return 0; +} + +static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct vsc85xx_ts_fifo fifo; + struct sk_buff *skb; + u8 skb_sig[16], *p; + int i, len; + u32 reg; + + memset(&fifo, 0, sizeof(fifo)); + p = (u8 *)&fifo; + + reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_TS_FIFO(0)); + if (reg & PTP_EGR_TS_FIFO_EMPTY) + return; + + *p++ = reg & 0xff; + *p++ = (reg >> 8) & 0xff; + + /* Read the current FIFO item. Reading FIFO6 pops the next one. */ + for (i = 1; i < 7; i++) { + reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_TS_FIFO(i)); + *p++ = reg & 0xff; + *p++ = (reg >> 8) & 0xff; + *p++ = (reg >> 16) & 0xff; + *p++ = (reg >> 24) & 0xff; + } + + len = skb_queue_len(&ptp->tx_queue); + if (len < 1) + return; + + while (len--) { + skb = __skb_dequeue(&ptp->tx_queue); + if (!skb) + return; + + /* Can't get the signature of the packet, won't ever + * be able to have one so let's dequeue the packet. + */ + if (get_sig(skb, skb_sig) < 0) { + kfree_skb(skb); + continue; + } + + /* Check if we found the signature we were looking for. */ + if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) { + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns); + skb_complete_tx_timestamp(skb, &shhwtstamps); + + return; + } + + /* Valid signature but does not match the one of the + * packet in the FIFO right now, reschedule it for later + * packets. + */ + __skb_queue_tail(&ptp->tx_queue, skb); + } +} + +static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp) +{ + u32 reg; + + do { + vsc85xx_dequeue_skb(ptp); + + /* If other timestamps are available in the FIFO, process them. 
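+		 * The FIFO level reported by the control register tells whether more entries are left to pop.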
*/ + reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_TS_FIFO_CTRL); + } while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1); +} + static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk) { struct vsc8531_private *vsc8531 = phydev->priv; @@ -462,6 +606,176 @@ static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk) return 0; } +static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + u64 adj = 0; + u32 val; + + if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL) + return 0; + + adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm)); + if (adj > 1000000000L) + adj = 1000000000L; + + val = PTP_AUTO_ADJ_NS_ROLLOVER(adj); + val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS; + + mutex_lock(&priv->phc_lock); + + /* Update the ppb val in nano seconds to the auto adjust reg. */ + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ, + val); + + /* The auto adjust update val is set to 0 after write operation. */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL); + val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); + + mutex_unlock(&priv->phc_lock); + + return 0; +} + +static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc85xx_shared_private *shared = + (struct vsc85xx_shared_private *)phydev->shared->priv; + struct vsc8531_private *priv = phydev->priv; + u32 val; + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL); + val |= PTP_LTC_CTRL_SAVE_ENA; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); + + /* Local Time Counter (LTC) is put in SAVE* regs on rising edge of + * LOAD_SAVE pin. 
+ */ + mutex_lock(&shared->gpio_lock); + gpiod_set_value(priv->load_save, 1); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_LTC_SAVED_SEC_MSB); + + ts->tv_sec = ((time64_t)val) << 32; + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_LTC_SAVED_SEC_LSB); + ts->tv_sec += val; + + ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_LTC_SAVED_NS); + + gpiod_set_value(priv->load_save, 0); + mutex_unlock(&shared->gpio_lock); + + return 0; +} + +static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + + mutex_lock(&priv->phc_lock); + __vsc85xx_gettime(info, ts); + mutex_unlock(&priv->phc_lock); + + return 0; +} + +static int __vsc85xx_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc85xx_shared_private *shared = + (struct vsc85xx_shared_private *)phydev->shared->priv; + struct vsc8531_private *priv = phydev->priv; + u32 val; + + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB, + PTP_LTC_LOAD_SEC_MSB(ts->tv_sec)); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB, + PTP_LTC_LOAD_SEC_LSB(ts->tv_sec)); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS, + PTP_LTC_LOAD_NS(ts->tv_nsec)); + + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL); + val |= PTP_LTC_CTRL_LOAD_ENA; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); + + /* Local Time Counter (LTC) is set from LOAD* regs on rising edge of + * LOAD_SAVE pin. + */ + mutex_lock(&shared->gpio_lock); + gpiod_set_value(priv->load_save, 1); + + val &= ~PTP_LTC_CTRL_LOAD_ENA; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val); + + gpiod_set_value(priv->load_save, 0); + mutex_unlock(&shared->gpio_lock); + + return 0; +} + +static int vsc85xx_settime(struct ptp_clock_info *info, + const struct timespec64 *ts) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + + mutex_lock(&priv->phc_lock); + __vsc85xx_settime(info, ts); + mutex_unlock(&priv->phc_lock); + + return 0; +} + +static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta) +{ + struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps); + struct phy_device *phydev = ptp->phydev; + struct vsc8531_private *priv = phydev->priv; + u32 val; + + /* Can't recover that big of an offset. Let's set the time directly. 
*/ + if (abs(delta) >= NSEC_PER_SEC) { + struct timespec64 ts; + u64 now; + + mutex_lock(&priv->phc_lock); + + __vsc85xx_gettime(info, &ts); + now = ktime_to_ns(timespec64_to_ktime(ts)); + ts = ns_to_timespec64(now + delta); + __vsc85xx_settime(info, &ts); + + mutex_unlock(&priv->phc_lock); + + return 0; + } + + mutex_lock(&priv->phc_lock); + + val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ; + if (delta > 0) + val |= PTP_LTC_OFFSET_ADD; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val); + + mutex_unlock(&priv->phc_lock); + + return 0; +} + static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk, u32 next_comp, u32 etype) { @@ -730,6 +1044,196 @@ static void vsc85xx_ts_reset_fifo(struct phy_device *phydev) val); } +static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr) +{ + struct vsc8531_private *vsc8531 = + container_of(mii_ts, struct vsc8531_private, mii_ts); + struct phy_device *phydev = vsc8531->ptp->phydev; + struct hwtstamp_config cfg; + bool one_step = false; + u32 val; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) + return -EINVAL; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_ONESTEP_SYNC: + one_step = true; + break; + case HWTSTAMP_TX_ON: + break; + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + vsc8531->ptp->tx_type = cfg.tx_type; + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + /* ETH->IP->UDP->PTP */ + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* ETH->PTP */ + break; + default: + return -ERANGE; + } + + vsc8531->ptp->rx_filter = cfg.rx_filter; + + mutex_lock(&vsc8531->ts_lock); + + __skb_queue_purge(&vsc8531->ptp->tx_queue); + __skb_queue_head_init(&vsc8531->ptp->tx_queue); + + /* Disable predictor while configuring the 1588 block */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_PREDICTOR); + val &= ~PTP_INGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR, + val); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_PREDICTOR); + val &= ~PTP_EGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR, + val); + + /* Bypass egress or ingress blocks if timestamping isn't used */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL); + val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS); + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) + val |= PTP_IFACE_CTRL_EGR_BYPASS; + if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE) + val |= PTP_IFACE_CTRL_INGR_BYPASS; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val); + + /* Resetting FIFO so that it's empty after reconfiguration */ + vsc85xx_ts_reset_fifo(phydev); + + vsc85xx_ts_engine_init(phydev, one_step); + + /* Re-enable predictors now */ + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_INGR_PREDICTOR); + val |= PTP_INGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR, + val); + val = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_PTP_EGR_PREDICTOR); + val |= PTP_EGR_PREDICTOR_EN; + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR, + val); + + vsc8531->ptp->configured = 1; + mutex_unlock(&vsc8531->ts_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; +} + +static int vsc85xx_ts_info(struct mii_timestamper *mii_ts, + struct ethtool_ts_info *info) +{ + struct vsc8531_private *vsc8531 = + container_of(mii_ts, struct vsc8531_private, mii_ts); + + info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock); + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON) | + (1 << HWTSTAMP_TX_ONESTEP_SYNC); + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + return 0; +} + +static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct vsc8531_private *vsc8531 = + container_of(mii_ts, struct vsc8531_private, mii_ts); + + if (!vsc8531->ptp->configured) + return; + + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) { + kfree_skb(skb); + return; + } + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + + mutex_lock(&vsc8531->ts_lock); + __skb_queue_tail(&vsc8531->ptp->tx_queue, skb); + mutex_unlock(&vsc8531->ts_lock); +} + +static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts, + struct sk_buff *skb, int type) +{ + struct vsc8531_private *vsc8531 = + container_of(mii_ts, struct vsc8531_private, mii_ts); + struct skb_shared_hwtstamps *shhwtstamps = NULL; + struct vsc85xx_ptphdr *ptphdr; + struct timespec64 ts; + unsigned long ns; + + if (!vsc8531->ptp->configured) + return false; + + if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE || + type == PTP_CLASS_NONE) + return false; + + vsc85xx_gettime(&vsc8531->ptp->caps, &ts); + + ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter); + if (!ptphdr) + return false; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); + + ns = ntohl(ptphdr->rsrvd2); + + /* nsec is in reserved field */ + if (ts.tv_nsec < ns) + ts.tv_sec--; + + shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns); + netif_rx_ni(skb); + + return true; +} + +static const struct ptp_clock_info vsc85xx_clk_caps = { + .owner = THIS_MODULE, + .name = "VSC85xx timer", + .max_adj = S32_MAX, + .n_alarm = 0, + .n_pins = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjtime = &vsc85xx_adjtime, + .adjfine = &vsc85xx_adjfine, + .gettime64 = &vsc85xx_gettime, + .settime64 = &vsc85xx_settime, +}; + static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev) { struct vsc8531_private *vsc8531 = phydev->priv; @@ -762,6 +1266,7 @@ static void vsc8584_set_input_clk_configured(struct phy_device *phydev) static int __vsc8584_init_ptp(struct phy_device *phydev) { + struct vsc8531_private *vsc8531 = phydev->priv; u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 }; u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 }; u32 val; @@ -978,9 +1483,32 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) vsc85xx_ts_eth_cmp1_sig(phydev); + vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp; + vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp; + vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp; + vsc8531->mii_ts.ts_info = vsc85xx_ts_info; + phydev->mii_ts = &vsc8531->mii_ts; + + memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps)); + + vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, + &phydev->mdio.dev); + if (IS_ERR(vsc8531->ptp->ptp_clock)) + return PTR_ERR(vsc8531->ptp->ptp_clock); + return 0; } +void vsc8584_config_ts_intr(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + + 
mutex_lock(&priv->ts_lock); + vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK, + VSC85XX_1588_INT_MASK_MASK); + mutex_unlock(&priv->ts_lock); +} + int vsc8584_ptp_init(struct phy_device *phydev) { switch (phydev->phy_id & phydev->drv->phy_id_mask) { @@ -993,6 +1521,34 @@ int vsc8584_ptp_init(struct phy_device *phydev) return 0; } +irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev) +{ + struct vsc8531_private *priv = phydev->priv; + int rc; + + mutex_lock(&priv->ts_lock); + rc = vsc85xx_ts_read_csr(phydev, PROCESSOR, + MSCC_PHY_1588_VSC85XX_INT_STATUS); + /* Ack the PTP interrupt */ + vsc85xx_ts_write_csr(phydev, PROCESSOR, + MSCC_PHY_1588_VSC85XX_INT_STATUS, rc); + + if (!(rc & VSC85XX_1588_INT_MASK_MASK)) { + mutex_unlock(&priv->ts_lock); + return IRQ_NONE; + } + + if (rc & VSC85XX_1588_INT_FIFO_ADD) { + vsc85xx_get_tx_ts(priv->ptp); + } else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) { + __skb_queue_purge(&priv->ptp->tx_queue); + vsc85xx_ts_reset_fifo(phydev); + } + + mutex_unlock(&priv->ts_lock); + return IRQ_HANDLED; +} + int vsc8584_ptp_probe(struct phy_device *phydev) { struct vsc8531_private *vsc8531 = phydev->priv; @@ -1002,9 +1558,35 @@ int vsc8584_ptp_probe(struct phy_device *phydev) if (!vsc8531->ptp) return -ENOMEM; + mutex_init(&vsc8531->phc_lock); mutex_init(&vsc8531->ts_lock); + /* Retrieve the shared load/save GPIO. Request it as non exclusive as + * the same GPIO can be requested by all the PHYs of the same package. + * Ths GPIO must be used with the gpio_lock taken (the lock is shared + * between all PHYs). + */ + vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save", + GPIOD_FLAGS_BIT_NONEXCLUSIVE | + GPIOD_OUT_LOW); + if (IS_ERR(vsc8531->load_save)) { + phydev_err(phydev, "Can't get load-save GPIO (%ld)\n", + PTR_ERR(vsc8531->load_save)); + return PTR_ERR(vsc8531->load_save); + } + vsc8531->ptp->phydev = phydev; return 0; } + +int vsc8584_ptp_probe_once(struct phy_device *phydev) +{ + struct vsc85xx_shared_private *shared = + (struct vsc85xx_shared_private *)phydev->shared->priv; + + /* Initialize shared GPIO lock */ + mutex_init(&shared->gpio_lock); + + return 0; +} From 3461522d00c075705d38eb0833ace7ba8c180345 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Tue, 23 Jun 2020 16:30:13 +0200 Subject: [PATCH 0280/2056] dt-bindings: net: phy: vsc8531: document the load/save GPIO A new optional property can be used to reference the load/save GPIO, used for PTP hardware clock (PHC) operations. This patch documents it in the binding documentation. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt index 5ff37c68c941..87a27d775d48 100644 --- a/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt +++ b/Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt @@ -31,6 +31,8 @@ Optional properties: VSC8531_LINK_100_ACTIVITY (2), VSC8531_LINK_ACTIVITY (0) and VSC8531_DUPLEX_COLLISION (8). +- load-save-gpios : GPIO used for the load/save operation of the PTP + hardware clock (PHC). 
Table: 1 - Edge rate change @@ -67,4 +69,5 @@ Example: vsc8531,edge-slowdown = <7>; vsc8531,led-0-mode = ; vsc8531,led-1-mode = ; + load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>; }; From 15324652f61287e2e640ca47f2a044df6cccf4b3 Mon Sep 17 00:00:00 2001 From: Quentin Schulz Date: Tue, 23 Jun 2020 16:30:14 +0200 Subject: [PATCH 0281/2056] MIPS: dts: ocelot: describe the load/save GPIO This patch adds a description of the load/save GPIO pin, used in the VSC8584 PHY for timestamping operations. The related pinctrl description is also added. Signed-off-by: Quentin Schulz Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- arch/mips/boot/dts/mscc/ocelot_pcb120.dts | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts index 33991fd209f5..897de5025d7f 100644 --- a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts +++ b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts @@ -3,6 +3,7 @@ /dts-v1/; +#include #include #include #include "ocelot.dtsi" @@ -25,6 +26,11 @@ phy_int_pins: phy_int_pins { pins = "GPIO_4"; function = "gpio"; }; + + phy_load_save_pins: phy_load_save_pins { + pins = "GPIO_10"; + function = "ptp2"; + }; }; &mdio0 { @@ -34,27 +40,31 @@ &mdio0 { &mdio1 { status = "okay"; pinctrl-names = "default"; - pinctrl-0 = <&miim1>, <&phy_int_pins>; + pinctrl-0 = <&miim1>, <&phy_int_pins>, <&phy_load_save_pins>; phy7: ethernet-phy@0 { reg = <0>; interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&gpio>; + load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>; }; phy6: ethernet-phy@1 { reg = <1>; interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&gpio>; + load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>; }; phy5: ethernet-phy@2 { reg = <2>; interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&gpio>; + load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>; }; phy4: ethernet-phy@3 { reg = <3>; interrupts = <4 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&gpio>; + load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>; }; }; From 0592ff88347b5e13e31711a20a21c2ef2397f80b Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:15 +0300 Subject: [PATCH 0282/2056] net: bridge: fdb_add_entry takes ndm as argument We can just pass ndm as an argument instead of its fields separately. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_fdb.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 4877a0db16c6..ed80d9ab0fb9 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -793,11 +793,11 @@ int br_fdb_get(struct sk_buff *skb, /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, - const u8 *addr, u16 state, u16 flags, u16 vid, - u8 ndm_flags) + const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid) { - bool is_sticky = !!(ndm_flags & NTF_STICKY); + bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); struct net_bridge_fdb_entry *fdb; + u16 state = ndm->ndm_state; bool modified = false; /* If the port cannot learn allow only local and static entries */ @@ -893,8 +893,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, err = br_fdb_external_learn_add(br, p, addr, vid, true); } else { spin_lock_bh(&br->hash_lock); - err = fdb_add_entry(br, p, addr, ndm->ndm_state, - nlh_flags, vid, ndm->ndm_flags); + err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid); spin_unlock_bh(&br->hash_lock); } From 899426b3bdd947541ba4af8c767575889c8b842a Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:16 +0300 Subject: [PATCH 0283/2056] net: neighbor: add fdb extended attribute Add an attribute to NDA which will contain all future fdb-specific attributes in order to avoid polluting the NDA namespace with e.g. bridge or vxlan specific attributes. The attribute is called NDA_FDB_EXT_ATTRS and the structure would look like: [NDA_FDB_EXT_ATTRS] = { [NFEA_xxx] } Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 12 ++++++++++++ net/core/neighbour.c | 1 + 2 files changed, 13 insertions(+) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index eefcda8ca44e..540ff48402a1 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -30,6 +30,7 @@ enum { NDA_SRC_VNI, NDA_PROTOCOL, /* Originator of entry */ NDA_NH_ID, + NDA_FDB_EXT_ATTRS, __NDA_MAX }; @@ -172,4 +173,15 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) +/* embedded into NDA_FDB_EXT_ATTRS: + * [NDA_FDB_EXT_ATTRS] = { + * ... + * } + */ +enum { + NFEA_UNSPEC, + __NFEA_MAX +}; +#define NFEA_MAX (__NFEA_MAX - 1) + #endif diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef6b5a8f629c..8e39e28b0a8d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1783,6 +1783,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = { [NDA_MASTER] = { .type = NLA_U32 }, [NDA_PROTOCOL] = { .type = NLA_U8 }, [NDA_NH_ID] = { .type = NLA_U32 }, + [NDA_FDB_EXT_ATTRS] = { .type = NLA_NESTED }, }; static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, From 31cbc39b6344916c20452e43a9171009214c409c Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:17 +0300 Subject: [PATCH 0284/2056] net: bridge: add option to allow activity notifications for any fdb entries This patch adds the ability to notify about activity of any entries (static, permanent or ext_learn). EVPN multihoming peers need it to properly and efficiently handle mac sync (peer active/locally active). We add a new NFEA_ACTIVITY_NOTIFY attribute which is used to dump the current activity state and to control if static entries should be monitored at all. 
We use 2 bits - one to activate fdb entry tracking (disabled by default) and the second to denote that an entry is inactive. We need the second bit in order to avoid multiple notifications of inactivity. Obviously this makes no difference for dynamic entries since at the time of inactivity they get deleted, while the tracked non-dynamic entries get the inactive bit set and get a notification. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 11 ++++ net/bridge/br_fdb.c | 117 +++++++++++++++++++++++++++++---- net/bridge/br_private.h | 4 ++ 3 files changed, 119 insertions(+), 13 deletions(-) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 540ff48402a1..21e569297355 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -173,13 +173,24 @@ enum { }; #define NDTA_MAX (__NDTA_MAX - 1) + /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY: + * - FDB_NOTIFY_BIT - notify on activity/expire for any entry + * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications + */ +enum { + FDB_NOTIFY_BIT = (1 << 0), + FDB_NOTIFY_INACTIVE_BIT = (1 << 1) +}; + /* embedded into NDA_FDB_EXT_ATTRS: * [NDA_FDB_EXT_ATTRS] = { + * [NFEA_ACTIVITY_NOTIFY] * ... * } */ enum { NFEA_UNSPEC, + NFEA_ACTIVITY_NOTIFY, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index ed80d9ab0fb9..642deb57c064 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -349,12 +349,21 @@ void br_fdb_cleanup(struct work_struct *work) */ rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { - unsigned long this_timer; + unsigned long this_timer = f->updated + delay; if (test_bit(BR_FDB_STATIC, &f->flags) || - test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) + test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) { + if (test_bit(BR_FDB_NOTIFY, &f->flags)) { + if (time_after(this_timer, now)) + work_delay = min(work_delay, + this_timer - now); + else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, + &f->flags)) + fdb_notify(br, f, RTM_NEWNEIGH, false); + } continue; - this_timer = f->updated + delay; + } + if (time_after(this_timer, now)) { work_delay = min(work_delay, this_timer - now); } else { @@ -556,11 +565,17 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, return ret; } +/* returns true if the fdb was modified */ +static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb) +{ + return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) && + test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)); +} + void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags) { struct net_bridge_fdb_entry *fdb; - bool fdb_modified = false; /* some users want to always flood. 
*/ if (hold_time(br) == 0) @@ -575,6 +590,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, source->dev->name, addr, vid); } else { unsigned long now = jiffies; + bool fdb_modified = false; + + if (now != fdb->updated) { + fdb->updated = now; + fdb_modified = __fdb_mark_active(fdb); + } /* fastpath: update of existing entry */ if (unlikely(source != fdb->dst && @@ -587,8 +608,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, clear_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); } - if (now != fdb->updated) - fdb->updated = now; + if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (unlikely(fdb_modified)) { @@ -667,6 +687,23 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br, &fdb->key.vlan_id)) goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { + struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); + u8 notify_bits = FDB_NOTIFY_BIT; + + if (!nest) + goto nla_put_failure; + if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + notify_bits |= FDB_NOTIFY_INACTIVE_BIT; + + if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { + nla_nest_cancel(skb, nest); + goto nla_put_failure; + } + + nla_nest_end(skb, nest); + } + nlmsg_end(skb, nlh); return 0; @@ -681,7 +718,9 @@ static inline size_t fdb_nlmsg_size(void) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + nla_total_size(sizeof(u16)) /* NDA_VLAN */ - + nla_total_size(sizeof(struct nda_cacheinfo)); + + nla_total_size(sizeof(struct nda_cacheinfo)) + + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ + + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ } static void fdb_notify(struct net_bridge *br, @@ -791,14 +830,40 @@ int br_fdb_get(struct sk_buff *skb, return err; } +/* returns true if the fdb is modified */ +static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify) +{ + bool modified = false; + + /* allow to mark an entry as inactive, usually done on creation */ + if ((notify & FDB_NOTIFY_INACTIVE_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) + modified = true; + + if ((notify & FDB_NOTIFY_BIT) && + !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* enabled activity tracking */ + modified = true; + } else if (!(notify & FDB_NOTIFY_BIT) && + test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) { + /* disabled activity tracking, clear notify state */ + clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags); + modified = true; + } + + return modified; +} + /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, - const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid) + const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid, + struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; + u8 notify = 0; /* If the port cannot learn allow only local and static entries */ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && @@ -815,6 +880,13 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, if (is_sticky && (state & NUD_PERMANENT)) return -EINVAL; + if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) { + notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]); + if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) || + (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT) + return -EINVAL; + } + fdb = 
br_fdb_find(br, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) @@ -858,6 +930,9 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, modified = true; } + if (fdb_handle_notify(fdb, notify)) + modified = true; + set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); fdb->used = jiffies; @@ -871,7 +946,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, - u16 nlh_flags, u16 vid) + u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[]) { int err = 0; @@ -893,19 +968,24 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, err = br_fdb_external_learn_add(br, p, addr, vid, true); } else { spin_lock_bh(&br->hash_lock); - err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid); + err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb); spin_unlock_bh(&br->hash_lock); } return err; } +static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { + [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, +}; + /* Add new permanent fdb entry with RTM_NEWNEIGH */ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, struct netlink_ext_ack *extack) { + struct nlattr *nfea_tb[NFEA_MAX + 1], *attr; struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; @@ -938,6 +1018,16 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], vg = nbp_vlan_group(p); } + if (tb[NDA_FDB_EXT_ATTRS]) { + attr = tb[NDA_FDB_EXT_ATTRS]; + err = nla_parse_nested(nfea_tb, NFEA_MAX, attr, + br_nda_fdb_pol, extack); + if (err) + return err; + } else { + memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1)); + } + if (vid) { v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) { @@ -946,9 +1036,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], } /* VID was specified, so use it. 
*/ - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb); } else { - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb); if (err || !vg || !vg->num_vlans) goto out; @@ -959,7 +1049,8 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; - err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); + err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, + nfea_tb); if (err) goto out; } diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7501be4eeba0..c0ae639e1b36 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -48,6 +48,8 @@ enum { /* Path to usermode spanning tree program */ #define BR_STP_PROG "/sbin/bridge-stp" +#define BR_FDB_NOTIFY_SETTABLE_BITS (FDB_NOTIFY_BIT | FDB_NOTIFY_INACTIVE_BIT) + typedef struct bridge_id bridge_id; typedef struct mac_addr mac_addr; typedef __u16 port_id; @@ -184,6 +186,8 @@ enum { BR_FDB_ADDED_BY_USER, BR_FDB_ADDED_BY_EXT_LEARN, BR_FDB_OFFLOADED, + BR_FDB_NOTIFY, + BR_FDB_NOTIFY_INACTIVE }; struct net_bridge_fdb_key { From b5f1d9ec283bd28a452cf61d7e5c2f2b1a9cccda Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 23 Jun 2020 23:47:18 +0300 Subject: [PATCH 0285/2056] net: bridge: add a flag to avoid refreshing fdb when changing/adding When we modify or create a new fdb entry sometimes we want to avoid refreshing its activity in order to track it properly. One example is when a mac is received from EVPN multi-homing peer by FRR, which doesn't want to change local activity accounting. It makes it static and sets a flag to track its activity. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/neighbour.h | 1 + net/bridge/br_fdb.c | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 21e569297355..dc8b72201f6c 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -191,6 +191,7 @@ enum { enum { NFEA_UNSPEC, NFEA_ACTIVITY_NOTIFY, + NFEA_DONT_REFRESH, __NFEA_MAX }; #define NFEA_MAX (__NFEA_MAX - 1) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 642deb57c064..9db504baa094 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -860,6 +860,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); + bool refresh = !nfea_tb[NFEA_DONT_REFRESH]; struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; @@ -937,7 +938,8 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, fdb->used = jiffies; if (modified) { - fdb->updated = jiffies; + if (refresh) + fdb->updated = jiffies; fdb_notify(br, fdb, RTM_NEWNEIGH, true); } @@ -977,6 +979,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, + [NFEA_DONT_REFRESH] = { .type = NLA_FLAG }, }; /* Add new permanent fdb entry with RTM_NEWNEIGH */ From 147373d968f1c1b5d6bb71e4e8b7495eeb9cdcae Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 24 Jun 2020 07:18:21 +0000 Subject: [PATCH 0286/2056] lan743x: Remove duplicated include from lan743x_main.c Remove duplicated include. 
Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 9585467cf11c..2373e72d2d29 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -15,7 +15,6 @@ #include #include #include -#include #include "lan743x_main.h" #include "lan743x_ethtool.h" From b08d4d3b6c0460306e8a0608413b201705200d33 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:04 -0700 Subject: [PATCH 0287/2056] net: bpf: Add bpf_seq_afinfo in tcp_iter_state A new field bpf_seq_afinfo is added to tcp_iter_state to provide bpf tcp iterator afinfo. There are two reasons why we did this. First, the current way to get afinfo from PDE_DATA does not work for bpf iterator as its seq_file inode does not conform to /proc/net/{tcp,tcp6} inode structures. More specifically, anonymous bpf iterator will use an anonymous inode which is shared in the system and we cannot change inode private data structure at all. Second, bpf iterator for tcp/tcp6 wants to traverse all tcp and tcp6 sockets in one pass and the bpf program can control whether it wants to skip one sk_family or not. Having a different afinfo with family AF_UNSPEC makes it easier to understand in the code. This patch does not change /proc/net/{tcp,tcp6} behavior as the bpf_seq_afinfo will be NULL for these two proc files. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230804.3987829-1-yhs@fb.com --- include/net/tcp.h | 1 + net/ipv4/tcp_ipv4.c | 30 ++++++++++++++++++++++++------ 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 4de9485f73d9..eab1c7d0facb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1935,6 +1935,7 @@ struct tcp_iter_state { struct seq_net_private p; enum tcp_seq_states state; struct sock *syn_wait_sk; + struct tcp_seq_afinfo *bpf_seq_afinfo; int bucket, offset, sbucket, num; loff_t last_pos; }; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ad6435ba6d72..9cb65ee4ec63 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2211,13 +2211,18 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); */ static void *listening_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); struct inet_listen_hashbucket *ilb; struct hlist_nulls_node *node; struct sock *sk = cur; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (!sk) { get_head: ilb = &tcp_hashinfo.listening_hash[st->bucket]; @@ -2235,7 +2240,8 @@ static void *listening_get_next(struct seq_file *seq, void *cur) sk_nulls_for_each_from(sk, node) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) return sk; } spin_unlock(&ilb->lock); @@ -2272,11 +2278,16 @@ static inline bool empty_bucket(const struct tcp_iter_state *st) */ static void *established_get_first(struct seq_file *seq) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); 
void *rc = NULL; + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + st->offset = 0; for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { struct sock *sk; @@ -2289,7 +2300,8 @@ static void *established_get_first(struct seq_file *seq) spin_lock_bh(lock); sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) { - if (sk->sk_family != afinfo->family || + if ((afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family) || !net_eq(sock_net(sk), net)) { continue; } @@ -2304,19 +2316,25 @@ static void *established_get_first(struct seq_file *seq) static void *established_get_next(struct seq_file *seq, void *cur) { - struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct tcp_seq_afinfo *afinfo; struct sock *sk = cur; struct hlist_nulls_node *node; struct tcp_iter_state *st = seq->private; struct net *net = seq_file_net(seq); + if (st->bpf_seq_afinfo) + afinfo = st->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + ++st->num; ++st->offset; sk = sk_nulls_next(sk); sk_nulls_for_each_from(sk, node) { - if (sk->sk_family == afinfo->family && + if ((afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) && net_eq(sock_net(sk), net)) return sk; } From 52d87d5f6418ba1b8b449ed5eea1532664896851 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:05 -0700 Subject: [PATCH 0288/2056] net: bpf: Implement bpf iterator for tcp The bpf iterator for tcp is implemented. Both tcp4 and tcp6 sockets will be traversed. It is up to bpf program to filter for tcp4 or tcp6 only, or both families of sockets. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230805.3987959-1-yhs@fb.com --- net/ipv4/tcp_ipv4.c | 123 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9cb65ee4ec63..ea0df9fd7618 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2613,6 +2613,74 @@ static int tcp4_seq_show(struct seq_file *seq, void *v) return 0; } +#ifdef CONFIG_BPF_SYSCALL +struct bpf_iter__tcp { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct sock_common *, sk_common); + uid_t uid __aligned(8); +}; + +static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, + struct sock_common *sk_common, uid_t uid) +{ + struct bpf_iter__tcp ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.sk_common = sk_common; + ctx.uid = uid; + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + struct sock *sk = v; + uid_t uid; + + if (v == SEQ_START_TOKEN) + return 0; + + if (sk->sk_state == TCP_TIME_WAIT) { + uid = 0; + } else if (sk->sk_state == TCP_NEW_SYN_RECV) { + const struct request_sock *req = v; + + uid = from_kuid_munged(seq_user_ns(seq), + sock_i_uid(req->rsk_listener)); + } else { + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); + } + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + return tcp_prog_seq_show(prog, &meta, v, uid); +} + +static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)tcp_prog_seq_show(prog, &meta, v, 0); + } + + tcp_seq_stop(seq, v); +} + 
+static const struct seq_operations bpf_iter_tcp_seq_ops = { + .show = bpf_iter_tcp_seq_show, + .start = tcp_seq_start, + .next = tcp_seq_next, + .stop = bpf_iter_tcp_seq_stop, +}; +#endif + static const struct seq_operations tcp4_seq_ops = { .show = tcp4_seq_show, .start = tcp_seq_start, @@ -2844,8 +2912,63 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { .exit_batch = tcp_sk_exit_batch, }; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta, + struct sock_common *sk_common, uid_t uid) + +static int bpf_iter_init_tcp(void *priv_data) +{ + struct tcp_iter_state *st = priv_data; + struct tcp_seq_afinfo *afinfo; + int ret; + + afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); + if (!afinfo) + return -ENOMEM; + + afinfo->family = AF_UNSPEC; + st->bpf_seq_afinfo = afinfo; + ret = bpf_iter_init_seq_net(priv_data); + if (ret) + kfree(afinfo); + return ret; +} + +static void bpf_iter_fini_tcp(void *priv_data) +{ + struct tcp_iter_state *st = priv_data; + + kfree(st->bpf_seq_afinfo); + bpf_iter_fini_seq_net(priv_data); +} + +static const struct bpf_iter_reg tcp_reg_info = { + .target = "tcp", + .seq_ops = &bpf_iter_tcp_seq_ops, + .init_seq_private = bpf_iter_init_tcp, + .fini_seq_private = bpf_iter_fini_tcp, + .seq_priv_size = sizeof(struct tcp_iter_state), + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__tcp, sk_common), + PTR_TO_BTF_ID_OR_NULL }, + }, +}; + +static void __init bpf_iter_register(void) +{ + if (bpf_iter_reg_target(&tcp_reg_info)) + pr_warn("Warning: could not register bpf iterator tcp\n"); +} + +#endif + void __init tcp_v4_init(void) { if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); + +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_register(); +#endif } From c06b0229579806f5b5e14af64cf9c5dc771445b3 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:07 -0700 Subject: [PATCH 0289/2056] bpf: Support 'X' in bpf_seq_printf() helper 'X' tells kernel to print hex with upper case letters. /proc/net/tcp{4,6} seq_file show() used this, and supports it in bpf_seq_printf() helper too. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230807.3988014-1-yhs@fb.com --- kernel/trace/bpf_trace.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a3ac7de98baa..b44505f56b6d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -681,7 +681,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, } if (fmt[i] != 'i' && fmt[i] != 'd' && - fmt[i] != 'u' && fmt[i] != 'x') { + fmt[i] != 'u' && fmt[i] != 'x' && + fmt[i] != 'X') { err = -EINVAL; goto out; } From 72e2b2b66f9c1225e51fc4a1c1e8512959195b76 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:08 -0700 Subject: [PATCH 0290/2056] bpf: Allow tracing programs to use bpf_jiffies64() helper /proc/net/tcp{4,6} uses jiffies for various computations. Let us add bpf_jiffies64() helper to tracing program so bpf_iter and other programs can use it. 
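As an illustration only (not part of this commit): a minimal sketch of a tracing program that could call bpf_jiffies64() from the tcp bpf_iter added earlier in this series. The "iter/tcp" section name and vmlinux.h usage follow libbpf conventions of this era; the program below is an assumption for illustration, not a selftest from this patch.

  /* Sketch only: assumes a vmlinux.h that carries the bpf_iter__tcp
   * context type introduced earlier in this series.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>

  char _license[] SEC("license") = "GPL";

  SEC("iter/tcp")
  int dump_tcp(struct bpf_iter__tcp *ctx)
  {
  	struct sock_common *skc = ctx->sk_common;
  	struct seq_file *seq = ctx->meta->seq;
  	static const char fmt[] = "family %d jiffies %llu\n";
  	__u64 args[2];

  	if (!skc)
  		return 0;

  	/* jiffies-based computation, as /proc/net/tcp{4,6} does */
  	args[0] = skc->skc_family;
  	args[1] = bpf_jiffies64();
  	bpf_seq_printf(seq, fmt, sizeof(fmt), args, sizeof(args));

  	return 0;
  }
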
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230808.3988073-1-yhs@fb.com --- kernel/trace/bpf_trace.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index b44505f56b6d..0159f12d2af5 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1135,6 +1135,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_ringbuf_discard_proto; case BPF_FUNC_ringbuf_query: return &bpf_ringbuf_query_proto; + case BPF_FUNC_jiffies64: + return &bpf_jiffies64_proto; default: return NULL; } From af7ec13833619e17f03aa73a785a2f871da6d66b Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:09 -0700 Subject: [PATCH 0291/2056] bpf: Add bpf_skc_to_tcp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a tcp6_sock pointer. The return value could be NULL if the casting is illegal. A new helper return type RET_PTR_TO_BTF_ID_OR_NULL is added so the verifier is able to deduce proper return types for the helper. Different from the previous BTF_ID based helpers, the bpf_skc_to_tcp6_sock() argument can be several possible btf_ids. More specifically, all possible socket data structures with sock_common appearing in the first in the memory layout. This patch only added socket types related to tcp and udp. All possible argument btf_id and return value btf_id for helper bpf_skc_to_tcp6_sock() are pre-calculcated and cached. In the future, it is even possible to precompute these btf_id's at kernel build time. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230809.3988195-1-yhs@fb.com --- include/linux/bpf.h | 12 +++++ include/uapi/linux/bpf.h | 9 +++- kernel/bpf/btf.c | 1 + kernel/bpf/verifier.c | 43 +++++++++++++----- kernel/trace/bpf_trace.c | 2 + net/core/filter.c | 82 ++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 + tools/include/uapi/linux/bpf.h | 9 +++- 8 files changed, 148 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1e1501ee53ce..c0e38ad07848 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -265,6 +265,7 @@ enum bpf_return_type { RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -287,6 +288,12 @@ struct bpf_func_proto { enum bpf_arg_type arg_type[5]; }; int *btf_id; /* BTF ids of arguments */ + bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is + * valid. Often used if more + * than one btf id is permitted + * for this argument. + */ + int *ret_btf_id; /* return value btf_id */ }; /* bpf_context is intentionally undefined structure. 
Pointer to bpf_context is @@ -1524,6 +1531,7 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); +void init_btf_sock_ids(struct btf *btf); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1549,6 +1557,9 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } +static inline void init_btf_sock_ids(struct btf *btf) +{ +} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) @@ -1638,6 +1649,7 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index e377d1981730..4c3007f428b1 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3674,6 +3674,7 @@ struct btf *btf_parse_vmlinux(void) goto errout; bpf_struct_ops_init(btf, log); + init_btf_sock_ids(btf); btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7460f967cb75..7de98906ddf4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3800,12 +3800,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type) return -EINVAL; } -static int check_func_arg(struct bpf_verifier_env *env, u32 regno, - enum bpf_arg_type arg_type, - struct bpf_call_arg_meta *meta) +static int check_func_arg(struct bpf_verifier_env *env, u32 arg, + struct bpf_call_arg_meta *meta, + const struct bpf_func_proto *fn) { + u32 regno = BPF_REG_1 + arg; struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected_type, type = reg->type; + enum bpf_arg_type arg_type = fn->arg_type[arg]; int err = 0; if (arg_type == ARG_DONTCARE) @@ -3885,9 +3887,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, expected_type = PTR_TO_BTF_ID; if (type != expected_type) goto err_type; - if (reg->btf_id != meta->btf_id) { - verbose(env, "Helper has type %s got %s in R%d\n", - kernel_type_name(meta->btf_id), + if (!fn->check_btf_id) { + if (reg->btf_id != meta->btf_id) { + verbose(env, "Helper has type %s got %s in R%d\n", + kernel_type_name(meta->btf_id), + 
kernel_type_name(reg->btf_id), regno); + + return -EACCES; + } + } else if (!fn->check_btf_id(reg->btf_id, arg)) { + verbose(env, "Helper does not support %s in R%d\n", kernel_type_name(reg->btf_id), regno); return -EACCES; @@ -4709,10 +4718,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn meta.func_id = func_id; /* check args */ for (i = 0; i < 5; i++) { - err = btf_resolve_helper_id(&env->log, fn, i); - if (err > 0) - meta.btf_id = err; - err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta); + if (!fn->check_btf_id) { + err = btf_resolve_helper_id(&env->log, fn, i); + if (err > 0) + meta.btf_id = err; + } + err = check_func_arg(env, i, &meta, fn); if (err) return err; } @@ -4815,6 +4826,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL; regs[BPF_REG_0].id = ++env->id_gen; regs[BPF_REG_0].mem_size = meta.mem_size; + } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) { + int ret_btf_id; + + mark_reg_known_zero(env, regs, BPF_REG_0); + regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL; + ret_btf_id = *fn->ret_btf_id; + if (ret_btf_id == 0) { + verbose(env, "invalid return type %d of func %s#%d\n", + fn->ret_type, func_id_name(func_id), func_id); + return -EINVAL; + } + regs[BPF_REG_0].btf_id = ret_btf_id; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0159f12d2af5..2a97a268f533 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1515,6 +1515,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skb_output_proto; case BPF_FUNC_xdp_output: return &bpf_xdp_output_proto; + case BPF_FUNC_skc_to_tcp6_sock: + return &bpf_skc_to_tcp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index c713b6b8938f..176e27d75c51 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -9225,3 +9226,84 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) { bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } + +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, "inet_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, "inet_connection_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, "inet_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, "inet_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, "request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, "sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, "sock_common") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, "tcp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, "tcp_request_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, "tcp_timewait_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, "tcp6_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, "udp_sock") \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, "udp6_sock") + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +static int btf_sock_ids[MAX_BTF_SOCK_TYPE]; + +#ifdef CONFIG_BPF_SYSCALL +static const char *bpf_sock_types[] = { +#define BTF_SOCK_TYPE(name, str) str, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +}; + +void init_btf_sock_ids(struct btf *btf) +{ + int i, btf_id; + + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) { + btf_id = btf_find_by_name_kind(btf, bpf_sock_types[i], + BTF_KIND_STRUCT); + if (btf_id > 0) + btf_sock_ids[i] = btf_id; + } +} +#endif + +static bool check_arg_btf_id(u32 btf_id, u32 arg) +{ + int i; + + /* only one argument, no need to check arg */ + for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) + if (btf_sock_ids[i] == btf_id) + return true; + return false; +} + +BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) +{ + /* tcp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. + */ + BTF_TYPE_EMIT(struct tcp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && + sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { + .func = bpf_skc_to_tcp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 91fa668fa860..6c2f64118651 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -421,6 +421,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -458,6 +459,7 @@ class PrinterHelpers(Printer): 'struct sockaddr', 'struct tcphdr', 'struct seq_file', + 'struct tcp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index d9737d51dd19..e90ad07b291a 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3255,6 +3255,12 @@ union bpf_attr { * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level * is returned or the error code -EACCES in case the skb is not * subject to CHECKSUM_UNNECESSARY. + * + * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3392,7 +3398,8 @@ union bpf_attr { FN(ringbuf_submit), \ FN(ringbuf_discard), \ FN(ringbuf_query), \ - FN(csum_level), + FN(csum_level), \ + FN(skc_to_tcp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 478cfbdf5f13dfe09cfd0b1cbac821f5e27f6108 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:11 -0700 Subject: [PATCH 0292/2056] bpf: Add bpf_skc_to_{tcp, tcp_timewait, tcp_request}_sock() helpers Three more helpers are added to cast a sock_common pointer to an tcp_sock, tcp_timewait_sock or a tcp_request_sock for tracing programs. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230811.3988277-1-yhs@fb.com --- include/linux/bpf.h | 3 ++ include/uapi/linux/bpf.h | 23 ++++++++++++- kernel/trace/bpf_trace.c | 6 ++++ net/core/filter.c | 62 ++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 6 ++++ tools/include/uapi/linux/bpf.h | 23 ++++++++++++- 6 files changed, 121 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c0e38ad07848..c23998cf6699 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1650,6 +1650,9 @@ extern const struct bpf_func_proto bpf_ringbuf_submit_proto; extern const struct bpf_func_proto bpf_ringbuf_discard_proto; extern const struct bpf_func_proto bpf_ringbuf_query_proto; extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2a97a268f533..48d935b0d87c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1517,6 +1517,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_output_proto; case BPF_FUNC_skc_to_tcp6_sock: return &bpf_skc_to_tcp6_sock_proto; + case BPF_FUNC_skc_to_tcp_sock: + return &bpf_skc_to_tcp_sock_proto; + case BPF_FUNC_skc_to_tcp_timewait_sock: + return &bpf_skc_to_tcp_timewait_sock_proto; + case BPF_FUNC_skc_to_tcp_request_sock: + return &bpf_skc_to_tcp_request_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 176e27d75c51..0b4e5aed7e20 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -74,6 +74,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -9307,3 +9308,64 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6], }; + +BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) +{ + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { + .func = bpf_skc_to_tcp_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP], +}; + +BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { + .func = bpf_skc_to_tcp_timewait_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW], +}; + +BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) +{ + if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; + +#if IS_BUILTIN(CONFIG_IPV6) + if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) + return (unsigned long)sk; +#endif + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { + .func = bpf_skc_to_tcp_request_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6c2f64118651..d886657c6aaa 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -422,6 +422,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 
'struct tcp_timewait_sock', + 'struct tcp_request_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -460,6 +463,9 @@ class PrinterHelpers(Printer): 'struct tcphdr', 'struct seq_file', 'struct tcp6_sock', + 'struct tcp_sock', + 'struct tcp_timewait_sock', + 'struct tcp_request_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e90ad07b291a..b9412ab275f3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3261,6 +3261,24 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. + * + * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3399,7 +3417,10 @@ union bpf_attr { FN(ringbuf_discard), \ FN(ringbuf_query), \ FN(csum_level), \ - FN(skc_to_tcp6_sock), + FN(skc_to_tcp6_sock), \ + FN(skc_to_tcp_sock), \ + FN(skc_to_tcp_timewait_sock), \ + FN(skc_to_tcp_request_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 9e8ca27afab6c92477b459f6a5d2af0cd3197c20 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:12 -0700 Subject: [PATCH 0293/2056] net: bpf: Add bpf_seq_afinfo in udp_iter_state Similar to tcp_iter_state, a new field bpf_seq_afinfo is added to udp_iter_state to provide bpf udp iterator afinfo. This does not change /proc/net/{udp, udp6} behavior. But it enables bpf iterator to avoid get afinfo from PDE_DATA and iterate through all udp and udp6 sockets in one pass. 
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230812.3988347-1-yhs@fb.com --- include/net/udp.h | 1 + net/ipv4/udp.c | 28 +++++++++++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index a8fa6c0c6ded..67c8b7368845 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -440,6 +440,7 @@ struct udp_seq_afinfo { struct udp_iter_state { struct seq_net_private p; int bucket; + struct udp_seq_afinfo *bpf_seq_afinfo; }; void *udp_seq_start(struct seq_file *seq, loff_t *pos); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1b7ebbcae497..90355301b266 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2826,10 +2826,15 @@ EXPORT_SYMBOL(udp_prot); static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + for (state->bucket = start; state->bucket <= afinfo->udp_table->mask; ++state->bucket) { struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket]; @@ -2841,7 +2846,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) sk_for_each(sk, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; - if (sk->sk_family == afinfo->family) + if (afinfo->family == AF_UNSPEC || + sk->sk_family == afinfo->family) goto found; } spin_unlock_bh(&hslot->lock); @@ -2853,13 +2859,20 @@ static struct sock *udp_get_first(struct seq_file *seq, int start) static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + do { sk = sk_next(sk); - } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family)); + } while (sk && (!net_eq(sock_net(sk), net) || + (afinfo->family != AF_UNSPEC && + sk->sk_family != afinfo->family))); if (!sk) { if (state->bucket <= afinfo->udp_table->mask) @@ -2904,9 +2917,14 @@ EXPORT_SYMBOL(udp_seq_next); void udp_seq_stop(struct seq_file *seq, void *v) { - struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file)); + struct udp_seq_afinfo *afinfo; struct udp_iter_state *state = seq->private; + if (state->bpf_seq_afinfo) + afinfo = state->bpf_seq_afinfo; + else + afinfo = PDE_DATA(file_inode(seq->file)); + if (state->bucket <= afinfo->udp_table->mask) spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock); } From 5788b3a07fc5863606c3b92fa7b1ffe125e6eb4c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:13 -0700 Subject: [PATCH 0294/2056] net: bpf: Implement bpf iterator for udp The bpf iterator for udp is implemented. Both udp4 and udp6 sockets will be traversed. It is up to bpf program to filter for udp4 or udp6 only, or both families of sockets. 
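For illustration only (not part of this patch): a minimal "iter/udp" program that does the family filtering itself might look like the sketch below. It assumes the struct bpf_iter__udp context and the bpf_iter.h / BPF_SEQ_PRINTF conventions that the selftest patches later in this series introduce; names here are illustrative.

#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

#define AF_INET 2	/* local copy of the uapi value, as the selftests do */

SEC("iter/udp")
int dump_udp4_only(struct bpf_iter__udp *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct udp_sock *udp_sk = ctx->udp_sk;

	/* the final stop callback passes a NULL socket */
	if (!udp_sk)
		return 0;

	/* the family lives in the embedded sock_common; skip udp6 sockets */
	if (udp_sk->inet.sk.__sk_common.skc_family != AF_INET)
		return 0;

	BPF_SEQ_PRINTF(seq, "bucket %d uid %u\n", ctx->bucket, ctx->uid);
	return 0;
}

The tcp4/tcp6 and udp4/udp6 selftest programs added later in this series follow the same pattern with full /proc-style output.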
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230813.3988404-1-yhs@fb.com --- net/ipv4/udp.c | 116 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 90355301b266..31530129f137 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2968,6 +2968,67 @@ int udp4_seq_show(struct seq_file *seq, void *v) return 0; } +#ifdef CONFIG_BPF_SYSCALL +struct bpf_iter__udp { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct udp_sock *, udp_sk); + uid_t uid __aligned(8); + int bucket __aligned(8); +}; + +static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta, + struct udp_sock *udp_sk, uid_t uid, int bucket) +{ + struct bpf_iter__udp ctx; + + meta->seq_num--; /* skip SEQ_START_TOKEN */ + ctx.meta = meta; + ctx.udp_sk = udp_sk; + ctx.uid = uid; + ctx.bucket = bucket; + return bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v) +{ + struct udp_iter_state *state = seq->private; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + struct sock *sk = v; + uid_t uid; + + if (v == SEQ_START_TOKEN) + return 0; + + uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)); + meta.seq = seq; + prog = bpf_iter_get_info(&meta, false); + return udp_prog_seq_show(prog, &meta, v, uid, state->bucket); +} + +static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_meta meta; + struct bpf_prog *prog; + + if (!v) { + meta.seq = seq; + prog = bpf_iter_get_info(&meta, true); + if (prog) + (void)udp_prog_seq_show(prog, &meta, v, 0, 0); + } + + udp_seq_stop(seq, v); +} + +static const struct seq_operations bpf_iter_udp_seq_ops = { + .start = udp_seq_start, + .next = udp_seq_next, + .stop = bpf_iter_udp_seq_stop, + .show = bpf_iter_udp_seq_show, +}; +#endif + const struct seq_operations udp_seq_ops = { .start = udp_seq_start, .next = udp_seq_next, @@ -3085,6 +3146,57 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = { .init = udp_sysctl_init, }; +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) +DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, + struct udp_sock *udp_sk, uid_t uid, int bucket) + +static int bpf_iter_init_udp(void *priv_data) +{ + struct udp_iter_state *st = priv_data; + struct udp_seq_afinfo *afinfo; + int ret; + + afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN); + if (!afinfo) + return -ENOMEM; + + afinfo->family = AF_UNSPEC; + afinfo->udp_table = &udp_table; + st->bpf_seq_afinfo = afinfo; + ret = bpf_iter_init_seq_net(priv_data); + if (ret) + kfree(afinfo); + return ret; +} + +static void bpf_iter_fini_udp(void *priv_data) +{ + struct udp_iter_state *st = priv_data; + + kfree(st->bpf_seq_afinfo); + bpf_iter_fini_seq_net(priv_data); +} + +static const struct bpf_iter_reg udp_reg_info = { + .target = "udp", + .seq_ops = &bpf_iter_udp_seq_ops, + .init_seq_private = bpf_iter_init_udp, + .fini_seq_private = bpf_iter_fini_udp, + .seq_priv_size = sizeof(struct udp_iter_state), + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__udp, udp_sk), + PTR_TO_BTF_ID_OR_NULL }, + }, +}; + +static void __init bpf_iter_register(void) +{ + if (bpf_iter_reg_target(&udp_reg_info)) + pr_warn("Warning: could not register bpf iterator udp\n"); +} +#endif + void __init udp_init(void) { unsigned long limit; @@ -3110,4 +3222,8 @@ void __init udp_init(void) if 
(register_pernet_subsys(&udp_sysctl_ops)) panic("UDP: failed to init sysctl parameters.\n"); + +#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) + bpf_iter_register(); +#endif } From 0d4fad3e57df2bf61e8ffc8d12a34b1caf9b8835 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:15 -0700 Subject: [PATCH 0295/2056] bpf: Add bpf_skc_to_udp6_sock() helper The helper is used in tracing programs to cast a socket pointer to a udp6_sock pointer. The return value could be NULL if the casting is illegal. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Cc: Eric Dumazet Link: https://lore.kernel.org/bpf/20200623230815.3988481-1-yhs@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 9 ++++++++- kernel/trace/bpf_trace.c | 2 ++ net/core/filter.c | 22 ++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 2 ++ tools/include/uapi/linux/bpf.h | 9 ++++++++- 6 files changed, 43 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c23998cf6699..3d2ade703a35 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1653,6 +1653,7 @@ extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 48d935b0d87c..5d59dda5f661 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1523,6 +1523,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_skc_to_tcp_timewait_sock_proto; case BPF_FUNC_skc_to_tcp_request_sock: return &bpf_skc_to_tcp_request_sock_proto; + case BPF_FUNC_skc_to_udp6_sock: + return &bpf_skc_to_udp6_sock_proto; #endif case BPF_FUNC_seq_printf: return prog->expected_attach_type == BPF_TRACE_ITER ? diff --git a/net/core/filter.c b/net/core/filter.c index 0b4e5aed7e20..c796e141ea8e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9369,3 +9369,25 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = { .check_btf_id = check_arg_btf_id, .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ], }; + +BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) +{ + /* udp6_sock type is not generated in dwarf and hence btf, + * trigger an explicit type generation here. 
+ */ + BTF_TYPE_EMIT(struct udp6_sock); + if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && + sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) + return (unsigned long)sk; + + return (unsigned long)NULL; +} + +const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = { + .func = bpf_skc_to_udp6_sock, + .gpl_only = false, + .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, + .arg1_type = ARG_PTR_TO_BTF_ID, + .check_btf_id = check_arg_btf_id, + .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6], +}; diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index d886657c6aaa..6bab40ff442e 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -425,6 +425,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', 'struct __sk_buff', 'struct sk_msg_md', @@ -466,6 +467,7 @@ class PrinterHelpers(Printer): 'struct tcp_sock', 'struct tcp_timewait_sock', 'struct tcp_request_sock', + 'struct udp6_sock', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index b9412ab275f3..0cb8ec948816 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3279,6 +3279,12 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk) + * Description + * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. + * Return + * *sk* if casting is valid, or NULL otherwise. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3420,7 +3426,8 @@ union bpf_attr { FN(skc_to_tcp6_sock), \ FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ - FN(skc_to_tcp_request_sock), + FN(skc_to_tcp_request_sock), \ + FN(skc_to_udp6_sock), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 84544f5637ff3501876ba96bd48ca900317e08fb Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:16 -0700 Subject: [PATCH 0296/2056] selftests/bpf: Move newer bpf_iter_* type redefining to a new header file Commit b9f4c01f3e0b ("selftest/bpf: Make bpf_iter selftest compilable against old vmlinux.h") and Commit dda18a5c0b75 ("selftests/bpf: Convert bpf_iter_test_kern{3, 4}.c to define own bpf_iter_meta") redefined newly introduced types in bpf programs so the bpf program can still compile properly with old kernels although loading may fail. Since this patch set introduced new types and the same workaround is needed, so let us move the workaround to a separate header file so they do not clutter bpf programs. 
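The header centralizes the usual three-step vmlinux.h override: mask the name, include vmlinux.h, then provide a local CO-RE-relocatable definition. A minimal sketch of the pattern for a hypothetical bpf_iter__foo context (purely illustrative, not part of this series) is:

/* Hide the (possibly absent) vmlinux.h definition, then redefine the
 * struct with preserve_access_index so field accesses are relocated
 * via CO-RE at load time even against older kernels.
 */
#define bpf_iter__foo bpf_iter__foo___not_used
#include "vmlinux.h"
#undef bpf_iter__foo

struct bpf_iter__foo {
	struct bpf_iter_meta *meta;
	struct foo *foo;
} __attribute__((preserve_access_index));

With the workaround collected in bpf_iter.h, each selftest program shrinks to a single #include, as the diffs below show.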
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230816.3988656-1-yhs@fb.com --- tools/testing/selftests/bpf/progs/bpf_iter.h | 49 +++++++++++++++++++ .../selftests/bpf/progs/bpf_iter_bpf_map.c | 18 +------ .../selftests/bpf/progs/bpf_iter_ipv6_route.c | 18 +------ .../selftests/bpf/progs/bpf_iter_netlink.c | 18 +------ .../selftests/bpf/progs/bpf_iter_task.c | 18 +------ .../selftests/bpf/progs/bpf_iter_task_file.c | 20 +------- .../selftests/bpf/progs/bpf_iter_test_kern3.c | 17 +------ .../selftests/bpf/progs/bpf_iter_test_kern4.c | 17 +------ .../bpf/progs/bpf_iter_test_kern_common.h | 18 +------ 9 files changed, 57 insertions(+), 136 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter.h diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h new file mode 100644 index 000000000000..3757e88c6406 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2020 Facebook */ +/* "undefine" structs in vmlinux.h, because we "override" them below */ +#define bpf_iter_meta bpf_iter_meta___not_used +#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used +#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used +#define bpf_iter__netlink bpf_iter__netlink___not_used +#define bpf_iter__task bpf_iter__task___not_used +#define bpf_iter__task_file bpf_iter__task_file___not_used +#include "vmlinux.h" +#undef bpf_iter_meta +#undef bpf_iter__bpf_map +#undef bpf_iter__ipv6_route +#undef bpf_iter__netlink +#undef bpf_iter__task +#undef bpf_iter__task_file + +struct bpf_iter_meta { + struct seq_file *seq; + __u64 session_id; + __u64 seq_num; +} __attribute__((preserve_access_index)); + +struct bpf_iter__ipv6_route { + struct bpf_iter_meta *meta; + struct fib6_info *rt; +} __attribute__((preserve_access_index)); + +struct bpf_iter__netlink { + struct bpf_iter_meta *meta; + struct netlink_sock *sk; +} __attribute__((preserve_access_index)); + +struct bpf_iter__task { + struct bpf_iter_meta *meta; + struct task_struct *task; +} __attribute__((preserve_access_index)); + +struct bpf_iter__task_file { + struct bpf_iter_meta *meta; + struct task_struct *task; + __u32 fd; + struct file *file; +} __attribute__((preserve_access_index)); + +struct bpf_iter__bpf_map { + struct bpf_iter_meta *meta; + struct bpf_map *map; +} __attribute__((preserve_access_index)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c index b57bd6fef208..08651b23edba 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c @@ -1,27 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map +#include "bpf_iter.h" #include #include char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - SEC("iter/bpf_map") int dump_bpf_map(struct bpf_iter__bpf_map 
*ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c index c8e9ca74c87b..93a452d1d136 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c @@ -1,25 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__ipv6_route +#include "bpf_iter.h" #include #include -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__ipv6_route { - struct bpf_iter_meta *meta; - struct fib6_info *rt; -} __attribute__((preserve_access_index)); - char _license[] SEC("license") = "GPL"; extern bool CONFIG_IPV6_SUBTREES __kconfig __weak; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index e7b8753eac0b..fda5036fdf75 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -1,11 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__netlink bpf_iter__netlink___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__netlink +#include "bpf_iter.h" #include #include @@ -14,17 +9,6 @@ char _license[] SEC("license") = "GPL"; #define sk_rmem_alloc sk_backlog.rmem_alloc #define sk_refcnt __sk_common.skc_refcnt -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__netlink { - struct bpf_iter_meta *meta; - struct netlink_sock *sk; -} __attribute__((preserve_access_index)); - static inline struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_task.c index ee754021f98e..4983087852a0 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task.c @@ -1,27 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include #include char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c index 0f0ec3db20ba..8b787baa2654 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c @@ -1,29 +1,11 @@ // SPDX-License-Identifier: 
GPL-2.0 /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task_file bpf_iter__task_file___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task_file +#include "bpf_iter.h" #include #include char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task_file { - struct bpf_iter_meta *meta; - struct task_struct *task; - __u32 fd; - struct file *file; -} __attribute__((preserve_access_index)); - SEC("iter/task_file") int dump_task_file(struct bpf_iter__task_file *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c index 13c2c90c835f..2a4647f20c46 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c @@ -1,25 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c index 0aa71b333cf3..ee49493dc125 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c @@ -1,25 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__bpf_map +#include "bpf_iter.h" #include char _license[] SEC("license") = "GPL"; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__bpf_map { - struct bpf_iter_meta *meta; - struct bpf_map *map; -} __attribute__((preserve_access_index)); - __u32 map1_id = 0, map2_id = 0; __u32 map1_accessed = 0, map2_accessed = 0; __u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h index dee1339e6905..d5e3df66ad9a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h @@ -1,27 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2020 Facebook */ -/* "undefine" structs in vmlinux.h, because we "override" them below */ -#define bpf_iter_meta bpf_iter_meta___not_used -#define bpf_iter__task bpf_iter__task___not_used -#include "vmlinux.h" -#undef bpf_iter_meta -#undef bpf_iter__task +#include "bpf_iter.h" #include char _license[] SEC("license") = "GPL"; int count = 0; -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} 
__attribute__((preserve_access_index)); - -struct bpf_iter__task { - struct bpf_iter_meta *meta; - struct task_struct *task; -} __attribute__((preserve_access_index)); - SEC("iter/task") int dump_task(struct bpf_iter__task *ctx) { From 647b502e3d5456f5c240b1587112b163c69732e9 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:17 -0700 Subject: [PATCH 0297/2056] selftests/bpf: Refactor some net macros to bpf_tracing_net.h Refactor bpf_iter_ipv6_route.c and bpf_iter_netlink.c so net macros, originally from various include/linux header files, are moved to a new header file bpf_tracing_net.h. The goal is to improve reuse so networking tracing programs do not need to copy these macros every time they use them. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230817.3988962-1-yhs@fb.com --- .../selftests/bpf/progs/bpf_iter_ipv6_route.c | 7 +------ .../selftests/bpf/progs/bpf_iter_netlink.c | 4 +--- .../selftests/bpf/progs/bpf_tracing_net.h | 16 ++++++++++++++++ 3 files changed, 18 insertions(+), 9 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/bpf_tracing_net.h diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c index 93a452d1d136..d58d9f1642b5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #include "bpf_iter.h" +#include "bpf_tracing_net.h" #include #include @@ -8,12 +9,6 @@ char _license[] SEC("license") = "GPL"; extern bool CONFIG_IPV6_SUBTREES __kconfig __weak; -#define RTF_GATEWAY 0x0002 -#define IFNAMSIZ 16 -#define fib_nh_gw_family nh_common.nhc_gw_family -#define fib_nh_gw6 nh_common.nhc_gw.ipv6 -#define fib_nh_dev nh_common.nhc_dev - SEC("iter/ipv6_route") int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx) { diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index fda5036fdf75..cec82a419800 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -1,14 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ #include "bpf_iter.h" +#include "bpf_tracing_net.h" #include #include char _license[] SEC("license") = "GPL"; -#define sk_rmem_alloc sk_backlog.rmem_alloc -#define sk_refcnt __sk_common.skc_refcnt - static inline struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h new file mode 100644 index 000000000000..1f38a1098727 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ +#ifndef __BPF_TRACING_NET_H__ +#define __BPF_TRACING_NET_H__ + +#define IFNAMSIZ 16 + +#define RTF_GATEWAY 0x0002 + +#define fib_nh_dev nh_common.nhc_dev +#define fib_nh_gw_family nh_common.nhc_gw_family +#define fib_nh_gw6 nh_common.nhc_gw.ipv6 + +#define sk_rmem_alloc sk_backlog.rmem_alloc +#define sk_refcnt __sk_common.skc_refcnt + +#endif From 3982bfaaef7c80ecf6a065cbf9422165a8e36f75 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:19 -0700 Subject: [PATCH 
0298/2056] selftests/bpf: Add more common macros to bpf_tracing_net.h These newly added macros will be used in subsequent bpf iterator tcp{4,6} and udp{4,6} programs. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230819.3989050-1-yhs@fb.com --- .../selftests/bpf/progs/bpf_tracing_net.h | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index 1f38a1098727..01378911252b 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -2,15 +2,50 @@ #ifndef __BPF_TRACING_NET_H__ #define __BPF_TRACING_NET_H__ +#define AF_INET 2 +#define AF_INET6 10 + +#define ICSK_TIME_RETRANS 1 +#define ICSK_TIME_PROBE0 3 +#define ICSK_TIME_LOSS_PROBE 5 +#define ICSK_TIME_REO_TIMEOUT 6 + #define IFNAMSIZ 16 #define RTF_GATEWAY 0x0002 +#define TCP_INFINITE_SSTHRESH 0x7fffffff +#define TCP_PINGPONG_THRESH 3 + #define fib_nh_dev nh_common.nhc_dev #define fib_nh_gw_family nh_common.nhc_gw_family #define fib_nh_gw6 nh_common.nhc_gw.ipv6 +#define inet_daddr sk.__sk_common.skc_daddr +#define inet_rcv_saddr sk.__sk_common.skc_rcv_saddr +#define inet_dport sk.__sk_common.skc_dport + +#define ir_loc_addr req.__req_common.skc_rcv_saddr +#define ir_num req.__req_common.skc_num +#define ir_rmt_addr req.__req_common.skc_daddr +#define ir_rmt_port req.__req_common.skc_dport +#define ir_v6_rmt_addr req.__req_common.skc_v6_daddr +#define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr + +#define sk_family __sk_common.skc_family #define sk_rmem_alloc sk_backlog.rmem_alloc #define sk_refcnt __sk_common.skc_refcnt +#define sk_state __sk_common.skc_state +#define sk_v6_daddr __sk_common.skc_v6_daddr +#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr + +#define s6_addr32 in6_u.u6_addr32 + +#define tw_daddr __tw_common.skc_daddr +#define tw_rcv_saddr __tw_common.skc_rcv_saddr +#define tw_dport __tw_common.skc_dport +#define tw_refcnt __tw_common.skc_refcnt +#define tw_v6_daddr __tw_common.skc_v6_daddr +#define tw_v6_rcv_saddr __tw_common.skc_v6_rcv_saddr #endif From 2767c97765cb3d9b54c8e62b468e55cc56854a66 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:20 -0700 Subject: [PATCH 0299/2056] selftests/bpf: Implement sample tcp/tcp6 bpf_iter programs In my VM, I got identical result compared to /proc/net/{tcp,tcp6}. 
For tcp6: $ cat /proc/net/tcp6 sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000001 00000000 0 0 17955 1 000000003eb3102e 100 0 0 10 0 $ cat /sys/fs/bpf/p1 sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 17955 1 000000003eb3102e 100 0 0 10 0 For tcp: $ cat /proc/net/tcp sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 0: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 2666 1 000000007152e43f 100 0 0 10 0 $ cat /sys/fs/bpf/p2 sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode 1: 00000000:0016 00000000:0000 0A 00000000:00000000 00:00000000 00000000 0 0 2666 1 000000007152e43f 100 0 0 10 0 Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230820.3989165-1-yhs@fb.com --- tools/testing/selftests/bpf/progs/bpf_iter.h | 15 ++ .../selftests/bpf/progs/bpf_iter_tcp4.c | 234 ++++++++++++++++ .../selftests/bpf/progs/bpf_iter_tcp6.c | 250 ++++++++++++++++++ 3 files changed, 499 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h index 3757e88c6406..bde23e16e777 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -7,6 +7,8 @@ #define bpf_iter__netlink bpf_iter__netlink___not_used #define bpf_iter__task bpf_iter__task___not_used #define bpf_iter__task_file bpf_iter__task_file___not_used +#define bpf_iter__tcp bpf_iter__tcp___not_used +#define tcp6_sock tcp6_sock___not_used #include "vmlinux.h" #undef bpf_iter_meta #undef bpf_iter__bpf_map @@ -14,6 +16,8 @@ #undef bpf_iter__netlink #undef bpf_iter__task #undef bpf_iter__task_file +#undef bpf_iter__tcp +#undef tcp6_sock struct bpf_iter_meta { struct seq_file *seq; @@ -47,3 +51,14 @@ struct bpf_iter__bpf_map { struct bpf_iter_meta *meta; struct bpf_map *map; } __attribute__((preserve_access_index)); + +struct bpf_iter__tcp { + struct bpf_iter_meta *meta; + struct sock_common *sk_common; + uid_t uid; +} __attribute__((preserve_access_index)); + +struct tcp6_sock { + struct tcp_sock tcp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c new file mode 100644 index 000000000000..30fd587cb325 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list * timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The 
implementation here tailored to a particular + * setting of USER_HZ. + */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + __be32 dest, src; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->rcv_nxt - tp->copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, destp, destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->write_seq - tp->snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->snd_cwnd, + state == TCP_LISTEN ? fastopenq->max_qlen + : (tcp_in_initial_slowstart(tp) ? 
-1 : tp->snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + __u16 destp, srcp; + __be32 dest, src; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = tw->tw_daddr; + src = tw->tw_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ", + seq_num, irsk->ir_loc_addr, + irsk->ir_num, irsk->ir_rmt_addr, + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp4(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "rem_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET) + return 0; + + tp = bpf_skc_to_tcp_sock(sk_common); + if (tp) + return dump_tcp_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c new file mode 100644 index 000000000000..10dec4392031 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +static int hlist_unhashed_lockless(const struct hlist_node *h) +{ + return !(h->pprev); +} + +static int timer_pending(const struct timer_list * timer) +{ + return !hlist_unhashed_lockless(&timer->entry); +} + +extern unsigned CONFIG_HZ __kconfig; + +#define USER_HZ 100 +#define NSEC_PER_SEC 1000000000ULL +static clock_t jiffies_to_clock_t(unsigned long x) +{ + /* The implementation here tailored to a particular + * setting of USER_HZ. 
+ */ + u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ; + u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ; + + if ((tick_nsec % user_hz_nsec) == 0) { + if (CONFIG_HZ < USER_HZ) + return x * (USER_HZ / CONFIG_HZ); + else + return x / (CONFIG_HZ / USER_HZ); + } + return x * tick_nsec/user_hz_nsec; +} + +static clock_t jiffies_delta_to_clock_t(long delta) +{ + if (delta <= 0) + return 0; + + return jiffies_to_clock_t(delta); +} + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +static bool +inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk) +{ + return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH; +} + +static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp) +{ + return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; +} + +static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp, + uid_t uid, __u32 seq_num) +{ + const struct inet_connection_sock *icsk; + const struct fastopen_queue *fastopenq; + const struct in6_addr *dest, *src; + const struct inet_sock *inet; + unsigned long timer_expires; + const struct sock *sp; + __u16 destp, srcp; + int timer_active; + int rx_queue; + int state; + + icsk = &tp->tcp.inet_conn; + inet = &icsk->icsk_inet; + sp = &inet->sk; + fastopenq = &icsk->icsk_accept_queue.fastopenq; + + dest = &sp->sk_v6_daddr; + src = &sp->sk_v6_rcv_saddr; + destp = bpf_ntohs(inet->inet_dport); + srcp = bpf_ntohs(inet->inet_sport); + + if (icsk->icsk_pending == ICSK_TIME_RETRANS || + icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || + icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { + timer_active = 1; + timer_expires = icsk->icsk_timeout; + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { + timer_active = 4; + timer_expires = icsk->icsk_timeout; + } else if (timer_pending(&sp->sk_timer)) { + timer_active = 2; + timer_expires = sp->sk_timer.expires; + } else { + timer_active = 0; + timer_expires = bpf_jiffies64(); + } + + state = sp->sk_state; + if (state == TCP_LISTEN) { + rx_queue = sp->sk_ack_backlog; + } else { + rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq; + if (rx_queue < 0) + rx_queue = 0; + } + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ", + state, + tp->tcp.write_seq - tp->tcp.snd_una, rx_queue, + timer_active, + jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()), + icsk->icsk_retransmits, uid, + icsk->icsk_probes_out, + sock_i_ino(sp), + sp->sk_refcnt.refs.counter); + BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n", + tp, + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk), + tp->tcp.snd_cwnd, + state == TCP_LISTEN ? fastopenq->max_qlen + : (tcp_in_initial_slowstart(&tp->tcp) ? 
-1 + : tp->tcp.snd_ssthresh) + ); + + return 0; +} + +static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw, + uid_t uid, __u32 seq_num) +{ + struct inet_timewait_sock *tw = &ttw->tw_sk; + const struct in6_addr *dest, *src; + __u16 destp, srcp; + long delta; + + delta = tw->tw_timer.expires - bpf_jiffies64(); + dest = &tw->tw_v6_daddr; + src = &tw->tw_v6_rcv_saddr; + destp = bpf_ntohs(tw->tw_dport); + srcp = bpf_ntohs(tw->tw_sport); + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + tw->tw_substate, 0, 0, + 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, + tw->tw_refcnt.refs.counter, tw); + + return 0; +} + +static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq, + uid_t uid, __u32 seq_num) +{ + struct inet_request_sock *irsk = &treq->req; + struct request_sock *req = &irsk->req; + struct in6_addr *src, *dest; + long ttd; + + ttd = req->rsk_timer.expires - bpf_jiffies64(); + src = &irsk->ir_v6_loc_addr; + dest = &irsk->ir_v6_rmt_addr; + + if (ttd < 0) + ttd = 0; + + BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + seq_num, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], + irsk->ir_num, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], + bpf_ntohs(irsk->ir_rmt_port)); + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", + TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd), + req->num_timeout, uid, 0, 0, 0, req); + + return 0; +} + +SEC("iter/tcp") +int dump_tcp6(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + struct seq_file *seq = ctx->meta->seq; + struct tcp_timewait_sock *tw; + struct tcp_request_sock *req; + struct tcp6_sock *tp; + uid_t uid = ctx->uid; + __u32 seq_num; + + if (sk_common == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, " sl " + "local_address " + "remote_address " + "st tx_queue rx_queue tr tm->when retrnsmt" + " uid timeout inode\n"); + + if (sk_common->skc_family != AF_INET6) + return 0; + + tp = bpf_skc_to_tcp6_sock(sk_common); + if (tp) + return dump_tcp6_sock(seq, tp, uid, seq_num); + + tw = bpf_skc_to_tcp_timewait_sock(sk_common); + if (tw) + return dump_tw_sock(seq, tw, uid, seq_num); + + req = bpf_skc_to_tcp_request_sock(sk_common); + if (req) + return dump_req_sock(seq, req, uid, seq_num); + + return 0; +} From ace6d6ec9e9e167047b6c8ca462a0830220640c2 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:22 -0700 Subject: [PATCH 0300/2056] selftests/bpf: Implement sample udp/udp6 bpf_iter programs On my VM, I got identical results between /proc/net/udp[6] and the udp{4,6} bpf iterator. 
For udp6: $ cat /sys/fs/bpf/p1 sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops 1405: 000080FE00000000FF7CC4D0D9EFE4FE:0222 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 193 0 19183 2 0000000029eab111 0 $ cat /proc/net/udp6 sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops 1405: 000080FE00000000FF7CC4D0D9EFE4FE:0222 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 193 0 19183 2 0000000029eab111 0 For udp4: $ cat /sys/fs/bpf/p4 sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops 2007: 00000000:1F90 00000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 72540 2 000000004ede477a 0 $ cat /proc/net/udp sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops 2007: 00000000:1F90 00000000:0000 07 00000000:00000000 00:00000000 00000000 0 0 72540 2 000000004ede477a 0 Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230822.3989299-1-yhs@fb.com --- tools/testing/selftests/bpf/progs/bpf_iter.h | 16 ++++ .../selftests/bpf/progs/bpf_iter_udp4.c | 71 +++++++++++++++++ .../selftests/bpf/progs/bpf_iter_udp6.c | 79 +++++++++++++++++++ 3 files changed, 166 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_udp4.c create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_udp6.c diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h index bde23e16e777..17db3bac518b 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -9,6 +9,8 @@ #define bpf_iter__task_file bpf_iter__task_file___not_used #define bpf_iter__tcp bpf_iter__tcp___not_used #define tcp6_sock tcp6_sock___not_used +#define bpf_iter__udp bpf_iter__udp___not_used +#define udp6_sock udp6_sock___not_used #include "vmlinux.h" #undef bpf_iter_meta #undef bpf_iter__bpf_map @@ -18,6 +20,8 @@ #undef bpf_iter__task_file #undef bpf_iter__tcp #undef tcp6_sock +#undef bpf_iter__udp +#undef udp6_sock struct bpf_iter_meta { struct seq_file *seq; @@ -62,3 +66,15 @@ struct tcp6_sock { struct tcp_sock tcp; struct ipv6_pinfo inet6; } __attribute__((preserve_access_index)); + +struct bpf_iter__udp { + struct bpf_iter_meta *meta; + struct udp_sock *udp_sk; + uid_t uid __attribute__((aligned(8))); + int bucket __attribute__((aligned(8))); +} __attribute__((preserve_access_index)); + +struct udp6_sock { + struct udp_sock udp; + struct ipv6_pinfo inet6; +} __attribute__((preserve_access_index)); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c new file mode 100644 index 000000000000..7053784575e4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), 
&inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp4(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __be32 dest, src; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, + " sl local_address rem_address st tx_queue " + "rx_queue tr tm->when retrnsmt uid timeout " + "inode ref pointer drops\n"); + + /* filter out udp6 sockets */ + inet = &udp_sk->inet; + if (inet->sk.sk_family == AF_INET6) + return 0; + + inet = &udp_sk->inet; + dest = inet->inet_daddr; + src = inet->inet_rcv_saddr; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + + BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ", + ctx->bucket, src, srcp, dest, destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c new file mode 100644 index 000000000000..c1175a6ecf43 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include +#include +#include + +char _license[] SEC("license") = "GPL"; + +#define IPV6_SEQ_DGRAM_HEADER \ + " sl " \ + "local_address " \ + "remote_address " \ + "st tx_queue rx_queue tr tm->when retrnsmt" \ + " uid timeout inode ref pointer drops\n" + +static long sock_i_ino(const struct sock *sk) +{ + const struct socket *sk_socket = sk->sk_socket; + const struct inode *inode; + unsigned long ino; + + if (!sk_socket) + return 0; + + inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; + bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + return ino; +} + +SEC("iter/udp") +int dump_udp6(struct bpf_iter__udp *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct udp_sock *udp_sk = ctx->udp_sk; + const struct in6_addr *dest, *src; + struct udp6_sock *udp6_sk; + struct inet_sock *inet; + __u16 srcp, destp; + __u32 seq_num; + int rqueue; + + if (udp_sk == (void *)0) + return 0; + + seq_num = ctx->meta->seq_num; + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER); + + udp6_sk = bpf_skc_to_udp6_sock(udp_sk); + if (udp6_sk == (void *)0) + return 0; + + inet = &udp_sk->inet; + srcp = bpf_ntohs(inet->inet_sport); + destp = bpf_ntohs(inet->inet_dport); + rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit; + dest = &inet->sk.sk_v6_daddr; + src = &inet->sk.sk_v6_rcv_saddr; + + BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ", + ctx->bucket, + src->s6_addr32[0], src->s6_addr32[1], + src->s6_addr32[2], src->s6_addr32[3], srcp, + dest->s6_addr32[0], dest->s6_addr32[1], + dest->s6_addr32[2], dest->s6_addr32[3], destp); + + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", + inet->sk.sk_state, + inet->sk.sk_wmem_alloc.refs.counter - 1, + rqueue, + 0, 0L, 0, ctx->uid, 0, + sock_i_ino(&inet->sk), + inet->sk.sk_refcnt.refs.counter, udp_sk, + inet->sk.sk_drops.counter); + + return 0; +} From 
cfcd75f9bf12301dfdcfe9ff6dfb240997e7745f Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 23 Jun 2020 16:08:23 -0700 Subject: [PATCH 0301/2056] selftests/bpf: Add tcp/udp iterator programs to selftests Added tcp{4,6} and udp{4,6} bpf programs into test_progs selftest so that they at least can load successfully. $ ./test_progs -n 3 ... #3/7 tcp4:OK #3/8 tcp6:OK #3/9 udp4:OK #3/10 udp6:OK ... #3 bpf_iter:OK Summary: 1/16 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200623230823.3989372-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 87c29dde1cf9..1e2e0fced6e8 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -6,6 +6,10 @@ #include "bpf_iter_bpf_map.skel.h" #include "bpf_iter_task.skel.h" #include "bpf_iter_task_file.skel.h" +#include "bpf_iter_tcp4.skel.h" +#include "bpf_iter_tcp6.skel.h" +#include "bpf_iter_udp4.skel.h" +#include "bpf_iter_udp6.skel.h" #include "bpf_iter_test_kern1.skel.h" #include "bpf_iter_test_kern2.skel.h" #include "bpf_iter_test_kern3.skel.h" @@ -120,6 +124,62 @@ static void test_task_file(void) bpf_iter_task_file__destroy(skel); } +static void test_tcp4(void) +{ + struct bpf_iter_tcp4 *skel; + + skel = bpf_iter_tcp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp4); + + bpf_iter_tcp4__destroy(skel); +} + +static void test_tcp6(void) +{ + struct bpf_iter_tcp6 *skel; + + skel = bpf_iter_tcp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_tcp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_tcp6); + + bpf_iter_tcp6__destroy(skel); +} + +static void test_udp4(void) +{ + struct bpf_iter_udp4 *skel; + + skel = bpf_iter_udp4__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp4__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp4); + + bpf_iter_udp4__destroy(skel); +} + +static void test_udp6(void) +{ + struct bpf_iter_udp6 *skel; + + skel = bpf_iter_udp6__open_and_load(); + if (CHECK(!skel, "bpf_iter_udp6__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_udp6); + + bpf_iter_udp6__destroy(skel); +} + /* The expected string is less than 16 bytes */ static int do_read_with_fd(int iter_fd, const char *expected, bool read_one_char) @@ -394,6 +454,14 @@ void test_bpf_iter(void) test_task(); if (test__start_subtest("task_file")) test_task_file(); + if (test__start_subtest("tcp4")) + test_tcp4(); + if (test__start_subtest("tcp6")) + test_tcp6(); + if (test__start_subtest("udp4")) + test_udp4(); + if (test__start_subtest("udp6")) + test_udp6(); if (test__start_subtest("anon")) test_anon_iter(false); if (test__start_subtest("anon-read-one-char")) From 19e528dc9af29169fa7cdfa61071805fdef504c6 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:28 +0800 Subject: [PATCH 0302/2056] net: qos: add tc police offloading action with max frame size limit Current police offloading support the 'burst'' and 'rate_bytes_ps'. Some hardware own the capability to limit the frame size. 
If the frame size larger than the setting, the frame would be dropped. For the police action itself already accept the 'mtu' parameter in tc command. But not extend to tc flower offloading. So extend 'mtu' to tc flower offloading. Signed-off-by: Po Liu Signed-off-by: David S. Miller --- include/net/flow_offload.h | 1 + include/net/tc_act/tc_police.h | 10 ++++++++++ net/sched/cls_api.c | 1 + 3 files changed, 12 insertions(+) diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 00c15f14c434..c2ef19c6b27d 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -234,6 +234,7 @@ struct flow_action_entry { struct { /* FLOW_ACTION_POLICE */ s64 burst; u64 rate_bytes_ps; + u32 mtu; } police; struct { /* FLOW_ACTION_CT */ int action; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index f098ad4424be..cd973b10ae8c 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -69,4 +69,14 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) return params->tcfp_burst; } +static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->tcfp_mtu; +} + #endif /* __NET_TC_POLICE_H */ diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index a00a203b2ef5..6aba7d5ba1ec 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3658,6 +3658,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.burst = tcf_police_tcfp_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); + entry->police.mtu = tcf_police_tcfp_mtu(act); } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); From 89d1f0966997d5bbe510dbdb10a5c26c0e567b03 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:29 +0800 Subject: [PATCH 0303/2056] net: enetc: add support max frame size for tc flower offload Base on the tc flower offload police action add max frame size by the parameter 'mtu'. Tc flower device driver working by the IEEE 802.1Qci stream filter can implement the max frame size filtering. Add it to the current hardware tc flower stearm filter driver. Signed-off-by: Po Liu Signed-off-by: David S. Miller --- .../net/ethernet/freescale/enetc/enetc_qos.c | 52 +++++++++++++------ 1 file changed, 36 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index fb76903eca90..07f98bf7a06b 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -389,6 +389,7 @@ struct enetc_psfp_filter { u32 index; s32 handle; s8 prio; + u32 maxsdu; u32 gate_id; s32 meter_id; refcount_t refcount; @@ -430,6 +431,12 @@ static struct actions_fwd enetc_act_fwd[] = { BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), FILTER_ACTION_TYPE_PSFP }, + { + BIT(FLOW_ACTION_POLICE) | + BIT(FLOW_ACTION_GATE), + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS), + FILTER_ACTION_TYPE_PSFP + }, /* example for ACL actions */ { BIT(FLOW_ACTION_DROP), @@ -594,8 +601,12 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv, /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX * field as being either an MSDU value or an index into the Flow * Meter Instance table. 
- * TODO: no limit max sdu */ + if (sfi->maxsdu) { + sfi_config->msdu = + cpu_to_le16(sfi->maxsdu); + sfi_config->multi |= 0x40; + } if (sfi->meter_id >= 0) { sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id); @@ -872,6 +883,7 @@ static struct enetc_psfp_filter hlist_for_each_entry(s, &epsfp.psfp_filter_list, node) if (s->gate_id == sfi->gate_id && s->prio == sfi->prio && + s->maxsdu == sfi->maxsdu && s->meter_id == sfi->meter_id) return s; @@ -979,6 +991,7 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts, static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, struct flow_cls_offload *f) { + struct flow_action_entry *entryg = NULL, *entryp = NULL; struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; struct enetc_stream_filter *filter, *old_filter; @@ -997,9 +1010,12 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, flow_action_for_each(i, entry, &rule->action) if (entry->id == FLOW_ACTION_GATE) - break; + entryg = entry; + else if (entry->id == FLOW_ACTION_POLICE) + entryp = entry; - if (entry->id != FLOW_ACTION_GATE) + /* Not support without gate action */ + if (!entryg) return -EINVAL; filter = kzalloc(sizeof(*filter), GFP_KERNEL); @@ -1079,19 +1095,19 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } /* parsing gate action */ - if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) { + if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) { NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); err = -ENOSPC; goto free_filter; } - if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { + if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) { NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!"); err = -ENOSPC; goto free_filter; } - entries_size = struct_size(sgi, entries, entry->gate.num_entries); + entries_size = struct_size(sgi, entries, entryg->gate.num_entries); sgi = kzalloc(entries_size, GFP_KERNEL); if (!sgi) { err = -ENOMEM; @@ -1099,18 +1115,18 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } refcount_set(&sgi->refcount, 1); - sgi->index = entry->gate.index; - sgi->init_ipv = entry->gate.prio; - sgi->basetime = entry->gate.basetime; - sgi->cycletime = entry->gate.cycletime; - sgi->num_entries = entry->gate.num_entries; + sgi->index = entryg->gate.index; + sgi->init_ipv = entryg->gate.prio; + sgi->basetime = entryg->gate.basetime; + sgi->cycletime = entryg->gate.cycletime; + sgi->num_entries = entryg->gate.num_entries; e = sgi->entries; - for (i = 0; i < entry->gate.num_entries; i++) { - e[i].gate_state = entry->gate.entries[i].gate_state; - e[i].interval = entry->gate.entries[i].interval; - e[i].ipv = entry->gate.entries[i].ipv; - e[i].maxoctets = entry->gate.entries[i].maxoctets; + for (i = 0; i < entryg->gate.num_entries; i++) { + e[i].gate_state = entryg->gate.entries[i].gate_state; + e[i].interval = entryg->gate.entries[i].interval; + e[i].ipv = entryg->gate.entries[i].ipv; + e[i].maxoctets = entryg->gate.entries[i].maxoctets; } filter->sgi_index = sgi->index; @@ -1127,6 +1143,10 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, /* flow meter not support yet */ sfi->meter_id = ENETC_PSFP_WILDCARD; + /* Max frame size */ + if (entryp) + sfi->maxsdu = entryp->police.mtu; + /* prio ref the filter prio */ if (f->common.prio && f->common.prio <= BIT(3)) sfi->prio = f->common.prio - 1; From 627e39b1399e72e53895eec6bbec30199ed43de2 Mon Sep 17 00:00:00 2001 From: Po Liu Date: 
Wed, 24 Jun 2020 17:36:30 +0800 Subject: [PATCH 0304/2056] net: qos: police action add index for tc flower offloading Hardware device may include more than one police entry. Specifying the action's index make it possible for several tc filters to share the same police action when installing the filters. Propagate this index to device drivers through the flow offload intermediate representation, so that drivers could share a single hardware policer between multiple filters. v1->v2 changes: - Update the commit message suggest by Ido Schimmel Signed-off-by: Po Liu Signed-off-by: David S. Miller --- include/net/flow_offload.h | 1 + net/sched/cls_api.c | 1 + 2 files changed, 2 insertions(+) diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index c2ef19c6b27d..eed98075b1ae 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -232,6 +232,7 @@ struct flow_action_entry { bool truncate; } sample; struct { /* FLOW_ACTION_POLICE */ + u32 index; s64 burst; u64 rate_bytes_ps; u32 mtu; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 6aba7d5ba1ec..fdc4c89ca1fa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3659,6 +3659,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); + entry->police.index = act->tcfa_index; } else if (is_tcf_ct(act)) { entry->id = FLOW_ACTION_CT; entry->ct.action = tcf_ct_action(act); From d621d7703d510d222fa674254293ec48ca6ea709 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Wed, 24 Jun 2020 17:36:31 +0800 Subject: [PATCH 0305/2056] net: enetc add tc flower offload flow metering policing action Flow metering entries in IEEE 802.1Qci is an optional function for a flow filtering module. Flow metering is two rates two buckets and three color marker to policing the frames. This patch only enable one rate one bucket and in color blind mode. Flow metering instance are as specified in the algorithm in MEF 10.3 and in Bandwidth Profile Parameters. They are: a) Flow meter instance identifier. An integer value identifying the flow meter instance. The patch use the police 'index' as thin value. b) Committed Information Rate (CIR), in bits per second. This patch use the 'rate_bytes_ps' represent this value. c) Committed Burst Size (CBS), in octets. This patch use the 'burst' represent this value. d) Excess Information Rate (EIR), in bits per second. e) Excess Burst Size per Bandwidth Profile Flow (EBS), in octets. And plus some other parameters. This patch set EIR/EBS default disable and color blind mode. v1->v2 changes: - Use div_u64() as division replace the '/' report: All errors (new ones prefixed by >>): ld: drivers/net/ethernet/freescale/enetc/enetc_qos.o: in function `enetc_flowmeter_hw_set': >> enetc_qos.c:(.text+0x66): undefined reference to `__udivdi3' Reported-by: kernel test robot Signed-off-by: Po Liu Signed-off-by: David S. 
Miller --- .../net/ethernet/freescale/enetc/enetc_hw.h | 24 +++ .../net/ethernet/freescale/enetc/enetc_qos.c | 160 ++++++++++++++++-- 2 files changed, 172 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 6314051bc6c1..f00c4382423e 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -570,6 +570,7 @@ enum bdcr_cmd_class { BDCR_CMD_STREAM_IDENTIFY, BDCR_CMD_STREAM_FILTER, BDCR_CMD_STREAM_GCL, + BDCR_CMD_FLOW_METER, __BDCR_CMD_MAX_LEN, BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1, }; @@ -736,10 +737,33 @@ struct sgcl_data { struct sgce sgcl[0]; }; +#define ENETC_CBDR_FMI_MR BIT(0) +#define ENETC_CBDR_FMI_MREN BIT(1) +#define ENETC_CBDR_FMI_DOY BIT(2) +#define ENETC_CBDR_FMI_CM BIT(3) +#define ENETC_CBDR_FMI_CF BIT(4) +#define ENETC_CBDR_FMI_NDOR BIT(5) +#define ENETC_CBDR_FMI_OALEN BIT(6) +#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0) + +/* class 10: command 0/1, Flow Meter Instance Set, short Format */ +struct fmi_conf { + __le32 cir; + __le32 cbs; + __le32 eir; + __le32 ebs; + u8 conf; + u8 res1; + u8 ir_fpp; + u8 res2[4]; + u8 en; +}; + struct enetc_cbd { union{ struct sfi_conf sfi_conf; struct sgi_table sgi_table; + struct fmi_conf fmi_conf; struct { __le32 addr[2]; union { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 07f98bf7a06b..4f670cbdf186 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -408,10 +408,26 @@ struct enetc_psfp_gate { struct action_gate_entry entries[0]; }; +/* Only enable the green color frame now + * Will add eir and ebs color blind, couple flag etc when + * policing action add more offloading parameters + */ +struct enetc_psfp_meter { + u32 index; + u32 cir; + u32 cbs; + refcount_t refcount; + struct hlist_node node; +}; + +#define ENETC_PSFP_FLAGS_FMI BIT(0) + struct enetc_stream_filter { struct enetc_streamid sid; u32 sfi_index; u32 sgi_index; + u32 flags; + u32 fmi_index; struct flow_stats stats; struct hlist_node node; }; @@ -422,6 +438,7 @@ struct enetc_psfp { struct hlist_head stream_list; struct hlist_head psfp_filter_list; struct hlist_head psfp_gate_list; + struct hlist_head psfp_meter_list; spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */ }; @@ -842,6 +859,47 @@ static int enetc_streamgate_hw_set(struct enetc_ndev_priv *priv, return err; } +static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv, + struct enetc_psfp_meter *fmi, + u8 enable) +{ + struct enetc_cbd cbd = { .cmd = 0 }; + struct fmi_conf *fmi_config; + u64 temp = 0; + + cbd.index = cpu_to_le16((u16)fmi->index); + cbd.cls = BDCR_CMD_FLOW_METER; + cbd.status_flags = 0x80; + + if (!enable) + return enetc_send_cmd(priv->si, &cbd); + + fmi_config = &cbd.fmi_conf; + fmi_config->en = 0x80; + + if (fmi->cir) { + temp = (u64)8000 * fmi->cir; + temp = div_u64(temp, 3725); + } + + fmi_config->cir = cpu_to_le32((u32)temp); + fmi_config->cbs = cpu_to_le32(fmi->cbs); + + /* Default for eir ebs disable */ + fmi_config->eir = 0; + fmi_config->ebs = 0; + + /* Default: + * mark red disable + * drop on yellow disable + * color mode disable + * couple flag disable + */ + fmi_config->conf = 0; + + return enetc_send_cmd(priv->si, &cbd); +} + static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index) { struct enetc_stream_filter *f; @@ -875,6 +933,17 @@ static struct 
enetc_psfp_filter *enetc_get_filter_by_index(u32 index) return NULL; } +static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index) +{ + struct enetc_psfp_meter *m; + + hlist_for_each_entry(m, &epsfp.psfp_meter_list, node) + if (m->index == index) + return m; + + return NULL; +} + static struct enetc_psfp_filter *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi) { @@ -934,9 +1003,27 @@ static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index) } } +static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index) +{ + struct enetc_psfp_meter *fmi; + u8 z; + + fmi = enetc_get_meter_by_index(index); + WARN_ON(!fmi); + z = refcount_dec_and_test(&fmi->refcount); + if (z) { + enetc_flowmeter_hw_set(priv, fmi, false); + hlist_del(&fmi->node); + kfree(fmi); + } +} + static void remove_one_chain(struct enetc_ndev_priv *priv, struct enetc_stream_filter *filter) { + if (filter->flags & ENETC_PSFP_FLAGS_FMI) + flow_meter_unref(priv, filter->fmi_index); + stream_gate_unref(priv, filter->sgi_index); stream_filter_unref(priv, filter->sfi_index); @@ -947,7 +1034,8 @@ static void remove_one_chain(struct enetc_ndev_priv *priv, static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, struct enetc_streamid *sid, struct enetc_psfp_filter *sfi, - struct enetc_psfp_gate *sgi) + struct enetc_psfp_gate *sgi, + struct enetc_psfp_meter *fmi) { int err; @@ -965,8 +1053,16 @@ static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv, if (err) goto revert_sfi; + if (fmi) { + err = enetc_flowmeter_hw_set(priv, fmi, true); + if (err) + goto revert_sgi; + } + return 0; +revert_sgi: + enetc_streamgate_hw_set(priv, sgi, false); revert_sfi: if (sfi) enetc_streamfilter_hw_set(priv, sfi, false); @@ -995,6 +1091,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct netlink_ext_ack *extack = f->common.extack; struct enetc_stream_filter *filter, *old_filter; + struct enetc_psfp_meter *fmi = NULL, *old_fmi; struct enetc_psfp_filter *sfi, *old_sfi; struct enetc_psfp_gate *sgi, *old_sgi; struct flow_action_entry *entry; @@ -1139,13 +1236,34 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, refcount_set(&sfi->refcount, 1); sfi->gate_id = sgi->index; - - /* flow meter not support yet */ sfi->meter_id = ENETC_PSFP_WILDCARD; - /* Max frame size */ - if (entryp) - sfi->maxsdu = entryp->police.mtu; + /* Flow meter and max frame size */ + if (entryp) { + if (entryp->police.burst) { + u64 temp; + + fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); + if (!fmi) { + err = -ENOMEM; + goto free_sfi; + } + refcount_set(&fmi->refcount, 1); + fmi->cir = entryp->police.rate_bytes_ps; + /* Convert to original burst value */ + temp = entryp->police.burst * fmi->cir; + temp = div_u64(temp, 1000000000ULL); + + fmi->cbs = temp; + fmi->index = entryp->police.index; + filter->flags |= ENETC_PSFP_FLAGS_FMI; + filter->fmi_index = fmi->index; + sfi->meter_id = fmi->index; + } + + if (entryp->police.mtu) + sfi->maxsdu = entryp->police.mtu; + } /* prio ref the filter prio */ if (f->common.prio && f->common.prio <= BIT(3)) @@ -1161,7 +1279,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, if (sfi->handle < 0) { NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!"); err = -ENOSPC; - goto free_sfi; + goto free_fmi; } sfi->index = index; @@ -1177,11 +1295,23 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } err = enetc_psfp_hw_set(priv, &filter->sid, - sfi_overwrite ? 
NULL : sfi, sgi); + sfi_overwrite ? NULL : sfi, sgi, fmi); if (err) - goto free_sfi; + goto free_fmi; spin_lock(&epsfp.psfp_lock); + if (filter->flags & ENETC_PSFP_FLAGS_FMI) { + old_fmi = enetc_get_meter_by_index(filter->fmi_index); + if (old_fmi) { + fmi->refcount = old_fmi->refcount; + refcount_set(&fmi->refcount, + refcount_read(&old_fmi->refcount) + 1); + hlist_del(&old_fmi->node); + kfree(old_fmi); + } + hlist_add_head(&fmi->node, &epsfp.psfp_meter_list); + } + /* Remove the old node if exist and update with a new node */ old_sgi = enetc_get_gate_by_index(filter->sgi_index); if (old_sgi) { @@ -1212,6 +1342,8 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, return 0; +free_fmi: + kfree(fmi); free_sfi: kfree(sfi); free_gate: @@ -1310,9 +1442,13 @@ static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv, return -EINVAL; spin_lock(&epsfp.psfp_lock); - stats.pkts = counters.matching_frames_count - filter->stats.pkts; - stats.drops = counters.not_passing_frames_count - - filter->stats.drops; + stats.pkts = counters.matching_frames_count + + counters.not_passing_sdu_count - + filter->stats.pkts; + stats.drops = counters.not_passing_frames_count + + counters.not_passing_sdu_count + + counters.red_frames_count - + filter->stats.drops; stats.lastused = filter->stats.lastused; filter->stats.pkts += stats.pkts; filter->stats.drops += stats.drops; From 5ea7c81a4f34e4dbb9c7bfd0fe18e5fba8a7f8f9 Mon Sep 17 00:00:00 2001 From: Kiran K Date: Thu, 25 Jun 2020 00:04:32 +0530 Subject: [PATCH 0306/2056] Bluetooth: btusb: Refactor of firmware download flow for Intel conrollers Address the scalability to support new generation Intel controller with respect to readability and enhancement to new firmware download sequence Signed-off-by: Kiran K Reviewed-by: Chethan T N Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 145 +++++++++++++++++++++----------------- 1 file changed, 80 insertions(+), 65 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1ac6fd7c4534..f8a71fdabb3d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -511,7 +511,6 @@ struct btusb_data { unsigned cmd_timeout_cnt; }; - static void btusb_intel_cmd_timeout(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); @@ -2278,46 +2277,25 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver, return true; } -static int btusb_setup_intel_new(struct hci_dev *hdev) +static int btusb_intel_download_firmware(struct hci_dev *hdev, + struct intel_version *ver, + struct intel_boot_params *params) { - struct btusb_data *data = hci_get_drvdata(hdev); - struct intel_version ver; - struct intel_boot_params params; - struct intel_debug_features features; const struct firmware *fw; u32 boot_param; char fwname[64]; - ktime_t calltime, delta, rettime; - unsigned long long duration; int err; + struct btusb_data *data = hci_get_drvdata(hdev); - BT_DBG("%s", hdev->name); - - /* Set the default boot parameter to 0x0 and it is updated to - * SKU specific boot parameter after reading Intel_Write_Boot_Params - * command while downloading the firmware. - */ - boot_param = 0x00000000; - - calltime = ktime_get(); - - /* Read the Intel version information to determine if the device - * is in bootloader mode or if it already has operational firmware - * loaded. 
- */ - err = btintel_read_version(hdev, &ver); - if (err) { - bt_dev_err(hdev, "Intel Read version failed (%d)", err); - btintel_reset_to_bootloader(hdev); - return err; - } + if (!ver || !params) + return -EINVAL; /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. */ - if (ver.hw_platform != 0x37) { + if (ver->hw_platform != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", - ver.hw_platform); + ver->hw_platform); return -EINVAL; } @@ -2327,7 +2305,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come along. */ - switch (ver.hw_variant) { + switch (ver->hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ case 0x11: /* JfP */ @@ -2337,11 +2315,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", - ver.hw_variant); + ver->hw_variant); return -EINVAL; } - btintel_version_info(hdev, &ver); + btintel_version_info(hdev, ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies @@ -2356,25 +2334,25 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. */ - if (ver.fw_variant == 0x23) { + if (ver->fw_variant == 0x23) { clear_bit(BTUSB_BOOTLOADER, &data->flags); btintel_check_bdaddr(hdev); - goto finish; + return 0; } /* If the device is not in bootloader mode, then the only possible * choice is to return an error and abort the device initialization. */ - if (ver.fw_variant != 0x06) { + if (ver->fw_variant != 0x06) { bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", - ver.fw_variant); + ver->fw_variant); return -ENODEV; } /* Read the secure boot parameters to identify the operating * details of the bootloader. */ - err = btintel_read_boot_params(hdev, ¶ms); + err = btintel_read_boot_params(hdev, params); if (err) return err; @@ -2382,16 +2360,16 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * with a command complete event. If the boot parameters indicate * that this bootloader does not send them, then abort the setup. */ - if (params.limited_cce != 0x00) { + if (params->limited_cce != 0x00) { bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", - params.limited_cce); + params->limited_cce); return -EINVAL; } /* If the OTP has no valid Bluetooth device address, then there will * also be no valid address for the operational firmware. */ - if (!bacmp(¶ms.otp_bdaddr, BDADDR_ANY)) { + if (!bacmp(¶ms->otp_bdaddr, BDADDR_ANY)) { bt_dev_info(hdev, "No device address configured"); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } @@ -2417,7 +2395,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) * ibt---.sfi. * */ - err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, + err = btusb_setup_intel_new_get_fw_name(ver, params, fwname, sizeof(fwname), "sfi"); if (!err) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); @@ -2432,16 +2410,6 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) bt_dev_info(hdev, "Found device firmware: %s", fwname); - /* Save the DDC file name for later use to apply once the firmware - * downloading is done. 
- */ - err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, fwname, - sizeof(fwname), "ddc"); - if (!err) { - bt_dev_err(hdev, "Unsupported Intel firmware naming"); - return -EINVAL; - } - if (fw->size < 644) { bt_dev_err(hdev, "Invalid size of firmware file (%zu)", fw->size); @@ -2496,18 +2464,58 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) goto done; } +done: + release_firmware(fw); + return err; +} + +static int btusb_setup_intel_new(struct hci_dev *hdev) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct intel_version ver; + struct intel_boot_params params; + u32 boot_param; + char ddcname[64]; + ktime_t calltime, delta, rettime; + unsigned long long duration; + int err; + struct intel_debug_features features; + + BT_DBG("%s", hdev->name); + + /* Set the default boot parameter to 0x0 and it is updated to + * SKU specific boot parameter after reading Intel_Write_Boot_Params + * command while downloading the firmware. + */ + boot_param = 0x00000000; + + calltime = ktime_get(); + + /* Read the Intel version information to determine if the device + * is in bootloader mode or if it already has operational firmware + * loaded. + */ + err = btintel_read_version(hdev, &ver); + if (err) { + bt_dev_err(hdev, "Intel Read version failed (%d)", err); + btintel_reset_to_bootloader(hdev); + return err; + } + + err = btusb_intel_download_firmware(hdev, &ver, ¶ms); + if (err) + return err; + + /* controller is already having an operational firmware */ + if (ver.fw_variant == 0x23) + goto finish; + rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); -done: - release_firmware(fw); - - if (err < 0) - return err; - calltime = ktime_get(); set_bit(BTUSB_BOOTING, &data->flags); @@ -2551,13 +2559,20 @@ static int btusb_setup_intel_new(struct hci_dev *hdev) clear_bit(BTUSB_BOOTLOADER, &data->flags); - /* Once the device is running in operational mode, it needs to apply - * the device configuration (DDC) parameters. - * - * The device can work without DDC parameters, so even if it fails - * to load the file, no need to fail the setup. - */ - btintel_load_ddc_config(hdev, fwname); + err = btusb_setup_intel_new_get_fw_name(&ver, ¶ms, ddcname, + sizeof(ddcname), "ddc"); + + if (!err) { + bt_dev_err(hdev, "Unsupported Intel firmware naming"); + } else { + /* Once the device is running in operational mode, it needs to + * apply the device configuration (DDC) parameters. + * + * The device can work without DDC parameters, so even if it + * fails to load the file, no need to fail the setup. + */ + btintel_load_ddc_config(hdev, ddcname); + } /* Read the Intel supported features and if new exception formats * supported, need to load the additional DDC config to enable. From 9023497d8746d355bac8ddbc65797a4f553726fd Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 24 Jun 2020 16:31:24 +0200 Subject: [PATCH 0307/2056] tools, bpftool: Define prog_type_name array only once Define prog_type_name in prog.c instead of main.h so it is only defined once. This leads to a slight decrease in the binary size of bpftool. 
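For readers unfamiliar with the idiom, a minimal sketch of the single-definition pattern this patch applies follows; the identifiers are taken from the diff further below, while prog_type_str() is a hypothetical caller added only for illustration.

    /* main.h: declarations only, visible to every translation unit */
    extern const char * const prog_type_name[];
    extern const size_t prog_type_name_size;

    /* prog.c: the one and only definition of the table */
    const char * const prog_type_name[] = {
    	[BPF_PROG_TYPE_UNSPEC]        = "unspec",
    	[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
    	/* ... remaining entries as in the hunk below ... */
    };
    const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);

    /* any other .c file: bounds-check against the exported size;
     * ARRAY_SIZE() cannot be used there because the extern
     * declaration carries no array bound.
     */
    static const char *prog_type_str(unsigned int type)
    {
    	return type < prog_type_name_size ? prog_type_name[type]
    					  : "unknown";
    }

The binary-size saving comes from the header no longer instantiating a private static copy of the table in every object file that includes it.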
Before: text data bss dec hex filename 401032 11936 1573160 1986128 1e4e50 bpftool After: text data bss dec hex filename 399024 11168 1573160 1983352 1e4378 bpftool Signed-off-by: Tobias Klauser Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200624143124.12914-1-tklauser@distanz.ch --- tools/bpf/bpftool/feature.c | 4 ++-- tools/bpf/bpftool/link.c | 4 ++-- tools/bpf/bpftool/main.h | 33 ++------------------------------- tools/bpf/bpftool/map.c | 4 ++-- tools/bpf/bpftool/prog.c | 34 ++++++++++++++++++++++++++++++++++ 5 files changed, 42 insertions(+), 37 deletions(-) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 768bf77df886..1cd75807673e 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -695,7 +695,7 @@ section_program_types(bool *supported_types, const char *define_prefix, "/*** eBPF program types ***/", define_prefix); - for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++) probe_prog_type(i, supported_types, define_prefix, ifindex); print_end_section(); @@ -741,7 +741,7 @@ section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex) " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n", define_prefix, define_prefix, define_prefix, define_prefix); - for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++) + for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++) probe_helpers_for_progtype(i, supported_types[i], define_prefix, ifindex); diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 7329f3134283..326b8fdf0243 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -108,7 +108,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) if (err) return err; - if (prog_info.type < ARRAY_SIZE(prog_type_name)) + if (prog_info.type < prog_type_name_size) jsonw_string_field(json_wtr, "prog_type", prog_type_name[prog_info.type]); else @@ -187,7 +187,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) if (err) return err; - if (prog_info.type < ARRAY_SIZE(prog_type_name)) + if (prog_info.type < prog_type_name_size) printf("\n\tprog_type %s ", prog_type_name[prog_info.type]); else diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index ce26271e5f0c..269f1cb6aef5 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -56,37 +56,8 @@ #define HELP_SPEC_LINK \ "LINK := { id LINK_ID | pinned FILE }" -static const char * const prog_type_name[] = { - [BPF_PROG_TYPE_UNSPEC] = "unspec", - [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", - [BPF_PROG_TYPE_KPROBE] = "kprobe", - [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", - [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", - [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", - [BPF_PROG_TYPE_XDP] = "xdp", - [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", - [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", - [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", - [BPF_PROG_TYPE_LWT_IN] = "lwt_in", - [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", - [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", - [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", - [BPF_PROG_TYPE_SK_SKB] = "sk_skb", - [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", - [BPF_PROG_TYPE_SK_MSG] = "sk_msg", - [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", - [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", - [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", - [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", - 
[BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", - [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", - [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", - [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", - [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", - [BPF_PROG_TYPE_TRACING] = "tracing", - [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", - [BPF_PROG_TYPE_EXT] = "ext", -}; +extern const char * const prog_type_name[]; +extern const size_t prog_type_name_size; static const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_CGROUP_INET_INGRESS] = "ingress", diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index bbb74d387fb0..42c215b6eae6 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -473,7 +473,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) if (owner_prog_type) { unsigned int prog_type = atoi(owner_prog_type); - if (prog_type < ARRAY_SIZE(prog_type_name)) + if (prog_type < prog_type_name_size) jsonw_string_field(json_wtr, "owner_prog_type", prog_type_name[prog_type]); else @@ -558,7 +558,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) if (owner_prog_type) { unsigned int prog_type = atoi(owner_prog_type); - if (prog_type < ARRAY_SIZE(prog_type_name)) + if (prog_type < prog_type_name_size) printf("owner_prog_type %s ", prog_type_name[prog_type]); else diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index e21fa8ad2efa..6863c57effd0 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -29,6 +29,40 @@ #include "main.h" #include "xlated_dumper.h" +const char * const prog_type_name[] = { + [BPF_PROG_TYPE_UNSPEC] = "unspec", + [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter", + [BPF_PROG_TYPE_KPROBE] = "kprobe", + [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls", + [BPF_PROG_TYPE_SCHED_ACT] = "sched_act", + [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint", + [BPF_PROG_TYPE_XDP] = "xdp", + [BPF_PROG_TYPE_PERF_EVENT] = "perf_event", + [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb", + [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock", + [BPF_PROG_TYPE_LWT_IN] = "lwt_in", + [BPF_PROG_TYPE_LWT_OUT] = "lwt_out", + [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit", + [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops", + [BPF_PROG_TYPE_SK_SKB] = "sk_skb", + [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device", + [BPF_PROG_TYPE_SK_MSG] = "sk_msg", + [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint", + [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr", + [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local", + [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2", + [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport", + [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector", + [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl", + [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable", + [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt", + [BPF_PROG_TYPE_TRACING] = "tracing", + [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", + [BPF_PROG_TYPE_EXT] = "ext", +}; + +const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name); + enum dump_mode { DUMP_JITED, DUMP_XLATED, From 16d37ee3d2b1c30052ba5ebb69556040fc174061 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 24 Jun 2020 16:31:54 +0200 Subject: [PATCH 0308/2056] tools, bpftool: Define attach_type_name array only once Define attach_type_name in common.c instead of main.h so it is only defined once. This leads to a slight decrease in the binary size of bpftool. 
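The same single-definition approach is used here, with one difference worth spelling out (sketch only; identifiers are taken from the diff below): attach_type_name is sized by the UAPI enum, so the bound can stay in the extern declaration and no companion size symbol like prog_type_name_size is needed.

    /* common.c: the only definition, sized by the enum */
    const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
    	[BPF_CGROUP_INET_INGRESS] = "ingress",
    	[BPF_CGROUP_INET_EGRESS]  = "egress",
    	/* ... remaining entries as in the hunk below ... */
    };

    /* main.h: the declaration keeps its bound, so the array type
     * stays complete and existing sizeof/indexing code still
     * compiles unchanged.
     */
    extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];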
Before: text data bss dec hex filename 399024 11168 1573160 1983352 1e4378 bpftool After: text data bss dec hex filename 398256 10880 1573160 1982296 1e3f58 bpftool Signed-off-by: Tobias Klauser Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200624143154.13145-1-tklauser@distanz.ch --- tools/bpf/bpftool/common.c | 36 ++++++++++++++++++++++++++++++++++++ tools/bpf/bpftool/main.h | 36 +----------------------------------- 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 6c864c3683fc..18e5604fe260 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -29,6 +29,42 @@ #define BPF_FS_MAGIC 0xcafe4a11 #endif +const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { + [BPF_CGROUP_INET_INGRESS] = "ingress", + [BPF_CGROUP_INET_EGRESS] = "egress", + [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", + [BPF_CGROUP_SOCK_OPS] = "sock_ops", + [BPF_CGROUP_DEVICE] = "device", + [BPF_CGROUP_INET4_BIND] = "bind4", + [BPF_CGROUP_INET6_BIND] = "bind6", + [BPF_CGROUP_INET4_CONNECT] = "connect4", + [BPF_CGROUP_INET6_CONNECT] = "connect6", + [BPF_CGROUP_INET4_POST_BIND] = "post_bind4", + [BPF_CGROUP_INET6_POST_BIND] = "post_bind6", + [BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4", + [BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6", + [BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4", + [BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6", + [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4", + [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6", + [BPF_CGROUP_SYSCTL] = "sysctl", + [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4", + [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", + [BPF_CGROUP_GETSOCKOPT] = "getsockopt", + [BPF_CGROUP_SETSOCKOPT] = "setsockopt", + + [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", + [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", + [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", + [BPF_LIRC_MODE2] = "lirc_mode2", + [BPF_FLOW_DISSECTOR] = "flow_dissector", + [BPF_TRACE_RAW_TP] = "raw_tp", + [BPF_TRACE_FENTRY] = "fentry", + [BPF_TRACE_FEXIT] = "fexit", + [BPF_MODIFY_RETURN] = "mod_ret", + [BPF_LSM_MAC] = "lsm_mac", +}; + void p_err(const char *fmt, ...) 
{ va_list ap; diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 269f1cb6aef5..78d34e860713 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -59,41 +59,7 @@ extern const char * const prog_type_name[]; extern const size_t prog_type_name_size; -static const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { - [BPF_CGROUP_INET_INGRESS] = "ingress", - [BPF_CGROUP_INET_EGRESS] = "egress", - [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", - [BPF_CGROUP_SOCK_OPS] = "sock_ops", - [BPF_CGROUP_DEVICE] = "device", - [BPF_CGROUP_INET4_BIND] = "bind4", - [BPF_CGROUP_INET6_BIND] = "bind6", - [BPF_CGROUP_INET4_CONNECT] = "connect4", - [BPF_CGROUP_INET6_CONNECT] = "connect6", - [BPF_CGROUP_INET4_POST_BIND] = "post_bind4", - [BPF_CGROUP_INET6_POST_BIND] = "post_bind6", - [BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4", - [BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6", - [BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4", - [BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6", - [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4", - [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6", - [BPF_CGROUP_SYSCTL] = "sysctl", - [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4", - [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6", - [BPF_CGROUP_GETSOCKOPT] = "getsockopt", - [BPF_CGROUP_SETSOCKOPT] = "setsockopt", - - [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser", - [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict", - [BPF_SK_MSG_VERDICT] = "sk_msg_verdict", - [BPF_LIRC_MODE2] = "lirc_mode2", - [BPF_FLOW_DISSECTOR] = "flow_dissector", - [BPF_TRACE_RAW_TP] = "raw_tp", - [BPF_TRACE_FENTRY] = "fentry", - [BPF_TRACE_FEXIT] = "fexit", - [BPF_MODIFY_RETURN] = "mod_ret", - [BPF_LSM_MAC] = "lsm_mac", -}; +extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE]; extern const char * const map_type_name[]; extern const size_t map_type_name_size; From 4b88b9ce722f19f8743de531cb3c510f36c6f970 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 25 Jun 2020 17:40:43 +0530 Subject: [PATCH 0309/2056] ptp_pch: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. In the case of ptp_pch, after removing PCI helper functions, .suspend() and .resume() became empty-body functions. Hence, define them NULL and use dev_pm_ops. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_pch.c | 37 +++---------------------------------- 1 file changed, 3 insertions(+), 34 deletions(-) diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index dcd6e00c8046..ce10ecd41ba0 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -508,40 +508,8 @@ static const struct ptp_clock_info ptp_pch_caps = { .enable = ptp_pch_enable, }; - -#ifdef CONFIG_PM -static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state) -{ - pci_disable_device(pdev); - pci_enable_wake(pdev, PCI_D3hot, 0); - - if (pci_save_state(pdev) != 0) { - dev_err(&pdev->dev, "could not save PCI config state\n"); - return -ENOMEM; - } - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static s32 pch_resume(struct pci_dev *pdev) -{ - s32 ret; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - ret = pci_enable_device(pdev); - if (ret) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - return ret; - } - pci_enable_wake(pdev, PCI_D3hot, 0); - return 0; -} -#else #define pch_suspend NULL #define pch_resume NULL -#endif static void pch_remove(struct pci_dev *pdev) { @@ -684,13 +652,14 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = { {0} }; +static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume); + static struct pci_driver pch_driver = { .name = KBUILD_MODNAME, .id_table = pch_ieee1588_pcidev_id, .probe = pch_probe, .remove = pch_remove, - .suspend = pch_suspend, - .resume = pch_resume, + .driver.pm = &pch_pm_ops, }; static void __exit ptp_pch_exit(void) From 04dfa7057bd189780576d73ce360c874ed507e08 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 28 May 2020 04:48:27 -0500 Subject: [PATCH 0310/2056] net/mlx5: Avoid eswitch header inclusion in fs core layer Flow steering core layer is independent of the eswitch layer. Hence avoid fs_core dependency on eswitch. Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 ---------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 10 ++++++++++ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 67e09902bd88..522cadc09149 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -44,16 +44,6 @@ #include "lib/mpfs.h" #include "en/tc_ct.h" -#define FDB_TC_MAX_CHAIN 3 -#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) -#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) - -/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ -#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) - -#define FDB_TC_MAX_PRIO 16 -#define FDB_TC_LEVELS_PER_PRIO 2 - #ifdef CONFIG_MLX5_ESWITCH #define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 13e2fb79c21a..e47a66983935 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -41,7 +41,6 @@ #include "diag/fs_tracepoint.h" #include "accel/ipsec.h" #include "fpga/ipsec.h" -#include "eswitch.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) 
(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 825b662f809b..afe7f0bffb93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -39,6 +39,16 @@ #include #include +#define FDB_TC_MAX_CHAIN 3 +#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) +#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) + +/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ +#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) + +#define FDB_TC_MAX_PRIO 16 +#define FDB_TC_LEVELS_PER_PRIO 2 + struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; union { From 39797f1c53f5323179d61692bdd4c80de21d540e Mon Sep 17 00:00:00 2001 From: Hu Haowen Date: Fri, 3 Apr 2020 12:26:59 +0800 Subject: [PATCH 0311/2056] net/mlx5: FWTrace: Add missing space Missing space at the end of a comment line, add it. Signed-off-by: Hu Haowen Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index a7551274be58..ad3594c4afcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -676,7 +676,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; - /* Copy the block to local buffer to avoid HW override while being processed*/ + /* Copy the block to local buffer to avoid HW override while being processed */ memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, TRACER_BLOCK_SIZE_BYTE); From 360000b26e37a75b3000bf0585b263809d96ffd3 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Fri, 5 Jun 2020 22:22:35 +0300 Subject: [PATCH 0312/2056] net/mlx5: Use kfree(ft->g) in arfs_create_groups() Use kfree() instead of kvfree() on ft->g in arfs_create_groups() because the memory is allocated with kcalloc(). Signed-off-by: Denis Efremov Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 014639ea06e3..c4c9d6cda7e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -220,7 +220,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft, sizeof(*ft->g), GFP_KERNEL); in = kvzalloc(inlen, GFP_KERNEL); if (!in || !ft->g) { - kvfree(ft->g); + kfree(ft->g); kvfree(in); return -ENOMEM; } From d39c9885b65740e737db8303ed3d344f2f6ceb35 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Thu, 11 Jun 2020 15:48:45 +0300 Subject: [PATCH 0313/2056] net/mlx5e: Remove unused mlx5e_xsk_first_unused_channel mlx5e_xsk_first_unused_channel is a leftover from old versions of the first XSK commit, and it was never used. Remove it. 
Fixes: db05815b36cb ("net/mlx5e: Add XSK zero-copy support") Signed-off-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/xsk/umem.c | 13 ------------- .../net/ethernet/mellanox/mlx5/core/en/xsk/umem.h | 2 -- 2 files changed, 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c index 7b17fcd0a56d..331ca2b0f8a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c @@ -215,16 +215,3 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid) return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) : mlx5e_xsk_disable_umem(priv, ix); } - -u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk) -{ - u16 res = xsk->refcnt ? params->num_channels : 0; - - while (res) { - if (mlx5e_xsk_get_umem(params, xsk, res - 1)) - break; - --res; - } - - return res; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h index 25b4cbe58b54..bada94973586 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h @@ -26,6 +26,4 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid) int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries); -u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk); - #endif /* __MLX5_EN_XSK_UMEM_H__ */ From 8fab0175aabcd28789e51df7e1c9222f9efe1532 Mon Sep 17 00:00:00 2001 From: Alaa Hleihel Date: Tue, 2 Jun 2020 12:09:21 +0300 Subject: [PATCH 0314/2056] net/mlx5e: Move including net/arp.h from en_rep.c to rep/neigh.c After the cited commit, the header net/arp.h is no longer used in en_rep.c. So, move it to the new file rep/neigh.c that uses it now. Signed-off-by: Alaa Hleihel Reviewed-by: Vlad Buslov Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c index baa162432e75..a0913836c973 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "neigh.h" #include "tc.h" #include "en_rep.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 20ff8526d212..ed2430677b12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include From 185901ceebbd4f8b11db478fd3a30eb66e5d591d Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Wed, 10 Jun 2020 18:09:13 +0300 Subject: [PATCH 0315/2056] net/mlx5e: Move TC-specific function definitions into MLX5_CLS_ACT en_tc.h header file declares several TC-specific functions in CONFIG_MLX5_ESWITCH block even though those functions are only compiled when CONFIG_MLX5_CLS_ACT is set, which is a recent change. Move them to proper block. 
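The resulting header layout follows the usual nested Kconfig-guard idiom; a condensed sketch is below (not the full en_tc.h contents, and showing only a couple of the moved declarations, taken from the diff that follows).

    #ifdef CONFIG_MLX5_ESWITCH

    /* always declared when eswitch support is built */
    int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);

    #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
    /* these are defined only when the TC classifier/action code is
     * built, so their prototypes live in the same guarded block as
     * the definitions that back them
     */
    int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
    void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
    #endif /* CONFIG_MLX5_CLS_ACT */

    #endif /* CONFIG_MLX5_ESWITCH */

This way a configuration without CONFIG_MLX5_CLS_ACT never sees prototypes for functions that are not compiled in.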
Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 5c330b0cae21..1561eaa89ffd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -40,6 +40,14 @@ #ifdef CONFIG_MLX5_ESWITCH +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); + +struct mlx5e_tc_update_priv { + struct net_device *tun_dev; +}; + +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) + struct tunnel_match_key { struct flow_dissector_key_control enc_control; struct flow_dissector_key_keyid enc_key_id; @@ -114,8 +122,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe); -int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags); - void mlx5e_tc_reoffload_flows_work(struct work_struct *work); enum mlx5e_tc_attr_to_reg { @@ -142,10 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, struct net_device *out_dev); -struct mlx5e_tc_update_priv { - struct net_device *tun_dev; -}; - struct mlx5e_tc_mod_hdr_acts { int num_actions; int max_actions; @@ -174,8 +176,6 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev, struct flow_match_basic *match, bool outer, void *headers_c, void *headers_v); -#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) - int mlx5e_tc_nic_init(struct mlx5e_priv *priv); void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); From 7a64ca862ac96d5e78a59bd57549034134ee0949 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 15 May 2020 17:09:05 -0700 Subject: [PATCH 0316/2056] net/mlx5e: vxlan: Use RCU for vxlan table lookup Remove the spinlock protecting the vxlan table and use RCU instead. This will improve performance as it will eliminate contention on data path cores. 
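A condensed sketch of the reader/updater pattern the diff below adopts (the helper names vxlan_port_present() and vxlan_port_free() are hypothetical; the real functions, error handling, and the sync_lock serialization of updaters are in the hunks that follow):

    /* data path: lockless lookup inside an RCU read-side section */
    static bool vxlan_port_present(struct mlx5_vxlan *vxlan, u16 port)
    {
    	struct mlx5_vxlan_port *vxlanp;
    	bool found = false;

    	rcu_read_lock();
    	hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port)
    		if (vxlanp->udp_port == port) {
    			found = true;
    			break;
    		}
    	rcu_read_unlock();

    	return found;
    }

    /* control path, still serialized by vxlan->sync_lock */
    static void vxlan_port_free(struct mlx5_vxlan_port *vxlanp)
    {
    	hash_del_rcu(&vxlanp->hlist);	/* unpublish the entry */
    	synchronize_rcu();		/* wait for in-flight readers */
    	kfree(vxlanp);			/* now safe to free */
    }

Readers pay no lock or atomic cost on the fast path and never block updaters; the only added latency is the synchronize_rcu() grace period on the rare delete path.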
Fixes: b3f63c3d5e2c ("net/mlx5e: Add netdev support for VXLAN tunneling") Signed-off-by: Saeed Mahameed Reviewed-by: Maxim Mikityanskiy --- .../ethernet/mellanox/mlx5/core/lib/vxlan.c | 73 ++++++++----------- 1 file changed, 31 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 82c766a95165..85cbc4295585 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -40,7 +40,6 @@ struct mlx5_vxlan { struct mlx5_core_dev *mdev; - spinlock_t lock; /* protect vxlan table */ /* max_num_ports is usuallly 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); int num_ports; @@ -78,45 +77,46 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in); } -static struct mlx5_vxlan_port* -mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port) -{ - struct mlx5_vxlan_port *vxlanp; - - hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) { - if (vxlanp->udp_port == port) - return vxlanp; - } - - return NULL; -} - struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5_vxlan_port *vxlanp; + struct mlx5_vxlan_port *retptr = NULL, *vxlanp; if (!mlx5_vxlan_allowed(vxlan)) return NULL; - spin_lock_bh(&vxlan->lock); - vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); - spin_unlock_bh(&vxlan->lock); + rcu_read_lock(); + hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port) + if (vxlanp->udp_port == port) { + retptr = vxlanp; + break; + } + rcu_read_unlock(); - return vxlanp; + return retptr; +} + +static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) +{ + struct mlx5_vxlan_port *vxlanp; + + hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) + if (vxlanp->udp_port == port) + return vxlanp; + return NULL; } int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; - int ret = -ENOSPC; - - vxlanp = mlx5_vxlan_lookup_port(vxlan, port); - if (vxlanp) { - refcount_inc(&vxlanp->refcount); - return 0; - } + int ret = 0; mutex_lock(&vxlan->sync_lock); + vxlanp = vxlan_lookup_port(vxlan, port); + if (vxlanp) { + refcount_inc(&vxlanp->refcount); + goto unlock; + } + if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { mlx5_core_info(vxlan->mdev, "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", @@ -138,9 +138,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) vxlanp->udp_port = port; refcount_set(&vxlanp->refcount, 1); - spin_lock_bh(&vxlan->lock); - hash_add(vxlan->htable, &vxlanp->hlist, port); - spin_unlock_bh(&vxlan->lock); + hash_add_rcu(vxlan->htable, &vxlanp->hlist, port); vxlan->num_ports++; mutex_unlock(&vxlan->sync_lock); @@ -157,34 +155,26 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; - bool remove = false; int ret = 0; mutex_lock(&vxlan->sync_lock); - spin_lock_bh(&vxlan->lock); - vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port); + vxlanp = vxlan_lookup_port(vxlan, port); if (!vxlanp) { ret = -ENOENT; goto out_unlock; } if (refcount_dec_and_test(&vxlanp->refcount)) { - hash_del(&vxlanp->hlist); - remove = true; - } - -out_unlock: - spin_unlock_bh(&vxlan->lock); - - if (remove) { + hash_del_rcu(&vxlanp->hlist); + synchronize_rcu(); 
mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); kfree(vxlanp); vxlan->num_ports--; } +out_unlock: mutex_unlock(&vxlan->sync_lock); - return ret; } @@ -201,7 +191,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) vxlan->mdev = mdev; mutex_init(&vxlan->sync_lock); - spin_lock_init(&vxlan->lock); hash_init(vxlan->htable); /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */ From efbb974d8ead5106787144219cb240fdcebccc16 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Fri, 15 May 2020 17:11:29 -0700 Subject: [PATCH 0317/2056] net/mlx5e: vxlan: Return bool instead of opaque ptr in port_lookup() struct mlx5_vxlan_port is not exposed to the outside callers, it is redundant to return a pointer to it from mlx5_vxlan_port_lookup(), to be only used as a boolean, so just return a boolean. Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 9 +++++---- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h | 5 ++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 85cbc4295585..be34330d89cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -77,9 +77,10 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in); } -struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) +bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { - struct mlx5_vxlan_port *retptr = NULL, *vxlanp; + struct mlx5_vxlan_port *vxlanp; + bool found = false; if (!mlx5_vxlan_allowed(vxlan)) return NULL; @@ -87,12 +88,12 @@ struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 por rcu_read_lock(); hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port) if (vxlanp->udp_port == port) { - retptr = vxlanp; + found = true; break; } rcu_read_unlock(); - return retptr; + return found; } static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h index 8fb0eb08fa6d..6d599f4a8acd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h @@ -50,15 +50,14 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); -struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); +bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); #else static inline struct mlx5_vxlan* mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; } -static inline struct mx5_vxlan_port* -mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; } +static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; } #endif #endif /* __MLX5_VXLAN_H__ */ From c6d5d843d9b6e8dca3768250970f0d0a1e3d4fb0 Mon Sep 17 00:00:00 2001 
From: Russell King Date: Wed, 24 Jun 2020 11:06:54 +0100 Subject: [PATCH 0318/2056] net: phylink: add phylink_speed_(up|down) interface Add an interface for the phy_speed_(up|down) functions when a driver makes use of phylink. These pass the call through to phylib when we have a normal PHY attached (i.o.w., not a PHY on a SFP module.) Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 48 +++++++++++++++++++++++++++++++++++++++ include/linux/phylink.h | 2 ++ 2 files changed, 50 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7ce787c227b3..7cda1646bbf7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1826,6 +1826,54 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL_GPL(phylink_mii_ioctl); +/** + * phylink_speed_down() - set the non-SFP PHY to lowest speed supported by both + * link partners + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @sync: perform action synchronously + * + * If we have a PHY that is not part of a SFP module, then set the speed + * as described in the phy_speed_down() function. Please see this function + * for a description of the @sync parameter. + * + * Returns zero if there is no PHY, otherwise as per phy_speed_down(). + */ +int phylink_speed_down(struct phylink *pl, bool sync) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_down(pl->phydev, sync); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_down); + +/** + * phylink_speed_up() - restore the advertised speeds prior to the call to + * phylink_speed_down() + * @pl: a pointer to a &struct phylink returned from phylink_create() + * + * If we have a PHY that is not part of a SFP module, then restore the + * PHY speeds as per phy_speed_up(). + * + * Returns zero if there is no PHY, otherwise as per phy_speed_up(). + */ +int phylink_speed_up(struct phylink *pl) +{ + int ret = 0; + + ASSERT_RTNL(); + + if (!pl->sfp_bus && pl->phydev) + ret = phy_speed_up(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_speed_up); + static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus) { struct phylink *pl = upstream; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index cc5b452a184e..b32b8b45421b 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -392,6 +392,8 @@ int phylink_init_eee(struct phylink *, bool); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); +int phylink_speed_down(struct phylink *pl, bool sync); +int phylink_speed_up(struct phylink *pl); #define phylink_zero(bm) \ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) From a5440cbecd99e2564504572b39bc70ea7addfc25 Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jun 2020 11:21:32 +0100 Subject: [PATCH 0319/2056] net: dsa/ar9331: convert to mac_link_up() Convert the ar9331 DSA driver to use the finalised link parameters in mac_link_up() rather than the parameters in mac_config(). Tested-by: Oleksij Rempel Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca/ar9331.c | 60 +++++++++++++++++++----------------- 1 file changed, 31 insertions(+), 29 deletions(-) diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 7c86056b9401..e24a99031b80 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -97,8 +97,7 @@ (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC) #define AR9331_SW_PORT_STATUS_LINK_MASK \ - (AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN | \ - AR9331_SW_PORT_STATUS_DUPLEX_MODE | \ + (AR9331_SW_PORT_STATUS_DUPLEX_MODE | \ AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \ AR9331_SW_PORT_STATUS_SPEED_M) @@ -410,33 +409,10 @@ static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port, struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv; struct regmap *regmap = priv->regmap; int ret; - u32 val; - - switch (state->speed) { - case SPEED_1000: - val = AR9331_SW_PORT_STATUS_SPEED_1000; - break; - case SPEED_100: - val = AR9331_SW_PORT_STATUS_SPEED_100; - break; - case SPEED_10: - val = AR9331_SW_PORT_STATUS_SPEED_10; - break; - default: - return; - } - - if (state->duplex) - val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE; - - if (state->pause & MLO_PAUSE_TX) - val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN; - - if (state->pause & MLO_PAUSE_RX) - val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN; ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port), - AR9331_SW_PORT_STATUS_LINK_MASK, val); + AR9331_SW_PORT_STATUS_LINK_EN | + AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0); if (ret) dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret); } @@ -464,11 +440,37 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port, { struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv; struct regmap *regmap = priv->regmap; + u32 val; int ret; + val = AR9331_SW_PORT_STATUS_MAC_MASK; + switch (speed) { + case SPEED_1000: + val |= AR9331_SW_PORT_STATUS_SPEED_1000; + break; + case SPEED_100: + val |= AR9331_SW_PORT_STATUS_SPEED_100; + break; + case SPEED_10: + val |= AR9331_SW_PORT_STATUS_SPEED_10; + break; + default: + return; + } + + if (duplex) + val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE; + + if (tx_pause) + val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN; + + if (rx_pause) + val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN; + ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port), - AR9331_SW_PORT_STATUS_MAC_MASK, - AR9331_SW_PORT_STATUS_MAC_MASK); + AR9331_SW_PORT_STATUS_MAC_MASK | + AR9331_SW_PORT_STATUS_LINK_MASK, + val); if (ret) dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret); } From 575691b309291c5163cca5877ec159120a68774d Mon Sep 17 00:00:00 2001 From: Russell King Date: Wed, 24 Jun 2020 12:30:04 +0100 Subject: [PATCH 0320/2056] net: phylink: only restart AN if the link mode is using in-band AN If we are not using in-band autonegotiation, there is no point passing the request to restart autonegotiation on to the driver. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 7cda1646bbf7..494af91535ba 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -429,7 +429,8 @@ static void phylink_mac_config_up(struct phylink *pl, static void phylink_mac_pcs_an_restart(struct phylink *pl) { if (pl->link_config.an_enabled && - phy_interface_mode_is_8023z(pl->link_config.interface)) { + phy_interface_mode_is_8023z(pl->link_config.interface) && + phylink_autoneg_inband(pl->cur_link_an_mode)) { if (pl->pcs_ops) pl->pcs_ops->pcs_an_restart(pl->config); else From 0da1e28f97352fb8aadde4d66774373bedd50408 Mon Sep 17 00:00:00 2001 From: Ioana Radulescu Date: Wed, 24 Jun 2020 14:34:17 +0300 Subject: [PATCH 0321/2056] dpaa2-eth: trim debugfs FQ stats With the addition of multiple traffic classes support, the number of available frame queues grew significantly, overly inflating the debugfs FQ statistics entry. Update it to only show the queues which are actually in use (i.e. have a non-zero frame counter). Signed-off-by: Ioana Radulescu Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index c453a23045c1..2880ca02d7e7 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -90,6 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) if (err) fcnt = 0; + /* Skip FQs with no traffic */ + if (!fq->stats.frames && !fcnt) + continue; + seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n", fq->fqid, fq->target_cpu, From 37fbbdda63538f1bcebd566b75fa1865d30b6b19 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Wed, 24 Jun 2020 14:34:18 +0300 Subject: [PATCH 0322/2056] dpaa2-eth: check the result of skb_to_sgvec() Before passing the result of skb_to_sgvec() to dma_map_sg() check if any error was returned. Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index f150cd454fa4..db27f959d409 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -611,6 +611,10 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, sg_init_table(scl, nr_frags + 1); num_sg = skb_to_sgvec(skb, scl, 0, skb->len); + if (unlikely(num_sg < 0)) { + err = -ENOMEM; + goto dma_map_sg_failed; + } num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); if (unlikely(!num_dma_bufs)) { err = -ENOMEM; From 0e5ad75b02d9341eb9ca22627247f9a02cc20d6f Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Wed, 24 Jun 2020 14:34:19 +0300 Subject: [PATCH 0323/2056] dpaa2-eth: fix condition for number of buffer acquire retries We should keep retrying to acquire buffers through the software portals as long as the function returns -EBUSY and the number of retries is __below__ DPAA2_ETH_SWP_BUSY_RETRIES. Fixes: ef17bd7cc0c8 ("dpaa2-eth: Avoid unbounded while loops") Signed-off-by: Ioana Ciornei Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index db27f959d409..712bbfdbe7d7 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1113,7 +1113,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count) buf_array, count); if (ret < 0) { if (ret == -EBUSY && - retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) + retries++ < DPAA2_ETH_SWP_BUSY_RETRIES) continue; netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); return; From cef5820b7f911eb9bd6c04bcd7ef6107e671640c Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Wed, 24 Jun 2020 14:34:20 +0300 Subject: [PATCH 0324/2056] dpaa2-eth: fix recursive header include The dpaa2-eth.h header file includes dpaa2-eth-trace.h which includes back dpaa2-eth leading to a recursion in the include path. Fix this by removing the include of dpaa2-eth.h in the trace header. Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h index 9801528db2a5..5fb5f14e01ec 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -10,7 +10,6 @@ #include #include -#include "dpaa2-eth.h" #include #define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" From 05e190467d82fea26531430a15f9f66b4dee29c2 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Wed, 24 Jun 2020 14:34:21 +0300 Subject: [PATCH 0325/2056] dpaa2-eth: fix misspelled function parameters in dpni_[set/get]_taildrop Two of the function parameters (qtype and index) were misspelled in the associated descriptions of dpni_[set/get]_taildrop which led to sparse warnings. Fix this by using the exact same names as present in the function definition. Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpni.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 6b479ba66465..426100607854 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -1558,10 +1558,10 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io, * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object * @cg_point: Congestion point - * @q_type: Queue type on which the taildrop is configured. + * @qtype: Queue type on which the taildrop is configured. * Only Rx queues are supported for now * @tc: Traffic class to apply this taildrop to - * @q_index: Index of the queue if the DPNI supports multiple queues for + * @index: Index of the queue if the DPNI supports multiple queues for * traffic distribution. Ignored if CONGESTION_POINT is not 0. * @taildrop: Taildrop structure * @@ -1602,10 +1602,10 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io, * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPNI object * @cg_point: Congestion point - * @q_type: Queue type on which the taildrop is configured. + * @qtype: Queue type on which the taildrop is configured. 
* Only Rx queues are supported for now * @tc: Traffic class to apply this taildrop to - * @q_index: Index of the queue if the DPNI supports multiple queues for + * @index: Index of the queue if the DPNI supports multiple queues for * traffic distribution. Ignored if CONGESTION_POINT is not 0. * @taildrop: Taildrop structure * From 9150069bf5fc0e86cc8b79495bfbb98d2b642ba0 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:01 -0500 Subject: [PATCH 0326/2056] dt-bindings: net: Add tx and rx internal delays tx-internal-delays and rx-internal-delays are a common setting for RGMII capable devices. These properties are used when the phy-mode or phy-controller is set to rgmii-id, rgmii-rxid or rgmii-txid. These modes indicate to the controller that the PHY will add the internal delay for the connection. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- .../devicetree/bindings/net/ethernet-phy.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index 9b1f1147ca36..a9e547ac7905 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -162,6 +162,18 @@ properties: description: Specifies a reference to a node representing a SFP cage. + rx-internal-delay-ps: + description: | + RGMII Receive PHY Clock Delay defined in pico seconds. This is used for + PHY's that have configurable RX internal delays. If this property is + present then the PHY applies the RX delay. + + tx-internal-delay-ps: + description: | + RGMII Transmit PHY Clock Delay defined in pico seconds. This is used for + PHY's that have configurable TX internal delays. If this property is + present then the PHY applies the TX delay. + required: - reg From 92252eec913b2dd5e7b5de11ea3efa2e64d65cf4 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:02 -0500 Subject: [PATCH 0327/2056] net: phy: Add a helper to return the index for of the internal delay Add a helper function that will return the index in the array for the passed in internal delay value. The helper requires the array, size and delay value. The helper will then return the index for the exact match or return the index for the index to the closest smaller value. Signed-off-by: Dan Murphy Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 99 ++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 4 ++ 2 files changed, 103 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 29ef4456ac25..6d47485e68f9 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -31,6 +31,7 @@ #include #include #include +#include MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); @@ -2708,6 +2709,104 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause) } EXPORT_SYMBOL(phy_get_pause); +#if IS_ENABLED(CONFIG_OF_MDIO) +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + s32 int_delay; + int ret; + + ret = device_property_read_u32(dev, name, &int_delay); + if (ret) + return ret; + + return int_delay; +} +#else +static int phy_get_int_delay_property(struct device *dev, const char *name) +{ + return -EINVAL; +} +#endif + +/** + * phy_get_delay_index - returns the index of the internal delay + * @phydev: phy_device struct + * @dev: pointer to the devices device struct + * @delay_values: array of delays the PHY supports + * @size: the size of the delay array + * @is_rx: boolean to indicate to get the rx internal delay + * + * Returns the index within the array of internal delay passed in. + * If the device property is not present then the interface type is checked + * if the interface defines use of internal delay then a 1 is returned otherwise + * a 0 is returned. + * The array must be in ascending order. If PHY does not have an ascending order + * array then size = 0 and the value of the delay property is returned. + * Return -EINVAL if the delay is invalid or cannot be found. + */ +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx) +{ + s32 delay; + int i; + + if (is_rx) { + delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + return 1; + else + return 0; + } + + } else { + delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps"); + if (delay < 0 && size == 0) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID || + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + return 1; + else + return 0; + } + } + + if (delay < 0) + return delay; + + if (delay && size == 0) + return delay; + + if (delay < delay_values[0] || delay > delay_values[size - 1]) { + phydev_err(phydev, "Delay %d is out of range\n", delay); + return -EINVAL; + } + + if (delay == delay_values[0]) + return 0; + + for (i = 1; i < size; i++) { + if (delay == delay_values[i]) + return i; + + /* Find an approximate index by looking up the table */ + if (delay > delay_values[i - 1] && + delay < delay_values[i]) { + if (delay - delay_values[i - 1] < + delay_values[i] - delay) + return i - 1; + else + return i; + } + } + + phydev_err(phydev, "error finding internal delay index for %d\n", + delay); + + return -EINVAL; +} +EXPORT_SYMBOL(phy_get_internal_delay); + static bool phy_drv_supports_irq(struct phy_driver *phydrv) { return phydrv->config_intr && phydrv->ack_interrupt; diff --git a/include/linux/phy.h b/include/linux/phy.h index 6fb8f302978d..2c00374dc996 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1443,6 +1443,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct 
ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); + +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); From 2fb305c37d5b78ef5d849b260d9ef997ad1d1281 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:03 -0500 Subject: [PATCH 0328/2056] dt-bindings: net: Add RGMII internal delay for DP83869 Add the internal delay values into the header and update the binding with the internal delay properties. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- .../devicetree/bindings/net/ti,dp83869.yaml | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml index 5b69ef03bbf7..71e90a3e4652 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83869.yaml +++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml @@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#" title: TI DP83869 ethernet PHY allOf: - - $ref: "ethernet-controller.yaml#" + - $ref: "ethernet-phy.yaml#" maintainers: - Dan Murphy @@ -64,6 +64,18 @@ properties: Operational mode for the PHY. If this is not set then the operational mode is set by the straps. see dt-bindings/net/ti-dp83869.h for values + rx-internal-delay-ps: + description: Delay is in pico seconds + enum: [ 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000, + 3250, 3500, 3750, 4000 ] + default: 2000 + + tx-internal-delay-ps: + description: Delay is in pico seconds + enum: [ 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000, + 3250, 3500, 3750, 4000 ] + default: 2000 + required: - reg @@ -80,5 +92,7 @@ examples: ti,op-mode = ; ti,max-output-impedance = "true"; ti,clk-output-sel = ; + rx-internal-delay-ps = <2000>; + tx-internal-delay-ps = <2000>; }; }; From 736b25afe28447967390a4244c10f156b43c9006 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:04 -0500 Subject: [PATCH 0329/2056] net: dp83869: Add RGMII internal delay configuration Add RGMII internal delay configuration for Rx and Tx. Signed-off-by: Dan Murphy Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83869.c | 53 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 50 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index 53ed3abc26c9..58103152c601 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -64,6 +64,10 @@ #define DP83869_RGMII_TX_CLK_DELAY_EN BIT(1) #define DP83869_RGMII_RX_CLK_DELAY_EN BIT(0) +/* RGMIIDCTL */ +#define DP83869_RGMII_CLK_DELAY_SHIFT 4 +#define DP83869_CLK_DELAY_DEF 7 + /* STRAP_STS1 bits */ #define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0) #define DP83869_STRAP_STS1_RESERVED BIT(11) @@ -78,9 +82,6 @@ #define DP83869_PHYCR_FIFO_DEPTH_MASK GENMASK(15, 12) #define DP83869_PHYCR_RESERVED_MASK BIT(11) -/* RGMIIDCTL bits */ -#define DP83869_RGMII_TX_CLK_DELAY_SHIFT 4 - /* IO_MUX_CFG bits */ #define DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL 0x1f @@ -108,6 +109,8 @@ enum { struct dp83869_private { int tx_fifo_depth; int rx_fifo_depth; + s32 rx_int_delay; + s32 tx_int_delay; int io_impedance; int port_mirroring; bool rxctrl_strap_quirk; @@ -177,11 +180,16 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev) } #if IS_ENABLED(CONFIG_OF_MDIO) +static const int dp83869_internal_delay[] = {250, 500, 750, 1000, 1250, 1500, + 1750, 2000, 2250, 2500, 2750, 3000, + 3250, 3500, 3750, 4000}; + static int dp83869_of_init(struct phy_device *phydev) { struct dp83869_private *dp83869 = phydev->priv; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; + int delay_size = ARRAY_SIZE(dp83869_internal_delay); int ret; if (!of_node) @@ -235,6 +243,20 @@ static int dp83869_of_init(struct phy_device *phydev) &dp83869->tx_fifo_depth)) dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB; + dp83869->rx_int_delay = phy_get_internal_delay(phydev, dev, + &dp83869_internal_delay[0], + delay_size, true); + if (dp83869->rx_int_delay < 0) + dp83869->rx_int_delay = + dp83869_internal_delay[DP83869_CLK_DELAY_DEF]; + + dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev, + &dp83869_internal_delay[0], + delay_size, false); + if (dp83869->tx_int_delay < 0) + dp83869->tx_int_delay = + dp83869_internal_delay[DP83869_CLK_DELAY_DEF]; + return ret; } #else @@ -397,6 +419,31 @@ static int dp83869_config_init(struct phy_device *phydev) dp83869->clk_output_sel << DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT); + if (phy_interface_is_rgmii(phydev)) { + ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIIDCTL, + dp83869->rx_int_delay | + dp83869->tx_int_delay << DP83869_RGMII_CLK_DELAY_SHIFT); + if (ret) + return ret; + + val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL); + val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN | + DP83869_RGMII_RX_CLK_DELAY_EN); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + val |= (DP83869_RGMII_TX_CLK_DELAY_EN | + DP83869_RGMII_RX_CLK_DELAY_EN); + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + val |= DP83869_RGMII_TX_CLK_DELAY_EN; + + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + val |= DP83869_RGMII_RX_CLK_DELAY_EN; + + ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL, + val); + } + return ret; } From 8095295292b564583bb5bee5ba69924c2fd40434 Mon Sep 17 00:00:00 2001 From: Dan Murphy Date: Wed, 24 Jun 2020 07:16:05 -0500 Subject: [PATCH 0330/2056] net: phy: DP83822: Add setting the fixed internal delay The DP83822 can be configured to use the RGMII interface. There are independent fixed 3.5ns clock shift (aka internal delay) for the TX and RX paths. 
This allow either one to be set if the MII interface is RGMII and the value is set in the firmware node. Signed-off-by: Dan Murphy Signed-off-by: David S. Miller --- drivers/net/phy/dp83822.c | 79 ++++++++++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 1dd19d0cb269..37643c468e19 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -26,7 +26,9 @@ #define MII_DP83822_PHYSCR 0x11 #define MII_DP83822_MISR1 0x12 #define MII_DP83822_MISR2 0x13 +#define MII_DP83822_RCSR 0x17 #define MII_DP83822_RESET_CTRL 0x1f +#define MII_DP83822_GENCFG 0x465 #define DP83822_HW_RESET BIT(15) #define DP83822_SW_RESET BIT(14) @@ -77,6 +79,10 @@ #define DP83822_WOL_INDICATION_SEL BIT(8) #define DP83822_WOL_CLR_INDICATION BIT(11) +/* RSCR bits */ +#define DP83822_RX_CLK_SHIFT BIT(12) +#define DP83822_TX_CLK_SHIFT BIT(11) + static int dp83822_ack_interrupt(struct phy_device *phydev) { int err; @@ -255,7 +261,7 @@ static int dp83822_config_intr(struct phy_device *phydev) return phy_write(phydev, MII_DP83822_PHYSCR, physcr_status); } -static int dp83822_config_init(struct phy_device *phydev) +static int dp8382x_disable_wol(struct phy_device *phydev) { int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN | DP83822_WOL_SECURE_ON; @@ -264,6 +270,46 @@ static int dp83822_config_init(struct phy_device *phydev) MII_DP83822_WOL_CFG, value); } +static int dp83822_config_init(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + int rgmii_delay; + s32 rx_int_delay; + s32 tx_int_delay; + int err = 0; + + if (phy_interface_is_rgmii(phydev)) { + rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, + true); + + if (rx_int_delay <= 0) + rgmii_delay = 0; + else + rgmii_delay = DP83822_RX_CLK_SHIFT; + + tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0, + false); + if (tx_int_delay <= 0) + rgmii_delay &= ~DP83822_TX_CLK_SHIFT; + else + rgmii_delay |= DP83822_TX_CLK_SHIFT; + + if (rgmii_delay) { + err = phy_set_bits_mmd(phydev, DP83822_DEVADDR, + MII_DP83822_RCSR, rgmii_delay); + if (err) + return err; + } + } + + return dp8382x_disable_wol(phydev); +} + +static int dp8382x_config_init(struct phy_device *phydev) +{ + return dp8382x_disable_wol(phydev); +} + static int dp83822_phy_reset(struct phy_device *phydev) { int err; @@ -272,9 +318,7 @@ static int dp83822_phy_reset(struct phy_device *phydev) if (err < 0) return err; - dp83822_config_init(phydev); - - return 0; + return phydev->drv->config_init(phydev); } static int dp83822_suspend(struct phy_device *phydev) @@ -318,14 +362,29 @@ static int dp83822_resume(struct phy_device *phydev) .resume = dp83822_resume, \ } +#define DP8382X_PHY_DRIVER(_id, _name) \ + { \ + PHY_ID_MATCH_MODEL(_id), \ + .name = (_name), \ + /* PHY_BASIC_FEATURES */ \ + .soft_reset = dp83822_phy_reset, \ + .config_init = dp8382x_config_init, \ + .get_wol = dp83822_get_wol, \ + .set_wol = dp83822_set_wol, \ + .ack_interrupt = dp83822_ack_interrupt, \ + .config_intr = dp83822_config_intr, \ + .suspend = dp83822_suspend, \ + .resume = dp83822_resume, \ + } + static struct phy_driver dp83822_driver[] = { DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"), - DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"), - DP83822_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"), - DP83822_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"), - DP83822_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"), - DP83822_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"), - DP83822_PHY_DRIVER(DP83825CS_PHY_ID, 
"TI DP83825CS"), + DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"), + DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"), + DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"), + DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"), + DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"), + DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"), }; module_phy_driver(dp83822_driver); From 853bede848733dee1ce41ecced775d426fe245fa Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:04 +0200 Subject: [PATCH 0331/2056] net: phy: mscc: macsec: fix sparse warnings This patch fixes the following sparse warnings when building MACsec support in the MSCC PHY driver. mscc_macsec.c:393:42: warning: cast from restricted sci_t mscc_macsec.c:395:42: warning: restricted sci_t degrades to integer mscc_macsec.c:402:42: warning: restricted __be16 degrades to integer mscc_macsec.c:608:34: warning: cast from restricted sci_t mscc_macsec.c:610:34: warning: restricted sci_t degrades to integer Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_macsec.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index 713c62b1d1f0..77c8c2fb28de 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -385,21 +385,23 @@ static void vsc8584_macsec_flow(struct phy_device *phydev, } if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { + u64 sci = (__force u64)flow->rx_sa->sc->sci; + match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3)); mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) | MSCC_MS_SAM_MASK_SCI_MASK; vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx), - lower_32_bits(flow->rx_sa->sc->sci)); + lower_32_bits(sci)); vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx), - upper_32_bits(flow->rx_sa->sc->sci)); + upper_32_bits(sci)); } if (flow->match.etype) { mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK; vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx), - MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype))); + MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE((__force u32)htons(flow->etype))); } match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority); @@ -545,7 +547,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev, int i, ret, index = flow->index; u32 rec = 0, control = 0; u8 hkey[16]; - sci_t sci; + u64 sci; ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey); if (ret) @@ -603,7 +605,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev, priv->secy->replay_window); /* Set the input vectors */ - sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci; + sci = (__force u64)(bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci); vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++), lower_32_bits(sci)); vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++), From b16a213b4d68022011ddaabd583c855d7e5ec5b2 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:05 +0200 Subject: [PATCH 0332/2056] net: phy: mscc: fix a possible double unlock On vsc8584_ptp_init failure we jump to the 'err' label, which unlocks the MDIO bus lock. But vsc8584_ptp_init isn't called with the MDIO bus lock taken, which could result in a double unlock. Fix this. 
Fixes: ab2bf9339357 ("net: phy: mscc: 1588 block initialization") Reported-by: kernel test robot Reported-by: Dan Carpenter Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 2a7082983c09..cb4e15d6e2db 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1436,7 +1436,7 @@ static int vsc8584_config_init(struct phy_device *phydev) ret = vsc8584_ptp_init(phydev); if (ret) - goto err; + return ret; phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); From b487032ee683fcff08e070d93080de02a4506695 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:06 +0200 Subject: [PATCH 0333/2056] net: phy: mscc: ptp: fix a smatch error The following error was reported by smatch: vsc85xx_ts_read_csr() error: uninitialized symbol 'blk_hw'. In practice this is very unlikely, as all the block identifiers given to this functions are handled and described in an enum. The smatch error is fixed by doing what is already done in vsc85xx_ts_write_csr: using the "PROCESSOR" block by default. Reported-by: kernel test robot Reported-by: Dan Carpenter Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_ptp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 17256d85cedf..030a56c9a06d 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -75,6 +75,7 @@ static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk, blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1; break; case PROCESSOR: + default: blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1; break; } From b9dccf91b34a1d1b3c4e1d36782701288697a2fb Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:07 +0200 Subject: [PATCH 0334/2056] net: phy: mscc: ptp: fix a typo in a comment This patch fixes a typo in a comment, s/Ths/This/. The patch is cosmetic only. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index 030a56c9a06d..d4266911efc5 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1564,7 +1564,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev) /* Retrieve the shared load/save GPIO. Request it as non exclusive as * the same GPIO can be requested by all the PHYs of the same package. - * Ths GPIO must be used with the gpio_lock taken (the lock is shared + * This GPIO must be used with the gpio_lock taken (the lock is shared * between all PHYs). */ vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save", From d9608aacd3c0272a5a363e5002c7102ddd4529b6 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:08 +0200 Subject: [PATCH 0335/2056] net: phy: mscc: do not access the MDIO bus lock directly This patch improves the MSCC driver by using the provided phy_lock_mdio_bus and phy_unlock_mdio_bus helpers instead of locking and unlocking the MDIO bus lock directly. The patch is only cosmetic but should improve maintenance and consistency. Signed-off-by: Antoine Tenart Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/mscc/mscc_main.c | 24 ++++++++++++------------ drivers/net/phy/mscc/mscc_ptp.c | 12 ++++++------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index cb4e15d6e2db..03680933f530 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1288,7 +1288,7 @@ static void vsc8584_get_base_addr(struct phy_device *phydev) struct vsc8531_private *vsc8531 = phydev->priv; u16 val, addr; - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED); addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4); @@ -1297,7 +1297,7 @@ static void vsc8584_get_base_addr(struct phy_device *phydev) val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL); __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); /* In the package, there are two pairs of PHYs (PHY0 + PHY2 and * PHY1 + PHY3). The first PHY of each pair (PHY0 and PHY1) is @@ -1331,7 +1331,7 @@ static int vsc8584_config_init(struct phy_device *phydev) phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); /* Some parts of the init sequence are identical for every PHY in the * package. Some parts are modifying the GPIO register bank which is a @@ -1428,7 +1428,7 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) goto err; - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); ret = vsc8584_macsec_init(phydev); if (ret) @@ -1469,7 +1469,7 @@ static int vsc8584_config_init(struct phy_device *phydev) return 0; err: - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return ret; } @@ -1755,7 +1755,7 @@ static int vsc8514_config_init(struct phy_device *phydev) phydev->mdix_ctrl = ETH_TP_MDI_AUTO; - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); /* Some parts of the init sequence are identical for every PHY in the * package. 
Some parts are modifying the GPIO register bank which is a @@ -1843,14 +1843,14 @@ static int vsc8514_config_init(struct phy_device *phydev) reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, PHY_S6G_PLL_STATUS); if (reg == 0xffffffff) { - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return -EIO; } } while (time_before(jiffies, deadline) && (reg & BIT(12))); if (reg & BIT(12)) { - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return -ETIMEDOUT; } @@ -1870,18 +1870,18 @@ static int vsc8514_config_init(struct phy_device *phydev) reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, PHY_S6G_IB_STATUS0); if (reg == 0xffffffff) { - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return -EIO; } } while (time_before(jiffies, deadline) && !(reg & BIT(8))); if (!(reg & BIT(8))) { - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return -ETIMEDOUT; } - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); @@ -1908,7 +1908,7 @@ static int vsc8514_config_init(struct phy_device *phydev) return ret; err: - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return ret; } diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index d4266911efc5..ef3441747348 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -80,7 +80,7 @@ static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk, break; } - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588); @@ -98,7 +98,7 @@ static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk, phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); return val; } @@ -130,7 +130,7 @@ static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk, break; } - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL); @@ -154,7 +154,7 @@ static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk, if (cond && upper) phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass); - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); } /* Pick bytes from PTP header */ @@ -1273,7 +1273,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) u32 val; if (!vsc8584_is_1588_input_clk_configured(phydev)) { - mutex_lock(&phydev->mdio.bus->mdio_lock); + phy_lock_mdio_bus(phydev); /* 1588_DIFF_INPUT_CLK configuration: Use an external clock for * the LTC, as per 3.13.29 in the VSC8584 datasheet. @@ -1285,7 +1285,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); - mutex_unlock(&phydev->mdio.bus->mdio_lock); + phy_unlock_mdio_bus(phydev); vsc8584_set_input_clk_configured(phydev); } From 6119dda34e5d0821959e37641b287576826b6378 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:09 +0200 Subject: [PATCH 0336/2056] net: phy: mscc: restore the base page in vsc8514/8584_config_init In the vsc8584_config_init and vsc8514_config_init, the base page is set to 'GPIO', configuration is done, and the page is never explicitly restored to the standard page. 
No bug was triggered as it turns out helpers called in those config_init functions do modify the base page, and set it back to standard. But that is dangerous and any modification to those functions would introduce bugs. This patch fixes this, to improve maintenance, by restoring the base page to 'standard' once 'GPIO' accesses are completed. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 03680933f530..f625109df00a 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1395,6 +1395,11 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) goto err; + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); + if (ret) + goto err; + if (!phy_interface_is_rgmii(phydev)) { val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT | PROC_CMD_READ_MOD_WRITE_PORT; @@ -1779,7 +1784,11 @@ static int vsc8514_config_init(struct phy_device *phydev) val &= ~MAC_CFG_MASK; val |= MAC_CFG_QSGMII; ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val); + if (ret) + goto err; + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_STANDARD); if (ret) goto err; From d4a76dc74dff76c4d7f193806f336899fb8e5016 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:10 +0200 Subject: [PATCH 0337/2056] net: phy: mscc: remove useless page configuration in the config init In the middle of vsc8584_config_init and vsc8514_config_init, the page is set to 'standard'. This is the default value, and the page isn't set to another value before. Those pages configuration can be safely removed. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_main.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index f625109df00a..04e1ef791cec 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1443,8 +1443,6 @@ static int vsc8584_config_init(struct phy_device *phydev) if (ret) return ret; - phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); - val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1); val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK); val |= (MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS) | @@ -1892,11 +1890,6 @@ static int vsc8514_config_init(struct phy_device *phydev) phy_unlock_mdio_bus(phydev); - ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD); - - if (ret) - return ret; - ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK, MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS); From b4368d2b5b96f01c9b0780096dc2306d78c3e72f Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Thu, 25 Jun 2020 17:42:11 +0200 Subject: [PATCH 0338/2056] net: phy: mscc: improve vsc8514/8584_config_init consistency All PHY read and write return values are checked for errors in vsc8514_config_init and vsc8584_config_init, except for one. Fix this. Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/phy/mscc/mscc_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 04e1ef791cec..a4fbf3a4fa97 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -1375,8 +1375,10 @@ static int vsc8584_config_init(struct phy_device *phydev) goto err; } - phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, - MSCC_PHY_PAGE_EXTENDED_GPIO); + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + if (ret) + goto err; val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); val &= ~MAC_CFG_MASK; @@ -1774,8 +1776,10 @@ static int vsc8514_config_init(struct phy_device *phydev) if (phy_package_init_once(phydev)) vsc8514_config_pre_init(phydev); - phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, - MSCC_PHY_PAGE_EXTENDED_GPIO); + ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, + MSCC_PHY_PAGE_EXTENDED_GPIO); + if (ret) + goto err; val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK); From b8392808eb3fc28e523e28cb258c81ca246deb9b Mon Sep 17 00:00:00 2001 From: Kevin Darbyshire-Bryant Date: Thu, 25 Jun 2020 22:18:00 +0200 Subject: [PATCH 0339/2056] sch_cake: add RFC 8622 LE PHB support to CAKE diffserv handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change tin mapping on diffserv3, 4 & 8 for LE PHB support, in essence making LE a member of the Bulk tin. Bulk has the least priority and minimum of 1/16th total bandwidth in the face of higher priority traffic. NB: Diffserv 3 & 4 swap tin 0 & 1 priorities from the default order as found in diffserv8, in case anyone is wondering why it looks a bit odd. Signed-off-by: Kevin Darbyshire-Bryant [ reword commit message slightly ] Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: David S. Miller --- net/sched/sch_cake.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 60f8ae578819..cae367515804 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -312,8 +312,8 @@ static const u8 precedence[] = { }; static const u8 diffserv8[] = { - 2, 5, 1, 2, 4, 2, 2, 2, - 0, 2, 1, 2, 1, 2, 1, 2, + 2, 0, 1, 2, 4, 2, 2, 2, + 1, 2, 1, 2, 1, 2, 1, 2, 5, 2, 4, 2, 4, 2, 4, 2, 3, 2, 3, 2, 3, 2, 3, 2, 6, 2, 3, 2, 3, 2, 3, 2, @@ -323,7 +323,7 @@ static const u8 diffserv8[] = { }; static const u8 diffserv4[] = { - 0, 2, 0, 0, 2, 0, 0, 0, + 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, 2, 0, @@ -334,7 +334,7 @@ static const u8 diffserv4[] = { }; static const u8 diffserv3[] = { - 0, 0, 0, 0, 2, 0, 0, 0, + 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, From b980d477de2d8393f289fee982cd86ee44f5e37c Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Thu, 25 Jun 2020 16:26:27 -0700 Subject: [PATCH 0340/2056] Bluetooth: btusb: Comment on unbalanced pm reference Add a comment clarifying that a PM reference in btusb_qca_cmd_timeout is not unbalanced because it results in a device reset. 
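A short aside on the runtime-PM pattern the new comment documents: usb_autopm_get_interface() normally requires a matching usb_autopm_put_interface(), and a get without a put looks like a leak to reviewers and static checkers. The commit's point is that the imbalance is intentional when the reference is immediately followed by a queued device reset. Below is a minimal, hypothetical sketch of that pattern; demo_force_reset() is an invented name and the justification in its comment paraphrases the commit message rather than asserting new USB core behaviour.

#include <linux/usb.h>

/* Hypothetical helper: keep the device awake, then schedule a reset.
 * Per the reasoning in the patch above, no usb_autopm_put_interface()
 * follows on the success path because the queued reset re-initializes
 * the interface, so the reference is deliberately left unbalanced.
 */
static void demo_force_reset(struct usb_interface *intf)
{
	int err;

	err = usb_autopm_get_interface(intf);
	if (!err)
		usb_queue_reset_device(intf);
}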
Signed-off-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f8a71fdabb3d..0e143c0cecf2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -581,6 +581,7 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev) return; bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device."); + /* This is not an unbalanced PM reference since the device will reset */ err = usb_autopm_get_interface(data->intf); if (!err) usb_queue_reset_device(data->intf); From fe21b6c3a65ca74cc4d0909635649bea48b115b3 Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Mon, 4 May 2020 09:43:48 -0700 Subject: [PATCH 0341/2056] i40e: Move client header location Move i40e_client.h to include/linux/net/intel/* since its shared between i40iw and i40e. Signed-off-by: Shiraz Saleem Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/infiniband/hw/i40iw/Makefile | 1 - drivers/infiniband/hw/i40iw/i40iw.h | 2 +- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_client.c | 2 +- .../intel/i40e => include/linux/net/intel}/i40e_client.h | 0 5 files changed, 3 insertions(+), 4 deletions(-) rename {drivers/net/ethernet/intel/i40e => include/linux/net/intel}/i40e_client.h (100%) diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile index 8942f8229945..34da9eba8a7c 100644 --- a/drivers/infiniband/hw/i40iw/Makefile +++ b/drivers/infiniband/hw/i40iw/Makefile @@ -1,5 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -ccflags-y := -I $(srctree)/drivers/net/ethernet/intel/i40e obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 49d92638e0db..25747b85a79c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +58,6 @@ #include "i40iw_d.h" #include "i40iw_hmc.h" -#include #include "i40iw_type.h" #include "i40iw_p.h" #include diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e95b8da45e07..5ff0828a6f50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -38,7 +38,7 @@ #include #include "i40e_type.h" #include "i40e_prototype.h" -#include "i40e_client.h" +#include #include #include "i40e_virtchnl_pf.h" #include "i40e_txrx.h" diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index e81530ca08d0..befd3018183f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -3,10 +3,10 @@ #include #include +#include #include "i40e.h" #include "i40e_prototype.h" -#include "i40e_client.h" static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR; static struct i40e_client *registered_client; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/include/linux/net/intel/i40e_client.h similarity index 100% rename from drivers/net/ethernet/intel/i40e/i40e_client.h rename to include/linux/net/intel/i40e_client.h From 3c98f9ee6bc280499cbcb6f8e42c001c3bd7caa1 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 6 Jan 2020 16:09:33 -0800 Subject: [PATCH 0342/2056] i40e: remove unused defines Remove all the unused defines as they are just dead weight. 
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 24 - .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 495 -- drivers/net/ethernet/intel/i40e/i40e_common.c | 4 - drivers/net/ethernet/intel/i40e/i40e_dcb.h | 5 - .../net/ethernet/intel/i40e/i40e_debugfs.c | 1 - drivers/net/ethernet/intel/i40e/i40e_devids.h | 3 - drivers/net/ethernet/intel/i40e/i40e_hmc.h | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 10 +- drivers/net/ethernet/intel/i40e/i40e_osdep.h | 1 - .../net/ethernet/intel/i40e/i40e_register.h | 4656 ----------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 25 - drivers/net/ethernet/intel/i40e/i40e_type.h | 82 - .../ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 - include/linux/net/intel/i40e_client.h | 9 - 14 files changed, 1 insertion(+), 5316 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5ff0828a6f50..8151671e5e0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -60,17 +60,14 @@ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 #define I40E_MAX_VF_QUEUES 16 -#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64) -#define I40E_FDIR_RING 0 #define I40E_FDIR_RING_COUNT 32 #define I40E_MAX_AQ_BUF_SIZE 4096 #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) -#define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) @@ -92,10 +89,6 @@ #define I40E_OEM_SNAP_SHIFT 16 #define I40E_OEM_RELEASE_MASK 0x0000ffff -/* The values in here are decimal coded as hex as is the case in the NVM map*/ -#define I40E_CURRENT_NVM_VERSION_HI 0x2 -#define I40E_CURRENT_NVM_VERSION_LO 0x40 - #define I40E_RX_DESC(R, i) \ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ @@ -105,9 +98,6 @@ #define I40E_TX_FDIRDESC(R, i) \ (&(((struct i40e_filter_program_desc *)((R)->desc))[i])) -/* default to trying for four seconds */ -#define I40E_TRY_LINK_TIMEOUT (4 * HZ) - /* BW rate limiting */ #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ #define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ @@ -295,9 +285,6 @@ struct i40e_cloud_filter { u8 tunnel_type; }; -#define I40E_DCB_PRIO_TYPE_STRICT 0 -#define I40E_DCB_PRIO_TYPE_ETS 1 -#define I40E_DCB_STRICT_PRIO_CREDITS 127 /* DCB per TC information data structure */ struct i40e_tc_info { u16 qoffset; /* Queue offset from base queue */ @@ -357,15 +344,6 @@ struct i40e_ddp_old_profile_list { I40E_FLEX_SET_FSIZE(fsize) | \ I40E_FLEX_SET_SRC_WORD(src)) -#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \ - I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) -#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \ - I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \ - I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) #define I40E_MAX_FLEX_SRC_OFFSET 0x1F @@ -390,7 +368,6 @@ struct i40e_ddp_old_profile_list { #define I40E_L4_GLQF_ORT_IDX 35 /* Flex PIT register index */ -#define I40E_FLEX_PIT_IDX_START_L2 0 #define I40E_FLEX_PIT_IDX_START_L3 3 #define I40E_FLEX_PIT_IDX_START_L4 6 @@ -531,7 
+508,6 @@ struct i40e_pf { #define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9) #define I40E_HW_PTP_L4_CAPABLE BIT(10) #define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11) -#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12) #define I40E_HW_HAVE_CRT_RETIMER BIT(13) #define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14) #define I40E_HW_PHY_CONTROLS_LEDS BIT(15) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index aa5f1c0aa721..c52910f03cfb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -55,29 +55,17 @@ struct i40e_aq_desc { */ /* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 #define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 #define I40E_AQ_FLAG_LB_SHIFT 9 #define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 #define I40E_AQ_FLAG_BUF_SHIFT 12 #define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ #define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ #define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ #define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ #define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ #define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -362,13 +350,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); /* Request resource ownership (direct 0x0008) * Release resource ownership (direct 0x0009) */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - struct i40e_aqc_request_resource { __le16 resource_id; __le16 access_type; @@ -384,7 +365,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); */ struct i40e_aqc_list_capabilites { u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 u8 pf_index; u8 reserved[2]; __le32 count; @@ -411,8 +391,6 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 @@ -441,11 +419,6 @@ struct i40e_aqc_list_capabilities_element_resp { /* Set CPPM Configuration (direct 0x0103) */ struct i40e_aqc_cppm_configuration { __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define I40E_AQ_CPPM_EN_DMARC 0x8000 __le16 ttlx; __le32 dmacr; __le16 dmcth; @@ -459,15 +432,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 
-#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -482,19 +448,6 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -507,7 +460,6 @@ I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); /* Manage LAA Command (0x0106) - obsolete */ struct i40e_aqc_mng_laa { __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 u8 reserved[2]; __le32 sal; __le16 sah; @@ -520,11 +472,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); struct i40e_aqc_mac_address_read { __le16 command_flags; #define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 #define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 u8 reserved[6]; __le32 addr_high; __le32 addr_low; @@ -548,9 +496,7 @@ struct i40e_aqc_mac_address_write { #define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 __le16 mac_sah; __le32 mac_sal; @@ -573,22 +519,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); struct i40e_aqc_set_wol_filter { __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 u8 reserved[2]; __le32 address_high; __le32 address_low; @@ -608,12 +541,6 @@ I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; @@ -646,25 +573,12 @@ 
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); struct i40e_aqc_switch_config_element_resp { u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 __le16 seid; __le16 uplink_seid; __le16 downlink_seid; u8 reserved[3]; u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 __le16 scheduler_id; __le16 element_info; }; @@ -697,12 +611,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); /* Set Port Parameters command (direct 0x0203) */ struct i40e_aqc_set_port_parameters { __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -722,25 +631,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); /* expect an array of these structs in the response buffer */ struct i40e_aqc_switch_resource_alloc_element_resp { u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 u8 reserved1; __le16 guaranteed; __le16 total; @@ -756,7 +646,6 @@ struct i40e_aqc_set_switch_config { __le16 flags; /* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; /* The ethertype in switch_tag is dropped on ingress and used * internally by the switch. 
Set this to zero for the default @@ -789,17 +678,10 @@ struct i40e_aqc_set_switch_config { */ #define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 -#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 -#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 -#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 -#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 -#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 -#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 -#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 u8 mode; u8 rsvd5[5]; }; @@ -834,19 +716,13 @@ struct i40e_aqc_add_get_update_vsi { __le16 uplink_seid; u8 connection_type; #define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 u8 reserved1; u8 vf_id; u8 reserved2; __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) #define I40E_AQ_VSI_TYPE_VF 0x0 #define I40E_AQ_VSI_TYPE_VMDQ2 0x1 #define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 __le32 addr_high; __le32 addr_low; }; @@ -870,24 +746,18 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 #define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 #define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 #define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 /* switch section */ __le16 switch_id; /* 12bit id combined with flags below */ #define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 #define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 u8 sw_reserved[2]; /* security section */ u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 u8 sec_reserved; @@ -899,78 +769,33 @@ struct i40e_aqc_vsi_properties_data { #define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ I40E_AQ_VSI_PVLAN_MODE_SHIFT) #define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 #define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 #define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 #define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ I40E_AQ_VSI_PVLAN_EMOD_SHIFT) #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 #define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 u8 pvlan_reserved[3]; /* ingress egress up sections */ __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - 
I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) __le32 egress_table; /* same defines as for ingress table */ /* cascaded PV section */ __le16 cas_pv_tag; u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 u8 cas_pv_reserved; /* queue mapping section */ __le16 mapping_flags; #define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) __le16 tc_mapping[8]; #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) /* queueing option section */ u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 u8 queueing_opt_reserved[3]; /* scheduler section */ @@ -995,10 +820,6 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); */ struct i40e_aqc_add_update_pv { __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 __le16 uplink_seid; __le16 connected_seid; u8 reserved[10]; @@ -1009,10 +830,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); struct i40e_aqc_add_update_pv_completion { /* reserved for update; for add also encodes error if rc == ENOSPC */ __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 u8 reserved[14]; }; @@ -1026,9 +843,6 @@ struct i40e_aqc_get_pv_params_completion { __le16 seid; __le16 default_stag; __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 u8 reserved[8]; __le16 default_port_seid; }; @@ -1041,12 +855,8 @@ struct i40e_aqc_add_veb { __le16 downlink_seid; __le16 veb_flags; #define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; @@ -1059,10 +869,6 @@ struct i40e_aqc_add_veb_completion { __le16 switch_seid; /* also encodes error if rc 
== ENOSPC; codes are the same as add_pv */ __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 __le16 statistic_index; __le16 vebs_used; __le16 vebs_free; @@ -1095,9 +901,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); struct i40e_aqc_macvlan { __le16 num_addresses; __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) #define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 __le32 addr_high; __le32 addr_low; @@ -1111,18 +914,11 @@ struct i40e_aqc_add_macvlan_element_data { __le16 vlan_tag; __le16 flags; #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) /* response section */ u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define I40E_AQC_MM_HASH_MATCH 0x02 #define I40E_AQC_MM_ERR_NO_RES 0xFF u8 reserved1[3]; }; @@ -1148,14 +944,10 @@ struct i40e_aqc_remove_macvlan_element_data { __le16 vlan_tag; u8 flags; #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 u8 reserved[3]; /* reply section */ u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF u8 reply_reserved[3]; }; @@ -1166,30 +958,8 @@ struct i40e_aqc_remove_macvlan_element_data { struct i40e_aqc_add_remove_vlan_element_data { __le16 vlan_tag; u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 u8 reserved; u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF u8 reserved1[3]; }; @@ -1213,9 +983,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 #define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 u8 reserved[8]; }; @@ -1227,11 +995,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); */ struct i40e_aqc_add_tag { __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - 
I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; __le16 queue_number; u8 reserved[8]; @@ -1252,9 +1016,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); */ struct i40e_aqc_remove_tag { __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) __le16 tag; u8 reserved[12]; }; @@ -1290,9 +1051,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); /* Update S/E-Tag (direct 0x0259) */ struct i40e_aqc_update_tag { __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) __le16 old_tag; __le16 new_tag; u8 reserved[10]; @@ -1319,13 +1077,8 @@ struct i40e_aqc_add_remove_control_packet_filter { __le16 flags; #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) __le16 queue; u8 reserved[2]; }; @@ -1351,9 +1104,6 @@ struct i40e_aqc_add_remove_cloud_filters { u8 num_filters; u8 reserved; __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) u8 big_buffer_flag; #define I40E_AQC_ADD_CLOUD_CMD_BB 1 u8 reserved2[3]; @@ -1380,9 +1130,6 @@ struct i40e_aqc_cloud_filters_element_data { } raw_v6; } ipaddr; __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_FILTER_SHIFT) /* 0x0000 reserved */ /* 0x0001 reserved */ /* 0x0002 reserved */ @@ -1404,36 +1151,20 @@ struct i40e_aqc_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ #define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 __le32 tenant_id; u8 reserved[4]; __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) u8 reserved2[14]; /* response section */ u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF u8 response_reserved[7]; }; @@ -1445,37 +1176,7 @@ I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); struct i40e_aqc_cloud_filters_element_bb { struct i40e_aqc_cloud_filters_element_data element; u16 general_fields[32]; -#define 
I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 }; I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); @@ -1504,11 +1205,6 @@ I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); struct i40e_aqc_replace_cloud_filters_cmd { u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 u8 old_filter_type; u8 new_filter_type; u8 tr_bit; @@ -1521,25 +1217,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); struct i40e_aqc_replace_cloud_filters_cmd_buf { u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 struct i40e_filter_data filters[8]; }; @@ -1556,8 +1233,6 @@ struct i40e_aqc_add_delete_mirror_rule { #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 #define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 @@ -1600,8 +1275,6 @@ struct i40e_aqc_write_ddp_resp { struct i40e_aqc_get_applied_profiles { u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define 
I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 u8 rsv[3]; __le32 reserved; __le32 addr_high; @@ -1618,8 +1291,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); struct i40e_aqc_pfc_ignore { u8 tc_bitmap; u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 u8 reserved[14]; }; @@ -1736,7 +1407,6 @@ struct i40e_aqc_configure_switching_comp_ets_data { u8 reserved[4]; u8 tc_valid_bits; u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 u8 tc_strict_priority_flags; u8 reserved1[17]; u8 tc_bw_share_credits[8]; @@ -1977,40 +1647,18 @@ struct i40e_aq_get_phy_abilities_resp { u8 abilities; #define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 #define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 __le32 eeer_val; u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 u8 phy_type_ext; #define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 -#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40 -#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80 u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 #define I40E_AQ_REQUEST_FEC_KR 0x04 #define I40E_AQ_REQUEST_FEC_RS 0x08 #define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 u8 ext_comp_code; u8 phy_id[4]; @@ -2028,7 +1676,6 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; @@ -2056,21 +1703,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); struct i40e_aq_set_mac_config { __le16 max_frame_size; u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 -#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 -#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80 u8 tx_timer_priority; /* bitmap */ __le16 tx_timer_value; __le16 fc_refresh_threshold; @@ -2092,8 +1724,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); /* Get Link Status cmd & response data structure (direct 0x0607) */ struct i40e_aqc_get_link_status { __le16 command_flags; /* only field set on command */ -#define 
I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 #define I40E_AQ_LSE_DISABLE 0x2 #define I40E_AQ_LSE_ENABLE 0x3 /* only response uses this flag */ @@ -2102,44 +1732,16 @@ struct i40e_aqc_get_link_status { u8 link_speed; /* i40e_aq_link_speed */ u8 link_info; #define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 #define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 u8 an_info; #define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 #define I40E_AQ_LINK_PAUSE_TX 0x20 #define I40E_AQ_LINK_PAUSE_RX 0x40 #define I40E_AQ_QUALIFIED_MODULE 0x80 u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ /* Since firmware API 1.7 loopback field keeps power class info as well */ #define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 @@ -2149,11 +1751,6 @@ struct i40e_aqc_get_link_status { union { struct { u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; struct { @@ -2171,13 +1768,7 @@ struct i40e_aqc_set_phy_int_mask { __le16 event_mask; #define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 #define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 u8 reserved1[6]; }; @@ -2209,13 +1800,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); /* Set PHY Debug command (0x0622) */ struct i40e_aqc_set_phy_debug { u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 /* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 /* Disable link manageability on all ports */ @@ -2247,7 +1831,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); /* Get PHY Register command (0x0629) */ struct i40e_aqc_phy_register_access { u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_address; @@ -2274,9 +1857,7 @@ struct i40e_aqc_nvm_update { #define I40E_AQ_NVM_LAST_CMD 0x01 #define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 #define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; @@ -2291,9 +1872,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); /* NVM Config Read (indirect 0x0704) */ struct i40e_aqc_nvm_config_read { __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ @@ -2315,16 +1893,8 @@ struct i40e_aqc_nvm_config_write { I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 __le16 feature_options; __le16 feature_selection; }; @@ -2344,7 +1914,6 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); * no command data struct used */ struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; }; @@ -2366,9 +1935,6 @@ I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); */ struct i40e_aqc_thermal_sensor { u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 u8 reserved[7]; __le32 addr_high; __le32 addr_low; @@ -2421,10 +1987,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); */ struct i40e_aqc_alternate_write_done { __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 u8 reserved[14]; }; @@ -2433,8 +1995,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); /* Set OEM mode (direct 0x0905) */ struct i40e_aqc_alternate_set_mode { __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 u8 reserved[12]; }; @@ -2460,13 +2020,9 @@ struct i40e_aqc_lldp_get_mib { #define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 #define I40E_AQ_LLDP_MIB_LOCAL 0x0 #define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) /* TX pause flags use I40E_AQ_LINK_TX_* above */ __le16 local_len; __le16 remote_len; @@ -2482,7 +2038,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); */ struct i40e_aqc_lldp_update_mib { u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 u8 
reserved[7]; __le32 addr_high; @@ -2521,7 +2076,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); /* Stop LLDP (direct 0x0A05) */ struct i40e_aqc_lldp_stop { u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 #define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 #define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2 u8 reserved[15]; @@ -2627,13 +2181,6 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); * Used to replace the local MIB of a given LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_set_local_mib { -#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ - BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; __le16 length; @@ -2648,9 +2195,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); * Used for stopping/starting specific LLDP agent. e.g. DCBx */ struct i40e_aqc_lldp_stop_start_specific_agent { -#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 -#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2660,7 +2204,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent); /* Restore LLDP Agent factory settings (direct 0x0A0A) */ struct i40e_aqc_lldp_restore { u8 command; -#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0 #define I40E_AQ_LLDP_AGENT_RESTORE 0x1 u8 reserved[15]; }; @@ -2674,8 +2217,6 @@ struct i40e_aqc_add_udp_tunnel { u8 protocol_type; #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; @@ -2685,8 +2226,6 @@ struct i40e_aqc_add_udp_tunnel_completion { __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 u8 total_filters; u8 reserved[11]; }; @@ -2759,16 +2298,7 @@ struct i40e_aqc_tunnel_key_structure { u8 key1_len; /* 0 to 15 */ u8 key2_len; /* 0 to 15 */ u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 u8 reserved[10]; }; @@ -2777,9 +2307,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); /* OEM mode commands (direct 0xFE0x) */ struct i40e_aqc_oem_param_change { __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 __le32 param_value1; __le16 param_value2; u8 reserved[6]; @@ -2789,8 +2316,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); struct i40e_aqc_oem_state_change { __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 u8 reserved[12]; }; @@ -2826,14 +2351,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); struct i40e_acq_set_test_mode { u8 mode; -#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 u8 reserved[3]; u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 u8 reserved2[3]; __le32 
address_high; __le32 address_low; @@ -2874,20 +2393,6 @@ struct i40e_aqc_debug_modify_reg { I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); /* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - struct i40e_aqc_debug_dump_internals { u8 cluster_id; u8 table_id; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 45b90eb11adb..4ab081953e19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1455,10 +1455,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx) return gpio_val; } -#define I40E_COMBINED_ACTIVITY 0xA -#define I40E_FILTER_ACTIVITY 0xE -#define I40E_LINK_ACTIVITY 0xC -#define I40E_MAC_ACTIVITY 0xD #define I40E_FW_LED BIT(4) #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h index ba86ad833bee..2b1a2e81ac73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h @@ -6,10 +6,8 @@ #include "i40e_type.h" -#define I40E_DCBX_STATUS_NOT_STARTED 0 #define I40E_DCBX_STATUS_IN_PROGRESS 1 #define I40E_DCBX_STATUS_DONE 2 -#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3 #define I40E_DCBX_STATUS_DISABLED 7 #define I40E_TLV_TYPE_END 0 @@ -24,7 +22,6 @@ #define I40E_CEE_DCBX_OUI 0x001b21 #define I40E_CEE_DCBX_TYPE 2 -#define I40E_CEE_SUBTYPE_CTRL 1 #define I40E_CEE_SUBTYPE_PG_CFG 2 #define I40E_CEE_SUBTYPE_PFC_CFG 3 #define I40E_CEE_SUBTYPE_APP_PRI 4 @@ -105,9 +102,7 @@ struct i40e_cee_ctrl_tlv { struct i40e_cee_feat_tlv { struct i40e_cee_tlv_hdr hdr; u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */ -#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80 #define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40 -#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20 u8 subtype; u8 tlvinfo[1]; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 99ea543dd245..9cb9b781451c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -688,7 +688,6 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf) i40e_dbg_dump_vf(pf, i); } -#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4) /** * i40e_dbg_command_write - write into command datum * @filp: the opened file diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index bf15a868292f..33df3bf2f73b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -32,8 +32,5 @@ #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ - (d) == I40E_DEV_ID_QSFP_B || \ - (d) == I40E_DEV_ID_QSFP_C) #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h index 1c78de838857..3113792afaff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h +++ 
b/drivers/net/ethernet/intel/i40e/i40e_hmc.h @@ -14,7 +14,6 @@ struct i40e_hw; #define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ #define I40E_HMC_PAGED_BP_SIZE 4096 #define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 struct i40e_hmc_obj_info { u64 base; /* base addr in FPM */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5d807c8004f8..5f7f5147f9a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6492,8 +6492,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) return err; } #endif /* CONFIG_I40E_DCB */ -#define SPEED_SIZE 14 -#define FC_SIZE 8 + /** * i40e_print_link_message - print link up or down * @vsi: the VSI for which link needs a message @@ -8950,13 +8949,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; } -/* We can see up to 256 filter programming desc in transit if the filters are - * being applied really fast; before we see the first - * filter miss error on Rx queue 0. Accumulating enough error messages before - * reacting will make sure we don't cause flush too often. - */ -#define I40E_MAX_FD_PROGRAM_ERROR 256 - /** * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table * @pf: board private structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index c302ef2524f8..2f6815b2f8df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -26,7 +26,6 @@ do { \ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #define rd32(a, reg) readl((a)->hw_addr + (reg)) -#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) #define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index d35d690ca10f..7cd3a08a1891 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -4,53 +4,14 @@ #ifndef _I40E_REGISTER_H_ #define _I40E_REGISTER_H_ -#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT) -#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */ -#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT) -#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */ -#define I40E_GL_ARQH_ARQH_SHIFT 0 -#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT) -#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */ -#define I40E_GL_ARQT_ARQT_SHIFT 0 -#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT) -#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */ -#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT) -#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */ -#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT) -#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */ -#define I40E_GL_ATQH_ATQH_SHIFT 0 -#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT) -#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */ -#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0 
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT) -#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT) -#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT) #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT) -#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT) -#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */ -#define I40E_GL_ATQT_ATQT_SHIFT 0 -#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT) #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */ -#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT) #define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */ -#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT) #define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */ #define I40E_PF_ARQH_ARQH_SHIFT 0 #define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT) #define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */ -#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT) #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28 #define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT) #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29 @@ -60,20 +21,10 @@ #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ -#define I40E_PF_ARQT_ARQT_SHIFT 0 -#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) #define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */ -#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT) #define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */ -#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT) #define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */ -#define I40E_PF_ATQH_ATQH_SHIFT 0 -#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT) #define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */ -#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT) #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28 #define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT) #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29 @@ -83,284 +34,13 @@ #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ -#define I40E_PF_ATQT_ATQT_SHIFT 0 -#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) -#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAH_MAX_INDEX 127 -#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQBAL_MAX_INDEX 127 -#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT) 
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQH_MAX_INDEX 127 -#define I40E_VF_ARQH_ARQH_SHIFT 0 -#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT) -#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQLEN_MAX_INDEX 127 -#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) -#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ARQT_MAX_INDEX 127 -#define I40E_VF_ARQT_ARQT_SHIFT 0 -#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT) -#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAH_MAX_INDEX 127 -#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQBAL_MAX_INDEX 127 -#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT) -#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQH_MAX_INDEX 127 -#define I40E_VF_ATQH_ATQH_SHIFT 0 -#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT) -#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQLEN_MAX_INDEX 127 -#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) -#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_VF_ATQT_MAX_INDEX 127 -#define I40E_VF_ATQT_ATQT_SHIFT 0 -#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT) -#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */ -#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0 -#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8 -#define 
I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */ -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */ -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0 -#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT) -#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12 -#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT) -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15 -#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT) -#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17 -#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT) -#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3 -#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0 -#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT) -#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */ -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0 -#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT) -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1 -#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT) -#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT) -#define 
I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT) -#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */ -#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0 -#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT) -#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */ -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0 -#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT) -#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3 -#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT) -#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */ -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0 -#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT) -#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3 -#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0 -#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT) -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16 -#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT) #define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */ -#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0 -#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT) -#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2 -#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6 -#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT) -#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9 -#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT) #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16 #define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT) #define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */ #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0 #define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT) -#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */ -#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0 -#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT) -#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1 -#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2 -#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT) -#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3 -#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT) -#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4 -#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT) -#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */ -#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0 -#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1 -#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2 -#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) -#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8 -#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT) -#define I40E_PRTDCB_RETSTCC(_i) 
(0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7 -#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0 -#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 -#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) -#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) -#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ -#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 -#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8 -#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT) -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16 -#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) -#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */ -#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0 -#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT) -#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */ -#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0 -#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3 -#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6 -#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9 -#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12 -#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15 -#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18 -#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT) -#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21 -#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT) -#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7 -#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0 -#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT) -#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */ -#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */ -#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7 
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT) -#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */ -#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0 -#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT) -#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) -#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT) -#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */ -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0 -#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT) -#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8 -#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT) -#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */ -#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0 -#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8 -#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9 -#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10 -#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11 -#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12 -#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13 -#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14 -#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT) -#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15 -#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT) -#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */ -#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0 -#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT) -#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */ -#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0 -#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT) -#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4 -#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT) -#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5 -#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT) -#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16 -#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT) #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ -#define I40E_GL_FWSTS_FWS0B_SHIFT 0 -#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT) -#define I40E_GL_FWSTS_FWRI_SHIFT 9 -#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT) #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define 
I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
@@ -369,500 +49,119 @@
#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
#define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
#define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F,
I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT) -#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 -#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 -#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) -#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */ -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0 -#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT) -#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */ -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0 -#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT) -#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CCMD_MAX_INDEX 3 -#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0 -#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT) -#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16 -#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT) -#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24 -#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT) -#define I40E_GLGEN_I2CCMD_OP_SHIFT 27 -#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT) -#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28 -#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT) -#define I40E_GLGEN_I2CCMD_R_SHIFT 29 -#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT) -#define I40E_GLGEN_I2CCMD_E_SHIFT 31 -#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT) -#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0 -#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5 -#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8 -#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9 -#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10 -#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11 -#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12 -#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13 -#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14 -#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT) -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15 -#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT) -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31 -#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT) -#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR 
*/ -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0 -#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT) -#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17 -#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT) -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29 -#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT) #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20 -#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25 -#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT) -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31 -#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT) #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSCA_MAX_INDEX 3 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 -#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT) #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16 -#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT) #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21 -#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT) #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26 -#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_GLGEN_MSCA_STCODE_SHIFT 28 -#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT) #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 -#define 
I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT) #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT) -#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */ -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0 -#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT) -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16 -#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT) #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */ #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT) #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2 #define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT) -#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4 -#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT) -#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6 -#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8 -#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT) -#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10 -#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT) #define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */ #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT) -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8 -#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT) #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */ #define I40E_GLGEN_RTRIG_CORER_SHIFT 0 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT) #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1 #define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT) -#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2 -#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT) #define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ -#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0 -#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT) -#define I40E_GLGEN_STAT_DCBEN_SHIFT 2 -#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT) -#define I40E_GLGEN_STAT_VTEN_SHIFT 3 -#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT) -#define I40E_GLGEN_STAT_FCOEN_SHIFT 4 -#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT) -#define I40E_GLGEN_STAT_EVBEN_SHIFT 5 -#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT) -#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6 -#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT) #define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3 -#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0 -#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT) #define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */ -#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0 -#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT) #define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */ #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0 #define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, 
I40E_PFGEN_CTRL_PFSWR_SHIFT) -#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */ -#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0 -#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT) #define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */ #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0 #define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT) -#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */ -#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0 -#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT) -#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1 -#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT) -#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2 -#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT) -#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3 -#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT) #define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */ #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0 #define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1 -#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2 -#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT) -#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */ -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0 -#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT) #define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */ -#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0 -#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT) -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1 -#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT) #define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFGEN_RSTAT1_MAX_INDEX 127 -#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT) #define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0 #define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT) #define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0 #define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT) -#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RSTAT_MAX_INDEX 383 -#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0 -#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT) -#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_VSIGEN_RTRIG_MAX_INDEX 383 -#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0 -#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT) #define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0 #define 
I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT) #define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0 -#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT) #define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */ -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT) #define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT) #define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0 -#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT) #define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */ #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT) #define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */ -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0 -#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT) #define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */ -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0 -#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT) -#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29 -#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT) -#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0 -#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT) -#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */ -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT) -#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0 -#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT) -#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, 
I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT) -#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */ -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0 -#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT) -#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */ -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0 -#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT) #define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */ -#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0 -#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT) #define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT) #define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0 -#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT) #define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */ -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT) #define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT) -#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24 -#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT) #define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0 -#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT) #define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */ -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0 -#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT) -#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0 -#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT) -#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_SDPART_MAX_INDEX 15 -#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT) #define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */ -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0 -#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT) #define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */ -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0 -#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT) -#define 
I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7 -#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8 -#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16 -#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT) -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31 -#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT) #define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT) #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 -#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT) #define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */ -#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0 -#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT) #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 -#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT) #define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */ -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0 -#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT) #define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */ #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 -#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 -#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 -#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12 -#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT) -#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */ -#define I40E_GL_GP_FUSE_MAX_INDEX 28 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0 -#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT) -#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */ -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1 -#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT) -#define I40E_GL_UFUSE_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10 -#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT) -#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11 -#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT) -#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */ -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK 
I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define 
I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */ -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0 -#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT) #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT) #define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */ #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT) #define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_PFINT_CEQCTL_MAX_INDEX 511 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16 -#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT) -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT) #define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */ -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0 -#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT) #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT) -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2 -#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT) #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */ #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT) @@ -872,8 +171,6 @@ #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT) #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, 
I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 @@ -881,7 +178,6 @@ #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 #define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 @@ -891,93 +187,13 @@ #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT) #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */ -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0 -#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1 -#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2 -#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3 -#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4 -#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5 -#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6 -#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7 -#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8 -#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9 -#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10 -#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11 -#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12 -#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, 
I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13 -#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14 -#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15 -#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16 -#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17 -#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18 -#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19 -#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20 -#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21 -#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22 -#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23 -#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24 -#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25 -#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26 -#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27 -#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28 -#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT) -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29 -#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT) #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */ #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT) #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1 #define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5 -#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6 -#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT) -#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7 -#define I40E_PFINT_ICR0_QUEUE_6_MASK 
I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
#define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
#define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
@@ -986,14 +202,8 @@
#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
#define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
@@ -1017,10 +227,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
#define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
@@ -1029,43 +235,17 @@
#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
#define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF,
I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 -#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */ -#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT) -#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT) #define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ -#define I40E_PFINT_RATEN_MAX_INDEX 511 -#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT) -#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT) #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */ -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_RQCTL_MAX_INDEX 1535 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11 @@ -1075,13 +255,11 @@ #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT) #define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QINT_TQCTL_MAX_INDEX 1535 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0 #define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT) #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11 @@ -1091,160 +269,45 @@ #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27 -#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30 #define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT) #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31 #define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT) #define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127 -#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) -#define 
I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT) #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511 -#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT) #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_MAX_INDEX 127 -#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_SWINT_SHIFT 31 -#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT) -#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31 -#define 
I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT) -#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VFINT_ITR0_MAX_INDEX 2 -#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN_MAX_INDEX 2 -#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT) #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VPINT_AEQCTL_MAX_INDEX 127 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_VPINT_CEQCTL_MAX_INDEX 511 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0 -#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11 -#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) -#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13 -#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT) #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30 #define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT) -#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31 -#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT) #define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLST0_MAX_INDEX 127 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11 -#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT) #define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_LNKLSTN_MAX_INDEX 511 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK 
I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT) #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) -#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPINT_RATE0_MAX_INDEX 127 -#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT) -#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT) -#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */ -#define I40E_VPINT_RATEN_MAX_INDEX 511 -#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0 -#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT) -#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6 -#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT) -#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */ -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0 -#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT) -#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1 -#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT) #define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */ #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0 #define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT) #define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0 -#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT) #define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0 -#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT) #define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */ -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0 -#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT) #define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */ -#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0 #define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT) -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16 -#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 @@ -1257,19 +320,12 @@ #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 #define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT) -#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT) #define I40E_QRX_ENA_QENA_STAT_SHIFT 2 #define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT) #define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QRX_TAIL_MAX_INDEX 1535 -#define I40E_QRX_TAIL_TAIL_SHIFT 0 -#define I40E_QRX_TAIL_TAIL_MASK 
I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT) #define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_CTL_MAX_INDEX 1535 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0 #define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT) #define I40E_QTX_CTL_PF_INDX_SHIFT 2 @@ -1277,43 +333,22 @@ #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7 #define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT) #define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_ENA_MAX_INDEX 1535 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0 #define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT) -#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1 -#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT) #define I40E_QTX_ENA_QENA_STAT_SHIFT 2 #define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT) #define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */ -#define I40E_QTX_HEAD_MAX_INDEX 1535 -#define I40E_QTX_HEAD_HEAD_SHIFT 0 -#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT) -#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16 -#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT) #define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ -#define I40E_QTX_TAIL_MAX_INDEX 1535 -#define I40E_QTX_TAIL_TAIL_SHIFT 0 -#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT) #define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_MAPENA_MAX_INDEX 127 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0 #define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT) #define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QTABLE_MAX_INDEX 15 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0 #define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT) #define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QBASE_MAX_INDEX 383 -#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0 -#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT) #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT) #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSILAN_QTABLE_MAX_INDEX 7 -#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0 -#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT) -#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16 -#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT) #define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */ #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0 #define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT) @@ -1322,789 +357,47 @@ #define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */ #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0 #define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0 -#define 
I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT) -#define 
I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT) -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */ -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0 -#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */ -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT) -#define 
I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT) -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14 -#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT) -#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */ -#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0 -#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT) -#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */ -#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0 -#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT) -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10 -#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT) -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11 -#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT) -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15 -#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT) -#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16 -#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT) -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19 -#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26 -#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27 -#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28 -#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29 -#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT) -#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */ -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0 -#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT) -#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0 -#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT) -#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT) -#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7 -#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0 -#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT) -#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */ -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0 -#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, 
I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1 -#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17 -#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT) -#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19 -#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT) -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25 -#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26 -#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28 -#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT) -#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29 -#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT) -#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7 -#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0 -#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT) -#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_MAX_INDEX 7 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4 -#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5 -#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13 -#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17 -#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21 -#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26 -#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29 -#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30 -#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31 -#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7 -#define 
I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4 -#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24 -#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27 -#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28 -#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29 -#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31 -#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT) -#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT) -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16 -#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT) -#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_METF_MAX_INDEX 3 -#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0 -#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT) -#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30 -#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT) -#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0 -#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT) -#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16 -#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT) -#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17 -#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT) -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18 -#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT) -#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3 -#define 
I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT) -#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */ -#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15 -#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0 -#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT) -#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAH_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0 -#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT) -#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */ -#define I40E_PRT_MNG_MMAL_MAX_INDEX 3 -#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0 -#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT) -#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */ -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0 -#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT) -#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */ -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0 -#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1 -#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2 -#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT) -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3 -#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4 -#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5 -#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6 -#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT) -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7 -#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT) -#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */ -#define I40E_MSIX_PBA_MAX_INDEX 5 -#define I40E_MSIX_PBA_PENBIT_SHIFT 0 -#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT) -#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TADD_MAX_INDEX 128 -#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT) -#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TMSG_MAX_INDEX 128 -#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TUADD_MAX_INDEX 128 -#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT) -#define 
I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */ -#define I40E_MSIX_TVCTRL_MAX_INDEX 128 -#define I40E_MSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ -#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0 -#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT) -#define I40E_GLNVM_FLA_FL_CE_SHIFT 1 -#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT) -#define I40E_GLNVM_FLA_FL_SI_SHIFT 2 -#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT) -#define I40E_GLNVM_FLA_FL_SO_SHIFT 3 -#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT) -#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4 -#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT) -#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5 -#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT) #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) -#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18 -#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT) -#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30 -#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT) -#define I40E_GLNVM_FLA_FL_DER_SHIFT 31 -#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT) -#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */ -#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0 -#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT) -#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31 -#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT) #define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */ -#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0 -#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT) #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5 #define 
I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT) -#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8 -#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT) -#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23 -#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT) -#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25 -#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT) -#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */ -#define I40E_GLNVM_PROTCSR_MAX_INDEX 59 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0 -#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT) #define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */ -#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0 -#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT) #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14 -#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT) -#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29 -#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT) #define I40E_GLNVM_SRCTL_START_SHIFT 30 -#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 #define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ -#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 -#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16 #define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1 -#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2 -#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3 #define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT) #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6 -#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7 -#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8 -#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT) -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9 -#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT) -#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */ -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */ -#define 
I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0 -#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT) -#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */ -#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0 -#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT) #define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */ -#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0 -#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT) -#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2 -#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3 -#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT) #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4 #define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5 -#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6 -#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7 -#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16 -#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17 -#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18 -#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19 -#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT) -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20 -#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30 -#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT) -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31 -#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT) -#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */ -#define I40E_GLPCI_CNF_FLEX10_SHIFT 1 -#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT) -#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2 -#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT) #define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */ -#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0 -#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT) -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1 -#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2 #define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT) #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13 #define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT) -#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ -#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 -#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT) -#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK 
I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT) -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7 -#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15 -#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28 -#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT) -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31 -#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT) -#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT) -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24 -#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT) -#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */ -#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0 -#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT) -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16 -#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT) -#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* 
Reset: PCIR */ -#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0 -#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT) #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ -#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0 -#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT) -#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1 -#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT) -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3 -#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT) -#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4 -#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT) #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6 #define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT) -#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10 -#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT) -#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11 -#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT) -#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */ -#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0 -#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT) -#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6 -#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT) -#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9 -#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT) -#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */ -#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0 -#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT) -#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */ -#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0 -#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT) -#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */ -#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0 -#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16 -#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */ -#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0 -#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT) -#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16 -#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT) -#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */ -#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0 -#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT) -#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2 -#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5 -#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8 -#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, 
I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11 -#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT) -#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14 -#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT) -#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15 -#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT) -#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */ -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0 -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT) -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8 -#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT) -#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */ -#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0 -#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8 -#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16 -#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT) -#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24 -#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT) -#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */ -#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0 -#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT) -#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */ -#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0 -#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT) -#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */ -#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0 -#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT) -#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */ -#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0 -#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT) -#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */ -#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0 -#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT) -#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */ -#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0 -#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT) -#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */ -#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1 -#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT) -#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */ -#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0 -#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT) -#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */ -#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0 -#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT) -#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1 -#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT) -#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR 
*/ -#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9 -#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT) -#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11 -#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT) #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */ -#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0 -#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT) -#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3 -#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT) -#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8 -#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT) #define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */ -#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0 -#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT) #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 -#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT) #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ -#define I40E_PF_PCI_CIAD_DATA_SHIFT 0 -#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT) -#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */ -#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0 -#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT) -#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1 -#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT) -#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2 -#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT) -#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */ -#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2 -#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT) -#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3 -#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT) -#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4 -#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT) -#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5 -#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT) -#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */ -#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0 -#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT) -#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16 -#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT) -#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */ -#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0 -#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT) -#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3 -#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT) -#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */ -#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0 -#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT) -#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1 -#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT) -#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2 -#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT) -#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */ -#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0 -#define 
I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT) -#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */ -#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0 -#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT) -#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */ -#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0 -#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT) -#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */ -#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */ -#define I40E_PFPCI_PM_PME_EN_SHIFT 0 -#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT) -#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */ -#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0 -#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT) -#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ -#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0 -#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT) -#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16 -#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT) -#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */ -#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */ -#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127 -#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */ -#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0 -#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT) -#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */ -#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0 -#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT) -#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */ -#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0 -#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT) #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ -#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29 -#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT) #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT) -#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */ -#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16 -#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT) -#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24 -#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT) -#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26 -#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, 
I40E_PRTPM_EEEC_TEEE_DLY_SHIFT) -#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */ -#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31 -#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT) -#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */ -#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0 -#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT) -#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16 -#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT) -#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */ -#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0 -#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT) -#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */ -#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0 -#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT) -#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1 -#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT) -#define I40E_PRTPM_GC_RATD_SHIFT 2 -#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT) -#define I40E_PRTPM_GC_LCDMP_SHIFT 3 -#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT) -#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31 -#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT) #define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */ -#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0 -#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT) #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ -#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 -#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) -#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GL_PRS_FVBM_MAX_INDEX 3 -#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 -#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) -#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 -#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) -#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 -#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) -#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ -#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 -#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) -#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */ -#define I40E_GLRPB_GHW_GHW_SHIFT 0 -#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT) -#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */ -#define I40E_GLRPB_GLW_GLW_SHIFT 0 -#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT) -#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */ -#define I40E_GLRPB_PHW_PHW_SHIFT 0 -#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT) -#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */ -#define I40E_GLRPB_PLW_PLW_SHIFT 0 -#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT) -#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_DHW_MAX_INDEX 7 -#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0 -#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT) -#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ 
-#define I40E_PRTRPB_DLW_MAX_INDEX 7 -#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0 -#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT) -#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_DPS_MAX_INDEX 7 -#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0 -#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT) -#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_SHT_MAX_INDEX 7 -#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0 -#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT) -#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */ -#define I40E_PRTRPB_SHW_SHW_SHIFT 0 -#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT) -#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTRPB_SLT_MAX_INDEX 7 -#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0 -#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT) -#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */ -#define I40E_PRTRPB_SLW_SLW_SHIFT 0 -#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT) -#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */ -#define I40E_PRTRPB_SPS_SPS_SHIFT 0 -#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT) -#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */ -#define I40E_GLQF_CTL_HTOEP_SHIFT 1 -#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT) -#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2 -#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT) -#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3 -#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT) -#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6 -#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT) -#define I40E_GLQF_CTL_RSVD_SHIFT 7 -#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT) -#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8 -#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT) -#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11 -#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT) -#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14 -#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT) -#define I40E_GLQF_CTL_FDBEST_SHIFT 17 -#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT) -#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25 -#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT) -#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26 -#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT) -#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27 -#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT) #define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */ #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT) @@ -2112,36 +405,7 @@ #define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT) #define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_GLQF_HKEY_MAX_INDEX 12 -#define I40E_GLQF_HKEY_KEY_0_SHIFT 0 -#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT) -#define 
I40E_GLQF_HKEY_KEY_1_SHIFT 8 -#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT) -#define I40E_GLQF_HKEY_KEY_2_SHIFT 16 -#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT) -#define I40E_GLQF_HKEY_KEY_3_SHIFT 24 -#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT) -#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HSYM_MAX_INDEX 63 -#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0 -#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT) #define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */ -#define I40E_GLQF_PCNT_MAX_INDEX 511 -#define I40E_GLQF_PCNT_PCNT_SHIFT 0 -#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT) -#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_SWAP_MAX_INDEX 1 -#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0 -#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT) -#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6 -#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT) -#define I40E_GLQF_SWAP_FLEN0_SHIFT 12 -#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT) -#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16 -#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT) -#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22 -#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT) -#define I40E_GLQF_SWAP_FLEN1_SHIFT 28 -#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT) #define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */ #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0 #define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT) @@ -2159,54 +423,19 @@ #define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT) #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19 #define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT) -#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20 -#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT) -#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24 -#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT) #define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */ #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0 #define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT) -#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */ -#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0 -#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT) -#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8 -#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT) #define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */ #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0 #define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT) #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16 #define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT) #define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_PFQF_HENA_MAX_INDEX 1 -#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT) #define 
I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */ #define I40E_PFQF_HKEY_MAX_INDEX 12 -#define I40E_PFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT) -#define I40E_PFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT) -#define I40E_PFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT) -#define I40E_PFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT) #define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_PFQF_HLUT_MAX_INDEX 127 -#define I40E_PFQF_HLUT_LUT0_SHIFT 0 -#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT) -#define I40E_PFQF_HLUT_LUT1_SHIFT 8 -#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT) -#define I40E_PFQF_HLUT_LUT2_SHIFT 16 -#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT) -#define I40E_PFQF_HLUT_LUT3_SHIFT 24 -#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT) -#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */ -#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0 -#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT) -#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 -#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 -#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) #define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_INSET_MAX_INDEX 63 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 @@ -2215,14 +444,7 @@ #define I40E_PRTQF_FD_INSET_MAX_INDEX 63 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) -#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ -#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 -#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 -#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT) -#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16 -#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT) #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */ -#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5 @@ -2230,775 +452,148 @@ #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */ -#define I40E_VFQF_HENA1_MAX_INDEX 1 -#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT) #define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HKEY1_MAX_INDEX 12 -#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT) -#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8 
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT) -#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT) -#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT) #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */ #define I40E_VFQF_HLUT1_MAX_INDEX 15 -#define I40E_VFQF_HLUT1_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT) -#define I40E_VFQF_HLUT1_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT) -#define I40E_VFQF_HLUT1_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT) -#define I40E_VFQF_HLUT1_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT) -#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION1_MAX_INDEX 7 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT) -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT) -#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ 
-#define I40E_VPQF_CTL_MAX_INDEX 127 -#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0 -#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT) -#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5 -#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT) -#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10 -#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT) -#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14 -#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT) -#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */ -#define I40E_VSIQF_CTL_MAX_INDEX 383 -#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0 -#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT) -#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1 -#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2 -#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3 -#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4 -#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT) -#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5 -#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT) -#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */ -#define I40E_VSIQF_TCREGION_MAX_INDEX 3 -#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0 -#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT) -#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9 -#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT) -#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16 -#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT) -#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25 -#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT) -#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOECRC_MAX_INDEX 143 -#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0 -#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT) -#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDDPC_MAX_INDEX 143 -#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0 -#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT) -#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIFEC_MAX_INDEX 143 -#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0 -#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT) -#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143 -#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0 -#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT) -#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIXEC_MAX_INDEX 143 -#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0 -#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT) -#define 
I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDIXVC_MAX_INDEX 143 -#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0 -#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT) -#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWRCH_MAX_INDEX 143 -#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0 -#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT) -#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWRCL_MAX_INDEX 143 -#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0 -#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT) -#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWTCH_MAX_INDEX 143 -#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0 -#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT) -#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEDWTCL_MAX_INDEX 143 -#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0 -#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT) -#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOELAST_MAX_INDEX 143 -#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0 -#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT) -#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEPRC_MAX_INDEX 143 -#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0 -#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT) -#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOEPTC_MAX_INDEX 143 -#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0 -#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT) -#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_FCOERPDC_MAX_INDEX 143 -#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0 -#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT) -#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_RXERR1_L_MAX_INDEX 143 -#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0 -#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT) -#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */ -#define I40E_GL_RXERR2_L_MAX_INDEX 143 -#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0 -#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT) #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPRCH_MAX_INDEX 3 -#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT) #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPRCL_MAX_INDEX 3 -#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT) #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* 
_i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPTCH_MAX_INDEX 3 -#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT) #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_BPTCL_MAX_INDEX 3 -#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT) #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_CRCERRS_MAX_INDEX 3 -#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0 -#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT) #define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GORCH_MAX_INDEX 3 -#define I40E_GLPRT_GORCH_GORCH_SHIFT 0 -#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT) #define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GORCL_MAX_INDEX 3 -#define I40E_GLPRT_GORCL_GORCL_SHIFT 0 -#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT) #define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GOTCH_MAX_INDEX 3 -#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT) #define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_GOTCL_MAX_INDEX 3 -#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT) #define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_ILLERRC_MAX_INDEX 3 -#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0 -#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT) -#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LDPC_MAX_INDEX 3 -#define I40E_GLPRT_LDPC_LDPC_SHIFT 0 -#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT) #define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3 -#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0 -#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT) #define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3 -#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0 -#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT) #define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXONRXC_MAX_INDEX 3 -#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0 -#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT) #define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_LXONTXC_MAX_INDEX 3 -#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0 -#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT) #define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MLFC_MAX_INDEX 3 -#define I40E_GLPRT_MLFC_MLFC_SHIFT 0 -#define 
I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT) #define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPRCH_MAX_INDEX 3 -#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT) #define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPRCL_MAX_INDEX 3 -#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT) #define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPTCH_MAX_INDEX 3 -#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT) #define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MPTCL_MAX_INDEX 3 -#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT) #define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_MRFC_MAX_INDEX 3 -#define I40E_GLPRT_MRFC_MRFC_SHIFT 0 -#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT) #define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1023H_MAX_INDEX 3 -#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0 -#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT) #define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1023L_MAX_INDEX 3 -#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0 -#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT) #define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC127H_MAX_INDEX 3 -#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0 -#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT) #define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC127L_MAX_INDEX 3 -#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0 -#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT) #define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1522H_MAX_INDEX 3 -#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0 -#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT) #define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC1522L_MAX_INDEX 3 -#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0 -#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT) #define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC255H_MAX_INDEX 3 -#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0 -#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT) #define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC255L_MAX_INDEX 3 -#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0 -#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT) #define 
I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC511H_MAX_INDEX 3 -#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0 -#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT) #define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC511L_MAX_INDEX 3 -#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0 -#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT) #define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC64H_MAX_INDEX 3 -#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0 -#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT) #define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC64L_MAX_INDEX 3 -#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0 -#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT) #define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC9522H_MAX_INDEX 3 -#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0 -#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT) #define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PRC9522L_MAX_INDEX 3 -#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0 -#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT) #define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1023H_MAX_INDEX 3 -#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0 -#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT) #define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1023L_MAX_INDEX 3 -#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0 -#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT) #define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC127H_MAX_INDEX 3 -#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0 -#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT) #define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC127L_MAX_INDEX 3 -#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0 -#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT) #define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1522H_MAX_INDEX 3 -#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0 -#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT) #define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC1522L_MAX_INDEX 3 -#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0 -#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT) #define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC255H_MAX_INDEX 3 -#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0 -#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT) #define 
I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC255L_MAX_INDEX 3 -#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0 -#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT) #define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC511H_MAX_INDEX 3 -#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0 -#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT) #define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC511L_MAX_INDEX 3 -#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0 -#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT) #define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC64H_MAX_INDEX 3 -#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0 -#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT) #define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC64L_MAX_INDEX 3 -#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0 -#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT) #define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522H_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0 -#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT) #define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_PTC9522L_MAX_INDEX 3 -#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0 -#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT) #define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT) #define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0 -#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT) #define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONRXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0 -#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT) #define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_PXONTXC_MAX_INDEX 3 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0 -#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT) #define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RDPC_MAX_INDEX 3 -#define I40E_GLPRT_RDPC_RDPC_SHIFT 0 -#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT) #define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RFC_MAX_INDEX 3 -#define I40E_GLPRT_RFC_RFC_SHIFT 0 -#define 
I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT) #define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RJC_MAX_INDEX 3 -#define I40E_GLPRT_RJC_RJC_SHIFT 0 -#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT) #define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RLEC_MAX_INDEX 3 -#define I40E_GLPRT_RLEC_RLEC_SHIFT 0 -#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT) #define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_ROC_MAX_INDEX 3 -#define I40E_GLPRT_ROC_ROC_SHIFT 0 -#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT) #define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RUC_MAX_INDEX 3 -#define I40E_GLPRT_RUC_RUC_SHIFT 0 -#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT) -#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_RUPP_MAX_INDEX 3 -#define I40E_GLPRT_RUPP_RUPP_SHIFT 0 -#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT) #define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */ -#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0 -#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT) #define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_TDOLD_MAX_INDEX 3 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0 -#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT) #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCH_MAX_INDEX 3 -#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT) #define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPRCL_MAX_INDEX 3 -#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT) #define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCH_MAX_INDEX 3 -#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT) #define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_GLPRT_UPTCL_MAX_INDEX 3 -#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0 -#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT) #define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCH_MAX_INDEX 15 -#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT) #define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPRCL_MAX_INDEX 15 -#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT) #define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCH_MAX_INDEX 15 
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT) #define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_BPTCL_MAX_INDEX 15 -#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT) #define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCH_MAX_INDEX 15 -#define I40E_GLSW_GORCH_GORCH_SHIFT 0 -#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT) #define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GORCL_MAX_INDEX 15 -#define I40E_GLSW_GORCL_GORCL_SHIFT 0 -#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT) #define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCH_MAX_INDEX 15 -#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT) #define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_GOTCL_MAX_INDEX 15 -#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT) #define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCH_MAX_INDEX 15 -#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT) #define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPRCL_MAX_INDEX 15 -#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT) #define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCH_MAX_INDEX 15 -#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT) #define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_MPTCL_MAX_INDEX 15 -#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT) #define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_RUPP_MAX_INDEX 15 -#define I40E_GLSW_RUPP_RUPP_SHIFT 0 -#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT) #define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_TDPC_MAX_INDEX 15 -#define I40E_GLSW_TDPC_TDPC_SHIFT 0 -#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT) #define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCH_MAX_INDEX 15 -#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT) #define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPRCL_MAX_INDEX 15 -#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT) #define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCH_MAX_INDEX 15 -#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0 -#define 
I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT) #define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLSW_UPTCL_MAX_INDEX 15 -#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT) #define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCH_MAX_INDEX 383 -#define I40E_GLV_BPRCH_BPRCH_SHIFT 0 -#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT) #define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPRCL_MAX_INDEX 383 -#define I40E_GLV_BPRCL_BPRCL_SHIFT 0 -#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT) #define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCH_MAX_INDEX 383 -#define I40E_GLV_BPTCH_BPTCH_SHIFT 0 -#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT) #define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_BPTCL_MAX_INDEX 383 -#define I40E_GLV_BPTCL_BPTCL_SHIFT 0 -#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT) #define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCH_MAX_INDEX 383 -#define I40E_GLV_GORCH_GORCH_SHIFT 0 -#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT) #define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GORCL_MAX_INDEX 383 -#define I40E_GLV_GORCL_GORCL_SHIFT 0 -#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT) #define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCH_MAX_INDEX 383 -#define I40E_GLV_GOTCH_GOTCH_SHIFT 0 -#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT) #define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_GOTCL_MAX_INDEX 383 -#define I40E_GLV_GOTCL_GOTCL_SHIFT 0 -#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT) #define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCH_MAX_INDEX 383 -#define I40E_GLV_MPRCH_MPRCH_SHIFT 0 -#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT) #define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPRCL_MAX_INDEX 383 -#define I40E_GLV_MPRCL_MPRCL_SHIFT 0 -#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT) #define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCH_MAX_INDEX 383 -#define I40E_GLV_MPTCH_MPTCH_SHIFT 0 -#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT) #define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_MPTCL_MAX_INDEX 383 -#define I40E_GLV_MPTCL_MPTCL_SHIFT 0 -#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT) #define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RDPC_MAX_INDEX 383 -#define I40E_GLV_RDPC_RDPC_SHIFT 0 -#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT) #define 
I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_RUPP_MAX_INDEX 383 -#define I40E_GLV_RUPP_RUPP_SHIFT 0 -#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) #define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_TEPC_MAX_INDEX 383 -#define I40E_GLV_TEPC_TEPC_SHIFT 0 -#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) #define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCH_MAX_INDEX 383 -#define I40E_GLV_UPRCH_UPRCH_SHIFT 0 -#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT) #define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPRCL_MAX_INDEX 383 -#define I40E_GLV_UPRCL_UPRCL_SHIFT 0 -#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT) #define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCH_MAX_INDEX 383 -#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0 -#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT) #define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ -#define I40E_GLV_UPTCL_MAX_INDEX 383 -#define I40E_GLV_UPTCL_UPTCL_SHIFT 0 -#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT) #define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT) #define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT) #define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT) #define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_RPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT) #define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0 -#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT) #define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TBCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0 -#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT) #define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCH_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0 -#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT) #define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* 
_i=0...7, _j=0...15 */ /* Reset: CORER */ -#define I40E_GLVEBTC_TPCL_MAX_INDEX 7 -#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0 -#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT) -#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0 -#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT) -#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_BPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0 -#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT) -#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GORCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GORCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0 -#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT) -#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127 -#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0 -#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT) -#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0 -#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT) -#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_MPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0 -#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT) -#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCH_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0 -#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT) -#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_GLVEBVL_UPCL_MAX_INDEX 127 -#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0 -#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT) -#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */ -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0 -#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT) -#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT) -#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1 -#define 
I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0 -#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT) -#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ -#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0 -#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT) -#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31 -#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT) -#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 -#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 -#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) -#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3 -#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8 -#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT) -#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 -#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT) -#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1 -#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 -#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1 -#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT) -#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_CLKO_MAX_INDEX 1 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0 -#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT) #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0 -#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT) #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 -#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3 -#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT) #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) -#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12 -#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT) #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT) #define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */ #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8 -#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT) -#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20 -#define 
I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24 -#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26 #define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31 #define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT) -#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0 -#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT) -#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0 -#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT) #define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0 -#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT) #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ -#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0 -#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT) #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT) #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ -#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT) #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ -#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2 -#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT) -#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3 -#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT) #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ -#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0 -#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1 -#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2 -#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT) -#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3 -#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT) -#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0 -#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK 
I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT) -#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ -#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0 -#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT) #define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0 -#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT) #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0 -#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT) #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT) #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0 -#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT) #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) @@ -3033,2304 +628,53 @@ #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 #define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT) #define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_VP_MDET_TX_MAX_INDEX 127 #define I40E_VP_MDET_TX_VALID_SHIFT 0 #define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT) -#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */ -#define I40E_GLPM_WUMC_NOTCO_SHIFT 0 -#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT) -#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1 -#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT) -#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2 -#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT) -#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3 -#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT) -#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16 -#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT) #define I40E_PFPM_APM 0x000B8080 /* Reset: POR */ #define I40E_PFPM_APM_APME_SHIFT 0 #define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT) -#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */ -#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0 -#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT) -#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */ -#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5 -#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT) #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */ -#define I40E_PFPM_WUFC_LNKC_SHIFT 0 -#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT) #define I40E_PFPM_WUFC_MAG_SHIFT 1 #define 
I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT) -#define I40E_PFPM_WUFC_MNG_SHIFT 3 -#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT) -#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4 -#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5 -#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6 -#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7 -#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8 -#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9 -#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10 -#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11 -#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT) -#define I40E_PFPM_WUFC_FLX0_SHIFT 16 -#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT) -#define I40E_PFPM_WUFC_FLX1_SHIFT 17 -#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT) -#define I40E_PFPM_WUFC_FLX2_SHIFT 18 -#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT) -#define I40E_PFPM_WUFC_FLX3_SHIFT 19 -#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT) -#define I40E_PFPM_WUFC_FLX4_SHIFT 20 -#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT) -#define I40E_PFPM_WUFC_FLX5_SHIFT 21 -#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT) -#define I40E_PFPM_WUFC_FLX6_SHIFT 22 -#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT) -#define I40E_PFPM_WUFC_FLX7_SHIFT 23 -#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT) -#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT) -#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */ -#define I40E_PFPM_WUS_LNKC_SHIFT 0 -#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT) -#define I40E_PFPM_WUS_MAG_SHIFT 1 -#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT) -#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2 -#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT) -#define I40E_PFPM_WUS_MNG_SHIFT 3 -#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT) -#define I40E_PFPM_WUS_FLX0_SHIFT 16 -#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT) -#define I40E_PFPM_WUS_FLX1_SHIFT 17 -#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT) -#define I40E_PFPM_WUS_FLX2_SHIFT 18 -#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT) -#define I40E_PFPM_WUS_FLX3_SHIFT 19 -#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT) -#define I40E_PFPM_WUS_FLX4_SHIFT 20 -#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT) -#define I40E_PFPM_WUS_FLX5_SHIFT 21 -#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT) -#define I40E_PFPM_WUS_FLX6_SHIFT 22 -#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT) -#define I40E_PFPM_WUS_FLX7_SHIFT 23 -#define 
I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT) -#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31 -#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT) -#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */ -#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0 -#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT) -#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1 -#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT) -#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAH_MAX_INDEX 3 -#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0 -#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT) -#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26 -#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT) -#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30 -#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT) -#define I40E_PRTPM_SAH_AV_SHIFT 31 -#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT) -#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */ -#define I40E_PRTPM_SAL_MAX_INDEX 3 -#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0 -#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT) #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) #define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) #define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) #define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, 
I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, 
I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define 
I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ #define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, 
I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */ -#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0 -#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT) -#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */ -#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2 -#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3 -#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT) -#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4 -#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT) -#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8 -#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT) -#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */ -#define I40E_MNGSB_FDS_START_BC_SHIFT 0 -#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT) -#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16 -#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT) -#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT) -#define 
I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ -#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0 -#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */ -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */ -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12 -#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT) -#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16 -#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT) -#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */ -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7 -#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0 -#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT) -#define I40E_GL_FWSTS_FWROWD_SHIFT 8 -#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT) -#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */ -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define 
I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_CEQPART_MAX_INDEX 15 -#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */ -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0 -#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT) -#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBCQPART_MAX_INDEX 15 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */ -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0 -#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT) -#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_DBQPPART_MAX_INDEX 15 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */ -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0 -#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT) -#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */ -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT) -#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PECQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT) -#define 
I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */ -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */ -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT) -#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */ -#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0 -#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT) -#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */ -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0 -#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT) -#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */ -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0 -#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT) -#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */ -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0 -#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT) -#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */ -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0 -#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT) -#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT) -#define 
I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0 -#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT) -#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0 -#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT) -#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */ -#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT) -#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */ -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT) -#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */ -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0 -#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT) -#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */ -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0 -#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT) -#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15 -#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define 
I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */ -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0 -#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT) -#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */ -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0 -#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT) -#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */ -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0 -#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT) -#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */ -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0 -#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT) -#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */ -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0 -#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT) -#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT) -#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0 -#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0 -#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT) -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16 -#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT) -#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0 -#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, 
I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT) -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT) -#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0 -#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT) -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16 -#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT) -#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0 -#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT) -#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0 -#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT) -#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPDINV_MAX_INDEX 31 -#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0 -#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT) -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT) -#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16 -#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT) -#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT) -#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT) -#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0 -#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT) -#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0 -#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT) -#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0 -#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT) -#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0 -#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK 
I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT) -#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0 -#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT) -#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0 -#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT) -#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT) -#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0 -#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT) -#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT) -#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0 -#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT) -#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT) -#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0 -#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT) -#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0 -#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT) -#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0 -#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT) -#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0 -#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT) -#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31 -#define 
I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0 -#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT) -#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0 -#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT) -#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT) -#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0 -#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT) -#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0 -#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT) -#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLHMC_VFSDPART_MAX_INDEX 31 -#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0 -#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT) -#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16 -#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */ -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */ -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT) -#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */ -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0 -#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT) -#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8 -#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT) -#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20 -#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT) -#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15 -#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, 
I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT) -#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */ -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0 -#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT) -#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */ -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0 -#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT) -#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */ -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0 -#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */ -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0 -#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2 -#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4 -#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8 -#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11 -#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12 -#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13 -#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT) -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14 -#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT) -#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */ -#define 
I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0 -#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT) -#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */ -#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0 -#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT) -#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2 -#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT) -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3 -#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT) #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT) #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT) -#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VPLAN_QBASE_MAX_INDEX 127 -#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0 -#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT) -#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11 -#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT) -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31 -#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT) -#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */ -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0 -#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT) -#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */ -#define I40E_GLNVM_AL_REQ_POR_SHIFT 0 -#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT) -#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2 -#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT) -#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3 -#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT) -#define I40E_GLNVM_AL_REQ_PE_SHIFT 4 -#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT) -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5 -#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT) -#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */ -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0 -#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT) -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12 -#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT) #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */ #define I40E_GLNVM_FLA_LOCKED_SHIFT 6 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT) #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */ -#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0 -#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, 
I40E_GLNVM_ULD_PCIER_DONE_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1 -#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3 -#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT) -#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4 -#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5 -#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT) -#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8 -#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT) -#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9 -#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT) -#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10 -#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT) -#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */ -#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0 -#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1 -#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2 -#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT) -#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3 -#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4 -#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5 -#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6 -#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7 -#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT) -#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8 -#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT) -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9 -#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT) -#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10 -#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT) -#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */ -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0 -#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1 -#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2 -#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3 -#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4 -#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5 -#define 
I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6 -#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7 -#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8 -#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9 -#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10 -#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11 -#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12 -#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13 -#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14 -#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15 -#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT) -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16 -#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT) -#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */ -#define I40E_MNGSB_DADD_ADDR_SHIFT 0 -#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT) -#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */ -#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0 -#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT) -#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */ -#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0 -#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT) -#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8 -#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT) -#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26 -#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28 -#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT) -#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30 -#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT) -#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31 -#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT) -#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */ -#define I40E_MNGSB_RDATA_DATA_SHIFT 0 -#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT) -#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */ -#define 
I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0 -#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT) -#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8 -#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT) -#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16 -#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT) -#define I40E_MNGSB_RHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT) -#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27 -#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT) -#define I40E_MNGSB_RHDR0_EH_SHIFT 31 -#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT) -#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */ -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0 -#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26 -#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30 -#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT) -#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31 -#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT) -#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */ -#define I40E_MNGSB_WDATA_DATA_SHIFT 0 -#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT) -#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */ -#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0 -#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT) -#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12 -#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16 -#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT) -#define I40E_MNGSB_WHDR0_TAG_SHIFT 24 -#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT) -#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */ -#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0 -#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT) -#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */ -#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0 -#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT) -#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21 -#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16 -#define 
I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define 
I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */ -#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */ -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16 -#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT) -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 -#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10 -#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT) -#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */ -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0 -#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1 -#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT) -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2 -#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6 -#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT) -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16 -#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT) -#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */ 
-#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */ -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0 -#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT) -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16 -#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT) -#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 -#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) -#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 -#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) -#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 -#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) -#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 -#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 -#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 -#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) -#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ -#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 -#define 
I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) -#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) -#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) -#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 -#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 -#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 -#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 -#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 -#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 -#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 -#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) -#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) -#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 -#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) -#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 -#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) -#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + 
((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) -#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ -#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 -#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 -#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 -#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) -#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 -#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) -#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 -#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ -#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ -#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 
0 -#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ -#define I40E_PFPE_CQACK_PECQID_SHIFT 0 -#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT) -#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ -#define I40E_PFPE_CQARM_PECQID_SHIFT 0 -#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT) -#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ -#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ -#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 -#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) -#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ -#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_PFPE_UDACTRL 
0x00008700 /* Reset: PFR */ -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0 -#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1 -#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2 -#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3 -#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT) -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 -#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT) -#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */ -#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0 -#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT) -#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31 -#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT) -#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ -#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */ -#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0 -#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT) -#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0 -#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */ -#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0 -#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13 -#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT) -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30 -#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */ -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT) -#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13 -#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT) -#define 
I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30 -#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT) -#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT) -#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0 -#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT) -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31 -#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT) -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0 -#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8 -#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16 -#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT) -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24 -#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT) -#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */ -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0 -#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8 -#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK 
I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT) -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16 -#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT) -#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 -#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQACK_MAX_INDEX 127 -#define I40E_VFPE_CQACK_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT) -#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQARM_MAX_INDEX 127 -#define I40E_VFPE_CQARM_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT) -#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPDB_MAX_INDEX 127 -#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 -#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define 
I40E_VFPE_IPCONFIG0_MAX_INDEX 127 -#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 -#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) -#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 -#define 
I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* 
Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) 
/* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) 
* 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ 
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 -#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 
0 -#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ -#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) -#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 -#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) -#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) -#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 -#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) -#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 -#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) -#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 -#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) -#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, 
I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) -#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 -#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) -#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 -#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) -#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) -#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 -#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 -#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) -#define 
I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, 
I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 -#define 
I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 -#define 
I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 -#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) -#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 -#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) -#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) -#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) -#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) -#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define 
I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) -#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 -#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) -#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) -#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) -#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) -#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 -#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) -#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 -#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) -#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 -#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) -#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) -#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 -#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 -#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) -#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) -#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) -#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 -#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) -#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ -#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 -#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) -#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */ -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0 -#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT) -#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */ -#define I40E_GLQF_APBVT_MAX_INDEX 2047 -#define I40E_GLQF_APBVT_APBVT_SHIFT 0 -#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT) -#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */ -#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 -#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) -#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_FD_MSK_MAX_INDEX 1 -#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 -#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) -#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT) #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 -#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 -#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) -#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 
*/ /* Reset: CORER */ -#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 -#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 -#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) -#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 -#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ -#define I40E_GLQF_ORT_MAX_INDEX 63 #define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 #define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) #define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 #define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) -#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ -#define I40E_GLQF_PIT_MAX_INDEX 23 -#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 -#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) -#define I40E_GLQF_PIT_FSIZE_SHIFT 5 -#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) -#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 -#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 -#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT) -#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */ -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0 -#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT) -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8 -#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT) -#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */ -#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0 -#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT) -#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5 -#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT) /* Redefined for X722 family */ -#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */ -#define I40E_X722_PFQF_HLUT_MAX_INDEX 127 -#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0 -#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8 -#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16 -#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT) -#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24 -#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT) -#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_PFQF_HREGION_MAX_INDEX 7 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_PFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define 
I40E_PFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_PFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_PFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_PFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_PFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_PFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT) -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_PFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT) -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8 -#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT) -#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HKEY_MAX_INDEX 12 -#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT) -#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT) -#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT) -#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT) -#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */ -#define I40E_VSIQF_HLUT_MAX_INDEX 15 -#define I40E_VSIQF_HLUT_LUT0_SHIFT 0 -#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT) -#define I40E_VSIQF_HLUT_LUT1_SHIFT 8 -#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT) -#define I40E_VSIQF_HLUT_LUT2_SHIFT 16 -#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT) -#define I40E_VSIQF_HLUT_LUT3_SHIFT 24 -#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT) #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */ -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0 -#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT) -#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */ 
-#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0 -#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK 
I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) #endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5c255977fd58..8d3c9d37e42e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -18,10 +18,7 @@ #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */ #define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */ -#define I40E_ITR_100K 10 /* all values below must be even */ -#define I40E_ITR_50K 20 #define I40E_ITR_20K 50 -#define I40E_ITR_18K 60 #define I40E_ITR_8K 122 #define I40E_MAX_ITR 8160 /* maximum value as per datasheet */ #define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC) @@ -52,9 +49,6 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl) else return 0; } -#define I40E_INTRL_8K 125 /* 8000 ints/sec */ -#define I40E_INTRL_62K 16 /* 62500 ints/sec */ -#define I40E_INTRL_83K 12 /* 83333 ints/sec */ #define I40E_QUEUE_END_OF_LIST 0x7FF @@ -73,7 +67,6 @@ enum i40e_dyn_idx_t { /* these are indexes into ITRN registers */ #define I40E_RX_ITR I40E_IDX_ITR0 #define I40E_TX_ITR I40E_IDX_ITR1 -#define I40E_PE_ITR I40E_IDX_ITR2 /* Supported RSS offloads */ #define I40E_DEFAULT_RSS_HENA ( \ @@ -193,13 +186,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ -#define I40E_RX_INCREMENT(r, i) \ - do { \ - (i)++; \ - if ((i) == (r)->count) \ - i = 0; \ - r->next_to_clean = i; \ - } while (0) #define I40E_RX_NEXT_DESC(r, i, n) \ do { \ @@ -209,11 +195,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, (n) = I40E_RX_DESC((r), (i)); \ } while (0) -#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n) \ - do { \ - I40E_RX_NEXT_DESC((r), (i), (n)); \ - prefetch((n)); \ - } while (0) #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 @@ -262,15 +243,12 @@ static inline unsigned int i40e_txd_use_count(unsigned int size) /* Tx Descriptors needed, worst case */ #define DESC_NEEDED (MAX_SKB_FRAGS + 6) -#define I40E_MIN_DESC_PENDING 4 #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) #define I40E_TX_FLAGS_IPV4 BIT(4) #define I40E_TX_FLAGS_IPV6 BIT(5) -#define I40E_TX_FLAGS_FCCRC BIT(6) -#define I40E_TX_FLAGS_FSO BIT(7) #define I40E_TX_FLAGS_TSYN BIT(8) #define I40E_TX_FLAGS_FD_SB BIT(9) #define I40E_TX_FLAGS_UDP_TUNNEL BIT(10) @@ -332,9 +310,7 @@ enum i40e_ring_state_t { /* some useful defines for virtchannel interface, which * is the only remaining user of header split */ -#define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 @@ -444,7 +420,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring) #define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e #define I40E_ITR_ADAPTIVE_LATENCY 0x8000 #define I40E_ITR_ADAPTIVE_BULK 0x0000 -#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY)) struct i40e_ring_container { struct i40e_ring *ring; /* pointer to linked list of ring(s) */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 63e098f7cb63..52410d609ba1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -84,8 +84,6 @@ enum i40e_debug_mask { I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) -#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \ - I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) @@ -178,21 +176,9 @@ struct i40e_link_status { u8 module_type[3]; /* 1st byte: module identifier */ #define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 /* 3rd byte: ethernet compliance codes for 1G */ #define I40E_MODULE_TYPE_1000BASE_SX 0x01 #define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 }; struct i40e_phy_info { @@ -262,9 +248,6 @@ struct i40e_phy_info { /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 /* Cloud filter modes: * Mode1: Filter on L4 port only @@ -273,14 +256,10 @@ struct 
i40e_hw_capabilities { */ #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 -#define I40E_CLOUD_FILTER_MODE3 0x8 #define I40E_SWITCH_MODE_MASK 0xF u32 management_mode; u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 u32 npar_enable; u32 os2bmc; u32 valid_functions; @@ -294,13 +273,8 @@ struct i40e_hw_capabilities { bool flex10_enable; bool flex10_capable; u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 bool sec_rev_disabled; bool update_disabled; @@ -421,11 +395,8 @@ enum i40e_nvmupd_state { #define I40E_NVM_AQE 0xe #define I40E_NVM_EXEC 0xf -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) #define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ struct i40e_nvm_access { u32 command; @@ -438,7 +409,6 @@ struct i40e_nvm_access { /* (Q)SFP module access definitions */ #define I40E_I2C_EEPROM_DEV_ADDR 0xA0 #define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 #define I40E_MODULE_REVISION_ADDR 0x01 #define I40E_MODULE_SFF_8472_COMP 0x5E #define I40E_MODULE_SFF_8472_SWAP 0x5C @@ -547,7 +517,6 @@ struct i40e_dcbx_config { #define I40E_DCBX_MODE_CEE 0x1 #define I40E_DCBX_MODE_IEEE 0x2 u8 app_mode; -#define I40E_DCBX_APPS_NON_WILLING 0x1 u32 numapps; u32 tlv_status; /* CEE mode TLV status */ struct i40e_dcb_ets_config etscfg; @@ -895,9 +864,6 @@ enum i40e_rx_ptype_payload_layer { #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) #define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 #define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) @@ -926,7 +892,6 @@ enum i40e_rx_desc_pe_status_bits { I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 }; -#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 #define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 @@ -963,8 +928,6 @@ struct i40e_tx_desc { __le64 cmd_type_offset_bsz; }; -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) enum i40e_tx_desc_dtype_value { I40E_TX_DESC_DTYPE_DATA = 0x0, @@ -980,7 +943,6 @@ enum i40e_tx_desc_dtype_value { }; #define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) enum i40e_tx_desc_cmd_bits { I40E_TX_DESC_CMD_EOP = 0x0001, @@ -1004,8 +966,6 @@ enum i40e_tx_desc_cmd_bits { }; #define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) enum i40e_tx_desc_length_fields { /* Note: These are predefined bit offsets */ @@ -1015,11 +975,8 @@ enum i40e_tx_desc_length_fields { }; #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) #define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) /* Context descriptors */ struct i40e_tx_context_desc { @@ -1029,11 +986,8 @@ struct i40e_tx_context_desc { __le64 type_cmd_tso_mss; }; -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) #define I40E_TXD_CTX_QW1_CMD_SHIFT 
4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) enum i40e_tx_ctx_desc_cmd_bits { I40E_TX_CTX_DESC_TSO = 0x01, @@ -1048,19 +1002,10 @@ enum i40e_tx_ctx_desc_cmd_bits { }; #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) #define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) enum i40e_tx_ctx_desc_eipt_offload { I40E_TX_CTX_EXT_IP_NONE = 0x0, @@ -1070,28 +1015,16 @@ enum i40e_tx_ctx_desc_eipt_offload { }; #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) #define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) #define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK #define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 #define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) @@ -1161,11 +1094,8 @@ enum i40e_filter_program_desc_fd_status { I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) #define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) enum i40e_filter_program_desc_pcmd { I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, @@ -1316,7 +1246,6 @@ struct i40e_hw_port_stats { #define I40E_NVM_OEM_VER_OFF 0x83 #define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 #define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 #define I40E_SR_NVM_EETRACK_LO 0x2D #define I40E_SR_NVM_EETRACK_HI 0x2E #define I40E_SR_VPD_PTR 0x2F @@ -1329,7 +1258,6 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) #define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) #define I40E_PTR_TYPE BIT(15) #define I40E_SR_OCP_CFG_WORD0 0x2B @@ -1463,14 +1391,11 @@ struct i40e_lldp_variables { /* Offsets into Alternate Ram */ #define I40E_ALT_STRUCT_FIRST_PF_OFFSET 0 /* in dwords */ #define I40E_ALT_STRUCT_DWORDS_PER_PF 64 /* in dwords */ -#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET 0xD /* in dwords */ -#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET 0xC /* in dwords */ #define I40E_ALT_STRUCT_MIN_BW_OFFSET 0xE /* in dwords */ #define I40E_ALT_STRUCT_MAX_BW_OFFSET 0xF /* in dwords */ /* Alternate Ram Bandwidth Masks */ #define I40E_ALT_BW_VALUE_MASK 0xFF -#define I40E_ALT_BW_RELATIVE_MASK 0x40000000 #define I40E_ALT_BW_VALID_MASK 
0x80000000 /* RSS Hash Table Size */ @@ -1529,9 +1454,7 @@ struct i40e_package_header { /* Generic segment header */ struct i40e_generic_seg_header { #define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 #define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 u32 type; struct i40e_ddp_version version; u32 size; @@ -1541,7 +1464,6 @@ struct i40e_generic_seg_header { struct i40e_metadata_segment { struct i40e_generic_seg_header header; struct i40e_ddp_version version; -#define I40E_DDP_TRACKID_RDONLY 0 #define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF u32 track_id; char name[I40E_DDP_NAME_SIZE]; @@ -1575,10 +1497,6 @@ struct i40e_profile_section_header { #define SECTION_TYPE_AQ 0x00000801 #define SECTION_TYPE_RB_AQ 0x00001801 #define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 -#define SECTION_TYPE_PROTO 0x80000002 -#define SECTION_TYPE_PCTYPE 0x80000003 -#define SECTION_TYPE_PTYPE 0x80000004 u32 type; u32 offset; u32 size; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 631248c0981a..5491215d81de 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -10,7 +10,6 @@ #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2 -#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 #define I40E_VLAN_PRIORITY_SHIFT 13 diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index 72994baf4941..f41387a8969f 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -37,11 +37,6 @@ enum i40e_client_instance_state { struct i40e_ops; struct i40e_client; -/* HW does not define a type value for AEQ; only for RX/TX and CEQ. - * In order for us to keep the interface simple, SW will define a - * unique type value for AEQ. - */ -#define I40E_QUEUE_TYPE_PE_AEQ 0x80 #define I40E_QUEUE_INVALID_IDX 0xFFFF struct i40e_qv_info { @@ -56,7 +51,6 @@ struct i40e_qvlist_info { struct i40e_qv_info qv_info[1]; }; -#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF /* set of LAN parameters useful for clients managed by LAN */ @@ -87,7 +81,6 @@ struct i40e_info { u8 __iomem *hw_addr; u8 fid; /* function id, PF id or VF id */ #define I40E_CLIENT_FTYPE_PF 0 -#define I40E_CLIENT_FTYPE_VF 1 u8 ftype; /* function type, PF or VF */ void *pf; @@ -184,8 +177,6 @@ struct i40e_client { unsigned long state; /* client state */ atomic_t ref_cnt; /* Count of all the client devices of this kind */ u32 flags; -#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) -#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) u8 type; #define I40E_CLIENT_IWARP 0 const struct i40e_client_ops *ops; /* client ops provided by the client */ From 91c534b5e32527573454f73183e479d4f51594b0 Mon Sep 17 00:00:00 2001 From: Piotr Kwapulinski Date: Tue, 26 May 2020 12:51:12 +0200 Subject: [PATCH 0343/2056] i40e: make PF wait reset loop reliable Use jiffies to limit max waiting time for PF reset to succeed. Previous wait loop was unreliable. 
It required unreasonably long time to wait for PF reset after reboot when NIC was about to enter recovery mode Reviewed-by: Aleksandr Loktionov Signed-off-by: Piotr Kwapulinski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5f7f5147f9a7..3978b66dcffd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14606,25 +14606,23 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf) **/ static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) { - const unsigned short MAX_CNT = 1000; - const unsigned short MSECS = 10; + /* wait max 10 seconds for PF reset to succeed */ + const unsigned long time_end = jiffies + 10 * HZ; + struct i40e_hw *hw = &pf->hw; i40e_status ret; - int cnt; - for (cnt = 0; cnt < MAX_CNT; ++cnt) { + ret = i40e_pf_reset(hw); + while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) { + usleep_range(10000, 20000); ret = i40e_pf_reset(hw); - if (!ret) - break; - msleep(MSECS); } - if (cnt == MAX_CNT) { + if (ret == I40E_SUCCESS) + pf->pfr_count++; + else dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); - return ret; - } - pf->pfr_count++; return ret; } From fffeeddfcf57ebf652e80d27b416de7cb74e3507 Mon Sep 17 00:00:00 2001 From: Piotr Kwapulinski Date: Wed, 27 May 2020 14:12:04 -0700 Subject: [PATCH 0344/2056] i40e: detect and log info about pre-recovery mode Detect and log information about pre-recovery mode when firmware transitions to a recovery mode. When a firmware transitions to a recovery mode it stores a number of unexpected EMP resets in one of its registers. The number of EMP resets ranging from 0x21 to 0x2A indicates that FW transitions to recovery mode. Use these values to emit log entry about transition process. Previously the pre-recovery mode may not have been detected and there was no log entry when NIC was in pre-recovery mode. Reviewed-by: Aleksandr Loktionov Signed-off-by: Piotr Kwapulinski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 70 +++++++++++++------ .../net/ethernet/intel/i40e/i40e_register.h | 2 + 2 files changed, 51 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3978b66dcffd..841e49e1e091 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -14557,28 +14557,17 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags) **/ static bool i40e_check_recovery_mode(struct i40e_pf *pf) { - u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK; - bool is_recovery_mode = false; + u32 val = rd32(&pf->hw, I40E_GL_FWSTS); - if (pf->hw.mac.type == I40E_MAC_XL710) - is_recovery_mode = - val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || - val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK || - val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK || - val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK; - if (pf->hw.mac.type == I40E_MAC_X722) - is_recovery_mode = - val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK || - val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK; - if (is_recovery_mode) { - dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. 
Limiting functionality.\n"); - dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + if (val & I40E_GL_FWSTS_FWS1B_MASK) { + dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); + dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); set_bit(__I40E_RECOVERY_MODE, pf->state); return true; } - if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state)) - dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n"); + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) + dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n"); return false; } @@ -14626,6 +14615,47 @@ static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf) return ret; } +/** + * i40e_check_fw_empr - check if FW issued unexpected EMP Reset + * @pf: board private structure + * + * Check FW registers to determine if FW issued unexpected EMP Reset. + * Every time when unexpected EMP Reset occurs the FW increments + * a counter of unexpected EMP Resets. When the counter reaches 10 + * the FW should enter the Recovery mode + * + * Returns true if FW issued unexpected EMP Reset + **/ +static bool i40e_check_fw_empr(struct i40e_pf *pf) +{ + const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & + I40E_GL_FWSTS_FWS1B_MASK; + return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) && + (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10); +} + +/** + * i40e_handle_resets - handle EMP resets and PF resets + * @pf: board private structure + * + * Handle both EMP resets and PF resets and conclude whether there are + * any issues regarding these resets. If there are any issues then + * generate log entry. + * + * Return 0 if NIC is healthy or negative value when there are issues + * with resets + **/ +static i40e_status i40e_handle_resets(struct i40e_pf *pf) +{ + const i40e_status pfr = i40e_pf_loop_reset(pf); + const bool is_empr = i40e_check_fw_empr(pf); + + if (is_empr || pfr != I40E_SUCCESS) + dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); + + return is_empr ? 
I40E_ERR_RESET_FAILED : pfr; +} + /** * i40e_init_recovery_mode - initialize subsystems needed in recovery mode * @pf: board private structure @@ -14862,11 +14892,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pf_reset; } - err = i40e_pf_loop_reset(pf); - if (err) { - dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err); + err = i40e_handle_resets(pf); + if (err) goto err_pf_reset; - } i40e_check_recovery_mode(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 7cd3a08a1891..564df22f3f46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -43,6 +43,8 @@ #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */ #define I40E_GL_FWSTS_FWS1B_SHIFT 16 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT) +#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT) #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT) From 3a2c6ced90e18326ec64b9c4b5f68a2a127004c0 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Thu, 28 May 2020 22:27:12 -0700 Subject: [PATCH 0345/2056] i40e: Add a check to see if MFS is set A customer was chain-booting to provision his systems and one of the steps was setting MFS. MFS isn't cleared by normal warm reboots (clearing requires a GLOBR) and there was no indication of why Jumbo Frame receives were failing. Add a warning if MFS is set to anything lower than the default. Signed-off-by: Todd Fujinaka Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 841e49e1e091..12d7191c936a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15290,6 +15290,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_stat_str(&pf->hw, err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + /* make sure the MFS hasn't been set lower than the default */ +#define MAX_FRAME_SIZE_DEFAULT 0x2600 + val = (rd32(&pf->hw, I40E_PRTGL_SAH) & + I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT; + if (val < MAX_FRAME_SIZE_DEFAULT) + dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", + i, val); + /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. By doing so we stop a malicious VF from sending out * PAUSE or PFC frames and potentially controlling traffic for other From 34a2a3b83e2caf18ab85627f9c6a78bc0a1d1520 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Fri, 29 May 2020 00:18:33 -0700 Subject: [PATCH 0346/2056] net/intel: remove driver versions from Intel drivers As with other networking drivers, remove the unnecessary driver version from the Intel drivers. The ethtool driver information and module version will then report the kernel version instead. For ixgbe, i40e and ice drivers, the driver passes the driver version to the firmware to confirm that we are up and running. 
So we now pass the value of UTS_RELEASE to the firmware. This adminq call is required per the HAS document. The Device then sends an indication to the BMC that the PF driver is present. This is done using Host NC Driver Status Indication in NC-SI Get Link command or via the Host Network Controller Driver Status Change AEN. What the BMC may do with this information is implementation-dependent, but this is a standard NC-SI 1.1 command we honor per the HAS. CC: Bruce Allan CC: Jesse Brandeburg CC: Alek Loktionov CC: Kevin Liedtke CC: Aaron Rowden Signed-off-by: Jeff Kirsher Co-developed-by: Jacob Keller Tested-by: Aaron Brown --- drivers/net/ethernet/intel/e100.c | 6 +---- drivers/net/ethernet/intel/e1000/e1000.h | 1 - .../net/ethernet/intel/e1000/e1000_ethtool.c | 2 -- drivers/net/ethernet/intel/e1000/e1000_main.c | 5 +---- drivers/net/ethernet/intel/e1000e/e1000.h | 1 - drivers/net/ethernet/intel/e1000e/ethtool.c | 2 -- drivers/net/ethernet/intel/e1000e/netdev.c | 8 +------ drivers/net/ethernet/intel/fm10k/fm10k.h | 1 - .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 2 -- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 5 +---- drivers/net/ethernet/intel/i40e/i40e.h | 1 - .../net/ethernet/intel/i40e/i40e_ethtool.c | 2 -- drivers/net/ethernet/intel/i40e/i40e_main.c | 22 +++++-------------- drivers/net/ethernet/intel/iavf/iavf.h | 1 - .../net/ethernet/intel/iavf/iavf_ethtool.c | 1 - drivers/net/ethernet/intel/iavf/iavf_main.c | 14 +----------- drivers/net/ethernet/intel/iavf/iavf_type.h | 8 ------- drivers/net/ethernet/intel/ice/ice.h | 1 - drivers/net/ethernet/intel/ice/ice_ethtool.c | 1 - drivers/net/ethernet/intel/ice/ice_main.c | 22 ++++++------------- drivers/net/ethernet/intel/igb/igb.h | 1 - drivers/net/ethernet/intel/igb/igb_ethtool.c | 1 - drivers/net/ethernet/intel/igb/igb_main.c | 11 +--------- drivers/net/ethernet/intel/igbvf/ethtool.c | 2 -- drivers/net/ethernet/intel/igbvf/igbvf.h | 1 - drivers/net/ethernet/intel/igbvf/netdev.c | 5 +---- drivers/net/ethernet/intel/igc/igc.h | 1 - drivers/net/ethernet/intel/igc/igc_ethtool.c | 1 - drivers/net/ethernet/intel/igc/igc_main.c | 7 +----- drivers/net/ethernet/intel/ixgb/ixgb.h | 1 - .../net/ethernet/intel/ixgb/ixgb_ethtool.c | 2 -- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6 +---- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 - .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2 -- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 10 ++++----- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 2 -- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 - .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7 +----- 39 files changed, 29 insertions(+), 142 deletions(-) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 1b8d015ebfb0..91c64f91a835 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -150,8 +150,6 @@ #define DRV_NAME "e100" -#define DRV_EXT "-NAPI" -#define DRV_VERSION "3.5.24-k2"DRV_EXT #define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" @@ -165,7 +163,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); MODULE_FIRMWARE(FIRMWARE_D101M); MODULE_FIRMWARE(FIRMWARE_D101S); MODULE_FIRMWARE(FIRMWARE_D102E); @@ -2430,7 +2427,6 @@ static void e100_get_drvinfo(struct net_device *netdev, { struct nic *nic = netdev_priv(netdev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 
- strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info)); } @@ -3167,7 +3163,7 @@ static struct pci_driver e100_driver = { static int __init e100_init_module(void) { if (((1 << debug) - 1) & NETIF_MSG_DRV) { - pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); + pr_info("%s\n", DRV_DESCRIPTION); pr_info("%s\n", DRV_COPYRIGHT); } return pci_register_driver(&e100_driver); diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 7fad2f24dcad..4817eb13ca6f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -330,7 +330,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); dev_err(&adapter->pdev->dev, format, ## arg) extern char e1000_driver_name[]; -extern const char e1000_driver_version[]; int e1000_open(struct net_device *netdev); int e1000_close(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 6f45df5690d4..0b4196d2cdd4 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -533,8 +533,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, e1000_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, e1000_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 4b2de08137be..266899c0c933 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -10,8 +10,6 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k8-NAPI" -const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; /* e1000_pci_tbl - PCI Device ID Table @@ -194,7 +192,6 @@ static struct pci_driver e1000_driver = { MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; @@ -221,7 +218,7 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) static int __init e1000_init_module(void) { int ret; - pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); + pr_info("%s\n", e1000_driver_string); pr_info("%s\n", e1000_copyright); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 944abd5eae11..ba7a0f8f6937 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -460,7 +460,6 @@ enum latency_range { }; extern char e1000e_driver_name[]; -extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 1d47e2503072..11de79e49661 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -633,8 +633,6 @@ static void e1000_get_drvinfo(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); 
strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, e1000e_driver_version, - sizeof(drvinfo->version)); /* EEPROM image version # is reported as firmware version # for * PCI-E controllers diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 6f6479ca1267..75937a48d517 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -28,11 +28,7 @@ #include "e1000.h" -#define DRV_EXTRAVERSION "-k" - -#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; -const char e1000e_driver_version[] = DRV_VERSION; #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; @@ -7899,8 +7895,7 @@ static struct pci_driver e1000_driver = { **/ static int __init e1000_init_module(void) { - pr_info("Intel(R) PRO/1000 Network Driver - %s\n", - e1000e_driver_version); + pr_info("Intel(R) PRO/1000 Network Driver\n"); pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); return pci_register_driver(&e1000_driver); @@ -7922,6 +7917,5 @@ module_exit(e1000_exit_module); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 5b78362b82ac..f9be10a04dd6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -476,7 +476,6 @@ struct fm10k_cb { /* main */ extern char fm10k_driver_name[]; -extern const char fm10k_driver_version[]; int fm10k_init_queueing_scheme(struct fm10k_intfc *interface); void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface); __be16 fm10k_tx_encap_offload(struct sk_buff *skb); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 37fbc646deb9..30ea2b422678 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -449,8 +449,6 @@ static void fm10k_get_drvinfo(struct net_device *dev, strncpy(info->driver, fm10k_driver_name, sizeof(info->driver) - 1); - strncpy(info->version, fm10k_driver_version, - sizeof(info->version) - 1); strncpy(info->bus_info, pci_name(interface->pdev), sizeof(info->bus_info) - 1); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 17738b0a9873..05e9bdb5f4aa 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -11,9 +11,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.27.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" -const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = @@ -22,7 +20,6 @@ static const char fm10k_copyright[] = MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); /* single workqueue for entire fm10k driver */ struct workqueue_struct *fm10k_workqueue; @@ -35,7 +32,7 @@ struct workqueue_struct *fm10k_workqueue; **/ static int __init fm10k_init_module(void) { - pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version); + pr_info("%s\n", fm10k_driver_string); pr_info("%s\n", fm10k_copyright); /* create driver 
workqueue */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 8151671e5e0e..7618834b149b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -968,7 +968,6 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf, int i40e_up(struct i40e_vsi *vsi); void i40e_down(struct i40e_vsi *vsi); extern const char i40e_driver_name[]; -extern const char i40e_driver_version_str[]; void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags); void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired); int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index aa8026b1eb81..2dfd87f0bdfd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1893,8 +1893,6 @@ static void i40e_get_drvinfo(struct net_device *netdev, struct i40e_pf *pf = vsi->back; strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, i40e_driver_version_str, - sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw), sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 12d7191c936a..b928fb899b44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5,6 +5,7 @@ #include #include #include +#include /* Local includes */ #include "i40e.h" @@ -23,15 +24,6 @@ const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = "Intel(R) Ethernet Connection XL710 Network Driver"; -#define DRV_KERN "-k" - -#define DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 8 -#define DRV_VERSION_BUILD 20 -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) DRV_KERN -const char i40e_driver_version_str[] = DRV_VERSION; static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation."; /* a bit of forward declarations */ @@ -101,7 +93,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *i40e_wq; @@ -9843,11 +9834,11 @@ static void i40e_send_version(struct i40e_pf *pf) { struct i40e_driver_version dv; - dv.major_version = DRV_VERSION_MAJOR; - dv.minor_version = DRV_VERSION_MINOR; - dv.build_version = DRV_VERSION_BUILD; + dv.major_version = 0xff; + dv.minor_version = 0xff; + dv.build_version = 0xff; dv.subbuild_version = 0; - strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string)); + strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); i40e_aq_send_driver_version(&pf->hw, &dv, NULL); } @@ -15808,8 +15799,7 @@ static struct pci_driver i40e_driver = { **/ static int __init i40e_init_module(void) { - pr_info("%s: %s - version %s\n", i40e_driver_name, - i40e_driver_string, i40e_driver_version_str); + pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); /* There is no need to throttle the number of active tasks because diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 10b805ba03ee..8a65525a7c0d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -375,7 +375,6 @@ struct iavf_device { /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; -extern const char iavf_driver_version[]; extern struct workqueue_struct *iavf_wq; int iavf_up(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 181573822942..c93567f4d0f7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -571,7 +571,6 @@ static void iavf_get_drvinfo(struct net_device *netdev, struct iavf_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, iavf_driver_name, 32); - strlcpy(drvinfo->version, iavf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index fa82768e5eda..78bd9e3df3ac 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -21,16 +21,6 @@ char iavf_driver_name[] = "iavf"; static const char iavf_driver_string[] = "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; -#define DRV_KERN "-k" - -#define DRV_VERSION_MAJOR 3 -#define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 3 -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) \ - DRV_KERN -const char iavf_driver_version[] = DRV_VERSION; static const char iavf_copyright[] = "Copyright (c) 2013 - 2018 Intel Corporation."; @@ -57,7 +47,6 @@ MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); static const struct net_device_ops iavf_netdev_ops; struct workqueue_struct *iavf_wq; @@ -3982,8 +3971,7 @@ static int __init iavf_init_module(void) { int ret; - pr_info("iavf: %s - version %s\n", iavf_driver_string, - iavf_driver_version); + pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 7190a40c540c..de9fda78b43a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -192,14 +192,6 @@ struct iavf_hw { char err_str[16]; }; -struct iavf_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - /* RX Descriptors */ union iavf_16byte_rx_desc { struct { diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 5792ee616b5c..a4cda8212e64 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -55,7 +55,6 @@ #include "ice_xsk.h" #include "ice_arfs.h" -extern const char ice_drv_ver[]; #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 #define ICE_MIN_NUM_DESC 64 diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 68c38004a088..7066775769eb 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -179,7 +179,6 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) orom = &nvm->orom; strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); - strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version)); /* Display NVM version (from which the firmware version can be * determined) which contains more pertinent information. diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 082825e3cb39..ddfc52661721 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5,6 +5,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include "ice.h" #include "ice_base.h" #include "ice_lib.h" @@ -13,15 +14,7 @@ #include "ice_dcb_nl.h" #include "ice_devlink.h" -#define DRV_VERSION_MAJOR 0 -#define DRV_VERSION_MINOR 8 -#define DRV_VERSION_BUILD 2 - -#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ - __stringify(DRV_VERSION_MINOR) "." 
\ - __stringify(DRV_VERSION_BUILD) "-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" -const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; @@ -32,7 +25,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); MODULE_FIRMWARE(ICE_DDP_PKG_FILE); static int debug = -1; @@ -3168,11 +3160,11 @@ static enum ice_status ice_send_version(struct ice_pf *pf) { struct ice_driver_ver dv; - dv.major_ver = DRV_VERSION_MAJOR; - dv.minor_ver = DRV_VERSION_MINOR; - dv.build_ver = DRV_VERSION_BUILD; + dv.major_ver = 0xff; + dv.minor_ver = 0xff; + dv.build_ver = 0xff; dv.subbuild_ver = 0; - strscpy((char *)dv.driver_string, DRV_VERSION, + strscpy((char *)dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string)); return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); } @@ -3463,7 +3455,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_send_version(pf); if (err) { dev_err(dev, "probe failed sending driver version %s. error: %d\n", - ice_drv_ver, err); + UTS_RELEASE, err); goto err_alloc_sw_unroll; } @@ -3769,7 +3761,7 @@ static int __init ice_module_init(void) { int status; - pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); + pr_info("%s\n", ice_driver_string); pr_info("%s\n", ice_copyright); ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 0c9282e2aaec..2f015b60a995 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -642,7 +642,6 @@ enum igb_boards { }; extern char igb_driver_name[]; -extern char igb_driver_version[]; int igb_open(struct net_device *netdev); int igb_close(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 2cd003c5ad43..da60e8d2128f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -851,7 +851,6 @@ static void igb_get_drvinfo(struct net_device *netdev, struct igb_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); /* EEPROM image version # is reported as firmware version # for * 82575 controllers diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8bb3db2cbd41..ce611cb02c0a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -38,12 +38,6 @@ #include #include "igb.h" -#define MAJ 5 -#define MIN 6 -#define BUILD 0 -#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\ -__stringify(BUILD) "-k" - enum queue_mode { QUEUE_MODE_STRICT_PRIORITY, QUEUE_MODE_STREAM_RESERVATION, @@ -55,7 +49,6 @@ enum tx_queue_prio { }; char igb_driver_name[] = "igb"; -char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = "Intel(R) Gigabit Ethernet Network Driver"; static const char igb_copyright[] = @@ -240,7 +233,6 @@ static struct pci_driver igb_driver = { MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; @@ -666,8 +658,7 @@ static int __init igb_init_module(void) { int ret; - pr_info("%s - version %s\n", - igb_driver_string, igb_driver_version); + pr_info("%s\n", igb_driver_string); pr_info("%s\n", igb_copyright); #ifdef CONFIG_IGB_DCA diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 9217d150e286..f4835eb62fee 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -170,8 +170,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, igbvf_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index eee26a3be90b..975eb47ee04d 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -281,7 +281,6 @@ enum igbvf_state_t { }; extern char igbvf_driver_name[]; -extern const char igbvf_driver_version[]; void igbvf_check_options(struct igbvf_adapter *); void igbvf_set_ethtool_ops(struct net_device *); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 5b1800c3ba82..07740654df5c 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -24,9 +24,7 @@ #include "igbvf.h" -#define DRV_VERSION "2.4.0-k" char igbvf_driver_name[] = "igbvf"; -const char igbvf_driver_version[] = DRV_VERSION; static const char igbvf_driver_string[] = "Intel(R) Gigabit Virtual Function Network Driver"; static const char igbvf_copyright[] = @@ -2987,7 +2985,7 @@ static int __init igbvf_init_module(void) { int ret; - pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); + pr_info("%s\n", igbvf_driver_string); pr_info("%s\n", igbvf_copyright); ret = pci_register_driver(&igbvf_driver); @@ -3011,6 +3009,5 @@ module_exit(igbvf_exit_module); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); /* netdev.c */ diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5dbc5a156626..a2d260165df3 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -239,7 +239,6 @@ void igc_rings_dump(struct igc_adapter *adapter); void igc_regs_dump(struct igc_adapter *adapter); extern char igc_driver_name[]; -extern char igc_driver_version[]; #define IGC_REGS_LEN 740 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index a938ec8db681..735f3fb47dca 100644 --- 
a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -130,7 +130,6 @@ static void igc_ethtool_get_drvinfo(struct net_device *netdev, struct igc_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version)); /* add fw_version here */ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6919c50e449a..c2f41a558fd6 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -17,7 +17,6 @@ #include "igc_hw.h" #include "igc_tsn.h" -#define DRV_VERSION "0.0.1-k" #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver" #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) @@ -27,12 +26,10 @@ static int debug = -1; MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); char igc_driver_name[] = "igc"; -char igc_driver_version[] = DRV_VERSION; static const char igc_driver_string[] = DRV_SUMMARY; static const char igc_copyright[] = "Copyright(c) 2018 Intel Corporation."; @@ -5614,9 +5611,7 @@ static int __init igc_init_module(void) { int ret; - pr_info("%s - version %s\n", - igc_driver_string, igc_driver_version); - + pr_info("%s\n", igc_driver_string); pr_info("%s\n", igc_copyright); ret = pci_register_driver(&igc_driver); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb.h b/drivers/net/ethernet/intel/ixgb/ixgb.h index 681d44cc9784..81ac39576803 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb.h @@ -163,7 +163,6 @@ enum ixgb_state_t { void ixgb_check_options(struct ixgb_adapter *adapter); void ixgb_set_ethtool_ops(struct net_device *netdev); extern char ixgb_driver_name[]; -extern const char ixgb_driver_version[]; void ixgb_set_speed_duplex(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c index c65eb1afc8fb..582099a5ad41 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c @@ -458,8 +458,6 @@ ixgb_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->driver, ixgb_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgb_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); } diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index b64e91ea3465..46829cfd54df 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -9,9 +9,6 @@ char ixgb_driver_name[] = "ixgb"; static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver"; -#define DRIVERNAPI "-NAPI" -#define DRV_VERSION "1.0.135-k2" DRIVERNAPI -const char ixgb_driver_version[] = DRV_VERSION; static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation."; #define IXGB_CB_LENGTH 256 @@ -103,7 +100,6 @@ static struct pci_driver ixgb_driver = { MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE 
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; @@ -120,7 +116,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); static int __init ixgb_init_module(void) { - pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version); + pr_info("%s\n", ixgb_driver_string); pr_info("%s\n", ixgb_copyright); return pci_register_driver(&ixgb_driver); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5ddfc83a1e46..debbcf216134 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -846,7 +846,6 @@ extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; #endif extern char ixgbe_driver_name[]; -extern const char ixgbe_driver_version[]; #ifdef IXGBE_FCOE extern char ixgbe_default_device_descr[]; #endif /* IXGBE_FCOE */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index c6bf0a50ee63..5da367cb5c93 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1004,8 +1004,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbe_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->fw_version, adapter->eeprom_id, sizeof(drvinfo->fw_version)); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index ec7a11d13fdc..6c5703cdf062 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -1001,7 +1002,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, sizeof(info->driver_version), "%s v%s", ixgbe_driver_name, - ixgbe_driver_version); + UTS_RELEASE); /* Firmware Version */ strlcpy(info->firmware_version, adapter->eeprom_id, sizeof(info->firmware_version)); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f162b8b8f345..5becbb487b59 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -56,8 +57,6 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "5.1.0-k" -const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2016 Intel Corporation."; @@ -165,7 +164,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); static struct workqueue_struct *ixgbe_wq; @@ -11146,8 +11144,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (hw->mac.ops.set_fw_drv_ver) hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, - sizeof(ixgbe_driver_version) - 1, - ixgbe_driver_version); + sizeof(UTS_RELEASE) - 1, + UTS_RELEASE); /* add san mac addr to netdev */ ixgbe_add_sanmac_netdev(netdev); @@ -11504,7 +11502,7 @@ static struct pci_driver ixgbe_driver = { static int __init ixgbe_init_module(void) { int ret; - pr_info("%s - version %s\n", ixgbe_driver_string, 
ixgbe_driver_version); + pr_info("%s\n", ixgbe_driver_string); pr_info("%s\n", ixgbe_copyright); ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 988fa49fa99a..e49fb1cd9a99 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -218,8 +218,6 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev, struct ixgbevf_adapter *adapter = netdev_priv(netdev); strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, ixgbevf_driver_version, - sizeof(drvinfo->version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ecab686574b6..a0e325774819 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -440,7 +440,6 @@ extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; /* needed by ethtool.c */ extern const char ixgbevf_driver_name[]; -extern const char ixgbevf_driver_version[]; int ixgbevf_open(struct net_device *netdev); int ixgbevf_close(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a39e2cb384dd..635cbc25e2f2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -38,8 +38,6 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "4.1.0-k" -const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2018 Intel Corporation."; @@ -81,7 +79,6 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) static int debug = -1; @@ -4913,9 +4910,7 @@ static struct pci_driver ixgbevf_driver = { **/ static int __init ixgbevf_init_module(void) { - pr_info("%s - version %s\n", ixgbevf_driver_string, - ixgbevf_driver_version); - + pr_info("%s\n", ixgbevf_driver_string); pr_info("%s\n", ixgbevf_copyright); ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name); if (!ixgbevf_wq) { From 3dbdd6c2f70a2efbc28e197e58ccad08bfba7735 Mon Sep 17 00:00:00 2001 From: Aleksandr Loktionov Date: Fri, 29 May 2020 13:01:22 -0700 Subject: [PATCH 0347/2056] i40e: Add support for 5Gbps cards Make possible for the i40e driver to bind to the new v710 for 5GBASE-T NICs. 
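[Editor's note, not part of the patch] For readers skimming the hunks below: the change teaches the existing X710TL device-ID check to accept the new 5GBASE-T device ID alongside the 10GBASE-T "BC" one, and adds the same ID to the switch statements in i40e_set_mac_type() and the clause-45 PHY register helpers. A minimal standalone sketch of the resulting check follows; the device-ID values are taken from the i40e_devids.h hunk, while the test harness around them is purely illustrative.

    /* Illustrative sketch of the updated device-ID check (not driver code). */
    #include <stdio.h>

    #define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
    #define I40E_DEV_ID_5G_BASE_T_BC  0x101F

    /* Mirrors the patched macro: true for both 5G and 10G BASE-T "BC" parts. */
    #define I40E_IS_X710TL_DEVICE(d) \
            (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
             ((d) == I40E_DEV_ID_10G_BASE_T_BC))

    int main(void)
    {
            const unsigned int ids[] = { 0x101F, 0x15FF, 0x104E };
            size_t i;

            for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    printf("0x%04X -> %s\n", ids[i],
                           I40E_IS_X710TL_DEVICE(ids[i]) ? "X710TL family" : "other");
            return 0;
    }
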
Signed-off-by: Aleksandr Loktionov Signed-off-by: Arkadiusz Kubalewski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 3 +++ drivers/net/ethernet/intel/i40e/i40e_devids.h | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4ab081953e19..afad5e9f80e0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_QSFP_A: case I40E_DEV_ID_QSFP_B: case I40E_DEV_ID_QSFP_C: + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: @@ -4906,6 +4907,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw, status = i40e_write_phy_register_clause22(hw, reg, phy_addr, value); break; + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC: @@ -4943,6 +4945,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw, status = i40e_read_phy_register_clause22(hw, reg, phy_addr, value); break; + case I40E_DEV_ID_5G_BASE_T_BC: case I40E_DEV_ID_10G_BASE_T: case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_10G_BASE_T_BC:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 33df3bf2f73b..1bcb0ec0f0c0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -23,8 +23,10 @@ #define I40E_DEV_ID_10G_BASE_T_BC 0x15FF #define I40E_DEV_ID_10G_B 0x104F #define I40E_DEV_ID_10G_SFP 0x104E +#define I40E_DEV_ID_5G_BASE_T_BC 0x101F #define I40E_IS_X710TL_DEVICE(d) \ - ((d) == I40E_DEV_ID_10G_BASE_T_BC) + (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \ + ((d) == I40E_DEV_ID_10G_BASE_T_BC)) #define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0
From 37d318d7805f25b672bfd74fc694f19a2ee9665d Mon Sep 17 00:00:00 2001 From: Aleksandr Loktionov Date: Fri, 29 May 2020 14:10:39 -0700 Subject: [PATCH 0348/2056] i40e: Remove scheduling while atomic possibility In some cases a task held the spinlock (mac_filter_hash_lock) while being rescheduled, because it tried to take the admin queue lock, the struct i40e_spinlock asq_spinlock, which ultimately wraps a struct mutex and may therefore sleep. Moved i40e_aq_set_vsi_multicast_promiscuous(), i40e_aq_set_vsi_unicast_promiscuous(), i40e_aq_set_vsi_mc_promisc_on_vlan(), and i40e_aq_set_vsi_uc_promisc_on_vlan() outside of the atomic context. Without this patch there is a race condition which can result in scheduling while in atomic context: a thread holding mac_filter_hash_lock tries to acquire the admin queue mutex while another thread already owns it; the spinlock holder then fails to acquire the mutex and is put to sleep.
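To make the locking issue concrete, here is a simplified, hypothetical sketch of the buggy shape and of the fix pattern the patch adopts (snapshot the VLAN list while the spinlock is held, then issue the sleeping admin queue commands with the lock released). The symbols filter_lock, adminq_mutex, buggy_set_promisc() and fixed_set_promisc() are illustrative stand-ins, not i40e code.

/*
 * Simplified sketch of the problem and of the fix pattern.
 * The locks and helpers here are stand-ins for the real i40e symbols.
 */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(filter_lock);	/* stands in for mac_filter_hash_lock */
static DEFINE_MUTEX(adminq_mutex);	/* stands in for the admin queue mutex */

/* Buggy shape: the admin queue command is issued while the spinlock is held.
 * mutex_lock() may sleep, so if another thread already owns adminq_mutex this
 * thread schedules while atomic.
 */
static void buggy_set_promisc(void)
{
	spin_lock_bh(&filter_lock);
	mutex_lock(&adminq_mutex);	/* may sleep under a spinlock: bug */
	/* ... send one AQ command per VLAN found in the filter hash ... */
	mutex_unlock(&adminq_mutex);
	spin_unlock_bh(&filter_lock);
}

/* Fixed shape: copy the VLAN list while the spinlock is held, drop the lock,
 * then do the (sleeping) admin queue work on the private copy.
 */
static void fixed_set_promisc(int num_vlans)
{
	s16 *vlans;

	spin_lock_bh(&filter_lock);
	vlans = kcalloc(num_vlans, sizeof(*vlans), GFP_ATOMIC);
	/* ... copy VLAN ids out of the filter hash into vlans[] ... */
	spin_unlock_bh(&filter_lock);

	if (!vlans)
		return;

	mutex_lock(&adminq_mutex);	/* safe: no spinlock held any more */
	/* ... send one AQ command per entry of vlans[] ... */
	mutex_unlock(&adminq_mutex);

	kfree(vlans);
}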
Signed-off-by: Arkadiusz Kubalewski Signed-off-by: Aleksandr Loktionov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- .../ethernet/intel/i40e/i40e_virtchnl_pf.c | 254 ++++++++++-------- 1 file changed, 147 insertions(+), 107 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 56b9e445732b..8e133d6545bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1106,7 +1106,139 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) return -EIO; } -static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi); +/** + * i40e_getnum_vf_vsi_vlan_filters + * @vsi: pointer to the vsi + * + * called to get the number of VLANs offloaded on this VF + **/ +static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + int num_vlans = 0, bkt; + + hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + num_vlans++; + } + + return num_vlans; +} + +/** + * i40e_get_vlan_list_sync + * @vsi: pointer to the VSI + * @num_vlans: number of VLANs in mac_filter_hash, returned to caller + * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller. + * This array is allocated here, but has to be freed in caller. + * + * Called to get number of VLANs and VLAN list present in mac_filter_hash. + **/ +static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans, + s16 **vlan_list) +{ + struct i40e_mac_filter *f; + int i = 0; + int bkt; + + spin_lock_bh(&vsi->mac_filter_hash_lock); + *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi); + *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); + if (!(*vlan_list)) + goto err; + + hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + (*vlan_list)[i++] = f->vlan; + } +err: + spin_unlock_bh(&vsi->mac_filter_hash_lock); +} + +/** + * i40e_set_vsi_promisc + * @vf: pointer to the VF struct + * @seid: VSI number + * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable + * for a given VLAN + * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable + * for a given VLAN + * @vl: List of VLANs - apply filter for given VLANs + * @num_vlans: Number of elements in @vl + **/ +static i40e_status +i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, + bool unicast_enable, s16 *vl, int num_vlans) +{ + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret; + int i; + + /* No VLAN to set promisc on, set on VSI */ + if (!num_vlans || !vl) { + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid, + multi_enable, + NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + + return aq_ret; + } + + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid, + unicast_enable, + NULL, true); + + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + return aq_ret; + } + + for (i = 0; i < num_vlans; i++) { + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, + multi_enable, + vl[i], NULL); + if (aq_ret) { + int 
aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, + unicast_enable, + vl[i], NULL); + if (aq_ret) { + int aq_err = pf->hw.aq.asq_last_status; + + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + } + return aq_ret; +} /** * i40e_config_vf_promiscuous_mode @@ -1123,108 +1255,35 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, bool allmulti, bool alluni) { + i40e_status aq_ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; - struct i40e_hw *hw = &pf->hw; - struct i40e_mac_filter *f; - i40e_status aq_ret = 0; struct i40e_vsi *vsi; - int bkt; + int num_vlans; + s16 *vl; vsi = i40e_find_vsi_from_id(pf, vsi_id); if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) return I40E_ERR_PARAM; if (vf->port_vlan_id) { - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, - allmulti, - vf->port_vlan_id, - NULL); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; - - dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - return aq_ret; - } - - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, - alluni, - vf->port_vlan_id, - NULL); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; - - dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } + aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, + alluni, &vf->port_vlan_id, 1); return aq_ret; } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) - continue; - aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, - vsi->seid, - allmulti, - f->vlan, - NULL); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; + i40e_get_vlan_list_sync(vsi, &num_vlans, &vl); - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } + if (!vl) + return I40E_ERR_NO_MEMORY; - aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; - - dev_err(&pf->pdev->dev, - "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", - f->vlan, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } - } - return aq_ret; - } - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti, - NULL); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; - - dev_err(&pf->pdev->dev, - "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, + vl, num_vlans); + kfree(vl); return aq_ret; } - aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni, - NULL, true); - if (aq_ret) { - int aq_err = pf->hw.aq.asq_last_status; - - dev_err(&pf->pdev->dev, - "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n", - vf->vf_id, - 
i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - } - + /* no VLANs to set on, set on VSI */ + aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni, + NULL, 0); return aq_ret; } @@ -1972,25 +2031,6 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) i40e_reset_vf(vf, false); } -/** - * i40e_getnum_vf_vsi_vlan_filters - * @vsi: pointer to the vsi - * - * called to get the number of VLANs offloaded on this VF - **/ -static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) -{ - struct i40e_mac_filter *f; - int num_vlans = 0, bkt; - - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) - num_vlans++; - } - - return num_vlans; -} - /** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info From 5625f965d7644b4dc6a71d74021cfe093ad34eea Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Thu, 25 Jun 2020 12:37:23 +0000 Subject: [PATCH 0349/2056] wilc1000: move wilc driver out of staging WILC1000 is an IEEE 802.11 b/g/n IoT link controller module. The WILC1000 connects to Microchip AVR/SMART MCUs, SMART MPUs, and other processors with minimal resource requirements with a simple SPI/SDIO-to-Wi-Fi interface. WILC1000 driver has been part of staging for few years. With contributions from the community, it has improved significantly. Full driver review has helped in achieving the current state. The details for those reviews are captured in 1 & 2. [1]. https://lore.kernel.org/linux-wireless/1537957525-11467-1-git-send-email-ajay.kathat@microchip.com/ [2]. https://lore.kernel.org/linux-wireless/1562896697-8002-1-git-send-email-ajay.kathat@microchip.com/ Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo --- .../net/wireless}/microchip,wilc1000.yaml | 0 MAINTAINERS | 14 +++++++------- drivers/net/wireless/Kconfig | 1 + drivers/net/wireless/Makefile | 1 + drivers/net/wireless/microchip/Kconfig | 15 +++++++++++++++ drivers/net/wireless/microchip/Makefile | 2 ++ .../wireless/microchip}/wilc1000/Kconfig | 0 .../wireless/microchip}/wilc1000/Makefile | 0 .../wireless/microchip}/wilc1000/cfg80211.c | 0 .../wireless/microchip}/wilc1000/cfg80211.h | 0 .../wireless/microchip}/wilc1000/fw.h | 0 .../wireless/microchip}/wilc1000/hif.c | 0 .../wireless/microchip}/wilc1000/hif.h | 0 .../wireless/microchip}/wilc1000/mon.c | 0 .../wireless/microchip}/wilc1000/netdev.c | 0 .../wireless/microchip}/wilc1000/netdev.h | 0 .../wireless/microchip}/wilc1000/sdio.c | 0 .../wireless/microchip}/wilc1000/spi.c | 0 .../wireless/microchip}/wilc1000/wlan.c | 0 .../wireless/microchip}/wilc1000/wlan.h | 0 .../wireless/microchip}/wilc1000/wlan_cfg.c | 0 .../wireless/microchip}/wilc1000/wlan_cfg.h | 0 .../wireless/microchip}/wilc1000/wlan_if.h | 0 drivers/staging/Kconfig | 2 -- drivers/staging/Makefile | 1 - 25 files changed, 26 insertions(+), 10 deletions(-) rename {drivers/staging/wilc1000 => Documentation/devicetree/bindings/net/wireless}/microchip,wilc1000.yaml (100%) create mode 100644 drivers/net/wireless/microchip/Kconfig create mode 100644 drivers/net/wireless/microchip/Makefile rename drivers/{staging => net/wireless/microchip}/wilc1000/Kconfig (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/Makefile (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/cfg80211.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/cfg80211.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/fw.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/hif.c (100%) 
rename drivers/{staging => net/wireless/microchip}/wilc1000/hif.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/mon.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/netdev.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/netdev.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/sdio.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/spi.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/wlan.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/wlan.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/wlan_cfg.c (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/wlan_cfg.h (100%) rename drivers/{staging => net/wireless/microchip}/wilc1000/wlan_if.h (100%) diff --git a/drivers/staging/wilc1000/microchip,wilc1000.yaml b/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml similarity index 100% rename from drivers/staging/wilc1000/microchip,wilc1000.yaml rename to Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml diff --git a/MAINTAINERS b/MAINTAINERS index 68f21d46614c..7dae51e32254 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11362,6 +11362,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: drivers/usb/gadget/udc/atmel_usba_udc.* +MICROCHIP WILC1000 WIFI DRIVER +M: Adham Abozaeid +M: Ajay Singh +L: linux-wireless@vger.kernel.org +S: Supported +F: drivers/net/wireless/microchip/wilc1000/ + MICROCHIP XDMA DRIVER M: Ludovic Desroches L: linux-arm-kernel@lists.infradead.org @@ -16251,13 +16258,6 @@ M: Forest Bond S: Odd Fixes F: drivers/staging/vt665?/ -STAGING - WILC1000 WIFI DRIVER -M: Adham Abozaeid -M: Ajay Singh -L: linux-wireless@vger.kernel.org -S: Supported -F: drivers/staging/wilc1000/ - STAGING SUBSYSTEM M: Greg Kroah-Hartman L: devel@driverdev.osuosl.org diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 8ab62bb6b853..75f18c1e5009 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -40,6 +40,7 @@ source "drivers/net/wireless/intel/Kconfig" source "drivers/net/wireless/intersil/Kconfig" source "drivers/net/wireless/marvell/Kconfig" source "drivers/net/wireless/mediatek/Kconfig" +source "drivers/net/wireless/microchip/Kconfig" source "drivers/net/wireless/ralink/Kconfig" source "drivers/net/wireless/realtek/Kconfig" source "drivers/net/wireless/rsi/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 6cfe74515c95..80b324499786 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/ +obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/ obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/ obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/ diff --git a/drivers/net/wireless/microchip/Kconfig b/drivers/net/wireless/microchip/Kconfig new file mode 100644 index 000000000000..a6b46fb6b1ec --- /dev/null +++ b/drivers/net/wireless/microchip/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +config WLAN_VENDOR_MICROCHIP + bool "Microchip devices" + default y + help + If you have a wireless card belonging to this class, say Y. 
+ + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all the + questions about these cards. If you say Y, you will be asked for + your specific card in the following questions. + +if WLAN_VENDOR_MICROCHIP +source "drivers/net/wireless/microchip/wilc1000/Kconfig" +endif # WLAN_VENDOR_MICROCHIP diff --git a/drivers/net/wireless/microchip/Makefile b/drivers/net/wireless/microchip/Makefile new file mode 100644 index 000000000000..73b763c7393e --- /dev/null +++ b/drivers/net/wireless/microchip/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_WILC1000) += wilc1000/ diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/net/wireless/microchip/wilc1000/Kconfig similarity index 100% rename from drivers/staging/wilc1000/Kconfig rename to drivers/net/wireless/microchip/wilc1000/Kconfig diff --git a/drivers/staging/wilc1000/Makefile b/drivers/net/wireless/microchip/wilc1000/Makefile similarity index 100% rename from drivers/staging/wilc1000/Makefile rename to drivers/net/wireless/microchip/wilc1000/Makefile diff --git a/drivers/staging/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c similarity index 100% rename from drivers/staging/wilc1000/cfg80211.c rename to drivers/net/wireless/microchip/wilc1000/cfg80211.c diff --git a/drivers/staging/wilc1000/cfg80211.h b/drivers/net/wireless/microchip/wilc1000/cfg80211.h similarity index 100% rename from drivers/staging/wilc1000/cfg80211.h rename to drivers/net/wireless/microchip/wilc1000/cfg80211.h diff --git a/drivers/staging/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h similarity index 100% rename from drivers/staging/wilc1000/fw.h rename to drivers/net/wireless/microchip/wilc1000/fw.h diff --git a/drivers/staging/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c similarity index 100% rename from drivers/staging/wilc1000/hif.c rename to drivers/net/wireless/microchip/wilc1000/hif.c diff --git a/drivers/staging/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h similarity index 100% rename from drivers/staging/wilc1000/hif.h rename to drivers/net/wireless/microchip/wilc1000/hif.h diff --git a/drivers/staging/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c similarity index 100% rename from drivers/staging/wilc1000/mon.c rename to drivers/net/wireless/microchip/wilc1000/mon.c diff --git a/drivers/staging/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c similarity index 100% rename from drivers/staging/wilc1000/netdev.c rename to drivers/net/wireless/microchip/wilc1000/netdev.c diff --git a/drivers/staging/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h similarity index 100% rename from drivers/staging/wilc1000/netdev.h rename to drivers/net/wireless/microchip/wilc1000/netdev.h diff --git a/drivers/staging/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c similarity index 100% rename from drivers/staging/wilc1000/sdio.c rename to drivers/net/wireless/microchip/wilc1000/sdio.c diff --git a/drivers/staging/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c similarity index 100% rename from drivers/staging/wilc1000/spi.c rename to drivers/net/wireless/microchip/wilc1000/spi.c diff --git a/drivers/staging/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c similarity index 100% rename from drivers/staging/wilc1000/wlan.c rename to drivers/net/wireless/microchip/wilc1000/wlan.c diff --git 
a/drivers/staging/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h similarity index 100% rename from drivers/staging/wilc1000/wlan.h rename to drivers/net/wireless/microchip/wilc1000/wlan.h diff --git a/drivers/staging/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c similarity index 100% rename from drivers/staging/wilc1000/wlan_cfg.c rename to drivers/net/wireless/microchip/wilc1000/wlan_cfg.c diff --git a/drivers/staging/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h similarity index 100% rename from drivers/staging/wilc1000/wlan_cfg.h rename to drivers/net/wireless/microchip/wilc1000/wlan_cfg.h diff --git a/drivers/staging/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h similarity index 100% rename from drivers/staging/wilc1000/wlan_if.h rename to drivers/net/wireless/microchip/wilc1000/wlan_if.h diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 4ec5528f89fa..b3fb4d41e231 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -84,8 +84,6 @@ source "drivers/staging/fbtft/Kconfig" source "drivers/staging/fsl-dpaa2/Kconfig" -source "drivers/staging/wilc1000/Kconfig" - source "drivers/staging/most/Kconfig" source "drivers/staging/ks7010/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 4d34198151b3..3d8c7ea21a10 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -32,7 +32,6 @@ obj-$(CONFIG_UNISYSSPAR) += unisys/ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/ obj-$(CONFIG_FB_TFT) += fbtft/ obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ -obj-$(CONFIG_WILC1000) += wilc1000/ obj-$(CONFIG_MOST) += most/ obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ From d528510e6dee7a1954560cb1f23438b60236fada Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 26 Jun 2020 10:35:06 +0200 Subject: [PATCH 0350/2056] batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 61d8dbe8c954..42b8d1e76dea 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -13,7 +13,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2020.2" +#define BATADV_SOURCE_VERSION "2020.3" #endif /* B.A.T.M.A.N. parameters */ From 4dd5066a38c939f5cd8faa2a221acb94ff15482f Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 27 May 2020 23:29:27 +0200 Subject: [PATCH 0351/2056] batman-adv: Switch mailing list subscription page The mailman installation on lists.open-mesh.org was switched from mailman2 to mailman3. The URL to the subscription webpage changed in this process. 
Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- Documentation/networking/batman-adv.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst index 18020943ba25..02af49b08635 100644 --- a/Documentation/networking/batman-adv.rst +++ b/Documentation/networking/batman-adv.rst @@ -160,7 +160,7 @@ IRC: #batman on irc.freenode.org Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription at - https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n) + https://lists.open-mesh.org/mailman3/postorius/lists/b.a.t.m.a.n.lists.open-mesh.org/) You can also contact the Authors: From bccb48c89fe3c09f1cbb7c8612e31f5daa1d4541 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 1 Jun 2020 20:13:21 +0200 Subject: [PATCH 0352/2056] batman-adv: Fix typos and grammar in documentation Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- Documentation/networking/batman-adv.rst | 6 +-- include/uapi/linux/batadv_packet.h | 50 ++++++++++++------------- include/uapi/linux/batman_adv.h | 4 +- net/batman-adv/bat_iv_ogm.c | 8 ++-- net/batman-adv/bat_v_elp.c | 10 ++--- net/batman-adv/bat_v_ogm.c | 14 +++---- net/batman-adv/bridge_loop_avoidance.c | 6 +-- net/batman-adv/distributed-arp-table.c | 2 +- net/batman-adv/fragmentation.c | 6 +-- net/batman-adv/hard-interface.c | 14 +++---- net/batman-adv/log.h | 6 +-- net/batman-adv/main.c | 2 +- net/batman-adv/main.h | 6 +-- net/batman-adv/multicast.c | 21 ++++++----- net/batman-adv/netlink.c | 2 +- net/batman-adv/network-coding.c | 14 +++---- net/batman-adv/originator.c | 8 ++-- net/batman-adv/routing.c | 4 +- net/batman-adv/send.c | 4 +- net/batman-adv/soft-interface.c | 2 +- net/batman-adv/tp_meter.c | 12 +++--- net/batman-adv/translation-table.c | 10 ++--- net/batman-adv/tvlv.c | 4 +- net/batman-adv/types.h | 12 +++--- 24 files changed, 114 insertions(+), 113 deletions(-) diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst index 02af49b08635..74821d29a22f 100644 --- a/Documentation/networking/batman-adv.rst +++ b/Documentation/networking/batman-adv.rst @@ -73,7 +73,7 @@ lower value. This will make the mesh more responsive to topology changes, but will also increase the overhead. Information about the current state can be accessed via the batadv generic -netlink family. batctl provides human readable version via its debug tables +netlink family. batctl provides a human readable version via its debug tables subcommands. @@ -115,8 +115,8 @@ are prefixed with "batman-adv:" So to see just these messages try:: $ dmesg | grep batman-adv When investigating problems with your mesh network, it is sometimes necessary to -see more detail debug messages. This must be enabled when compiling the -batman-adv module. When building batman-adv as part of kernel, use "make +see more detailed debug messages. This must be enabled when compiling the +batman-adv module. When building batman-adv as part of the kernel, use "make menuconfig" and enable the option ``B.A.T.M.A.N. debugging`` (``CONFIG_BATMAN_ADV_DEBUG=y``). diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 0ae34c85ef9e..9c8604c5b5f6 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -72,8 +72,8 @@ enum batadv_subtype { /** * enum batadv_iv_flags - flags used in B.A.T.M.A.N. 
IV OGM packets - * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was - * previously received from someone else than the best neighbor. + * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and + * was previously received from someone other than the best neighbor. * @BATADV_PRIMARIES_FIRST_HOP: flag unused. * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a * one hop neighbor on the interface where it was originally received. @@ -195,8 +195,8 @@ struct batadv_bla_claim_dst { /** * struct batadv_ogm_packet - ogm (routing protocol) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @flags: contains routing relevant flags - see enum batadv_iv_flags * @seqno: sequence identification * @orig: address of the source node @@ -247,7 +247,7 @@ struct batadv_ogm2_packet { /** * struct batadv_elp_packet - elp (neighbor discovery) packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header + * @version: batman-adv protocol version, part of the general header * @orig: originator mac address * @seqno: sequence number * @elp_interval: currently used ELP sending interval in ms @@ -265,15 +265,15 @@ struct batadv_elp_packet { /** * struct batadv_icmp_header - common members among all the ICMP packets * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node * @uid: local ICMP socket identifier * @align: not used - useful for alignment purposes only * - * This structure is used for ICMP packets parsing only and it is never sent + * This structure is used for ICMP packet parsing only and it is never sent * over the wire. The alignment field at the end is there to ensure that * members are padded the same way as they are in real packets. 
*/ @@ -291,8 +291,8 @@ struct batadv_icmp_header { /** * struct batadv_icmp_packet - ICMP packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -315,8 +315,8 @@ struct batadv_icmp_packet { /** * struct batadv_icmp_tp_packet - ICMP TP Meter packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype { /** * struct batadv_icmp_packet_rr - ICMP RouteRecord packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @msg_type: ICMP packet type * @dst: address of the destination node * @orig: address of the source node @@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr { /** * struct batadv_unicast_packet - unicast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @ttvn: translation table version number * @dest: originator destination of the unicast packet */ @@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet { /** * struct batadv_frag_packet - fragmented packet * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @dest: final destination used when routing fragments * @orig: originator of the fragment used when merging the packet * @no: fragment number within this sequence @@ -467,8 +467,8 @@ struct batadv_frag_packet { /** * struct batadv_bcast_packet - broadcast packet for network payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved byte for alignment * @seqno: sequence identification * @orig: originator of the broadcast packet @@ -488,10 +488,10 @@ struct batadv_bcast_packet { /** * struct batadv_coded_packet - network coded packet * @packet_type: 
batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @first_source: original source of first included packet - * @first_orig_dest: original destinal of first included packet + * @first_orig_dest: original destination of first included packet * @first_crc: checksum of first included packet * @first_ttvn: tt-version number of first included packet * @second_ttl: ttl of second packet @@ -523,8 +523,8 @@ struct batadv_coded_packet { /** * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload * @packet_type: batman-adv packet type, part of the general header - * @version: batman-adv protocol version, part of the genereal header - * @ttl: time to live for this packet, part of the genereal header + * @version: batman-adv protocol version, part of the general header + * @ttl: time to live for this packet, part of the general header * @reserved: reserved field (for packet alignment) * @src: address of the source * @dst: address of the destination diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 617c180ff0c8..8cf2ad11ead9 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -69,7 +69,7 @@ enum batadv_tt_client_flags { /** * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be - * part of the network but no nnode has already announced it + * part of the network but no node has already announced it */ BATADV_TT_CLIENT_TEMP = (1 << 11), }; @@ -131,7 +131,7 @@ enum batadv_gw_modes { /** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */ BATADV_GW_MODE_CLIENT, - /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */ + /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */ BATADV_GW_MODE_SERVER, }; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index e87f19c82e8d..5b3a41983156 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -134,7 +134,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[]) * * Return: the originator object corresponding to the passed mac address or NULL * on failure. - * If the object does not exists it is created an initialised. + * If the object does not exist, it is created and initialised. 
*/ static struct batadv_orig_node * batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -871,7 +871,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface) } /** - * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface + * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface * @orig_node: originator which reproadcasted the OGMs directly * @if_outgoing: interface which transmitted the original OGM and received the * direct rebroadcast @@ -1554,7 +1554,7 @@ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet, * batadv_iv_ogm_process() - process an incoming batman iv OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) @@ -2288,7 +2288,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq, * @msg: Netlink message to dump into * @cb: Control block containing additional options * @bat_priv: The bat priv with all the soft interface information - * @single_hardif: Limit dump to this hard interfaace + * @single_hardif: Limit dump to this hard interface */ static void batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb, diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 0bdefa35da98..d35aca0e969a 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -60,7 +60,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface) * @neigh: the neighbour for which the throughput has to be obtained * * Return: The throughput towards the given neighbour in multiples of 100kpbs - * (a value of '1' equals to 0.1Mbps, '10' equals 1Mbps, etc). + * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc). */ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) { @@ -183,8 +183,8 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work) * * Sends a predefined number of unicast wifi packets to a given neighbour in * order to trigger the throughput estimation on this link by the RC algorithm. - * Packets are sent only if there there is not enough payload unicast traffic - * towards this neighbour.. + * Packets are sent only if there is not enough payload unicast traffic towards + * this neighbour.. * * Return: True on success and false in case of error during skb preparation. */ @@ -244,7 +244,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) * batadv_v_elp_periodic_work() - ELP periodic task per interface * @work: work queue item * - * Emits broadcast ELP message in regular intervals. + * Emits broadcast ELP messages in regular intervals. */ static void batadv_v_elp_periodic_work(struct work_struct *work) { @@ -499,7 +499,7 @@ static void batadv_v_elp_neigh_update(struct batadv_priv *bat_priv, * @skb: the received packet * @if_incoming: the interface this packet was received through * - * Return: NET_RX_SUCCESS and consumes the skb if the packet was peoperly + * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly * processed or NET_RX_DROP in case of failure. 
*/ int batadv_v_elp_packet_recv(struct sk_buff *skb, diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 18028b9f95f0..0d404f7bcd9f 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -47,9 +47,9 @@ * @bat_priv: the bat priv with all the soft interface information * @addr: the address of the originator * - * Return: the orig_node corresponding to the specified address. If such object - * does not exist it is allocated here. In case of allocation failure returns - * NULL. + * Return: the orig_node corresponding to the specified address. If such an + * object does not exist, it is allocated here. In case of allocation failure + * returns NULL. */ struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr) @@ -172,7 +172,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb, * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue * @hard_iface: the interface holding the aggregation queue * - * Empties the OGMv2 aggregation queue and frees all the skbs it contained. + * Empties the OGMv2 aggregation queue and frees all the skbs it contains. * * Caller needs to hold the hard_iface->bat_v.aggr_list.lock. */ @@ -378,7 +378,7 @@ static void batadv_v_ogm_send(struct work_struct *work) * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface * @work: work queue item * - * Emits aggregated OGM message in regular intervals. + * Emits aggregated OGM messages in regular intervals. */ void batadv_v_ogm_aggr_work(struct work_struct *work) { @@ -399,7 +399,7 @@ void batadv_v_ogm_aggr_work(struct work_struct *work) * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V * @hard_iface: the interface to prepare * - * Takes care of scheduling own OGM sending routine for this interface. + * Takes care of scheduling its own OGM sending routine for this interface. * * Return: 0 on success or a negative error code otherwise */ @@ -847,7 +847,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, * batadv_v_ogm_process() - process an incoming batman v OGM * @skb: the skb containing the OGM * @ogm_offset: offset to the OGM which should be processed (for aggregates) - * @if_incoming: the interface where this packet was receved + * @if_incoming: the interface where this packet was received */ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, struct batadv_hard_iface *if_incoming) diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 41cc87f06b14..91a04ca373dc 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -992,7 +992,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv, * @hw_dst: the Hardware destination in the ARP Header * @ethhdr: pointer to the Ethernet header of the claim frame * - * checks if it is a claim packet and if its on the same group. + * checks if it is a claim packet and if it's on the same group. * This function also applies the group ID of the sender * if it is in the same mesh. * @@ -1757,7 +1757,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv) * @vid: the VLAN ID of the frame * * Checks if this packet is a loop detect frame which has been sent by us, - * throw an uevent and log the event if that is the case. + * throws an uevent and logs the event if that is the case. * * Return: true if it is a loop detect frame which is to be dropped, false * otherwise. 
@@ -1815,7 +1815,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * * we have to race for a claim * * if the frame is allowed on the LAN * - * in these cases, the skb is further handled by this function + * In these cases, the skb is further handled by this function * * Return: true if handled, otherwise it returns false and the caller shall * further process the skb. diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index b85da4b7a77b..0e6e53e9b5f3 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -666,7 +666,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst, * @vid: VLAN identifier * @packet_subtype: unicast4addr packet subtype to use * - * This function copies the skb with pskb_copy() and is sent as unicast packet + * This function copies the skb with pskb_copy() and is sent as a unicast packet * to each of the selected candidates. * * Return: true if the packet is sent to at least one candidate, false diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 7cad97644d05..9fdbe3068153 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c @@ -102,8 +102,8 @@ static int batadv_frag_size_limit(void) * * Caller must hold chain->lock. * - * Return: true if chain is empty and caller can just insert the new fragment - * without searching for the right position. + * Return: true if chain is empty and the caller can just insert the new + * fragment without searching for the right position. */ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain, u16 seqno) @@ -306,7 +306,7 @@ batadv_frag_merge_packets(struct hlist_head *chain) * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb * to NULL; 3) Error: Return false and free skb. * - * Return: true when packet is merged or buffered, false when skb is not not + * Return: true when the packet is merged or buffered, false when skb is not not * used. */ bool batadv_frag_skb_buffer(struct sk_buff **skb, diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 3a256af92784..53c27c67cc11 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -138,10 +138,10 @@ static bool batadv_mutual_parents(const struct net_device *dev1, * @net_dev: the device to check * * If the user creates any virtual device on top of a batman-adv interface, it - * is important to prevent this new interface to be used to create a new mesh - * network (this behaviour would lead to a batman-over-batman configuration). - * This function recursively checks all the fathers of the device passed as - * argument looking for a batman-adv soft interface. + * is important to prevent this new interface from being used to create a new + * mesh network (this behaviour would lead to a batman-over-batman + * configuration). This function recursively checks all the fathers of the + * device passed as argument looking for a batman-adv soft interface. * * Return: true if the device is descendant of a batman-adv mesh interface (or * if it is a batman-adv interface itself), false otherwise @@ -680,8 +680,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface) * @slave: the interface enslaved in another master * @master: the master from which slave has to be removed * - * Invoke ndo_del_slave on master passing slave as argument. 
In this way slave - * is free'd and master can correctly change its internal state. + * Invoke ndo_del_slave on master passing slave as argument. In this way the + * slave is free'd and the master can correctly change its internal state. * * Return: 0 on success, a negative value representing the error otherwise */ @@ -818,7 +818,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, * @soft_iface: soft interface to check * * This function is only using RCU for locking - the result can therefore be - * off when another functions is modifying the list at the same time. The + * off when another function is modifying the list at the same time. The * caller can use the rtnl_lock to make sure that the count is accurate. * * Return: number of connected/enslaved hard interfaces diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h index f9884dc56cf3..979864c0fa6b 100644 --- a/net/batman-adv/log.h +++ b/net/batman-adv/log.h @@ -69,7 +69,7 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...) __printf(2, 3); /** - * _batadv_dbg() - Store debug output with(out) ratelimiting + * _batadv_dbg() - Store debug output with(out) rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @ratelimited: whether output should be rate limited @@ -95,7 +95,7 @@ static inline void _batadv_dbg(int type __always_unused, #endif /** - * batadv_dbg() - Store debug output without ratelimiting + * batadv_dbg() - Store debug output without rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments @@ -104,7 +104,7 @@ static inline void _batadv_dbg(int type __always_unused, _batadv_dbg(type, bat_priv, 0, ## arg) /** - * batadv_dbg_ratelimited() - Store debug output with ratelimiting + * batadv_dbg_ratelimited() - Store debug output with rate limiting * @type: type of debug message * @bat_priv: the bat priv with all the soft interface information * @arg: format string and variable arguments diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index d8a255c85e77..519c08c2cfba 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -666,7 +666,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len) * @vid: the VLAN identifier for which the AP isolation attributed as to be * looked up * - * Return: true if AP isolation is on for the VLAN idenfied by vid, false + * Return: true if AP isolation is on for the VLAN identified by vid, false * otherwise */ bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 42b8d1e76dea..0393bb9ed3d0 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -308,7 +308,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a predecessor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. 
* * This means that for a u8 with the maximum value 255, it would think: @@ -330,11 +330,11 @@ static inline bool batadv_has_timed_out(unsigned long timestamp, /** * batadv_seq_after() - Checks if a sequence number x is a successor of y - * @x: potential sucessor of @y + * @x: potential successor of @y * @y: value to compare @x against * * It handles overflows/underflows and can correctly check for a successor - * unless the variable sequence number has grown by more then + * unless the variable sequence number has grown by more than * 2**(bitwidth(x)-1)-1. * * This means that for a u8 with the maximum value 255, it would think: diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index 9ebdc1e864b9..bdc4a1fba1c6 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -510,7 +510,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev, * the given mcast_list. In general, multicast listeners provided by * your multicast receiving applications run directly on this node. * - * If there is a bridge interface on top of dev, collects from that one + * If there is a bridge interface on top of dev, collect from that one * instead. Just like with IP addresses and routes, multicast listeners * will(/should) register to the bridge interface instead of an * enslaved bat0. @@ -832,8 +832,8 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @flags: TVLV flags indicating the new multicast state * - * Whenever the multicast TVLV flags this nodes announces change this notifies - * userspace via the 'mcast' log level. + * Whenever the multicast TVLV flags this node announces change, this function + * should be used to notify userspace about the change. */ static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags) { @@ -1244,7 +1244,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv) * @ethhdr: an ethernet header to determine the protocol family from * * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or - * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and + * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and * increases its refcount. */ static struct batadv_orig_node * @@ -1693,7 +1693,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv, } /** - * batadv_mcast_forw_send() - send packet to any detected multicast recpient + * batadv_mcast_forw_send() - send packet to any detected multicast recipient * @bat_priv: the bat priv with all the soft interface information * @skb: the multicast packet to transmit * @vid: the vlan identifier @@ -1742,7 +1742,8 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator, - * orig, has toggled then this method updates counter and list accordingly. + * orig, has toggled then this method updates the counter and the list + * accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1787,7 +1788,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. 
*/ @@ -1832,7 +1833,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1877,7 +1878,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ @@ -1922,7 +1923,7 @@ static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv, * @mcast_flags: flags indicating the new multicast state * * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has - * toggled then this method updates counter and list accordingly. + * toggled then this method updates the counter and the list accordingly. * * Caller needs to hold orig->mcast_handler_lock. */ diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 02ed073f95a9..cfb00dfa468a 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -640,7 +640,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie) * @bat_priv: the bat priv with all the soft interface information * @dst: destination of tp_meter session * @result: reason for tp meter session stop - * @test_time: total time ot the tp_meter session + * @test_time: total time of the tp_meter session * @total_bytes: bytes acked to the receiver * @cookie: cookie of tp_meter session * diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index b0469d15da0e..48d707850f3e 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -134,7 +134,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, } /** - * batadv_nc_mesh_init() - initialise coding hash table and start house keeping + * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping * @bat_priv: the bat priv with all the soft interface information * * Return: 0 on success or negative error number in case of failure @@ -700,7 +700,7 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv, } /** - * batadv_nc_worker() - periodic task for house keeping related to network + * batadv_nc_worker() - periodic task for housekeeping related to network * coding * @work: kernel work struct */ @@ -1316,7 +1316,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv, } /** - * batadv_nc_skb_src_search() - Loops through the list of neighoring nodes of + * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of * the skb's sender (may be equal to the originator). * @bat_priv: the bat priv with all the soft interface information * @skb: data skb to forward @@ -1402,10 +1402,10 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv, * @neigh_node: next hop to forward packet to * @ethhdr: pointer to the ethernet header inside the skb * - * Loops through list of neighboring nodes the next hop has a good connection to - * (receives OGMs with a sufficient quality). 
We need to find a neighbor of our - * next hop that potentially sent a packet which our next hop also received - * (overheard) and has stored for later decoding. + * Loops through the list of neighboring nodes the next hop has a good + * connection to (receives OGMs with a sufficient quality). We need to find a + * neighbor of our next hop that potentially sent a packet which our next hop + * also received (overheard) and has stored for later decoding. * * Return: true if the skb was consumed (encoded packet sent) or false otherwise */ diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 5b0c2fffc214..805d8969bdfb 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -325,7 +325,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node) * @if_outgoing: the interface where the payload packet has been received or * the OGM should be sent to * - * Return: the neighbor which should be router for this orig_node/iface. + * Return: the neighbor which should be the router for this orig_node/iface. * * The object is returned with refcounter increased by 1. */ @@ -515,7 +515,7 @@ batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh, * Looks for and possibly returns a neighbour belonging to this originator list * which is connected through the provided hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ static struct batadv_neigh_node * batadv_neigh_node_get(const struct batadv_orig_node *orig_node, @@ -620,7 +620,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface, * * Looks for and possibly returns a neighbour belonging to this hard interface. * - * Return: neighbor when found. Othwerwise NULL + * Return: neighbor when found. Otherwise NULL */ struct batadv_hardif_neigh_node * batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface, @@ -999,7 +999,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv) * @bat_priv: the bat priv with all the soft interface information * @addr: the mac address of the originator * - * Creates a new originator object and initialise all the generic fields. + * Creates a new originator object and initialises all the generic fields. * The new object is not added to the originator list. * * Return: the newly created object or NULL on failure. diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index d343382e9664..27cdf5e4349a 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, * @skb: packet to check * @hdr_size: size of header to pull * - * Check for short header and bad addresses in given packet. + * Checks for short header and bad addresses in the given packet. * * Return: negative value when check fails and 0 otherwise. The negative value * depends on the reason: -ENODATA for bad header, -EBADR for broadcast @@ -1113,7 +1113,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb, * @recv_if: interface that the skb is received on * * This function does one of the three following things: 1) Forward fragment, if - * the assembled packet will exceed our MTU; 2) Buffer fragment, if we till + * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still * lack further fragments; 3) Merge fragments, if we have all needed parts. * * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise. 
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index 7f8ade04e08e..d267b94800d6 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -605,8 +605,8 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet, * given hard_iface. If hard_iface is NULL forwarding packets on all hard * interfaces will be claimed. * - * The packets are being moved from the forw_list to the cleanup_list and - * by that allows already running threads to notice the claiming. + * The packets are being moved from the forw_list to the cleanup_list. This + * makes it possible for already running threads to notice the claim. */ static void batadv_forw_packet_list_steal(struct hlist_head *forw_list, diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index f1f1c86f3419..23833a0ba5e6 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -406,7 +406,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, * @hdr_size: size of already parsed batman-adv header * @orig_node: originator from which the batman-adv packet was sent * - * Sends a ethernet frame to the receive path of the local @soft_iface. + * Sends an ethernet frame to the receive path of the local @soft_iface. * skb->data has still point to the batman-adv header with the size @hdr_size. * The caller has to have parsed this header already and made sure that at least * @hdr_size bytes are still available for pull in @skb. diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index bd2ac570c42c..db7e3774825b 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c @@ -66,7 +66,7 @@ /** * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond - * such amound of milliseconds, the receiver is considered unreachable and the + * such amount of milliseconds, the receiver is considered unreachable and the * connection is killed */ #define BATADV_TP_MAX_RTO 30000 @@ -108,10 +108,10 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid) * batadv_tp_cwnd() - compute the new cwnd size * @base: base cwnd size value * @increment: the value to add to base to get the new size - * @min: minumim cwnd value (usually MSS) + * @min: minimum cwnd value (usually MSS) * - * Return the new cwnd size and ensures it does not exceed the Advertised - * Receiver Window size. It is wrap around safe. + * Return the new cwnd size and ensure it does not exceed the Advertised + * Receiver Window size. It is wrapped around safely. * For details refer to Section 3.1 of RFC5681 * * Return: new congestion window size in bytes @@ -254,7 +254,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason, * @dst: the other endpoint MAC address to look for * * Look for a tp_vars object matching dst as end_point and return it after - * having incremented the refcounter. Return NULL is not found + * having increment the refcounter. Return NULL is not found * * Return: matching tp_vars or NULL when no tp_vars with @dst was found */ @@ -291,7 +291,7 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv, * @session: session identifier * * Look for a tp_vars object matching dst as end_point, session as tp meter - * session and return it after having incremented the refcounter. Return NULL + * session and return it after having increment the refcounter. 
Return NULL * is not found * * Return: matching tp_vars or NULL when no tp_vars was found diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a9635c882fe0..98a0aaaf0d50 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -301,7 +301,7 @@ void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry) * @vid: VLAN identifier * * Return: the number of originators advertising the given address/data - * (excluding ourself). + * (excluding our self). */ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv, const u8 *addr, unsigned short vid) @@ -842,7 +842,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr, * table. In case of success the value is updated with the real amount of * reserved bytes * Allocate the needed amount of memory for the entire TT TVLV and write its - * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data + * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data * objects, one per active VLAN served by the originator node. * * Return: the size of the allocated buffer or 0 in case of failure. @@ -1674,7 +1674,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global, * the function argument. * If a TT local entry exists for this non-mesh client remove it. * - * The caller must hold orig_node refcount. + * The caller must hold the orig_node refcount. * * Return: true if the new entry has been added, false otherwise */ @@ -1839,7 +1839,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, * @bat_priv: the bat priv with all the soft interface information * @tt_global_entry: global translation table entry to be analyzed * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). * Return: best originator list entry or NULL on errors. */ static struct batadv_tt_orig_list_entry * @@ -1887,7 +1887,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv, * @tt_global_entry: global translation table entry to be printed * @seq: debugfs table seq_file struct * - * This functon assumes the caller holds rcu_read_lock(). + * This function assumes the caller holds rcu_read_lock(). */ static void batadv_tt_global_print_entry(struct batadv_priv *bat_priv, diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c index 0963a43ad996..6a23a566cde1 100644 --- a/net/batman-adv/tvlv.c +++ b/net/batman-adv/tvlv.c @@ -353,8 +353,8 @@ u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv, * @tvlv_value: tvlv content * @tvlv_value_len: tvlv content length * - * Return: success if handler was not found or the return value of the handler - * callback. + * Return: success if the handler was not found or the return value of the + * handler callback. */ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv, struct batadv_tvlv_handler *tvlv_handler, diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index d152b8e81f61..cc151e1f23b2 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -455,8 +455,8 @@ struct batadv_orig_node { spinlock_t tt_buff_lock; /** - * @tt_lock: prevents from updating the table while reading it. Table - * update is made up by two operations (data structure update and + * @tt_lock: avoids concurrent read from and write to the table. 
Table + * update is made up of two operations (data structure update and * metadata -CRC/TTVN-recalculation) and they have to be executed * atomically in order to avoid another thread to read the * table/metadata between those. @@ -748,7 +748,7 @@ struct batadv_neigh_ifinfo { * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression */ struct batadv_bcast_duplist_entry { - /** @orig: mac address of orig node orginating the broadcast */ + /** @orig: mac address of orig node originating the broadcast */ u8 orig[ETH_ALEN]; /** @crc: crc32 checksum of broadcast payload */ @@ -1010,7 +1010,7 @@ struct batadv_priv_tt { /** * @commit_lock: prevents from executing a local TT commit while reading - * the local table. The local TT commit is made up by two operations + * the local table. The local TT commit is made up of two operations * (data structure update and metadata -CRC/TTVN- recalculation) and * they have to be executed atomically in order to avoid another thread * to read the table/metadata between those. @@ -1024,7 +1024,7 @@ struct batadv_priv_tt { #ifdef CONFIG_BATMAN_ADV_BLA /** - * struct batadv_priv_bla - per mesh interface bridge loope avoidance data + * struct batadv_priv_bla - per mesh interface bridge loop avoidance data */ struct batadv_priv_bla { /** @num_requests: number of bla requests in flight */ @@ -1718,7 +1718,7 @@ struct batadv_priv { spinlock_t softif_vlan_list_lock; #ifdef CONFIG_BATMAN_ADV_BLA - /** @bla: bridge loope avoidance data */ + /** @bla: bridge loop avoidance data */ struct batadv_priv_bla bla; #endif From 3bda14d09dc5789a895ab02b7dcfcec19b0a65b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Mon, 1 Jun 2020 22:35:22 +0200 Subject: [PATCH 0353/2056] batman-adv: Introduce a configurable per interface hop penalty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some setups multiple hard interfaces with similar link qualities or throughput values are available. But people have expressed the desire to consider one of them as a backup only. Some creative solutions are currently in use: Such people are configuring multiple batman-adv mesh/soft interfaces, wire them together with some veth pairs and then tune the hop penalty to achieve an effect similar to a tunable per interface hop penalty. This patch introduces a new, configurable, per hard interface hop penalty to simplify such setups. Signed-off-by: Linus Lüssing Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- include/uapi/linux/batman_adv.h | 3 ++- net/batman-adv/bat_iv_ogm.c | 17 +++++++++-------- net/batman-adv/bat_v_ogm.c | 13 ++++++++++--- net/batman-adv/hard-interface.c | 2 ++ net/batman-adv/netlink.c | 12 +++++++++++- net/batman-adv/types.h | 6 ++++++ 6 files changed, 40 insertions(+), 13 deletions(-) diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h index 8cf2ad11ead9..bb0ae945b36a 100644 --- a/include/uapi/linux/batman_adv.h +++ b/include/uapi/linux/batman_adv.h @@ -427,7 +427,8 @@ enum batadv_nl_attrs { /** * @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied - * to an originator message's tq-field on every hop. 
+ * to an originator message's tq-field on every hop and/or per + * hard interface */ BATADV_ATTR_HOP_PENALTY, diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 5b3a41983156..a4faf5f904d9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1075,10 +1075,10 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, struct batadv_neigh_ifinfo *neigh_ifinfo; u8 total_count; u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own; + unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE; unsigned int neigh_rq_inv_cube, neigh_rq_max_cube; unsigned int tq_asym_penalty, inv_asym_penalty; unsigned int combined_tq; - unsigned int tq_iface_penalty; bool ret = false; /* find corresponding one hop neighbor */ @@ -1157,31 +1157,32 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube; inv_asym_penalty /= neigh_rq_max_cube; tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty; + tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty); /* penalize if the OGM is forwarded on the same interface. WiFi * interfaces and other half duplex devices suffer from throughput * drops as they can't send and receive at the same time. */ - tq_iface_penalty = BATADV_TQ_MAX_VALUE; if (if_outgoing && if_incoming == if_outgoing && batadv_is_wifi_hardif(if_outgoing)) - tq_iface_penalty = batadv_hop_penalty(BATADV_TQ_MAX_VALUE, - bat_priv); + tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty, + bat_priv); combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty * - tq_iface_penalty; + tq_iface_hop_penalty; combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE; batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", + "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, - neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_penalty, - batadv_ogm_packet->tq, if_incoming->net_dev->name, + neigh_rq_count, tq_own, tq_asym_penalty, + tq_iface_hop_penalty, batadv_ogm_packet->tq, + if_incoming->net_dev->name, if_outgoing ? if_outgoing->net_dev->name : "DEFAULT"); /* if link has the minimum required transmission quality diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 0d404f7bcd9f..0f8495b9eeb1 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -455,15 +455,17 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface) * @throughput: the current throughput * * Apply a penalty on the current throughput metric value based on the - * characteristic of the interface where the OGM has been received. The return - * value is computed as follows: + * characteristic of the interface where the OGM has been received. + * + * Initially the per hardif hop penalty is applied to the throughput. After + * that the return value is then computed as follows: * - throughput * 50% if the incoming and outgoing interface are the * same WiFi interface and the throughput is above * 1MBit/s * - throughput if the outgoing interface is the default * interface (i.e. 
this OGM is processed for the * internal table and not forwarded) - * - throughput * hop penalty otherwise + * - throughput * node hop penalty otherwise * * Return: the penalised throughput metric. */ @@ -472,9 +474,14 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv, struct batadv_hard_iface *if_outgoing, u32 throughput) { + int if_hop_penalty = atomic_read(&if_incoming->hop_penalty); int hop_penalty = atomic_read(&bat_priv->hop_penalty); int hop_penalty_max = BATADV_TQ_MAX_VALUE; + /* Apply per hardif hop penalty */ + throughput = throughput * (hop_penalty_max - if_hop_penalty) / + hop_penalty_max; + /* Don't apply hop penalty in default originator table. */ if (if_outgoing == BATADV_IF_DEFAULT) return throughput; diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 53c27c67cc11..fa06b51c0144 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -939,6 +939,8 @@ batadv_hardif_add_interface(struct net_device *net_dev) if (batadv_is_wifi_hardif(hard_iface)) hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS; + atomic_set(&hard_iface->hop_penalty, 0); + batadv_v_hardif_init(hard_iface); batadv_check_known_mac_addr(hard_iface->net_dev); diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index cfb00dfa468a..dc193618a761 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -826,6 +826,10 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg, goto nla_put_failure; } + if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY, + atomic_read(&hard_iface->hop_penalty))) + goto nla_put_failure; + #ifdef CONFIG_BATMAN_ADV_BATMAN_V if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL, atomic_read(&hard_iface->bat_v.elp_interval))) @@ -920,9 +924,15 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb, { struct batadv_hard_iface *hard_iface = info->user_ptr[1]; struct batadv_priv *bat_priv = info->user_ptr[0]; + struct nlattr *attr; + + if (info->attrs[BATADV_ATTR_HOP_PENALTY]) { + attr = info->attrs[BATADV_ATTR_HOP_PENALTY]; + + atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr)); + } #ifdef CONFIG_BATMAN_ADV_BATMAN_V - struct nlattr *attr; if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) { attr = info->attrs[BATADV_ATTR_ELP_INTERVAL]; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index cc151e1f23b2..ed519efa3c36 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -208,6 +208,12 @@ struct batadv_hard_iface { /** @rcu: struct used for freeing in an RCU-safe manner */ struct rcu_head rcu; + /** + * @hop_penalty: penalty which will be applied to the tq-field + * of an OGM received via this interface + */ + atomic_t hop_penalty; + /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */ struct batadv_hard_iface_bat_iv bat_iv; From 4ced637bd24ac5f824eb8169324712f0bcd50d92 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 24 Jun 2020 23:21:17 +0530 Subject: [PATCH 0354/2056] bnx2x: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. The driver was also calling bnx2x_set_power_state() to set the power state of the device by changing the device's registers' value. It is no more needed. Compile-tested only. Signed-off-by: Vaibhav Gupta Acked-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 15 ++++++--------- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4 +--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 +-- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ee9e9290f112..e3d92e4f2193 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4988,8 +4988,9 @@ void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue) bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0); } -int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused bnx2x_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp; @@ -5001,8 +5002,6 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) rtnl_lock(); - pci_save_state(pdev); - if (!netif_running(dev)) { rtnl_unlock(); return 0; @@ -5012,15 +5011,14 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); - bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); - rtnl_unlock(); return 0; } -int bnx2x_resume(struct pci_dev *pdev) +static int __maybe_unused bnx2x_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *dev = pci_get_drvdata(pdev); struct bnx2x *bp; int rc; @@ -5038,14 +5036,11 @@ int bnx2x_resume(struct pci_dev *pdev) rtnl_lock(); - pci_restore_state(pdev); - if (!netif_running(dev)) { rtnl_unlock(); return 0; } - bnx2x_set_power_state(bp, PCI_D0); netif_device_attach(dev); rc = bnx2x_nic_load(bp, LOAD_OPEN); @@ -5055,6 +5050,8 @@ int bnx2x_resume(struct pci_dev *pdev) return rc; } +SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume); + void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt, u32 cid) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 6f1352d51cb2..a9817cd283fe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -541,9 +541,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p); /* NAPI poll Tx part */ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata); -/* suspend/resume callbacks */ -int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state); -int bnx2x_resume(struct pci_dev *pdev); +extern const struct dev_pm_ops bnx2x_pm_ops; /* Release IRQ vectors */ void bnx2x_free_irq(struct bnx2x *bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index db5107e7937c..ea60cd436f5e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14462,8 +14462,7 @@ static struct pci_driver bnx2x_pci_driver = { .id_table = bnx2x_pci_tbl, .probe = bnx2x_init_one, .remove = bnx2x_remove_one, - .suspend = bnx2x_suspend, - .resume = bnx2x_resume, + .driver.pm = &bnx2x_pm_ops, .err_handler = &bnx2x_err_handler, #ifdef CONFIG_BNX2X_SRIOV .sriov_configure = bnx2x_sriov_configure, From 52660c0ec9e4ec26934ad9eb6f73857ad12fb4e9 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Thu, 25 Jun 2020 20:21:42 +0200 Subject: [PATCH 0355/2056] net: stmmac: dwmac-meson8b: use clk_parent_data for clock registration Simplify meson8b_init_rgmii_tx_clk() by using struct 
clk_parent_data to initialize the clock parents. No functional changes intended. Signed-off-by: Martin Blumenstingl Signed-off-by: David S. Miller --- .../ethernet/stmicro/stmmac/dwmac-meson8b.c | 49 +++++++------------ 1 file changed, 17 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index 544bc621146c..5afcf05bbf9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -69,8 +69,6 @@ */ #define PRG_ETH0_ADJ_SKEW GENMASK(24, 20) -#define MUX_CLK_NUM_PARENTS 2 - struct meson8b_dwmac; struct meson8b_dwmac_data { @@ -110,12 +108,12 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac, const char *name_suffix, - const char **parent_names, + const struct clk_parent_data *parents, int num_parents, const struct clk_ops *ops, struct clk_hw *hw) { - struct clk_init_data init; + struct clk_init_data init = { }; char clk_name[32]; snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dwmac->dev), @@ -124,7 +122,7 @@ static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac, init.name = clk_name; init.ops = ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = parent_names; + init.parent_data = parents; init.num_parents = num_parents; hw->init = &init; @@ -134,11 +132,12 @@ static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac, static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { - int i, ret; struct clk *clk; struct device *dev = dwmac->dev; - const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; - struct meson8b_dwmac_clk_configs *clk_configs; + static const struct clk_parent_data mux_parents[] = { + { .fw_name = "clkin0", }, + { .fw_name = "clkin1", }, + }; static const struct clk_div_table div_table[] = { { .div = 2, .val = 2, }, { .div = 3, .val = 3, }, @@ -148,62 +147,48 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac) { .div = 7, .val = 7, }, { /* end of array */ } }; + struct meson8b_dwmac_clk_configs *clk_configs; + struct clk_parent_data parent_data = { }; clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); if (!clk_configs) return -ENOMEM; - /* get the mux parents from DT */ - for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { - char name[16]; - - snprintf(name, sizeof(name), "clkin%d", i); - clk = devm_clk_get(dev, name); - if (IS_ERR(clk)) { - ret = PTR_ERR(clk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Missing clock %s\n", name); - return ret; - } - - mux_parent_names[i] = __clk_get_name(clk); - } - clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; - clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names, - MUX_CLK_NUM_PARENTS, &clk_mux_ops, + clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents, + ARRAY_SIZE(mux_parents), &clk_mux_ops, &clk_configs->m250_mux.hw); if (WARN_ON(IS_ERR(clk))) return PTR_ERR(clk); - parent_name = __clk_get_name(clk); + parent_data.hw = &clk_configs->m250_mux.hw; clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; clk_configs->m250_div.table = div_table; clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | CLK_DIVIDER_ROUND_CLOSEST; - 
clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, + clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_data, 1, &clk_divider_ops, &clk_configs->m250_div.hw); if (WARN_ON(IS_ERR(clk))) return PTR_ERR(clk); - parent_name = __clk_get_name(clk); + parent_data.hw = &clk_configs->m250_div.hw; clk_configs->fixed_div2.mult = 1; clk_configs->fixed_div2.div = 2; - clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_name, 1, + clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_data, 1, &clk_fixed_factor_ops, &clk_configs->fixed_div2.hw); if (WARN_ON(IS_ERR(clk))) return PTR_ERR(clk); - parent_name = __clk_get_name(clk); + parent_data.hw = &clk_configs->fixed_div2.hw; clk_configs->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0; clk_configs->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN; - clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_name, 1, + clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_data, 1, &clk_gate_ops, &clk_configs->rgmii_tx_en.hw); if (WARN_ON(IS_ERR(clk))) From 18c955b730002afdb0f86be39c0d202450acbbfc Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 25 Jun 2020 21:10:02 -0700 Subject: [PATCH 0356/2056] bonding: Remove extraneous parentheses in bond_setup Clang warns: drivers/net/bonding/bond_main.c:4657:23: warning: equality comparison with extraneous parentheses [-Wparentheses-equality] if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~ drivers/net/bonding/bond_main.c:4681:23: warning: equality comparison with extraneous parentheses [-Wparentheses-equality] if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) ~~~~~~~~~~~~~~~~^~~~~~~~~~~~~~~~~~~~~~~~~ This warning occurs when a comparision has two sets of parentheses, which is usually the convention for doing an assignment within an if statement. Since equality comparisons do not need a second set of parentheses, remove them to fix the warning. Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Link: https://github.com/ClangBuiltLinux/linux/issues/1066 Signed-off-by: Nathan Chancellor Reported-by: kernelci.org bot Reviewed-by: Nick Desaulniers Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 4ef99efc37f6..b3479584cc16 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4654,7 +4654,7 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ - if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; bond->xs = NULL; #endif /* CONFIG_XFRM_OFFLOAD */ @@ -4678,7 +4678,7 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; #ifdef CONFIG_XFRM_OFFLOAD - if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)) + if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) bond_dev->hw_features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; From 61b5cc20c877f9703fa46b24c273cbb5affb26e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Gonz=C3=A1lez=20Cabanelas?= Date: Fri, 26 Jun 2020 17:18:19 +0200 Subject: [PATCH 0357/2056] net: mvneta: speed down the PHY, if WoL used, to save energy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some PHYs connected to this ethernet hardware support the WoL feature. But when WoL is enabled and the machine is powered off, the PHY remains waiting for a magic packet at max speed (i.e. 1Gbps), which is a waste of energy. Slow down the PHY speed before stopping the ethernet if WoL is enabled, and save some energy while the machine is powered off or sleeping. Tested using an Armada 370 based board (LS421DE) equipped with a Marvell 88E1518 PHY. Signed-off-by: Daniel González Cabanelas Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 197d8c6d189d..37b9c75a5a60 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3611,6 +3611,10 @@ static void mvneta_start_dev(struct mvneta_port *pp) MVNETA_CAUSE_LINK_CHANGE); phylink_start(pp->phylink); + + /* We may have called phy_speed_down before */ + phylink_speed_up(pp->phylink); + netif_tx_start_all_queues(pp->dev); clear_bit(__MVNETA_DOWN, &pp->state); @@ -3622,6 +3626,9 @@ static void mvneta_stop_dev(struct mvneta_port *pp) set_bit(__MVNETA_DOWN, &pp->state); + if (device_may_wakeup(&pp->dev->dev)) + phylink_speed_down(pp->phylink, false); + phylink_stop(pp->phylink); if (!pp->neta_armada3700) { @@ -4090,6 +4097,10 @@ static int mvneta_mdio_probe(struct mvneta_port *pp) phylink_ethtool_get_wol(pp->phylink, &wol); device_set_wakeup_capable(&pp->dev->dev, !!wol.supported); + /* PHY WoL may be enabled but device wakeup disabled */ + if (wol.supported) + device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts); + return err; } From 54e80ded36fc88d3ba2ca7891910baaa08671f8b Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:20 +0200 Subject: [PATCH 0358/2056] net: phy: arrange headers in mdio_bus.c alphabetically Keeping the headers in alphabetical order is better for readability and allows to easily see if given header is already included. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index ab9233c558d8..134f82d72da8 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -8,32 +8,32 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include -#include #include #include +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include #include +#include #define CREATE_TRACE_POINTS #include From 1d0018a4306877f7ca4591ca6ea1d0099e2d0ef2 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:21 +0200 Subject: [PATCH 0359/2056] net: phy: arrange headers in mdio_device.c alphabetically Keeping the headers in alphabetical order is better for readability and allows to easily see if given header is already included. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index c1d345c3cab3..f60443e48622 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -20,7 +21,6 @@ #include #include #include -#include void mdio_device_free(struct mdio_device *mdiodev) { From e42bcd0f7e04e507ba90927598825efd99f948cb Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:22 +0200 Subject: [PATCH 0360/2056] net: phy: arrange headers in phy_device.c alphabetically Keeping the headers in alphabetical order is better for readability and allows to easily see if given header is already included. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 5998fb505b21..164e5429ab5a 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -9,29 +9,29 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include -#include -#include -#include -#include -#include -#include +#include #include -#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include -#include -#include +#include #include #include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include +#include MODULE_DESCRIPTION("PHY library"); MODULE_AUTHOR("Andy Fleming"); From 333740981f94fa80326cc8e5d2da105f17bc1dd5 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:23 +0200 Subject: [PATCH 0361/2056] net: mdio: add a forward declaration for reset_control to mdio.h This header refers to struct reset_control but doesn't include any reset header. 
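As an aside, a forward declaration is sufficient in a case like this because the header only ever handles pointers to the structure, and a pointer's size does not depend on the struct's layout. The following stand-alone sketch uses made-up "foo" names and is not taken from the actual kernel headers:

/* foo.h: only a pointer to struct reset_control is stored, so a forward
 * declaration suffices and no reset header has to be included here.
 */
struct reset_control;

struct foo_bus {
	struct reset_control *reset;	/* opaque pointer, layout not needed */
};

int foo_bus_probe(struct foo_bus *bus);

/* Only the implementation file that actually calls into the reset
 * framework needs to include <linux/reset.h> for the full API.
 */
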
The structure definition is probably somehow indirectly pulled in since no warnings are reported but for the sake of correctness add the forward declaration for struct reset_control. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/mdio.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 36d2e0673d03..898cbf00332a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -18,6 +18,7 @@ struct gpio_desc; struct mii_bus; +struct reset_control; /* Multiple levels of nesting are possible. However typically this is * limited to nested DSA like layer, a MUX layer, and the normal From 1dba6995731e1c1a8ea167aea74c0a917b437e6c Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:24 +0200 Subject: [PATCH 0362/2056] net: phy: reset the PHY even if probe() is not implemented Currently we only call phy_device_reset() if the PHY driver implements the probe() callback. This is not mandatory and many drivers (e.g. realtek) don't need probe() for most devices but still can have reset GPIOs defined. There's no reason to depend on the presence of probe() here so pull the reset code out of the if clause. Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 164e5429ab5a..eb1068a77ce1 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -2846,16 +2846,13 @@ static int phy_probe(struct device *dev) mutex_lock(&phydev->lock); - if (phydev->drv->probe) { - /* Deassert the reset signal */ - phy_device_reset(phydev, 0); + /* Deassert the reset signal */ + phy_device_reset(phydev, 0); + if (phydev->drv->probe) { err = phydev->drv->probe(phydev); - if (err) { - /* Assert the reset signal */ - phy_device_reset(phydev, 1); + if (err) goto out; - } } /* Start out supporting everything. Eventually, @@ -2917,6 +2914,10 @@ static int phy_probe(struct device *dev) phydev->state = PHY_READY; out: + /* Assert the reset signal */ + if (err) + phy_device_reset(phydev, 1); + mutex_unlock(&phydev->lock); return err; @@ -2935,12 +2936,12 @@ static int phy_remove(struct device *dev) sfp_bus_del_upstream(phydev->sfp_bus); phydev->sfp_bus = NULL; - if (phydev->drv && phydev->drv->remove) { + if (phydev->drv && phydev->drv->remove) phydev->drv->remove(phydev); - /* Assert the reset signal */ - phy_device_reset(phydev, 1); - } + /* Assert the reset signal */ + phy_device_reset(phydev, 1); + phydev->drv = NULL; return 0; From 96e263592bf12476b100a48ebdeb4b637bf48f9f Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Fri, 26 Jun 2020 17:53:25 +0200 Subject: [PATCH 0363/2056] net: phy: mdio: reset MDIO devices even if probe() is not implemented Similarily to PHY drivers - there's no reason to require probe() to be implemented in order to call mdio_device_reset(). MDIO devices can have resets defined without needing to do anything in probe(). Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_device.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index f60443e48622..be615504b829 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -150,10 +150,10 @@ static int mdio_probe(struct device *dev) struct mdio_driver *mdiodrv = to_mdio_driver(drv); int err = 0; - if (mdiodrv->probe) { - /* Deassert the reset signal */ - mdio_device_reset(mdiodev, 0); + /* Deassert the reset signal */ + mdio_device_reset(mdiodev, 0); + if (mdiodrv->probe) { err = mdiodrv->probe(mdiodev); if (err) { /* Assert the reset signal */ @@ -170,12 +170,11 @@ static int mdio_remove(struct device *dev) struct device_driver *drv = mdiodev->dev.driver; struct mdio_driver *mdiodrv = to_mdio_driver(drv); - if (mdiodrv->remove) { + if (mdiodrv->remove) mdiodrv->remove(mdiodev); - /* Assert the reset signal */ - mdio_device_reset(mdiodev, 1); - } + /* Assert the reset signal */ + mdio_device_reset(mdiodev, 1); return 0; } From 132db93572821ec2fdf81e354cc40f558faf7e4f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:24 -0700 Subject: [PATCH 0364/2056] docs: networking: reorganize driver documentation again Organize driver documentation by device type. Most documents have fairly verbose yet uninformative names, so let users first select a well defined device type, and then search for a particular driver. While at it rename the section from Vendor drivers to Hardware drivers. This seems more accurate, besides people sometimes refer to out-of-tree drivers as vendor drivers. Signed-off-by: Jakub Kicinski Acked-by: Jeff Kirsher Acked-by: Shannon Nelson Signed-off-by: David S. Miller --- .../devicetree/bindings/misc/fsl,qoriq-mc.txt | 2 +- .../networking/device_drivers/cable/index.rst | 18 ++++++ .../device_drivers/{ => cable}/sb1000.rst | 0 .../device_drivers/cellular/index.rst | 18 ++++++ .../{ => cellular}/qualcomm/rmnet.rst | 0 .../{ => ethernet}/3com/3c509.rst | 0 .../{ => ethernet}/3com/vortex.rst | 2 - .../{ => ethernet}/amazon/ena.rst | 0 .../{ => ethernet}/aquantia/atlantic.rst | 0 .../{ => ethernet}/chelsio/cxgb.rst | 0 .../{ => ethernet}/cirrus/cs89x0.rst | 0 .../{ => ethernet}/davicom/dm9000.rst | 0 .../{ => ethernet}/dec/de4x5.rst | 0 .../{ => ethernet}/dec/dmfe.rst | 0 .../{ => ethernet}/dlink/dl2k.rst | 0 .../{ => ethernet}/freescale/dpaa.rst | 0 .../freescale/dpaa2/dpio-driver.rst | 6 +- .../freescale/dpaa2/ethernet-driver.rst | 3 +- .../{ => ethernet}/freescale/dpaa2/index.rst | 0 .../freescale/dpaa2/mac-phy-support.rst | 0 .../freescale/dpaa2/overview.rst | 0 .../{ => ethernet}/freescale/gianfar.rst | 0 .../{ => ethernet}/google/gve.rst | 0 .../device_drivers/ethernet/index.rst | 58 +++++++++++++++++++ .../{ => ethernet}/intel/e100.rst | 0 .../{ => ethernet}/intel/e1000.rst | 0 .../{ => ethernet}/intel/e1000e.rst | 0 .../{ => ethernet}/intel/fm10k.rst | 0 .../{ => ethernet}/intel/i40e.rst | 0 .../{ => ethernet}/intel/iavf.rst | 0 .../{ => ethernet}/intel/ice.rst | 0 .../{ => ethernet}/intel/igb.rst | 0 .../{ => ethernet}/intel/igbvf.rst | 0 .../{ => ethernet}/intel/ixgb.rst | 0 .../{ => ethernet}/intel/ixgbe.rst | 0 .../{ => ethernet}/intel/ixgbevf.rst | 0 .../{ => ethernet}/marvell/octeontx2.rst | 0 .../{ => ethernet}/mellanox/mlx5.rst | 0 .../{ => ethernet}/microsoft/netvsc.rst | 0 .../{ => ethernet}/neterion/s2io.rst | 0 .../{ => ethernet}/neterion/vxge.rst | 0 .../{ => ethernet}/netronome/nfp.rst | 0 .../{ => 
ethernet}/pensando/ionic.rst | 0 .../{ => ethernet}/smsc/smc9.rst | 0 .../{ => ethernet}/stmicro/stmmac.rst | 0 .../device_drivers/{ => ethernet}/ti/cpsw.rst | 0 .../{ => ethernet}/ti/cpsw_switchdev.rst | 0 .../device_drivers/{ => ethernet}/ti/tlan.rst | 0 .../{ => ethernet}/toshiba/spider_net.rst | 0 .../networking/device_drivers/index.rst | 51 ++-------------- .../networking/device_drivers/wifi/index.rst | 19 ++++++ .../{ => wifi}/intel/ipw2100.rst | 0 .../{ => wifi}/intel/ipw2200.rst | 0 MAINTAINERS | 55 +++++++----------- drivers/net/Kconfig | 2 +- drivers/net/ethernet/3com/3c59x.c | 4 +- drivers/net/ethernet/3com/Kconfig | 4 +- drivers/net/ethernet/chelsio/Kconfig | 2 +- drivers/net/ethernet/cirrus/Kconfig | 2 +- drivers/net/ethernet/dec/tulip/Kconfig | 4 +- drivers/net/ethernet/dlink/dl2k.c | 10 +--- drivers/net/ethernet/intel/Kconfig | 24 ++++---- drivers/net/ethernet/neterion/Kconfig | 4 +- drivers/net/ethernet/pensando/Kconfig | 2 +- drivers/net/ethernet/smsc/Kconfig | 4 +- drivers/net/ethernet/ti/Kconfig | 2 +- drivers/net/ethernet/ti/tlan.c | 2 +- drivers/net/wireless/intel/ipw2x00/Kconfig | 4 +- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- 69 files changed, 180 insertions(+), 124 deletions(-) create mode 100644 Documentation/networking/device_drivers/cable/index.rst rename Documentation/networking/device_drivers/{ => cable}/sb1000.rst (100%) create mode 100644 Documentation/networking/device_drivers/cellular/index.rst rename Documentation/networking/device_drivers/{ => cellular}/qualcomm/rmnet.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/3com/3c509.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/3com/vortex.rst (99%) rename Documentation/networking/device_drivers/{ => ethernet}/amazon/ena.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/aquantia/atlantic.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/chelsio/cxgb.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/cirrus/cs89x0.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/davicom/dm9000.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/dec/de4x5.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/dec/dmfe.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/dlink/dl2k.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa2/dpio-driver.rst (97%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa2/ethernet-driver.rst (98%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa2/index.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa2/mac-phy-support.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/dpaa2/overview.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/freescale/gianfar.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/google/gve.rst (100%) create mode 100644 Documentation/networking/device_drivers/ethernet/index.rst rename Documentation/networking/device_drivers/{ => ethernet}/intel/e100.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/e1000.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/e1000e.rst (100%) rename Documentation/networking/device_drivers/{ => 
ethernet}/intel/fm10k.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/i40e.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/iavf.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/ice.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/igb.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/igbvf.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/ixgb.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/ixgbe.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/intel/ixgbevf.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/marvell/octeontx2.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/mellanox/mlx5.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/microsoft/netvsc.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/neterion/s2io.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/neterion/vxge.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/netronome/nfp.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/pensando/ionic.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/smsc/smc9.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/stmicro/stmmac.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/ti/cpsw.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/ti/cpsw_switchdev.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/ti/tlan.rst (100%) rename Documentation/networking/device_drivers/{ => ethernet}/toshiba/spider_net.rst (100%) create mode 100644 Documentation/networking/device_drivers/wifi/index.rst rename Documentation/networking/device_drivers/{ => wifi}/intel/ipw2100.rst (100%) rename Documentation/networking/device_drivers/{ => wifi}/intel/ipw2200.rst (100%) diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt index 9134e9bcca56..b12f9be1251f 100644 --- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt +++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt @@ -10,7 +10,7 @@ such as network interfaces, crypto accelerator instances, L2 switches, etc. For an overview of the DPAA2 architecture and fsl-mc bus see: -Documentation/networking/device_drivers/freescale/dpaa2/overview.rst +Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst As described in the above overview, all DPAA2 objects in a DPRC share the same hardware "isolation context" and a 10-bit value called an ICID diff --git a/Documentation/networking/device_drivers/cable/index.rst b/Documentation/networking/device_drivers/cable/index.rst new file mode 100644 index 000000000000..cce3c4392972 --- /dev/null +++ b/Documentation/networking/device_drivers/cable/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Cable Modem Device Drivers +========================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + sb1000 + +.. 
only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/sb1000.rst b/Documentation/networking/device_drivers/cable/sb1000.rst similarity index 100% rename from Documentation/networking/device_drivers/sb1000.rst rename to Documentation/networking/device_drivers/cable/sb1000.rst diff --git a/Documentation/networking/device_drivers/cellular/index.rst b/Documentation/networking/device_drivers/cellular/index.rst new file mode 100644 index 000000000000..fc1812d3fc70 --- /dev/null +++ b/Documentation/networking/device_drivers/cellular/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Cellular Modem Device Drivers +============================= + +Contents: + +.. toctree:: + :maxdepth: 2 + + qualcomm/rmnet + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/qualcomm/rmnet.rst b/Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst similarity index 100% rename from Documentation/networking/device_drivers/qualcomm/rmnet.rst rename to Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst diff --git a/Documentation/networking/device_drivers/3com/3c509.rst b/Documentation/networking/device_drivers/ethernet/3com/3c509.rst similarity index 100% rename from Documentation/networking/device_drivers/3com/3c509.rst rename to Documentation/networking/device_drivers/ethernet/3com/3c509.rst diff --git a/Documentation/networking/device_drivers/3com/vortex.rst b/Documentation/networking/device_drivers/ethernet/3com/vortex.rst similarity index 99% rename from Documentation/networking/device_drivers/3com/vortex.rst rename to Documentation/networking/device_drivers/ethernet/3com/vortex.rst index 800add5be338..eab10fc6da5c 100644 --- a/Documentation/networking/device_drivers/3com/vortex.rst +++ b/Documentation/networking/device_drivers/ethernet/3com/vortex.rst @@ -4,8 +4,6 @@ 3Com Vortex device driver ========================= -Documentation/networking/device_drivers/3com/vortex.rst - Andrew Morton 30 April 2000 diff --git a/Documentation/networking/device_drivers/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst similarity index 100% rename from Documentation/networking/device_drivers/amazon/ena.rst rename to Documentation/networking/device_drivers/ethernet/amazon/ena.rst diff --git a/Documentation/networking/device_drivers/aquantia/atlantic.rst b/Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst similarity index 100% rename from Documentation/networking/device_drivers/aquantia/atlantic.rst rename to Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst diff --git a/Documentation/networking/device_drivers/chelsio/cxgb.rst b/Documentation/networking/device_drivers/ethernet/chelsio/cxgb.rst similarity index 100% rename from Documentation/networking/device_drivers/chelsio/cxgb.rst rename to Documentation/networking/device_drivers/ethernet/chelsio/cxgb.rst diff --git a/Documentation/networking/device_drivers/cirrus/cs89x0.rst b/Documentation/networking/device_drivers/ethernet/cirrus/cs89x0.rst similarity index 100% rename from Documentation/networking/device_drivers/cirrus/cs89x0.rst rename to Documentation/networking/device_drivers/ethernet/cirrus/cs89x0.rst diff --git a/Documentation/networking/device_drivers/davicom/dm9000.rst b/Documentation/networking/device_drivers/ethernet/davicom/dm9000.rst similarity index 100% rename from 
Documentation/networking/device_drivers/davicom/dm9000.rst rename to Documentation/networking/device_drivers/ethernet/davicom/dm9000.rst diff --git a/Documentation/networking/device_drivers/dec/de4x5.rst b/Documentation/networking/device_drivers/ethernet/dec/de4x5.rst similarity index 100% rename from Documentation/networking/device_drivers/dec/de4x5.rst rename to Documentation/networking/device_drivers/ethernet/dec/de4x5.rst diff --git a/Documentation/networking/device_drivers/dec/dmfe.rst b/Documentation/networking/device_drivers/ethernet/dec/dmfe.rst similarity index 100% rename from Documentation/networking/device_drivers/dec/dmfe.rst rename to Documentation/networking/device_drivers/ethernet/dec/dmfe.rst diff --git a/Documentation/networking/device_drivers/dlink/dl2k.rst b/Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst similarity index 100% rename from Documentation/networking/device_drivers/dlink/dl2k.rst rename to Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst diff --git a/Documentation/networking/device_drivers/freescale/dpaa.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa.rst similarity index 100% rename from Documentation/networking/device_drivers/freescale/dpaa.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa.rst diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/dpio-driver.rst similarity index 97% rename from Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa2/dpio-driver.rst index 17dbee1ac53e..c50fd46631e0 100644 --- a/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst +++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/dpio-driver.rst @@ -19,8 +19,10 @@ pool management for network interfaces. This document provides an overview the Linux DPIO driver, its subcomponents, and its APIs. -See Documentation/networking/device_drivers/freescale/dpaa2/overview.rst for -a general overview of DPAA2 and the general DPAA2 driver architecture in Linux. +See +Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst +for a general overview of DPAA2 and the general DPAA2 driver architecture +in Linux. Driver Overview --------------- diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst similarity index 98% rename from Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst index cb4c9a0c5a17..682f3986c15b 100644 --- a/Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst +++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst @@ -33,7 +33,8 @@ hardware resources, like queues, do not have a corresponding MC object and are treated as internal resources of other objects. For a more detailed description of the DPAA2 architecture and its object -abstractions see *Documentation/networking/device_drivers/freescale/dpaa2/overview.rst*. +abstractions see +*Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst*. 
Each Linux net device is built on top of a Datapath Network Interface (DPNI) object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/index.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/index.rst similarity index 100% rename from Documentation/networking/device_drivers/freescale/dpaa2/index.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa2/index.rst diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst similarity index 100% rename from Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst diff --git a/Documentation/networking/device_drivers/freescale/dpaa2/overview.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst similarity index 100% rename from Documentation/networking/device_drivers/freescale/dpaa2/overview.rst rename to Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst diff --git a/Documentation/networking/device_drivers/freescale/gianfar.rst b/Documentation/networking/device_drivers/ethernet/freescale/gianfar.rst similarity index 100% rename from Documentation/networking/device_drivers/freescale/gianfar.rst rename to Documentation/networking/device_drivers/ethernet/freescale/gianfar.rst diff --git a/Documentation/networking/device_drivers/google/gve.rst b/Documentation/networking/device_drivers/ethernet/google/gve.rst similarity index 100% rename from Documentation/networking/device_drivers/google/gve.rst rename to Documentation/networking/device_drivers/ethernet/google/gve.rst diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst new file mode 100644 index 000000000000..fd3873024da8 --- /dev/null +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -0,0 +1,58 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Ethernet Device Drivers +======================= + +Device drivers for Ethernet and Ethernet-based virtual function devices. + +Contents: + +.. toctree:: + :maxdepth: 2 + + 3com/3c509 + 3com/vortex + amazon/ena + aquantia/atlantic + chelsio/cxgb + cirrus/cs89x0 + dlink/dl2k + davicom/dm9000 + dec/de4x5 + dec/dmfe + freescale/dpaa + freescale/dpaa2/index + freescale/gianfar + google/gve + intel/e100 + intel/e1000 + intel/e1000e + intel/fm10k + intel/igb + intel/igbvf + intel/ixgb + intel/ixgbe + intel/ixgbevf + intel/i40e + intel/iavf + intel/ice + marvell/octeontx2 + mellanox/mlx5 + microsoft/netvsc + neterion/s2io + neterion/vxge + netronome/nfp + pensando/ionic + smsc/smc9 + stmicro/stmmac + ti/cpsw + ti/cpsw_switchdev + ti/tlan + toshiba/spider_net + +.. 
only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/intel/e100.rst b/Documentation/networking/device_drivers/ethernet/intel/e100.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/e100.rst rename to Documentation/networking/device_drivers/ethernet/intel/e100.rst diff --git a/Documentation/networking/device_drivers/intel/e1000.rst b/Documentation/networking/device_drivers/ethernet/intel/e1000.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/e1000.rst rename to Documentation/networking/device_drivers/ethernet/intel/e1000.rst diff --git a/Documentation/networking/device_drivers/intel/e1000e.rst b/Documentation/networking/device_drivers/ethernet/intel/e1000e.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/e1000e.rst rename to Documentation/networking/device_drivers/ethernet/intel/e1000e.rst diff --git a/Documentation/networking/device_drivers/intel/fm10k.rst b/Documentation/networking/device_drivers/ethernet/intel/fm10k.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/fm10k.rst rename to Documentation/networking/device_drivers/ethernet/intel/fm10k.rst diff --git a/Documentation/networking/device_drivers/intel/i40e.rst b/Documentation/networking/device_drivers/ethernet/intel/i40e.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/i40e.rst rename to Documentation/networking/device_drivers/ethernet/intel/i40e.rst diff --git a/Documentation/networking/device_drivers/intel/iavf.rst b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/iavf.rst rename to Documentation/networking/device_drivers/ethernet/intel/iavf.rst diff --git a/Documentation/networking/device_drivers/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/ice.rst rename to Documentation/networking/device_drivers/ethernet/intel/ice.rst diff --git a/Documentation/networking/device_drivers/intel/igb.rst b/Documentation/networking/device_drivers/ethernet/intel/igb.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/igb.rst rename to Documentation/networking/device_drivers/ethernet/intel/igb.rst diff --git a/Documentation/networking/device_drivers/intel/igbvf.rst b/Documentation/networking/device_drivers/ethernet/intel/igbvf.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/igbvf.rst rename to Documentation/networking/device_drivers/ethernet/intel/igbvf.rst diff --git a/Documentation/networking/device_drivers/intel/ixgb.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgb.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/ixgb.rst rename to Documentation/networking/device_drivers/ethernet/intel/ixgb.rst diff --git a/Documentation/networking/device_drivers/intel/ixgbe.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/ixgbe.rst rename to Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst diff --git a/Documentation/networking/device_drivers/intel/ixgbevf.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgbevf.rst similarity index 100% rename from 
Documentation/networking/device_drivers/intel/ixgbevf.rst rename to Documentation/networking/device_drivers/ethernet/intel/ixgbevf.rst diff --git a/Documentation/networking/device_drivers/marvell/octeontx2.rst b/Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst similarity index 100% rename from Documentation/networking/device_drivers/marvell/octeontx2.rst rename to Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst diff --git a/Documentation/networking/device_drivers/mellanox/mlx5.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst similarity index 100% rename from Documentation/networking/device_drivers/mellanox/mlx5.rst rename to Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst diff --git a/Documentation/networking/device_drivers/microsoft/netvsc.rst b/Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst similarity index 100% rename from Documentation/networking/device_drivers/microsoft/netvsc.rst rename to Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst diff --git a/Documentation/networking/device_drivers/neterion/s2io.rst b/Documentation/networking/device_drivers/ethernet/neterion/s2io.rst similarity index 100% rename from Documentation/networking/device_drivers/neterion/s2io.rst rename to Documentation/networking/device_drivers/ethernet/neterion/s2io.rst diff --git a/Documentation/networking/device_drivers/neterion/vxge.rst b/Documentation/networking/device_drivers/ethernet/neterion/vxge.rst similarity index 100% rename from Documentation/networking/device_drivers/neterion/vxge.rst rename to Documentation/networking/device_drivers/ethernet/neterion/vxge.rst diff --git a/Documentation/networking/device_drivers/netronome/nfp.rst b/Documentation/networking/device_drivers/ethernet/netronome/nfp.rst similarity index 100% rename from Documentation/networking/device_drivers/netronome/nfp.rst rename to Documentation/networking/device_drivers/ethernet/netronome/nfp.rst diff --git a/Documentation/networking/device_drivers/pensando/ionic.rst b/Documentation/networking/device_drivers/ethernet/pensando/ionic.rst similarity index 100% rename from Documentation/networking/device_drivers/pensando/ionic.rst rename to Documentation/networking/device_drivers/ethernet/pensando/ionic.rst diff --git a/Documentation/networking/device_drivers/smsc/smc9.rst b/Documentation/networking/device_drivers/ethernet/smsc/smc9.rst similarity index 100% rename from Documentation/networking/device_drivers/smsc/smc9.rst rename to Documentation/networking/device_drivers/ethernet/smsc/smc9.rst diff --git a/Documentation/networking/device_drivers/stmicro/stmmac.rst b/Documentation/networking/device_drivers/ethernet/stmicro/stmmac.rst similarity index 100% rename from Documentation/networking/device_drivers/stmicro/stmmac.rst rename to Documentation/networking/device_drivers/ethernet/stmicro/stmmac.rst diff --git a/Documentation/networking/device_drivers/ti/cpsw.rst b/Documentation/networking/device_drivers/ethernet/ti/cpsw.rst similarity index 100% rename from Documentation/networking/device_drivers/ti/cpsw.rst rename to Documentation/networking/device_drivers/ethernet/ti/cpsw.rst diff --git a/Documentation/networking/device_drivers/ti/cpsw_switchdev.rst b/Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst similarity index 100% rename from Documentation/networking/device_drivers/ti/cpsw_switchdev.rst rename to Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst diff 
--git a/Documentation/networking/device_drivers/ti/tlan.rst b/Documentation/networking/device_drivers/ethernet/ti/tlan.rst similarity index 100% rename from Documentation/networking/device_drivers/ti/tlan.rst rename to Documentation/networking/device_drivers/ethernet/ti/tlan.rst diff --git a/Documentation/networking/device_drivers/toshiba/spider_net.rst b/Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst similarity index 100% rename from Documentation/networking/device_drivers/toshiba/spider_net.rst rename to Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index e18dad11bc72..2d4817fc6a29 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -1,56 +1,17 @@ .. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -Vendor Device Drivers -===================== +Hardware Device Drivers +======================= Contents: .. toctree:: :maxdepth: 2 - freescale/dpaa2/index - intel/e100 - intel/e1000 - intel/e1000e - intel/fm10k - intel/igb - intel/igbvf - intel/ixgb - intel/ixgbe - intel/ixgbevf - intel/i40e - intel/iavf - intel/ice - google/gve - marvell/octeontx2 - mellanox/mlx5 - netronome/nfp - pensando/ionic - stmicro/stmmac - 3com/3c509 - 3com/vortex - amazon/ena - aquantia/atlantic - chelsio/cxgb - cirrus/cs89x0 - davicom/dm9000 - dec/de4x5 - dec/dmfe - dlink/dl2k - freescale/dpaa - freescale/gianfar - intel/ipw2100 - intel/ipw2200 - microsoft/netvsc - neterion/s2io - neterion/vxge - qualcomm/rmnet - sb1000 - smsc/smc9 - ti/cpsw_switchdev - ti/cpsw - ti/tlan - toshiba/spider_net + cable/index + cellular/index + ethernet/index + wifi/index .. only:: subproject and html diff --git a/Documentation/networking/device_drivers/wifi/index.rst b/Documentation/networking/device_drivers/wifi/index.rst new file mode 100644 index 000000000000..fb394f5de4a9 --- /dev/null +++ b/Documentation/networking/device_drivers/wifi/index.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Wi-Fi Device Drivers +==================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + intel/ipw2100 + intel/ipw2200 + +.. 
only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/device_drivers/intel/ipw2100.rst b/Documentation/networking/device_drivers/wifi/intel/ipw2100.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/ipw2100.rst rename to Documentation/networking/device_drivers/wifi/intel/ipw2100.rst diff --git a/Documentation/networking/device_drivers/intel/ipw2200.rst b/Documentation/networking/device_drivers/wifi/intel/ipw2200.rst similarity index 100% rename from Documentation/networking/device_drivers/intel/ipw2200.rst rename to Documentation/networking/device_drivers/wifi/intel/ipw2200.rst diff --git a/MAINTAINERS b/MAINTAINERS index 63c4cc4a04d6..43e7c9a94a48 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -147,7 +147,7 @@ Maintainers List M: Steffen Klassert L: netdev@vger.kernel.org S: Odd Fixes -F: Documentation/networking/device_drivers/3com/vortex.rst +F: Documentation/networking/device_drivers/ethernet/3com/vortex.rst F: drivers/net/ethernet/3com/3c59x.c 3CR990 NETWORK DRIVER @@ -816,7 +816,7 @@ R: Saeed Bishara R: Zorik Machulsky L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/amazon/ena.rst +F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst F: drivers/net/ethernet/amazon/ AMAZON RDMA EFA DRIVER @@ -1295,7 +1295,7 @@ L: netdev@vger.kernel.org S: Supported W: https://www.marvell.com/ Q: http://patchwork.ozlabs.org/project/netdev/list/ -F: Documentation/networking/device_drivers/aquantia/atlantic.rst +F: Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst F: drivers/net/ethernet/aquantia/atlantic/ AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM @@ -4753,7 +4753,7 @@ F: net/ax25/sysctl_net_ax25.c DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER L: netdev@vger.kernel.org S: Orphan -F: Documentation/networking/device_drivers/dec/dmfe.rst +F: Documentation/networking/device_drivers/ethernet/dec/dmfe.rst F: drivers/net/ethernet/dec/tulip/dmfe.c DC390/AM53C974 SCSI driver @@ -5241,8 +5241,8 @@ M: Ioana Ciornei M: Ioana Radulescu L: netdev@vger.kernel.org S: Maintained -F: Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst -F: Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst +F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst +F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst F: drivers/net/ethernet/freescale/dpaa2/Kconfig F: drivers/net/ethernet/freescale/dpaa2/Makefile F: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth* @@ -7302,7 +7302,7 @@ R: Sagi Shahar R: Jon Olson L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/google/gve.rst +F: Documentation/networking/device_drivers/ethernet/google/gve.rst F: drivers/net/ethernet/google GPD POCKET FAN DRIVER @@ -7965,7 +7965,7 @@ S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git F: Documentation/ABI/stable/sysfs-bus-vmbus F: Documentation/ABI/testing/debugfs-hyperv -F: Documentation/networking/device_drivers/microsoft/netvsc.rst +F: Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst F: arch/x86/hyperv F: arch/x86/include/asm/hyperv-tlfs.h F: arch/x86/include/asm/mshyperv.h @@ -8647,18 +8647,7 @@ W: http://e1000.sourceforge.net/ Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git -F: Documentation/networking/device_drivers/intel/e100.rst -F: Documentation/networking/device_drivers/intel/e1000.rst -F: Documentation/networking/device_drivers/intel/e1000e.rst -F: Documentation/networking/device_drivers/intel/fm10k.rst -F: Documentation/networking/device_drivers/intel/i40e.rst -F: Documentation/networking/device_drivers/intel/iavf.rst -F: Documentation/networking/device_drivers/intel/ice.rst -F: Documentation/networking/device_drivers/intel/igb.rst -F: Documentation/networking/device_drivers/intel/igbvf.rst -F: Documentation/networking/device_drivers/intel/ixgb.rst -F: Documentation/networking/device_drivers/intel/ixgbe.rst -F: Documentation/networking/device_drivers/intel/ixgbevf.rst +F: Documentation/networking/device_drivers/ethernet/intel/ F: drivers/net/ethernet/intel/ F: drivers/net/ethernet/intel/*/ F: include/linux/avf/virtchnl.h @@ -8848,8 +8837,8 @@ INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT M: Stanislav Yakovlev L: linux-wireless@vger.kernel.org S: Maintained -F: Documentation/networking/device_drivers/intel/ipw2100.rst -F: Documentation/networking/device_drivers/intel/ipw2200.rst +F: Documentation/networking/device_drivers/wifi/intel/ipw2100.rst +F: Documentation/networking/device_drivers/wifi/intel/ipw2200.rst F: drivers/net/wireless/intel/ipw2x00/ INTEL PSTATE DRIVER @@ -10362,7 +10351,7 @@ M: Geetha sowjanya M: Jerin Jacob L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/marvell/octeontx2.rst +F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst F: drivers/net/ethernet/marvell/octeontx2/af/ MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER @@ -11031,7 +11020,7 @@ L: linux-rdma@vger.kernel.org S: Supported W: http://www.mellanox.com Q: http://patchwork.ozlabs.org/project/netdev/list/ -F: Documentation/networking/device_drivers/mellanox/ +F: Documentation/networking/device_drivers/ethernet/mellanox/ F: drivers/net/ethernet/mellanox/mlx5/core/ F: include/linux/mlx5/ @@ -11806,8 +11795,8 @@ NETERION 10GbE DRIVERS (s2io/vxge) M: Jon Mason L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/neterion/s2io.rst -F: Documentation/networking/device_drivers/neterion/vxge.rst +F: Documentation/networking/device_drivers/ethernet/neterion/s2io.rst +F: Documentation/networking/device_drivers/ethernet/neterion/vxge.rst F: drivers/net/ethernet/neterion/ NETFILTER @@ -13370,7 +13359,7 @@ M: Shannon Nelson M: Pensando Drivers L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/pensando/ionic.rst +F: Documentation/networking/device_drivers/ethernet/pensando/ionic.rst F: drivers/net/ethernet/pensando/ PER-CPU MEMORY ALLOCATOR @@ -14053,7 +14042,7 @@ QLOGIC QLA3XXX NETWORK DRIVER M: GR-Linux-NIC-Dev@marvell.com L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx +F: Documentation/networking/device_drivers/ethernet/qlogic/LICENSE.qla3xxx F: drivers/net/ethernet/qlogic/qla3xxx.* QLOGIC QLA4XXX iSCSI DRIVER @@ -14104,7 +14093,7 @@ M: Laurentiu Tudor L: linux-kernel@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt -F: Documentation/networking/device_drivers/freescale/dpaa2/overview.rst +F: Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst F: drivers/bus/fsl-mc/ QT1010 MEDIA DRIVER @@ -14225,7 +14214,7 @@ M: Subash Abhinov Kasiviswanathan M: Sean Tranchetti L: 
netdev@vger.kernel.org S: Maintained -F: Documentation/networking/device_drivers/qualcomm/rmnet.rst +F: Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst F: drivers/net/ethernet/qualcomm/rmnet/ F: include/linux/if_rmnet.h @@ -16106,7 +16095,7 @@ SPIDERNET NETWORK DRIVER for CELL M: Ishizaki Kou L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/device_drivers/toshiba/spider_net.rst +F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst F: drivers/net/ethernet/toshiba/spider_net* SPMI SUBSYSTEM @@ -16333,7 +16322,7 @@ M: Jose Abreu L: netdev@vger.kernel.org S: Supported W: http://www.stlinux.com -F: Documentation/networking/device_drivers/stmicro/ +F: Documentation/networking/device_drivers/ethernet/stmicro/ F: drivers/net/ethernet/stmicro/stmmac/ SUN3/3X @@ -17221,7 +17210,7 @@ M: Samuel Chessman L: tlan-devel@lists.sourceforge.net (subscribers-only) S: Maintained W: http://sourceforge.net/projects/tlan/ -F: Documentation/networking/device_drivers/ti/tlan.rst +F: Documentation/networking/device_drivers/ethernet/ti/tlan.rst F: drivers/net/ethernet/ti/tlan.* TM6000 VIDEO4LINUX DRIVER diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9a49c4c2316b..7dcb465824be 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -460,7 +460,7 @@ config NET_SB1000 At present this driver only compiles as a module, so say M here if you have this card. The module will be called sb1000. Then read - for + for information on how to use this module, as it needs special ppp scripts for establishing a connection. Further documentation and the necessary scripts can be found at: diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 5984b7033999..741c67e546d4 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1149,7 +1149,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, print_info = (vortex_debug > 1); if (print_info) - pr_info("See Documentation/networking/device_drivers/3com/vortex.rst\n"); + pr_info("See Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n"); pr_info("%s: 3Com %s %s at %p.\n", print_name, @@ -1954,7 +1954,7 @@ vortex_error(struct net_device *dev, int status) dev->name, tx_status); if (tx_status == 0x82) { pr_err("Probably a duplex mismatch. See " - "Documentation/networking/device_drivers/3com/vortex.rst\n"); + "Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n"); } dump_tx_ring(dev); } diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 074b0974769c..a52a3740f0c9 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -76,8 +76,8 @@ config VORTEX "Hurricane" (3c555/3cSOHO) PCI If you have such a card, say Y here. More specific information is in - and - in the comments at the beginning of + + and in the comments at the beginning of . To compile this support as a module, choose M here. diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index b7f43254598f..f6f3ef9a93cf 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -26,7 +26,7 @@ config CHELSIO_T1 This driver supports Chelsio gigabit and 10-gigabit Ethernet cards. More information about adapter features and performance tuning is in - . + . For general information about Chelsio and our products, visit our website at . 
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index 7cf4526596f3..d8af9e64dd1e 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -24,7 +24,7 @@ config CS89x0 help Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file - . + . To compile this driver as a module, choose M here. The module will be called cs89x0. diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 8afc5942179a..79dc336ce709 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -114,7 +114,7 @@ config DE4X5 These include the DE425, DE434, DE435, DE450 and DE500 models. If you have a network card of this type, say Y. More specific information is contained in - . + . To compile this driver as a module, choose M here. The module will be called de4x5. @@ -138,7 +138,7 @@ config DM9102 This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from Davicom (). If you have such a network (Ethernet) card, say Y. Some information is contained in the file - . + . To compile this driver as a module, choose M here. The module will be called dmfe. diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 5143722c4419..be6d8a9ada27 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1863,13 +1863,5 @@ static struct pci_driver rio_driver = { }; module_pci_driver(rio_driver); -/* - -Compile command: - -gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c - -Read Documentation/networking/device_drivers/dlink/dl2k.rst for details. - -*/ +/* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */ diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 48a8f9aa1dd0..3cd13fd55011 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -34,7 +34,7 @@ config E100 to identify the adapter. More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called e100. @@ -50,7 +50,7 @@ config E1000 More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called e1000. @@ -70,7 +70,7 @@ config E1000E More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called e1000e. @@ -98,7 +98,7 @@ config IGB More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called igb. @@ -134,7 +134,7 @@ config IGBVF More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called igbvf. @@ -151,7 +151,7 @@ config IXGB More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called ixgb. @@ -170,7 +170,7 @@ config IXGBE More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called ixgbe. @@ -222,7 +222,7 @@ config IXGBEVF More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called ixgbevf. 
MSI-X interrupt support is required @@ -249,7 +249,7 @@ config I40E More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called i40e. @@ -284,7 +284,7 @@ config I40EVF This driver was formerly named i40evf. More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called iavf. MSI-X interrupt support is required @@ -303,7 +303,7 @@ config ICE More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called ice. @@ -321,7 +321,7 @@ config FM10K More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called fm10k. MSI-X interrupt support is required diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig index 5484f18f272e..0c0d127906dd 100644 --- a/drivers/net/ethernet/neterion/Kconfig +++ b/drivers/net/ethernet/neterion/Kconfig @@ -27,7 +27,7 @@ config S2IO on its age. More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called s2io. @@ -42,7 +42,7 @@ config VXGE labeled as either one, depending on its age. More specific information on configuring the driver is in - . + . To compile this driver as a module, choose M here. The module will be called vxge. diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index d25b88f53de4..76f8cc502bf9 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -25,7 +25,7 @@ config IONIC This enables the support for the Pensando family of Ethernet adapters. More specific information on this driver can be found in - . + . To compile this driver as a module, choose M here. The module will be called ionic. diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 6293b1e6c772..b77e427e6729 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -28,7 +28,7 @@ config SMC9194 option if you have a DELL laptop with the docking station, or another SMC9192/9194 based chipset. Say Y if you want it compiled into the kernel, and read the file - . + . To compile this driver as a module, choose M here. The module will be called smc9194. @@ -44,7 +44,7 @@ config SMC91X This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it compiled into the kernel, and read the file - . + . This driver is also available as a module ( = code which can be inserted in and removed from the running kernel whenever you want). diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 50f55364d4fb..abfc4c435d59 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -156,7 +156,7 @@ config TLAN Devices currently supported by this driver are Compaq Netelligent, Compaq NetFlex and Olicom cards. Please read the file - + for more details. To compile this driver as a module, choose M here. 
The module diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 857709828058..583cd2ef7662 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -70,7 +70,7 @@ MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); MODULE_LICENSE("GPL"); /* Turn on debugging. - * See Documentation/networking/device_drivers/ti/tlan.rst for details + * See Documentation/networking/device_drivers/ethernet/ti/tlan.rst for details */ static int debug; module_param(debug, int, 0); diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig index d00386915a9d..18436592a3a6 100644 --- a/drivers/net/wireless/intel/ipw2x00/Kconfig +++ b/drivers/net/wireless/intel/ipw2x00/Kconfig @@ -16,7 +16,7 @@ config IPW2100 A driver for the Intel PRO/Wireless 2100 Network Connection 802.11b wireless network adapter. - See + See for information on the capabilities currently enabled in this driver and for tips for debugging issues and problems. @@ -78,7 +78,7 @@ config IPW2200 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network Connection adapters. - See + See for information on the capabilities currently enabled in this driver and for tips for debugging issues and problems. diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 624fe721e2b5..753dada5a243 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -8352,7 +8352,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) { printk(KERN_WARNING DRV_NAME ": Firmware image not compatible " "(detected version id of %u). " - "See Documentation/networking/device_drivers/intel/ipw2100.rst\n", + "See Documentation/networking/device_drivers/wifi/intel/ipw2100.rst\n", h->version); return 1; } From 14474950252cbb97b79fb1e5e7f4df9b13a8f9a0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:25 -0700 Subject: [PATCH 0365/2056] docs: networking: move z8530 to the hw driver section Move z8530 docs to hamradio and wan subdirectories. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../device_drivers/hamradio/index.rst | 18 ++++++++++++++++++ .../{ => device_drivers/hamradio}/z8530drv.rst | 0 .../networking/device_drivers/index.rst | 2 ++ .../networking/device_drivers/wan/index.rst | 18 ++++++++++++++++++ .../{ => device_drivers/wan}/z8530book.rst | 0 Documentation/networking/index.rst | 2 -- MAINTAINERS | 2 +- drivers/net/hamradio/Kconfig | 8 +++++--- drivers/net/hamradio/scc.c | 2 +- 9 files changed, 45 insertions(+), 7 deletions(-) create mode 100644 Documentation/networking/device_drivers/hamradio/index.rst rename Documentation/networking/{ => device_drivers/hamradio}/z8530drv.rst (100%) create mode 100644 Documentation/networking/device_drivers/wan/index.rst rename Documentation/networking/{ => device_drivers/wan}/z8530book.rst (100%) diff --git a/Documentation/networking/device_drivers/hamradio/index.rst b/Documentation/networking/device_drivers/hamradio/index.rst new file mode 100644 index 000000000000..df03a5acbfa7 --- /dev/null +++ b/Documentation/networking/device_drivers/hamradio/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Amateur Radio Device Drivers +============================ + +Contents: + +.. toctree:: + :maxdepth: 2 + + z8530drv + +.. 
only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/z8530drv.rst b/Documentation/networking/device_drivers/hamradio/z8530drv.rst similarity index 100% rename from Documentation/networking/z8530drv.rst rename to Documentation/networking/device_drivers/hamradio/z8530drv.rst diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index 2d4817fc6a29..5497e3ae1ca9 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -11,6 +11,8 @@ Contents: cable/index cellular/index ethernet/index + hamradio/index + wan/index wifi/index .. only:: subproject and html diff --git a/Documentation/networking/device_drivers/wan/index.rst b/Documentation/networking/device_drivers/wan/index.rst new file mode 100644 index 000000000000..9d9ae94f00b4 --- /dev/null +++ b/Documentation/networking/device_drivers/wan/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Classic WAN Device Drivers +========================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + z8530book + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/z8530book.rst b/Documentation/networking/device_drivers/wan/z8530book.rst similarity index 100% rename from Documentation/networking/z8530book.rst rename to Documentation/networking/device_drivers/wan/z8530book.rst diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 0186e276690a..718e04e3f7ed 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -20,7 +20,6 @@ Contents: ieee802154 j1939 kapi - z8530book msg_zerocopy failover net_dim @@ -122,7 +121,6 @@ Contents: xfrm_proc xfrm_sync xfrm_sysctl - z8530drv .. only:: subproject and html diff --git a/MAINTAINERS b/MAINTAINERS index 43e7c9a94a48..bdde85c12b74 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -18902,7 +18902,7 @@ L: linux-hams@vger.kernel.org S: Maintained W: http://yaina.de/jreuter/ W: http://www.qsl.net/dl1bke/ -F: Documentation/networking/z8530drv.rst +F: Documentation/networking/device_drivers/hamradio/z8530drv.rst F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index 70f754abbeca..cde8b48e7bc7 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -84,8 +84,9 @@ config SCC help These cards are used to connect your Linux box to an amateur radio in order to communicate with other computers. If you want to use - this, read and the - AX25-HOWTO, available from + this, read + + and the AX25-HOWTO, available from . Also make sure to say Y to "Amateur Radio AX.25 Level 2" support. @@ -98,7 +99,8 @@ config SCC_DELAY help Say Y here if you experience problems with the SCC driver not working properly; please read - for details. + + for details. If unsure, say N. diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 33fdd55c6122..1e915871baa7 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -7,7 +7,7 @@ * ------------------ * * You can find a subset of the documentation in - * Documentation/networking/z8530drv.rst. + * Documentation/networking/device_drivers/wan/z8530drv.rst. 
*/ /* From f05c43e05629742ae75493e213aeb6d8c226654c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:26 -0700 Subject: [PATCH 0366/2056] docs: networking: move baycom to the hw driver section Move baycom to hamradio. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../networking/{ => device_drivers/hamradio}/baycom.rst | 0 .../networking/device_drivers/hamradio/index.rst | 1 + Documentation/networking/index.rst | 1 - drivers/net/hamradio/Kconfig | 8 ++++---- 4 files changed, 5 insertions(+), 5 deletions(-) rename Documentation/networking/{ => device_drivers/hamradio}/baycom.rst (100%) diff --git a/Documentation/networking/baycom.rst b/Documentation/networking/device_drivers/hamradio/baycom.rst similarity index 100% rename from Documentation/networking/baycom.rst rename to Documentation/networking/device_drivers/hamradio/baycom.rst diff --git a/Documentation/networking/device_drivers/hamradio/index.rst b/Documentation/networking/device_drivers/hamradio/index.rst index df03a5acbfa7..7e731732057b 100644 --- a/Documentation/networking/device_drivers/hamradio/index.rst +++ b/Documentation/networking/device_drivers/hamradio/index.rst @@ -8,6 +8,7 @@ Contents: .. toctree:: :maxdepth: 2 + baycom z8530drv .. only:: subproject and html diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 718e04e3f7ed..76831e9d7c9a 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -43,7 +43,6 @@ Contents: arcnet atm ax25 - baycom bonding cdc_mbim cops diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig index cde8b48e7bc7..f4843f9672c1 100644 --- a/drivers/net/hamradio/Kconfig +++ b/drivers/net/hamradio/Kconfig @@ -129,7 +129,7 @@ config BAYCOM_SER_FDX your serial interface chip. To configure the driver, use the sethdlc utility available in the standard ax25 utilities package. For information on the modems, see and - . + . To compile this driver as a module, choose M here: the module will be called baycom_ser_fdx. This is recommended. @@ -147,7 +147,7 @@ config BAYCOM_SER_HDX the driver, use the sethdlc utility available in the standard ax25 utilities package. For information on the modems, see and - . + . To compile this driver as a module, choose M here: the module will be called baycom_ser_hdx. This is recommended. @@ -162,7 +162,7 @@ config BAYCOM_PAR par96 designs. To configure the driver, use the sethdlc utility available in the standard ax25 utilities package. For information on the modems, see and the file - . + . To compile this driver as a module, choose M here: the module will be called baycom_par. This is recommended. @@ -177,7 +177,7 @@ config BAYCOM_EPP designs. To configure the driver, use the sethdlc utility available in the standard ax25 utilities package. For information on the modems, see and the file - . + . To compile this driver as a module, choose M here: the module will be called baycom_epp. This is recommended. From 5c3b5da432bcf6db822adee3e40da9b454369d31 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:27 -0700 Subject: [PATCH 0367/2056] docs: networking: move ray_cs to the hw driver section Move ray_cs into Wi-Fi driver docs subdirectory. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- Documentation/networking/device_drivers/wifi/index.rst | 1 + Documentation/networking/{ => device_drivers/wifi}/ray_cs.rst | 0 Documentation/networking/index.rst | 1 - drivers/net/wireless/Kconfig | 3 ++- 4 files changed, 3 insertions(+), 2 deletions(-) rename Documentation/networking/{ => device_drivers/wifi}/ray_cs.rst (100%) diff --git a/Documentation/networking/device_drivers/wifi/index.rst b/Documentation/networking/device_drivers/wifi/index.rst index fb394f5de4a9..bf91a87c7acf 100644 --- a/Documentation/networking/device_drivers/wifi/index.rst +++ b/Documentation/networking/device_drivers/wifi/index.rst @@ -10,6 +10,7 @@ Contents: intel/ipw2100 intel/ipw2200 + ray_cs .. only:: subproject and html diff --git a/Documentation/networking/ray_cs.rst b/Documentation/networking/device_drivers/wifi/ray_cs.rst similarity index 100% rename from Documentation/networking/ray_cs.rst rename to Documentation/networking/device_drivers/wifi/ray_cs.rst diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 76831e9d7c9a..9f8230c325af 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -95,7 +95,6 @@ Contents: ppp_generic proc_net_tcp radiotap-headers - ray_cs rds regulatory rxrpc diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 8ab62bb6b853..2c6dd50b6935 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -57,7 +57,8 @@ config PCMCIA_RAYCS help Say Y here if you intend to attach an Aviator/Raytheon PCMCIA (PC-card) wireless Ethernet networking card to your computer. - Please read the file for + Please read the file + for details. To compile this driver as a module, choose M here: the module will be From 95298d63c67673c654c08952672d016212b26054 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:28 -0700 Subject: [PATCH 0368/2056] docs: networking: move remaining Ethernet driver docs to the hw section Move docs for hinic and altera_tse under device_drivers/ethernet. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../{ => device_drivers/ethernet/altera}/altera_tse.rst | 0 .../networking/{ => device_drivers/ethernet/huawei}/hinic.rst | 0 Documentation/networking/device_drivers/ethernet/index.rst | 2 ++ Documentation/networking/index.rst | 2 -- MAINTAINERS | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) rename Documentation/networking/{ => device_drivers/ethernet/altera}/altera_tse.rst (100%) rename Documentation/networking/{ => device_drivers/ethernet/huawei}/hinic.rst (100%) diff --git a/Documentation/networking/altera_tse.rst b/Documentation/networking/device_drivers/ethernet/altera/altera_tse.rst similarity index 100% rename from Documentation/networking/altera_tse.rst rename to Documentation/networking/device_drivers/ethernet/altera/altera_tse.rst diff --git a/Documentation/networking/hinic.rst b/Documentation/networking/device_drivers/ethernet/huawei/hinic.rst similarity index 100% rename from Documentation/networking/hinic.rst rename to Documentation/networking/device_drivers/ethernet/huawei/hinic.rst diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst index fd3873024da8..cbb75a1818c0 100644 --- a/Documentation/networking/device_drivers/ethernet/index.rst +++ b/Documentation/networking/device_drivers/ethernet/index.rst @@ -13,6 +13,7 @@ Contents: 3com/3c509 3com/vortex amazon/ena + altera/altera_tse aquantia/atlantic chelsio/cxgb cirrus/cs89x0 @@ -24,6 +25,7 @@ Contents: freescale/dpaa2/index freescale/gianfar google/gve + huawei/hinic intel/e100 intel/e1000 intel/e1000e diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 9f8230c325af..9bc8ecc79d94 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -38,7 +38,6 @@ Contents: nfc 6lowpan 6pack - altera_tse arcnet-hardware arcnet atm @@ -62,7 +61,6 @@ Contents: generic_netlink gen_stats gtp - hinic ila ipddp ip_dynaddr diff --git a/MAINTAINERS b/MAINTAINERS index bdde85c12b74..e5a1da603924 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7913,7 +7913,7 @@ HUAWEI ETHERNET DRIVER M: Bin Luo L: netdev@vger.kernel.org S: Supported -F: Documentation/networking/hinic.rst +F: Documentation/networking/device_drivers/ethernet/huawei/hinic.rst F: drivers/net/ethernet/huawei/hinic/ HUGETLB FILESYSTEM From 4daedf7abb41aacec8c0f868f64ec2270d577688 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:29 -0700 Subject: [PATCH 0369/2056] docs: networking: move AppleTalk / LocalTalk drivers to the hw driver section Move docs for cops and ltpc under device_drivers/appletalk. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../{ => device_drivers/appletalk}/cops.rst | 0 .../device_drivers/appletalk/index.rst | 19 +++++++++++++++++++ .../{ => device_drivers/appletalk}/ltpc.rst | 0 .../networking/device_drivers/index.rst | 1 + Documentation/networking/index.rst | 2 -- drivers/net/appletalk/Kconfig | 3 ++- 6 files changed, 22 insertions(+), 3 deletions(-) rename Documentation/networking/{ => device_drivers/appletalk}/cops.rst (100%) create mode 100644 Documentation/networking/device_drivers/appletalk/index.rst rename Documentation/networking/{ => device_drivers/appletalk}/ltpc.rst (100%) diff --git a/Documentation/networking/cops.rst b/Documentation/networking/device_drivers/appletalk/cops.rst similarity index 100% rename from Documentation/networking/cops.rst rename to Documentation/networking/device_drivers/appletalk/cops.rst diff --git a/Documentation/networking/device_drivers/appletalk/index.rst b/Documentation/networking/device_drivers/appletalk/index.rst new file mode 100644 index 000000000000..de7507f02037 --- /dev/null +++ b/Documentation/networking/device_drivers/appletalk/index.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +AppleTalk Device Drivers +======================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + cops + ltpc + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/ltpc.rst b/Documentation/networking/device_drivers/appletalk/ltpc.rst similarity index 100% rename from Documentation/networking/ltpc.rst rename to Documentation/networking/device_drivers/appletalk/ltpc.rst diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index 5497e3ae1ca9..3995e2179aa0 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -8,6 +8,7 @@ Contents: .. toctree:: :maxdepth: 2 + appletalk/index cable/index cellular/index ethernet/index diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 9bc8ecc79d94..eb616ac48094 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -44,7 +44,6 @@ Contents: ax25 bonding cdc_mbim - cops cxacru dccp dctcp @@ -73,7 +72,6 @@ Contents: kcm l2tp lapb-module - ltpc mac80211-injection mpls-sysctl multiqueue diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 09f94d4b14e2..d4f22a2e5be4 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -59,7 +59,8 @@ config COPS package. This driver is experimental, which means that it may not work. This driver will only work if you choose "AppleTalk DDP" networking support, above. - Please read the file . + Please read the file + . config COPS_DAYNA bool "Dayna firmware support" From 9633a0e959174ecdfb732863e8de8171570d19a8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:30 -0700 Subject: [PATCH 0370/2056] docs: networking: move ATM drivers to the hw driver section Move docs for cxacru, fore200e and iphase under device_drivers/atm. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../{ => device_drivers/atm}/cxacru-cf.py | 0 .../{ => device_drivers/atm}/cxacru.rst | 0 .../{ => device_drivers/atm}/fore200e.rst | 0 .../networking/device_drivers/atm/index.rst | 20 +++++++++++++++++++ .../{ => device_drivers/atm}/iphase.rst | 0 .../networking/device_drivers/index.rst | 1 + Documentation/networking/index.rst | 3 --- drivers/atm/Kconfig | 8 +++++--- 8 files changed, 26 insertions(+), 6 deletions(-) rename Documentation/networking/{ => device_drivers/atm}/cxacru-cf.py (100%) rename Documentation/networking/{ => device_drivers/atm}/cxacru.rst (100%) rename Documentation/networking/{ => device_drivers/atm}/fore200e.rst (100%) create mode 100644 Documentation/networking/device_drivers/atm/index.rst rename Documentation/networking/{ => device_drivers/atm}/iphase.rst (100%) diff --git a/Documentation/networking/cxacru-cf.py b/Documentation/networking/device_drivers/atm/cxacru-cf.py similarity index 100% rename from Documentation/networking/cxacru-cf.py rename to Documentation/networking/device_drivers/atm/cxacru-cf.py diff --git a/Documentation/networking/cxacru.rst b/Documentation/networking/device_drivers/atm/cxacru.rst similarity index 100% rename from Documentation/networking/cxacru.rst rename to Documentation/networking/device_drivers/atm/cxacru.rst diff --git a/Documentation/networking/fore200e.rst b/Documentation/networking/device_drivers/atm/fore200e.rst similarity index 100% rename from Documentation/networking/fore200e.rst rename to Documentation/networking/device_drivers/atm/fore200e.rst diff --git a/Documentation/networking/device_drivers/atm/index.rst b/Documentation/networking/device_drivers/atm/index.rst new file mode 100644 index 000000000000..7b593f031a60 --- /dev/null +++ b/Documentation/networking/device_drivers/atm/index.rst @@ -0,0 +1,20 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Asynchronous Transfer Mode (ATM) Device Drivers +=============================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + cxacru + fore200e + iphase + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/iphase.rst b/Documentation/networking/device_drivers/atm/iphase.rst similarity index 100% rename from Documentation/networking/iphase.rst rename to Documentation/networking/device_drivers/atm/iphase.rst diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index 3995e2179aa0..d6a73e4592e0 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -9,6 +9,7 @@ Contents: :maxdepth: 2 appletalk/index + atm/index cable/index cellular/index ethernet/index diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index eb616ac48094..f48f1d19caff 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -44,7 +44,6 @@ Contents: ax25 bonding cdc_mbim - cxacru dccp dctcp decnet @@ -54,7 +53,6 @@ Contents: eql fib_trie filter - fore200e framerelay generic-hdlc generic_netlink @@ -63,7 +61,6 @@ Contents: ila ipddp ip_dynaddr - iphase ipsec ip-sysctl ipv6 diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 8007e051876c..b9370bbca828 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -306,8 +306,9 @@ config ATM_IA for more info about the cards. Say Y (or M to compile as a module named iphase) here if you have one of these cards. - See the file for further - details. 
+ See the file + + for further details. config ATM_IA_DEBUG bool "Enable debugging messages" @@ -336,7 +337,8 @@ config ATM_FORE200E on PCI and SBUS hosts. Say Y (or M to compile as a module named fore_200e) here if you have one of these ATM adapters. - See the file for + See the file + for further details. config ATM_FORE200E_USE_TASKLET From 55f35cf79d68136ef6a2e39a232a86f4418e7df7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 26 Jun 2020 10:27:31 -0700 Subject: [PATCH 0371/2056] docs: networking: move FDDI drivers to the hw driver section Move docs for defza and skfp under device_drivers/fddi. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../{ => device_drivers/fddi}/defza.rst | 0 .../networking/device_drivers/fddi/index.rst | 19 +++++++++++++++++++ .../{ => device_drivers/fddi}/skfp.rst | 0 .../networking/device_drivers/index.rst | 1 + Documentation/networking/index.rst | 2 -- drivers/net/fddi/Kconfig | 4 ++-- 6 files changed, 22 insertions(+), 4 deletions(-) rename Documentation/networking/{ => device_drivers/fddi}/defza.rst (100%) create mode 100644 Documentation/networking/device_drivers/fddi/index.rst rename Documentation/networking/{ => device_drivers/fddi}/skfp.rst (100%) diff --git a/Documentation/networking/defza.rst b/Documentation/networking/device_drivers/fddi/defza.rst similarity index 100% rename from Documentation/networking/defza.rst rename to Documentation/networking/device_drivers/fddi/defza.rst diff --git a/Documentation/networking/device_drivers/fddi/index.rst b/Documentation/networking/device_drivers/fddi/index.rst new file mode 100644 index 000000000000..0b75294e6c8b --- /dev/null +++ b/Documentation/networking/device_drivers/fddi/index.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + +Fiber Distributed Data Interface (FDDI) Device Drivers +====================================================== + +Contents: + +.. toctree:: + :maxdepth: 2 + + defza + skfp + +.. only:: subproject and html + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/networking/skfp.rst b/Documentation/networking/device_drivers/fddi/skfp.rst similarity index 100% rename from Documentation/networking/skfp.rst rename to Documentation/networking/device_drivers/fddi/skfp.rst diff --git a/Documentation/networking/device_drivers/index.rst b/Documentation/networking/device_drivers/index.rst index d6a73e4592e0..a3113ffd7a16 100644 --- a/Documentation/networking/device_drivers/index.rst +++ b/Documentation/networking/device_drivers/index.rst @@ -13,6 +13,7 @@ Contents: cable/index cellular/index ethernet/index + fddi/index hamradio/index wan/index wifi/index diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index f48f1d19caff..c29496fff81c 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -47,7 +47,6 @@ Contents: dccp dctcp decnet - defza dns_resolver driver eql @@ -94,7 +93,6 @@ Contents: sctp secid seg6-sysctl - skfp strparser switchdev tc-actions-env-rules diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig index 60cc7524520c..f722079dfb6a 100644 --- a/drivers/net/fddi/Kconfig +++ b/drivers/net/fddi/Kconfig @@ -77,8 +77,8 @@ config SKFP - Netelligent 100 FDDI SAS UTP - Netelligent 100 FDDI SAS Fibre MIC - Read for information about - the driver. + Read + for information about the driver. 
Questions concerning this driver can be addressed to: From d39dceca388ad0e4f748836806349ebe09282283 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Jun 2020 19:29:59 +0200 Subject: [PATCH 0372/2056] mptcp: add __init annotation on setup functions Add the missing annotation in some setup-only functions. Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/pm.c | 2 +- net/mptcp/pm_netlink.c | 2 +- net/mptcp/protocol.c | 4 ++-- net/mptcp/protocol.h | 10 +++++----- net/mptcp/subflow.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 977d9c8b1453..7de09fdd42a3 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -234,7 +234,7 @@ void mptcp_pm_close(struct mptcp_sock *msk) sock_put((struct sock *)msk); } -void mptcp_pm_init(void) +void __init mptcp_pm_init(void) { pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8); if (!pm_wq) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index b78edf237ba0..c8820c4156e6 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -851,7 +851,7 @@ static struct pernet_operations mptcp_pm_pernet_ops = { .size = sizeof(struct pm_nl_pernet), }; -void mptcp_pm_nl_init(void) +void __init mptcp_pm_nl_init(void) { if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0) panic("Failed to register MPTCP PM pernet subsystem.\n"); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3980fbb6f31e..9163a05b9e46 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2077,7 +2077,7 @@ static struct inet_protosw mptcp_protosw = { .flags = INET_PROTOSW_ICSK, }; -void mptcp_proto_init(void) +void __init mptcp_proto_init(void) { mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; @@ -2139,7 +2139,7 @@ static struct inet_protosw mptcp_v6_protosw = { .flags = INET_PROTOSW_ICSK, }; -int mptcp_proto_v6_init(void) +int __init mptcp_proto_v6_init(void) { int err; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 482f53aed30a..571d39a1a17c 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -337,7 +337,7 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow) int mptcp_is_enabled(struct net *net); bool mptcp_subflow_data_available(struct sock *sk); -void mptcp_subflow_init(void); +void __init mptcp_subflow_init(void); /* called with sk socket lock held */ int __mptcp_subflow_connect(struct sock *sk, int ifindex, @@ -355,9 +355,9 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk, inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops; } -void mptcp_proto_init(void); +void __init mptcp_proto_init(void); #if IS_ENABLED(CONFIG_MPTCP_IPV6) -int mptcp_proto_v6_init(void); +int __init mptcp_proto_v6_init(void); #endif struct sock *mptcp_sk_clone(const struct sock *sk, @@ -394,7 +394,7 @@ static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn) void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac); -void mptcp_pm_init(void); +void __init mptcp_pm_init(void); void mptcp_pm_data_init(struct mptcp_sock *msk); void mptcp_pm_close(struct mptcp_sock *msk); void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side); @@ -428,7 +428,7 @@ bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining, struct mptcp_addr_info *saddr); int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc); -void mptcp_pm_nl_init(void); +void __init mptcp_pm_nl_init(void); void mptcp_pm_nl_data_init(struct mptcp_sock *msk); void 
mptcp_pm_nl_fully_established(struct mptcp_sock *msk); void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 3838a0b3a21f..c2389ba2d4ee 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1255,7 +1255,7 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops) return 0; } -void mptcp_subflow_init(void) +void __init mptcp_subflow_init(void) { subflow_request_sock_ops = tcp_request_sock_ops; if (subflow_ops_init(&subflow_request_sock_ops) != 0) From 2c5ebd001d4f0c64a2dfda94eb1d9b31a8863c8d Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Jun 2020 19:30:00 +0200 Subject: [PATCH 0373/2056] mptcp: refactor token container Replace the radix tree with a hash table allocated at boot time. The radix tree has some shortcoming: a single lock is contented by all the mptcp operation, the lookup currently use such lock, and traversing all the items would require a lock, too. With hash table instead we trade a little memory to address all the above - a per bucket lock is used. To hash the MPTCP sockets, we re-use the msk' sk_node entry: the MPTCP sockets are never hashed by the stack. Replace the existing hash proto callbacks with a dummy implementation, annotating the above constraint. Additionally refactor the token creation to code to: - limit the number of consecutive attempts to a fixed maximum. Hitting a hash bucket with a long chain is considered a failed attempt - accept() no longer can fail to token management. - if token creation fails at connect() time, we do fallback to TCP (before the connection was closed) v1 -> v2: - fix "no newline at end of file" - Jakub Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 45 ++++---- net/mptcp/protocol.h | 14 ++- net/mptcp/subflow.c | 19 ++-- net/mptcp/token.c | 265 ++++++++++++++++++++++++++++++------------- 4 files changed, 233 insertions(+), 110 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 9163a05b9e46..be09fd525f8f 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1448,20 +1448,6 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->token = subflow_req->token; msk->subflow = NULL; - if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) { - nsk->sk_state = TCP_CLOSE; - bh_unlock_sock(nsk); - - /* we can't call into mptcp_close() here - possible BH context - * free the sock directly. - * sk_clone_lock() sets nsk refcnt to two, hence call sk_free() - * too. 
- */ - sk_common_release(nsk); - sk_free(nsk); - return NULL; - } - msk->write_seq = subflow_req->idsn + 1; atomic64_set(&msk->snd_una, msk->write_seq); if (mp_opt->mp_capable) { @@ -1547,7 +1533,7 @@ static void mptcp_destroy(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); - mptcp_token_destroy(msk->token); + mptcp_token_destroy(msk); if (msk->cached_ext) __skb_ext_put(msk->cached_ext); @@ -1636,6 +1622,20 @@ static void mptcp_release_cb(struct sock *sk) } } +static int mptcp_hash(struct sock *sk) +{ + /* should never be called, + * we hash the TCP subflows not the master socket + */ + WARN_ON_ONCE(1); + return 0; +} + +static void mptcp_unhash(struct sock *sk) +{ + /* called from sk_common_release(), but nothing to do here */ +} + static int mptcp_get_port(struct sock *sk, unsigned short snum) { struct mptcp_sock *msk = mptcp_sk(sk); @@ -1679,7 +1679,6 @@ void mptcp_finish_connect(struct sock *ssk) */ WRITE_ONCE(msk->remote_key, subflow->remote_key); WRITE_ONCE(msk->local_key, subflow->local_key); - WRITE_ONCE(msk->token, subflow->token); WRITE_ONCE(msk->write_seq, subflow->idsn + 1); WRITE_ONCE(msk->ack_seq, ack_seq); WRITE_ONCE(msk->can_ack, 1); @@ -1761,8 +1760,8 @@ static struct proto mptcp_prot = { .sendmsg = mptcp_sendmsg, .recvmsg = mptcp_recvmsg, .release_cb = mptcp_release_cb, - .hash = inet_hash, - .unhash = inet_unhash, + .hash = mptcp_hash, + .unhash = mptcp_unhash, .get_port = mptcp_get_port, .sockets_allocated = &mptcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, @@ -1771,6 +1770,7 @@ static struct proto mptcp_prot = { .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), .sysctl_mem = sysctl_tcp_mem, .obj_size = sizeof(struct mptcp_sock), + .slab_flags = SLAB_TYPESAFE_BY_RCU, .no_autobind = true, }; @@ -1800,6 +1800,7 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct mptcp_sock *msk = mptcp_sk(sock->sk); + struct mptcp_subflow_context *subflow; struct socket *ssock; int err; @@ -1812,19 +1813,23 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto do_connect; } + mptcp_token_destroy(msk); ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); if (IS_ERR(ssock)) { err = PTR_ERR(ssock); goto unlock; } + subflow = mptcp_subflow_ctx(ssock->sk); #ifdef CONFIG_TCP_MD5SIG /* no MPTCP if MD5SIG is enabled on this socket or we may run out of * TCP option space. 
*/ if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) - mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0; + subflow->request_mptcp = 0; #endif + if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) + subflow->request_mptcp = 0; do_connect: err = ssock->ops->connect(ssock, uaddr, addr_len, flags); @@ -1888,6 +1893,7 @@ static int mptcp_listen(struct socket *sock, int backlog) pr_debug("msk=%p", msk); lock_sock(sock->sk); + mptcp_token_destroy(msk); ssock = __mptcp_socket_create(msk, TCP_LISTEN); if (IS_ERR(ssock)) { err = PTR_ERR(ssock); @@ -2086,6 +2092,7 @@ void __init mptcp_proto_init(void) mptcp_subflow_init(); mptcp_pm_init(); + mptcp_token_init(); if (proto_register(&mptcp_prot, 1) != 0) panic("Failed to register MPTCP proto.\n"); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 571d39a1a17c..c05552e5fa23 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -250,6 +250,7 @@ struct mptcp_subflow_request_sock { u32 local_nonce; u32 remote_nonce; struct mptcp_sock *msk; + struct hlist_nulls_node token_node; }; static inline struct mptcp_subflow_request_sock * @@ -372,12 +373,19 @@ bool mptcp_finish_join(struct sock *sk); void mptcp_data_acked(struct sock *sk); void mptcp_subflow_eof(struct sock *sk); +void __init mptcp_token_init(void); +static inline void mptcp_token_init_request(struct request_sock *req) +{ + mptcp_subflow_rsk(req)->token_node.pprev = NULL; +} + int mptcp_token_new_request(struct request_sock *req); -void mptcp_token_destroy_request(u32 token); +void mptcp_token_destroy_request(struct request_sock *req); int mptcp_token_new_connect(struct sock *sk); -int mptcp_token_new_accept(u32 token, struct sock *conn); +void mptcp_token_accept(struct mptcp_subflow_request_sock *r, + struct mptcp_sock *msk); struct mptcp_sock *mptcp_token_get_sock(u32 token); -void mptcp_token_destroy(u32 token); +void mptcp_token_destroy(struct mptcp_sock *msk); void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn); static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index c2389ba2d4ee..102db8c88e97 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -32,12 +32,9 @@ static void SUBFLOW_REQ_INC_STATS(struct request_sock *req, static int subflow_rebuild_header(struct sock *sk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); - int local_id, err = 0; + int local_id; - if (subflow->request_mptcp && !subflow->token) { - pr_debug("subflow=%p", sk); - err = mptcp_token_new_connect(sk); - } else if (subflow->request_join && !subflow->local_nonce) { + if (subflow->request_join && !subflow->local_nonce) { struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn; pr_debug("subflow=%p", sk); @@ -57,9 +54,6 @@ static int subflow_rebuild_header(struct sock *sk) } out: - if (err) - return err; - return subflow->icsk_af_ops->rebuild_header(sk); } @@ -72,8 +66,7 @@ static void subflow_req_destructor(struct request_sock *req) if (subflow_req->msk) sock_put((struct sock *)subflow_req->msk); - if (subflow_req->mp_capable) - mptcp_token_destroy_request(subflow_req->token); + mptcp_token_destroy_request(req); tcp_request_sock_ops.destructor(req); } @@ -135,6 +128,7 @@ static void subflow_init_req(struct request_sock *req, subflow_req->mp_capable = 0; subflow_req->mp_join = 0; subflow_req->msk = NULL; + mptcp_token_init_request(req); #ifdef CONFIG_TCP_MD5SIG /* no MPTCP if MD5SIG is enabled on this socket or we may run out of @@ -250,7 +244,7 @@ static void 
subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) subflow->remote_nonce = mp_opt.nonce; pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow, subflow->thmac, subflow->remote_nonce); - } else if (subflow->request_mptcp) { + } else { tp->is_mptcp = 0; } @@ -386,7 +380,7 @@ static void mptcp_sock_destruct(struct sock *sk) sock_orphan(sk); } - mptcp_token_destroy(mptcp_sk(sk)->token); + mptcp_token_destroy(mptcp_sk(sk)); inet_sock_destruct(sk); } @@ -505,6 +499,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, */ new_msk->sk_destruct = mptcp_sock_destruct; mptcp_pm_new_connection(mptcp_sk(new_msk), 1); + mptcp_token_accept(subflow_req, mptcp_sk(new_msk)); ctx->conn = new_msk; new_msk = NULL; diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 33352dd99d4d..9c0771774815 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -24,7 +24,7 @@ #include #include -#include +#include #include #include #include @@ -33,10 +33,55 @@ #include #include "protocol.h" -static RADIX_TREE(token_tree, GFP_ATOMIC); -static RADIX_TREE(token_req_tree, GFP_ATOMIC); -static DEFINE_SPINLOCK(token_tree_lock); -static int token_used __read_mostly; +#define TOKEN_MAX_RETRIES 4 +#define TOKEN_MAX_CHAIN_LEN 4 + +struct token_bucket { + spinlock_t lock; + int chain_len; + struct hlist_nulls_head req_chain; + struct hlist_nulls_head msk_chain; +}; + +static struct token_bucket *token_hash __read_mostly; +static unsigned int token_mask __read_mostly; + +static struct token_bucket *token_bucket(u32 token) +{ + return &token_hash[token & token_mask]; +} + +/* called with bucket lock held */ +static struct mptcp_subflow_request_sock * +__token_lookup_req(struct token_bucket *t, u32 token) +{ + struct mptcp_subflow_request_sock *req; + struct hlist_nulls_node *pos; + + hlist_nulls_for_each_entry_rcu(req, pos, &t->req_chain, token_node) + if (req->token == token) + return req; + return NULL; +} + +/* called with bucket lock held */ +static struct mptcp_sock * +__token_lookup_msk(struct token_bucket *t, u32 token) +{ + struct hlist_nulls_node *pos; + struct sock *sk; + + sk_nulls_for_each_rcu(sk, pos, &t->msk_chain) + if (mptcp_sk(sk)->token == token) + return mptcp_sk(sk); + return NULL; +} + +static bool __token_bucket_busy(struct token_bucket *t, u32 token) +{ + return !token || t->chain_len >= TOKEN_MAX_CHAIN_LEN || + __token_lookup_req(t, token) || __token_lookup_msk(t, token); +} /** * mptcp_token_new_request - create new key/idsn/token for subflow_request @@ -52,30 +97,32 @@ static int token_used __read_mostly; int mptcp_token_new_request(struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); - int err; + int retries = TOKEN_MAX_RETRIES; + struct token_bucket *bucket; + u32 token; - while (1) { - u32 token; +again: + mptcp_crypto_key_gen_sha(&subflow_req->local_key, + &subflow_req->token, + &subflow_req->idsn); + pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n", + req, subflow_req->local_key, subflow_req->token, + subflow_req->idsn); - mptcp_crypto_key_gen_sha(&subflow_req->local_key, - &subflow_req->token, - &subflow_req->idsn); - pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n", - req, subflow_req->local_key, subflow_req->token, - subflow_req->idsn); - - token = subflow_req->token; - spin_lock_bh(&token_tree_lock); - if (!radix_tree_lookup(&token_req_tree, token) && - !radix_tree_lookup(&token_tree, token)) - break; - spin_unlock_bh(&token_tree_lock); + token = subflow_req->token; + bucket = token_bucket(token); + 
spin_lock_bh(&bucket->lock); + if (__token_bucket_busy(bucket, token)) { + spin_unlock_bh(&bucket->lock); + if (!--retries) + return -EBUSY; + goto again; } - err = radix_tree_insert(&token_req_tree, - subflow_req->token, &token_used); - spin_unlock_bh(&token_tree_lock); - return err; + hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); + bucket->chain_len++; + spin_unlock_bh(&bucket->lock); + return 0; } /** @@ -97,48 +144,56 @@ int mptcp_token_new_request(struct request_sock *req) int mptcp_token_new_connect(struct sock *sk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); - struct sock *mptcp_sock = subflow->conn; - int err; + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + int retries = TOKEN_MAX_RETRIES; + struct token_bucket *bucket; - while (1) { - u32 token; + pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n", + sk, subflow->local_key, subflow->token, subflow->idsn); - mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token, - &subflow->idsn); +again: + mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token, + &subflow->idsn); - pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n", - sk, subflow->local_key, subflow->token, subflow->idsn); - - token = subflow->token; - spin_lock_bh(&token_tree_lock); - if (!radix_tree_lookup(&token_req_tree, token) && - !radix_tree_lookup(&token_tree, token)) - break; - spin_unlock_bh(&token_tree_lock); + bucket = token_bucket(subflow->token); + spin_lock_bh(&bucket->lock); + if (__token_bucket_busy(bucket, subflow->token)) { + spin_unlock_bh(&bucket->lock); + if (!--retries) + return -EBUSY; + goto again; } - err = radix_tree_insert(&token_tree, subflow->token, mptcp_sock); - spin_unlock_bh(&token_tree_lock); - return err; + WRITE_ONCE(msk->token, subflow->token); + __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain); + bucket->chain_len++; + spin_unlock_bh(&bucket->lock); + return 0; } /** - * mptcp_token_new_accept - insert token for later processing - * @token: the token to insert to the tree - * @conn: the just cloned socket linked to the new connection + * mptcp_token_accept - replace a req sk with full sock in token hash + * @req: the request socket to be removed + * @msk: the just cloned socket linked to the new connection * * Called when a SYN packet creates a new logical connection, i.e. * is not a join request. */ -int mptcp_token_new_accept(u32 token, struct sock *conn) +void mptcp_token_accept(struct mptcp_subflow_request_sock *req, + struct mptcp_sock *msk) { - int err; + struct mptcp_subflow_request_sock *pos; + struct token_bucket *bucket; - spin_lock_bh(&token_tree_lock); - err = radix_tree_insert(&token_tree, token, conn); - spin_unlock_bh(&token_tree_lock); + bucket = token_bucket(req->token); + spin_lock_bh(&bucket->lock); - return err; + /* pedantic lookup check for the moved token */ + pos = __token_lookup_req(bucket, req->token); + if (!WARN_ON_ONCE(pos != req)) + hlist_nulls_del_init_rcu(&req->token_node); + __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain); + spin_unlock_bh(&bucket->lock); } /** @@ -152,45 +207,103 @@ int mptcp_token_new_accept(u32 token, struct sock *conn) */ struct mptcp_sock *mptcp_token_get_sock(u32 token) { - struct sock *conn; + struct hlist_nulls_node *pos; + struct token_bucket *bucket; + struct mptcp_sock *msk; + struct sock *sk; - spin_lock_bh(&token_tree_lock); - conn = radix_tree_lookup(&token_tree, token); - if (conn) { - /* token still reserved? 
*/ - if (conn == (struct sock *)&token_used) - conn = NULL; - else - sock_hold(conn); + rcu_read_lock(); + bucket = token_bucket(token); + +again: + sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) { + msk = mptcp_sk(sk); + if (READ_ONCE(msk->token) != token) + continue; + if (!refcount_inc_not_zero(&sk->sk_refcnt)) + goto not_found; + if (READ_ONCE(msk->token) != token) { + sock_put(sk); + goto again; + } + goto found; } - spin_unlock_bh(&token_tree_lock); + if (get_nulls_value(pos) != (token & token_mask)) + goto again; - return mptcp_sk(conn); +not_found: + msk = NULL; + +found: + rcu_read_unlock(); + return msk; } /** * mptcp_token_destroy_request - remove mptcp connection/token - * @token: token of mptcp connection to remove + * @req: mptcp request socket dropping the token * - * Remove not-yet-fully-established incoming connection identified - * by @token. + * Remove the token associated to @req. */ -void mptcp_token_destroy_request(u32 token) +void mptcp_token_destroy_request(struct request_sock *req) { - spin_lock_bh(&token_tree_lock); - radix_tree_delete(&token_req_tree, token); - spin_unlock_bh(&token_tree_lock); + struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); + struct mptcp_subflow_request_sock *pos; + struct token_bucket *bucket; + + if (hlist_nulls_unhashed(&subflow_req->token_node)) + return; + + bucket = token_bucket(subflow_req->token); + spin_lock_bh(&bucket->lock); + pos = __token_lookup_req(bucket, subflow_req->token); + if (!WARN_ON_ONCE(pos != subflow_req)) { + hlist_nulls_del_init_rcu(&pos->token_node); + bucket->chain_len--; + } + spin_unlock_bh(&bucket->lock); } /** * mptcp_token_destroy - remove mptcp connection/token - * @token: token of mptcp connection to remove + * @msk: mptcp connection dropping the token * - * Remove the connection identified by @token. + * Remove the token associated to @msk */ -void mptcp_token_destroy(u32 token) +void mptcp_token_destroy(struct mptcp_sock *msk) { - spin_lock_bh(&token_tree_lock); - radix_tree_delete(&token_tree, token); - spin_unlock_bh(&token_tree_lock); + struct token_bucket *bucket; + struct mptcp_sock *pos; + + if (sk_unhashed((struct sock *)msk)) + return; + + bucket = token_bucket(msk->token); + spin_lock_bh(&bucket->lock); + pos = __token_lookup_msk(bucket, msk->token); + if (!WARN_ON_ONCE(pos != msk)) { + __sk_nulls_del_node_init_rcu((struct sock *)pos); + bucket->chain_len--; + } + spin_unlock_bh(&bucket->lock); +} + +void __init mptcp_token_init(void) +{ + int i; + + token_hash = alloc_large_system_hash("MPTCP token", + sizeof(struct token_bucket), + 0, + 20,/* one slot per 1MB of memory */ + 0, + NULL, + &token_mask, + 0, + 64 * 1024); + for (i = 0; i < token_mask + 1; ++i) { + INIT_HLIST_NULLS_HEAD(&token_hash[i].req_chain, i); + INIT_HLIST_NULLS_HEAD(&token_hash[i].msk_chain, i); + spin_lock_init(&token_hash[i].lock); + } } From a00a582203dbc43ea311a50e979038fc0c8ee19f Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Jun 2020 19:30:01 +0200 Subject: [PATCH 0374/2056] mptcp: move crypto test to KUNIT currently MPTCP uses a custom hook to executed unit tests at boot time. Let's use the KUNIT framework instead. Additionally move the relevant code to a separate file and export the function needed by the test when self-tests are build as a module. Co-developed-by: Matthieu Baerts Signed-off-by: Matthieu Baerts Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/Kconfig | 24 +++++++++----- net/mptcp/Makefile | 3 ++ net/mptcp/crypto.c | 63 ++---------------------------------- net/mptcp/crypto_test.c | 72 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 93 insertions(+), 69 deletions(-) create mode 100644 net/mptcp/crypto_test.c diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig index a9ed3bf1d93f..d7d5f9349366 100644 --- a/net/mptcp/Kconfig +++ b/net/mptcp/Kconfig @@ -18,12 +18,20 @@ config MPTCP_IPV6 select IPV6 default y -config MPTCP_HMAC_TEST - bool "Tests for MPTCP HMAC implementation" - help - This option enable boot time self-test for the HMAC implementation - used by the MPTCP code - - Say N if you are unsure. - endif + +config MPTCP_KUNIT_TESTS + tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS + select MPTCP + depends on KUNIT + default KUNIT_ALL_TESTS + help + Currently covers the MPTCP crypto helpers. + Only useful for kernel devs running KUnit test harness and are not + for inclusion into a production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index baa0640527c7..f9039804207b 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -3,3 +3,6 @@ obj-$(CONFIG_MPTCP) += mptcp.o mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o + +mptcp_crypto_test-objs := crypto_test.o +obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o \ No newline at end of file diff --git a/net/mptcp/crypto.c b/net/mptcp/crypto.c index 3d980713a9e2..6c4ea979dfd4 100644 --- a/net/mptcp/crypto.c +++ b/net/mptcp/crypto.c @@ -87,65 +87,6 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) sha256_final(&state, (u8 *)hmac); } -#ifdef CONFIG_MPTCP_HMAC_TEST -struct test_cast { - char *key; - char *msg; - char *result; -}; - -/* we can't reuse RFC 4231 test vectors, as we have constraint on the - * input and key size. 
- */ -static struct test_cast tests[] = { - { - .key = "0b0b0b0b0b0b0b0b", - .msg = "48692054", - .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa", - }, - { - .key = "aaaaaaaaaaaaaaaa", - .msg = "dddddddd", - .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9", - }, - { - .key = "0102030405060708", - .msg = "cdcdcdcd", - .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d", - }, -}; - -static int __init test_mptcp_crypto(void) -{ - char hmac[32], hmac_hex[65]; - u32 nonce1, nonce2; - u64 key1, key2; - u8 msg[8]; - int i, j; - - for (i = 0; i < ARRAY_SIZE(tests); ++i) { - /* mptcp hmap will convert to be before computing the hmac */ - key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0])); - key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8])); - nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0])); - nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4])); - - put_unaligned_be32(nonce1, &msg[0]); - put_unaligned_be32(nonce2, &msg[4]); - - mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); - for (j = 0; j < 32; ++j) - sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff); - hmac_hex[64] = 0; - - if (memcmp(hmac_hex, tests[i].result, 64)) - pr_err("test %d failed, got %s expected %s", i, - hmac_hex, tests[i].result); - else - pr_info("test %d [ ok ]", i); - } - return 0; -} - -late_initcall(test_mptcp_crypto); +#if IS_MODULE(CONFIG_MPTCP_KUNIT_TESTS) +EXPORT_SYMBOL_GPL(mptcp_crypto_hmac_sha); #endif diff --git a/net/mptcp/crypto_test.c b/net/mptcp/crypto_test.c new file mode 100644 index 000000000000..017248dea038 --- /dev/null +++ b/net/mptcp/crypto_test.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "protocol.h" + +struct test_case { + char *key; + char *msg; + char *result; +}; + +/* we can't reuse RFC 4231 test vectors, as we have constraint on the + * input and key size. 
+ */ +static struct test_case tests[] = { + { + .key = "0b0b0b0b0b0b0b0b", + .msg = "48692054", + .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa", + }, + { + .key = "aaaaaaaaaaaaaaaa", + .msg = "dddddddd", + .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9", + }, + { + .key = "0102030405060708", + .msg = "cdcdcdcd", + .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d", + }, +}; + +static void mptcp_crypto_test_basic(struct kunit *test) +{ + char hmac[32], hmac_hex[65]; + u32 nonce1, nonce2; + u64 key1, key2; + u8 msg[8]; + int i, j; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + /* mptcp hmap will convert to be before computing the hmac */ + key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0])); + key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8])); + nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0])); + nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4])); + + put_unaligned_be32(nonce1, &msg[0]); + put_unaligned_be32(nonce2, &msg[4]); + + mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); + for (j = 0; j < 32; ++j) + sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff); + hmac_hex[64] = 0; + + KUNIT_EXPECT_STREQ(test, &hmac_hex[0], tests[i].result); + } +} + +static struct kunit_case mptcp_crypto_test_cases[] = { + KUNIT_CASE(mptcp_crypto_test_basic), + {} +}; + +static struct kunit_suite mptcp_crypto_suite = { + .name = "mptcp-crypto", + .test_cases = mptcp_crypto_test_cases, +}; + +kunit_test_suite(mptcp_crypto_suite); + +MODULE_LICENSE("GPL"); From a8ee9c9b58199be2692f7eb761d7a01749f79655 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Jun 2020 19:30:02 +0200 Subject: [PATCH 0375/2056] mptcp: introduce token KUNIT self-tests Unit tests for the internal MPTCP token APIs, using KUNIT v1 -> v2: - use the correct RCU annotation when initializing icsk ulp - fix a few checkpatch issues Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/Kconfig | 2 +- net/mptcp/Makefile | 3 +- net/mptcp/token.c | 9 +++ net/mptcp/token_test.c | 140 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 152 insertions(+), 2 deletions(-) create mode 100644 net/mptcp/token_test.c diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig index d7d5f9349366..af84fce70bb0 100644 --- a/net/mptcp/Kconfig +++ b/net/mptcp/Kconfig @@ -26,7 +26,7 @@ config MPTCP_KUNIT_TESTS depends on KUNIT default KUNIT_ALL_TESTS help - Currently covers the MPTCP crypto helpers. + Currently covers the MPTCP crypto and token helpers. Only useful for kernel devs running KUnit test harness and are not for inclusion into a production build. 
diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index f9039804207b..c53f9b845523 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -5,4 +5,5 @@ mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o mptcp_crypto_test-objs := crypto_test.o -obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o \ No newline at end of file +mptcp_token_test-objs := token_test.o +obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o mptcp_token_test.o diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 9c0771774815..66a4990bd897 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -307,3 +307,12 @@ void __init mptcp_token_init(void) spin_lock_init(&token_hash[i].lock); } } + +#if IS_MODULE(CONFIG_MPTCP_KUNIT_TESTS) +EXPORT_SYMBOL_GPL(mptcp_token_new_request); +EXPORT_SYMBOL_GPL(mptcp_token_new_connect); +EXPORT_SYMBOL_GPL(mptcp_token_accept); +EXPORT_SYMBOL_GPL(mptcp_token_get_sock); +EXPORT_SYMBOL_GPL(mptcp_token_destroy_request); +EXPORT_SYMBOL_GPL(mptcp_token_destroy); +#endif diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c new file mode 100644 index 000000000000..e1bd6f0a0676 --- /dev/null +++ b/net/mptcp/token_test.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "protocol.h" + +static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test) +{ + struct mptcp_subflow_request_sock *req; + + req = kunit_kzalloc(test, sizeof(struct mptcp_subflow_request_sock), + GFP_USER); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req); + mptcp_token_init_request((struct request_sock *)req); + return req; +} + +static void mptcp_token_test_req_basic(struct kunit *test) +{ + struct mptcp_subflow_request_sock *req = build_req_sock(test); + struct mptcp_sock *null_msk = NULL; + + KUNIT_ASSERT_EQ(test, 0, + mptcp_token_new_request((struct request_sock *)req)); + KUNIT_EXPECT_NE(test, 0, (int)req->token); + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token)); + + /* cleanup */ + mptcp_token_destroy_request((struct request_sock *)req); +} + +static struct inet_connection_sock *build_icsk(struct kunit *test) +{ + struct inet_connection_sock *icsk; + + icsk = kunit_kzalloc(test, sizeof(struct inet_connection_sock), + GFP_USER); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, icsk); + return icsk; +} + +static struct mptcp_subflow_context *build_ctx(struct kunit *test) +{ + struct mptcp_subflow_context *ctx; + + ctx = kunit_kzalloc(test, sizeof(struct mptcp_subflow_context), + GFP_USER); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ctx); + return ctx; +} + +static struct mptcp_sock *build_msk(struct kunit *test) +{ + struct mptcp_sock *msk; + + msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER); + KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk); + refcount_set(&((struct sock *)msk)->sk_refcnt, 1); + return msk; +} + +static void mptcp_token_test_msk_basic(struct kunit *test) +{ + struct inet_connection_sock *icsk = build_icsk(test); + struct mptcp_subflow_context *ctx = build_ctx(test); + struct mptcp_sock *msk = build_msk(test); + struct mptcp_sock *null_msk = NULL; + struct sock *sk; + + rcu_assign_pointer(icsk->icsk_ulp_data, ctx); + ctx->conn = (struct sock *)msk; + sk = (struct sock *)msk; + + KUNIT_ASSERT_EQ(test, 0, + mptcp_token_new_connect((struct sock *)icsk)); + KUNIT_EXPECT_NE(test, 0, (int)ctx->token); + KUNIT_EXPECT_EQ(test, ctx->token, msk->token); + KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token)); + KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt)); + + 
mptcp_token_destroy(msk); + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token)); +} + +static void mptcp_token_test_accept(struct kunit *test) +{ + struct mptcp_subflow_request_sock *req = build_req_sock(test); + struct mptcp_sock *msk = build_msk(test); + + KUNIT_ASSERT_EQ(test, 0, + mptcp_token_new_request((struct request_sock *)req)); + msk->token = req->token; + mptcp_token_accept(req, msk); + KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token)); + + /* this is now a no-op */ + mptcp_token_destroy_request((struct request_sock *)req); + KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token)); + + /* cleanup */ + mptcp_token_destroy(msk); +} + +static void mptcp_token_test_destroyed(struct kunit *test) +{ + struct mptcp_subflow_request_sock *req = build_req_sock(test); + struct mptcp_sock *msk = build_msk(test); + struct mptcp_sock *null_msk = NULL; + struct sock *sk; + + sk = (struct sock *)msk; + + KUNIT_ASSERT_EQ(test, 0, + mptcp_token_new_request((struct request_sock *)req)); + msk->token = req->token; + mptcp_token_accept(req, msk); + + /* simulate race on removal */ + refcount_set(&sk->sk_refcnt, 0); + KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token)); + + /* cleanup */ + mptcp_token_destroy(msk); +} + +static struct kunit_case mptcp_token_test_cases[] = { + KUNIT_CASE(mptcp_token_test_req_basic), + KUNIT_CASE(mptcp_token_test_msk_basic), + KUNIT_CASE(mptcp_token_test_accept), + KUNIT_CASE(mptcp_token_test_destroyed), + {} +}; + +static struct kunit_suite mptcp_token_suite = { + .name = "mptcp-token", + .test_cases = mptcp_token_test_cases, +}; + +kunit_test_suite(mptcp_token_suite); + +MODULE_LICENSE("GPL"); From 3a8b44546979cf682324bd2fd61e539f428911b4 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 26 Jun 2020 21:40:31 +0300 Subject: [PATCH 0376/2056] net: atlantic: MACSec offload statistics checkpatch fix This patch fixes a checkpatch warning. Fixes: aec0f1aac58e ("net: atlantic: MACSec offload statistics implementation") Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index e53ba7bfaf61..f352b206b5cf 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -124,21 +124,21 @@ static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = { "MACSec OutUnctrlHitDropRedir", }; -static const char *aq_macsec_txsc_stat_names[] = { +static const char * const aq_macsec_txsc_stat_names[] = { "MACSecTXSC%d ProtectedPkts", "MACSecTXSC%d EncryptedPkts", "MACSecTXSC%d ProtectedOctets", "MACSecTXSC%d EncryptedOctets", }; -static const char *aq_macsec_txsa_stat_names[] = { +static const char * const aq_macsec_txsa_stat_names[] = { "MACSecTXSC%dSA%d HitDropRedirect", "MACSecTXSC%dSA%d Protected2Pkts", "MACSecTXSC%dSA%d ProtectedPkts", "MACSecTXSC%dSA%d EncryptedPkts", }; -static const char *aq_macsec_rxsa_stat_names[] = { +static const char * const aq_macsec_rxsa_stat_names[] = { "MACSecRXSC%dSA%d UntaggedHitPkts", "MACSecRXSC%dSA%d CtrlHitDrpRedir", "MACSecRXSC%dSA%d NotUsingSa", From e39b8ffeb9b9a185438b6dc21bb9997f9fb29cbe Mon Sep 17 00:00:00 2001 From: Nikita Danilov Date: Fri, 26 Jun 2020 21:40:32 +0300 Subject: [PATCH 0377/2056] net: atlantic: fix variable type in aq_ethtool_get_pauseparam This patch fixes the type for variable which is assigned from enum, as such it should have been int, not u32. Signed-off-by: Nikita Danilov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index f352b206b5cf..51dfc12a44be 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -716,13 +716,12 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct aq_nic_s *aq_nic = netdev_priv(ndev); - u32 fc = aq_nic->aq_nic_cfg.fc.req; + int fc = aq_nic->aq_nic_cfg.fc.req; pause->autoneg = 0; pause->rx_pause = !!(fc & AQ_NIC_FC_RX); pause->tx_pause = !!(fc & AQ_NIC_FC_TX); - } static int aq_ethtool_set_pauseparam(struct net_device *ndev, From e35df2186580885c59886452c9b33f02f57ab91f Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 26 Jun 2020 21:40:33 +0300 Subject: [PATCH 0378/2056] net: atlantic: Replace ENOTSUPP usage to EOPNOTSUPP This patch replaces ENOTSUPP (where it was used by mistake) with EOPNOTSUPP. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 647b22d89b1a..43b8914c3ef5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -1188,7 +1188,7 @@ int aq_nic_set_loopback(struct aq_nic_s *self) if (!self->aq_hw_ops->hw_set_loopback || !self->aq_fw_ops->set_phyloopback) - return -ENOTSUPP; + return -EOPNOTSUPP; mutex_lock(&self->fwreq_mutex); self->aq_hw_ops->hw_set_loopback(self->aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 1d9dee4951f9..bf4c41cc312b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -217,7 +217,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) if (rbl_status == 0xF1A7) { aq_pr_err("No FW detected. Dynamic FW load not implemented\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } for (k = 0; k < 1000; k++) { From ab3518acac42751f5e85c57d5d14ffcd897e1957 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 26 Jun 2020 21:40:34 +0300 Subject: [PATCH 0379/2056] net: atlantic: make aq_pci_func_init static This patch makes aq_pci_func_init() static, because it's not used anywhere outside the file itself. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 9 +++++---- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h | 8 ++++---- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 41c0f560f95b..59253846e885 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_pci_func.c: Definition of PCI functions. */ @@ -114,7 +115,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev, return 0; } -int aq_pci_func_init(struct pci_dev *pdev) +static int aq_pci_func_init(struct pci_dev *pdev) { int err; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h index 77be7ee0d7b3..3fa5f7a73680 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_pci_func.h: Declaration of PCI functions. 
*/ @@ -19,7 +20,6 @@ struct aq_board_revision_s { const struct aq_hw_caps_s *caps; }; -int aq_pci_func_init(struct pci_dev *pdev); int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i, char *name, irq_handler_t irq_handler, void *irq_arg, cpumask_t *affinity_mask); From 586616cbd4663a51a53f57ad503d7a93416762c0 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 26 Jun 2020 21:40:35 +0300 Subject: [PATCH 0380/2056] net: atlantic: fix typo in aq_ring_tx_clean This patch fixes a typo in aq_ring_tx_clean. stats is a union, so the typo doesn't cause any issues, but it's a typo nonetheless. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 68fdb3994088..b67b24a0d9a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_ring.c: Definition of functions for Rx/Tx rings. */ @@ -279,7 +280,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } if (unlikely(buff->is_eop)) { - ++self->stats.rx.packets; + ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; dev_kfree_skb_any(buff->skb); From 6ec99221d780b3b4b86cf9cdafc18699ff3d06c9 Mon Sep 17 00:00:00 2001 From: Dmitry Bezrukov Date: Fri, 26 Jun 2020 21:40:36 +0300 Subject: [PATCH 0381/2056] net: atlantic: missing space in a comment in aq_nic.h This patch add a missing space in the comment in aq_nic.h Signed-off-by: Dmitry Bezrukov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 2ab003065e62..317bfc646f0a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_nic.h: Declaration of common code for NIC. */ @@ -111,7 +112,7 @@ struct aq_hw_rx_fltrs_s { u16 active_filters; struct aq_hw_rx_fl2 fl2; struct aq_hw_rx_fl3l4 fl3l4; - /*filter ether type */ + /* filter ether type */ u8 fet_reserved_count; }; From 8664240e303827de2d40f38fc397d1912309359c Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 26 Jun 2020 21:40:37 +0300 Subject: [PATCH 0382/2056] net: atlantic: add alignment checks in hw_atl2_utils_fw.c This patch adds alignment checks in all the helper macros in hw_atl2_utils_fw.c These alignment checks are compile-time, so runtime is not affected. All these helper macros assume the length to be aligned (multiple of 4). If it's not aligned, then there might be issues, e.g. stack corruption. 
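The helper macros copy whole 32-bit words, so both the field offset inside the firmware interface structure and the length being copied must be multiples of sizeof(u32); otherwise the copy can over-read or over-write the variable on the stack. Roughly the same compile-time guard can be sketched in plain userspace C, with C11 _Static_assert standing in for BUILD_BUG_ON_MSG and a made-up fw_mailbox_in layout and shared_buffer_write() macro used purely for illustration:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* hypothetical mailbox layout standing in for struct fw_interface_in */
struct fw_mailbox_in {
	uint32_t link_options;
	uint32_t mtu;
	uint8_t  mac_addr[6];	/* 6 bytes: not a multiple of sizeof(u32) */
	uint16_t pad;
};

/*
 * Copy a whole field into the shared buffer one 32-bit word at a time.
 * The _Static_asserts (BUILD_BUG_ON_MSG in the driver) reject, at compile
 * time, any field whose offset or length is not 32-bit aligned.
 */
#define shared_buffer_write(buf, ITEM, var)				\
	do {								\
		_Static_assert(offsetof(struct fw_mailbox_in, ITEM) %	\
			       sizeof(uint32_t) == 0,			\
			       "Unaligned write " #ITEM);		\
		_Static_assert(sizeof(var) % sizeof(uint32_t) == 0,	\
			       "Unaligned write length " #ITEM);	\
		memcpy((uint8_t *)(buf) +				\
		       offsetof(struct fw_mailbox_in, ITEM),		\
		       &(var), sizeof(var));				\
	} while (0)

int main(void)
{
	uint8_t shared[sizeof(struct fw_mailbox_in)] = { 0 };
	uint32_t mtu = 1500;

	shared_buffer_write(shared, mtu, mtu);	/* ok: offset 4, length 4 */
	/* shared_buffer_write(shared, mac_addr, <6-byte var>) would fail
	 * to build with "Unaligned write length mac_addr".
	 */
	return 0;
}

Because the checks are pure compile-time assertions, the generated object code is unchanged; a misuse simply stops the build instead of corrupting the stack at runtime.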
Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 32 ++++++++++++++++--- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index 3a9352190816..a8ce9a2c1c51 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -16,15 +16,29 @@ #define AQ_A2_FW_READ_TRY_MAX 1000 #define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \ +{\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned write " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned write length " # ITEM);\ hw_atl2_mif_shared_buf_write(HW,\ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\ - (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32)) + (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\ +} #define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \ +{\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned get " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned get length " # ITEM);\ hw_atl2_mif_shared_buf_get(HW, \ (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\ (u32 *)&(VARIABLE), \ - sizeof(VARIABLE) / sizeof(u32)) + sizeof(VARIABLE) / sizeof(u32));\ +} /* This should never be used on non atomic fields, * treat any > u32 read as non atomic. @@ -33,7 +47,9 @@ {\ BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \ sizeof(u32)) != 0,\ - "Non aligned read " # ITEM);\ + "Unaligned read " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned read length " # ITEM);\ BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\ "Non atomic read " # ITEM);\ hw_atl2_mif_shared_buf_read(HW, \ @@ -42,10 +58,18 @@ } #define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \ +({\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned read_safe " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(((struct fw_interface_out *)0)->ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned read_safe length " # ITEM);\ hw_atl2_shared_buffer_read_block((HW), \ (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\ sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\ - (DATA)) + (DATA));\ +}) static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self, u32 offset, u32 dwords, void *data) From 4378b882bf03f2e0471b7d92fa97a7227ff3a126 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Fri, 26 Jun 2020 21:40:38 +0300 Subject: [PATCH 0383/2056] net: atlantic: put ptp code under IS_REACHABLE check A1 requires additional processing for both egress and ingress to support PTP. And it makes sense to get rid of this processing altogether (via ifdef), if PTP clock is disabled globally. This patch puts the PTP code under the corresponding IS_REACHABLE check. Signed-off-by: Igor Russkikh Signed-off-by: Mark Starovoytov Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 2 ++ .../net/ethernet/aquantia/atlantic/aq_main.c | 20 +++++++++++++++---- .../net/ethernet/aquantia/atlantic/aq_ptp.c | 9 +++++++-- .../net/ethernet/aquantia/atlantic/aq_ring.c | 2 ++ 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 51dfc12a44be..a8f0fbbbd91a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -607,7 +607,9 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev, BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp)); +#endif return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 8a1da044e908..dfb29b933eb7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_main.c: Main file for aQuantia Linux driver. */ @@ -98,6 +99,7 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) { /* Hardware adds the Timestamp for PTPv2 802.AS1 * and PTPv2 IPv4 UDP. @@ -114,6 +116,7 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588))) return aq_ptp_xmit(aq_nic, skb); } +#endif skb_tx_timestamp(skb); return aq_nic_xmit(aq_nic, skb); @@ -222,6 +225,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) (void)aq_nic_set_multicast_list(aq_nic, ndev); } +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, struct hwtstamp_config *config) { @@ -256,26 +260,31 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic, return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config); } +#endif static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr) { struct hwtstamp_config config; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) int ret_val; +#endif if (!aq_nic->aq_ptp) return -EOPNOTSUPP; if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; - +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) ret_val = aq_ndev_config_hwtstamp(aq_nic, &config); if (ret_val) return ret_val; +#endif return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0; } +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr) { struct hwtstamp_config config; @@ -287,6 +296,7 @@ static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr) return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
-EFAULT : 0; } +#endif static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { @@ -296,8 +306,10 @@ static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCSHWTSTAMP: return aq_ndev_hwtstamp_set(aq_nic, ifr); +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) case SIOCGHWTSTAMP: return aq_ndev_hwtstamp_get(aq_nic, ifr); +#endif } return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 599ced261b2a..cb9bf41470fd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Aquantia Corporation Network Driver - * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_ptp.c: @@ -18,6 +20,8 @@ #include "aq_phy.h" #include "aq_filters.h" +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + #define AQ_PTP_TX_TIMEOUT (HZ * 10) #define POLL_SYNC_TIMER_MS 15 @@ -1389,3 +1393,4 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w) schedule_delayed_work(&aq_ptp->poll_sync, timeout); } } +#endif diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b67b24a0d9a6..8dd59e9fc3aa 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -490,6 +490,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) { +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) while (self->sw_head != self->hw_head) { u64 ns; @@ -501,6 +502,7 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) self->sw_head = aq_ring_next_dx(self, self->sw_head); } +#endif } int aq_ring_rx_fill(struct aq_ring_s *self) From 9205d7b1c1cffa827c23bdbf35e04c7cbe1e1f10 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 25 Jun 2020 22:59:41 -0700 Subject: [PATCH 0384/2056] net/mlx5: Avoid RDMA file inclusion in core driver mlx5 cq.h does not depend on RDMA verbs. Remove RDMA verbs file inclusion. Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- include/linux/mlx5/cq.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index b5a9399e07ee..7bfb67363434 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -33,7 +33,6 @@ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H -#include #include #include From 188f0f988bdf27a94208568314e3b0c012d1629c Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Thu, 25 Jun 2020 22:59:42 -0700 Subject: [PATCH 0385/2056] net/mlx5: Avoid eswitch header inclusion in fs core layer Flow steering core layer is independent of the eswitch layer. Hence avoid fs_core dependency on eswitch. 
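Concretely, the dependency arrow should only point downward: the lower flow-steering layer owns the shared FDB chain/prio sizing macros, and the eswitch layer picks them up from the lower layer's header rather than the reverse. A toy, single-file sketch of that split (hypothetical function names, the two "layers" only marked by comments) might look like:

/* lower layer (plays the role of fs_core): owns the shared sizing macros */
#define FDB_TC_MAX_CHAIN	3
#define FDB_FT_CHAIN		(FDB_TC_MAX_CHAIN + 1)
#define FDB_NUM_CHAINS		(FDB_FT_CHAIN + 1)

static int fs_core_create_chain(int chain)
{
	/* knows nothing about the eswitch layer above it */
	return (chain >= 0 && chain < FDB_NUM_CHAINS) ? 0 : -1;
}

/* upper layer (plays the role of eswitch): includes the lower layer only */
static int eswitch_setup(void)
{
	int chain, err = 0;

	for (chain = 0; chain < FDB_NUM_CHAINS && !err; chain++)
		err = fs_core_create_chain(chain);
	return err;
}

int main(void)
{
	return eswitch_setup();
}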
Fixes: 328edb499f99 ("net/mlx5: Split FDB fast path prio to multiple namespaces") Signed-off-by: Parav Pandit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 10 ---------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 10 ++++++++++ 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index a5175e98c0b3..bb309b2f77f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -44,16 +44,6 @@ #include "lib/mpfs.h" #include "en/tc_ct.h" -#define FDB_TC_MAX_CHAIN 3 -#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) -#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) - -/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ -#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) - -#define FDB_TC_MAX_PRIO 16 -#define FDB_TC_LEVELS_PER_PRIO 2 - #ifdef CONFIG_MLX5_ESWITCH #define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 13e2fb79c21a..e47a66983935 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -41,7 +41,6 @@ #include "diag/fs_tracepoint.h" #include "accel/ipsec.h" #include "fpga/ipsec.h" -#include "eswitch.h" #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 825b662f809b..afe7f0bffb93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -39,6 +39,16 @@ #include #include +#define FDB_TC_MAX_CHAIN 3 +#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1) +#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1) + +/* The index of the last real chain (FT) + 1 as chain zero is valid as well */ +#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1) + +#define FDB_TC_MAX_PRIO 16 +#define FDB_TC_LEVELS_PER_PRIO 2 + struct mlx5_modify_hdr { enum mlx5_flow_namespace_type ns_type; union { From 2d1b69ed65ee033aa541518cc9f6a815296ac493 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 25 Jun 2020 22:59:43 -0700 Subject: [PATCH 0386/2056] net/mlx5: kTLS, Improve TLS params layout structures Add explicit WQE segment structures for the TLS static and progress params. According to the HW spec, TISN is not part of the progress params context, take it out of it. Rename the control segment tisn field as it could hold either a TIS or a TIR number. 
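The point of the explicit segment structs is that the driver-owned part of the WQE (the TIS/TIR number) is carried as a plain big-endian field in front of an opaque, firmware-defined context blob, instead of being poked into the blob itself. A standalone sketch of that layout pattern, with a hypothetical context size and htonl() standing in for cpu_to_be32(), could look like:

#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */
#include <stdint.h>
#include <string.h>

#define TLS_PROGRESS_CTX_BYTES	16	/* hypothetical; HW-defined in reality */

/* explicit WQE segment: driver-owned number up front, opaque HW context after */
struct wqe_tls_progress_params_seg {
	uint32_t tis_tir_num;			/* big-endian on the wire */
	uint8_t  ctx[TLS_PROGRESS_CTX_BYTES];	/* filled via HW-layout setters */
};

static void fill_progress_params(struct wqe_tls_progress_params_seg *seg,
				 uint32_t tisn)
{
	memset(seg, 0, sizeof(*seg));
	seg->tis_tir_num = htonl(tisn);	/* explicit field, not part of ctx */
	/* real code would now apply MLX5_SET()-style helpers to seg->ctx */
}

int main(void)
{
	struct wqe_tls_progress_params_seg seg;

	fill_progress_params(&seg, 0x1234);
	return seg.ctx[0];	/* 0: context untouched apart from zeroing */
}

Keeping the number outside the context also matches the HW spec quoted above: the progress params context itself no longer carries a TISN at all.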
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ktls.h | 2 +- .../ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 14 +++++++++----- .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 2 +- include/linux/mlx5/device.h | 9 +++++++++ include/linux/mlx5/mlx5_ifc.h | 5 +---- include/linux/mlx5/qp.h | 2 +- 7 files changed, 23 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index bfd3e1161bc6..31cac239563d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -182,7 +182,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg) { - return cseg && !!cseg->tisn; + return cseg && !!cseg->tis_tir_num; } static inline u8 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index c6180892cfcb..806ed185dd4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -19,7 +19,7 @@ #define MLX5E_KTLS_PROGRESS_WQE_SZ \ (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ - MLX5_ST_SZ_BYTES(tls_progress_params)) + sizeof(struct mlx5_wqe_tls_progress_params_seg)) #define MLX5E_KTLS_PROGRESS_WQEBBS \ (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 3cd78d9503c1..ad7300f19815 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -64,7 +64,7 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->tisn = cpu_to_be32(priv_tx->tisn << 8); + cseg->tis_tir_num = cpu_to_be32(priv_tx->tisn << 8); ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); @@ -75,10 +75,14 @@ build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, static void fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn); - MLX5_SET(tls_progress_params, ctx, record_tracker_state, + struct mlx5_wqe_tls_progress_params_seg *params; + + params = ctx; + + params->tis_tir_num = cpu_to_be32(priv_tx->tisn); + MLX5_SET(tls_progress_params, params->ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); - MLX5_SET(tls_progress_params, ctx, auth_state, + MLX5_SET(tls_progress_params, params->ctx, auth_state, MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD); } @@ -284,7 +288,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP); cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - cseg->tisn = cpu_to_be32(tisn << 8); + cseg->tis_tir_num = cpu_to_be32(tisn << 8); cseg->fm_ce_se = first ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; fsz = skb_frag_size(frag); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 05454a843b28..72d26fbc8d5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -305,7 +305,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state) { - cseg->tisn = cpu_to_be32(state->tls_tisn << 8); + cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8); } static int tls_update_resync_sn(struct net_device *netdev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 1bc27aca648b..57db125e5802 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -458,6 +458,15 @@ enum { MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, }; +struct mlx5_wqe_tls_static_params_seg { + u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)]; +}; + +struct mlx5_wqe_tls_progress_params_seg { + __be32 tis_tir_num; + u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)]; +}; + enum { MLX5_SET_PORT_RESET_QKEY = 0, MLX5_SET_PORT_GUID0 = 16, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 116bd9bb347f..a227518c70cf 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -10638,16 +10638,13 @@ struct mlx5_ifc_tls_static_params_bits { }; struct mlx5_ifc_tls_progress_params_bits { - u8 reserved_at_0[0x8]; - u8 tisn[0x18]; - u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; - u8 reserved_at_64[0x4]; + u8 reserved_at_44[0x4]; u8 hw_offset_record_number[0x18]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b8992b861ae6..36492a1342cf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg { __be32 general_id; __be32 imm; __be32 umr_mkey; - __be32 tisn; + __be32 tis_tir_num; }; }; From 8d94b590f1e4d60e0a1f51a43aded1b0e6fd06f7 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 26 Nov 2019 16:23:23 +0200 Subject: [PATCH 0387/2056] net/mlx5e: Turn XSK ICOSQ into a general asynchronous one There is an upcoming demand (in downstream patches) for an ICOSQ to be populated out of the NAPI context, asynchronously. There is already an existing one serving XSK-related use case. In this patch, promote this ICOSQ to serve as general async ICOSQ, to be used for XSK and non-XSK flows. As part of this, the reg_umr bit of the SQ context is now set (if capable), as the general async ICOSQ should support possible posts of UMR WQEs. 
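The key property of the async ICOSQ is that, unlike the regular ICOSQ which is only ever touched from its own NAPI context, it may be posted to from any CPU, so every post-and-doorbell sequence has to run under the dedicated spinlock. A minimal userspace sketch of that "post from anywhere under a lock" pattern, with a pthread spinlock standing in for async_icosq_lock and a plain counter standing in for the hardware doorbell, might be:

#include <pthread.h>
#include <stdio.h>

#define SQ_SIZE 64

struct async_sq {
	pthread_spinlock_t lock;	/* plays the role of async_icosq_lock */
	unsigned int pc;		/* producer counter */
	unsigned int db;		/* "doorbell": last pc made visible to HW */
	int wqe[SQ_SIZE];
};

/* safe to call from any thread/CPU: post one entry and ring the doorbell */
static void async_sq_post(struct async_sq *sq, int val)
{
	pthread_spin_lock(&sq->lock);
	sq->wqe[sq->pc % SQ_SIZE] = val;
	sq->pc++;
	sq->db = sq->pc;		/* in the driver: write the HW doorbell */
	pthread_spin_unlock(&sq->lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 1000; i++)
		async_sq_post(arg, i);
	return NULL;
}

int main(void)
{
	struct async_sq sq = { .pc = 0, .db = 0 };
	pthread_t t1, t2;

	pthread_spin_init(&sq.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&t1, NULL, worker, &sq);
	pthread_create(&t2, NULL, worker, &sq);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("posted %u entries\n", sq.pc);	/* 2000 */
	pthread_spin_destroy(&sq.lock);
	return 0;
}

This is why the spinlock moves out of the XSK-only path and is initialized for every channel: any producer, XSK or not, must take it before touching the shared queue.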
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 8 ++-- .../mellanox/mlx5/core/en/xsk/setup.c | 47 ++----------------- .../ethernet/mellanox/mlx5/core/en/xsk/tx.c | 12 ++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 24 +++++++++- .../net/ethernet/mellanox/mlx5/core/en_txrx.c | 12 ++--- 5 files changed, 42 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 842db20493df..29265ca6050c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -651,9 +651,11 @@ struct mlx5e_channel { /* AF_XDP zero-copy */ struct mlx5e_rq xskrq; struct mlx5e_xdpsq xsksq; - struct mlx5e_icosq xskicosq; - /* xskicosq can be accessed from any CPU - the spinlock protects it. */ - spinlock_t xskicosq_lock; + + /* Async ICOSQ */ + struct mlx5e_icosq async_icosq; + /* async_icosq can be accessed from any CPU - the spinlock protects it. */ + spinlock_t async_icosq_lock; /* data path - accessed per napi poll */ struct irq_desc *irq_desc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 2c80205dc939..1eb817e62830 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -34,31 +34,15 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params, } } -static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv, - u8 log_wq_size, - struct mlx5e_sq_param *param) -{ - void *sqc = param->sqc; - void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - - mlx5e_build_sq_param_common(priv, param); - - MLX5_SET(wq, wq, log_wq_sz, log_wq_size); -} - static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk, struct mlx5e_channel_param *cparam) { - const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - mlx5e_build_rq_param(priv, params, xsk, &cparam->rq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); - mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq); mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq); mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq); - mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq); } int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, @@ -66,7 +50,6 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel *c) { struct mlx5e_channel_param *cparam; - struct dim_cq_moder icocq_moder = {}; int err; if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev)) @@ -100,31 +83,12 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_close_tx_cq; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq); - if (unlikely(err)) - goto err_close_sq; - - /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be - * triggered and NAPI to be called on the correct CPU. 
- */ - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq); - if (unlikely(err)) - goto err_close_icocq; - kvfree(cparam); - spin_lock_init(&c->xskicosq_lock); - set_bit(MLX5E_CHANNEL_STATE_XSK, c->state); return 0; -err_close_icocq: - mlx5e_close_cq(&c->xskicosq.cq); - -err_close_sq: - mlx5e_close_xdpsq(&c->xsksq); - err_close_tx_cq: mlx5e_close_cq(&c->xsksq.cq); @@ -148,32 +112,27 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); - mlx5e_close_icosq(&c->xskicosq); - mlx5e_close_cq(&c->xskicosq.cq); mlx5e_close_xdpsq(&c->xsksq); mlx5e_close_cq(&c->xsksq.cq); memset(&c->xskrq, 0, sizeof(c->xskrq)); memset(&c->xsksq, 0, sizeof(c->xsksq)); - memset(&c->xskicosq, 0, sizeof(c->xskicosq)); } void mlx5e_activate_xsk(struct mlx5e_channel *c) { - mlx5e_activate_icosq(&c->xskicosq); set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); /* TX queue is created active. */ - spin_lock(&c->xskicosq_lock); - mlx5e_trigger_irq(&c->xskicosq); - spin_unlock(&c->xskicosq_lock); + spin_lock(&c->async_icosq_lock); + mlx5e_trigger_irq(&c->async_icosq); + spin_unlock(&c->async_icosq_lock); } void mlx5e_deactivate_xsk(struct mlx5e_channel *c) { mlx5e_deactivate_rq(&c->xskrq); /* TX queue is disabled on close. */ - mlx5e_deactivate_icosq(&c->xskicosq); } static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 83dce9cdb8c2..e0b3c61af93e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -26,19 +26,19 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) return -ENXIO; if (!napi_if_scheduled_mark_missed(&c->napi)) { - /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not + /* To avoid WQE overrun, don't post a NOP if async_icosq is not * active and not polled by NAPI. Return 0, because the upcoming * activate will trigger the IRQ for us. 
*/ - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state))) + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state))) return 0; - if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state)) + if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state)) return 0; - spin_lock(&c->xskicosq_lock); - mlx5e_trigger_irq(&c->xskicosq); - spin_unlock(&c->xskicosq_lock); + spin_lock(&c->async_icosq_lock); + mlx5e_trigger_irq(&c->async_icosq); + spin_unlock(&c->async_icosq_lock); } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a836a02a2116..72ce5808d583 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1817,10 +1817,14 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, struct dim_cq_moder icocq_moder = {0, 0}; int err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->async_icosq.cq); if (err) return err; + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); + if (err) + goto err_close_async_icosq_cq; + err = mlx5e_open_tx_cqs(c, params, cparam); if (err) goto err_close_icosq_cq; @@ -1841,10 +1845,16 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, napi_enable(&c->napi); - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); + spin_lock_init(&c->async_icosq_lock); + + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->async_icosq); if (err) goto err_disable_napi; + err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); + if (err) + goto err_close_async_icosq; + err = mlx5e_open_sqs(c, params, cparam); if (err) goto err_close_icosq; @@ -1879,6 +1889,9 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, err_close_icosq: mlx5e_close_icosq(&c->icosq); +err_close_async_icosq: + mlx5e_close_icosq(&c->async_icosq); + err_disable_napi: napi_disable(&c->napi); @@ -1897,6 +1910,9 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, err_close_icosq_cq: mlx5e_close_cq(&c->icosq.cq); +err_close_async_icosq_cq: + mlx5e_close_cq(&c->async_icosq.cq); + return err; } @@ -1908,6 +1924,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_xdpsq(&c->rq_xdpsq); mlx5e_close_sqs(c); mlx5e_close_icosq(&c->icosq); + mlx5e_close_icosq(&c->async_icosq); napi_disable(&c->napi); if (c->xdp) mlx5e_close_cq(&c->rq_xdpsq.cq); @@ -1915,6 +1932,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c) mlx5e_close_cq(&c->xdpsq.cq); mlx5e_close_tx_cqs(c); mlx5e_close_cq(&c->icosq.cq); + mlx5e_close_cq(&c->async_icosq.cq); } static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) @@ -1995,6 +2013,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c) for (tc = 0; tc < c->num_tc; tc++) mlx5e_activate_txqsq(&c->sq[tc]); mlx5e_activate_icosq(&c->icosq); + mlx5e_activate_icosq(&c->async_icosq); mlx5e_activate_rq(&c->rq); if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) @@ -2009,6 +2028,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) mlx5e_deactivate_xsk(c); mlx5e_deactivate_rq(&c->rq); + mlx5e_deactivate_icosq(&c->async_icosq); mlx5e_deactivate_icosq(&c->icosq); for (tc = 0; tc < c->num_tc; tc++) mlx5e_deactivate_txqsq(&c->sq[tc]); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 8480278f2ee2..e3dbab2a294c 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -149,17 +149,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) } mlx5e_poll_ico_cq(&c->icosq.cq); + if (mlx5e_poll_ico_cq(&c->async_icosq.cq)) + /* Don't clear the flag if nothing was polled to prevent + * queueing more WQEs and overflowing the async ICOSQ. + */ + clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state); busy |= INDIRECT_CALL_2(rq->post_wqes, mlx5e_post_rx_mpwqes, mlx5e_post_rx_wqes, rq); if (xsk_open) { - if (mlx5e_poll_ico_cq(&c->xskicosq.cq)) - /* Don't clear the flag if nothing was polled to prevent - * queueing more WQEs and overflowing XSKICOSQ. - */ - clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state); busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); } @@ -189,11 +189,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) mlx5e_cq_arm(&rq->cq); mlx5e_cq_arm(&c->icosq.cq); + mlx5e_cq_arm(&c->async_icosq.cq); mlx5e_cq_arm(&c->xdpsq.cq); if (xsk_open) { mlx5e_handle_rx_dim(xskrq); - mlx5e_cq_arm(&c->xskicosq.cq); mlx5e_cq_arm(&xsksq->cq); mlx5e_cq_arm(&xskrq->cq); } From c293ac927fbb0c804b91118535f64d5d9f7a0d8f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sat, 13 Jun 2020 22:53:32 +0300 Subject: [PATCH 0388/2056] net/mlx5e: Refactor build channel params Take the CQ params into their respective RQ/SQ params. Split the params build of the different ICOSQs (sync and async), as they require different init values. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/params.h | 32 +++++++++---------- .../mellanox/mlx5/core/en/xsk/setup.c | 6 ++-- .../net/ethernet/mellanox/mlx5/core/en_main.c | 30 +++++++++-------- 3 files changed, 34 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 989d8f429438..a87273e801b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -11,18 +11,6 @@ struct mlx5e_xsk_param { u16 chunk_size; }; -struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; - struct mlx5e_rq_frags_info frags_info; -}; - -struct mlx5e_sq_param { - u32 sqc[MLX5_ST_SZ_DW(sqc)]; - struct mlx5_wq_param wq; - bool is_mpw; -}; - struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; @@ -30,14 +18,26 @@ struct mlx5e_cq_param { u8 cq_period_mode; }; +struct mlx5e_rq_param { + struct mlx5e_cq_param cqp; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + struct mlx5e_rq_frags_info frags_info; +}; + +struct mlx5e_sq_param { + struct mlx5e_cq_param cqp; + u32 sqc[MLX5_ST_SZ_DW(sqc)]; + struct mlx5_wq_param wq; + bool is_mpw; +}; + struct mlx5e_channel_param { struct mlx5e_rq_param rq; - struct mlx5e_sq_param sq; + struct mlx5e_sq_param txq_sq; struct mlx5e_sq_param xdp_sq; struct mlx5e_sq_param icosq; - struct mlx5e_cq_param rx_cq; - struct mlx5e_cq_param tx_cq; - struct mlx5e_cq_param icosq_cq; + struct mlx5e_sq_param async_icosq; }; static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index 1eb817e62830..cc46414773b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -41,8 +41,6 @@ static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv, { mlx5e_build_rq_param(priv, params, xsk, &cparam->rq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); - mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq); - mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq); } int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, @@ -61,7 +59,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, mlx5e_build_xsk_cparam(priv, params, xsk, cparam); - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq); + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq); if (unlikely(err)) goto err_free_cparam; @@ -69,7 +67,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, if (unlikely(err)) goto err_close_rx_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq); if (unlikely(err)) goto err_close_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 72ce5808d583..11997c23dfb5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1675,7 +1675,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->tx_cq, &c->sq[tc].cq); + &cparam->txq_sq.cqp, &c->sq[tc].cq); if (err) goto err_close_tx_cqs; } @@ -1707,7 +1707,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, - params, &cparam->sq, &c->sq[tc], tc); + params, &cparam->txq_sq, &c->sq[tc], tc); if (err) goto err_close_sqs; } @@ -1817,11 +1817,11 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, struct dim_cq_moder icocq_moder = {0, 0}; int err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->async_icosq.cq); + err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq); if (err) return err; - err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq); + err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq); if (err) goto err_close_async_icosq_cq; @@ -1829,17 +1829,16 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, if (err) goto err_close_icosq_cq; - err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq); if (err) goto err_close_tx_cqs; - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq); if (err) goto err_close_xdp_tx_cqs; - /* XDP SQ CQ params are same as normal TXQ sq CQ params */ err = c->xdp ? 
mlx5e_open_cq(c, params->tx_cq_moderation, - &cparam->tx_cq, &c->rq_xdpsq.cq) : 0; + &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0; if (err) goto err_close_rx_cq; @@ -1847,7 +1846,7 @@ static int mlx5e_open_queues(struct mlx5e_channel *c, spin_lock_init(&c->async_icosq_lock); - err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->async_icosq); + err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq); if (err) goto err_disable_napi; @@ -2158,6 +2157,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en); param->wq.buf_numa_node = dev_to_node(mdev->device); + mlx5e_build_rx_cq_param(priv, params, xsk, ¶m->cqp); } static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv, @@ -2200,6 +2200,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); MLX5_SET(sqc, sqc, allow_swp, allow_swp); + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -2276,6 +2277,7 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, log_wq_size); MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); + mlx5e_build_ico_cq_param(priv, log_wq_size, ¶m->cqp); } void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, @@ -2288,6 +2290,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); + mlx5e_build_tx_cq_param(priv, params, ¶m->cqp); } static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, @@ -2306,18 +2309,17 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) { - u8 icosq_log_wq_sz; + u8 icosq_log_wq_sz, async_icosq_log_wq_sz; mlx5e_build_rq_param(priv, params, NULL, &cparam->rq); icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); + async_icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; - mlx5e_build_sq_param(priv, params, &cparam->sq); + mlx5e_build_sq_param(priv, params, &cparam->txq_sq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq); - mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq); - mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq); - mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq); + mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq); } int mlx5e_open_channels(struct mlx5e_priv *priv, From b8922a73ec3e37db8836dbcce2af19d85a8bc9ef Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Thu, 2 Apr 2020 02:02:33 -0700 Subject: [PATCH 0389/2056] net/mlx5e: API to manipulate TTC rules destinations Store the default destinations of the on-load generated TTC (Traffic Type Classifier) rules in the ttc rules table. Introduce TTC API functions to manipulate/restore and get the TTC rule destination and use these API functions in arfs implementation. This will allow a better decoupling between TTC implementation and its users. 
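[Editor's illustration, not part of the patch: a minimal, compilable sketch of the save/override/restore idea described above. Each classifier rule remembers the destination it was created with, so a user such as aRFS can redirect traffic and later restore the default without knowing how the rule was built. Types and names are invented for illustration and are not the mlx5e TTC API.]

#include <stdio.h>

enum dest_type { DEST_TIR, DEST_FLOW_TABLE };

struct flow_dest {
        enum dest_type type;
        unsigned int id;               /* TIR number or flow table id */
};

struct ttc_rule {
        struct flow_dest current_dest; /* where the rule points right now */
        struct flow_dest default_dest; /* saved at rule creation time */
};

/* Redirect a rule to a new destination (e.g. an aRFS/accel flow table). */
static void ttc_fwd_dest(struct ttc_rule *rule, struct flow_dest new_dest)
{
        rule->current_dest = new_dest;
}

/* Restore the destination the rule was created with. */
static void ttc_fwd_default_dest(struct ttc_rule *rule)
{
        rule->current_dest = rule->default_dest;
}

int main(void)
{
        struct ttc_rule tcp4 = {
                .current_dest = { DEST_TIR, 7 },
                .default_dest = { DEST_TIR, 7 },
        };

        ttc_fwd_dest(&tcp4, (struct flow_dest){ DEST_FLOW_TABLE, 42 });
        printf("redirected to table %u\n", tcp4.current_dest.id);

        ttc_fwd_default_dest(&tcp4);
        printf("restored to TIR %u\n", tcp4.current_dest.id);
        return 0;
}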
Signed-off-by: Saeed Mahameed Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy --- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 16 +++- .../net/ethernet/mellanox/mlx5/core/en_arfs.c | 34 +++----- .../net/ethernet/mellanox/mlx5/core/en_fs.c | 84 ++++++++++++++----- 3 files changed, 86 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 0416f7712109..c633579474c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -105,11 +105,16 @@ enum mlx5e_tunnel_types { bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); +struct mlx5e_ttc_rule { + struct mlx5_flow_handle *rule; + struct mlx5_flow_destination default_dest; +}; + /* L3/L4 traffic type classifier */ struct mlx5e_ttc_table { - struct mlx5e_flow_table ft; - struct mlx5_flow_handle *rules[MLX5E_NUM_TT]; - struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; + struct mlx5e_flow_table ft; + struct mlx5e_ttc_rule rules[MLX5E_NUM_TT]; + struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; }; /* NIC prio FTS */ @@ -248,6 +253,11 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, struct mlx5e_ttc_table *ttc); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); +int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, + struct mlx5_flow_destination *new_dest); +struct mlx5_flow_destination +mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); +int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index c4c9d6cda7e6..39475f6565c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -90,23 +90,15 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) static int arfs_disable(struct mlx5e_priv *priv) { - struct mlx5_flow_destination dest = {}; - struct mlx5e_tir *tir = priv->indir_tir; - int err = 0; - int tt; - int i; + int err, i; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (i = 0; i < ARFS_NUM_TYPES; i++) { - dest.tir_num = tir[i].tirn; - tt = arfs_get_tt(i); - /* Modify ttc rules destination to bypass the aRFS tables*/ - err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], - &dest, NULL); + /* Modify ttc rules destination back to their default */ + err = mlx5e_ttc_fwd_default_dest(priv, arfs_get_tt(i)); if (err) { netdev_err(priv->netdev, - "%s: modify ttc destination failed\n", - __func__); + "%s: modify ttc[%d] default destination failed, err(%d)\n", + __func__, arfs_get_tt(i), err); return err; } } @@ -125,21 +117,17 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv) int mlx5e_arfs_enable(struct mlx5e_priv *priv) { struct mlx5_flow_destination dest = {}; - int err = 0; - int tt; - int i; + int err, i; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; for (i = 0; i < ARFS_NUM_TYPES; i++) { dest.ft = priv->fs.arfs.arfs_tables[i].ft.t; - tt = arfs_get_tt(i); /* Modify ttc rules destination to point on the aRFS FTs */ - err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], - &dest, NULL); + err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest); if (err) { netdev_err(priv->netdev, - "%s: modify ttc destination failed 
err=%d\n", - __func__, err); + "%s: modify ttc[%d] dest to arfs, failed err(%d)\n", + __func__, arfs_get_tt(i), err); arfs_disable(priv); return err; } @@ -186,8 +174,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, return -EINVAL; } + /* FIXME: Must use mlx5e_ttc_get_default_dest(), + * but can't since TTC default is not setup yet ! + */ dest.tir_num = tir[tt].tirn; - arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL, &flow_act, &dest, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 73d3dc07331f..64d002d92250 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -672,9 +672,9 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) int i; for (i = 0; i < MLX5E_NUM_TT; i++) { - if (!IS_ERR_OR_NULL(ttc->rules[i])) { - mlx5_del_flow_rules(ttc->rules[i]); - ttc->rules[i] = NULL; + if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { + mlx5_del_flow_rules(ttc->rules[i].rule); + ttc->rules[i].rule = NULL; } } @@ -857,7 +857,8 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, struct mlx5e_ttc_table *ttc) { struct mlx5_flow_destination dest = {}; - struct mlx5_flow_handle **rules; + struct mlx5_flow_handle **trules; + struct mlx5e_ttc_rule *rules; struct mlx5_flow_table *ft; int tt; int err; @@ -867,39 +868,47 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + struct mlx5e_ttc_rule *rule = &rules[tt]; + if (tt == MLX5E_TT_ANY) dest.tir_num = params->any_tt_tirn; else dest.tir_num = params->indir_tirn[tt]; - rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - ttc_rules[tt].proto); - if (IS_ERR(rules[tt])) + + rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; goto del_rules; + } + rule->default_dest = dest; } if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) return 0; - rules = ttc->tunnel_rules; + trules = ttc->tunnel_rules; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = params->inner_ttc->ft.t; for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { if (!mlx5e_tunnel_proto_supported(priv->mdev, ttc_tunnel_rules[tt].proto)) continue; - rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_tunnel_rules[tt].etype, - ttc_tunnel_rules[tt].proto); - if (IS_ERR(rules[tt])) + trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_tunnel_rules[tt].etype, + ttc_tunnel_rules[tt].proto); + if (IS_ERR(trules[tt])) { + err = PTR_ERR(trules[tt]); + trules[tt] = NULL; goto del_rules; + } } return 0; del_rules: - err = PTR_ERR(rules[tt]); - rules[tt] = NULL; mlx5e_cleanup_ttc_rules(ttc); return err; } @@ -1015,33 +1024,38 @@ static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, struct mlx5e_ttc_table *ttc) { struct mlx5_flow_destination dest = {}; - struct mlx5_flow_handle **rules; + struct mlx5e_ttc_rule *rules; struct mlx5_flow_table *ft; int err; int tt; ft = ttc->ft.t; rules = ttc->rules; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + struct mlx5e_ttc_rule *rule = &rules[tt]; + if (tt == MLX5E_TT_ANY) dest.tir_num = params->any_tt_tirn; else dest.tir_num = params->indir_tirn[tt]; - rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - 
ttc_rules[tt].proto); - if (IS_ERR(rules[tt])) + rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; goto del_rules; + } + rule->default_dest = dest; } return 0; del_rules: - err = PTR_ERR(rules[tt]); - rules[tt] = NULL; + mlx5e_cleanup_ttc_rules(ttc); return err; } @@ -1210,6 +1224,30 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, return err; } +int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, + struct mlx5_flow_destination *new_dest) +{ + return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL); +} + +struct mlx5_flow_destination +mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) +{ + struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest; + + WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, + "TTC[%d] default dest is not setup yet", type); + + return *dest; +} + +int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) +{ + struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type); + + return mlx5e_ttc_fwd_dest(priv, type, &dest); +} + static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai) { From c062d52ac24ceea0b4a5fbdc70c5cff1e47a0e49 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Sun, 14 Apr 2019 16:35:24 +0300 Subject: [PATCH 0390/2056] net/mlx5e: Receive flow steering framework for accelerated TCP flows The framework allows creating flow tables to steer incoming traffic of TCP sockets to the acceleration TIRs. This is used in downstream patches for TLS, and will be used in the future for other offloads. 
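[Editor's illustration, not part of the patch: a compilable sketch of the steering layout this framework introduces, under simplifying assumptions. One table per IP version holds exact-match socket rules plus a lowest-priority default rule that sends unmatched TCP back to the regular receive path. Everything here is invented pseudo-infrastructure, not the driver's flow steering objects.]

#include <stdbool.h>
#include <stdio.h>

enum accel_table { ACCEL_IPV4_TCP, ACCEL_IPV6_TCP, ACCEL_NUM_TABLES };

struct accel_rule {
        bool in_use;
        unsigned int src_port, dst_port; /* the match key, simplified */
        unsigned int tir;                /* acceleration TIR to steer to */
};

struct accel_table_state {
        struct accel_rule rules[4];      /* tiny "flow group 1" */
        unsigned int default_tir;        /* "flow group 2": the catch-all */
};

/* Returns the TIR a packet would be steered to. */
static unsigned int accel_steer(const struct accel_table_state *t,
                                unsigned int src_port, unsigned int dst_port)
{
        for (unsigned int i = 0; i < 4; i++)
                if (t->rules[i].in_use &&
                    t->rules[i].src_port == src_port &&
                    t->rules[i].dst_port == dst_port)
                        return t->rules[i].tir;
        return t->default_tir;           /* unmatched TCP falls through */
}

int main(void)
{
        struct accel_table_state tcp4 = { .default_tir = 1 };

        tcp4.rules[0] = (struct accel_rule){ true, 443, 50000, 9 };

        printf("matched flow -> TIR %u\n", accel_steer(&tcp4, 443, 50000));
        printf("other flow   -> TIR %u\n", accel_steer(&tcp4, 22, 40000));
        return 0;
}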
Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 10 + .../mellanox/mlx5/core/en_accel/fs_tcp.c | 280 ++++++++++++++++++ .../mellanox/mlx5/core/en_accel/fs_tcp.h | 18 ++ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 4 +- 5 files changed, 311 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b61e47bc16e8..8ffa1325a18f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -74,7 +74,7 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ - en_accel/ktls.o en_accel/ktls_tx.o + en_accel/ktls.o en_accel/ktls_tx.o en_accel/fs_tcp.o mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ steering/dr_matcher.o steering/dr_rule.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index c633579474c3..385cbff1caf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -123,6 +123,9 @@ enum { MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, MLX5E_INNER_TTC_FT_LEVEL, +#ifdef CONFIG_MLX5_EN_TLS + MLX5E_ACCEL_FS_TCP_FT_LEVEL, +#endif #ifdef CONFIG_MLX5_EN_ARFS MLX5E_ARFS_FT_LEVEL #endif @@ -216,6 +219,10 @@ static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUP static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; } #endif +#ifdef CONFIG_MLX5_EN_TLS +struct mlx5e_accel_fs_tcp; +#endif + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; #ifdef CONFIG_MLX5_EN_RXNFC @@ -229,6 +236,9 @@ struct mlx5e_flow_steering { #ifdef CONFIG_MLX5_EN_ARFS struct mlx5e_arfs_tables arfs; #endif +#ifdef CONFIG_MLX5_EN_TLS + struct mlx5e_accel_fs_tcp *accel_tcp; +#endif }; struct ttc_params { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c new file mode 100644 index 000000000000..a0e9082e15b0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ + +#include +#include "en_accel/fs_tcp.h" +#include "fs_core.h" + +enum accel_fs_tcp_type { + ACCEL_FS_IPV4_TCP, + ACCEL_FS_IPV6_TCP, + ACCEL_FS_TCP_NUM_TYPES, +}; + +struct mlx5e_accel_fs_tcp { + struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES]; + struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES]; +}; + +static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) +{ + switch (i) { + case ACCEL_FS_IPV4_TCP: + return MLX5E_TT_IPV4_TCP; + default: /* ACCEL_FS_IPV6_TCP */ + return MLX5E_TT_IPV6_TCP; + } +} + +static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, + enum accel_fs_tcp_type type) +{ + struct mlx5e_flow_table *accel_fs_t; + struct mlx5_flow_destination dest; + struct mlx5e_accel_fs_tcp *fs_tcp; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + int err = 0; + + fs_tcp = priv->fs.accel_tcp; + accel_fs_t = &fs_tcp->tables[type]; + + dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type)); + rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, + "%s: add default rule failed, accel_fs type=%d, err %d\n", + __func__, type, err); + return err; + } + + fs_tcp->default_rules[type] = rule; + return 0; +} + +#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS (2) +#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE (BIT(16) - 1) +#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE (BIT(0)) +#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE (MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\ + MLX5E_ACCEL_FS_TCP_GROUP2_SIZE) +static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft, + enum accel_fs_tcp_type type) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in || !ft->g) { + kvfree(ft->g); + kvfree(in); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version); + + switch (type) { + case ACCEL_FS_IPV4_TCP: + case ACCEL_FS_IPV6_TCP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport); + break; + default: + err = -EINVAL; + goto out; + } + + switch (type) { + case ACCEL_FS_IPV4_TCP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + break; + case ACCEL_FS_IPV6_TCP: + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + break; + default: + err = -EINVAL; + goto out; + } + + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + /* Default Flow Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, 
ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; +out: + kvfree(in); + + return err; +} + +static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type) +{ + struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type]; + struct mlx5_flow_table_attr ft_attr = {}; + int err; + + ft->num_groups = 0; + + ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE; + ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + + ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n", + ft->t->id, ft->t->level); + + err = accel_fs_tcp_create_groups(ft, type); + if (err) + goto err; + + err = accel_fs_tcp_add_default_rule(priv, type); + if (err) + goto err; + + return 0; +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static int accel_fs_tcp_disable(struct mlx5e_priv *priv) +{ + int err, i; + + for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { + /* Modify ttc rules destination to point back to the indir TIRs */ + err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i)); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] default destination failed, err(%d)\n", + __func__, fs_accel2tt(i), err); + return err; + } + } + + return 0; +} + +static int accel_fs_tcp_enable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest = {}; + int err, i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { + dest.ft = priv->fs.accel_tcp->tables[i].t; + + /* Modify ttc rules destination to point on the accel_fs FTs */ + err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc[%d] destination to accel failed, err(%d)\n", + __func__, fs_accel2tt(i), err); + return err; + } + } + return 0; +} + +static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i) +{ + struct mlx5e_accel_fs_tcp *fs_tcp; + + fs_tcp = priv->fs.accel_tcp; + if (IS_ERR_OR_NULL(fs_tcp->tables[i].t)) + return; + + mlx5_del_flow_rules(fs_tcp->default_rules[i]); + mlx5e_destroy_flow_table(&fs_tcp->tables[i]); + fs_tcp->tables[i].t = NULL; +} + +void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) +{ + int i; + + if (!priv->fs.accel_tcp) + return; + + accel_fs_tcp_disable(priv); + + for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) + accel_fs_tcp_destroy_table(priv, i); + + kfree(priv->fs.accel_tcp); + priv->fs.accel_tcp = NULL; +} + +int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) +{ + int i, err; + + if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version)) + return -EOPNOTSUPP; + + priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL); + if (!priv->fs.accel_tcp) + return -ENOMEM; + + for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { + err = accel_fs_tcp_create_table(priv, i); + if (err) + goto err_destroy_tables; + } + + err = accel_fs_tcp_enable(priv); + if (err) + goto err_destroy_tables; + + return 0; + +err_destroy_tables: + while (--i >= 0) + accel_fs_tcp_destroy_table(priv, i); + + kfree(priv->fs.accel_tcp); + priv->fs.accel_tcp = NULL; + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h 
new file mode 100644 index 000000000000..0df53473550a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5E_ACCEL_FS_TCP_H__ +#define __MLX5E_ACCEL_FS_TCP_H__ + +#include "en.h" + +#ifdef CONFIG_MLX5_EN_TLS +int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv); +void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv); +#else +static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {} +#endif + +#endif /* __MLX5E_ACCEL_FS_TCP_H__ */ + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e47a66983935..785b2960d6b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -105,8 +105,8 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Vlan, mac, ttc, inner ttc, aRFS */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 5 +/* Vlan, mac, ttc, inner ttc, {aRFS/accel} */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 6 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) From 5229a96e59ec32466add5e87b537cc3f244afb06 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 16 Jun 2020 13:29:07 +0300 Subject: [PATCH 0391/2056] net/mlx5e: Accel, Expose flow steering API for rules add/del Given a socket, the function extracts the TCP/IP{4,6} ntuple and adds rule to steering. Another function gets the rule and deletes it. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Reviewed-by: Maxim Mikityanskiy --- .../mellanox/mlx5/core/en_accel/fs_tcp.c | 120 ++++++++++++++++++ .../mellanox/mlx5/core/en_accel/fs_tcp.h | 9 ++ 2 files changed, 129 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index a0e9082e15b0..4cdd9eac647d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -26,6 +26,126 @@ static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) } } +static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk) +{ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &inet_sk(sk)->inet_daddr, 4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &inet_sk(sk)->inet_rcv_saddr, 4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); +} + +static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk) +{ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + 
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &sk->sk_v6_daddr, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &inet6_sk(sk)->saddr, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); +} + +void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) +{ + mlx5_del_flow_rules(rule); +} + +struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, + struct sock *sk, u32 tirn, + uint32_t flow_tag) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5e_flow_table *ft = NULL; + struct mlx5e_accel_fs_tcp *fs_tcp; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *flow; + struct mlx5_flow_spec *spec; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + fs_tcp = priv->fs.accel_tcp; + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + + switch (sk->sk_family) { + case AF_INET: + accel_fs_tcp_set_ipv4_flow(spec, sk); + ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP]; + mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__, + &inet_sk(sk)->inet_rcv_saddr, + inet_sk(sk)->inet_sport, + &inet_sk(sk)->inet_daddr, + inet_sk(sk)->inet_dport); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + if (!sk->sk_ipv6only && + ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { + accel_fs_tcp_set_ipv4_flow(spec, sk); + ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP]; + } else { + accel_fs_tcp_set_ipv6_flow(spec, sk); + ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP]; + } + break; +#endif + default: + break; + } + + if (!ft) { + flow = ERR_PTR(-EINVAL); + goto out; + } + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.tcp_dport); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.tcp_sport); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport, + ntohs(inet_sk(sk)->inet_sport)); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport, + ntohs(inet_sk(sk)->inet_dport)); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tirn; + if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) { + spec->flow_context.flow_tag = flow_tag; + spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG; + } + + flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1); + + if (IS_ERR(flow)) + netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n", + PTR_ERR(flow)); + +out: + kvfree(spec); + return flow; +} + static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, enum accel_fs_tcp_type type) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h index 0df53473550a..589235824543 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h @@ -9,9 +9,18 @@ #ifdef CONFIG_MLX5_EN_TLS int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv); void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv); +struct mlx5_flow_handle 
*mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, + struct sock *sk, u32 tirn, + uint32_t flow_tag); +void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule); #else static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {} +static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, + struct sock *sk, u32 tirn, + uint32_t flow_tag) +{ return ERR_PTR(-EOPNOTSUPP); } +static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {} #endif #endif /* __MLX5E_ACCEL_FS_TCP_H__ */ From 7d0d0d86ec6c67d3eb4f49d3bbccc2d8c02799cc Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 26 May 2020 13:58:09 +0300 Subject: [PATCH 0392/2056] net/mlx5e: kTLS, Improve TLS feature modularity Better separate the code into c/h files, so that kTLS internals are exposed to the corresponding non-accel flow as follows: - Necessary datapath functions are exposed via ktls_txrx.h. - Necessary caps and configuration functions are exposed via ktls.h, which became very small. In addition, kTLS internal code sharing is done via ktls_utils.h, which is not exposed to any non-accel file. Add explicit WQE structures for the TLS static and progress params, breaking the union of the static with UMR, and the progress with PSV. Generalize the API as a preparation for TLS RX offload support. Move kTLS TX-specific code to the proper file. Remove the inline tag for function in C files, let the compiler decide. Use kzalloc/kfree for the priv_tx context. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Reviewed-by: Maxim Mikityanskiy --- .../net/ethernet/mellanox/mlx5/core/Makefile | 3 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 14 +- .../mellanox/mlx5/core/en_accel/ktls.c | 62 +---- .../mellanox/mlx5/core/en_accel/ktls.h | 107 --------- .../mellanox/mlx5/core/en_accel/ktls_tx.c | 218 +++++++++++------- .../mellanox/mlx5/core/en_accel/ktls_txrx.c | 110 +++++++++ .../mellanox/mlx5/core/en_accel/ktls_txrx.h | 36 +++ .../mellanox/mlx5/core/en_accel/ktls_utils.h | 69 ++++++ .../mellanox/mlx5/core/en_accel/tls.c | 14 -- .../mellanox/mlx5/core/en_accel/tls.h | 7 - .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 14 ++ .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 13 +- .../net/ethernet/mellanox/mlx5/core/en_tx.c | 1 - 13 files changed, 378 insertions(+), 290 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 8ffa1325a18f..70ad24fff2e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -74,7 +74,8 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ - en_accel/ktls.o en_accel/ktls_tx.o en_accel/fs_tcp.o + en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ + en_accel/ktls_tx.o mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ steering/dr_matcher.o steering/dr_rule.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 29265ca6050c..ec5bf73f9b07 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -191,13 +191,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; - union { - struct { - struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; - }; - u8 tls_progress_params_ctx[0]; - }; + struct mlx5_wqe_eth_seg eth; + struct mlx5_wqe_data_seg data[0]; }; struct mlx5e_rx_wqe_ll { @@ -213,10 +208,7 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; - union { - struct mlx5_mtt inline_mtts[0]; - u8 tls_static_params_ctx[0]; - }; + struct mlx5_mtt inline_mtts[0]; }; extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 452fcf59c36b..8970ea68d005 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -3,31 +3,7 @@ #include "en.h" #include "en_accel/ktls.h" - -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq) -{ - u16 num_dumps, stop_room = 0; - - num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); - - stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_STATIC_WQEBBS); - stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_PROGRESS_WQEBBS); - stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS); - - return stop_room; -} - -static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn) -{ - u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; - void *tisc; - - tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); - - MLX5_SET(tisc, tisc, tls_en, 1); - - return mlx5e_create_tis(mdev, in, tisn); -} +#include "en_accel/ktls_utils.h" static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, @@ -35,8 +11,6 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, u32 start_offload_tcp_sn) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_ktls_offload_context_tx *tx_priv; - struct tls_context *tls_ctx = tls_get_ctx(sk); struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -46,31 +20,8 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info))) return -EOPNOTSUPP; - tx_priv = kvzalloc(sizeof(*tx_priv), GFP_KERNEL); - if (!tx_priv) - return -ENOMEM; + err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn); - tx_priv->expected_seq = start_offload_tcp_sn; - tx_priv->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; - mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv); - - /* tc and underlay_qpn values are not in use for tls tis */ - err = mlx5e_ktls_create_tis(mdev, &tx_priv->tisn); - if (err) - goto create_tis_fail; - - err = mlx5_ktls_create_key(mdev, crypto_info, &tx_priv->key_id); - if (err) - goto encryption_key_create_fail; - - mlx5e_ktls_tx_offload_set_pending(tx_priv); - - return 0; - -encryption_key_create_fail: - mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); -create_tis_fail: - kvfree(tx_priv); return err; } @@ -78,13 +29,10 @@ static void mlx5e_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_ktls_offload_context_tx *tx_priv = - mlx5e_get_ktls_tx_priv_ctx(tls_ctx); + if (direction != TLS_OFFLOAD_CTX_DIR_TX) + 
return; - mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); - mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); - kvfree(tx_priv); + mlx5e_ktls_del_tx(netdev, tls_ctx); } static const struct tlsdev_ops mlx5e_ktls_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 806ed185dd4c..69d736954977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -7,122 +7,15 @@ #include "en.h" #ifdef CONFIG_MLX5_EN_TLS -#include -#include "accel/tls.h" -#include "en_accel/tls_rxtx.h" - -#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \ - (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \ - MLX5_ST_SZ_BYTES(tls_static_params)) -#define MLX5E_KTLS_STATIC_WQEBBS \ - (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB)) - -#define MLX5E_KTLS_PROGRESS_WQE_SZ \ - (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \ - sizeof(struct mlx5_wqe_tls_progress_params_seg)) -#define MLX5E_KTLS_PROGRESS_WQEBBS \ - (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB)) - -struct mlx5e_dump_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_data_seg data; -}; - -#define MLX5E_TLS_FETCH_UMR_WQE(sq, pi) \ - ((struct mlx5e_umr_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_STATIC_UMR_WQE_SZ)) -#define MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi) \ - ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_PROGRESS_WQE_SZ)) -#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \ - ((struct mlx5e_dump_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, \ - sizeof(struct mlx5e_dump_wqe))) - -#define MLX5E_KTLS_DUMP_WQEBBS \ - (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) - -enum { - MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, - MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD = 1, - MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2, -}; - -enum { - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, -}; - -struct mlx5e_ktls_offload_context_tx { - struct tls_offload_context_tx *tx_ctx; - struct tls12_crypto_info_aes_gcm_128 crypto_info; - u32 expected_seq; - u32 tisn; - u32 key_id; - bool ctx_post_pending; -}; - -struct mlx5e_ktls_offload_context_tx_shadow { - struct tls_offload_context_tx tx_ctx; - struct mlx5e_ktls_offload_context_tx *priv_tx; -}; - -static inline void -mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, - struct mlx5e_ktls_offload_context_tx *priv_tx) -{ - struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); - struct mlx5e_ktls_offload_context_tx_shadow *shadow; - - BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); - - shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - - shadow->priv_tx = priv_tx; - priv_tx->tx_ctx = tx_ctx; -} - -static inline struct mlx5e_ktls_offload_context_tx * -mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx) -{ - struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); - struct mlx5e_ktls_offload_context_tx_shadow *shadow; - - BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); - - shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - - return shadow->priv_tx; -} void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); -void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx); -bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq 
*sq, - struct sk_buff *skb, int datalen, - struct mlx5e_accel_tx_tls_state *state); -void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, - struct mlx5e_tx_wqe_info *wi, - u32 *dma_fifo_cc); -u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); - -static inline u8 -mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags, - unsigned int sync_len) -{ - /* Given the MTU and sync_len, calculates an upper bound for the - * number of DUMP WQEs needed for the TX resync of a record. - */ - return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu); -} #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { } -static inline void -mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, - struct mlx5e_tx_wqe_info *wi, - u32 *dma_fifo_cc) {} #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index ad7300f19815..349e29214b92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -1,109 +1,149 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. -#include -#include "en.h" -#include "en/txrx.h" -#include "en_accel/ktls.h" +#include "en_accel/ktls_txrx.h" +#include "en_accel/ktls_utils.h" -enum { - MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2, +struct mlx5e_dump_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_data_seg data; }; -enum { - MLX5E_ENCRYPTION_STANDARD_TLS = 0x1, +#define MLX5E_KTLS_DUMP_WQEBBS \ + (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB)) + +static u8 +mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags, + unsigned int sync_len) +{ + /* Given the MTU and sync_len, calculates an upper bound for the + * number of DUMP WQEs needed for the TX resync of a record. 
+ */ + return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu); +} + +u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq) +{ + u16 num_dumps, stop_room = 0; + + num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE); + + stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS); + stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS); + stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS); + + return stop_room; +} + +static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn) +{ + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; + void *tisc; + + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + + MLX5_SET(tisc, tisc, tls_en, 1); + + return mlx5e_create_tis(mdev, in, tisn); +} + +struct mlx5e_ktls_offload_context_tx { + struct tls_offload_context_tx *tx_ctx; + struct tls12_crypto_info_aes_gcm_128 crypto_info; + u32 expected_seq; + u32 tisn; + u32 key_id; + bool ctx_post_pending; }; -#define EXTRACT_INFO_FIELDS do { \ - salt = info->salt; \ - rec_seq = info->rec_seq; \ - salt_sz = sizeof(info->salt); \ - rec_seq_sz = sizeof(info->rec_seq); \ -} while (0) +struct mlx5e_ktls_offload_context_tx_shadow { + struct tls_offload_context_tx tx_ctx; + struct mlx5e_ktls_offload_context_tx *priv_tx; +}; static void -fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) +mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, + struct mlx5e_ktls_offload_context_tx *priv_tx) { - struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info; - char *initial_rn, *gcm_iv; - u16 salt_sz, rec_seq_sz; - char *salt, *rec_seq; - u8 tls_version; + struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); + struct mlx5e_ktls_offload_context_tx_shadow *shadow; - EXTRACT_INFO_FIELDS; + BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); - gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv); - initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number); + shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - memcpy(gcm_iv, salt, salt_sz); - memcpy(initial_rn, rec_seq, rec_seq_sz); - - tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2; - - MLX5_SET(tls_static_params, ctx, tls_version, tls_version); - MLX5_SET(tls_static_params, ctx, const_1, 1); - MLX5_SET(tls_static_params, ctx, const_2, 2); - MLX5_SET(tls_static_params, ctx, encryption_standard, - MLX5E_ENCRYPTION_STANDARD_TLS); - MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id); + shadow->priv_tx = priv_tx; + priv_tx->tx_ctx = tx_ctx; } -static void -build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn, - struct mlx5e_ktls_offload_context_tx *priv_tx, - bool fence) +static struct mlx5e_ktls_offload_context_tx * +mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx) { - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; - struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; + struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); + struct mlx5e_ktls_offload_context_tx_shadow *shadow; -#define STATIC_PARAMS_DS_CNT \ - DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS) + BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); - cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | - (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24)); - cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | - STATIC_PARAMS_DS_CNT); - cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - cseg->tis_tir_num = cpu_to_be32(priv_tx->tisn << 8); + shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - ucseg->flags = MLX5_UMR_INLINE; - ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); - - fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx); + return shadow->priv_tx; } -static void -fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) +int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) { - struct mlx5_wqe_tls_progress_params_seg *params; + struct mlx5e_ktls_offload_context_tx *priv_tx; + struct tls_context *tls_ctx; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + int err; - params = ctx; + tls_ctx = tls_get_ctx(sk); + priv = netdev_priv(netdev); + mdev = priv->mdev; - params->tis_tir_num = cpu_to_be32(priv_tx->tisn); - MLX5_SET(tls_progress_params, params->ctx, record_tracker_state, - MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); - MLX5_SET(tls_progress_params, params->ctx, auth_state, - MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD); + priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL); + if (!priv_tx) + return -ENOMEM; + + err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id); + if (err) + goto err_create_key; + + priv_tx->expected_seq = start_offload_tcp_sn; + priv_tx->crypto_info = + *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + + mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx); + + err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn); + if (err) + goto err_create_tis; + + priv_tx->ctx_post_pending = true; + + return 0; + +err_create_tis: + mlx5_ktls_destroy_key(mdev, priv_tx->key_id); +err_create_key: + kfree(priv_tx); + return err; } -static void -build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn, - struct mlx5e_ktls_offload_context_tx *priv_tx, - bool fence) +void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx) { - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5e_ktls_offload_context_tx *priv_tx; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; -#define PROGRESS_PARAMS_DS_CNT \ - DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS) + priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx); + priv = netdev_priv(netdev); + mdev = priv->mdev; - cseg->opmod_idx_opcode = - cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | - (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24)); - cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | - PROGRESS_PARAMS_DS_CNT); - cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - - fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx); + mlx5e_destroy_tis(mdev, priv_tx->tisn); + mlx5_ktls_destroy_key(mdev, priv_tx->key_id); + kfree(priv_tx); } static void tx_fill_wi(struct mlx5e_txqsq *sq, @@ -119,11 +159,6 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq, }; } -void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) -{ - priv_tx->ctx_post_pending = true; -} - static bool mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx) { @@ -139,12 +174,15 @@ post_static_params(struct mlx5e_txqsq *sq, struct mlx5e_ktls_offload_context_tx *priv_tx, bool fence) { - u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS; - struct mlx5e_umr_wqe *umr_wqe; + struct mlx5e_set_tls_static_params_wqe *wqe; + u16 pi, num_wqebbs; + num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS; pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); - umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi); - build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence); + wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); + mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info, + priv_tx->tisn, priv_tx->key_id, fence, + TLS_OFFLOAD_CTX_DIR_TX); tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); sq->pc += num_wqebbs; } @@ -154,12 +192,14 @@ post_progress_params(struct mlx5e_txqsq *sq, struct mlx5e_ktls_offload_context_tx *priv_tx, bool fence) { - u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS; - struct mlx5e_tx_wqe *wqe; + struct mlx5e_set_tls_progress_params_wqe *wqe; + u16 pi, num_wqebbs; + num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); - wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi); - build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence); + wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); + mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, + TLS_OFFLOAD_CTX_DIR_TX); tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); sq->pc += num_wqebbs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c new file mode 100644 index 000000000000..edf404eaa275 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ + +#include "en_accel/ktls_txrx.h" +#include "en_accel/ktls_utils.h" + +enum { + MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2, +}; + +enum { + MLX5E_ENCRYPTION_STANDARD_TLS = 0x1, +}; + +#define EXTRACT_INFO_FIELDS do { \ + salt = info->salt; \ + rec_seq = info->rec_seq; \ + salt_sz = sizeof(info->salt); \ + rec_seq_sz = sizeof(info->rec_seq); \ +} while (0) + +static void +fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, + struct tls12_crypto_info_aes_gcm_128 *info, + u32 key_id) +{ + char *initial_rn, *gcm_iv; + u16 salt_sz, rec_seq_sz; + char *salt, *rec_seq; + u8 tls_version; + u8 *ctx; + + ctx = params->ctx; + + EXTRACT_INFO_FIELDS; + + gcm_iv = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv); + initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number); + + memcpy(gcm_iv, salt, salt_sz); + memcpy(initial_rn, rec_seq, rec_seq_sz); + + tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2; + + MLX5_SET(tls_static_params, ctx, tls_version, tls_version); + MLX5_SET(tls_static_params, ctx, const_1, 1); + MLX5_SET(tls_static_params, ctx, const_2, 2); + MLX5_SET(tls_static_params, ctx, encryption_standard, + MLX5E_ENCRYPTION_STANDARD_TLS); + MLX5_SET(tls_static_params, ctx, dek_index, key_id); +} + +void +mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, + u16 pc, u32 sqn, + struct tls12_crypto_info_aes_gcm_128 *info, + u32 tis_tir_num, u32 key_id, + bool fence, enum tls_offload_ctx_dir direction) +{ + struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + +#define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) + + cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | + (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24)); + cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + STATIC_PARAMS_DS_CNT); + cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; + cseg->tis_tir_num = cpu_to_be32(tis_tir_num << 8); + + ucseg->flags = MLX5_UMR_INLINE; + ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); + + fill_static_params(&wqe->params, info, key_id); +} + +static void +fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num) +{ + u8 *ctx = params->ctx; + + params->tis_tir_num = cpu_to_be32(tis_tir_num); + + MLX5_SET(tls_progress_params, ctx, record_tracker_state, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); + MLX5_SET(tls_progress_params, ctx, auth_state, + MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD); +} + +void +mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, + u16 pc, u32 sqn, + u32 tis_tir_num, bool fence, + enum tls_offload_ctx_dir direction) +{ + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + +#define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) + + cseg->opmod_idx_opcode = + cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | + (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24)); + cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + PROGRESS_PARAMS_DS_CNT); + cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; + + fill_progress_params(&wqe->params, tis_tir_num); +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h new file mode 100644 index 000000000000..7ce6b1c41d9b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5E_KTLS_TXRX_H__ +#define __MLX5E_KTLS_TXRX_H__ + +#ifdef CONFIG_MLX5_EN_TLS + +#include +#include "en.h" +#include "en/txrx.h" + +struct mlx5e_accel_tx_tls_state { + u32 tls_tisn; +}; + +u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); + +bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, + struct sk_buff *skb, int datalen, + struct mlx5e_accel_tx_tls_state *state); + +void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, + u32 *dma_fifo_cc); +#else +static inline void +mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, + struct mlx5e_tx_wqe_info *wi, + u32 *dma_fifo_cc) +{ +} + +#endif /* CONFIG_MLX5_EN_TLS */ + +#endif /* __MLX5E_TLS_TXRX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h new file mode 100644 index 000000000000..d1d747cb2dcb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5E_KTLS_UTILS_H__ +#define __MLX5E_KTLS_UTILS_H__ + +#include +#include "en.h" +#include "accel/tls.h" + +enum { + MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD = 0, + MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD = 1, + MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2, +}; + +enum { + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START = 0, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING = 1, + MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2, +}; + +int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn); +void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx); + +struct mlx5e_set_tls_static_params_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_umr_ctrl_seg uctrl; + struct mlx5_mkey_seg mkc; + struct mlx5_wqe_tls_static_params_seg params; +}; + +struct mlx5e_set_tls_progress_params_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_tls_progress_params_seg params; +}; + +#define MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS \ + (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_static_params_wqe), MLX5_SEND_WQE_BB)) + +#define MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS \ + (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_progress_params_wqe), MLX5_SEND_WQE_BB)) + +#define MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi) \ + ((struct mlx5e_set_tls_static_params_wqe *)\ + mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_static_params_wqe))) + +#define MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi) \ + ((struct mlx5e_set_tls_progress_params_wqe *)\ + mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_progress_params_wqe))) + +#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \ + ((struct mlx5e_dump_wqe *)\ + mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_dump_wqe))) + +void 
+mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, + u16 pc, u32 sqn, + struct tls12_crypto_info_aes_gcm_128 *info, + u32 tis_tir_num, u32 key_id, + bool fence, enum tls_offload_ctx_dir direction); +void +mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, + u16 pc, u32 sqn, + u32 tis_tir_num, bool fence, + enum tls_offload_ctx_dir direction); + +#endif /* __MLX5E_TLS_UTILS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 1fbb5a90cb38..c01c17a5c6de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -240,17 +240,3 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv) kfree(tls); priv->tls = NULL; } - -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) -{ - struct mlx5_core_dev *mdev = sq->channel->mdev; - - if (!mlx5_accel_is_tls_device(mdev)) - return 0; - - if (MLX5_CAP_GEN(mdev, tls_tx)) - return mlx5e_ktls_get_stop_room(sq); - - /* Resync SKB. */ - return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index 9219bdb2786e..9015f3f7792d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -94,8 +94,6 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv); int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data); int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data); -u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq); - #else static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) @@ -110,11 +108,6 @@ static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; } static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; } -static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) -{ - return 0; -} - #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 72d26fbc8d5b..4d796fea906d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -387,3 +387,17 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, remove_metadata_hdr(skb); *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN; } + +u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +{ + struct mlx5_core_dev *mdev = sq->channel->mdev; + + if (!mlx5_accel_is_tls_device(mdev)) + return 0; + + if (MLX5_CAP_GEN(mdev, tls_tx)) + return mlx5e_ktls_get_stop_room(sq); + + /* Resync SKB. 
*/ + return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index a50d0394df0a..2a7b98531539 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -34,15 +34,15 @@ #ifndef __MLX5E_TLS_RXTX_H__ #define __MLX5E_TLS_RXTX_H__ +#include "en_accel/ktls_txrx.h" + #ifdef CONFIG_MLX5_EN_TLS #include #include "en.h" #include "en/txrx.h" -struct mlx5e_accel_tx_tls_state { - u32 tls_tisn; -}; +u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq); bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state); @@ -52,6 +52,13 @@ void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *c void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 *cqe_bcnt); +#else + +static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) +{ + return 0; +} + #endif /* CONFIG_MLX5_EN_TLS */ #endif /* __MLX5E_TLS_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 6d406063aca4..da596de3abba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -38,7 +38,6 @@ #include "en/txrx.h" #include "ipoib/ipoib.h" #include "en_accel/en_accel.h" -#include "en_accel/ktls.h" #include "lib/clock.h" static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) From df8d866770f9877dedc864af4346c73694931cab Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 28 May 2020 10:04:03 +0300 Subject: [PATCH 0393/2056] net/mlx5e: kTLS, Use kernel API to extract private offload context Modify the implementation of the private kTLS TX HW offload context getter and setter, so it uses the kernel API functions, instead of a local shadow structure. A single BUILD_BUG_ON check is sufficient, remove the duplicate. 
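For reference, the pattern this converges on is the generic driver-context slot offered by the TLS core: the stack reserves a small per-direction driver_state area in the offload context, and the driver keeps nothing there but a pointer to its own private structure. A minimal sketch of that pattern follows (illustrative only; the authoritative change is the diff below, and the helper/struct names in the sketch are generic placeholders):

    #include <linux/build_bug.h>
    #include <net/tls.h>

    /* Store/fetch the driver's private pointer in the driver_state area
     * reserved by the TLS core for the TX direction.
     */
    static void set_tx_priv_ctx(struct tls_context *tls_ctx, void *priv_tx)
    {
            void **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

            /* One compile-time check is enough: the pointer must fit. */
            BUILD_BUG_ON(sizeof(*ctx) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
            *ctx = priv_tx;
    }

    static void *get_tx_priv_ctx(struct tls_context *tls_ctx)
    {
            void **ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

            return *ctx;
    }
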
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ktls_tx.c | 28 ++++++------------- 1 file changed, 9 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 349e29214b92..5a980f93c326 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -56,37 +56,26 @@ struct mlx5e_ktls_offload_context_tx { bool ctx_post_pending; }; -struct mlx5e_ktls_offload_context_tx_shadow { - struct tls_offload_context_tx tx_ctx; - struct mlx5e_ktls_offload_context_tx *priv_tx; -}; - static void mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, struct mlx5e_ktls_offload_context_tx *priv_tx) { - struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); - struct mlx5e_ktls_offload_context_tx_shadow *shadow; + struct mlx5e_ktls_offload_context_tx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); - BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); + BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) > + TLS_OFFLOAD_CONTEXT_SIZE_TX); - shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - - shadow->priv_tx = priv_tx; - priv_tx->tx_ctx = tx_ctx; + *ctx = priv_tx; } static struct mlx5e_ktls_offload_context_tx * mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx) { - struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx); - struct mlx5e_ktls_offload_context_tx_shadow *shadow; + struct mlx5e_ktls_offload_context_tx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); - BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX); - - shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx; - - return shadow->priv_tx; + return *ctx; } int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, @@ -113,6 +102,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, priv_tx->expected_seq = start_offload_tcp_sn; priv_tx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx); mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx); From 1182f36593570e8e9ca53f6fabfc40ccf93c21d7 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 28 May 2020 10:13:00 +0300 Subject: [PATCH 0394/2056] net/mlx5e: kTLS, Add kTLS RX HW offload support Implement driver support for the kTLS RX HW offload feature. Resync support is added in a downstream patch. New offload contexts post their static/progress params WQEs over the per-channel async ICOSQ, protected under a spin-lock. The Channel/RQ is selected according to the socket's rxq index. Feature is OFF by default. Can be turned on by: $ ethtool -K tls-hw-rx-offload on A new TLS-RX workqueue is used to allow asynchronous addition of steering rules, out of the NAPI context. It will be also used in a downstream patch in the resync procedure. 
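As a usage sketch (not part of this patch): the ethtool toggle above takes the interface name, e.g. "ethtool -K eth0 tls-hw-rx-offload on", where "eth0" is a placeholder. Once the feature is on, kernel TLS RX is requested per socket through the regular kTLS socket API, which is what eventually reaches the tls_dev_add() callback added here with TLS_OFFLOAD_CTX_DIR_RX. Key material below is a placeholder; a real application copies it from its handshake library.

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/tcp.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif
    #ifndef TCP_ULP
    #define TCP_ULP 31
    #endif

    /* Enable kTLS RX on an already-connected, already-handshaked TCP socket. */
    static int enable_ktls_rx(int sock)
    {
            struct tls12_crypto_info_aes_gcm_128 ci = {
                    .info.version = TLS_1_2_VERSION,
                    .info.cipher_type = TLS_CIPHER_AES_GCM_128,
            };

            /* Fill ci.key, ci.iv, ci.salt, ci.rec_seq from the TLS handshake. */

            if (setsockopt(sock, SOL_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;
            /* With NETIF_F_HW_TLS_RX set on the netdev, this lands in mlx5e_ktls_add_rx(). */
            return setsockopt(sock, SOL_TLS, TLS_RX, &ci, sizeof(ci));
    }
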
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Kconfig | 1 + .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../ethernet/mellanox/mlx5/core/accel/tls.h | 19 +- .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 11 + .../mellanox/mlx5/core/en_accel/en_accel.h | 20 ++ .../mellanox/mlx5/core/en_accel/ktls.c | 66 +++- .../mellanox/mlx5/core/en_accel/ktls.h | 19 +- .../mellanox/mlx5/core/en_accel/ktls_rx.c | 311 ++++++++++++++++++ .../mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- .../mellanox/mlx5/core/en_accel/ktls_txrx.c | 20 +- .../mellanox/mlx5/core/en_accel/ktls_txrx.h | 4 + .../mellanox/mlx5/core/en_accel/ktls_utils.h | 4 + .../mellanox/mlx5/core/en_accel/tls.c | 12 +- .../mellanox/mlx5/core/en_accel/tls.h | 1 + .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 20 +- .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 4 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 9 + .../net/ethernet/mellanox/mlx5/core/en_rx.c | 33 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 3 +- 19 files changed, 529 insertions(+), 32 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 4dfdbb82ea9d..76b39659c39b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -173,6 +173,7 @@ config MLX5_TLS config MLX5_EN_TLS bool "TLS cryptography-offload accelaration" depends on MLX5_CORE_EN + depends on XPS depends on MLX5_FPGA_TLS || MLX5_TLS default y help diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 70ad24fff2e2..1e7c7f10db6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -75,7 +75,7 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ - en_accel/ktls_tx.o + en_accel/ktls_tx.o en_accel/ktls_rx.o mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \ steering/dr_matcher.o steering/dr_rule.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h index aefea467f7b3..fd874f0c380a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h @@ -43,9 +43,20 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, u32 *p_key_id); void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id); +static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, tls_tx); +} + +static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, tls_rx); +} + static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev) { - if (!MLX5_CAP_GEN(mdev, tls_tx)) + if (!mlx5_accel_is_ktls_tx(mdev) && + !mlx5_accel_is_ktls_rx(mdev)) return false; if (!MLX5_CAP_GEN(mdev, log_max_dek)) @@ -67,6 +78,12 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev, return false; } #else +static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev) +{ return false; } + +static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev) +{ return false; } + static inline int mlx5_ktls_create_key(struct 
mlx5_core_dev *mdev, struct tls_crypto_info *crypto_info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 31cac239563d..7f55bd3229f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -11,6 +11,10 @@ enum mlx5e_icosq_wqe_type { MLX5E_ICOSQ_WQE_NOP, MLX5E_ICOSQ_WQE_UMR_RX, +#ifdef CONFIG_MLX5_EN_TLS + MLX5E_ICOSQ_WQE_UMR_TLS, + MLX5E_ICOSQ_WQE_SET_PSV_TLS, +#endif }; static inline bool @@ -114,9 +118,16 @@ struct mlx5e_icosq_wqe_info { struct { struct mlx5e_rq *rq; } umr; +#ifdef CONFIG_MLX5_EN_TLS + struct { + struct mlx5e_ktls_offload_context_rx *priv_rx; + } tls_set_params; +#endif }; }; +void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq); + static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size) { struct mlx5_wq_cyc *wq = &sq->wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index fac145dcf2ce..7b6abea850d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -37,6 +37,7 @@ #include #include #include "en_accel/ipsec_rxtx.h" +#include "en_accel/tls.h" #include "en_accel/tls_rxtx.h" #include "en.h" #include "en/txrx.h" @@ -147,4 +148,23 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv, return true; } +static inline int mlx5e_accel_sk_get_rxq(struct sock *sk) +{ + int rxq = sk_rx_queue_get(sk); + + if (unlikely(rxq == -1)) + rxq = 0; + + return rxq; +} + +static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv) +{ + return mlx5e_ktls_init_rx(priv); +} + +static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv) +{ + mlx5e_ktls_cleanup_rx(priv); +} #endif /* __MLX5E_EN_ACCEL_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index 8970ea68d005..d4ef016ab444 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -4,6 +4,7 @@ #include "en.h" #include "en_accel/ktls.h" #include "en_accel/ktls_utils.h" +#include "en_accel/fs_tcp.h" static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, enum tls_offload_ctx_dir direction, @@ -14,13 +15,13 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk, struct mlx5_core_dev *mdev = priv->mdev; int err; - if (WARN_ON(direction != TLS_OFFLOAD_CTX_DIR_TX)) - return -EINVAL; - if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info))) return -EOPNOTSUPP; - err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn); + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn); + else + err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn); return err; } @@ -29,26 +30,71 @@ static void mlx5e_ktls_del(struct net_device *netdev, struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { - if (direction != TLS_OFFLOAD_CTX_DIR_TX) - return; + if (direction == TLS_OFFLOAD_CTX_DIR_TX) + mlx5e_ktls_del_tx(netdev, tls_ctx); + else + mlx5e_ktls_del_rx(netdev, tls_ctx); +} - mlx5e_ktls_del_tx(netdev, tls_ctx); +static int mlx5e_ktls_resync(struct net_device *netdev, + struct sock *sk, u32 seq, u8 *rcd_sn, + enum tls_offload_ctx_dir direction) +{ + return -EOPNOTSUPP; } static const struct tlsdev_ops mlx5e_ktls_ops = { 
.tls_dev_add = mlx5e_ktls_add, .tls_dev_del = mlx5e_ktls_del, + .tls_dev_resync = mlx5e_ktls_resync, }; void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; - if (!mlx5_accel_is_ktls_device(priv->mdev)) + if (!mlx5_accel_is_ktls_device(mdev)) return; - netdev->hw_features |= NETIF_F_HW_TLS_TX; - netdev->features |= NETIF_F_HW_TLS_TX; + if (mlx5_accel_is_ktls_tx(mdev)) { + netdev->hw_features |= NETIF_F_HW_TLS_TX; + netdev->features |= NETIF_F_HW_TLS_TX; + } + + if (mlx5_accel_is_ktls_rx(mdev)) + netdev->hw_features |= NETIF_F_HW_TLS_RX; netdev->tlsdev_ops = &mlx5e_ktls_ops; } + +int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err = 0; + + mutex_lock(&priv->state_lock); + if (enable) + err = mlx5e_accel_fs_tcp_create(priv); + else + mlx5e_accel_fs_tcp_destroy(priv); + mutex_unlock(&priv->state_lock); + + return err; +} + +int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) +{ + int err = 0; + + if (priv->netdev->features & NETIF_F_HW_TLS_RX) + err = mlx5e_accel_fs_tcp_create(priv); + + return err; +} + +void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) +{ + if (priv->netdev->features & NETIF_F_HW_TLS_RX) + mlx5e_accel_fs_tcp_destroy(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 69d736954977..baa58b62e8df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -9,13 +9,30 @@ #ifdef CONFIG_MLX5_EN_TLS void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv); - +int mlx5e_ktls_init_rx(struct mlx5e_priv *priv); +void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv); +int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable); #else static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) { } +static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv) +{ + return 0; +} + +static inline void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv) +{ +} + +static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable) +{ + netdev_warn(netdev, "kTLS is not supported\n"); + return -EOPNOTSUPP; +} + #endif #endif /* __MLX5E_TLS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c new file mode 100644 index 000000000000..aae4245d0c91 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -0,0 +1,311 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2019 Mellanox Technologies. 
+ +#include "en_accel/en_accel.h" +#include "en_accel/ktls_txrx.h" +#include "en_accel/ktls_utils.h" +#include "en_accel/fs_tcp.h" + +struct accel_rule { + struct work_struct work; + struct mlx5e_priv *priv; + struct mlx5_flow_handle *rule; +}; + +enum { + MLX5E_PRIV_RX_FLAG_DELETING, + MLX5E_NUM_PRIV_RX_FLAGS, +}; + +struct mlx5e_ktls_offload_context_rx { + struct tls12_crypto_info_aes_gcm_128 crypto_info; + struct accel_rule rule; + struct sock *sk; + struct completion add_ctx; + u32 tirn; + u32 key_id; + u32 rxq; + DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); +}; + +static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) +{ + int err, inlen; + void *tirc; + u32 *in; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + + MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); + MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, tls_en, 1); + MLX5_SET(tirc, tirc, self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); + + err = mlx5_core_create_tir(mdev, in, tirn); + + kvfree(in); + return err; +} + +static void accel_rule_handle_work(struct work_struct *work) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct accel_rule *accel_rule; + struct mlx5_flow_handle *rule; + + accel_rule = container_of(work, struct accel_rule, work); + priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule); + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) + goto out; + + rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk, + priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG); + if (!IS_ERR_OR_NULL(rule)) + accel_rule->rule = rule; +out: + complete(&priv_rx->add_ctx); +} + +static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv, + struct sock *sk) +{ + INIT_WORK(&rule->work, accel_rule_handle_work); + rule->priv = priv; +} + +static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi, + struct mlx5e_icosq_wqe_info *wi) +{ + sq->db.wqe_info[pi] = *wi; +} + +static struct mlx5_wqe_ctrl_seg * +post_static_params(struct mlx5e_icosq *sq, + struct mlx5e_ktls_offload_context_rx *priv_rx) +{ + struct mlx5e_set_tls_static_params_wqe *wqe; + struct mlx5e_icosq_wqe_info wi; + u16 pi, num_wqebbs, room; + + num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS; + room = mlx5e_stop_room_for_wqe(num_wqebbs); + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + return ERR_PTR(-ENOSPC); + + pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); + wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); + mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, + priv_rx->tirn, priv_rx->key_id, false, + TLS_OFFLOAD_CTX_DIR_RX); + wi = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS, + .num_wqebbs = num_wqebbs, + .tls_set_params.priv_rx = priv_rx, + }; + icosq_fill_wi(sq, pi, &wi); + sq->pc += num_wqebbs; + + return &wqe->ctrl; +} + +static struct mlx5_wqe_ctrl_seg * +post_progress_params(struct mlx5e_icosq *sq, + struct mlx5e_ktls_offload_context_rx *priv_rx, + u32 next_record_tcp_sn) +{ + struct mlx5e_set_tls_progress_params_wqe *wqe; + struct mlx5e_icosq_wqe_info wi; + u16 pi, num_wqebbs, room; + + num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; + room = 
mlx5e_stop_room_for_wqe(num_wqebbs); + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room))) + return ERR_PTR(-ENOSPC); + + pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); + wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); + mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false, + next_record_tcp_sn, + TLS_OFFLOAD_CTX_DIR_RX); + wi = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS, + .num_wqebbs = num_wqebbs, + .tls_set_params.priv_rx = priv_rx, + }; + + icosq_fill_wi(sq, pi, &wi); + sq->pc += num_wqebbs; + + return &wqe->ctrl; +} + +static int post_rx_param_wqes(struct mlx5e_channel *c, + struct mlx5e_ktls_offload_context_rx *priv_rx, + u32 next_record_tcp_sn) +{ + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_icosq *sq; + int err; + + err = 0; + sq = &c->async_icosq; + spin_lock(&c->async_icosq_lock); + + cseg = post_static_params(sq, priv_rx); + if (IS_ERR(cseg)) + goto err_out; + cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn); + if (IS_ERR(cseg)) + goto err_out; + + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); +unlock: + spin_unlock(&c->async_icosq_lock); + + return err; + +err_out: + err = PTR_ERR(cseg); + complete(&priv_rx->add_ctx); + goto unlock; +} + +static void +mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx, + struct mlx5e_ktls_offload_context_rx *priv_rx) +{ + struct mlx5e_ktls_offload_context_rx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + + BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > + TLS_OFFLOAD_CONTEXT_SIZE_RX); + + *ctx = priv_rx; +} + +static struct mlx5e_ktls_offload_context_rx * +mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx) +{ + struct mlx5e_ktls_offload_context_rx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + + return *ctx; +} + +void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) +{ + u8 tls_offload = get_cqe_tls_offload(cqe); + + if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED)) + return; + + switch (tls_offload) { + case CQE_TLS_OFFLOAD_DECRYPTED: + skb->decrypted = 1; + break; + case CQE_TLS_OFFLOAD_RESYNC: + break; + default: /* CQE_TLS_OFFLOAD_ERROR: */ + break; + } +} + +void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx; + struct accel_rule *rule = &priv_rx->rule; + + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { + complete(&priv_rx->add_ctx); + return; + } + queue_work(rule->priv->tls->rx_wq, &rule->work); +} + +int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, + u32 start_offload_tcp_sn) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct tls_context *tls_ctx; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + int rxq, err; + u32 rqtn; + + tls_ctx = tls_get_ctx(sk); + priv = netdev_priv(netdev); + mdev = priv->mdev; + priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL); + if (unlikely(!priv_rx)) + return -ENOMEM; + + err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id); + if (err) + goto err_create_key; + + priv_rx->crypto_info = + *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; + priv_rx->sk = sk; + priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk); + + mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); + + rxq = priv_rx->rxq; + rqtn = priv->direct_tir[rxq].rqt.rqtn; + + err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn); + if 
(err) + goto err_create_tir; + + init_completion(&priv_rx->add_ctx); + accel_rule_init(&priv_rx->rule, priv, sk); + err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn); + if (err) + goto err_post_wqes; + + return 0; + +err_post_wqes: + mlx5_core_destroy_tir(mdev, priv_rx->tirn); +err_create_tir: + mlx5_ktls_destroy_key(mdev, priv_rx->key_id); +err_create_key: + kfree(priv_rx); + return err; +} + +void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + priv = netdev_priv(netdev); + mdev = priv->mdev; + + priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx); + set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags); + if (!cancel_work_sync(&priv_rx->rule.work)) + /* completion is needed, as the priv_rx in the add flow + * is maintained on the wqe info (wi), not on the socket. + */ + wait_for_completion(&priv_rx->add_ctx); + + if (priv_rx->rule.rule) + mlx5e_accel_fs_del_sk(priv_rx->rule.rule); + + mlx5_core_destroy_tir(mdev, priv_rx->tirn); + mlx5_ktls_destroy_key(mdev, priv_rx->key_id); + kfree(priv_rx); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 5a980f93c326..9c34ffa55b32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -188,7 +188,7 @@ post_progress_params(struct mlx5e_txqsq *sq, num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); - mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, + mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0, TLS_OFFLOAD_CTX_DIR_TX); tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); sq->pc += num_wqebbs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c index edf404eaa275..c1f1ad32ca4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c @@ -59,11 +59,13 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, { struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ? + MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS : + MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS; #define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) - cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | - (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24)); + cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | (opmod << 24)); cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | STATIC_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? 
MLX5_FENCE_MODE_INITIATOR_SMALL : 0; @@ -76,12 +78,15 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, } static void -fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num) +fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num, + u32 next_record_tcp_sn) { u8 *ctx = params->ctx; params->tis_tir_num = cpu_to_be32(tis_tir_num); + MLX5_SET(tls_progress_params, ctx, next_record_tcp_sn, + next_record_tcp_sn); MLX5_SET(tls_progress_params, ctx, record_tracker_state, MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START); MLX5_SET(tls_progress_params, ctx, auth_state, @@ -92,19 +97,22 @@ void mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, u16 pc, u32 sqn, u32 tis_tir_num, bool fence, + u32 next_record_tcp_sn, enum tls_offload_ctx_dir direction) { struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ? + MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS : + MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS; #define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS) cseg->opmod_idx_opcode = - cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | - (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24)); + cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | (opmod << 24)); cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) | PROGRESS_PARAMS_DS_CNT); cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0; - fill_progress_params(&wqe->params, tis_tir_num); + fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 7ce6b1c41d9b..7bdd6ec6c981 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -19,6 +19,10 @@ u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq); bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq, struct sk_buff *skb, int datalen, struct mlx5e_accel_tx_tls_state *state); +void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); + +void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi); void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h index d1d747cb2dcb..566cf24eb0fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h @@ -23,6 +23,9 @@ enum { int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn); void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx); +int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, + struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn); +void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx); struct mlx5e_set_tls_static_params_wqe { struct mlx5_wqe_ctrl_seg ctrl; @@ -64,6 +67,7 @@ void mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, u16 pc, u32 sqn, u32 tis_tir_num, bool fence, + u32 next_record_tcp_sn, enum tls_offload_ctx_dir direction); #endif /* __MLX5E_TLS_UTILS_H__ */ diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index c01c17a5c6de..99beb928feff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -197,6 +197,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) return; } + /* FPGA */ if (!mlx5_accel_is_tls_device(priv->mdev)) return; @@ -221,11 +222,19 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) int mlx5e_tls_init(struct mlx5e_priv *priv) { - struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL); + struct mlx5e_tls *tls; + if (!mlx5_accel_is_tls_device(priv->mdev)) + return 0; + + tls = kzalloc(sizeof(*tls), GFP_KERNEL); if (!tls) return -ENOMEM; + tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); + if (!tls->rx_wq) + return -ENOMEM; + priv->tls = tls; return 0; } @@ -237,6 +246,7 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv) if (!tls) return; + destroy_workqueue(tls->rx_wq); kfree(tls); priv->tls = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index 9015f3f7792d..ca0c2ebb41a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -53,6 +53,7 @@ struct mlx5e_tls_sw_stats { struct mlx5e_tls { struct mlx5e_tls_sw_stats sw_stats; + struct workqueue_struct *rx_wq; }; struct mlx5e_tls_offload_context_tx { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 4d796fea906d..182841322ce4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -278,9 +278,10 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, if (WARN_ON_ONCE(tls_ctx->netdev != netdev)) goto err_out; - if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) + if (mlx5_accel_is_ktls_tx(sq->channel->mdev)) return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state); + /* FPGA */ skb_seq = ntohl(tcp_hdr(skb)->seq); context = mlx5e_get_tls_tx_context(tls_ctx); expected_seq = context->expected_seq; @@ -354,12 +355,16 @@ static int tls_update_resync_sn(struct net_device *netdev, return 0; } -void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, - u32 *cqe_bcnt) +void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) { struct mlx5e_tls_metadata *mdata; struct mlx5e_priv *priv; + if (likely(mlx5_accel_is_ktls_rx(rq->mdev))) + return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); + + /* FPGA */ if (!is_metadata_hdr_valid(skb)) return; @@ -370,13 +375,13 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, skb->decrypted = 1; break; case SYNDROM_RESYNC_REQUEST: - tls_update_resync_sn(netdev, skb, mdata); - priv = netdev_priv(netdev); + tls_update_resync_sn(rq->netdev, skb, mdata); + priv = netdev_priv(rq->netdev); atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request); break; case SYNDROM_AUTH_FAILED: /* Authentication failure will be observed and verified by kTLS */ - priv = netdev_priv(netdev); + priv = netdev_priv(rq->netdev); atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail); break; default: @@ -395,9 +400,10 @@ u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) if (!mlx5_accel_is_tls_device(mdev)) return 0; - if (MLX5_CAP_GEN(mdev, tls_tx)) + if 
(mlx5_accel_is_ktls_device(mdev)) return mlx5e_ktls_get_stop_room(sq); + /* FPGA */ /* Resync SKB. */ return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 2a7b98531539..8bb790674042 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -49,8 +49,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state); -void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, - u32 *cqe_bcnt); +void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); #else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 11997c23dfb5..0f1578a5e538 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1441,6 +1441,7 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq) struct mlx5e_channel *c = sq->channel; mlx5e_destroy_sq(c->mdev, sq->sqn); + mlx5e_free_icosq_descs(sq); mlx5e_free_icosq(sq); } @@ -3853,6 +3854,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) #ifdef CONFIG_MLX5_EN_ARFS err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); #endif + err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx); if (err) { netdev->features = oper_features; @@ -5143,8 +5145,14 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_flow_steering; + err = mlx5e_accel_init_rx(priv); + if (err) + goto err_tc_nic_cleanup; + return 0; +err_tc_nic_cleanup: + mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: mlx5e_destroy_flow_steering(priv); err_destroy_xsk_tirs: @@ -5168,6 +5176,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { + mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index dbb1c6323967..9a6958acf87d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -578,6 +578,30 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !!err; } +void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) +{ + u16 sqcc; + + sqcc = sq->cc; + + while (sqcc != sq->pc) { + struct mlx5e_icosq_wqe_info *wi; + u16 ci; + + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.wqe_info[ci]; + sqcc += wi->num_wqebbs; +#ifdef CONFIG_MLX5_EN_TLS + switch (wi->wqe_type) { + case MLX5E_ICOSQ_WQE_SET_PSV_TLS: + mlx5e_ktls_handle_ctx_completion(wi); + break; + } +#endif + } + sq->cc = sqcc; +} + int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); @@ -633,6 +657,13 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) break; case MLX5E_ICOSQ_WQE_NOP: break; +#ifdef CONFIG_MLX5_EN_TLS + case MLX5E_ICOSQ_WQE_UMR_TLS: + break; + case MLX5E_ICOSQ_WQE_SET_PSV_TLS: + mlx5e_ktls_handle_ctx_completion(wi); + break; +#endif default: netdev_WARN_ONCE(cq->channel->netdev, "Bad WQE type in ICOSQ WQE info: 0x%x\n", 
@@ -983,7 +1014,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mac_len = ETH_HLEN; #ifdef CONFIG_MLX5_EN_TLS - mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); + mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); #endif if (lro_num_seg > 1) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index a5fbe7343508..c3095863372c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -35,6 +35,7 @@ #include #include "mlx5_core.h" #include "../../mlxfw/mlxfw.h" +#include "accel/tls.h" enum { MCQS_IDENTIFIER_BOOT_IMG = 0x1, @@ -236,7 +237,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } - if (MLX5_CAP_GEN(dev, tls_tx)) { + if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) { err = mlx5_core_get_caps(dev, MLX5_CAP_TLS); if (err) return err; From acb5a07aaf2723cd273a4089e62611a414fb1c35 Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 12:42:52 +0300 Subject: [PATCH 0395/2056] Revert "net/tls: Add force_resync for driver resync" This reverts commit b3ae2459f89773adcbf16fef4b68deaaa3be1929. Revert the force resync API. Not in use. To be replaced by a better async resync API downstream. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- include/net/tls.h | 12 +----------- net/tls/tls_device.c | 9 +++------ 2 files changed, 4 insertions(+), 17 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index 3212d3c214a9..ca5f7f437289 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -607,22 +607,12 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) #endif /* The TLS context is valid until sk_destruct is called */ -#define RESYNC_REQ (1 << 0) -#define RESYNC_REQ_FORCE (1 << 1) static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); -} - -static inline void tls_offload_rx_force_resync_request(struct sock *sk) -{ - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - - atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 0e55f8365ce2..a562ebaaa33c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -694,11 +694,10 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx; - bool is_req_pending, is_force_resync; u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE]; + u32 sock_data, is_req_pending; struct tls_prot_info *prot; s64 resync_req; - u32 sock_data; u32 req_seq; if (tls_ctx->rx_conf != TLS_HW) @@ -713,11 +712,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) resync_req = atomic64_read(&rx_ctx->resync_req); req_seq = resync_req >> 32; seq += TLS_HEADER_SIZE - 1; - is_req_pending = resync_req & RESYNC_REQ; - is_force_resync = resync_req & RESYNC_REQ_FORCE; + is_req_pending = resync_req; - if (likely(!is_req_pending) || - (!is_force_resync && req_seq != seq) || + if (likely(!is_req_pending) || req_seq != seq || 
!atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) return; break; From ed9b7646b06a2ed2450dd9437fc7d1ad2783140c Mon Sep 17 00:00:00 2001 From: Boris Pismenny Date: Mon, 8 Jun 2020 19:11:38 +0300 Subject: [PATCH 0396/2056] net/tls: Add asynchronous resync This patch adds support for asynchronous resynchronization in tls_device. Async resync follows two distinct stages: 1. The NIC driver indicates that it would like to resync on some TLS record within the received packet (P), but the driver does not know (yet) which of the TLS records within the packet. At this stage, the NIC driver will query the device to find the exact TCP sequence for resync (tcpsn), however, the driver does not wait for the device to provide the response. 2. Eventually, the device responds, and the driver provides the tcpsn within the resync packet to KTLS. Now, KTLS can check the tcpsn against any processed TLS records within packet P, and also against any record that is processed in the future within packet P. The asynchronous resync path simplifies the device driver, as it can save bits on the packet completion (32-bit TCP sequence), and pass this information on an asynchronous command instead. Signed-off-by: Boris Pismenny Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- include/net/tls.h | 38 ++++++++++++++++++++++++++++++++- net/tls/tls_device.c | 51 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) diff --git a/include/net/tls.h b/include/net/tls.h index ca5f7f437289..c875c0a445a6 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -291,11 +291,19 @@ struct tlsdev_ops { enum tls_offload_sync_type { TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0, TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1, + TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2, }; #define TLS_DEVICE_RESYNC_NH_START_IVAL 2 #define TLS_DEVICE_RESYNC_NH_MAX_IVAL 128 +#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13 +struct tls_offload_resync_async { + atomic64_t req; + u32 loglen; + u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX]; +}; + struct tls_offload_context_rx { /* sw must be the first member of tls_offload_context_rx */ struct tls_sw_context_rx sw; @@ -314,6 +322,10 @@ struct tls_offload_context_rx { u32 decrypted_failed; u32 decrypted_tgt; } resync_nh; + /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */ + struct { + struct tls_offload_resync_async *resync_async; + }; }; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state @@ -606,13 +618,37 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) } #endif +#define RESYNC_REQ BIT(0) +#define RESYNC_REQ_ASYNC BIT(1) /* The TLS context is valid until sk_destruct is called */ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} + +/* Log all TLS record header TCP sequences in [seq, seq+len] */ +static inline void +tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | + (len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); + rx_ctx->resync_async->loglen = 0; +} + +static inline void 
+tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_async->req, + ((u64)ntohl(seq) << 32) | RESYNC_REQ); } static inline void diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index a562ebaaa33c..18fa6067bb7f 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -690,6 +690,47 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); } +static bool +tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, + s64 resync_req, u32 *seq) +{ + u32 is_async = resync_req & RESYNC_REQ_ASYNC; + u32 req_seq = resync_req >> 32; + u32 req_end = req_seq + ((resync_req >> 16) & 0xffff); + + if (is_async) { + /* asynchronous stage: log all headers seq such that + * req_seq <= seq <= end_seq, and wait for real resync request + */ + if (between(*seq, req_seq, req_end) && + resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX) + resync_async->log[resync_async->loglen++] = *seq; + + return false; + } + + /* synchronous stage: check against the logged entries and + * proceed to check the next entries if no match was found + */ + while (resync_async->loglen) { + if (req_seq == resync_async->log[resync_async->loglen - 1] && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) { + resync_async->loglen = 0; + *seq = req_seq; + return true; + } + resync_async->loglen--; + } + + if (req_seq == *seq && + atomic64_try_cmpxchg(&resync_async->req, + &resync_req, 0)) + return true; + + return false; +} + void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); @@ -736,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) seq += rcd_len; tls_bigint_increment(rcd_sn, prot->rec_seq_size); break; + case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC: + resync_req = atomic64_read(&rx_ctx->resync_async->req); + is_req_pending = resync_req; + if (likely(!is_req_pending)) + return; + + if (!tls_device_rx_resync_async(rx_ctx->resync_async, + resync_req, &seq)) + return; + break; } tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn); From 0419d8c9d8f8d825576a41b2bb1e6043f34d1ae0 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 16 Jun 2020 15:15:06 +0300 Subject: [PATCH 0397/2056] net/mlx5e: kTLS, Add kTLS RX resync support Implement the RX resync procedure, using the TLS async resync API. The HW offload of TLS decryption in RX side might get out-of-sync due to out-of-order reception of packets. This requires SW intervention to update the HW context and get it back in-sync. Performance: CPU: Intel(R) Xeon(R) CPU E5-2687W v4 @ 3.00GHz, 24 cores, HT off NIC: ConnectX-6 Dx 100GbE dual port Goodput (app-layer throughput) comparison: +---------------+-------+-------+---------+ | # connections | 1 | 4 | 8 | +---------------+-------+-------+---------+ | SW (Gbps) | 7.26 | 24.70 | 50.30 | +---------------+-------+-------+---------+ | HW (Gbps) | 18.50 | 64.30 | 92.90 | +---------------+-------+-------+---------+ | Speedup | 2.55x | 2.56x | 1.85x * | +---------------+-------+-------+---------+ * After linerate is reached, diff is observed in CPU util. 
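A minimal sketch of the driver-side call flow for the new async resync API
(the drv_*() helpers below are placeholders for driver-specific code; only the
tls_offload_rx_resync_async_request_*() calls are the actual kTLS interface):

/* NAPI path: HW marked a packet as a resync candidate. */
static void drv_rx_resync_candidate(struct sock *sk, __be32 tcp_seq, u16 datalen)
{
	/* Stage 1: kTLS logs all TLS record header sequences within
	 * [seq, seq + datalen] while the driver queries the device
	 * asynchronously for the HW-tracked TCP sequence.
	 */
	tls_offload_rx_resync_async_request_start(sk, tcp_seq, datalen);
	drv_post_hw_query(sk);	/* placeholder: e.g. a GET_PSV command */
}

/* Completion path: the device replied with the TCP sequence it tracks. */
static void drv_rx_resync_hw_reply(struct sock *sk, u32 hw_resync_tcp_sn)
{
	/* Stage 2: kTLS matches the HW sequence against the logged
	 * headers and, on a hit, invokes the driver resync callback.
	 */
	tls_offload_rx_resync_async_request_end(sk, cpu_to_be32(hw_resync_tcp_sn));
}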
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 4 + .../mellanox/mlx5/core/en_accel/ktls.c | 6 +- .../mellanox/mlx5/core/en_accel/ktls_rx.c | 346 +++++++++++++++++- .../mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- .../mellanox/mlx5/core/en_accel/ktls_txrx.c | 7 +- .../mellanox/mlx5/core/en_accel/ktls_txrx.h | 2 + .../mellanox/mlx5/core/en_accel/ktls_utils.h | 15 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 6 + 8 files changed, 381 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 7f55bd3229f1..e9d4a61b6bbb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -14,6 +14,7 @@ enum mlx5e_icosq_wqe_type { #ifdef CONFIG_MLX5_EN_TLS MLX5E_ICOSQ_WQE_UMR_TLS, MLX5E_ICOSQ_WQE_SET_PSV_TLS, + MLX5E_ICOSQ_WQE_GET_PSV_TLS, #endif }; @@ -122,6 +123,9 @@ struct mlx5e_icosq_wqe_info { struct { struct mlx5e_ktls_offload_context_rx *priv_rx; } tls_set_params; + struct { + struct mlx5e_ktls_rx_resync_buf *buf; + } tls_get_params; #endif }; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index d4ef016ab444..deec17af5a69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -40,7 +40,11 @@ static int mlx5e_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn, enum tls_offload_ctx_dir direction) { - return -EOPNOTSUPP; + if (unlikely(direction != TLS_OFFLOAD_CTX_DIR_RX)) + return -EOPNOTSUPP; + + mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn); + return 0; } static const struct tlsdev_ops mlx5e_ktls_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index aae4245d0c91..b26dad909ef5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB // Copyright (c) 2019 Mellanox Technologies. 
+#include #include "en_accel/en_accel.h" +#include "en_accel/tls.h" #include "en_accel/ktls_txrx.h" #include "en_accel/ktls_utils.h" #include "en_accel/fs_tcp.h" @@ -12,11 +14,34 @@ struct accel_rule { struct mlx5_flow_handle *rule; }; +#define PROGRESS_PARAMS_WRITE_UNIT 64 +#define PROGRESS_PARAMS_PADDED_SIZE \ + (ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \ + PROGRESS_PARAMS_WRITE_UNIT)) + +struct mlx5e_ktls_rx_resync_buf { + union { + struct mlx5_wqe_tls_progress_params_seg progress; + u8 pad[PROGRESS_PARAMS_PADDED_SIZE]; + } ____cacheline_aligned_in_smp; + dma_addr_t dma_addr; + struct mlx5e_ktls_offload_context_rx *priv_rx; +}; + enum { MLX5E_PRIV_RX_FLAG_DELETING, MLX5E_NUM_PRIV_RX_FLAGS, }; +struct mlx5e_ktls_rx_resync_ctx { + struct tls_offload_resync_async core; + struct work_struct work; + struct mlx5e_priv *priv; + refcount_t refcnt; + __be64 sw_rcd_sn_be; + u32 seq; +}; + struct mlx5e_ktls_offload_context_rx { struct tls12_crypto_info_aes_gcm_128 crypto_info; struct accel_rule rule; @@ -26,6 +51,9 @@ struct mlx5e_ktls_offload_context_rx { u32 key_id; u32 rxq; DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); + + /* resync */ + struct mlx5e_ktls_rx_resync_ctx resync; }; static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) @@ -104,7 +132,8 @@ post_static_params(struct mlx5e_icosq *sq, pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, - priv_rx->tirn, priv_rx->key_id, false, + priv_rx->tirn, priv_rx->key_id, + priv_rx->resync.seq, false, TLS_OFFLOAD_CTX_DIR_RX); wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS, @@ -201,6 +230,281 @@ mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx) return *ctx; } +/* Re-sync */ +/* Runs in work context */ +static struct mlx5_wqe_ctrl_seg * +resync_post_get_progress_params(struct mlx5e_icosq *sq, + struct mlx5e_ktls_offload_context_rx *priv_rx) +{ + struct mlx5e_get_tls_progress_params_wqe *wqe; + struct mlx5e_ktls_rx_resync_buf *buf; + struct mlx5e_icosq_wqe_info wi; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5_seg_get_psv *psv; + struct device *pdev; + int err; + u16 pi; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (unlikely(!buf)) { + err = -ENOMEM; + goto err_out; + } + + pdev = sq->channel->priv->mdev->device; + buf->dma_addr = dma_map_single(pdev, &buf->progress, + PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) { + err = -ENOMEM; + goto err_out; + } + + buf->priv_rx = priv_rx; + + BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1); + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { + err = -ENOSPC; + goto err_out; + } + + pi = mlx5e_icosq_get_next_pi(sq, 1); + wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi); + +#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)) + + cseg = &wqe->ctrl; + cseg->opmod_idx_opcode = + cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV | + (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24)); + cseg->qpn_ds = + cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT); + + psv = &wqe->psv; + psv->num_psv = 1 << 4; + psv->l_key = sq->channel->mkey_be; + psv->psv_index[0] = cpu_to_be32(priv_rx->tirn); + psv->va = cpu_to_be64(buf->dma_addr); + + wi = (struct mlx5e_icosq_wqe_info) { + .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS, + .num_wqebbs = 1, + .tls_get_params.buf = buf, + }; + icosq_fill_wi(sq, pi, &wi); + sq->pc++; + + 
return cseg; + +err_out: + return ERR_PTR(err); +} + +/* Function is called with elevated refcount. + * It decreases it only if no WQE is posted. + */ +static void resync_handle_work(struct work_struct *work) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_channel *c; + struct mlx5e_icosq *sq; + struct mlx5_wq_cyc *wq; + + resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work); + priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync); + + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { + refcount_dec(&resync->refcnt); + return; + } + + c = resync->priv->channels.c[priv_rx->rxq]; + sq = &c->async_icosq; + wq = &sq->wq; + + spin_lock(&c->async_icosq_lock); + + cseg = resync_post_get_progress_params(sq, priv_rx); + if (IS_ERR(cseg)) { + refcount_dec(&resync->refcnt); + goto unlock; + } + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); +unlock: + spin_unlock(&c->async_icosq_lock); +} + +static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, + struct mlx5e_priv *priv) +{ + INIT_WORK(&resync->work, resync_handle_work); + resync->priv = priv; + refcount_set(&resync->refcnt, 1); +} + +/* Function can be called with the refcount being either elevated or not. + * It does not affect the refcount. + */ +static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, + struct mlx5e_channel *c) +{ + struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info; + struct mlx5_wqe_ctrl_seg *cseg; + struct mlx5e_icosq *sq; + int err; + + memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); + err = 0; + + sq = &c->async_icosq; + spin_lock(&c->async_icosq_lock); + + cseg = post_static_params(sq, priv_rx); + if (IS_ERR(cseg)) { + err = PTR_ERR(cseg); + goto unlock; + } + /* Do not increment priv_rx refcnt, CQE handling is empty */ + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); +unlock: + spin_unlock(&c->async_icosq_lock); + + return err; +} + +/* Function is called with elevated refcount, it decreases it. */ +void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, + struct mlx5e_icosq *sq) +{ + struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; + u8 tracker_state, auth_state, *ctx; + u32 hw_seq; + + priv_rx = buf->priv_rx; + resync = &priv_rx->resync; + + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) + goto out; + + dma_sync_single_for_cpu(resync->priv->mdev->device, buf->dma_addr, + PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); + + ctx = buf->progress.ctx; + tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state); + auth_state = MLX5_GET(tls_progress_params, ctx, auth_state); + if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || + auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) + goto out; + + hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); + tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); +out: + refcount_dec(&resync->refcnt); + kfree(buf); +} + +/* Runs in NAPI. + * Function elevates the refcount, unless no work is queued. 
+ */ +static bool resync_queue_get_psv(struct sock *sk) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; + + priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk)); + if (unlikely(!priv_rx)) + return false; + + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) + return false; + + resync = &priv_rx->resync; + refcount_inc(&resync->refcnt); + if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) + refcount_dec(&resync->refcnt); + + return true; +} + +/* Runs in NAPI */ +static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) +{ + struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct net_device *netdev = rq->netdev; + struct sock *sk = NULL; + unsigned int datalen; + struct iphdr *iph; + struct tcphdr *th; + __be32 seq; + int depth = 0; + + __vlan_get_protocol(skb, eth->h_proto, &depth); + iph = (struct iphdr *)(skb->data + depth); + + if (iph->version == 4) { + depth += sizeof(struct iphdr); + th = (void *)iph + sizeof(struct iphdr); + + sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, + iph->saddr, th->source, iph->daddr, + th->dest, netdev->ifindex); +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; + + depth += sizeof(struct ipv6hdr); + th = (void *)ipv6h + sizeof(struct ipv6hdr); + + sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, + &ipv6h->saddr, th->source, + &ipv6h->daddr, ntohs(th->dest), + netdev->ifindex, 0); +#endif + } + + depth += sizeof(struct tcphdr); + + if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT)) + return; + + if (unlikely(!resync_queue_get_psv(sk))) + return; + + skb->sk = sk; + skb->destructor = sock_edemux; + + seq = th->seq; + datalen = skb->len - depth; + tls_offload_rx_resync_async_request_start(sk, seq, datalen); +} + +void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, + u32 seq, u8 *rcd_sn) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; + struct mlx5e_priv *priv; + struct mlx5e_channel *c; + + priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk)); + if (unlikely(!priv_rx)) + return; + + resync = &priv_rx->resync; + resync->sw_rcd_sn_be = *(__be64 *)rcd_sn; + resync->seq = seq; + + priv = netdev_priv(netdev); + c = priv->channels.c[priv_rx->rxq]; + + resync_handle_seq_match(priv_rx, c); +} + +/* End of resync section */ + void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) { @@ -214,6 +518,7 @@ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, skb->decrypted = 1; break; case CQE_TLS_OFFLOAD_RESYNC: + resync_update_sn(rq, skb); break; default: /* CQE_TLS_OFFLOAD_ERROR: */ break; @@ -237,6 +542,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, u32 start_offload_tcp_sn) { struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; struct tls_context *tls_ctx; struct mlx5_core_dev *mdev; struct mlx5e_priv *priv; @@ -269,7 +575,13 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, goto err_create_tir; init_completion(&priv_rx->add_ctx); + accel_rule_init(&priv_rx->rule, priv, sk); + resync = &priv_rx->resync; + resync_init(resync, priv); + tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core; + tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC); + err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn); if (err) goto err_post_wqes; @@ 
-285,9 +597,35 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, return err; } +/* Elevated refcount on the resync object means there are + * outstanding operations (uncompleted GET_PSV WQEs) that + * will read the resync / priv_rx objects once completed. + * Wait for them to avoid use-after-free. + */ +static void wait_for_resync(struct net_device *netdev, + struct mlx5e_ktls_rx_resync_ctx *resync) +{ +#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */ + unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT); + unsigned int refcnt; + + do { + refcnt = refcount_read(&resync->refcnt); + if (refcnt == 1) + return; + + msleep(20); + } while (time_before(jiffies, exp_time)); + + netdev_warn(netdev, + "Failed waiting for kTLS RX resync refcnt to be released (%u).\n", + refcnt); +} + void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) { struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_ctx *resync; struct mlx5_core_dev *mdev; struct mlx5e_priv *priv; @@ -296,11 +634,17 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx); set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags); + mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL); + napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi); if (!cancel_work_sync(&priv_rx->rule.work)) /* completion is needed, as the priv_rx in the add flow * is maintained on the wqe info (wi), not on the socket. */ wait_for_completion(&priv_rx->add_ctx); + resync = &priv_rx->resync; + if (cancel_work_sync(&resync->work)) + refcount_dec(&resync->refcnt); + wait_for_resync(netdev, resync); if (priv_rx->rule.rule) mlx5e_accel_fs_del_sk(priv_rx->rule.rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 9c34ffa55b32..0e6698d1b4ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -171,7 +171,7 @@ post_static_params(struct mlx5e_txqsq *sq, pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info, - priv_tx->tisn, priv_tx->key_id, fence, + priv_tx->tisn, priv_tx->key_id, 0, fence, TLS_OFFLOAD_CTX_DIR_TX); tx_fill_wi(sq, pi, num_wqebbs, 0, NULL); sq->pc += num_wqebbs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c index c1f1ad32ca4c..ac29aeb8af49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c @@ -22,7 +22,7 @@ enum { static void fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, struct tls12_crypto_info_aes_gcm_128 *info, - u32 key_id) + u32 key_id, u32 resync_tcp_sn) { char *initial_rn, *gcm_iv; u16 salt_sz, rec_seq_sz; @@ -47,6 +47,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params, MLX5_SET(tls_static_params, ctx, const_2, 2); MLX5_SET(tls_static_params, ctx, encryption_standard, MLX5E_ENCRYPTION_STANDARD_TLS); + MLX5_SET(tls_static_params, ctx, resync_tcp_sn, resync_tcp_sn); MLX5_SET(tls_static_params, ctx, dek_index, key_id); } @@ -54,7 +55,7 @@ void mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, u16 pc, u32 sqn, struct tls12_crypto_info_aes_gcm_128 *info, - u32 
tis_tir_num, u32 key_id, + u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn, bool fence, enum tls_offload_ctx_dir direction) { struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; @@ -74,7 +75,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, ucseg->flags = MLX5_UMR_INLINE; ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16); - fill_static_params(&wqe->params, info, key_id); + fill_static_params(&wqe->params, info, key_id, resync_tcp_sn); } static void diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index 7bdd6ec6c981..ff4c740af10b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -23,6 +23,8 @@ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi); +void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, + struct mlx5e_icosq *sq); void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h index 566cf24eb0fe..e5c180f2403b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h @@ -26,6 +26,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx); int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn); void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx); +void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn); struct mlx5e_set_tls_static_params_wqe { struct mlx5_wqe_ctrl_seg ctrl; @@ -39,12 +40,20 @@ struct mlx5e_set_tls_progress_params_wqe { struct mlx5_wqe_tls_progress_params_seg params; }; +struct mlx5e_get_tls_progress_params_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_seg_get_psv psv; +}; + #define MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS \ (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_static_params_wqe), MLX5_SEND_WQE_BB)) #define MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS \ (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_progress_params_wqe), MLX5_SEND_WQE_BB)) +#define MLX5E_KTLS_GET_PROGRESS_WQEBBS \ + (DIV_ROUND_UP(sizeof(struct mlx5e_get_tls_progress_params_wqe), MLX5_SEND_WQE_BB)) + #define MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi) \ ((struct mlx5e_set_tls_static_params_wqe *)\ mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_static_params_wqe))) @@ -53,6 +62,10 @@ struct mlx5e_set_tls_progress_params_wqe { ((struct mlx5e_set_tls_progress_params_wqe *)\ mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_progress_params_wqe))) +#define MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi) \ + ((struct mlx5e_get_tls_progress_params_wqe *)\ + mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_get_tls_progress_params_wqe))) + #define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \ ((struct mlx5e_dump_wqe *)\ mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_dump_wqe))) @@ -61,7 +74,7 @@ void mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe, u16 pc, u32 sqn, struct tls12_crypto_info_aes_gcm_128 *info, - u32 tis_tir_num, u32 key_id, + u32 tis_tir_num, u32 key_id, u32 
resync_tcp_sn, bool fence, enum tls_offload_ctx_dir direction); void mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9a6958acf87d..4b7c119c8946 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -596,6 +596,9 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) case MLX5E_ICOSQ_WQE_SET_PSV_TLS: mlx5e_ktls_handle_ctx_completion(wi); break; + case MLX5E_ICOSQ_WQE_GET_PSV_TLS: + mlx5e_ktls_handle_get_psv_completion(wi, sq); + break; } #endif } @@ -663,6 +666,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) case MLX5E_ICOSQ_WQE_SET_PSV_TLS: mlx5e_ktls_handle_ctx_completion(wi); break; + case MLX5E_ICOSQ_WQE_GET_PSV_TLS: + mlx5e_ktls_handle_get_psv_completion(wi, sq); + break; #endif default: netdev_WARN_ONCE(cq->channel->netdev, From 76c1e1ac2aaeddd5505e4ecfafa963885b5551ab Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 15 Jun 2020 15:25:23 +0300 Subject: [PATCH 0398/2056] net/mlx5e: kTLS, Add kTLS RX stats Add global and per-channel ethtool SW stats for the device offload. Document the new counters in tls-offload.rst. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- Documentation/networking/tls-offload.rst | 18 +++++++++ .../mellanox/mlx5/core/en_accel/ktls_rx.c | 29 ++++++++++++-- .../ethernet/mellanox/mlx5/core/en_stats.c | 39 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en_stats.h | 25 ++++++++++++ 4 files changed, 107 insertions(+), 4 deletions(-) diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst index f914e81fd3a6..37773da2bee5 100644 --- a/Documentation/networking/tls-offload.rst +++ b/Documentation/networking/tls-offload.rst @@ -428,6 +428,24 @@ by the driver: which were part of a TLS stream. * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets which were successfully decrypted. + * ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for + decryption. + * ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device + (connection has finished). + * ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync + request. + * ``rx_tls_resync_req_start`` - number of times the TLS async resync request + was started. + * ``rx_tls_resync_req_end`` - number of times the TLS async resync request + properly ended with providing the HW tracked tcp-seq. + * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request + procedure was started by not properly ended. + * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to + the driver was successfully handled. + * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to + the driver was terminated unsuccessfully. + * ``rx_tls_err`` - number of RX packets which were part of a TLS stream + but were not decrypted due to unexpected error in the state machine. * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device for encryption of their TLS payload. 
* ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index b26dad909ef5..e0a8f9d63b30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -46,6 +46,7 @@ struct mlx5e_ktls_offload_context_rx { struct tls12_crypto_info_aes_gcm_128 crypto_info; struct accel_rule rule; struct sock *sk; + struct mlx5e_rq_stats *stats; struct completion add_ctx; u32 tirn; u32 key_id; @@ -203,6 +204,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c, return err; err_out: + priv_rx->stats->tls_resync_req_skip++; err = PTR_ERR(cseg); complete(&priv_rx->add_ctx); goto unlock; @@ -296,6 +298,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, return cseg; err_out: + priv_rx->stats->tls_resync_req_skip++; return ERR_PTR(err); } @@ -362,11 +365,13 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx cseg = post_static_params(sq, priv_rx); if (IS_ERR(cseg)) { + priv_rx->stats->tls_resync_res_skip++; err = PTR_ERR(cseg); goto unlock; } /* Do not increment priv_rx refcnt, CQE handling is empty */ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); + priv_rx->stats->tls_resync_res_ok++; unlock: spin_unlock(&c->async_icosq_lock); @@ -396,11 +401,14 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state); auth_state = MLX5_GET(tls_progress_params, ctx, auth_state); if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || - auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) + auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { + priv_rx->stats->tls_resync_req_skip++; goto out; + } hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); + priv_rx->stats->tls_resync_req_end++; out: refcount_dec(&resync->refcnt); kfree(buf); @@ -479,6 +487,7 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) seq = th->seq; datalen = skb->len - depth; tls_offload_rx_resync_async_request_start(sk, seq, datalen); + rq->stats->tls_resync_req_start++; } void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, @@ -509,18 +518,25 @@ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) { u8 tls_offload = get_cqe_tls_offload(cqe); + struct mlx5e_rq_stats *stats; if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED)) return; + stats = rq->stats; + switch (tls_offload) { case CQE_TLS_OFFLOAD_DECRYPTED: skb->decrypted = 1; + stats->tls_decrypted_packets++; + stats->tls_decrypted_bytes += *cqe_bcnt; break; case CQE_TLS_OFFLOAD_RESYNC: + stats->tls_resync_req_pkt++; resync_update_sn(rq, skb); break; default: /* CQE_TLS_OFFLOAD_ERROR: */ + stats->tls_err++; break; } } @@ -562,12 +578,14 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; - priv_rx->sk = sk; - priv_rx->rxq = mlx5e_accel_sk_get_rxq(sk); + rxq = mlx5e_accel_sk_get_rxq(sk); + priv_rx->rxq = rxq; + priv_rx->sk = sk; + + priv_rx->stats = &priv->channel_stats[rxq].rq; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - rxq = priv_rx->rxq; rqtn = priv->direct_tir[rxq].rqt.rqtn; err = mlx5e_ktls_create_tir(mdev, 
&priv_rx->tirn, rqtn); @@ -586,6 +604,8 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (err) goto err_post_wqes; + priv_rx->stats->tls_ctx++; + return 0; err_post_wqes: @@ -646,6 +666,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) refcount_dec(&resync->refcnt); wait_for_resync(netdev, resync); + priv_rx->stats->tls_del++; if (priv_rx->rule.rule) mlx5e_accel_fs_del_sk(priv_rx->rule.rule); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index f009fe09e99b..e3b2f59408e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -163,6 +163,19 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) }, +#endif { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) }, @@ -275,6 +288,19 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw) s->rx_congst_umr += rq_stats->congst_umr; s->rx_arfs_err += rq_stats->arfs_err; s->rx_recover += rq_stats->recover; +#ifdef CONFIG_MLX5_EN_TLS + s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets; + s->rx_tls_decrypted_bytes += rq_stats->tls_decrypted_bytes; + s->rx_tls_ctx += rq_stats->tls_ctx; + s->rx_tls_del += rq_stats->tls_del; + s->rx_tls_resync_req_pkt += rq_stats->tls_resync_req_pkt; + s->rx_tls_resync_req_start += rq_stats->tls_resync_req_start; + s->rx_tls_resync_req_end += rq_stats->tls_resync_req_end; + s->rx_tls_resync_req_skip += rq_stats->tls_resync_req_skip; + s->rx_tls_resync_res_ok += rq_stats->tls_resync_res_ok; + s->rx_tls_resync_res_skip += rq_stats->tls_resync_res_skip; + s->rx_tls_err += rq_stats->tls_err; +#endif s->ch_events += ch_stats->events; s->ch_poll += ch_stats->poll; s->ch_arm += ch_stats->arm; @@ -1475,6 +1501,19 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) }, +#ifdef CONFIG_MLX5_EN_TLS + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, 
tls_resync_req_start) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) }, +#endif }; static const struct counter_desc sq_stats_desc[] = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 2b83ba990714..2e1cca1923b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -186,6 +186,18 @@ struct mlx5e_sw_stats { u64 tx_tls_skip_no_sync_data; u64 tx_tls_drop_no_sync_data; u64 tx_tls_drop_bypass_req; + + u64 rx_tls_decrypted_packets; + u64 rx_tls_decrypted_bytes; + u64 rx_tls_ctx; + u64 rx_tls_del; + u64 rx_tls_resync_req_pkt; + u64 rx_tls_resync_req_start; + u64 rx_tls_resync_req_end; + u64 rx_tls_resync_req_skip; + u64 rx_tls_resync_res_ok; + u64 rx_tls_resync_res_skip; + u64 rx_tls_err; #endif u64 rx_xsk_packets; @@ -305,6 +317,19 @@ struct mlx5e_rq_stats { u64 congst_umr; u64 arfs_err; u64 recover; +#ifdef CONFIG_MLX5_EN_TLS + u64 tls_decrypted_packets; + u64 tls_decrypted_bytes; + u64 tls_ctx; + u64 tls_del; + u64 tls_resync_req_pkt; + u64 tls_resync_req_start; + u64 tls_resync_req_end; + u64 tls_resync_req_skip; + u64 tls_resync_res_ok; + u64 tls_resync_res_skip; + u64 tls_err; +#endif }; struct mlx5e_sq_stats { From c5607360ec4ea5d0c955de501b9eeb04fa2465d0 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 18 Jun 2020 12:45:59 +0300 Subject: [PATCH 0399/2056] net/mlx5e: Increase Async ICO SQ size Resync communication with HW for kTLS RX is done via the async ICOSQs. kTLS RX resync requests might come in bursts. To improve the success chances for such bursts, use a larger ICOSQ. 
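For scale, SQ sizes in mlx5e are log2 values, so each increment of the log
size doubles the number of WQ entries and hence the number of in-flight
resync requests the async ICOSQ can absorb (the exact minimum/default
constants are defined in en.h and are not repeated here):

/* Illustration only: WQ capacity as a function of the log size. */
static inline u32 icosq_entries(u8 log_wq_sz)
{
	return 1U << log_wq_sz;	/* e.g. log 6 -> 64 entries, log 10 -> 1024 */
}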
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0f1578a5e538..3e6fcd545d2c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2306,6 +2306,14 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params, } } +static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev) +{ + if (netdev->hw_features & NETIF_F_HW_TLS_RX) + return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + + return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; +} + static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -2315,7 +2323,7 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv, mlx5e_build_rq_param(priv, params, NULL, &cparam->rq); icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq); - async_icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev); mlx5e_build_sq_param(priv, params, &cparam->txq_sq); mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq); From ed9a7c53b8781f851bd8406551d6abfb2d826683 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 22 Jun 2020 18:32:36 +0300 Subject: [PATCH 0400/2056] net/mlx5e: kTLS, Cleanup redundant capability check All callers of mlx5e_ktls_build_netdev() check capability before the call. Remove the repeated check in the function. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c index deec17af5a69..1b392696280d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c @@ -58,9 +58,6 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv) struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - if (!mlx5_accel_is_ktls_device(mdev)) - return; - if (mlx5_accel_is_ktls_tx(mdev)) { netdev->hw_features |= NETIF_F_HW_TLS_TX; netdev->features |= NETIF_F_HW_TLS_TX; From a29074367b347af9e19d36522f7ad9a7db4b9c28 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 15 Jun 2020 13:02:49 +0300 Subject: [PATCH 0401/2056] net/mlx5e: kTLS, Improve rx handler function call Prior to this patch mlx5e tls rx handler was called unconditionally on all rx frames and the decision whether a frame is a valid tls record is done inside that function. A function call can be expensive especially for regular rx packet rate. To avoid this, check the tls validity before jumping into the tls rx handler. While at it, split between kTLS device offload rx handler and FPGA tls rx handler using a similar method. 
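The resulting wrapper (added to tls_rxtx.h below) boils down to keeping the
cheap predicates inline so that plain, non-TLS RX traffic never pays for a
call into the TLS handlers; a simplified restatement with a placeholder
function name:

static inline void
handle_rx_maybe_tls(struct mlx5e_rq *rq, struct sk_buff *skb,
		    struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	if (unlikely(get_cqe_tls_offload(cqe)))		/* kTLS device offload */
		return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);

	if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) &&
		     is_metadata_hdr_valid(skb)))	/* FPGA TLS metadata */
		mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
}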
Signed-off-by: Saeed Mahameed Signed-off-by: Tariq Toukan --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../mellanox/mlx5/core/en_accel/ktls_rx.c | 10 ++------- .../mellanox/mlx5/core/en_accel/tls.h | 6 ++++++ .../mellanox/mlx5/core/en_accel/tls_rxtx.c | 12 +++-------- .../mellanox/mlx5/core/en_accel/tls_rxtx.h | 21 +++++++++++++++++-- .../net/ethernet/mellanox/mlx5/core/en_main.c | 3 +++ .../net/ethernet/mellanox/mlx5/core/en_rx.c | 2 -- 7 files changed, 34 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ec5bf73f9b07..2957edb7e0b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -263,6 +263,7 @@ enum { MLX5E_RQ_STATE_AM, MLX5E_RQ_STATE_NO_CSUM_COMPLETE, MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */ + MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */ }; struct mlx5e_cq { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index e0a8f9d63b30..d7215defd403 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -517,15 +517,9 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) { - u8 tls_offload = get_cqe_tls_offload(cqe); - struct mlx5e_rq_stats *stats; + struct mlx5e_rq_stats *stats = rq->stats; - if (likely(tls_offload == CQE_TLS_OFFLOAD_NOT_DECRYPTED)) - return; - - stats = rq->stats; - - switch (tls_offload) { + switch (get_cqe_tls_offload(cqe)) { case CQE_TLS_OFFLOAD_DECRYPTED: skb->decrypted = 1; stats->tls_decrypted_packets++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h index ca0c2ebb41a1..bd270a85c804 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h @@ -87,6 +87,11 @@ mlx5e_get_tls_rx_context(struct tls_context *tls_ctx) base); } +static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) +{ + return priv->tls; +} + void mlx5e_tls_build_netdev(struct mlx5e_priv *priv); int mlx5e_tls_init(struct mlx5e_priv *priv); void mlx5e_tls_cleanup(struct mlx5e_priv *priv); @@ -103,6 +108,7 @@ static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) mlx5e_ktls_build_netdev(priv); } +static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; } static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { } static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c index 182841322ce4..b0c31d49ff8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c @@ -355,19 +355,13 @@ static int tls_update_resync_sn(struct net_device *netdev, return 0; } -void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) +/* FPGA tls rx handler */ +void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, + u32 *cqe_bcnt) { struct mlx5e_tls_metadata *mdata; struct mlx5e_priv *priv; - if 
(likely(mlx5_accel_is_ktls_rx(rq->mdev))) - return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); - - /* FPGA */ - if (!is_metadata_hdr_valid(skb)) - return; - /* Use the metadata */ mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN); switch (mdata->content.recv.syndrome) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h index 8bb790674042..5f162ad2ee8f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h @@ -34,6 +34,7 @@ #ifndef __MLX5E_TLS_RXTX_H__ #define __MLX5E_TLS_RXTX_H__ +#include "accel/accel.h" #include "en_accel/ktls_txrx.h" #ifdef CONFIG_MLX5_EN_TLS @@ -49,11 +50,27 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq, void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg, struct mlx5e_accel_tx_tls_state *state); -void mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, - struct mlx5_cqe64 *cqe, u32 *cqe_bcnt); +void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb, + u32 *cqe_bcnt); + +static inline void +mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) +{ + if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */ + return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt); + + if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb))) + return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt); +} #else +static inline bool +mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; } +static inline void +mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, + struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {} static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq) { return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3e6fcd545d2c..046cfb0ea180 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -873,6 +873,9 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params, if (err) goto err_destroy_rq; + if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev)) + __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */ + if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full)) __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 4b7c119c8946..8b42f729a4f7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1019,9 +1019,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mac_len = ETH_HLEN; -#ifdef CONFIG_MLX5_EN_TLS mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); -#endif if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); From fdb7eb21ddd3cc07b8120b6f5cc0b279a0ed198e Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Fri, 26 Jun 2020 21:05:32 -0700 Subject: [PATCH 0402/2056] tcp: stamp SCM_TSTAMP_ACK later in tcp_clean_rtx_queue() Currently tp->delivered is updated with sacked packets but not cumulatively acked when SCP_TSTAMP_ACK is timestamped. 
This patch moves a tcp_ack_tstamp() call in tcp_clean_rtx_queue() to later in the loop so that when a skb is fully acked OPT_STATS of SCM_TSTAMP_ACK will include the current skb in the delivered count. When not fully acked tcp_ack_tstamp() is a no-op and there is no change in behavior. Signed-off-by: Yousuk Seung Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f3a0eb139b76..2a683e785cca 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3078,8 +3078,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, u8 sacked = scb->sacked; u32 acked_pcount; - tcp_ack_tstamp(sk, skb, prior_snd_una); - /* Determine how many packets and what bytes were acked, tso and else */ if (after(scb->end_seq, tp->snd_una)) { if (tcp_skb_pcount(skb) == 1 || @@ -3143,6 +3141,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, if (!fully_acked) break; + tcp_ack_tstamp(sk, skb, prior_snd_una); + next = skb_rb_next(skb); if (unlikely(skb == tp->retransmit_skb_hint)) tp->retransmit_skb_hint = NULL; From c634e34f6ebfb75259e6ce467523fd3adf30d3d2 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Fri, 26 Jun 2020 21:05:33 -0700 Subject: [PATCH 0403/2056] tcp: add ece_ack flag to reno sack functions Pass a boolean flag that tells the ECE state of the current ack to reno sack functions. This is pure refactor for future patches to improve tracking delivered counts. Signed-off-by: Yousuk Seung Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2a683e785cca..09bed29e3ef4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1893,7 +1893,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend) /* Emulate SACKs for SACKless connection: account for a new dupack. */ -static void tcp_add_reno_sack(struct sock *sk, int num_dupack) +static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack) { if (num_dupack) { struct tcp_sock *tp = tcp_sk(sk); @@ -1911,7 +1911,7 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack) /* Account for ACK, ACKing some data in Reno Recovery phase. */ -static void tcp_remove_reno_sacks(struct sock *sk, int acked) +static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack) { struct tcp_sock *tp = tcp_sk(sk); @@ -2697,7 +2697,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack, * delivered. Lower inflight to clock out (re)tranmissions. 
*/ if (after(tp->snd_nxt, tp->high_seq) && num_dupack) - tcp_add_reno_sack(sk, num_dupack); + tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE); else if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); } @@ -2779,6 +2779,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int fast_rexmit = 0, flag = *ack_flag; + bool ece_ack = flag & FLAG_ECE; bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) && tcp_force_fast_retransmit(sk)); @@ -2787,7 +2788,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, /* Now state machine starts. * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ - if (flag & FLAG_ECE) + if (ece_ack) tp->prior_ssthresh = 0; /* B. In all the states check for reneging SACKs. */ @@ -2828,7 +2829,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp)) - tcp_add_reno_sack(sk, num_dupack); + tcp_add_reno_sack(sk, num_dupack, ece_ack); } else { if (tcp_try_undo_partial(sk, prior_snd_una)) return; @@ -2853,7 +2854,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); - tcp_add_reno_sack(sk, num_dupack); + tcp_add_reno_sack(sk, num_dupack, ece_ack); } if (icsk->icsk_ca_state <= TCP_CA_Disorder) @@ -2877,7 +2878,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una, } /* Otherwise enter Recovery state */ - tcp_enter_recovery(sk, (flag & FLAG_ECE)); + tcp_enter_recovery(sk, ece_ack); fast_rexmit = 1; } @@ -3053,7 +3054,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, */ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, u32 prior_snd_una, - struct tcp_sacktag_state *sack) + struct tcp_sacktag_state *sack, bool ece_ack) { const struct inet_connection_sock *icsk = inet_csk(sk); u64 first_ackt, last_ackt; @@ -3191,7 +3192,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, } if (tcp_is_reno(tp)) { - tcp_remove_reno_sacks(sk, pkts_acked); + tcp_remove_reno_sacks(sk, pkts_acked, ece_ack); /* If any of the cumulatively ACKed segments was * retransmitted, non-SACK case cannot confirm that @@ -3685,7 +3686,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) goto no_queue; /* See if we can take anything off of the retransmit queue. */ - flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state); + flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state, + flag & FLAG_ECE); tcp_rack_update_reo_wnd(sk, &rs); From f00394ce6054e54319aefa51e4c495f9ebeb8669 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Fri, 26 Jun 2020 21:05:34 -0700 Subject: [PATCH 0404/2056] tcp: count sacked packets in tcp_sacktag_state Add sack_delivered to tcp_sacktag_state and count the number of sacked and dsacked packets. This is pure refactor for future patches to improve tracking delivered counts. Signed-off-by: Yousuk Seung Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 09bed29e3ef4..db61ea597e39 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1138,6 +1138,7 @@ struct tcp_sacktag_state { struct rate_sample *rate; int flag; unsigned int mss_now; + u32 sack_delivered; }; /* Check if skb is fully within the SACK block. In presence of GSO skbs, @@ -1259,6 +1260,7 @@ static u8 tcp_sacktag_one(struct sock *sk, state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; tp->delivered += pcount; /* Out-of-order packets delivered */ + state->sack_delivered += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (tp->lost_skb_hint && @@ -1685,6 +1687,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, if (found_dup_sack) { state->flag |= FLAG_DSACKING_ACK; tp->delivered++; /* A spurious retransmission is delivered */ + state->sack_delivered++; } /* Eliminate too old ACKs, but take into @@ -3586,6 +3589,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) sack_state.first_sackt = 0; sack_state.rate = &rs; + sack_state.sack_delivered = 0; /* We very likely will need to access rtx queue. */ prefetch(sk->tcp_rtx_queue.rb_node); From 082d4fa980b07b1cc602305e9cc0815d19663ed3 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Fri, 26 Jun 2020 21:05:35 -0700 Subject: [PATCH 0405/2056] tcp: update delivered_ce with delivered Currently tp->delivered is updated in various places in tcp_ack() but tp->delivered_ce is updated once at the end. As a result two counts in OPT_STATS of SCM_TSTAMP_ACK timestamps generated in tcp_ack() may not be in sync. This patch updates both counts at the same in tcp_ack(). Signed-off-by: Yousuk Seung Signed-off-by: Yuchung Cheng Acked-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index db61ea597e39..8479b84f0a7f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -962,6 +962,15 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) } } +/* Updates the delivered and delivered_ce counts */ +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, + bool ece_ack) +{ + tp->delivered += delivered; + if (ece_ack) + tp->delivered_ce += delivered; +} + /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). @@ -1259,7 +1268,7 @@ static u8 tcp_sacktag_one(struct sock *sk, sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; - tp->delivered += pcount; /* Out-of-order packets delivered */ + /* Out-of-order packets delivered */ state->sack_delivered += pcount; /* Lost marker hint past SACKed? 
Tweak RFC3517 cnt */ @@ -1686,7 +1695,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, num_sacks, prior_snd_una); if (found_dup_sack) { state->flag |= FLAG_DSACKING_ACK; - tp->delivered++; /* A spurious retransmission is delivered */ + /* A spurious retransmission is delivered */ state->sack_delivered++; } @@ -1907,7 +1916,7 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack) tcp_check_reno_reordering(sk, 0); delivered = tp->sacked_out - prior_sacked; if (delivered > 0) - tp->delivered += delivered; + tcp_count_delivered(tp, delivered, ece_ack); tcp_verify_left_out(tp); } } @@ -1920,7 +1929,8 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack) if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. */ - tp->delivered += max_t(int, acked - tp->sacked_out, 1); + tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1), + ece_ack); if (acked - 1 >= tp->sacked_out) tp->sacked_out = 0; else @@ -3116,7 +3126,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, if (sacked & TCPCB_SACKED_ACKED) { tp->sacked_out -= acked_pcount; } else if (tcp_is_sack(tp)) { - tp->delivered += acked_pcount; + tcp_count_delivered(tp, acked_pcount, ece_ack); if (!tcp_skb_spurious_retrans(tp, skb)) tcp_rack_advance(tp, sacked, scb->end_seq, tcp_skb_timestamp_us(skb)); @@ -3562,10 +3572,9 @@ static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag) delivered = tp->delivered - prior_delivered; NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered); - if (flag & FLAG_ECE) { - tp->delivered_ce += delivered; + if (flag & FLAG_ECE) NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered); - } + return delivered; } @@ -3665,6 +3674,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) ack_ev_flags |= CA_ACK_ECE; } + if (sack_state.sack_delivered) + tcp_count_delivered(tp, sack_state.sack_delivered, + flag & FLAG_ECE); + if (flag & FLAG_WIN_UPDATE) ack_ev_flags |= CA_ACK_WIN_UPDATE; From d929758101fc0674008169dc1de963e3181c587b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 25 Jun 2020 16:26:28 -0700 Subject: [PATCH 0406/2056] libbpf: Support disabling auto-loading BPF programs Currently, bpf_object__load() (and by induction skeleton's load), will always attempt to prepare, relocate, and load into kernel every single BPF program found inside the BPF object file. This is often convenient and the right thing to do and what users expect. But there are plenty of cases (especially with BPF development constantly picking up the pace), where BPF application is intended to work with old kernels, with potentially reduced set of features. But on kernels supporting extra features, it would like to take a full advantage of them, by employing extra BPF program. This could be a choice of using fentry/fexit over kprobe/kretprobe, if kernel is recent enough and is built with BTF. Or BPF program might be providing optimized bpf_iter-based solution that user-space might want to use, whenever available. And so on. With libbpf and BPF CO-RE in particular, it's advantageous to not have to maintain two separate BPF object files to achieve this. So to enable such use cases, this patch adds ability to request not auto-loading chosen BPF programs. In such case, libbpf won't attempt to perform relocations (which might fail due to old kernel), won't try to resolve BTF types for BTF-aware (tp_btf/fentry/fexit/etc) program types, because BTF might not be present, and so on. 
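A minimal user-space sketch of the intended usage (the object and program
names, and the kernel_supports_fentry() probe, are hypothetical; only the
libbpf calls are real, with bpf_program__set_autoload() being the API added
here):

#include <stdbool.h>
#include <bpf/libbpf.h>

/* Hypothetical feature probe supplied by the application. */
bool kernel_supports_fentry(void);

int load_with_fallback(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("myprog.bpf.o", NULL);
	if (libbpf_get_error(obj))
		return -1;

	/* Optional fentry-based program: skip it on older kernels and
	 * rely on the kprobe-based fallback in the same object.
	 */
	prog = bpf_object__find_program_by_name(obj, "handle_fentry");
	if (prog && !kernel_supports_fentry())
		bpf_program__set_autoload(prog, false);

	return bpf_object__load(obj);	/* disabled program is not loaded */
}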
Skeleton will also automatically skip auto-attachment step for such not loaded BPF programs. Overall, this feature allows to simplify development and deployment of real-world BPF applications with complicated compatibility requirements. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200625232629.3444003-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 48 +++++++++++++++++++++++++++++++++------- tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 44 insertions(+), 8 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6b4955d170ff..4ea7f4f1a691 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -230,6 +230,7 @@ struct bpf_program { struct bpf_insn *insns; size_t insns_cnt, main_prog_cnt; enum bpf_prog_type type; + bool load; struct reloc_desc *reloc_desc; int nr_reloc; @@ -541,6 +542,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx, prog->instances.fds = NULL; prog->instances.nr = -1; prog->type = BPF_PROG_TYPE_UNSPEC; + prog->load = true; return 0; errout: @@ -2513,6 +2515,8 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) need_vmlinux_btf = true; bpf_object__for_each_program(prog, obj) { + if (!prog->load) + continue; if (libbpf_prog_needs_vmlinux_btf(prog)) { need_vmlinux_btf = true; break; @@ -5445,6 +5449,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) { int err = 0, fd, i, btf_id; + if (prog->obj->loaded) { + pr_warn("prog '%s'('%s'): can't load after object was loaded\n", + prog->name, prog->section_name); + return -EINVAL; + } + if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { @@ -5533,16 +5543,21 @@ static bool bpf_program__is_function_storage(const struct bpf_program *prog, static int bpf_object__load_progs(struct bpf_object *obj, int log_level) { + struct bpf_program *prog; size_t i; int err; for (i = 0; i < obj->nr_programs; i++) { - if (bpf_program__is_function_storage(&obj->programs[i], obj)) + prog = &obj->programs[i]; + if (bpf_program__is_function_storage(prog, obj)) continue; - obj->programs[i].log_level |= log_level; - err = bpf_program__load(&obj->programs[i], - obj->license, - obj->kern_version); + if (!prog->load) { + pr_debug("prog '%s'('%s'): skipped loading\n", + prog->name, prog->section_name); + continue; + } + prog->log_level |= log_level; + err = bpf_program__load(prog, obj->license, obj->kern_version); if (err) return err; } @@ -5869,12 +5884,10 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) return -EINVAL; if (obj->loaded) { - pr_warn("object should not be loaded twice\n"); + pr_warn("object '%s': load can't be attempted twice\n", obj->name); return -EINVAL; } - obj->loaded = true; - err = bpf_object__probe_loading(obj); err = err ? : bpf_object__probe_caps(obj); err = err ? 
: bpf_object__resolve_externs(obj, obj->kconfig); @@ -5889,6 +5902,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr) btf__free(obj->btf_vmlinux); obj->btf_vmlinux = NULL; + obj->loaded = true; /* doesn't matter if successfully or not */ + if (err) goto out; @@ -6661,6 +6676,20 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy) return title; } +bool bpf_program__autoload(const struct bpf_program *prog) +{ + return prog->load; +} + +int bpf_program__set_autoload(struct bpf_program *prog, bool autoload) +{ + if (prog->obj->loaded) + return -EINVAL; + + prog->load = autoload; + return 0; +} + int bpf_program__fd(const struct bpf_program *prog) { return bpf_program__nth_fd(prog, 0); @@ -9283,6 +9312,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) const struct bpf_sec_def *sec_def; const char *sec_name = bpf_program__title(prog, false); + if (!prog->load) + continue; + sec_def = find_sec_def(sec_name); if (!sec_def || !sec_def->attach_fn) continue; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index fdd279fb1866..2335971ed0bd 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -200,6 +200,8 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog); LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy); +LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog); +LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload); /* returns program size in bytes */ LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 9914e0db4859..6544d2cd1ed6 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -286,4 +286,6 @@ LIBBPF_0.1.0 { bpf_map__set_value_size; bpf_map__type; bpf_map__value_size; + bpf_program__autoload; + bpf_program__set_autoload; } LIBBPF_0.0.9; From 5712174c5c9e3d684ad05d4aaed1e14acda4bb74 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 25 Jun 2020 16:26:29 -0700 Subject: [PATCH 0407/2056] selftests/bpf: Test auto-load disabling logic for BPF programs Validate that BPF object with broken (in multiple ways) BPF program can still be successfully loaded, if that broken BPF program is disabled. 
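In skeleton terms the pattern under test is just the following sketch (test_autoload and prog3 are the skeleton and the deliberately broken program added below; bpf_program__autoload() is the matching getter from the previous patch):

  struct test_autoload *skel = test_autoload__open();

  if (skel && bpf_program__autoload(skel->progs.prog3))
  	bpf_program__set_autoload(skel->progs.prog3, false);
  /* now test_autoload__load(skel) succeeds even though prog3 is broken */
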
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200625232629.3444003-3-andriin@fb.com --- .../selftests/bpf/prog_tests/autoload.c | 41 +++++++++++++++++++ .../selftests/bpf/progs/test_autoload.c | 40 ++++++++++++++++++ 2 files changed, 81 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/autoload.c create mode 100644 tools/testing/selftests/bpf/progs/test_autoload.c diff --git a/tools/testing/selftests/bpf/prog_tests/autoload.c b/tools/testing/selftests/bpf/prog_tests/autoload.c new file mode 100644 index 000000000000..3693f7d133eb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/autoload.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include +#include +#include "test_autoload.skel.h" + +void test_autoload(void) +{ + int duration = 0, err; + struct test_autoload* skel; + + skel = test_autoload__open_and_load(); + /* prog3 should be broken */ + if (CHECK(skel, "skel_open_and_load", "unexpected success\n")) + goto cleanup; + + skel = test_autoload__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + goto cleanup; + + /* don't load prog3 */ + bpf_program__set_autoload(skel->progs.prog3, false); + + err = test_autoload__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + err = test_autoload__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(!skel->bss->prog1_called, "prog1", "not called\n"); + CHECK(!skel->bss->prog2_called, "prog2", "not called\n"); + CHECK(skel->bss->prog3_called, "prog3", "called?!\n"); + +cleanup: + test_autoload__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_autoload.c b/tools/testing/selftests/bpf/progs/test_autoload.c new file mode 100644 index 000000000000..62c8cdec6d5d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_autoload.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include +#include +#include + +bool prog1_called = false; +bool prog2_called = false; +bool prog3_called = false; + +SEC("raw_tp/sys_enter") +int prog1(const void *ctx) +{ + prog1_called = true; + return 0; +} + +SEC("raw_tp/sys_exit") +int prog2(const void *ctx) +{ + prog2_called = true; + return 0; +} + +struct fake_kernel_struct { + int whatever; +} __attribute__((preserve_access_index)); + +SEC("fentry/unexisting-kprobe-will-fail-if-loaded") +int prog3(const void *ctx) +{ + struct fake_kernel_struct *fake = (void *)ctx; + fake->whatever = 123; + prog3_called = true; + return 0; +} + +char _license[] SEC("license") = "GPL"; From ea256222a463858ab034b667486137c2b56e6120 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sun, 28 Jun 2020 20:36:20 +0800 Subject: [PATCH 0408/2056] hinic: add support to set and get pause params add support to set pause params with ethtool -A and get pause params with ethtool -a. Also remove set_link_ksettings ops for VF and enable pause by default. Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 86 ++++++++++++++++++- .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 2 + .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 2 + .../net/ethernet/huawei/hinic/hinic_hw_io.h | 10 +++ .../net/ethernet/huawei/hinic/hinic_main.c | 75 +++++++++++++--- .../net/ethernet/huawei/hinic/hinic_port.c | 40 +++++++++ .../net/ethernet/huawei/hinic/hinic_port.h | 13 +++ 7 files changed, 217 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index efb02e03e7da..edd60c892ab2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -613,6 +613,64 @@ static int hinic_set_ringparam(struct net_device *netdev, return 0; } + +static void hinic_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_pause_config pause_info = {0}; + struct hinic_nic_cfg *nic_cfg; + int err; + + nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg; + + err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info); + if (!err) { + pause->autoneg = pause_info.auto_neg; + if (nic_cfg->pause_set || !pause_info.auto_neg) { + pause->rx_pause = nic_cfg->rx_pause; + pause->tx_pause = nic_cfg->tx_pause; + } else { + pause->rx_pause = pause_info.rx_pause; + pause->tx_pause = pause_info.tx_pause; + } + } +} + +static int hinic_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_pause_config pause_info = {0}; + struct hinic_port_cap port_cap = {0}; + int err; + + err = hinic_port_get_cap(nic_dev, &port_cap); + if (err) + return -EIO; + + if (pause->autoneg != port_cap.autoneg_state) + return -EOPNOTSUPP; + + pause_info.auto_neg = pause->autoneg; + pause_info.rx_pause = pause->rx_pause; + pause_info.tx_pause = pause->tx_pause; + + mutex_lock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); + err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info); + if (err) { + mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); + return err; + } + nic_dev->hwdev->func_to_io.nic_cfg.pause_set = true; + nic_dev->hwdev->func_to_io.nic_cfg.auto_neg = pause->autoneg; + nic_dev->hwdev->func_to_io.nic_cfg.rx_pause = pause->rx_pause; + nic_dev->hwdev->func_to_io.nic_cfg.tx_pause = pause->tx_pause; + mutex_unlock(&nic_dev->hwdev->func_to_io.nic_cfg.cfg_mutex); + + return 0; +} + static void hinic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) { @@ -1241,6 +1299,27 @@ static const struct ethtool_ops hinic_ethtool_ops = { .get_link = ethtool_op_get_link, .get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, + .get_pauseparam = hinic_get_pauseparam, + .set_pauseparam = hinic_set_pauseparam, + .get_channels = hinic_get_channels, + .set_channels = hinic_set_channels, + .get_rxnfc = hinic_get_rxnfc, + .set_rxnfc = hinic_set_rxnfc, + .get_rxfh_key_size = hinic_get_rxfh_key_size, + .get_rxfh_indir_size = hinic_get_rxfh_indir_size, + .get_rxfh = hinic_get_rxfh, + .set_rxfh = hinic_set_rxfh, + .get_sset_count = hinic_get_sset_count, + .get_ethtool_stats = hinic_get_ethtool_stats, + .get_strings = hinic_get_strings, +}; + +static const struct ethtool_ops hinicvf_ethtool_ops = { + .get_link_ksettings = hinic_get_link_ksettings, + .get_drvinfo = hinic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, 
+ .set_ringparam = hinic_set_ringparam, .get_channels = hinic_get_channels, .set_channels = hinic_set_channels, .get_rxnfc = hinic_get_rxnfc, @@ -1256,5 +1335,10 @@ static const struct ethtool_ops hinic_ethtool_ops = { void hinic_set_ethtool_ops(struct net_device *netdev) { - netdev->ethtool_ops = &hinic_ethtool_ops; + struct hinic_dev *nic_dev = netdev_priv(netdev); + + if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) + netdev->ethtool_ops = &hinic_ethtool_ops; + else + netdev->ethtool_ops = &hinicvf_ethtool_ops; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 0245da02efbb..747d50b841ba 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -777,6 +777,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) goto err_dev_cap; } + mutex_init(&hwdev->func_to_io.nic_cfg.cfg_mutex); + err = hinic_vf_func_init(hwdev); if (err) { dev_err(&pdev->dev, "Failed to init nic mbox\n"); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 71ea7e46dbbc..cc776ca2d737 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -48,6 +48,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_ADD_VLAN = 3, HINIC_PORT_CMD_DEL_VLAN = 4, + HINIC_PORT_CMD_SET_PFC = 5, + HINIC_PORT_CMD_SET_MAC = 9, HINIC_PORT_CMD_GET_MAC = 10, HINIC_PORT_CMD_DEL_MAC = 11, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index 214f162f7579..ee6d60762d84 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -47,6 +47,15 @@ struct hinic_free_db_area { struct semaphore idx_lock; }; +struct hinic_nic_cfg { + /* lock for getting nic cfg */ + struct mutex cfg_mutex; + bool pause_set; + u32 auto_neg; + u32 rx_pause; + u32 tx_pause; +}; + struct hinic_func_to_io { struct hinic_hwif *hwif; struct hinic_hwdev *hwdev; @@ -78,6 +87,7 @@ struct hinic_func_to_io { u16 max_vfs; struct vf_data_storage *vf_infos; u8 link_status; + struct hinic_nic_cfg nic_cfg; }; struct hinic_wq_page_size { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index e9e6f4c9309a..76e3debfebe5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -887,6 +887,26 @@ static void netdev_features_init(struct net_device *netdev) netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; } +static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev) +{ + struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg; + struct hinic_pause_config pause_info = {0}; + struct hinic_port_cap port_cap = {0}; + + if (hinic_port_get_cap(nic_dev, &port_cap)) + return; + + mutex_lock(&nic_cfg->cfg_mutex); + if (nic_cfg->pause_set || !port_cap.autoneg_state) { + nic_cfg->auto_neg = port_cap.autoneg_state; + pause_info.auto_neg = nic_cfg->auto_neg; + pause_info.rx_pause = nic_cfg->rx_pause; + pause_info.tx_pause = nic_cfg->tx_pause; + hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info); + } + mutex_unlock(&nic_cfg->cfg_mutex); +} + /** * link_status_event_handler - link event handler * @handle: nic device for the handler @@ -918,6 +938,9 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, up(&nic_dev->mgmt_lock); + if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) 
+ hinic_refresh_nic_cfg(nic_dev); + netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); } else { down(&nic_dev->mgmt_lock); @@ -948,28 +971,54 @@ static int set_features(struct hinic_dev *nic_dev, { netdev_features_t changed = force_change ? ~0 : pre_features ^ features; u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; + netdev_features_t failed_features = 0; + int ret = 0; int err = 0; - if (changed & NETIF_F_TSO) - err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? + if (changed & NETIF_F_TSO) { + ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? HINIC_TSO_ENABLE : HINIC_TSO_DISABLE); + if (ret) { + err = ret; + failed_features |= NETIF_F_TSO; + } + } - if (changed & NETIF_F_RXCSUM) - err = hinic_set_rx_csum_offload(nic_dev, csum_en); + if (changed & NETIF_F_RXCSUM) { + ret = hinic_set_rx_csum_offload(nic_dev, csum_en); + if (ret) { + err = ret; + failed_features |= NETIF_F_RXCSUM; + } + } if (changed & NETIF_F_LRO) { - err = hinic_set_rx_lro_state(nic_dev, + ret = hinic_set_rx_lro_state(nic_dev, !!(features & NETIF_F_LRO), HINIC_LRO_RX_TIMER_DEFAULT, HINIC_LRO_MAX_WQE_NUM_DEFAULT); + if (ret) { + err = ret; + failed_features |= NETIF_F_LRO; + } } - if (changed & NETIF_F_HW_VLAN_CTAG_RX) - err = hinic_set_rx_vlan_offload(nic_dev, + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + ret = hinic_set_rx_vlan_offload(nic_dev, !!(features & NETIF_F_HW_VLAN_CTAG_RX)); + if (ret) { + err = ret; + failed_features |= NETIF_F_HW_VLAN_CTAG_RX; + } + } - return err; + if (err) { + nic_dev->netdev->features = features ^ failed_features; + return -EIO; + } + + return 0; } /** @@ -1008,8 +1057,6 @@ static int nic_dev_init(struct pci_dev *pdev) goto err_alloc_etherdev; } - hinic_set_ethtool_ops(netdev); - if (!HINIC_IS_VF(hwdev->hwif)) netdev->netdev_ops = &hinic_netdev_ops; else @@ -1032,6 +1079,8 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->sriov_info.pdev = pdev; nic_dev->max_qps = num_qps; + hinic_set_ethtool_ops(netdev); + sema_init(&nic_dev->mgmt_lock, 1); tx_stats = &nic_dev->tx_stats; @@ -1100,6 +1149,11 @@ static int nic_dev_init(struct pci_dev *pdev) if (err) goto err_set_features; + /* enable pause and disable pfc by default */ + err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0); + if (err) + goto err_set_pfc; + SET_NETDEV_DEV(netdev, &pdev->dev); err = register_netdev(netdev); @@ -1111,6 +1165,7 @@ static int nic_dev_init(struct pci_dev *pdev) return 0; err_reg_netdev: +err_set_pfc: err_set_features: hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 175c0ee00038..8b007a268675 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -1082,6 +1082,7 @@ int hinic_get_link_mode(struct hinic_hwdev *hwdev, if (!hwdev || !link_mode) return -EINVAL; + link_mode->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); out_size = sizeof(*link_mode); err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE, @@ -1172,6 +1173,8 @@ int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev, u16 out_size = sizeof(*pause_info); int err; + pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO, pause_info, sizeof(*pause_info), pause_info, &out_size); @@ -1190,6 +1193,8 @@ int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev, u16 out_size = sizeof(*pause_info); int err; + pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + err = 
hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO, pause_info, sizeof(*pause_info), pause_info, &out_size); @@ -1201,3 +1206,38 @@ int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev, return 0; } + +int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap) +{ + struct hinic_nic_cfg *nic_cfg = &hwdev->func_to_io.nic_cfg; + struct hinic_set_pfc pfc = {0}; + u16 out_size = sizeof(pfc); + int err; + + if (HINIC_IS_VF(hwdev->hwif)) + return 0; + + mutex_lock(&nic_cfg->cfg_mutex); + + pfc.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + pfc.pfc_bitmap = pfc_bitmap; + pfc.pfc_en = pfc_en; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PFC, + &pfc, sizeof(pfc), &pfc, &out_size); + if (err || pfc.status || !out_size) { + dev_err(&hwdev->hwif->pdev->dev, "Failed to %s pfc, err: %d, status: 0x%x, out size: 0x%x\n", + pfc_en ? "enable" : "disable", err, pfc.status, + out_size); + mutex_unlock(&nic_cfg->cfg_mutex); + return -EIO; + } + + /* pause settings is opposite from pfc */ + nic_cfg->rx_pause = pfc_en ? 0 : 1; + nic_cfg->tx_pause = pfc_en ? 0 : 1; + + mutex_unlock(&nic_cfg->cfg_mutex); + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 661c6322dc15..7b17460d4e2c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -641,6 +641,17 @@ struct hinic_pause_config { u32 tx_pause; }; +struct hinic_set_pfc { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 pfc_en; + u8 pfc_bitmap; + u8 rsvd1[4]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -736,6 +747,8 @@ int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev, int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev, struct hinic_pause_config *pause_info); +int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap); + int hinic_open(struct net_device *netdev); int hinic_close(struct net_device *netdev); From a0337c0dee686acf9b38d50abb923d13e27f7e83 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sun, 28 Jun 2020 20:36:21 +0800 Subject: [PATCH 0409/2056] hinic: add support to set and get irq coalesce add support to set TX/RX irq coalesce params with ethtool -C and get these params with ethtool -c. Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 8 + .../net/ethernet/huawei/hinic/hinic_ethtool.c | 234 ++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 62 +++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 21 ++ .../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 3 + .../net/ethernet/huawei/hinic/hinic_main.c | 53 ++++ drivers/net/ethernet/huawei/hinic/hinic_rx.c | 19 +- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 19 ++ 8 files changed, 418 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 48b40be3e84d..75d6dee948f5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -49,6 +49,12 @@ enum hinic_rss_hash_type { HINIC_RSS_HASH_ENGINE_TYPE_MAX, }; +struct hinic_intr_coal_info { + u8 pending_limt; + u8 coalesce_timer_cfg; + u8 resend_timer_cfg; +}; + struct hinic_dev { struct net_device *netdev; struct hinic_hwdev *hwdev; @@ -82,6 +88,8 @@ struct hinic_dev { struct hinic_rss_type rss_type; u8 *rss_hkey_user; s32 *rss_indir_user; + struct hinic_intr_coal_info *rx_intr_coalesce; + struct hinic_intr_coal_info *tx_intr_coalesce; struct hinic_sriov_info sriov_info; }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index edd60c892ab2..8e5c326c7687 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -49,6 +49,13 @@ #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode) \ ((ecmd)->advertising |= ADVERTISED_##mode) +#define COALESCE_PENDING_LIMIT_UNIT 8 +#define COALESCE_TIMER_CFG_UNIT 9 +#define COALESCE_ALL_QUEUE 0xFFFF +#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT) +#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT) +#define OBJ_STR_MAX_LEN 32 + struct hw2ethtool_link_mode { enum ethtool_link_mode_bit_indices link_mode_bit; u32 speed; @@ -614,6 +621,215 @@ static int hinic_set_ringparam(struct net_device *netdev, return 0; } +static int __hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info *rx_intr_coal_info; + struct hinic_intr_coal_info *tx_intr_coal_info; + + if (queue == COALESCE_ALL_QUEUE) { + /* get tx/rx irq0 as default parameters */ + rx_intr_coal_info = &nic_dev->rx_intr_coalesce[0]; + tx_intr_coal_info = &nic_dev->tx_intr_coalesce[0]; + } else { + if (queue >= nic_dev->num_qps) { + netif_err(nic_dev, drv, netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + rx_intr_coal_info = &nic_dev->rx_intr_coalesce[queue]; + tx_intr_coal_info = &nic_dev->tx_intr_coalesce[queue]; + } + + /* coalesce_timer is in unit of 9us */ + coal->rx_coalesce_usecs = rx_intr_coal_info->coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + /* coalesced_frames is in unit of 8 */ + coal->rx_max_coalesced_frames = rx_intr_coal_info->pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + coal->tx_coalesce_usecs = tx_intr_coal_info->coalesce_timer_cfg * + COALESCE_TIMER_CFG_UNIT; + coal->tx_max_coalesced_frames = tx_intr_coal_info->pending_limt * + COALESCE_PENDING_LIMIT_UNIT; + + return 0; +} + +static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal) +{ + if (coal->rx_coalesce_usecs > COALESCE_MAX_TIMER_CFG || + coal->rx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT || + coal->tx_coalesce_usecs > COALESCE_MAX_TIMER_CFG || + 
coal->tx_max_coalesced_frames > COALESCE_MAX_PENDING_LIMIT) + return -ERANGE; + + return 0; +} + +static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id, + struct hinic_intr_coal_info *coal, + bool set_rx_coal) +{ + struct hinic_intr_coal_info *intr_coal = NULL; + struct hinic_msix_config interrupt_info = {0}; + struct net_device *netdev = nic_dev->netdev; + u16 msix_idx; + int err; + + intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] : + &nic_dev->tx_intr_coalesce[q_id]; + + intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg; + intr_coal->pending_limt = coal->pending_limt; + + /* netdev not running or qp not in using, + * don't need to set coalesce to hw + */ + if (!(nic_dev->flags & HINIC_INTF_UP) || + q_id >= nic_dev->num_qps) + return 0; + + msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry : + nic_dev->txqs[q_id].sq->msix_entry; + interrupt_info.msix_index = msix_idx; + interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; + interrupt_info.pending_cnt = intr_coal->pending_limt; + interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; + + err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info); + if (err) + netif_warn(nic_dev, drv, netdev, + "Failed to set %s queue%d coalesce", + set_rx_coal ? "rx" : "tx", q_id); + + return err; +} + +static int __set_hw_coal_param(struct hinic_dev *nic_dev, + struct hinic_intr_coal_info *intr_coal, + u16 queue, bool set_rx_coal) +{ + int err; + u16 i; + + if (queue == COALESCE_ALL_QUEUE) { + for (i = 0; i < nic_dev->max_qps; i++) { + err = set_queue_coalesce(nic_dev, i, intr_coal, + set_rx_coal); + if (err) + return err; + } + } else { + if (queue >= nic_dev->num_qps) { + netif_err(nic_dev, drv, nic_dev->netdev, + "Invalid queue_id: %d\n", queue); + return -EINVAL; + } + err = set_queue_coalesce(nic_dev, queue, intr_coal, + set_rx_coal); + if (err) + return err; + } + + return 0; +} + +static int __hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, u16 queue) +{ + struct hinic_intr_coal_info *ori_rx_intr_coal = NULL; + struct hinic_intr_coal_info *ori_tx_intr_coal = NULL; + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_intr_coal_info rx_intr_coal = {0}; + struct hinic_intr_coal_info tx_intr_coal = {0}; + char obj_str[OBJ_STR_MAX_LEN] = {0}; + bool set_rx_coal = false; + bool set_tx_coal = false; + int err; + + err = is_coalesce_exceed_limit(coal); + if (err) + return err; + + if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) { + rx_intr_coal.coalesce_timer_cfg = + (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + set_rx_coal = true; + } + + if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) { + tx_intr_coal.coalesce_timer_cfg = + (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT); + tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames / + COALESCE_PENDING_LIMIT_UNIT); + set_tx_coal = true; + } + + if (queue == COALESCE_ALL_QUEUE) { + ori_rx_intr_coal = &nic_dev->rx_intr_coalesce[0]; + ori_tx_intr_coal = &nic_dev->tx_intr_coalesce[0]; + err = snprintf(obj_str, OBJ_STR_MAX_LEN, "for netdev"); + } else { + ori_rx_intr_coal = &nic_dev->rx_intr_coalesce[queue]; + ori_tx_intr_coal = &nic_dev->tx_intr_coalesce[queue]; + err = snprintf(obj_str, OBJ_STR_MAX_LEN, "for queue %d", queue); + } + if (err <= 0 || err >= OBJ_STR_MAX_LEN) { + netif_err(nic_dev, drv, netdev, "Failed to snprintf string, function 
return(%d) and dest_len(%d)\n", + err, OBJ_STR_MAX_LEN); + return -EFAULT; + } + + /* setting coalesce timer or pending limit to zero will disable + * coalesce + */ + if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg || + !rx_intr_coal.pending_limt)) + netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n"); + if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg || + !tx_intr_coal.pending_limt)) + netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n"); + + if (set_rx_coal) { + err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true); + if (err) + return err; + } + if (set_tx_coal) { + err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false); + if (err) + return err; + } + return 0; +} + +static int hinic_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int hinic_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); +} + +static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_get_coalesce(netdev, coal, queue); +} + +static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *coal) +{ + return __hinic_set_coalesce(netdev, coal, queue); +} + static void hinic_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { @@ -1293,12 +1509,21 @@ static void hinic_get_strings(struct net_device *netdev, } static const struct ethtool_ops hinic_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES, + .get_link_ksettings = hinic_get_link_ksettings, .set_link_ksettings = hinic_set_link_ksettings, .get_drvinfo = hinic_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, .get_pauseparam = hinic_get_pauseparam, .set_pauseparam = hinic_set_pauseparam, .get_channels = hinic_get_channels, @@ -1315,11 +1540,20 @@ static const struct ethtool_ops hinic_ethtool_ops = { }; static const struct ethtool_ops hinicvf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | + ETHTOOL_COALESCE_RX_MAX_FRAMES | + ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES, + .get_link_ksettings = hinic_get_link_ksettings, .get_drvinfo = hinic_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, + .get_coalesce = hinic_get_coalesce, + .set_coalesce = hinic_set_coalesce, + .get_per_queue_coalesce = hinic_get_per_queue_coalesce, + .set_per_queue_coalesce = hinic_set_per_queue_coalesce, .get_channels = hinic_get_channels, .set_channels = hinic_set_channels, .get_rxnfc = hinic_get_rxnfc, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 747d50b841ba..4de50e4ba4df 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -705,6 +705,68 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev) return 0; } +int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, + struct 
hinic_msix_config *interrupt_info) +{ + u16 out_size = sizeof(*interrupt_info); + struct hinic_pfhwdev *pfhwdev; + int err; + + if (!hwdev || !interrupt_info) + return -EINVAL; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, + interrupt_info, sizeof(*interrupt_info), + interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC); + if (err || !out_size || interrupt_info->status) { + dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, interrupt_info->status, out_size); + return -EIO; + } + + return 0; +} + +int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, + struct hinic_msix_config *interrupt_info) +{ + u16 out_size = sizeof(*interrupt_info); + struct hinic_msix_config temp_info; + struct hinic_pfhwdev *pfhwdev; + int err; + + if (!hwdev) + return -EINVAL; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + + err = hinic_get_interrupt_cfg(hwdev, &temp_info); + if (err) + return -EINVAL; + + interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt; + interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt; + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, + interrupt_info, sizeof(*interrupt_info), + interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC); + if (err || !out_size || interrupt_info->status) { + dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n", + err, interrupt_info->status, out_size); + return -EIO; + } + + return 0; +} + /** * hinic_init_hwdev - Initialize the NIC HW * @pdev: the NIC pci device diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index cc776ca2d737..ed3cc154ce18 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -285,6 +285,21 @@ struct hinic_cmd_l2nic_reset { u16 reset_flag; }; +struct hinic_msix_config { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 msix_index; + u8 pending_cnt; + u8 coalesce_timer_cnt; + u8 lli_timer_cnt; + u8 lli_credit_cnt; + u8 resend_timer_cnt; + u8 rsvd1[3]; +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; @@ -378,4 +393,10 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, enum hinic_msix_state flag); +int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, + struct hinic_msix_config *interrupt_info); + +int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, + struct hinic_msix_config *interrupt_info); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index c2b142c08b0e..a3349ae30ff3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -78,6 +78,9 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33, + HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, + HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, + HINIC_COMM_CMD_L2NIC_RESET = 0x4b, HINIC_COMM_CMD_PAGESIZE_SET = 0x50, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 
76e3debfebe5..834a20a0043c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -69,6 +69,10 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); #define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000 +#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT 2 +#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG 32 +#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG 7 + static int change_mac_addr(struct net_device *netdev, const u8 *addr); static int set_features(struct hinic_dev *nic_dev, @@ -1021,6 +1025,45 @@ static int set_features(struct hinic_dev *nic_dev, return 0; } +static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev) +{ + u64 size; + u16 i; + + size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps; + nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL); + if (!nic_dev->rx_intr_coalesce) + return -ENOMEM; + nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL); + if (!nic_dev->tx_intr_coalesce) { + kfree(nic_dev->rx_intr_coalesce); + return -ENOMEM; + } + + for (i = 0; i < nic_dev->max_qps; i++) { + nic_dev->rx_intr_coalesce[i].pending_limt = + HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; + nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; + nic_dev->rx_intr_coalesce[i].resend_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + nic_dev->tx_intr_coalesce[i].pending_limt = + HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT; + nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG; + nic_dev->tx_intr_coalesce[i].resend_timer_cfg = + HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG; + } + + return 0; +} + +static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev) +{ + kfree(nic_dev->tx_intr_coalesce); + kfree(nic_dev->rx_intr_coalesce); +} + /** * nic_dev_init - Initialize the NIC device * @pdev: the NIC pci device @@ -1156,6 +1199,12 @@ static int nic_dev_init(struct pci_dev *pdev) SET_NETDEV_DEV(netdev, &pdev->dev); + err = hinic_init_intr_coalesce(nic_dev); + if (err) { + dev_err(&pdev->dev, "Failed to init_intr_coalesce\n"); + goto err_init_intr; + } + err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); @@ -1165,6 +1214,8 @@ static int nic_dev_init(struct pci_dev *pdev) return 0; err_reg_netdev: + hinic_free_intr_coalesce(nic_dev); +err_init_intr: err_set_pfc: err_set_features: hinic_hwdev_cb_unregister(nic_dev->hwdev, @@ -1279,6 +1330,8 @@ static void hinic_remove(struct pci_dev *pdev) unregister_netdev(netdev); + hinic_free_intr_coalesce(nic_dev); + hinic_port_del_mac(nic_dev, netdev->dev_addr, 0); hinic_hwdev_cb_unregister(nic_dev->hwdev, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index af20d0dd6de7..c9a65a1f0347 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -478,11 +478,15 @@ static irqreturn_t rx_irq(int irq, void *data) static int rx_request_irq(struct hinic_rxq *rxq) { struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_msix_config interrupt_info = {0}; + struct hinic_intr_coal_info *intr_coal = NULL; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_rq *rq = rxq->rq; struct hinic_qp *qp; int err; + qp = container_of(rq, struct hinic_qp, rq); + rx_add_napi(rxq); hinic_hwdev_msix_set(hwdev, rq->msix_entry, @@ -490,13 +494,26 @@ static int rx_request_irq(struct hinic_rxq *rxq) RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, RX_IRQ_NO_RESEND_TIMER); + 
intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id]; + interrupt_info.msix_index = rq->msix_entry; + interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; + interrupt_info.pending_cnt = intr_coal->pending_limt; + interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; + + err = hinic_set_interrupt_cfg(hwdev, &interrupt_info); + if (err) { + netif_err(nic_dev, drv, rxq->netdev, + "Failed to set RX interrupt coalescing attribute\n"); + rx_del_napi(rxq); + return err; + } + err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); if (err) { rx_del_napi(rxq); return err; } - qp = container_of(rq, struct hinic_qp, rq); cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); return irq_set_affinity_hint(rq->irq, &rq->affinity_mask); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 4c66a0bc1b28..0f6d27f29de5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -718,12 +718,17 @@ static irqreturn_t tx_irq(int irq, void *data) static int tx_request_irq(struct hinic_txq *txq) { struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_msix_config interrupt_info = {0}; + struct hinic_intr_coal_info *intr_coal = NULL; struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_hwif *hwif = hwdev->hwif; struct pci_dev *pdev = hwif->pdev; struct hinic_sq *sq = txq->sq; + struct hinic_qp *qp; int err; + + qp = container_of(sq, struct hinic_qp, sq); + tx_napi_add(txq, nic_dev->tx_weight); hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, @@ -731,6 +736,20 @@ static int tx_request_irq(struct hinic_txq *txq) TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, TX_IRQ_NO_RESEND_TIMER); + intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id]; + interrupt_info.msix_index = sq->msix_entry; + interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg; + interrupt_info.pending_cnt = intr_coal->pending_limt; + interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg; + + err = hinic_set_interrupt_cfg(hwdev, &interrupt_info); + if (err) { + netif_err(nic_dev, drv, txq->netdev, + "Failed to set TX interrupt coalescing attribute\n"); + tx_napi_del(txq); + return err; + } + err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); if (err) { dev_err(&pdev->dev, "Failed to request Tx irq\n"); From 4aa218a4fe77d57edd068210da4c90dd19523954 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sun, 28 Jun 2020 20:36:22 +0800 Subject: [PATCH 0410/2056] hinic: add self test support add support to execute internal and external loopback tests with ethtool -t cmd. Signed-off-by: Luo bin Signed-off-by: David S.
Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 6 + .../net/ethernet/huawei/hinic/hinic_ethtool.c | 177 ++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 3 + .../net/ethernet/huawei/hinic/hinic_port.c | 27 +++ .../net/ethernet/huawei/hinic/hinic_port.h | 16 ++ drivers/net/ethernet/huawei/hinic/hinic_rx.c | 39 ++++ drivers/net/ethernet/huawei/hinic/hinic_tx.c | 61 ++++++ drivers/net/ethernet/huawei/hinic/hinic_tx.h | 2 + 8 files changed, 331 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 75d6dee948f5..9adb755f0820 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -20,11 +20,14 @@ #define HINIC_DRV_NAME "hinic" +#define LP_PKT_CNT 64 + enum hinic_flags { HINIC_LINK_UP = BIT(0), HINIC_INTF_UP = BIT(1), HINIC_RSS_ENABLE = BIT(2), HINIC_LINK_DOWN = BIT(3), + HINIC_LP_TEST = BIT(4), }; struct hinic_rx_mode_work { @@ -91,6 +94,9 @@ struct hinic_dev { struct hinic_intr_coal_info *rx_intr_coalesce; struct hinic_intr_coal_info *tx_intr_coalesce; struct hinic_sriov_info sriov_info; + int lb_test_rx_idx; + int lb_pkt_len; + u8 *lb_test_rx_buf; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 8e5c326c7687..2c94b48692c9 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -133,6 +133,16 @@ static struct hw2ethtool_link_mode }, }; +#define LP_DEFAULT_TIME 5 /* seconds */ +#define LP_PKT_LEN 1514 + +#define PORT_DOWN_ERR_IDX 0 +enum diag_test_index { + INTERNAL_LP_TEST = 0, + EXTERNAL_LP_TEST = 1, + DIAG_TEST_MAX = 2, +}; + static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, enum hinic_speed speed) { @@ -1244,6 +1254,11 @@ static struct hinic_stats hinic_function_stats[] = { HINIC_FUNC_STAT(rx_err_vport), }; +static char hinic_test_strings[][ETH_GSTRING_LEN] = { + "Internal lb test (on/offline)", + "External lb test (external_lb)", +}; + #define HINIC_PORT_STAT(_stat_item) { \ .name = #_stat_item, \ .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \ @@ -1453,6 +1468,8 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset) int count, q_num; switch (sset) { + case ETH_SS_TEST: + return ARRAY_LEN(hinic_test_strings); case ETH_SS_STATS: q_num = nic_dev->num_qps; count = ARRAY_LEN(hinic_function_stats) + @@ -1475,6 +1492,9 @@ static void hinic_get_strings(struct net_device *netdev, u16 i, j; switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings)); + return; case ETH_SS_STATS: for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) { memcpy(p, hinic_function_stats[i].name, @@ -1508,6 +1528,162 @@ static void hinic_get_strings(struct net_device *netdev, } } +static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time) +{ + u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf; + struct net_device *netdev = nic_dev->netdev; + struct sk_buff *skb_tmp = NULL; + struct sk_buff *skb = NULL; + u32 cnt = test_time * 5; + u8 *test_data = NULL; + u32 i; + u8 j; + + skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC); + if (!skb_tmp) + return -ENOMEM; + + test_data = __skb_put(skb_tmp, LP_PKT_LEN); + + memset(test_data, 0xFF, 2 * ETH_ALEN); + test_data[ETH_ALEN] = 0xFE; + test_data[2 * ETH_ALEN] = 0x08; + test_data[2 * ETH_ALEN + 1] = 0x0; + + for (i = ETH_HLEN; i < LP_PKT_LEN; i++) + test_data[i] = i & 0xFF; + + 
skb_tmp->queue_mapping = 0; + skb_tmp->ip_summed = CHECKSUM_COMPLETE; + skb_tmp->dev = netdev; + + for (i = 0; i < cnt; i++) { + nic_dev->lb_test_rx_idx = 0; + memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN); + + for (j = 0; j < LP_PKT_CNT; j++) { + skb = pskb_copy(skb_tmp, GFP_ATOMIC); + if (!skb) { + dev_kfree_skb_any(skb_tmp); + netif_err(nic_dev, drv, netdev, + "Copy skb failed for loopback test\n"); + return -ENOMEM; + } + + /* mark index for every pkt */ + skb->data[LP_PKT_LEN - 1] = j; + + if (hinic_lb_xmit_frame(skb, netdev)) { + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb_tmp); + netif_err(nic_dev, drv, netdev, + "Xmit pkt failed for loopback test\n"); + return -EBUSY; + } + } + + /* wait till all pkts received to RX buffer */ + msleep(200); + + for (j = 0; j < LP_PKT_CNT; j++) { + if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN, + skb_tmp->data, LP_PKT_LEN - 1) || + (*(lb_test_rx_buf + j * LP_PKT_LEN + + LP_PKT_LEN - 1) != j)) { + dev_kfree_skb_any(skb_tmp); + netif_err(nic_dev, drv, netdev, + "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n", + j + i * LP_PKT_CNT, + LP_PKT_LEN - 1, + *(lb_test_rx_buf + j * LP_PKT_LEN + + LP_PKT_LEN - 1)); + return -EIO; + } + } + } + + dev_kfree_skb_any(skb_tmp); + return 0; +} + +static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time, + enum diag_test_index *test_index) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_test_rx_buf = NULL; + int err = 0; + + if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) { + *test_index = INTERNAL_LP_TEST; + if (hinic_set_loopback_mode(nic_dev->hwdev, + HINIC_INTERNAL_LP_MODE, true)) { + netif_err(nic_dev, drv, netdev, + "Failed to set port loopback mode before loopback test\n"); + return -EIO; + } + } else { + *test_index = EXTERNAL_LP_TEST; + } + + lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN); + if (!lb_test_rx_buf) { + err = -ENOMEM; + } else { + nic_dev->lb_test_rx_buf = lb_test_rx_buf; + nic_dev->lb_pkt_len = LP_PKT_LEN; + nic_dev->flags |= HINIC_LP_TEST; + err = hinic_run_lp_test(nic_dev, test_time); + nic_dev->flags &= ~HINIC_LP_TEST; + msleep(100); + vfree(lb_test_rx_buf); + nic_dev->lb_test_rx_buf = NULL; + } + + if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) { + if (hinic_set_loopback_mode(nic_dev->hwdev, + HINIC_INTERNAL_LP_MODE, false)) { + netif_err(nic_dev, drv, netdev, + "Failed to cancel port loopback mode after loopback test\n"); + err = -EIO; + } + } + + return err; +} + +static void hinic_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + enum hinic_port_link_state link_state; + enum diag_test_index test_index = 0; + int err = 0; + + memset(data, 0, DIAG_TEST_MAX * sizeof(u64)); + + /* don't support loopback test when netdev is closed. 
*/ + if (!(nic_dev->flags & HINIC_INTF_UP)) { + netif_err(nic_dev, drv, netdev, + "Do not support loopback test when netdev is closed\n"); + eth_test->flags |= ETH_TEST_FL_FAILED; + data[PORT_DOWN_ERR_IDX] = 1; + return; + } + + netif_carrier_off(netdev); + + err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME, + &test_index); + if (err) { + eth_test->flags |= ETH_TEST_FL_FAILED; + data[test_index] = 1; + } + + err = hinic_port_link_state(nic_dev, &link_state); + if (!err && link_state == HINIC_LINK_STATE_UP) + netif_carrier_on(netdev); +} + static const struct ethtool_ops hinic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -1537,6 +1713,7 @@ static const struct ethtool_ops hinic_ethtool_ops = { .get_sset_count = hinic_get_sset_count, .get_ethtool_stats = hinic_get_ethtool_stats, .get_strings = hinic_get_strings, + .self_test = hinic_diag_test, }; static const struct ethtool_ops hinicvf_ethtool_ops = { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index ed3cc154ce18..c92c39a50b81 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -97,6 +97,9 @@ enum hinic_port_cmd { HINIC_PORT_CMD_FWCTXT_INIT = 69, + HINIC_PORT_CMD_GET_LOOPBACK_MODE = 72, + HINIC_PORT_CMD_SET_LOOPBACK_MODE, + HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78, HINIC_PORT_CMD_GET_MGMT_VERSION = 88, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 8b007a268675..98ea3f781611 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -1241,3 +1241,30 @@ int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap) return 0; } + +int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable) +{ + struct hinic_port_loopback lb = {0}; + u16 out_size = sizeof(lb); + int err; + + lb.mode = mode; + lb.en = enable; + + if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) { + dev_err(&hwdev->hwif->pdev->dev, + "Invalid loopback mode %d to set\n", mode); + return -EINVAL; + } + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE, + &lb, sizeof(lb), &lb, &out_size); + if (err || !out_size || lb.status) { + dev_err(&hwdev->hwif->pdev->dev, + "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n", + mode, enable, err, lb.status, out_size); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 7b17460d4e2c..b21956d7232e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -652,6 +652,20 @@ struct hinic_set_pfc { u8 rsvd1[4]; }; +/* get or set loopback mode, need to modify by base API */ +#define HINIC_INTERNAL_LP_MODE 5 +#define LOOP_MODE_MIN 1 +#define LOOP_MODE_MAX 6 + +struct hinic_port_loopback { + u8 status; + u8 version; + u8 rsvd[6]; + + u32 mode; + u32 en; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -749,6 +763,8 @@ int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev, int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap); +int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable); + int hinic_open(struct net_device *netdev); int hinic_close(struct net_device *netdev); diff --git 
a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index c9a65a1f0347..5bee951fe9d4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -316,6 +316,39 @@ static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, return num_wqes; } +static void hinic_copy_lp_data(struct hinic_dev *nic_dev, + struct sk_buff *skb) +{ + struct net_device *netdev = nic_dev->netdev; + u8 *lb_buf = nic_dev->lb_test_rx_buf; + int lb_len = nic_dev->lb_pkt_len; + int pkt_offset, frag_len, i; + void *frag_data = NULL; + + if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) { + nic_dev->lb_test_rx_idx = 0; + netif_warn(nic_dev, drv, netdev, "Loopback test warning, receive too more test pkts\n"); + } + + if (skb->len != nic_dev->lb_pkt_len) { + netif_warn(nic_dev, drv, netdev, "Wrong packet length\n"); + nic_dev->lb_test_rx_idx++; + return; + } + + pkt_offset = nic_dev->lb_test_rx_idx * lb_len; + frag_len = (int)skb_headlen(skb); + memcpy(lb_buf + pkt_offset, skb->data, frag_len); + pkt_offset += frag_len; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]); + frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]); + memcpy((lb_buf + pkt_offset), frag_data, frag_len); + pkt_offset += frag_len; + } + nic_dev->lb_test_rx_idx++; +} + /** * rxq_recv - Rx handler * @rxq: rx queue @@ -330,6 +363,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) u64 pkt_len = 0, rx_bytes = 0; struct hinic_rq *rq = rxq->rq; struct hinic_rq_wqe *rq_wqe; + struct hinic_dev *nic_dev; unsigned int free_wqebbs; struct hinic_rq_cqe *cqe; int num_wqes, pkts = 0; @@ -342,6 +376,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) u32 vlan_len; u16 vid; + nic_dev = netdev_priv(netdev); + while (pkts < budget) { num_wqes = 0; @@ -384,6 +420,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } + if (unlikely(nic_dev->flags & HINIC_LP_TEST)) + hinic_copy_lp_data(nic_dev, skb); + skb_record_rx_queue(skb, qp->q_id); skb->protocol = eth_type_trans(skb, rxq->netdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 0f6d27f29de5..a97498ee6914 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -459,6 +459,67 @@ static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task, return 0; } +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 prod_idx, q_id = skb->queue_mapping; + struct netdev_queue *netdev_txq; + int nr_sges, err = NETDEV_TX_OK; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + struct hinic_txq *txq; + struct hinic_qp *qp; + + txq = &nic_dev->txqs[q_id]; + qp = container_of(txq->sq, struct hinic_qp, sq); + nr_sges = skb_shinfo(skb)->nr_frags + 1; + + err = tx_map_skb(nic_dev, skb, txq->sges); + if (err) + goto skb_error; + + wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); + + sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); + if (!sq_wqe) { + netif_stop_subqueue(netdev, qp->q_id); + + sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); + if (sq_wqe) { + netif_wake_subqueue(nic_dev->netdev, qp->q_id); + goto process_sq_wqe; + } + + tx_unmap_skb(nic_dev, skb, txq->sges); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_busy++; + 
u64_stats_update_end(&txq->txq_stats.syncp); + err = NETDEV_TX_BUSY; + wqe_size = 0; + goto flush_skbs; + } + +process_sq_wqe: + hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); + +flush_skbs: + netdev_txq = netdev_get_tx_queue(netdev, q_id); + if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq))) + hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); + + return err; + +skb_error: + dev_kfree_skb_any(skb); + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + + return NETDEV_TX_OK; +} + netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index f158b7db7fb8..b3c8657774a7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -44,6 +44,8 @@ void hinic_txq_clean_stats(struct hinic_txq *txq); void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); +netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, From 07afcc7ab40e921c90de0392c99c3d25611a94df Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sun, 28 Jun 2020 20:36:23 +0800 Subject: [PATCH 0411/2056] hinic: add support to identify physical device add support to identify physical device by flashing an LED attached to it with ethtool -p cmd. Signed-off-by: Luo bin Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 34 ++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 2 + .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 1 + .../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 4 +- .../net/ethernet/huawei/hinic/hinic_port.c | 55 +++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_port.h | 35 ++++++++++++ .../net/ethernet/huawei/hinic/hinic_sriov.c | 4 +- 7 files changed, 132 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 2c94b48692c9..f012d136b7b6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -1684,6 +1684,39 @@ static void hinic_diag_test(struct net_device *netdev, netif_carrier_on(netdev); } +static int hinic_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int err = 0; + u8 port; + + port = nic_dev->hwdev->port_id; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + err = hinic_set_led_status(nic_dev->hwdev, port, + HINIC_LED_TYPE_LINK, + HINIC_LED_MODE_FORCE_2HZ); + if (err) + netif_err(nic_dev, drv, netdev, + "Set LED blinking in 2HZ failed\n"); + break; + + case ETHTOOL_ID_INACTIVE: + err = hinic_reset_led_status(nic_dev->hwdev, port); + if (err) + netif_err(nic_dev, drv, netdev, + "Reset LED to original status failed\n"); + break; + + default: + return -EOPNOTSUPP; + } + + return err; +} + static const struct ethtool_ops hinic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -1714,6 +1747,7 @@ static const struct ethtool_ops hinic_ethtool_ops = { .get_ethtool_stats = hinic_get_ethtool_stats, .get_strings = 
hinic_get_strings, .self_test = hinic_diag_test, + .set_phys_id = hinic_set_phys_id, }; static const struct ethtool_ops hinicvf_ethtool_ops = { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 4de50e4ba4df..45c137827a16 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -83,6 +83,8 @@ static int parse_capability(struct hinic_hwdev *hwdev, nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1; } + hwdev->port_id = dev_cap->port_id; + return 0; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index c92c39a50b81..01fe94f2d4bc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -312,6 +312,7 @@ struct hinic_hwdev { struct hinic_mbox_func_to_func *func_to_func; struct hinic_cap nic_cap; + u8 port_id; }; struct hinic_nic_cb { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index a3349ae30ff3..919d2c6ffc35 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -81,7 +81,9 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, - HINIC_COMM_CMD_L2NIC_RESET = 0x4b, + HINIC_COMM_CMD_SET_LED_STATUS = 0x4a, + + HINIC_COMM_CMD_L2NIC_RESET = 0x4b, HINIC_COMM_CMD_PAGESIZE_SET = 0x50, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 98ea3f781611..116ca1c877f2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -1268,3 +1268,58 @@ int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable) return 0; } + +static int _set_led_status(struct hinic_hwdev *hwdev, u8 port, + enum hinic_led_type type, + enum hinic_led_mode mode, u8 reset) +{ + struct hinic_led_info led_info = {0}; + u16 out_size = sizeof(led_info); + struct hinic_pfhwdev *pfhwdev; + int err; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + led_info.port = port; + led_info.reset = reset; + + led_info.type = type; + led_info.mode = mode; + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_SET_LED_STATUS, + &led_info, sizeof(led_info), + &led_info, &out_size, HINIC_MGMT_MSG_SYNC); + if (err || led_info.status || !out_size) { + dev_err(&hwdev->hwif->pdev->dev, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n", + err, led_info.status, out_size); + return -EIO; + } + + return 0; +} + +int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port, + enum hinic_led_type type, enum hinic_led_mode mode) +{ + if (!hwdev) + return -EINVAL; + + return _set_led_status(hwdev, port, type, mode, 0); +} + +int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port) +{ + int err; + + if (!hwdev) + return -EINVAL; + + err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID, + HINIC_LED_MODE_INVALID, 1); + if (err) + dev_err(&hwdev->hwif->pdev->dev, + "Failed to reset led status\n"); + + return err; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index b21956d7232e..5c916875f295 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -666,6 +666,17 @@ struct hinic_port_loopback { u32 en; }; 
+struct hinic_led_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port; + u8 type; + u8 mode; + u8 reset; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -765,6 +776,30 @@ int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap); int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable); +enum hinic_led_mode { + HINIC_LED_MODE_ON, + HINIC_LED_MODE_OFF, + HINIC_LED_MODE_FORCE_1HZ, + HINIC_LED_MODE_FORCE_2HZ, + HINIC_LED_MODE_FORCE_4HZ, + HINIC_LED_MODE_1HZ, + HINIC_LED_MODE_2HZ, + HINIC_LED_MODE_4HZ, + HINIC_LED_MODE_INVALID, +}; + +enum hinic_led_type { + HINIC_LED_TYPE_LINK, + HINIC_LED_TYPE_LOW_SPEED, + HINIC_LED_TYPE_HIGH_SPEED, + HINIC_LED_TYPE_INVALID, +}; + +int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port); + +int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port, + enum hinic_led_type type, enum hinic_led_mode mode); + int hinic_open(struct net_device *netdev); int hinic_close(struct net_device *netdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index efab2dd2c889..f5c7c1f48542 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -383,7 +383,7 @@ static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id, nic_io = &hw_dev->func_to_io; vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id); - if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) && + if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) && !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) { dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n"); mac_out->status = HINIC_PF_SET_VF_ALREADY; @@ -905,7 +905,6 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) err = hinic_set_vf_spoofchk(sriov_info->hwdev, OS_VF_ID_TO_HW(vf), setting); - if (!err) { netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n", vf, setting ? "on" : "off"); @@ -1020,6 +1019,7 @@ static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, dev_cap->max_vf = cap->max_vf; dev_cap->max_sqs = cap->max_vf_qps; dev_cap->max_rqs = cap->max_vf_qps; + dev_cap->port_id = dev->port_id; *out_size = sizeof(*dev_cap); From 2ac84cd160a7d8562ac8ff9acca808edced4c84e Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sun, 28 Jun 2020 20:36:24 +0800 Subject: [PATCH 0412/2056] hinic: add support to get eeprom information add support to get eeprom information from the plug-in module with ethtool -m cmd. Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 69 ++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 4 ++ .../net/ethernet/huawei/hinic/hinic_port.c | 72 +++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_port.h | 30 ++++++++ 4 files changed, 175 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index f012d136b7b6..a4a2a2d68f5c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" @@ -1717,6 +1718,72 @@ static int hinic_set_phys_id(struct net_device *netdev, return err; } +static int hinic_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_type_ext; + u8 sfp_type; + int err; + + err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext); + if (err) + return err; + + switch (sfp_type) { + case SFF8024_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case SFF8024_ID_QSFP_8438: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + break; + case SFF8024_ID_QSFP_8436_8636: + if (sfp_type_ext >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; + } + break; + case SFF8024_ID_QSFP28_8636: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; + break; + default: + netif_warn(nic_dev, drv, netdev, + "Optical module unknown: 0x%x\n", sfp_type); + return -EINVAL; + } + + return 0; +} + +static int hinic_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + u16 len; + int err; + + if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE)) + return -EINVAL; + + memset(data, 0, ee->len); + + err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len); + if (err) + return err; + + memcpy(data, sfp_data + ee->offset, ee->len); + + return 0; +} + static const struct ethtool_ops hinic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -1748,6 +1815,8 @@ static const struct ethtool_ops hinic_ethtool_ops = { .get_strings = hinic_get_strings, .self_test = hinic_diag_test, .set_phys_id = hinic_set_phys_id, + .get_module_info = hinic_get_module_info, + .get_module_eeprom = hinic_get_module_eeprom, }; static const struct ethtool_ops hinicvf_ethtool_ops = { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 01fe94f2d4bc..958ea1a6a60d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -130,9 +130,13 @@ enum hinic_port_cmd { HINIC_PORT_CMD_SET_AUTONEG = 219, + HINIC_PORT_CMD_GET_STD_SFP_INFO = 240, + HINIC_PORT_CMD_SET_LRO_TIMER = 244, HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249, + + HINIC_PORT_CMD_GET_SFP_ABS = 251, }; /* cmd of mgmt CPU message for HILINK module */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 116ca1c877f2..ba358bbb74a2 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -1323,3 +1323,75 @@ int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port) return err; } + +static bool hinic_if_sfp_absent(struct hinic_hwdev *hwdev) +{ + struct hinic_cmd_get_light_module_abs sfp_abs = {0}; + u16 out_size = sizeof(sfp_abs); + u8 port_id = hwdev->port_id; + int err; + + sfp_abs.port_id = port_id; + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_SFP_ABS, + &sfp_abs, sizeof(sfp_abs), &sfp_abs, + &out_size); + if (sfp_abs.status || err || !out_size) { + dev_err(&hwdev->hwif->pdev->dev, + "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n", + port_id, err, sfp_abs.status, out_size); + return true; + } + + return ((sfp_abs.abs_status == 0) ? false : true); +} + +int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len) +{ + struct hinic_cmd_get_std_sfp_info sfp_info = {0}; + u16 out_size = sizeof(sfp_info); + u8 port_id; + int err; + + if (!hwdev || !data || !len) + return -EINVAL; + + port_id = hwdev->port_id; + + if (hinic_if_sfp_absent(hwdev)) + return -ENXIO; + + sfp_info.port_id = port_id; + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO, + &sfp_info, sizeof(sfp_info), &sfp_info, + &out_size); + if (sfp_info.status || err || !out_size) { + dev_err(&hwdev->hwif->pdev->dev, + "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n", + port_id, err, sfp_info.status, out_size); + return -EIO; + } + + *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE); + memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE); + + return 0; +} + +int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1) +{ + u8 sfp_data[STD_SFP_INFO_MAX_SIZE]; + u16 len; + int err; + + if (hinic_if_sfp_absent(hwdev)) + return -ENXIO; + + err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len); + if (err) + return err; + + *data0 = sfp_data[0]; + *data1 = sfp_data[1]; + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 5c916875f295..0e444d2c02bb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -677,6 +677,32 @@ struct hinic_led_info { u8 reset; }; +#define STD_SFP_INFO_MAX_SIZE 640 + +struct hinic_cmd_get_light_module_abs { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 abs_status; /* 0:present, 1:absent */ + u8 rsv[2]; +}; + +#define STD_SFP_INFO_MAX_SIZE 640 + +struct hinic_cmd_get_std_sfp_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 port_id; + u8 wire_type; + u16 eeprom_len; + u32 rsvd; + u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -800,6 +826,10 @@ int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port); int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port, enum hinic_led_type type, enum hinic_led_mode mode); +int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1); + +int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len); + int hinic_open(struct net_device *netdev); int hinic_close(struct net_device *netdev); From fe80536acf8397827be77f9b8ada384b90e790d0 Mon Sep 17 00:00:00 2001 From: Martin Date: Sun, 28 Jun 2020 23:18:23 +0530 Subject: [PATCH 0413/2056] bareudp: Added attribute to enable & disable rx metadata collection Metadata need not be collected in receive if the packet from 
bareudp device is not targeted to openvswitch. Signed-off-by: Martin Signed-off-by: David S. Miller --- Documentation/networking/bareudp.rst | 6 ++++-- drivers/net/bareudp.c | 23 +++++++++++++++++------ include/net/bareudp.h | 1 + include/uapi/linux/if_link.h | 1 + 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 465a8b251bfe..0e00636d8d74 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -48,5 +48,7 @@ enabled. The bareudp device could be used along with OVS or flower filter in TC. The OVS or TC flower layer must set the tunnel information in SKB dst field before sending packet buffer to the bareudp device for transmission. On reception the -bareudp device extracts and stores the tunnel information in SKB dst field before -passing the packet buffer to the network stack. +bareudp device decapsulates the udp header and passes the inner packet to the +network stack. If RX_COLLECT_METADATA flag is enabled in the device the tunnel +information will be stored in the SKB dst field before the packet buffer is +passed to the network stack. diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3dd46cd55114..108a8cafc4f8 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -46,6 +46,7 @@ struct bareudp_dev { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; @@ -125,13 +126,14 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); - if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; - goto drop; + if (bareudp->rx_collect_metadata) { + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { + bareudp->dev->stats.rx_dropped++; + goto drop; + } + skb_dst_set(skb, &tun_dst->dst); } - skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -575,6 +577,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; + if (data[IFLA_BAREUDP_RX_COLLECT_METADATA]) + conf->rx_collect_metadata = true; + return 0; } @@ -612,6 +617,8 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; + bareudp->rx_collect_metadata = conf->rx_collect_metadata; + err = register_netdevice(dev); if (err) return err; @@ -669,6 +676,7 @@ static size_t bareudp_get_size(const struct net_device *dev) nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ + nla_total_size(0) + /* IFLA_BAREUDP_RX_COLLECT_METADATA */ 0; } @@ -685,6 +693,9 @@ static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; + if (bareudp->rx_collect_metadata && + nla_put_flag(skb, IFLA_BAREUDP_RX_COLLECT_METADATA)) + goto nla_put_failure; return 0; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index dc65a0d71d9b..3dd5f9a8d01c 100644 --- a/include/net/bareudp.h 
+++ b/include/net/bareudp.h @@ -12,6 +12,7 @@ struct bareudp_conf { __be16 port; u16 sport_min; bool multi_proto_mode; + bool rx_collect_metadata; }; struct net_device *bareudp_dev_create(struct net *net, const char *name, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index a009365ad67b..cc185a007ade 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -600,6 +600,7 @@ enum { IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, + IFLA_BAREUDP_RX_COLLECT_METADATA, __IFLA_BAREUDP_MAX }; From c6ecd475165ef5729031f0d27d29b8862b188cd1 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:23 +0200 Subject: [PATCH 0414/2056] cail,hsi: fix cfhsi_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too and returning NETDEV_TX_OK instead of 0 accordingly. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/caif/caif_hsi.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index bbb2575d4728..4a33ec4fc089 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1006,7 +1006,7 @@ static void cfhsi_aggregation_tout(struct timer_list *t) cfhsi_start_tx(cfhsi); } -static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) { struct cfhsi *cfhsi = NULL; int start_xfer = 0; @@ -1072,7 +1072,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) spin_unlock_bh(&cfhsi->lock); if (aggregate_ready) cfhsi_start_tx(cfhsi); - return 0; + return NETDEV_TX_OK; } /* Delete inactivity timer if started. */ @@ -1102,7 +1102,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) queue_work(cfhsi->wq, &cfhsi->wake_up_work); } - return 0; + return NETDEV_TX_OK; } static const struct net_device_ops cfhsi_netdevops; From 8805464a6e7e27cccca73f785ab3604f6af421cf Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:24 +0200 Subject: [PATCH 0415/2056] caif: fix caif_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/caif/caif_serial.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index d737ceb61203..bcc14c5875bf 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c @@ -266,7 +266,7 @@ static int handle_tx(struct ser_device *ser) return tty_wr; } -static int caif_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev) { struct ser_device *ser; From 1d01230bd56e24b3e6b4c09da2c37a70905a51da Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:25 +0200 Subject: [PATCH 0416/2056] caif: fix cfspi_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. 
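For illustration only, a minimal sketch of the prototype this series of conversions converges on, assuming <linux/netdevice.h>; the example_* names are hypothetical and are not taken from any of the drivers touched by these patches:

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (example_tx_ring_full(dev))		/* hypothetical helper */
		return NETDEV_TX_BUSY;		/* ask the stack to requeue the skb */

	example_queue_skb(dev, skb);		/* hypothetical helper: hand skb to hardware */
	return NETDEV_TX_OK;			/* skb has been consumed */
}
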
Fix this by returning 'netdev_tx_t' in this driver too and returning NETDEV_TX_OK instead of 0 accordingly. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/caif/caif_spi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 63f2548f5b1b..7d5899626130 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -488,7 +488,7 @@ static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) complete(&cfspi->comp); } -static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev) { struct cfspi *cfspi = NULL; unsigned long flags; @@ -514,7 +514,7 @@ static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev) cfspi->cfdev.flowctrl(cfspi->ndev, 0); } - return 0; + return NETDEV_TX_OK; } int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) From b8fc70906b3ba40c84e794124dc19e4320d93dc7 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:26 +0200 Subject: [PATCH 0417/2056] caif: fix cfv_netdev_tx()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/caif/caif_virtio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index eb426822ad06..80ea2e913c2b 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -519,7 +519,7 @@ static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv, } /* Put the CAIF packet on the virtio ring and kick the receiver */ -static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev) { struct cfv_info *cfv = netdev_priv(netdev); struct buf_info *buf_info; From 92c5e1150732ac7cc3f876183db62b08572ec63e Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:27 +0200 Subject: [PATCH 0418/2056] net: aquantia: fix aq_ndev_start_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/aquantia/atlantic/aq_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index dfb29b933eb7..8f70a3909929 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -95,7 +95,7 @@ static int aq_ndev_close(struct net_device *ndev) return err; } -static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); From de37b0a58a3e76f5235ca023e74311060624603e Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:28 +0200 Subject: [PATCH 0419/2056] net: arc_emac: fix arc_emac_tx()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/ethernet/arc/emac_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 38cd968b6a3b..b56a9e2aecd9 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -673,7 +673,7 @@ static struct net_device_stats *arc_emac_stats(struct net_device *ndev) * * This function is invoked from upper layers to initiate transmission. */ -static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int len, *txbd_curr = &priv->txbd_curr; From f649c35551c7e4c0e8797862ff755d2ca0be2e2e Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:29 +0200 Subject: [PATCH 0420/2056] net: nb8800: fix nb8800_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/ethernet/aurora/nb8800.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index bc273e0db7ff..5b20185cbd62 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -384,7 +384,7 @@ static void nb8800_tx_dma_start_irq(struct net_device *dev) spin_unlock(&priv->tx_lock); } -static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); struct nb8800_tx_desc *txd; From 737ce1e98668cda4fd2ff64ac4132511a3b09f64 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:30 +0200 Subject: [PATCH 0421/2056] net: nfp: fix nfp_net_tx()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 0e0cc3d58bdc..83ff18140bfe 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -974,7 +974,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle) * * Return: NETDEV_TX_OK on success. */ -static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const skb_frag_t *frag; From 4e516a35eb01864b82c7f36fdac94ac9a9b71c3f Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:31 +0200 Subject: [PATCH 0422/2056] net: pch_gbe: fix pch_gbe_xmit_frame()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 73ec195fbc30..23f7c76737c9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -2064,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev) * - NETDEV_TX_OK: Normal end * - NETDEV_TX_BUSY: Error end */ -static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; From 673d8eb6cfbea0693a49e46775e325c0e651e6b1 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:32 +0200 Subject: [PATCH 0423/2056] net: dwc-xlgmac: fix xlgmac_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 07046a2370b3..26aa7f32151f 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -697,7 +697,7 @@ static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue) schedule_work(&pdata->restart_work); } -static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_pkt_info *tx_pkt_info; From a63a3749c4d887981f49367db71d0401505be022 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:33 +0200 Subject: [PATCH 0424/2056] net: plip: fix plip_tx_packet()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- drivers/net/plip/plip.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c index e89cdebae6f1..d82016dcde3b 100644 --- a/drivers/net/plip/plip.c +++ b/drivers/net/plip/plip.c @@ -142,7 +142,7 @@ static void plip_timer_bh(struct work_struct *work); static void plip_interrupt(void *dev_id); /* Functions for DEV methods */ -static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev); +static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev); static int plip_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); @@ -958,7 +958,7 @@ plip_interrupt(void *dev_id) spin_unlock_irqrestore(&nl->lock, flags); } -static int +static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev) { struct net_local *nl = netdev_priv(dev); From 146ba9a3679ff8846e2044886b0dfce4ad03bc08 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:34 +0200 Subject: [PATCH 0425/2056] usbnet: ipheth: fix ipheth_tx()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- drivers/net/usb/ipheth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index c792d65dd7b4..b09b45382faf 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -358,7 +358,7 @@ static int ipheth_close(struct net_device *net) return 0; } -static int ipheth_tx(struct sk_buff *skb, struct net_device *net) +static netdev_tx_t ipheth_tx(struct sk_buff *skb, struct net_device *net) { struct ipheth_device *dev = netdev_priv(net); struct usb_device *udev = dev->udev; From 42deace2a54cac2ecb0fb3c38d98502942bd1167 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:35 +0200 Subject: [PATCH 0426/2056] net/hsr: fix hsr_dev_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- net/hsr/hsr_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 478852ef98ef..1032b83d7047 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -210,7 +210,7 @@ static netdev_features_t hsr_fix_features(struct net_device *dev, return hsr_features_recompute(hsr, features); } -static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct hsr_priv *hsr = netdev_priv(dev); struct hsr_port *master; From 433f17a93c52478e10d6541fb706fbcd0c6a6124 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:36 +0200 Subject: [PATCH 0427/2056] l2tp: fix l2tp_eth_dev_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. Miller --- net/l2tp/l2tp_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index fd5ac2788e45..3099efa19249 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -73,7 +73,7 @@ static void l2tp_eth_dev_uninit(struct net_device *dev) */ } -static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_session *session = priv->session; From 2a78478439317bb18eafb834e10b0dc02c1e53c2 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Sun, 28 Jun 2020 21:53:37 +0200 Subject: [PATCH 0428/2056] cxgb4vf: fix t4vf_eth_xmit()'s return type The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type, but the implementation in this driver returns an 'int'. Fix this by returning 'netdev_tx_t' in this driver too. Signed-off-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 2 +- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 3782e48dada2..f55105a4112f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -562,7 +562,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *, unsigned int); void t4vf_free_sge_resources(struct adapter *); -int t4vf_eth_xmit(struct sk_buff *, struct net_device *); +netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *); int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *, const struct pkt_gl *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8c3d6e11a4bf..95657da0aa4b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1154,7 +1154,7 @@ static inline void txq_advance(struct sge_txq *tq, unsigned int n) * * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled. */ -int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) +netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) { u32 wr_mid; u64 cntrl, *end; From 93c09ca6b140fcd672bb815624a6682016b005b6 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 28 Jun 2020 23:15:45 +0200 Subject: [PATCH 0429/2056] r8169: merge handling of RTL8101e and RTL8100e Chip versions 13, 14, 15 are treated the same by the driver, therefore let's merge them. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.h | 2 -- drivers/net/ethernet/realtek/r8169_main.c | 10 +++------- drivers/net/ethernet/realtek/r8169_phy_config.c | 2 -- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 22a6a057b11e..afefdec9d191 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -25,8 +25,6 @@ enum mac_version { RTL_GIGA_MAC_VER_11, RTL_GIGA_MAC_VER_12, RTL_GIGA_MAC_VER_13, - RTL_GIGA_MAC_VER_14, - RTL_GIGA_MAC_VER_15, RTL_GIGA_MAC_VER_16, RTL_GIGA_MAC_VER_17, RTL_GIGA_MAC_VER_18, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 226205099253..124827b19a4a 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -105,9 +105,7 @@ static const struct { [RTL_GIGA_MAC_VER_10] = {"RTL8101e" }, [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, - [RTL_GIGA_MAC_VER_13] = {"RTL8101e" }, - [RTL_GIGA_MAC_VER_14] = {"RTL8100e" }, - [RTL_GIGA_MAC_VER_15] = {"RTL8100e" }, + [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, @@ -2009,8 +2007,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, /* FIXME: where did these entries come from ? -- FR */ - { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 }, - { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 }, + { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 }, + { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 }, /* 8110 family. 
*/ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, @@ -3616,8 +3614,6 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_13] = NULL, - [RTL_GIGA_MAC_VER_14] = NULL, - [RTL_GIGA_MAC_VER_15] = NULL, [RTL_GIGA_MAC_VER_16] = NULL, [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index 0cf4893e5624..a0c2b3330e1e 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1261,8 +1261,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_13] = NULL, - [RTL_GIGA_MAC_VER_14] = NULL, - [RTL_GIGA_MAC_VER_15] = NULL, [RTL_GIGA_MAC_VER_16] = NULL, [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, From cdafdc29ef75a5bca300cada5fd4a205d5ef29cf Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Sun, 28 Jun 2020 23:17:07 +0200 Subject: [PATCH 0430/2056] r8169: sync support for RTL8401 with vendor driver So far RTL8401 was treated like a RTL8101e, means we relied on the BIOS to configure MAC and PHY properly. Make RTL8401 a separate chip version and copy MAC / PHY config from r8101 vendor driver. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.h | 1 + drivers/net/ethernet/realtek/r8169_main.c | 18 ++++++++++++++++-- .../net/ethernet/realtek/r8169_phy_config.c | 8 ++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index afefdec9d191..422a8e5a8d3b 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -25,6 +25,7 @@ enum mac_version { RTL_GIGA_MAC_VER_11, RTL_GIGA_MAC_VER_12, RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, RTL_GIGA_MAC_VER_16, RTL_GIGA_MAC_VER_17, RTL_GIGA_MAC_VER_18, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 124827b19a4a..07a33af1f64e 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -106,6 +106,7 @@ static const struct { [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e" }, + [RTL_GIGA_MAC_VER_14] = {"RTL8401" }, [RTL_GIGA_MAC_VER_16] = {"RTL8101e" }, [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b" }, [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp" }, @@ -1999,8 +2000,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, - /* RTL8401, reportedly works if treated as RTL8101e */ - { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 }, { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, @@ -3401,6 +3401,19 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) rtl_ephy_write(tp, 0x03, 0xc2f9); } +static void rtl_hw_start_8401(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8401[] = { + { 0x01, 0xffff, 0x6fe5 }, + { 0x03, 0xffff, 0x0599 }, + { 0x06, 0xffff, 
0xaf25 }, + { 0x07, 0xffff, 0x8e68 }, + }; + + rtl_ephy_init(tp, e_info_8401); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); +} + static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) { static const struct ephy_info e_info_8105e_1[] = { @@ -3614,6 +3627,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401, [RTL_GIGA_MAC_VER_16] = NULL, [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b, [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1, diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index a0c2b3330e1e..bc8bf48bdbb0 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -1091,6 +1091,13 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } +static void rtl8401_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + phy_set_bits(phydev, 0x11, BIT(12)); + phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003); +} + static void rtl8105e_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1261,6 +1268,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config, [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_13] = NULL, + [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config, [RTL_GIGA_MAC_VER_16] = NULL, [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config, [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config, From 836e0e555893bbff8997d666b3e4d508c8a7641b Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 27 Jun 2020 15:03:06 +0300 Subject: [PATCH 0431/2056] net: mscc: ocelot: remove EXPORT_SYMBOL from ocelot_net.c Now that all net_device operations are bundled together inside mscc_ocelot.ko and no longer part of the common library, there's no reason to export these symbols. Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot_net.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 702b42543fb7..5868ff753232 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -953,7 +953,6 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct notifier_block ocelot_netdevice_nb __read_mostly = { .notifier_call = ocelot_netdevice_event, }; -EXPORT_SYMBOL(ocelot_netdevice_nb); static int ocelot_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) @@ -975,7 +974,6 @@ static int ocelot_switchdev_event(struct notifier_block *unused, struct notifier_block ocelot_switchdev_nb __read_mostly = { .notifier_call = ocelot_switchdev_event, }; -EXPORT_SYMBOL(ocelot_switchdev_nb); static int ocelot_switchdev_blocking_event(struct notifier_block *unused, unsigned long event, void *ptr) @@ -1008,7 +1006,6 @@ static int ocelot_switchdev_blocking_event(struct notifier_block *unused, struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; -EXPORT_SYMBOL(ocelot_switchdev_blocking_nb); int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, @@ -1054,4 +1051,3 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, return err; } -EXPORT_SYMBOL(ocelot_probe_port); From ac6a86a5392f123671e309697ec7b8f0c6f01c62 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sun, 28 Jun 2020 00:07:47 +0200 Subject: [PATCH 0432/2056] 8390: Fix coding-style issues Fix some coding-style issues, including one which made the function pointers in the struct ei_device hard to understand. Signed-off-by: Armin Wolf Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/8390.h | 61 +++++++++++++++++++------------- 1 file changed, 36 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h index 529c728f334a..e52264465998 100644 --- a/drivers/net/ethernet/8390/8390.h +++ b/drivers/net/ethernet/8390/8390.h @@ -1,8 +1,10 @@ /* Generic NS8390 register definitions. */ + /* This file is part of Donald Becker's 8390 drivers, and is distributed - under the same license. Auto-loading of 8390.o only in v2.2 - Paul G. - Some of these names and comments originated from the Crynwr - packet drivers, which are distributed under the GPL. */ + * under the same license. Auto-loading of 8390.o only in v2.2 - Paul G. + * Some of these names and comments originated from the Crynwr + * packet drivers, which are distributed under the GPL. + */ #ifndef _8390_h #define _8390_h @@ -16,9 +18,9 @@ /* The 8390 specific per-packet-header format. */ struct e8390_pkt_hdr { - unsigned char status; /* status */ - unsigned char next; /* pointer to next packet. */ - unsigned short count; /* header + packet length in bytes */ + unsigned char status; /* status */ + unsigned char next; /* pointer to next packet. 
*/ + unsigned short count; /* header + packet length in bytes */ }; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -66,18 +68,24 @@ static inline struct net_device *alloc_eip_netdev(void) /* You have one of these per-board */ struct ei_device { const char *name; - void (*reset_8390)(struct net_device *); - void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int); - void (*block_output)(struct net_device *, int, const unsigned char *, int); - void (*block_input)(struct net_device *, int, struct sk_buff *, int); + void (*reset_8390)(struct net_device *dev); + void (*get_8390_hdr)(struct net_device *dev, + struct e8390_pkt_hdr *hdr, int ring_page); + void (*block_output)(struct net_device *dev, int count, + const unsigned char *buf, int start_page); + void (*block_input)(struct net_device *dev, int count, + struct sk_buff *skb, int ring_offset); unsigned long rmem_start; unsigned long rmem_end; void __iomem *mem; unsigned char mcfilter[8]; unsigned open:1; - unsigned word16:1; /* We have the 16-bit (vs 8-bit) version of the card. */ - unsigned bigendian:1; /* 16-bit big endian mode. Do NOT */ - /* set this on random 8390 clones! */ + unsigned word16:1; /* We have the 16-bit (vs 8-bit) + * version of the card. + */ + unsigned bigendian:1; /* 16-bit big endian mode. Do NOT + * set this on random 8390 clones! + */ unsigned txing:1; /* Transmit Active */ unsigned irqlock:1; /* 8390's intrs disabled when '1'. */ unsigned dmaing:1; /* Remote DMA Active */ @@ -115,12 +123,16 @@ struct ei_device { #define E8390_RXCONFIG (ei_status.rxcr_base | 0x04) #define E8390_RXOFF (ei_status.rxcr_base | 0x20) #else -#define E8390_RXCONFIG 0x4 /* EN0_RXCR: broadcasts, no multicast,errors */ -#define E8390_RXOFF 0x20 /* EN0_RXCR: Accept no packets */ +/* EN0_RXCR: broadcasts, no multicast,errors */ +#define E8390_RXCONFIG 0x4 +/* EN0_RXCR: Accept no packets */ +#define E8390_RXOFF 0x20 #endif -#define E8390_TXCONFIG 0x00 /* EN0_TXCR: Normal transmit mode */ -#define E8390_TXOFF 0x02 /* EN0_TXCR: Transmitter off */ +/* EN0_TXCR: Normal transmit mode */ +#define E8390_TXCONFIG 0x00 +/* EN0_TXCR: Transmitter off */ +#define E8390_TXOFF 0x02 /* Register accessed at EN_CMD, the 8390 base addr. */ @@ -134,17 +146,16 @@ struct ei_device { #define E8390_PAGE1 0x40 /* using the two high-order bits */ #define E8390_PAGE2 0x80 /* Page 3 is invalid. */ -/* - * Only generate indirect loads given a machine that needs them. - * - removed AMIGA_PCMCIA from this list, handled as ISA io now - * - the _p for generates no delay by default 8390p.c overrides this. +/* Only generate indirect loads given a machine that needs them. + * - removed AMIGA_PCMCIA from this list, handled as ISA io now + * - the _p for generates no delay by default 8390p.c overrides this. */ #ifndef ei_inb #define ei_inb(_p) inb(_p) -#define ei_outb(_v,_p) outb(_v,_p) +#define ei_outb(_v, _p) outb(_v, _p) #define ei_inb_p(_p) inb(_p) -#define ei_outb_p(_v,_p) outb(_v,_p) +#define ei_outb_p(_v, _p) outb(_v, _p) #endif #ifndef EI_SHIFT @@ -153,9 +164,9 @@ struct ei_device { #define E8390_CMD EI_SHIFT(0x00) /* The command register (for all pages) */ /* Page 0 register offsets. 
*/ -#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */ +#define EN0_CLDALO EI_SHIFT(0x01) /* Low byte of current local dma addr RD */ #define EN0_STARTPG EI_SHIFT(0x01) /* Starting page of ring bfr WR */ -#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */ +#define EN0_CLDAHI EI_SHIFT(0x02) /* High byte of current local dma addr RD */ #define EN0_STOPPG EI_SHIFT(0x02) /* Ending page +1 of ring bfr WR */ #define EN0_BOUNDARY EI_SHIFT(0x03) /* Boundary page of ring bfr RD WR */ #define EN0_TSR EI_SHIFT(0x04) /* Transmit status reg RD */ From 6fc3e68f5b35c4861b28733fa32f636db7188746 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sun, 28 Jun 2020 17:32:25 +0800 Subject: [PATCH 0433/2056] sctp: use list_is_singular in sctp_list_single_entry Use list_is_singular() instead of open-coding. Signed-off-by: Geliang Tang Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index f8bcb75bb044..e3bd198b00ae 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -412,7 +412,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) /* Tests if the list has one and only one entry. */ static inline int sctp_list_single_entry(struct list_head *head) { - return (head->next != head) && (head->next == head->prev); + return list_is_singular(head); } static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) From b8483ecaf72ee9059dcca5de969781028a550f89 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Sun, 28 Jun 2020 18:14:13 +0800 Subject: [PATCH 0434/2056] liquidio: use list_empty_careful in lio_list_delete_head Use list_empty_careful() instead of open-coding. Signed-off-by: Geliang Tang Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_network.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 50201fc86dcf..ebe56bd8849b 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -612,7 +612,7 @@ static inline struct list_head *lio_list_delete_head(struct list_head *root) { struct list_head *node; - if (root->prev == root && root->next == root) + if (list_empty_careful(root)) node = NULL; else node = root->next; From 7bcffde02152dd3cb180f6f3aef27e8586b2a905 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:04 +0300 Subject: [PATCH 0435/2056] net: ethernet: ti: am65-cpsw-nuss: restore vlan configuration while down/up The vlan configuration is not restored after interface down/up sequence. Steps to check: # ip link add link eth0 name eth0.100 type vlan id 100 # ifconfig eth0 down # ifconfig eth0 up This patch fixes it, restoring vlan ALE entries on .ndo_open(). Fixes: 93a76530316a ("net: ethernet: ti: introduce am65x/j721e gigabit eth subsystem driver") Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 1492648247d9..82d3b1f20890 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -223,6 +223,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev, u32 port_mask, unreg_mcast = 0; int ret; + if (!netif_running(ndev) || !vid) + return 0; + ret = pm_runtime_get_sync(common->dev); if (ret < 0) { pm_runtime_put_noidle(common->dev); @@ -246,6 +249,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev, struct am65_cpsw_common *common = am65_ndev_to_common(ndev); int ret; + if (!netif_running(ndev) || !vid) + return 0; + ret = pm_runtime_get_sync(common->dev); if (ret < 0) { pm_runtime_put_noidle(common->dev); @@ -571,6 +577,16 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev) return 0; } +static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) +{ + struct am65_cpsw_port *port = arg; + + if (!vdev) + return 0; + + return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid); +} + static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); @@ -644,6 +660,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) } } + /* restore vlan configurations */ + vlan_for_each(ndev, cpsw_restore_vlans, port); + phy_attached_info(port->slave.phy); phy_start(port->slave.phy); From 518240480601d8f7f533929e3debe0973e2fad2a Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:05 +0300 Subject: [PATCH 0436/2056] net: ethernet: ti: am65-cpsw: move to pf_p0_rx_ptype_rrobin init in probe The pf_p0_rx_ptype_rrobin is global parameter so move its initialization in probe. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 82d3b1f20890..89ea29a26c3a 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1894,8 +1894,6 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) netif_napi_add(port->ndev, &common->napi_rx, am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT); - common->pf_p0_rx_ptype_rrobin = false; - return ret; } @@ -2038,6 +2036,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) common->rx_flow_id_base = -1; init_completion(&common->tdown_complete); common->tx_ch_num = 1; + common->pf_p0_rx_ptype_rrobin = false; ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { From d6d0aeafb3afaca91148691377e021c31760fc6a Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:06 +0300 Subject: [PATCH 0437/2056] net: ethernet: ti: am65-cpsw-nuss: fix ports mac sl initialization The MAC SL has to be initialized for each port otherwise am65_cpsw_nuss_slave_disable_unused() will crash for disabled ports. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 89ea29a26c3a..7a921a480f29 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1768,6 +1768,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) common->cpsw_base + AM65_CPSW_NU_FRAM_BASE + (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1)); + port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); + if (IS_ERR(port->slave.mac_sl)) + return PTR_ERR(port->slave.mac_sl); + port->disabled = !of_device_is_available(port_np); if (port->disabled) continue; @@ -1811,10 +1815,6 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) return ret; } - port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base); - if (IS_ERR(port->slave.mac_sl)) - return PTR_ERR(port->slave.mac_sl); - mac_addr = of_get_mac_address(port_np); if (!IS_ERR(mac_addr)) { ether_addr_copy(port->slave.mac_addr, mac_addr); From 7d58d3ebe445699b610ccc5df432dcef84eb4374 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:07 +0300 Subject: [PATCH 0438/2056] net: ethernet: ti: am65-cpsw-ethtool: skip hw cfg when change p0-rx-ptype-rrobin Skip HW configuration when p0-rx-ptype-rrobin is changed as it will be done by .ndev_open(), Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index 8c4690f3ebcb..f7b33875a385 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -741,7 +741,6 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags) } common->pf_p0_rx_ptype_rrobin = rrobin; - am65_cpsw_nuss_set_p0_ptype(common); return 0; } From 3d0fda901c058844bcd72bc6098c6c86884d1877 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:08 +0300 Subject: [PATCH 0439/2056] net: ethernet: ti: am65-cpsw-ethtool: configured critical setting only when no running netdevs Ensure that critical setting can only be configured when there are no running netdevs - all ports are down. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index f7b33875a385..496dafb25128 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -445,7 +445,7 @@ static int am65_cpsw_set_channels(struct net_device *ndev, /* Check if interface is up. Can change the num queues when * the interface is down. 
*/ - if (netif_running(ndev)) + if (common->usage_count) return -EBUSY; am65_cpsw_nuss_remove_tx_chns(common); @@ -734,6 +734,9 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags) rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN); + if (common->usage_count) + return -EBUSY; + if (common->est_enabled && rrobin) { netdev_err(ndev, "p0-rx-ptype-rrobin flag conflicts with QOS\n"); From 38389aa6ba82b8d598fdb02f8907eca132ec2c1b Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 26 Jun 2020 21:17:09 +0300 Subject: [PATCH 0440/2056] net: ethernet: ti: am65-cpsw-nuss: enable am65x sr2.0 support The AM65x SR2.0 MCU CPSW has fixed errata i2027 "CPSW: CPSW Does Not Support CPPI Receive Checksum (Host to Ethernet) Offload Feature". This errata also fixed for J271E SoC. Use SOC bus data for K3 SoC identification and apply i2027 errata w/a only for the AM65x SR1.0 SoC. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 47 ++++++++++++++++++++---- drivers/net/ethernet/ti/am65-cpsw-nuss.h | 2 +- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 7a921a480f29..9fdcd90e8323 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -148,10 +149,11 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common) common->nuss_ver = readl(common->ss_base); common->cpsw_ver = readl(common->cpsw_base); dev_info(common->dev, - "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n", + "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n", common->nuss_ver, common->cpsw_ver, - common->port_num + 1); + common->port_num + 1, + common->pdata.quirks); } void am65_cpsw_nuss_adjust_link(struct net_device *ndev) @@ -1877,7 +1879,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common) port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; /* Disable TX checksum offload by default due to HW bug */ - if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) + if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) port->ndev->features &= ~NETIF_F_HW_CSUM; ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats); @@ -1981,21 +1983,50 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) } } +struct am65_cpsw_soc_pdata { + u32 quirks_dis; +}; + +static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = { + .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, +}; + +static const struct soc_device_attribute am65_cpsw_socinfo[] = { + { .family = "AM65X", + .revision = "SR2.0", + .data = &am65x_soc_sr2_0 + }, + {/* sentinel */} +}; + static const struct am65_cpsw_pdata am65x_sr1_0 = { .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, }; -static const struct am65_cpsw_pdata j721e_sr1_0 = { +static const struct am65_cpsw_pdata j721e_pdata = { .quirks = 0, }; static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { - { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 }, - { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 }, + { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0}, + { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); +static void am65_cpsw_nuss_apply_socinfo(struct 
am65_cpsw_common *common) +{ + const struct soc_device_attribute *soc; + + soc = soc_device_match(am65_cpsw_socinfo); + if (soc && soc->data) { + const struct am65_cpsw_soc_pdata *socdata = soc->data; + + /* disable quirks */ + common->pdata.quirks &= ~socdata->quirks_dis; + } +} + static int am65_cpsw_nuss_probe(struct platform_device *pdev) { struct cpsw_ale_params ale_params = { 0 }; @@ -2014,7 +2045,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev) of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); if (!of_id) return -EINVAL; - common->pdata = of_id->data; + common->pdata = *(const struct am65_cpsw_pdata *)of_id->data; + + am65_cpsw_nuss_apply_socinfo(common); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); common->ss_base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 9faf4fb1409b..94f666ea0e53 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -82,7 +82,7 @@ struct am65_cpsw_pdata { struct am65_cpsw_common { struct device *dev; struct device *mdio_dev; - const struct am65_cpsw_pdata *pdata; + struct am65_cpsw_pdata pdata; void __iomem *ss_base; void __iomem *cpsw_base; From aebe4426ccaa4838f36ea805cdf7d76503e65117 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:25 +0300 Subject: [PATCH 0441/2056] net: sched: Pass root lock to Qdisc_ops.enqueue A following patch introduces qevents, points in qdisc algorithm where packet can be processed by user-defined filters. Should this processing lead to a situation where a new packet is to be enqueued on the same port, holding the root lock would lead to deadlocks. To solve the issue, qevent handler needs to unlock and relock the root lock when necessary. To that end, add the root lock argument to the qdisc op enqueue, and propagate throughout. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- include/net/sch_generic.h | 6 ++++-- net/core/dev.c | 4 ++-- net/sched/sch_atm.c | 4 ++-- net/sched/sch_blackhole.c | 2 +- net/sched/sch_cake.c | 2 +- net/sched/sch_cbq.c | 4 ++-- net/sched/sch_cbs.c | 18 +++++++++--------- net/sched/sch_choke.c | 2 +- net/sched/sch_codel.c | 2 +- net/sched/sch_drr.c | 4 ++-- net/sched/sch_dsmark.c | 4 ++-- net/sched/sch_etf.c | 2 +- net/sched/sch_ets.c | 4 ++-- net/sched/sch_fifo.c | 6 +++--- net/sched/sch_fq.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_generic.c | 4 ++-- net/sched/sch_gred.c | 2 +- net/sched/sch_hfsc.c | 6 +++--- net/sched/sch_hhf.c | 2 +- net/sched/sch_htb.c | 4 ++-- net/sched/sch_multiq.c | 4 ++-- net/sched/sch_netem.c | 8 ++++---- net/sched/sch_pie.c | 2 +- net/sched/sch_plug.c | 2 +- net/sched/sch_prio.c | 6 +++--- net/sched/sch_qfq.c | 4 ++-- net/sched/sch_red.c | 4 ++-- net/sched/sch_sfb.c | 4 ++-- net/sched/sch_sfq.c | 2 +- net/sched/sch_skbprio.c | 2 +- net/sched/sch_taprio.c | 4 ++-- net/sched/sch_tbf.c | 10 +++++----- net/sched/sch_teql.c | 4 ++-- 35 files changed, 73 insertions(+), 71 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c510b03b9751..fceb3d63c925 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -57,6 +57,7 @@ struct qdisc_skb_head { struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *sch); unsigned int flags; @@ -241,6 +242,7 @@ struct Qdisc_ops { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); @@ -788,11 +790,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, #endif } -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { qdisc_calculate_pkt_len(skb, sch); - return sch->enqueue(skb, sch, to_free); + return sch->enqueue(skb, sch, root_lock, to_free); } static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, diff --git a/net/core/dev.c b/net/core/dev.c index 3a46b86cbd67..c02bae927812 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_calculate_pkt_len(skb, q); if (q->flags & TCQ_F_NOLOCK) { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK; qdisc_run(q); if (unlikely(to_free)) @@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index ee12ca9f55b4..fb6b16c4e46d 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct 
atm_qdisc_data *p = qdisc_priv(sch); @@ -432,7 +432,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, #endif } - ret = qdisc_enqueue(skb, flow->q, to_free); + ret = qdisc_enqueue(skb, flow->q, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index a7f7667ae984..187644657c4f 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -13,7 +13,7 @@ #include #include -static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 65a95cb094e8..e9c502dd29a2 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1687,7 +1687,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, static void cake_reconfigure(struct Qdisc *sch); -static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cake_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 39b427dc7512..052d4a1af69a 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) } static int -cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, return ret; } - ret = qdisc_enqueue(skb, cl->q, to_free); + ret = qdisc_enqueue(skb, cl->q, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; cbq_mark_toplevel(q, cl); diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 2eaac2ff380f..7af15ebe07f7 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -77,7 +77,7 @@ struct cbs_sched_data { s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; - int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, + int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; @@ -85,13 +85,13 @@ struct cbs_sched_data { }; static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, - struct Qdisc *child, + struct Qdisc *child, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; - err = child->ops->enqueue(skb, child, to_free); + err = child->ops->enqueue(skb, child, root_lock, to_free); if (err != NET_XMIT_SUCCESS) return err; @@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; } -static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; - return cbs_child_enqueue(skb, sch, qdisc, to_free); + return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); } -static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, 
spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); @@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, q->last = ktime_get_ns(); } - return cbs_child_enqueue(skb, sch, qdisc, to_free); + return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); } -static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); - return q->enqueue(skb, sch, to_free); + return q->enqueue(skb, sch, root_lock, to_free); } /* timediff is in ns, slope is in bytes/s */ diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index bd618b00d319..baf3faee31aa 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return choke_match_flow(oskb, nskb); } -static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 30169b3adbbb..1d94837abdd8 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) return skb; } -static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct codel_sched_data *q; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 07a2b0b35495..0d5c9a8ec61d 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } -static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 05605b30bef3..fbe49fffcdbb 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, } } - err = qdisc_enqueue(skb, p->q, to_free); + err = qdisc_enqueue(skb, p->q, root_lock, to_free); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index c48f91075b5c..7a7c50a68115 100644 --- a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) } static int 
etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, - struct sk_buff **to_free) + spinlock_t *root_lock, struct sk_buff **to_free) { struct etf_sched_data *q = qdisc_priv(sch); struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index a87e9159338c..373dc5855d4e 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch, return &q->classes[band]; } -static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index a579a4131d22..b4da5b624ad8 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -16,7 +16,7 @@ /* 1 band FIFO pseudo-"scheduler" */ -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) @@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, return qdisc_drop(skb, sch, to_free); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { if (likely(sch->q.qlen < sch->limit)) @@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, return qdisc_drop(skb, sch, to_free); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int prev_backlog; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index 2fb76fc0cc31..a90d745c41e0 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb, return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon)); } -static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 459a784056c0..6bf979f95509 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, return idx; } -static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_codel_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index fb760cee824e..a27a250ab8f9 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow, skb->next = NULL; } -static int 
fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct fq_pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 265a61d011df..715cde1df9e4 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off); cheaper. */ -static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, struct sk_buff **to_free) { __qdisc_drop(skb, to_free); @@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, return &priv->q[band]; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, struct sk_buff **to_free) { int band = prio2band[skb->priority & TC_PRIO_MAX]; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 8599c6f31b05..7d67c6cd6605 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table) return false; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct gred_sched_data *q = NULL; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 433f2190960f..7f6670044f0a 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) return -1; } -static int -hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; @@ -1545,7 +1545,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index 420ede875322..ddc6bf1d85d0 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free) return bucket - q->buckets; } -static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 8184c87da8be..52fc513688b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) cl->prio_activity = 0; } -static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { int uninitialized_var(ret); @@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, + } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, 
root_lock, to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 1330ad224931..648611f5c105 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct Qdisc *qdisc; @@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } #endif - ret = qdisc_enqueue(skb, qdisc, to_free); + ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 84f82771cdf5..8fb17483a34f 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); @@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ q->duplicate = 0; - rootq->enqueue(skb2, rootq, to_free); + rootq->enqueue(skb2, rootq, root_lock, to_free); q->duplicate = dupsave; rc_drop = NET_XMIT_SUCCESS; } @@ -604,7 +604,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch, to_free); + rc = qdisc_enqueue(segs, sch, root_lock, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@ -720,7 +720,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) struct sk_buff *to_free = NULL; int err; - err = qdisc_enqueue(skb, q->qdisc, &to_free); + err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free); kfree_skb_list(to_free); if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err)) { diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index c65077f0c0f3..b305313b64e3 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, } EXPORT_SYMBOL_GPL(pie_drop_early); -static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index cbc2ebca4548..e5f8b4769b4d 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -84,7 +84,7 @@ struct plug_sched_data { u32 pkts_to_release; }; -static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 647941702f9f..a3e187f2603c 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return q->queues[band]; } -static int 
-prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct Qdisc *qdisc; @@ -83,7 +83,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } #endif - ret = qdisc_enqueue(skb, qdisc, to_free); + ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); if (ret == NET_XMIT_SUCCESS) { sch->qstats.backlog += len; sch->q.qlen++; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 0b05ac7c848e..ede854516825 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) return agg; } -static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; @@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, to_free); + err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 555a1b9e467f..6ace7d757e8b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -65,7 +65,7 @@ static int red_use_nodrop(struct red_sched_data *q) return q->flags & TC_RED_NODROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct red_sched_data *q = qdisc_priv(sch); @@ -118,7 +118,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, break; } - ret = qdisc_enqueue(skb, child, to_free); + ret = qdisc_enqueue(skb, child, root_lock, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 4074c50ac3d7..d2a6e78262bb 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, return false; } -static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { @@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, } enqueue: - ret = qdisc_enqueue(skb, child, to_free); + ret = qdisc_enqueue(skb, child, root_lock, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 5a6def5e4e6d..46cdefd69e44 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q) } static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 7a5e4c454715..f75f237c4436 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -65,7 
+65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q) return SKBPRIO_MAX_PRIORITY - 1; } -static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e981992634dd..daef2ff60a98 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -410,7 +410,7 @@ static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch) return txtime; } -static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct taprio_sched *q = qdisc_priv(sch); @@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; - return qdisc_enqueue(skb, child, to_free); + return qdisc_enqueue(skb, child, root_lock, to_free); } static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 78e79029dc63..c3eb5cdb83a8 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch) /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ -static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, +static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; - ret = qdisc_enqueue(segs, q->qdisc, to_free); + ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); @@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, return nb > 0 ? 
NET_XMIT_SUCCESS : NET_XMIT_DROP; } -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_validate_mac_len(skb, q->max_size)) - return tbf_segment(skb, sch, to_free); + return tbf_segment(skb, sch, root_lock, to_free); return qdisc_drop(skb, sch, to_free); } - ret = qdisc_enqueue(skb, q->qdisc, to_free); + ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 689ef6f3ded8..511964653476 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -72,8 +72,8 @@ struct teql_sched_data { /* "teql*" qdisc routines */ -static int -teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) +static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + struct sk_buff **to_free) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); From 3625750f05ecce21a0fce429c1ff85acfffb461b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:26 +0300 Subject: [PATCH 0442/2056] net: sched: Introduce helpers for qevent blocks Qevents are attach points for TC blocks, where filters can be put that are executed when "interesting events" take place in a qdisc. The data to keep and the functions to invoke to maintain a qevent will be largely the same between qevents. Therefore introduce sched-wide helpers for qevent management. Currently, similarly to ingress and egress blocks of clsact pseudo-qdisc, blocks attachment cannot be changed after the qdisc is created. To that end, add a helper tcf_qevent_validate_change(), which verifies whether block index attribute is not attached, or if it is, whether its value matches the current one (i.e. there is no material change). The function tcf_qevent_handle() should be invoked when qdisc hits the "interesting event" corresponding to a block. This function releases root lock for the duration of executing the attached filters, to allow packets generated through user actions (notably mirred) to be reinserted to the same qdisc tree. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 49 +++++++++++++++++ net/sched/cls_api.c | 119 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 168 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ff017e5b3ea2..690a7f49c8f9 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -32,6 +32,12 @@ struct tcf_block_ext_info { u32 block_index; }; +struct tcf_qevent { + struct tcf_block *block; + struct tcf_block_ext_info info; + struct tcf_proto __rcu *filter_chain; +}; + struct tcf_block_cb; bool tcf_queue_work(struct rcu_work *rwork, work_func_t func); @@ -553,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, void *cb_priv, u32 *flags, unsigned int *in_hw_count); unsigned int tcf_exts_num_actions(struct tcf_exts *exts); +#ifdef CONFIG_NET_CLS_ACT +int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack); +void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch); +int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack); +struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret); +int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe); +#else +static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) +{ +} + +static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + return 0; +} + +static inline struct sk_buff * +tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret) +{ + return skb; +} + +static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) +{ + return 0; +} +#endif + struct tc_cls_u32_knode { struct tcf_exts *exts; struct tcf_result *res; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 5bfa6b985bb8..1b14d5f57e7f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3748,6 +3748,125 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts) } EXPORT_SYMBOL(tcf_exts_num_actions); +#ifdef CONFIG_NET_CLS_ACT +static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, + u32 *p_block_index, + struct netlink_ext_ack *extack) +{ + *p_block_index = nla_get_u32(block_index_attr); + if (!*p_block_index) { + NL_SET_ERR_MSG(extack, "Block number may not be zero"); + return -EINVAL; + } + + return 0; +} + +int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, + enum flow_block_binder_type binder_type, + struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + u32 block_index; + int err; + + if (!block_index_attr) + return 0; + + err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); + if (err) + return err; + + if (!block_index) + return 0; + + qe->info.binder_type = binder_type; + qe->info.chain_head_change = tcf_chain_head_change_dflt; + qe->info.chain_head_change_priv = &qe->filter_chain; + qe->info.block_index = block_index; + + return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); +} 
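/*
 * A minimal usage sketch of these helpers, using placeholder names
 * (qe_foo, TCA_FOO_BLOCK); sch_red wires its early_drop and mark
 * qevents following this pattern later in the series:
 *
 *   .init:     err = tcf_qevent_init(&q->qe_foo, sch,
 *                                    FLOW_BLOCK_BINDER_TYPE_...,
 *                                    tb[TCA_FOO_BLOCK], extack);
 *   .change:   err = tcf_qevent_validate_change(&q->qe_foo,
 *                                               tb[TCA_FOO_BLOCK], extack);
 *   .enqueue:  skb = tcf_qevent_handle(&q->qe_foo, sch, skb, root_lock,
 *                                      to_free, &ret);
 *              if (!skb)
 *                      return NET_XMIT_CN | ret;
 *   .dump:     tcf_qevent_dump(skb, TCA_FOO_BLOCK, &q->qe_foo);
 *   .destroy:  tcf_qevent_destroy(&q->qe_foo, sch);
 */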
+EXPORT_SYMBOL(tcf_qevent_init); + +void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) +{ + if (qe->info.block_index) + tcf_block_put_ext(qe->block, sch, &qe->info); +} +EXPORT_SYMBOL(tcf_qevent_destroy); + +int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, + struct netlink_ext_ack *extack) +{ + u32 block_index; + int err; + + if (!block_index_attr) + return 0; + + err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); + if (err) + return err; + + /* Bounce newly-configured block or change in block. */ + if (block_index != qe->info.block_index) { + NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL(tcf_qevent_validate_change); + +struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, + spinlock_t *root_lock, struct sk_buff **to_free, int *ret) +{ + struct tcf_result cl_res; + struct tcf_proto *fl; + + if (!qe->info.block_index) + return skb; + + fl = rcu_dereference_bh(qe->filter_chain); + + if (root_lock) + spin_unlock(root_lock); + + switch (tcf_classify(skb, fl, &cl_res, false)) { + case TC_ACT_SHOT: + qdisc_qstats_drop(sch); + __qdisc_drop(skb, to_free); + *ret = __NET_XMIT_BYPASS; + return NULL; + case TC_ACT_STOLEN: + case TC_ACT_QUEUED: + case TC_ACT_TRAP: + __qdisc_drop(skb, to_free); + *ret = __NET_XMIT_STOLEN; + return NULL; + case TC_ACT_REDIRECT: + skb_do_redirect(skb); + *ret = __NET_XMIT_STOLEN; + return NULL; + } + + if (root_lock) + spin_lock(root_lock); + + return skb; +} +EXPORT_SYMBOL(tcf_qevent_handle); + +int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) +{ + if (!qe->info.block_index) + return 0; + return nla_put_u32(skb, attr_name, qe->info.block_index); +} +EXPORT_SYMBOL(tcf_qevent_dump); +#endif + static __net_init int tcf_net_init(struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); From 65545ea24998bb9aab1ce713a67c693dc7a947ec Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:27 +0300 Subject: [PATCH 0443/2056] net: sched: sch_red: Split init and change callbacks In the following patches, RED will get two qevents. The implementation will be clearer if the callback for change is not a pure subset of the callback for init. Split the two and promote attribute parsing to the callbacks themselves from the common code, because it will be handy there. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- net/sched/sch_red.c | 42 ++++++++++++++++++++++++++++++------------ 1 file changed, 30 insertions(+), 12 deletions(-) diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 6ace7d757e8b..225ce370e5a8 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -215,12 +215,11 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS), }; -static int red_change(struct Qdisc *sch, struct nlattr *opt, - struct netlink_ext_ack *extack) +static int __red_change(struct Qdisc *sch, struct nlattr **tb, + struct netlink_ext_ack *extack) { struct Qdisc *old_child = NULL, *child = NULL; struct red_sched_data *q = qdisc_priv(sch); - struct nlattr *tb[TCA_RED_MAX + 1]; struct nla_bitfield32 flags_bf; struct tc_red_qopt *ctl; unsigned char userbits; @@ -228,14 +227,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, int err; u32 max_P; - if (opt == NULL) - return -EINVAL; - - err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy, - NULL); - if (err < 0) - return err; - if (tb[TCA_RED_PARMS] == NULL || tb[TCA_RED_STAB] == NULL) return -EINVAL; @@ -323,11 +314,38 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct red_sched_data *q = qdisc_priv(sch); + struct nlattr *tb[TCA_RED_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy, + extack); + if (err < 0) + return err; q->qdisc = &noop_qdisc; q->sch = sch; timer_setup(&q->adapt_timer, red_adaptative_timer, 0); - return red_change(sch, opt, extack); + return __red_change(sch, tb, extack); +} + +static int red_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[TCA_RED_MAX + 1]; + int err; + + if (!opt) + return -EINVAL; + + err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy, + extack); + if (err < 0) + return err; + + return __red_change(sch, tb, extack); } static int red_dump_offload_stats(struct Qdisc *sch) From aee9caa03fc3c8b02f8f31824354d85f30e562e0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:28 +0300 Subject: [PATCH 0444/2056] net: sched: sch_red: Add qevents "early_drop" and "mark" In order to allow acting on dropped and/or ECN-marked packets, add two new qevents to the RED qdisc: "early_drop" and "mark". Filters attached at "early_drop" block are executed as packets are early-dropped, those attached at the "mark" block are executed as packets are ECN-marked. Two new attributes are introduced: TCA_RED_EARLY_DROP_BLOCK with the block index for the "early_drop" qevent, and TCA_RED_MARK_BLOCK for the "mark" qevent. Absence of these attributes signifies "don't care": no block is allocated in that case, or the existing blocks are left intact in case of the change callback. For purposes of offloading, blocks attached to these qevents appear with newly-introduced binder types, FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP and FLOW_BLOCK_BINDER_TYPE_RED_MARK. Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- include/net/flow_offload.h | 2 ++ include/uapi/linux/pkt_sched.h | 2 ++ net/sched/sch_red.c | 58 ++++++++++++++++++++++++++++++++-- 3 files changed, 60 insertions(+), 2 deletions(-) diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3bafb5124ac0..3e793ac66baf 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -424,6 +424,8 @@ enum flow_block_binder_type { FLOW_BLOCK_BINDER_TYPE_UNSPEC, FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS, FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP, + FLOW_BLOCK_BINDER_TYPE_RED_MARK, }; struct flow_block { diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index a95f3ae7ab37..9e7c2c607845 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -257,6 +257,8 @@ enum { TCA_RED_STAB, TCA_RED_MAX_P, TCA_RED_FLAGS, /* bitfield32 */ + TCA_RED_EARLY_DROP_BLOCK, /* u32 */ + TCA_RED_MARK_BLOCK, /* u32 */ __TCA_RED_MAX, }; diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 225ce370e5a8..de2be4d04ed6 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -46,6 +46,8 @@ struct red_sched_data { struct red_vars vars; struct red_stats stats; struct Qdisc *qdisc; + struct tcf_qevent qe_early_drop; + struct tcf_qevent qe_mark; }; #define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP) @@ -92,6 +94,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.prob_mark++; + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { q->stats.prob_drop++; goto congestion_drop; @@ -109,6 +114,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.forced_mark++; + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { q->stats.forced_drop++; goto congestion_drop; @@ -129,6 +137,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return ret; congestion_drop: + skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret); + if (!skb) + return NET_XMIT_CN | ret; + qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; } @@ -202,6 +214,8 @@ static void red_destroy(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); + tcf_qevent_destroy(&q->qe_mark, sch); + tcf_qevent_destroy(&q->qe_early_drop, sch); del_timer_sync(&q->adapt_timer); red_offload(sch, false); qdisc_put(q->qdisc); @@ -213,6 +227,8 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, [TCA_RED_MAX_P] = { .type = NLA_U32 }, [TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS), + [TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 }, + [TCA_RED_MARK_BLOCK] = { .type = NLA_U32 }, }; static int __red_change(struct Qdisc *sch, struct nlattr **tb, @@ -328,12 +344,38 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, q->qdisc = &noop_qdisc; q->sch = sch; timer_setup(&q->adapt_timer, red_adaptative_timer, 0); - return __red_change(sch, tb, extack); + + err = __red_change(sch, tb, extack); + if (err) + return err; + + err = tcf_qevent_init(&q->qe_early_drop, sch, + FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP, + tb[TCA_RED_EARLY_DROP_BLOCK], extack); + if (err) + goto err_early_drop_init; + + err = tcf_qevent_init(&q->qe_mark, sch, + 
FLOW_BLOCK_BINDER_TYPE_RED_MARK, + tb[TCA_RED_MARK_BLOCK], extack); + if (err) + goto err_mark_init; + + return 0; + +err_mark_init: + tcf_qevent_destroy(&q->qe_early_drop, sch); +err_early_drop_init: + del_timer_sync(&q->adapt_timer); + red_offload(sch, false); + qdisc_put(q->qdisc); + return err; } static int red_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { + struct red_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_RED_MAX + 1]; int err; @@ -345,6 +387,16 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt, if (err < 0) return err; + err = tcf_qevent_validate_change(&q->qe_early_drop, + tb[TCA_RED_EARLY_DROP_BLOCK], extack); + if (err) + return err; + + err = tcf_qevent_validate_change(&q->qe_mark, + tb[TCA_RED_MARK_BLOCK], extack); + if (err) + return err; + return __red_change(sch, tb, extack); } @@ -389,7 +441,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) || nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) || nla_put_bitfield32(skb, TCA_RED_FLAGS, - q->flags, TC_RED_SUPPORTED_FLAGS)) + q->flags, TC_RED_SUPPORTED_FLAGS) || + tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) || + tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop)) goto nla_put_failure; return nla_nest_end(skb, opts); From 6cf0291f95172db68d8a283854389a1966e43c65 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 27 Jun 2020 01:45:29 +0300 Subject: [PATCH 0445/2056] selftests: forwarding: Add a RED test for SW datapath This test is inspired by the mlxsw RED selftest. It is much simpler to set up (also because there is no point in testing PRIO / RED encapsulation). It tests bare RED, ECN and ECN+nodrop modes of operation. On top of that it tests RED early_drop and mark qevents. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../selftests/net/forwarding/sch_red.sh | 492 ++++++++++++++++++ 1 file changed, 492 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/sch_red.sh diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh new file mode 100755 index 000000000000..e714bae473fb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/sch_red.sh @@ -0,0 +1,492 @@ +# SPDX-License-Identifier: GPL-2.0 + +# This test sends one stream of traffic from H1 through a TBF shaper, to a RED +# within TBF shaper on $swp3. The two shapers have the same configuration, and +# thus the resulting stream should fill all available bandwidth on the latter +# shaper. A second stream is sent from H2 also via $swp3, and used to inject +# additional traffic. Since all available bandwidth is taken, this traffic has +# to go to backlog. 
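# A minimal sketch of the qevent wiring the tests below depend on, using the
# block index (10) and dummy device (_drop_test) that this script sets up:
#
#   tc qdisc replace dev $swp3 parent 1:1 handle 11: red \
#           limit 1M avpkt $PKTSZ probability 1 \
#           min $BACKLOG max $((BACKLOG + 1)) burst 38 \
#           qevent early_drop block 10
#   tc filter add block 10 pref 1234 handle 102 matchall skip_hw \
#           action mirred egress mirror dev _drop_test
#
# Packets that RED early-drops (or ECN-marks, with "qevent mark block 10")
# are then mirrored to _drop_test, whose tx counter get_nmirrored() reads.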
+# +# +--------------------------+ +--------------------------+ +# | H1 | | H2 | +# | + $h1 | | + $h2 | +# | | 192.0.2.1/28 | | | 192.0.2.2/28 | +# | | TBF 10Mbps | | | | +# +-----|--------------------+ +-----|--------------------+ +# | | +# +-----|------------------------------------------------|--------------------+ +# | SW | | | +# | +--|------------------------------------------------|----------------+ | +# | | + $swp1 + $swp2 | | +# | | BR | | +# | | | | +# | | + $swp3 | | +# | | | TBF 10Mbps / RED | | +# | +--------------------------------|-----------------------------------+ | +# | | | +# +-----------------------------------|---------------------------------------+ +# | +# +-----|--------------------+ +# | H3 | | +# | + $h1 | +# | 192.0.2.3/28 | +# | | +# +--------------------------+ + +ALL_TESTS=" + ping_ipv4 + ecn_test + ecn_nodrop_test + red_test + red_qevent_test + ecn_qevent_test +" + +NUM_NETIFS=6 +CHECK_TC="yes" +source lib.sh + +BACKLOG=30000 +PKTSZ=1400 + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 + mtu_set $h1 10000 + tc qdisc replace dev $h1 root handle 1: tbf \ + rate 10Mbit burst 10K limit 1M +} + +h1_destroy() +{ + tc qdisc del dev $h1 root + mtu_restore $h1 + simple_if_fini $h1 192.0.2.1/28 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.2/28 + mtu_set $h2 10000 +} + +h2_destroy() +{ + mtu_restore $h2 + simple_if_fini $h2 192.0.2.2/28 +} + +h3_create() +{ + simple_if_init $h3 192.0.2.3/28 + mtu_set $h3 10000 +} + +h3_destroy() +{ + mtu_restore $h3 + simple_if_fini $h3 192.0.2.3/28 +} + +switch_create() +{ + ip link add dev br up type bridge + ip link set dev $swp1 up master br + ip link set dev $swp2 up master br + ip link set dev $swp3 up master br + + mtu_set $swp1 10000 + mtu_set $swp2 10000 + mtu_set $swp3 10000 + + tc qdisc replace dev $swp3 root handle 1: tbf \ + rate 10Mbit burst 10K limit 1M + ip link add name _drop_test up type dummy +} + +switch_destroy() +{ + ip link del dev _drop_test + tc qdisc del dev $swp3 root + + mtu_restore $h3 + mtu_restore $h2 + mtu_restore $h1 + + ip link set dev $swp3 down nomaster + ip link set dev $swp2 down nomaster + ip link set dev $swp1 down nomaster + ip link del dev br +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + h2=${NETIFS[p3]} + swp2=${NETIFS[p4]} + + swp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + h3_mac=$(mac_get $h3) + + vrf_prepare + + h1_create + h2_create + h3_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h3_destroy + h2_destroy + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.3 " from host 1" + ping_test $h2 192.0.2.3 " from host 2" +} + +get_qdisc_backlog() +{ + qdisc_stats_get $swp3 11: .backlog +} + +get_nmarked() +{ + qdisc_stats_get $swp3 11: .marked +} + +get_qdisc_npackets() +{ + qdisc_stats_get $swp3 11: .packets +} + +get_nmirrored() +{ + link_stats_get _drop_test tx packets +} + +send_packets() +{ + local proto=$1; shift + local pkts=$1; shift + + $MZ $h2 -p $PKTSZ -a own -b $h3_mac -A 192.0.2.2 -B 192.0.2.3 -t $proto -q -c $pkts "$@" +} + +# This sends traffic in an attempt to build a backlog of $size. Returns 0 on +# success. After 10 failed attempts it bails out and returns 1. It dumps the +# backlog size to stdout. 
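# Typical use, as in the tests below ("limit" being the configured RED limit
# in bytes):
#
#   backlog=$(build_backlog $((2 * limit / 3)) udp)
#   check_err $? "Could not build the requested backlog"
#
# The achieved backlog is read from stdout; the exit status says whether the
# requested size was actually reached.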
+build_backlog() +{ + local size=$1; shift + local proto=$1; shift + + local i=0 + + while :; do + local cur=$(get_qdisc_backlog) + local diff=$((size - cur)) + local pkts=$(((diff + PKTSZ - 1) / PKTSZ)) + + if ((cur >= size)); then + echo $cur + return 0 + elif ((i++ > 10)); then + echo $cur + return 1 + fi + + send_packets $proto $pkts "$@" + sleep 1 + done +} + +check_marking() +{ + local cond=$1; shift + + local npackets_0=$(get_qdisc_npackets) + local nmarked_0=$(get_nmarked) + sleep 5 + local npackets_1=$(get_qdisc_npackets) + local nmarked_1=$(get_nmarked) + + local nmarked_d=$((nmarked_1 - nmarked_0)) + local npackets_d=$((npackets_1 - npackets_0)) + local pct=$((100 * nmarked_d / npackets_d)) + + echo $pct + ((pct $cond)) +} + +check_mirroring() +{ + local cond=$1; shift + + local npackets_0=$(get_qdisc_npackets) + local nmirrored_0=$(get_nmirrored) + sleep 5 + local npackets_1=$(get_qdisc_npackets) + local nmirrored_1=$(get_nmirrored) + + local nmirrored_d=$((nmirrored_1 - nmirrored_0)) + local npackets_d=$((npackets_1 - npackets_0)) + local pct=$((100 * nmirrored_d / npackets_d)) + + echo $pct + ((pct $cond)) +} + +ecn_test_common() +{ + local name=$1; shift + local limit=$1; shift + local backlog + local pct + + # Build the below-the-limit backlog using UDP. We could use TCP just + # fine, but this way we get a proof that UDP is accepted when queue + # length is below the limit. The main stream is using TCP, and if the + # limit is misconfigured, we would see this traffic being ECN marked. + RET=0 + backlog=$(build_backlog $((2 * limit / 3)) udp) + check_err $? "Could not build the requested backlog" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "$name backlog < limit" + + # Now push TCP, because non-TCP traffic would be early-dropped after the + # backlog crosses the limit, and we want to make sure that the backlog + # is above the limit. + RET=0 + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_marking ">= 95") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected >= 95." + log_test "$name backlog > limit" +} + +do_ecn_test() +{ + local limit=$1; shift + local name=ECN + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + ecn_test_common "$name" $limit + + # Up there we saw that UDP gets accepted when backlog is below the + # limit. Now that it is above, it should all get dropped, and backlog + # building should fail. + RET=0 + build_backlog $((2 * limit)) udp >/dev/null + check_fail $? "UDP traffic went into backlog instead of being early-dropped" + log_test "$name backlog > limit: UDP early-dropped" + + stop_traffic + sleep 1 +} + +do_ecn_nodrop_test() +{ + local limit=$1; shift + local name="ECN nodrop" + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + ecn_test_common "$name" $limit + + # Up there we saw that UDP gets accepted when backlog is below the + # limit. Now that it is above, in nodrop mode, make sure it goes to + # backlog as well. + RET=0 + build_backlog $((2 * limit)) udp >/dev/null + check_err $? 
"UDP traffic was early-dropped instead of getting into backlog" + log_test "$name backlog > limit: UDP not dropped" + + stop_traffic + sleep 1 +} + +do_red_test() +{ + local limit=$1; shift + local backlog + local pct + + # Use ECN-capable TCP to verify there's no marking even though the queue + # is above limit. + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + + # Pushing below the queue limit should work. + RET=0 + backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "RED backlog < limit" + + # Pushing above should not. + RET=0 + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_fail $? "Traffic went into backlog instead of being early-dropped" + pct=$(check_marking "== 0") + check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0." + log_test "RED backlog > limit" + + stop_traffic + sleep 1 +} + +do_red_qevent_test() +{ + local limit=$1; shift + local backlog + local base + local now + local pct + + RET=0 + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t udp -q & + sleep 1 + + tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ + action mirred egress mirror dev _drop_test + + # Push to the queue until it's at the limit. The configured limit is + # rounded by the qdisc, so this is the best we can do to get to the real + # limit. + build_backlog $((3 * limit / 2)) udp >/dev/null + + base=$(get_nmirrored) + send_packets udp 100 + sleep 1 + now=$(get_nmirrored) + ((now >= base + 100)) + check_err $? "Dropped packets not observed: 100 expected, $((now - base)) seen" + + tc filter del block 10 pref 1234 handle 102 matchall + + base=$(get_nmirrored) + send_packets udp 100 + sleep 1 + now=$(get_nmirrored) + ((now == base)) + check_err $? "Dropped packets still observed: 0 expected, $((now - base)) seen" + + log_test "RED early_dropped packets mirrored" + + stop_traffic + sleep 1 +} + +do_ecn_qevent_test() +{ + local limit=$1; shift + local name=ECN + + RET=0 + + $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \ + -a own -b $h3_mac -t tcp -q tos=0x01 & + sleep 1 + + tc filter add block 10 pref 1234 handle 102 matchall skip_hw \ + action mirred egress mirror dev _drop_test + + backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_mirroring "== 0") + check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected == 0." + + backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01) + check_err $? "Could not build the requested backlog" + pct=$(check_mirroring ">= 95") + check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected >= 95." 
+ + tc filter del block 10 pref 1234 handle 102 matchall + + log_test "ECN marked packets mirrored" + + stop_traffic + sleep 1 +} + +install_qdisc() +{ + local -a args=("$@") + + tc qdisc replace dev $swp3 parent 1:1 handle 11: red \ + limit 1M avpkt $PKTSZ probability 1 \ + min $BACKLOG max $((BACKLOG + 1)) burst 38 "${args[@]}" + sleep 1 +} + +uninstall_qdisc() +{ + tc qdisc del dev $swp3 parent 1:1 +} + +ecn_test() +{ + install_qdisc ecn + do_ecn_test $BACKLOG + uninstall_qdisc +} + +ecn_nodrop_test() +{ + install_qdisc ecn nodrop + do_ecn_nodrop_test $BACKLOG + uninstall_qdisc +} + +red_test() +{ + install_qdisc + do_red_test $BACKLOG + uninstall_qdisc +} + +red_qevent_test() +{ + install_qdisc qevent early_drop block 10 + do_red_qevent_test $BACKLOG + uninstall_qdisc +} + +ecn_qevent_test() +{ + install_qdisc ecn qevent mark block 10 + do_ecn_qevent_test $BACKLOG + uninstall_qdisc +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From e11703330a5df48e1fd4167e4d22a102e517253e Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Sun, 28 Jun 2020 10:04:51 +0300 Subject: [PATCH 0446/2056] net: phy: marvell10g: support XFI rate matching mode When the hardware MACTYPE hardware configuration pins are set to "XFI with Rate Matching" the PHY interface operate at fixed 10Gbps speed. The MAC buffer packets in both directions to match various wire speeds. Read the MAC Type field in the Port Control register, and set the MAC interface speed accordingly. Signed-off-by: Baruch Siach Signed-off-by: David S. Miller --- drivers/net/phy/marvell10g.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index d4c2e62b2439..a7610eb55f30 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -80,6 +80,8 @@ enum { MV_V2_PORT_CTRL = 0xf001, MV_V2_PORT_CTRL_SWRST = BIT(15), MV_V2_PORT_CTRL_PWRDOWN = BIT(11), + MV_V2_PORT_MAC_TYPE_MASK = 0x7, + MV_V2_PORT_MAC_TYPE_RATE_MATCH = 0x6, /* Temperature control/read registers (88X3310 only) */ MV_V2_TEMP_CTRL = 0xf08a, MV_V2_TEMP_CTRL_MASK = 0xc000, @@ -91,6 +93,7 @@ enum { struct mv3310_priv { u32 firmware_ver; + bool rate_match; struct device *hwmon_dev; char *hwmon_name; @@ -458,7 +461,9 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev) static int mv3310_config_init(struct phy_device *phydev) { + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); int err; + int val; /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && @@ -475,6 +480,12 @@ static int mv3310_config_init(struct phy_device *phydev) if (err) return err; + val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL); + if (val < 0) + return val; + priv->rate_match = ((val & MV_V2_PORT_MAC_TYPE_MASK) == + MV_V2_PORT_MAC_TYPE_RATE_MATCH); + /* Enable EDPD mode - saving 600mW */ return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS); } @@ -581,6 +592,17 @@ static int mv3310_aneg_done(struct phy_device *phydev) static void mv3310_update_interface(struct phy_device *phydev) { + struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev); + + /* In "XFI with Rate Matching" mode the PHY interface is fixed at + * 10Gb. The PHY adapts the rate to actual wire speed with help of + * internal 16KB buffer. 
+ */ + if (priv->rate_match) { + phydev->interface = PHY_INTERFACE_MODE_10GBASER; + return; + } + if ((phydev->interface == PHY_INTERFACE_MODE_SGMII || phydev->interface == PHY_INTERFACE_MODE_2500BASEX || phydev->interface == PHY_INTERFACE_MODE_10GBASER) && From e1ff9e82e2ea53d01540692a85c16a77e1089537 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 29 Jun 2020 22:26:20 +0200 Subject: [PATCH 0447/2056] net: mptcp: improve fallback to TCP Keep using MPTCP sockets and a use "dummy mapping" in case of fallback to regular TCP. When fallback is triggered, skip addition of the MPTCP option on send. Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/11 Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/22 Co-developed-by: Paolo Abeni Signed-off-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 9 +++- net/mptcp/protocol.c | 98 ++++++++++++-------------------------------- net/mptcp/protocol.h | 33 +++++++++++++++ net/mptcp/subflow.c | 47 +++++++++++++-------- 4 files changed, 98 insertions(+), 89 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index df9a51425c6f..b96d3660562f 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -624,6 +624,9 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, opts->suboptions = 0; + if (unlikely(mptcp_check_fallback(sk))) + return false; + if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts)) ret = true; else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining, @@ -714,7 +717,8 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, */ if (!mp_opt->mp_capable) { subflow->mp_capable = 0; - tcp_sk(sk)->is_mptcp = 0; + pr_fallback(msk); + __mptcp_do_fallback(msk); return false; } @@ -814,6 +818,9 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb, struct mptcp_options_received mp_opt; struct mptcp_ext *mpext; + if (__mptcp_check_fallback(msk)) + return; + mptcp_get_options(skb, &mp_opt); if (!check_fully_established(msk, sk, subflow, skb, &mp_opt)) return; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index be09fd525f8f..84ae96be9837 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -52,11 +52,6 @@ static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk) return msk->subflow; } -static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk) -{ - return msk->first && !sk_is_mptcp(msk->first); -} - static struct socket *mptcp_is_tcpsk(struct sock *sk) { struct socket *sock = sk->sk_socket; @@ -94,7 +89,7 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) if (unlikely(sock)) return sock; - if (likely(!__mptcp_needs_tcp_fallback(msk))) + if (likely(!__mptcp_check_fallback(msk))) return NULL; return msk->subflow; @@ -133,6 +128,11 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state) list_add(&subflow->node, &msk->conn_list); subflow->request_mptcp = 1; + /* accept() will wait on first subflow sk_wq, and we always wakes up + * via msk->sk_socket + */ + RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq); + set_state: if (state != MPTCP_SAME_STATE) inet_sk_state_store(sk, state); @@ -229,6 +229,15 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, if (!skb) break; + if (__mptcp_check_fallback(msk)) { + /* if we are running under the workqueue, TCP could have + * collapsed skbs between dummy map creation and now + * be sure to adjust 
the size + */ + map_remaining = skb->len; + subflow->map_data_len = skb->len; + } + offset = seq - TCP_SKB_CB(skb)->seq; fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (fin) { @@ -466,8 +475,15 @@ static void mptcp_clean_una(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_data_frag *dtmp, *dfrag; - u64 snd_una = atomic64_read(&msk->snd_una); bool cleaned = false; + u64 snd_una; + + /* on fallback we just need to ignore snd_una, as this is really + * plain TCP + */ + if (__mptcp_check_fallback(msk)) + atomic64_set(&msk->snd_una, msk->write_seq); + snd_una = atomic64_read(&msk->snd_una); list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { if (after64(dfrag->data_seq + dfrag->data_len, snd_una)) @@ -740,7 +756,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int mss_now = 0, size_goal = 0, ret = 0; struct mptcp_sock *msk = mptcp_sk(sk); struct page_frag *pfrag; - struct socket *ssock; size_t copied = 0; struct sock *ssk; bool tx_ok; @@ -759,15 +774,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out; } -fallback: - ssock = __mptcp_tcp_fallback(msk); - if (unlikely(ssock)) { - release_sock(sk); - pr_debug("fallback passthrough"); - ret = sock_sendmsg(ssock, msg); - return ret >= 0 ? ret + copied : (copied ? copied : ret); - } - pfrag = sk_page_frag(sk); restart: mptcp_clean_una(sk); @@ -819,17 +825,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } break; } - if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) { - /* Can happen for passive sockets: - * 3WHS negotiated MPTCP, but first packet after is - * plain TCP (e.g. due to middlebox filtering unknown - * options). - * - * Fall back to TCP. - */ - release_sock(ssk); - goto fallback; - } copied += ret; @@ -972,7 +967,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { struct mptcp_sock *msk = mptcp_sk(sk); - struct socket *ssock; int copied = 0; int target; long timeo; @@ -981,16 +975,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, return -EOPNOTSUPP; lock_sock(sk); - ssock = __mptcp_tcp_fallback(msk); - if (unlikely(ssock)) { -fallback: - release_sock(sk); - pr_debug("fallback-read subflow=%p", - mptcp_subflow_ctx(ssock->sk)); - copied = sock_recvmsg(ssock, msg, flags); - return copied; - } - timeo = sock_rcvtimeo(sk, nonblock); len = min_t(size_t, len, INT_MAX); @@ -1056,9 +1040,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, pr_debug("block timeout %ld", timeo); mptcp_wait_data(sk, &timeo); - ssock = __mptcp_tcp_fallback(msk); - if (unlikely(ssock)) - goto fallback; } if (skb_queue_empty(&sk->sk_receive_queue)) { @@ -1335,8 +1316,6 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how, break; } - /* Wake up anyone sleeping in poll. 
*/ - ssk->sk_state_change(ssk); release_sock(ssk); } @@ -1660,12 +1639,6 @@ void mptcp_finish_connect(struct sock *ssk) sk = subflow->conn; msk = mptcp_sk(sk); - if (!subflow->mp_capable) { - MPTCP_INC_STATS(sock_net(sk), - MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); - return; - } - pr_debug("msk=%p, token=%u", sk, subflow->token); mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); @@ -1971,23 +1944,10 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, { struct sock *sk = sock->sk; struct mptcp_sock *msk; - struct socket *ssock; __poll_t mask = 0; msk = mptcp_sk(sk); - lock_sock(sk); - ssock = __mptcp_tcp_fallback(msk); - if (!ssock) - ssock = __mptcp_nmpc_socket(msk); - if (ssock) { - mask = ssock->ops->poll(file, ssock, wait); - release_sock(sk); - return mask; - } - - release_sock(sk); sock_poll_wait(file, sock, wait); - lock_sock(sk); if (test_bit(MPTCP_DATA_READY, &msk->flags)) mask = EPOLLIN | EPOLLRDNORM; @@ -1997,8 +1957,6 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock, if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; - release_sock(sk); - return mask; } @@ -2006,18 +1964,11 @@ static int mptcp_shutdown(struct socket *sock, int how) { struct mptcp_sock *msk = mptcp_sk(sock->sk); struct mptcp_subflow_context *subflow; - struct socket *ssock; int ret = 0; pr_debug("sk=%p, how=%d", msk, how); lock_sock(sock->sk); - ssock = __mptcp_tcp_fallback(msk); - if (ssock) { - release_sock(sock->sk); - return inet_shutdown(ssock, how); - } - if (how == SHUT_WR || how == SHUT_RDWR) inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); @@ -2043,6 +1994,9 @@ static int mptcp_shutdown(struct socket *sock, int how) mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq); } + /* Wake up anyone sleeping in poll. 
*/ + sock->sk->sk_state_change(sock->sk); + out_unlock: release_sock(sock->sk); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index c05552e5fa23..a709df659ae0 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -89,6 +89,7 @@ #define MPTCP_SEND_SPACE 1 #define MPTCP_WORK_RTX 2 #define MPTCP_WORK_EOF 3 +#define MPTCP_FALLBACK_DONE 4 struct mptcp_options_received { u64 sndr_key; @@ -457,4 +458,36 @@ static inline bool before64(__u64 seq1, __u64 seq2) void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops); +static inline bool __mptcp_check_fallback(struct mptcp_sock *msk) +{ + return test_bit(MPTCP_FALLBACK_DONE, &msk->flags); +} + +static inline bool mptcp_check_fallback(struct sock *sk) +{ + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + + return __mptcp_check_fallback(msk); +} + +static inline void __mptcp_do_fallback(struct mptcp_sock *msk) +{ + if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) { + pr_debug("TCP fallback already done (msk=%p)", msk); + return; + } + set_bit(MPTCP_FALLBACK_DONE, &msk->flags); +} + +static inline void mptcp_do_fallback(struct sock *sk) +{ + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + + __mptcp_do_fallback(msk); +} + +#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a) + #endif /* __MPTCP_PROTOCOL_H */ diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 102db8c88e97..cb8a42ff4646 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -216,7 +216,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct mptcp_options_received mp_opt; struct sock *parent = subflow->conn; - struct tcp_sock *tp = tcp_sk(sk); subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); @@ -230,6 +229,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) return; subflow->conn_finished = 1; + subflow->ssn_offset = TCP_SKB_CB(skb)->seq; + pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); mptcp_get_options(skb, &mp_opt); if (subflow->request_mptcp && mp_opt.mp_capable) { @@ -245,21 +246,20 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow, subflow->thmac, subflow->remote_nonce); } else { - tp->is_mptcp = 0; + if (subflow->request_mptcp) + MPTCP_INC_STATS(sock_net(sk), + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); + mptcp_do_fallback(sk); + pr_fallback(mptcp_sk(subflow->conn)); } - if (!tp->is_mptcp) + if (mptcp_check_fallback(sk)) return; if (subflow->mp_capable) { pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk), subflow->remote_key); mptcp_finish_connect(sk); - - if (skb) { - pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq); - subflow->ssn_offset = TCP_SKB_CB(skb)->seq; - } } else if (subflow->mp_join) { u8 hmac[SHA256_DIGEST_SIZE]; @@ -279,9 +279,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); - if (skb) - subflow->ssn_offset = TCP_SKB_CB(skb)->seq; - if (!mptcp_finish_join(sk)) goto do_reset; @@ -557,7 +554,8 @@ enum mapping_status { MAPPING_OK, MAPPING_INVALID, MAPPING_EMPTY, - MAPPING_DATA_FIN + MAPPING_DATA_FIN, + MAPPING_DUMMY }; static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq) @@ -621,6 +619,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk) 
if (!skb) return MAPPING_EMPTY; + if (mptcp_check_fallback(ssk)) + return MAPPING_DUMMY; + mpext = mptcp_get_ext(skb); if (!mpext || !mpext->use_map) { if (!subflow->map_valid && !skb->len) { @@ -762,6 +763,16 @@ static bool subflow_check_data_avail(struct sock *ssk) ssk->sk_err = EBADMSG; goto fatal; } + if (status == MAPPING_DUMMY) { + __mptcp_do_fallback(msk); + skb = skb_peek(&ssk->sk_receive_queue); + subflow->map_valid = 1; + subflow->map_seq = READ_ONCE(msk->ack_seq); + subflow->map_data_len = skb->len; + subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - + subflow->ssn_offset; + return true; + } if (status != MAPPING_OK) return false; @@ -885,14 +896,18 @@ static void subflow_data_ready(struct sock *sk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); struct sock *parent = subflow->conn; + struct mptcp_sock *msk; - if (!subflow->mp_capable && !subflow->mp_join) { - subflow->tcp_data_ready(sk); - + msk = mptcp_sk(parent); + if (inet_sk_state_load(sk) == TCP_LISTEN) { + set_bit(MPTCP_DATA_READY, &msk->flags); parent->sk_data_ready(parent); return; } + WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && + !subflow->mp_join); + if (mptcp_subflow_data_available(sk)) mptcp_data_ready(parent, sk); } @@ -1117,7 +1132,7 @@ static void subflow_state_change(struct sock *sk) * a fin packet carrying a DSS can be unnoticed if we don't trigger * the data available machinery here. */ - if (subflow->mp_capable && mptcp_subflow_data_available(sk)) + if (mptcp_subflow_data_available(sk)) mptcp_data_ready(parent, sk); if (!(parent->sk_shutdown & RCV_SHUTDOWN) && From 8fd738049ac3d67a937d36577763b47180aae1ad Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 29 Jun 2020 22:26:21 +0200 Subject: [PATCH 0448/2056] mptcp: fallback in case of simultaneous connect when a MPTCP client tries to connect to itself, tcp_finish_connect() is never reached. Because of this, depending on the socket current state, multiple faulty behaviours can be observed: 1) a WARN_ON() in subflow_data_ready() is hit WARNING: CPU: 2 PID: 882 at net/mptcp/subflow.c:911 subflow_data_ready+0x18b/0x230 [...] CPU: 2 PID: 882 Comm: gh35 Not tainted 5.7.0+ #187 [...] RIP: 0010:subflow_data_ready+0x18b/0x230 [...] 
Call Trace: tcp_data_queue+0xd2f/0x4250 tcp_rcv_state_process+0xb1c/0x49d3 tcp_v4_do_rcv+0x2bc/0x790 __release_sock+0x153/0x2d0 release_sock+0x4f/0x170 mptcp_shutdown+0x167/0x4e0 __sys_shutdown+0xe6/0x180 __x64_sys_shutdown+0x50/0x70 do_syscall_64+0x9a/0x370 entry_SYSCALL_64_after_hwframe+0x44/0xa9 2) client is stuck forever in mptcp_sendmsg() because the socket is not TCP_ESTABLISHED crash> bt 4847 PID: 4847 TASK: ffff88814b2fb100 CPU: 1 COMMAND: "gh35" #0 [ffff8881376ff680] __schedule at ffffffff97248da4 #1 [ffff8881376ff778] schedule at ffffffff9724a34f #2 [ffff8881376ff7a0] schedule_timeout at ffffffff97252ba0 #3 [ffff8881376ff8a8] wait_woken at ffffffff958ab4ba #4 [ffff8881376ff940] sk_stream_wait_connect at ffffffff96c2d859 #5 [ffff8881376ffa28] mptcp_sendmsg at ffffffff97207fca #6 [ffff8881376ffbc0] sock_sendmsg at ffffffff96be1b5b #7 [ffff8881376ffbe8] sock_write_iter at ffffffff96be1daa #8 [ffff8881376ffce8] new_sync_write at ffffffff95e5cb52 #9 [ffff8881376ffe50] vfs_write at ffffffff95e6547f #10 [ffff8881376ffe90] ksys_write at ffffffff95e65d26 #11 [ffff8881376fff28] do_syscall_64 at ffffffff956088ba #12 [ffff8881376fff50] entry_SYSCALL_64_after_hwframe at ffffffff9740008c RIP: 00007f126f6956ed RSP: 00007ffc2a320278 RFLAGS: 00000217 RAX: ffffffffffffffda RBX: 0000000020000044 RCX: 00007f126f6956ed RDX: 0000000000000004 RSI: 00000000004007b8 RDI: 0000000000000003 RBP: 00007ffc2a3202a0 R8: 0000000000400720 R9: 0000000000400720 R10: 0000000000400720 R11: 0000000000000217 R12: 00000000004004b0 R13: 00007ffc2a320380 R14: 0000000000000000 R15: 0000000000000000 ORIG_RAX: 0000000000000001 CS: 0033 SS: 002b 3) tcpdump captures show that DSS is exchanged even when MP_CAPABLE handshake didn't complete. $ tcpdump -tnnr bad.pcap IP 127.0.0.1.20000 > 127.0.0.1.20000: Flags [S], seq 3208913911, win 65483, options [mss 65495,sackOK,TS val 3291706876 ecr 3291694721,nop,wscale 7,mptcp capable v1], length 0 IP 127.0.0.1.20000 > 127.0.0.1.20000: Flags [S.], seq 3208913911, ack 3208913912, win 65483, options [mss 65495,sackOK,TS val 3291706876 ecr 3291706876,nop,wscale 7,mptcp capable v1], length 0 IP 127.0.0.1.20000 > 127.0.0.1.20000: Flags [.], ack 1, win 512, options [nop,nop,TS val 3291706876 ecr 3291706876], length 0 IP 127.0.0.1.20000 > 127.0.0.1.20000: Flags [F.], seq 1, ack 1, win 512, options [nop,nop,TS val 3291707876 ecr 3291706876,mptcp dss fin seq 0 subseq 0 len 1,nop,nop], length 0 IP 127.0.0.1.20000 > 127.0.0.1.20000: Flags [.], ack 2, win 512, options [nop,nop,TS val 3291707876 ecr 3291707876], length 0 force a fallback to TCP in these cases, and adjust the main socket state to avoid hanging in mptcp_sendmsg(). Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/35 Reported-by: Christoph Paasch Suggested-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/protocol.h | 10 ++++++++++ net/mptcp/subflow.c | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index a709df659ae0..1d05d9841b5c 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -490,4 +490,14 @@ static inline void mptcp_do_fallback(struct sock *sk) #define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a) +static inline bool subflow_simultaneous_connect(struct sock *sk) +{ + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct sock *parent = subflow->conn; + + return sk->sk_state == TCP_ESTABLISHED && + !mptcp_sk(parent)->pm.server_side && + !subflow->conn_finished; +} + #endif /* __MPTCP_PROTOCOL_H */ diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index cb8a42ff4646..548f9e347ff5 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1128,6 +1128,16 @@ static void subflow_state_change(struct sock *sk) __subflow_state_change(sk); + if (subflow_simultaneous_connect(sk)) { + mptcp_do_fallback(sk); + pr_fallback(mptcp_sk(parent)); + subflow->conn_finished = 1; + if (inet_sk_state_load(parent) == TCP_SYN_SENT) { + inet_sk_state_store(parent, TCP_ESTABLISHED); + parent->sk_state_change(parent); + } + } + /* as recvmsg() does not acquire the subflow socket for ssk selection * a fin packet carrying a DSS can be unnoticed if we don't trigger * the data available machinery here. From d2f77c53342e3e13ce1fd7f9638ee200026e14f4 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 29 Jun 2020 22:26:22 +0200 Subject: [PATCH 0449/2056] mptcp: check for plain TCP sock at accept time This cleanup the code a bit and avoid corrupted states on weird syscall sequence (accept(), connect()). Signed-off-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 69 +++++--------------------------------------- 1 file changed, 7 insertions(+), 62 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 84ae96be9837..dbeb6fe374f5 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -52,13 +52,10 @@ static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk) return msk->subflow; } -static struct socket *mptcp_is_tcpsk(struct sock *sk) +static bool mptcp_is_tcpsk(struct sock *sk) { struct socket *sock = sk->sk_socket; - if (sock->sk != sk) - return NULL; - if (unlikely(sk->sk_prot == &tcp_prot)) { /* we are being invoked after mptcp_accept() has * accepted a non-mp-capable flow: sk is a tcp_sk, @@ -68,27 +65,21 @@ static struct socket *mptcp_is_tcpsk(struct sock *sk) * bypass mptcp. 
*/ sock->ops = &inet_stream_ops; - return sock; + return true; #if IS_ENABLED(CONFIG_MPTCP_IPV6) } else if (unlikely(sk->sk_prot == &tcpv6_prot)) { sock->ops = &inet6_stream_ops; - return sock; + return true; #endif } - return NULL; + return false; } static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) { - struct socket *sock; - sock_owned_by_me((const struct sock *)msk); - sock = mptcp_is_tcpsk((struct sock *)msk); - if (unlikely(sock)) - return sock; - if (likely(!__mptcp_check_fallback(msk))) return NULL; @@ -1466,7 +1457,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, return NULL; pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); - if (sk_is_mptcp(newsk)) { struct mptcp_subflow_context *subflow; struct sock *new_mptcp_sock; @@ -1821,42 +1811,6 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, return err; } -static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr, - int peer) -{ - if (sock->sk->sk_prot == &tcp_prot) { - /* we are being invoked from __sys_accept4, after - * mptcp_accept() has just accepted a non-mp-capable - * flow: sk is a tcp_sk, not an mptcp one. - * - * Hand the socket over to tcp so all further socket ops - * bypass mptcp. - */ - sock->ops = &inet_stream_ops; - } - - return inet_getname(sock, uaddr, peer); -} - -#if IS_ENABLED(CONFIG_MPTCP_IPV6) -static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr, - int peer) -{ - if (sock->sk->sk_prot == &tcpv6_prot) { - /* we are being invoked from __sys_accept4 after - * mptcp_accept() has accepted a non-mp-capable - * subflow: sk is a tcp_sk, not mptcp. - * - * Hand the socket over to tcp so all further - * socket ops bypass mptcp. - */ - sock->ops = &inet6_stream_ops; - } - - return inet6_getname(sock, uaddr, peer); -} -#endif - static int mptcp_listen(struct socket *sock, int backlog) { struct mptcp_sock *msk = mptcp_sk(sock->sk); @@ -1885,15 +1839,6 @@ static int mptcp_listen(struct socket *sock, int backlog) return err; } -static bool is_tcp_proto(const struct proto *p) -{ -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - return p == &tcp_prot || p == &tcpv6_prot; -#else - return p == &tcp_prot; -#endif -} - static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { @@ -1915,7 +1860,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, release_sock(sock->sk); err = ssock->ops->accept(sock, newsock, flags, kern); - if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) { + if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { struct mptcp_sock *msk = mptcp_sk(newsock->sk); struct mptcp_subflow_context *subflow; @@ -2011,7 +1956,7 @@ static const struct proto_ops mptcp_stream_ops = { .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, - .getname = mptcp_v4_getname, + .getname = inet_getname, .poll = mptcp_poll, .ioctl = inet_ioctl, .gettstamp = sock_gettstamp, @@ -2065,7 +2010,7 @@ static const struct proto_ops mptcp_v6_stream_ops = { .connect = mptcp_stream_connect, .socketpair = sock_no_socketpair, .accept = mptcp_stream_accept, - .getname = mptcp_v6_getname, + .getname = inet6_getname, .poll = mptcp_poll, .ioctl = inet6_ioctl, .gettstamp = sock_gettstamp, From fa68018dc45e3faee9d866d5dc484d141e8f1093 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 29 Jun 2020 22:26:23 +0200 Subject: [PATCH 0450/2056] mptcp: create first subflow at msk creation time This cleans the code a bit and makes the behavior more 
consistent. Signed-off-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 53 +++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 33 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index dbeb6fe374f5..ad619bda71cc 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -86,32 +86,16 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) return msk->subflow; } -static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk) -{ - return !msk->first; -} - -static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state) +static int __mptcp_socket_create(struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow; struct sock *sk = (struct sock *)msk; struct socket *ssock; int err; - ssock = __mptcp_tcp_fallback(msk); - if (unlikely(ssock)) - return ssock; - - ssock = __mptcp_nmpc_socket(msk); - if (ssock) - goto set_state; - - if (!__mptcp_can_create_subflow(msk)) - return ERR_PTR(-EINVAL); - err = mptcp_subflow_create_socket(sk, &ssock); if (err) - return ERR_PTR(err); + return err; msk->first = ssock->sk; msk->subflow = ssock; @@ -124,10 +108,7 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state) */ RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq); -set_state: - if (state != MPTCP_SAME_STATE) - inet_sk_state_store(sk, state); - return ssock; + return 0; } static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, @@ -1255,6 +1236,10 @@ static int mptcp_init_sock(struct sock *sk) if (ret) return ret; + ret = __mptcp_socket_create(mptcp_sk(sk)); + if (ret) + return ret; + sk_sockets_allocated_inc(sk); sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2]; @@ -1744,9 +1729,9 @@ static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) int err; lock_sock(sock->sk); - ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE); - if (IS_ERR(ssock)) { - err = PTR_ERR(ssock); + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + err = -EINVAL; goto unlock; } @@ -1776,13 +1761,14 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, goto do_connect; } - mptcp_token_destroy(msk); - ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); - if (IS_ERR(ssock)) { - err = PTR_ERR(ssock); + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + err = -EINVAL; goto unlock; } + mptcp_token_destroy(msk); + inet_sk_state_store(sock->sk, TCP_SYN_SENT); subflow = mptcp_subflow_ctx(ssock->sk); #ifdef CONFIG_TCP_MD5SIG /* no MPTCP if MD5SIG is enabled on this socket or we may run out of @@ -1820,13 +1806,14 @@ static int mptcp_listen(struct socket *sock, int backlog) pr_debug("msk=%p", msk); lock_sock(sock->sk); - mptcp_token_destroy(msk); - ssock = __mptcp_socket_create(msk, TCP_LISTEN); - if (IS_ERR(ssock)) { - err = PTR_ERR(ssock); + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + err = -EINVAL; goto unlock; } + mptcp_token_destroy(msk); + inet_sk_state_store(sock->sk, TCP_LISTEN); sock_set_flag(sock->sk, SOCK_RCU_FREE); err = ssock->ops->listen(ssock, backlog); From 76660afbb7a1ac6bef0be34c4c6e76d7e07b74d7 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 29 Jun 2020 22:26:24 +0200 Subject: [PATCH 0451/2056] mptcp: __mptcp_tcp_fallback() returns a struct sock Currently __mptcp_tcp_fallback() always return NULL on incoming connections, because MPTCP does not create the additional socket for the first subflow. 
Since the previous commit no __mptcp_tcp_fallback() caller needs a struct socket, so let __mptcp_tcp_fallback() return the first subflow sock and cope correctly even with incoming connections. Signed-off-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index ad619bda71cc..f2b2bd37e371 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -76,14 +76,14 @@ static bool mptcp_is_tcpsk(struct sock *sk) return false; } -static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk) +static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk) { sock_owned_by_me((const struct sock *)msk); if (likely(!__mptcp_check_fallback(msk))) return NULL; - return msk->subflow; + return msk->first; } static int __mptcp_socket_create(struct mptcp_sock *msk) @@ -1498,7 +1498,7 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { struct mptcp_sock *msk = mptcp_sk(sk); - struct socket *ssock; + struct sock *ssk; pr_debug("msk=%p", msk); @@ -1509,11 +1509,10 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname, * to the one remaining subflow. */ lock_sock(sk); - ssock = __mptcp_tcp_fallback(msk); + ssk = __mptcp_tcp_fallback(msk); release_sock(sk); - if (ssock) - return tcp_setsockopt(ssock->sk, level, optname, optval, - optlen); + if (ssk) + return tcp_setsockopt(ssk, level, optname, optval, optlen); return -EOPNOTSUPP; } @@ -1522,7 +1521,7 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *option) { struct mptcp_sock *msk = mptcp_sk(sk); - struct socket *ssock; + struct sock *ssk; pr_debug("msk=%p", msk); @@ -1533,11 +1532,10 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname, * to the one remaining subflow. */ lock_sock(sk); - ssock = __mptcp_tcp_fallback(msk); + ssk = __mptcp_tcp_fallback(msk); release_sock(sk); - if (ssock) - return tcp_getsockopt(ssock->sk, level, optname, optval, - option); + if (ssk) + return tcp_getsockopt(ssk, level, optname, optval, option); return -EOPNOTSUPP; } From 8a05661b2b266b6dc45af255b3037b00ef31d85d Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 29 Jun 2020 22:26:25 +0200 Subject: [PATCH 0452/2056] mptcp: close poll() races mptcp_poll always return POLLOUT for unblocking connect(), ensure that the socket is a suitable state. The MPTCP_DATA_READY bit is never cleared on accept: ensure we don't leave mptcp_accept() with an empty accept queue and such bit set. Signed-off-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/protocol.c | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f2b2bd37e371..28ec26d97f96 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1841,6 +1841,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, if (!ssock) goto unlock_fail; + clear_bit(MPTCP_DATA_READY, &msk->flags); sock_hold(ssock->sk); release_sock(sock->sk); @@ -1861,6 +1862,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, } } + if (inet_csk_listen_poll(ssock->sk)) + set_bit(MPTCP_DATA_READY, &msk->flags); sock_put(ssock->sk); return err; @@ -1869,21 +1872,33 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, return -EINVAL; } +static __poll_t mptcp_check_readable(struct mptcp_sock *msk) +{ + return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : + 0; +} + static __poll_t mptcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait) { struct sock *sk = sock->sk; struct mptcp_sock *msk; __poll_t mask = 0; + int state; msk = mptcp_sk(sk); sock_poll_wait(file, sock, wait); - if (test_bit(MPTCP_DATA_READY, &msk->flags)) - mask = EPOLLIN | EPOLLRDNORM; - if (sk_stream_is_writeable(sk) && - test_bit(MPTCP_SEND_SPACE, &msk->flags)) - mask |= EPOLLOUT | EPOLLWRNORM; + state = inet_sk_state_load(sk); + if (state == TCP_LISTEN) + return mptcp_check_readable(msk); + + if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { + mask |= mptcp_check_readable(msk); + if (sk_stream_is_writeable(sk) && + test_bit(MPTCP_SEND_SPACE, &msk->flags)) + mask |= EPOLLOUT | EPOLLWRNORM; + } if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; From 5f035af76e51cd622abc6564d5512ffeb9e06917 Mon Sep 17 00:00:00 2001 From: Po Liu Date: Mon, 29 Jun 2020 14:54:16 +0800 Subject: [PATCH 0453/2056] net:qos: police action offloading parameter 'burst' change to the original value Since 'tcfp_burst' with TICK factor, driver side always need to recover it to the original value, this patch moves the generic calculation and recover to the 'burst' original value before offloading to device driver. Signed-off-by: Po Liu Acked-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 4 +-- drivers/net/dsa/sja1105/sja1105_flower.c | 16 ++++------ drivers/net/dsa/sja1105/sja1105_main.c | 4 +-- .../net/ethernet/freescale/enetc/enetc_qos.c | 8 +---- drivers/net/ethernet/mscc/ocelot_flower.c | 4 +-- drivers/net/ethernet/mscc/ocelot_net.c | 4 +-- .../ethernet/netronome/nfp/flower/qos_conf.c | 6 ++-- include/net/dsa.h | 2 +- include/net/flow_offload.h | 2 +- include/net/tc_act/tc_police.h | 32 +++++++++++++++++-- net/sched/cls_api.c | 2 +- 11 files changed, 48 insertions(+), 36 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 25046777c993..75020af7f7a4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -746,9 +746,7 @@ static int felix_port_policer_add(struct dsa_switch *ds, int port, struct ocelot *ocelot = ds->priv; struct ocelot_policer pol = { .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8, - .burst = div_u64(policer->rate_bytes_per_sec * - PSCHED_NS2TICKS(policer->burst), - PSCHED_TICKS_PER_SEC), + .burst = policer->burst, }; return ocelot_port_policer_add(ocelot, port, &pol); diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c index 9ee8968610cd..12e76020bea3 100644 --- a/drivers/net/dsa/sja1105/sja1105_flower.c +++ b/drivers/net/dsa/sja1105/sja1105_flower.c @@ -31,7 +31,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv, struct netlink_ext_ack *extack, unsigned long cookie, int port, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); struct sja1105_l2_policing_entry *policing; @@ -79,9 +79,8 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv, policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec * 512, 1000000); - policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec * - PSCHED_NS2TICKS(burst), - PSCHED_TICKS_PER_SEC); + policing[rule->bcast_pol.sharindx].smax = burst; + /* TODO: support per-flow MTU */ policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; @@ -103,7 +102,7 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv, struct netlink_ext_ack *extack, unsigned long cookie, int port, int tc, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { struct sja1105_rule *rule = sja1105_rule_find(priv, cookie); struct sja1105_l2_policing_entry *policing; @@ -152,9 +151,8 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv, policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec * 512, 1000000); - policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec * - PSCHED_NS2TICKS(burst), - PSCHED_TICKS_PER_SEC); + policing[rule->tc_pol.sharindx].smax = burst; + /* TODO: support per-flow MTU */ policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; @@ -177,7 +175,7 @@ static int sja1105_flower_policer(struct sja1105_private *priv, int port, unsigned long cookie, struct sja1105_key *key, u64 rate_bytes_per_sec, - s64 burst) + u32 burst) { switch (key->type) { case SJA1105_KEY_BCAST: diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 789b288cc78b..5079e4aeef80 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3324,9 +3324,7 @@ static int sja1105_port_policer_add(struct dsa_switch *ds, int port, */ policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec, 1000000); - 
policing[port].smax = div_u64(policer->rate_bytes_per_sec * - PSCHED_NS2TICKS(policer->burst), - PSCHED_TICKS_PER_SEC); + policing[port].smax = policer->burst; return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index 4f670cbdf186..b8b336179d82 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1241,8 +1241,6 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, /* Flow meter and max frame size */ if (entryp) { if (entryp->police.burst) { - u64 temp; - fmi = kzalloc(sizeof(*fmi), GFP_KERNEL); if (!fmi) { err = -ENOMEM; @@ -1250,11 +1248,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } refcount_set(&fmi->refcount, 1); fmi->cir = entryp->police.rate_bytes_ps; - /* Convert to original burst value */ - temp = entryp->police.burst * fmi->cir; - temp = div_u64(temp, 1000000000ULL); - - fmi->cbs = temp; + fmi->cbs = entryp->police.burst; fmi->index = entryp->police.index; filter->flags |= ENETC_PSFP_FLAGS_FMI; filter->fmi_index = fmi->index; diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c index f2a85b06a6e7..ec1b6e2572ba 100644 --- a/drivers/net/ethernet/mscc/ocelot_flower.c +++ b/drivers/net/ethernet/mscc/ocelot_flower.c @@ -12,7 +12,6 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, struct ocelot_vcap_filter *filter) { const struct flow_action_entry *a; - s64 burst; u64 rate; int i; @@ -35,8 +34,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f, filter->action = OCELOT_VCAP_ACTION_POLICE; rate = a->police.rate_bytes_ps; filter->pol.rate = div_u64(rate, 1000) * 8; - burst = rate * PSCHED_NS2TICKS(a->police.burst); - filter->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC); + filter->pol.burst = a->police.burst; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 5868ff753232..41a1b5f6df95 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -74,9 +74,7 @@ static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv, } pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8; - pol.burst = (u32)div_u64(action->police.rate_bytes_ps * - PSCHED_NS2TICKS(action->police.burst), - PSCHED_TICKS_PER_SEC); + pol.burst = action->police.burst; err = ocelot_port_policer_add(ocelot, port, &pol); if (err) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c index bb327d48d1ab..d4ce8f9ef3cc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c @@ -69,7 +69,8 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, struct nfp_repr *repr; struct sk_buff *skb; u32 netdev_port_id; - u64 burst, rate; + u32 burst; + u64 rate; if (!nfp_netdev_is_nfp_repr(netdev)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port"); @@ -104,8 +105,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev, } rate = action->police.rate_bytes_ps; - burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst), - PSCHED_TICKS_PER_SEC); + burst = action->police.burst; netdev_port_id = nfp_repr_get_port_id(netdev); 
skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config), diff --git a/include/net/dsa.h b/include/net/dsa.h index 50389772c597..4046ccd1945d 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -144,7 +144,7 @@ struct dsa_mall_mirror_tc_entry { /* TC port policer entry */ struct dsa_mall_policer_tc_entry { - s64 burst; + u32 burst; u64 rate_bytes_per_sec; }; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3e793ac66baf..de395498440d 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -233,7 +233,7 @@ struct flow_action_entry { } sample; struct { /* FLOW_ACTION_POLICE */ u32 index; - s64 burst; + u32 burst; u64 rate_bytes_ps; u32 mtu; } police; diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index cd973b10ae8c..6d1e26b709b5 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -59,14 +59,42 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act) return params->rate.rate_bytes_ps; } -static inline s64 tcf_police_tcfp_burst(const struct tc_action *act) +static inline u32 tcf_police_burst(const struct tc_action *act) { struct tcf_police *police = to_police(act); struct tcf_police_params *params; + u32 burst; params = rcu_dereference_protected(police->params, lockdep_is_held(&police->tcf_lock)); - return params->tcfp_burst; + + /* + * "rate" bytes "burst" nanoseconds + * ------------ * ------------------- + * 1 second 2^6 ticks + * + * ------------------------------------ + * NSEC_PER_SEC nanoseconds + * ------------------------ + * 2^6 ticks + * + * "rate" bytes "burst" nanoseconds 2^6 ticks + * = ------------ * ------------------- * ------------------------ + * 1 second 2^6 ticks NSEC_PER_SEC nanoseconds + * + * "rate" * "burst" + * = ---------------- bytes/nanosecond + * NSEC_PER_SEC^2 + * + * + * "rate" * "burst" + * = ---------------- bytes/second + * NSEC_PER_SEC + */ + burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps, + NSEC_PER_SEC); + + return burst; } static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 1b14d5f57e7f..e9e119ea6813 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3660,7 +3660,7 @@ int tc_setup_flow_action(struct flow_action *flow_action, tcf_sample_get_group(entry, act); } else if (is_tcf_police(act)) { entry->id = FLOW_ACTION_POLICE; - entry->police.burst = tcf_police_tcfp_burst(act); + entry->police.burst = tcf_police_burst(act); entry->police.rate_bytes_ps = tcf_police_rate_bytes_ps(act); entry->police.mtu = tcf_police_tcfp_mtu(act); From 0dc95084c30d6323f622f87f7d786f6e69ca118d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:32:31 +0100 Subject: [PATCH 0454/2056] sfc: update MCDI protocol headers The script used to generate these now includes _OFST definitions for flags, to identify the containing flag word. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_pcol.h | 6877 +++++++++++++++++++++++++- 1 file changed, 6841 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 79d834a4ae49..d3fcbf930dba 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /**************************************************************************** * Driver for Solarflare network controllers and boards - * Copyright 2009-2013 Solarflare Communications Inc. + * Copyright 2009-2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. */ @@ -383,14 +384,19 @@ #define MCDI_EVENT_LEVEL_FATAL 0x3 #define MCDI_EVENT_DATA_OFST 0 #define MCDI_EVENT_DATA_LEN 4 +#define MCDI_EVENT_CMDDONE_SEQ_OFST 0 #define MCDI_EVENT_CMDDONE_SEQ_LBN 0 #define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8 +#define MCDI_EVENT_CMDDONE_DATALEN_OFST 0 #define MCDI_EVENT_CMDDONE_DATALEN_LBN 8 #define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8 +#define MCDI_EVENT_CMDDONE_ERRNO_OFST 0 #define MCDI_EVENT_CMDDONE_ERRNO_LBN 16 #define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8 +#define MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0 #define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0 #define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16 +#define MCDI_EVENT_LINKCHANGE_SPEED_OFST 0 #define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16 #define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4 /* enum: Link is down or link speed could not be determined */ @@ -409,26 +415,36 @@ #define MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6 /* enum: 100Gbs */ #define MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7 +#define MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0 #define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20 #define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4 +#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24 #define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_MONITOR_OFST 0 #define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0 #define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_STATE_OFST 0 #define MCDI_EVENT_SENSOREVT_STATE_LBN 8 #define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8 +#define MCDI_EVENT_SENSOREVT_VALUE_OFST 0 #define MCDI_EVENT_SENSOREVT_VALUE_LBN 16 #define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16 +#define MCDI_EVENT_FWALERT_DATA_OFST 0 #define MCDI_EVENT_FWALERT_DATA_LBN 8 #define MCDI_EVENT_FWALERT_DATA_WIDTH 24 +#define MCDI_EVENT_FWALERT_REASON_OFST 0 #define MCDI_EVENT_FWALERT_REASON_LBN 0 #define MCDI_EVENT_FWALERT_REASON_WIDTH 8 /* enum: SRAM Access. 
*/ #define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 +#define MCDI_EVENT_FLR_VF_OFST 0 #define MCDI_EVENT_FLR_VF_LBN 0 #define MCDI_EVENT_FLR_VF_WIDTH 8 +#define MCDI_EVENT_TX_ERR_TXQ_OFST 0 #define MCDI_EVENT_TX_ERR_TXQ_LBN 0 #define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12 +#define MCDI_EVENT_TX_ERR_TYPE_OFST 0 #define MCDI_EVENT_TX_ERR_TYPE_LBN 12 #define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4 /* enum: Descriptor loader reported failure */ @@ -443,12 +459,16 @@ #define MCDI_EVENT_TX_OPT_IN_PKT 0x8 /* enum: DMA or PIO data access error */ #define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9 +#define MCDI_EVENT_TX_ERR_INFO_OFST 0 #define MCDI_EVENT_TX_ERR_INFO_LBN 16 #define MCDI_EVENT_TX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0 #define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12 #define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_TX_FLUSH_TXQ_OFST 0 #define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0 #define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12 +#define MCDI_EVENT_PTP_ERR_TYPE_OFST 0 #define MCDI_EVENT_PTP_ERR_TYPE_LBN 0 #define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8 /* enum: PLL lost lock */ @@ -459,6 +479,7 @@ #define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum: Merge queue overflow */ #define MCDI_EVENT_PTP_ERR_QUEUE 0x4 +#define MCDI_EVENT_AOE_ERR_TYPE_OFST 0 #define MCDI_EVENT_AOE_ERR_TYPE_LBN 0 #define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8 /* enum: AOE failed to load - no valid image? */ @@ -505,8 +526,10 @@ #define MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13 /* enum: Notify that FPGA Controller is alive to serve MCDI requests */ #define MCDI_EVENT_AOE_FC_RUNNING 0x14 +#define MCDI_EVENT_AOE_ERR_DATA_OFST 0 #define MCDI_EVENT_AOE_ERR_DATA_LBN 8 #define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0 #define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8 #define MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8 /* enum: FC Assert happened, but the register information is not available */ @@ -514,6 +537,7 @@ /* enum: The register information for FC Assert is ready for readinng by driver */ #define MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8 /* enum: Reading from NV failed */ @@ -534,28 +558,38 @@ #define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7 /* enum: Unsupported DDR rank */ #define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8 +#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0 #define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8 #define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8 /* enum: Primary boot flash */ #define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0 /* enum: Secondary boot flash */ #define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8 +#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8 #define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8 +#define MCDI_EVENT_RX_ERR_RXQ_OFST 0 #define MCDI_EVENT_RX_ERR_RXQ_LBN 0 #define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12 +#define MCDI_EVENT_RX_ERR_TYPE_OFST 0 #define MCDI_EVENT_RX_ERR_TYPE_LBN 12 #define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4 +#define MCDI_EVENT_RX_ERR_INFO_OFST 0 #define MCDI_EVENT_RX_ERR_INFO_LBN 16 #define MCDI_EVENT_RX_ERR_INFO_WIDTH 16 +#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 
0 #define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12 #define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1 +#define MCDI_EVENT_RX_FLUSH_RXQ_OFST 0 #define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0 #define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12 +#define MCDI_EVENT_MC_REBOOT_COUNT_OFST 0 #define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0 #define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16 +#define MCDI_EVENT_MUM_ERR_TYPE_OFST 0 #define MCDI_EVENT_MUM_ERR_TYPE_LBN 0 #define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8 /* enum: MUM failed to load - no valid image? */ @@ -564,10 +598,13 @@ #define MCDI_EVENT_MUM_ASSERT 0x2 /* enum: MUM not kicking watchdog */ #define MCDI_EVENT_MUM_WATCHDOG 0x3 +#define MCDI_EVENT_MUM_ERR_DATA_OFST 0 #define MCDI_EVENT_MUM_ERR_DATA_LBN 8 #define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8 +#define MCDI_EVENT_DBRET_SEQ_OFST 0 #define MCDI_EVENT_DBRET_SEQ_LBN 0 #define MCDI_EVENT_DBRET_SEQ_WIDTH 8 +#define MCDI_EVENT_SUC_ERR_TYPE_OFST 0 #define MCDI_EVENT_SUC_ERR_TYPE_LBN 0 #define MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8 /* enum: Corrupted or bad SUC application. */ @@ -578,14 +615,48 @@ #define MCDI_EVENT_SUC_EXCEPTION 0x3 /* enum: SUC watchdog timer expired. */ #define MCDI_EVENT_SUC_WATCHDOG 0x4 +#define MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0 #define MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8 #define MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24 +#define MCDI_EVENT_SUC_ERR_DATA_OFST 0 #define MCDI_EVENT_SUC_ERR_DATA_LBN 8 #define MCDI_EVENT_SUC_ERR_DATA_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24 +#define MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4 +/* Enum values, see field(s): */ +/* MCDI_EVENT/LINKCHANGE_SPEED */ +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28 +#define MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29 +#define MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3 +/* Enum values, see field(s): */ +/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ +#define MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0 +#define MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_OFST 0 +#define MCDI_EVENT_MODULECHANGE_SEQ_LBN 30 +#define MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2 #define MCDI_EVENT_DATA_LBN 0 #define MCDI_EVENT_DATA_WIDTH 32 +/* Alias for PTP_DATA. */ #define MCDI_EVENT_SRC_LBN 36 #define MCDI_EVENT_SRC_WIDTH 8 +/* Data associated with PTP events which doesn't fit into the main DATA field + */ +#define MCDI_EVENT_PTP_DATA_LBN 36 +#define MCDI_EVENT_PTP_DATA_WIDTH 8 +/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the + * event ring + */ +#define MCDI_EVENT_EV_EVQ_PHASE_LBN 59 +#define MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1 #define MCDI_EVENT_EV_CODE_LBN 60 #define MCDI_EVENT_EV_CODE_WIDTH 4 #define MCDI_EVENT_CODE_LBN 44 @@ -660,6 +731,48 @@ #define MCDI_EVENT_CODE_DBRET 0x1e /* enum: The MC has detected a fault on the SUC */ #define MCDI_EVENT_CODE_SUC 0x1f +/* enum: Link change. This event is sent instead of LINKCHANGE if + * WANT_V2_LINKCHANGES was set on driver attach. + */ +#define MCDI_EVENT_CODE_LINKCHANGE_V2 0x20 +/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach + * when the local device capabilities changes. This will usually correspond to + * a module change. 
+ */ +#define MCDI_EVENT_CODE_MODULECHANGE 0x21 +/* enum: Notification that the sensors have been added and/or removed from the + * sensor table. This event includes the new sensor table generation count, if + * this does not match the driver's local copy it is expected to call + * DYNAMIC_SENSORS_LIST to refresh it. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22 +/* enum: Notification that a sensor has changed state as a result of a reading + * crossing a threshold. This is sent as two events, the first event contains + * the handle and the sensor's state (in the SRC field), and the second + * contains the value. + */ +#define MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23 +/* enum: Notification that a descriptor proxy function configuration has been + * pushed to "live" status (visible to host). SRC field contains the handle of + * the affected descriptor proxy function. DATA field contains the generation + * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET / + * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24 +/* enum: Notification that a descriptor proxy function has been reset. SRC + * field contains the handle of the affected descriptor proxy function. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25 +/* enum: Notification that a driver attached to a descriptor proxy function. + * SRC field contains the handle of the affected descriptor proxy function. For + * Virtio proxy functions this message consists of two MCDI events, where the + * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0 + * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy + * functions event length and meaning of DATA field is not yet defined. See + * SF-122927-TC for details. + */ +#define MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26 /* enum: Artificial event generated by host and posted via MC for test * purposes. */ @@ -785,6 +898,48 @@ #define MCDI_EVENT_DBRET_DATA_LEN 4 #define MCDI_EVENT_DBRET_DATA_LBN 0 #define MCDI_EVENT_DBRET_DATA_WIDTH 32 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0 +#define MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32 +#define MCDI_EVENT_MODULECHANGE_DATA_OFST 0 +#define MCDI_EVENT_MODULECHANGE_DATA_LEN 4 +#define MCDI_EVENT_MODULECHANGE_DATA_LBN 0 +#define MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32 +/* The new generation count after a sensor has been added or deleted. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32 +/* The handle of a dynamic sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32 +/* The current values of a sensor. */ +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0 +#define MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32 +/* The current state of a sensor. 
*/ +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36 +#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8 +#define MCDI_EVENT_DESC_PROXY_DATA_OFST 0 +#define MCDI_EVENT_DESC_PROXY_DATA_LEN 4 +#define MCDI_EVENT_DESC_PROXY_DATA_LBN 0 +#define MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32 +/* Generation count of applied configuration set */ +#define MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4 +#define MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0 +#define MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32 +/* Virtio features negotiated with the host driver. First event (CONT=1) + * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63. + */ +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0 +#define MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32 /* FCDI_EVENT structuredef */ #define FCDI_EVENT_LEN 8 @@ -802,6 +957,7 @@ #define FCDI_EVENT_LEVEL_FATAL 0x3 #define FCDI_EVENT_DATA_OFST 0 #define FCDI_EVENT_DATA_LEN 4 +#define FCDI_EVENT_LINK_STATE_STATUS_OFST 0 #define FCDI_EVENT_LINK_STATE_STATUS_LBN 0 #define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1 #define FCDI_EVENT_LINK_DOWN 0x0 /* enum */ @@ -892,7 +1048,9 @@ */ #define FCDI_EXTENDED_EVENT_PPS_LENMIN 16 #define FCDI_EXTENDED_EVENT_PPS_LENMAX 248 +#define FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016 #define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num)) +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8) /* Number of timestamps following */ #define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0 #define FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4 @@ -915,6 +1073,7 @@ #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30 +#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64 #define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64 @@ -934,24 +1093,33 @@ #define MUM_EVENT_LEVEL_FATAL 0x3 #define MUM_EVENT_DATA_OFST 0 #define MUM_EVENT_DATA_LEN 4 +#define MUM_EVENT_SENSOR_ID_OFST 0 #define MUM_EVENT_SENSOR_ID_LBN 0 #define MUM_EVENT_SENSOR_ID_WIDTH 8 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */ +#define MUM_EVENT_SENSOR_STATE_OFST 0 #define MUM_EVENT_SENSOR_STATE_LBN 8 #define MUM_EVENT_SENSOR_STATE_WIDTH 8 +#define MUM_EVENT_PORT_PHY_READY_OFST 0 #define MUM_EVENT_PORT_PHY_READY_LBN 0 #define MUM_EVENT_PORT_PHY_READY_WIDTH 1 +#define MUM_EVENT_PORT_PHY_LINK_UP_OFST 0 #define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1 #define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOL_OFST 0 #define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2 #define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOL_OFST 0 #define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3 #define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_LOS_OFST 0 #define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4 #define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_RX_LOS_OFST 0 #define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5 #define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1 +#define MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0 #define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6 #define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1 #define MUM_EVENT_DATA_LBN 0 @@ -1016,6 +1184,7 @@ * has additional checks to reject insecure calls. 
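The two-event DYNAMIC_SENSORS_STATE_CHANGE scheme described above relies on the usual MCDI event decode pattern: shift the 64-bit event word right by a field's _LBN and mask to its _WIDTH (SRC and DYNAMIC_SENSORS_STATE name the same bits 36..43 here). A minimal standalone sketch of that pattern in plain C, with the bit positions copied from the defines above; mcdi_ev_field() and decode_sensor_state_change() are illustrative helpers, not the in-tree MCDI accessor macros:

#include <stdint.h>

/* Bit positions copied from the MCDI_EVENT defines above. */
#define MCDI_EVENT_DATA_LBN                    0
#define MCDI_EVENT_DATA_WIDTH                  32
#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN   36
#define MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8

/* Illustrative helper: extract an LBN/WIDTH field from a 64-bit event. */
static inline uint64_t mcdi_ev_field(uint64_t ev, unsigned int lbn,
                                     unsigned int width)
{
        uint64_t mask = (width < 64) ? ((1ULL << width) - 1) : ~0ULL;

        return (ev >> lbn) & mask;
}

/* First event of a DYNAMIC_SENSORS_STATE_CHANGE pair: DATA carries the
 * sensor handle and the 8-bit field at bit 36 carries the new state; the
 * second event of the pair then carries the value in DATA.
 */
static inline void decode_sensor_state_change(uint64_t ev, uint32_t *handle,
                                              uint8_t *state)
{
        *handle = (uint32_t)mcdi_ev_field(ev, MCDI_EVENT_DATA_LBN,
                                          MCDI_EVENT_DATA_WIDTH);
        *state = (uint8_t)mcdi_ev_field(ev, MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN,
                                        MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH);
}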
*/ #define MC_CMD_READ32 0x1 +#undef MC_CMD_0x1_PRIVILEGE_CTG #define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -1029,11 +1198,14 @@ /* MC_CMD_READ32_OUT msgresponse */ #define MC_CMD_READ32_OUT_LENMIN 4 #define MC_CMD_READ32_OUT_LENMAX 252 +#define MC_CMD_READ32_OUT_LENMAX_MCDI2 1020 #define MC_CMD_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) #define MC_CMD_READ32_OUT_BUFFER_OFST 0 #define MC_CMD_READ32_OUT_BUFFER_LEN 4 #define MC_CMD_READ32_OUT_BUFFER_MINNUM 1 #define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 /***********************************/ @@ -1041,19 +1213,23 @@ * Write multiple 32byte words to MC memory. */ #define MC_CMD_WRITE32 0x2 +#undef MC_CMD_0x2_PRIVILEGE_CTG #define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_WRITE32_IN msgrequest */ #define MC_CMD_WRITE32_IN_LENMIN 8 #define MC_CMD_WRITE32_IN_LENMAX 252 +#define MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020 #define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num)) +#define MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4) #define MC_CMD_WRITE32_IN_ADDR_OFST 0 #define MC_CMD_WRITE32_IN_ADDR_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_OFST 4 #define MC_CMD_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1 #define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62 +#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254 /* MC_CMD_WRITE32_OUT msgresponse */ #define MC_CMD_WRITE32_OUT_LEN 0 @@ -1066,6 +1242,7 @@ * has additional checks to reject insecure calls. */ #define MC_CMD_COPYCODE 0x3 +#undef MC_CMD_0x3_PRIVILEGE_CTG #define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -1090,16 +1267,22 @@ * below) */ #define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1 +#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6 #define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1 /* Destination address */ @@ -1122,6 +1305,7 @@ * Select function for function-specific commands. */ #define MC_CMD_SET_FUNC 0x4 +#undef MC_CMD_0x4_PRIVILEGE_CTG #define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -1140,6 +1324,7 @@ * Get the instruction address from which the MC booted. 
*/ #define MC_CMD_GET_BOOT_STATUS 0x5 +#undef MC_CMD_0x5_PRIVILEGE_CTG #define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -1155,10 +1340,13 @@ #define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1 +#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2 #define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1 @@ -1170,6 +1358,7 @@ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS */ #define MC_CMD_GET_ASSERTS 0x6 +#undef MC_CMD_0x6_PRIVILEGE_CTG #define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -1211,6 +1400,104 @@ #define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136 #define MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4 +/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs + * found on Riverhead designs + */ +#define MC_CMD_GET_ASSERTS_OUT_V2_LEN 240 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. */ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26 + +/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted + * firmware version information + */ +#define MC_CMD_GET_ASSERTS_OUT_V3_LEN 360 +/* Assertion status flag. */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0 +#define MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4 +/* enum: No assertions have failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */ +/* enum: A system-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */ +/* enum: A thread-level assertion has failed. */ +/* MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */ +/* enum: The system was reset by the watchdog. 
*/ +/* MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */ +/* enum: An illegal address trap stopped the system (huntington and later) */ +/* MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */ +/* Failing PC value */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4 +/* Saved GP regs */ +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31 +/* enum: A magic value hinting that the value in this register at the time of + * the failure has likely been lost. + */ +/* MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */ +/* Failing thread address */ +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132 +#define MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4 +/* Saved Special Function Registers */ +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4 +#define MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20 +/* MC firmware build date (as Unix timestamp) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264 +/* MC firmware version number */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272 +/* MC firmware security level */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4 +/* MC firmware extra version info (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296 +#define MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64 + /***********************************/ /* MC_CMD_LOG_CTRL @@ -1218,6 +1505,7 @@ * sensor notifications and MCDI completions */ #define MC_CMD_LOG_CTRL 0x7 +#undef MC_CMD_0x7_PRIVILEGE_CTG #define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -1240,9 +1528,10 @@ /***********************************/ /* MC_CMD_GET_VERSION - * Get version information about the MC firmware. + * Get version information about adapter components. */ #define MC_CMD_GET_VERSION 0x8 +#undef MC_CMD_0x8_PRIVILEGE_CTG #define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -1303,12 +1592,107 @@ #define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32 #define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16 +/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version + * information for all adapter components. For Riverhead based designs, base MC + * firmware version fields refer to NMC firmware, while CMC firmware data is in + * dedicated CMC fields. 
Flags indicate which data is present in the response + * (depending on which components exist on a particular adapter) + */ +#define MC_CMD_GET_VERSION_V2_OUT_LEN 304 +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */ +/* MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */ +/* Enum values, see field(s): */ +/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */ +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4 +#define MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4 +/* 128bit mask of functions supported by the current firmware */ +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24 +#define MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28 +/* extra info */ +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32 +#define MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16 +/* Flags indicating which extended fields are valid */ +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2 +#define MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1 +/* MC firmware unique build ID (as binary SHA-1 value) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20 +/* MC firmware security level */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4 +/* MC firmware build name (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76 +#define MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4 +/* SUC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. 
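Because each block of extended version data is only valid when its *_EXT_INFO_PRESENT flag is set, a caller checks the FLAGS word at offset 48 before touching the corresponding fields. A rough sketch of that check against a raw response buffer, assuming the usual little-endian MCDI payload layout; the offsets and bit number are copied from the defines above, and read_le32()/get_mcfw_build_name() are illustrative helpers rather than driver code:

#include <stdint.h>
#include <string.h>

/* Offsets/bits copied from the MC_CMD_GET_VERSION_V2_OUT defines above. */
#define GET_VERSION_V2_OUT_FLAGS_OFST                48
#define GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
#define GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST      76
#define GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN       64

/* Stand-in little-endian 32-bit read from an MCDI response buffer. */
static uint32_t read_le32(const uint8_t *buf, size_t ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

/* Copy out the MC firmware build name only if the response marks it valid.
 * len is the size of the caller's buffer and must be greater than zero.
 */
static int get_mcfw_build_name(const uint8_t *resp, char *name, size_t len)
{
        uint32_t flags = read_le32(resp, GET_VERSION_V2_OUT_FLAGS_OFST);
        size_t n = len - 1;

        if (!(flags & (1u << GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN)))
                return -1;      /* extended MC firmware info not reported */

        if (n > GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN)
                n = GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN;
        memcpy(name, resp + GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST, n);
        name[n] = '\0';
        return 0;
}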
+ */ +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164 +#define MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4 +/* The CMC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4 +/* CMC firmware build date (as 64-bit Unix timestamp) */ +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184 +#define MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188 +/* FPGA version as three numbers. On Riverhead based systems this field uses + * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG): + * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1 + * => B, ...) FPGA_VERSION[2]: Sub-revision number + */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3 +/* Extra FPGA revision information (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204 +#define MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16 +/* Board name / adapter model (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16 +/* Board revision number */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4 +/* Board serial number (as null-terminated US-ASCII string) */ +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240 +#define MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64 + /***********************************/ /* MC_CMD_PTP * Perform PTP operation */ #define MC_CMD_PTP 0xb +#undef MC_CMD_0xb_PRIVILEGE_CTG #define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -1434,7 +1818,9 @@ /* MC_CMD_PTP_IN_TRANSMIT msgrequest */ #define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 #define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020 #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1) /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ @@ -1447,6 +1833,7 @@ #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ #define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 @@ -1599,7 +1986,9 @@ /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */ #define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13 #define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252 +#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020 #define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num)) +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1) /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_CMD_LEN 4 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ @@ -1610,6 +1999,7 @@ #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1 #define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240 +#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */ #define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16 @@ -1774,8 +2164,10 @@ /* Original field containing queue ID. Now extended to include flags. 
*/ #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16 +#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31 #define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1 @@ -1940,12 +2332,15 @@ /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20 #define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020 #define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num)) +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20) /* A set of host and NIC times */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1 #define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12 +#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51 /* Host time immediately before NIC's hardware clock read */ #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0 #define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4 @@ -2022,11 +2417,14 @@ /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */ #define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1 #define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252 +#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020 #define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num)) +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1) #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0 #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1 #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1 #define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252 +#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020 /* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */ #define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4 @@ -2075,12 +2473,16 @@ /* Various PTP capabilities */ #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1 +#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1 #define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12 @@ -2143,6 +2545,7 @@ * Read 32bit words from the indirect memory map. 
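Since the original queue-ID word now also carries flag bits, the value written at offset 8 of a TIME_EVENT_SUBSCRIBE request is composed rather than being a bare queue number: the event queue ID goes in bits 0..15 and REPORT_SYNC_STATUS in bit 31. A small sketch of that composition; bit positions are copied from the defines above and ptp_subscribe_queue_word() is an illustrative helper, not the driver's request-building code:

#include <stdint.h>

/* Bit layout of the TIME_EVENT_SUBSCRIBE QUEUE word, copied from above. */
#define TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN           0
#define TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31

/* Compose the QUEUE dword: event queue ID in bits 0..15, plus an optional
 * request for sync status reporting in bit 31.
 */
static inline uint32_t ptp_subscribe_queue_word(uint16_t evq_id,
                                                int want_sync_status)
{
        uint32_t word = (uint32_t)evq_id << TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN;

        if (want_sync_status)
                word |= 1u << TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
        return word;
}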
*/ #define MC_CMD_CSR_READ32 0xc +#undef MC_CMD_0xc_PRIVILEGE_CTG #define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -2159,12 +2562,15 @@ /* MC_CMD_CSR_READ32_OUT msgresponse */ #define MC_CMD_CSR_READ32_OUT_LENMIN 4 #define MC_CMD_CSR_READ32_OUT_LENMAX 252 +#define MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020 #define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4) /* The last dword is the status, not a value read */ #define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0 #define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4 #define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1 #define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63 +#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255 /***********************************/ @@ -2172,13 +2578,16 @@ * Write 32bit dwords to the indirect memory map. */ #define MC_CMD_CSR_WRITE32 0xd +#undef MC_CMD_0xd_PRIVILEGE_CTG #define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_CSR_WRITE32_IN msgrequest */ #define MC_CMD_CSR_WRITE32_IN_LENMIN 12 #define MC_CMD_CSR_WRITE32_IN_LENMAX 252 +#define MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020 #define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num)) +#define MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4) /* Address */ #define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0 #define MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4 @@ -2188,6 +2597,7 @@ #define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4 #define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1 #define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61 +#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253 /* MC_CMD_CSR_WRITE32_OUT msgresponse */ #define MC_CMD_CSR_WRITE32_OUT_LEN 4 @@ -2201,6 +2611,7 @@ * MCDI command to avoid creating too many MCDI commands. */ #define MC_CMD_HP 0x54 +#undef MC_CMD_0x54_PRIVILEGE_CTG #define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -2247,6 +2658,7 @@ * Get stack information. */ #define MC_CMD_STACKINFO 0xf +#undef MC_CMD_0xf_PRIVILEGE_CTG #define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -2256,12 +2668,15 @@ /* MC_CMD_STACKINFO_OUT msgresponse */ #define MC_CMD_STACKINFO_OUT_LENMIN 12 #define MC_CMD_STACKINFO_OUT_LENMAX 252 +#define MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020 #define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12) /* (thread ptr, stack size, free space) for each thread in system */ #define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1 #define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21 +#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85 /***********************************/ @@ -2269,6 +2684,7 @@ * MDIO register read. */ #define MC_CMD_MDIO_READ 0x10 +#undef MC_CMD_0x10_PRIVILEGE_CTG #define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -2316,6 +2732,7 @@ * MDIO register write. */ #define MC_CMD_MDIO_WRITE 0x11 +#undef MC_CMD_0x11_PRIVILEGE_CTG #define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -2363,13 +2780,16 @@ * Write DBI register(s). */ #define MC_CMD_DBI_WRITE 0x12 +#undef MC_CMD_0x12_PRIVILEGE_CTG #define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_WRITE_IN msgrequest */ #define MC_CMD_DBI_WRITE_IN_LENMIN 12 #define MC_CMD_DBI_WRITE_IN_LENMAX 252 +#define MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020 #define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num)) +#define MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12) /* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF. 
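The _MCDI2 variants added throughout this header reflect the larger 1020-byte payload limit of the v2 MCDI transport, and the new _NUM(len) helpers invert the corresponding LEN(num) formulas so a caller can turn a message length back into an element count. A quick self-contained check of that arithmetic for MC_CMD_CSR_WRITE32, using the values exactly as defined above (macro names shortened for the example):

#include <assert.h>

/* Copied from the MC_CMD_CSR_WRITE32_IN defines above. */
#define CSR_WRITE32_IN_LENMAX          252
#define CSR_WRITE32_IN_LENMAX_MCDI2    1020
#define CSR_WRITE32_IN_LEN(num)        (8 + 4 * (num))
#define CSR_WRITE32_IN_BUFFER_NUM(len) (((len) - 8) / 4)

int main(void)
{
        /* 61 dwords fit in a classic 252-byte request, 253 in an MCDI2 one,
         * matching BUFFER_MAXNUM and BUFFER_MAXNUM_MCDI2 above.
         */
        assert(CSR_WRITE32_IN_BUFFER_NUM(CSR_WRITE32_IN_LENMAX) == 61);
        assert(CSR_WRITE32_IN_BUFFER_NUM(CSR_WRITE32_IN_LENMAX_MCDI2) == 253);
        /* LEN(num) round-trips back to the corresponding maximum length. */
        assert(CSR_WRITE32_IN_LEN(61) == CSR_WRITE32_IN_LENMAX);
        assert(CSR_WRITE32_IN_LEN(253) == CSR_WRITE32_IN_LENMAX_MCDI2);
        return 0;
}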
*/ @@ -2377,6 +2797,7 @@ #define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12 #define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1 #define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21 +#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85 /* MC_CMD_DBI_WRITE_OUT msgresponse */ #define MC_CMD_DBI_WRITE_OUT_LEN 0 @@ -2389,10 +2810,13 @@ #define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16 #define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4 #define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15 #define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4 #define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14 #define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1 #define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32 @@ -2526,6 +2950,7 @@ * Returns the MC firmware configuration structure. */ #define MC_CMD_GET_BOARD_CFG 0x18 +#undef MC_CMD_0x18_PRIVILEGE_CTG #define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -2535,7 +2960,9 @@ /* MC_CMD_GET_BOARD_CFG_OUT msgresponse */ #define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96 #define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136 +#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136 #define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num)) +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2) #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4 #define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4 @@ -2590,6 +3017,7 @@ #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12 #define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32 +#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32 /***********************************/ @@ -2597,13 +3025,16 @@ * Read DBI register(s) -- extended functionality */ #define MC_CMD_DBI_READX 0x19 +#undef MC_CMD_0x19_PRIVILEGE_CTG #define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_DBI_READX_IN msgrequest */ #define MC_CMD_DBI_READX_IN_LENMIN 8 #define MC_CMD_DBI_READX_IN_LENMAX 248 +#define MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016 #define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num)) +#define MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8) /* Each Read op consists of an address (offset 0), VF/CS2) */ #define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0 #define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8 @@ -2611,16 +3042,20 @@ #define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4 #define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1 #define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31 +#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127 /* MC_CMD_DBI_READX_OUT msgresponse */ #define MC_CMD_DBI_READX_OUT_LENMIN 4 #define MC_CMD_DBI_READX_OUT_LENMAX 252 +#define MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020 #define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4) /* Value */ #define MC_CMD_DBI_READX_OUT_VALUE_OFST 0 #define MC_CMD_DBI_READX_OUT_VALUE_LEN 4 #define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1 #define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63 +#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */ #define MC_CMD_DBIRDOP_TYPEDEF_LEN 8 @@ -2630,10 +3065,13 @@ #define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4 #define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16 #define 
MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16 +#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4 #define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15 #define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1 +#define MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4 #define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14 #define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1 #define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32 @@ -2645,6 +3083,7 @@ * Set the 16byte seed for the MC pseudo-random generator. */ #define MC_CMD_SET_RAND_SEED 0x1a +#undef MC_CMD_0x1a_PRIVILEGE_CTG #define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -2670,12 +3109,15 @@ /* MC_CMD_LTSSM_HIST_OUT msgresponse */ #define MC_CMD_LTSSM_HIST_OUT_LENMIN 0 #define MC_CMD_LTSSM_HIST_OUT_LENMAX 252 +#define MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020 #define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4) /* variable number of LTSSM values, as bytes. The history is read-to-clear. */ #define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0 #define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4 #define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0 #define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63 +#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255 /***********************************/ @@ -2688,6 +3130,7 @@ * platforms. */ #define MC_CMD_DRV_ATTACH 0x1c +#undef MC_CMD_0x1c_PRIVILEGE_CTG #define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -2696,18 +3139,33 @@ /* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4 +#define MC_CMD_DRV_ATTACH_OFST 0 #define MC_CMD_DRV_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0 #define MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0 #define MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_OFST 0 #define MC_CMD_DRV_PREBOOT_LBN 1 #define MC_CMD_DRV_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0 #define MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1 #define MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0 #define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2 #define MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0 #define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3 #define MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 #define MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4 @@ -2736,9 +3194,91 @@ * bug69716) */ #define MC_CMD_FW_L3XUDP 0x7 +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. 
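The new WANT_V2_LINKCHANGES bit sits alongside the existing attach flags in NEW_STATE, so a driver that wants LINKCHANGE_V2 and MODULECHANGE events rather than the legacy LINKCHANGE event ORs it in when it attaches. A minimal sketch of building that word; the bit numbers are copied from the defines above, and drv_attach_new_state() is an illustrative helper, not the sfc driver's attach path:

#include <stdint.h>

/* Bit positions copied from the MC_CMD_DRV_ATTACH_IN defines above. */
#define DRV_ATTACH_IN_ATTACH_LBN              0
#define DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4

/* NEW_STATE word for an attach request, optionally opting in to the v2
 * link-change reporting (LINKCHANGE_V2 / MODULECHANGE events).
 */
static inline uint32_t drv_attach_new_state(int want_v2_linkchanges)
{
        uint32_t state = 1u << DRV_ATTACH_IN_ATTACH_LBN;

        if (want_v2_linkchanges)
                state |= 1u << DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN;
        return state;
}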
+ */ +#define MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe /* enum: Only this option is allowed for non-admin functions */ #define MC_CMD_FW_DONT_CARE 0xffffffff +/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver + * version + */ +#define MC_CMD_DRV_ATTACH_IN_V2_LEN 32 +/* new state to set if UPDATE=1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4 +/* MC_CMD_DRV_ATTACH_OFST 0 */ +/* MC_CMD_DRV_ATTACH_LBN 0 */ +/* MC_CMD_DRV_ATTACH_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1 +/* MC_CMD_DRV_PREBOOT_OFST 0 */ +/* MC_CMD_DRV_PREBOOT_LBN 1 */ +/* MC_CMD_DRV_PREBOOT_WIDTH 1 */ +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1 +#define MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2 +#define MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5 +#define MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1 +/* 1 to set new state, or 0 to just report the existing state */ +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4 +#define MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4 +/* preferred datapath firmware (for Huntington; ignored for Siena) */ +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8 +#define MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4 +/* enum: Prefer to use full featured firmware */ +/* MC_CMD_FW_FULL_FEATURED 0x0 */ +/* enum: Prefer to use firmware with fewer features but lower latency */ +/* MC_CMD_FW_LOW_LATENCY 0x1 */ +/* enum: Prefer to use firmware for SolarCapture packed stream mode */ +/* MC_CMD_FW_PACKED_STREAM 0x2 */ +/* enum: Prefer to use firmware with fewer features and simpler TX event + * batching but higher TX packet rate + */ +/* MC_CMD_FW_HIGH_TX_RATE 0x3 */ +/* enum: Reserved value */ +/* MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */ +/* enum: Prefer to use firmware with additional "rules engine" filtering + * support + */ +/* MC_CMD_FW_RULES_ENGINE 0x5 */ +/* enum: Prefer to use firmware with additional DPDK support */ +/* MC_CMD_FW_DPDK 0x6 */ +/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and + * bug69716) + */ +/* MC_CMD_FW_L3XUDP 0x7 */ +/* enum: Requests that the MC keep whatever datapath firmware is currently + * running. It's used for test purposes, where we want to be able to shmboot + * special test firmware variants. This option is only recognised in eftest + * (i.e. non-production) builds. + */ +/* MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */ +/* enum: Only this option is allowed for non-admin functions */ +/* MC_CMD_FW_DONT_CARE 0xffffffff */ +/* Version of the driver to be reported by management protocols (e.g. 
NC-SI) + * handled by the NIC. This is a zero-terminated ASCII string. + */ +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12 +#define MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20 + /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 /* previous or existing state, see the bitmask at NEW_STATE */ @@ -2770,6 +3310,13 @@ * input. */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4 +/* enum: Used during development only. Should no longer be used. */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5 +/* enum: If set, indicates that TX only spreading is enabled. Even-numbered + * TXQs will use one engine, and odd-numbered TXQs will use the other. This + * also has the effect that only even-numbered RXQs will receive traffic. + */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5 /***********************************/ @@ -2795,6 +3342,7 @@ * use MC_CMD_ENTITY_RESET instead. */ #define MC_CMD_PORT_RESET 0x20 +#undef MC_CMD_0x20_PRIVILEGE_CTG #define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -2821,6 +3369,7 @@ */ #define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0 #define MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4 +#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0 #define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1 @@ -2927,17 +3476,22 @@ * Copy the given ASCII string out onto UART and/or out of the network port. */ #define MC_CMD_PUTS 0x23 +#undef MC_CMD_0x23_PRIVILEGE_CTG #define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 #define MC_CMD_PUTS_IN_LENMAX 252 +#define MC_CMD_PUTS_IN_LENMAX_MCDI2 1020 #define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) +#define MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1) #define MC_CMD_PUTS_IN_DEST_OFST 0 #define MC_CMD_PUTS_IN_DEST_LEN 4 +#define MC_CMD_PUTS_IN_UART_OFST 0 #define MC_CMD_PUTS_IN_UART_LBN 0 #define MC_CMD_PUTS_IN_UART_WIDTH 1 +#define MC_CMD_PUTS_IN_PORT_OFST 0 #define MC_CMD_PUTS_IN_PORT_LBN 1 #define MC_CMD_PUTS_IN_PORT_WIDTH 1 #define MC_CMD_PUTS_IN_DHOST_OFST 4 @@ -2946,6 +3500,7 @@ #define MC_CMD_PUTS_IN_STRING_LEN 1 #define MC_CMD_PUTS_IN_STRING_MINNUM 1 #define MC_CMD_PUTS_IN_STRING_MAXNUM 240 +#define MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008 /* MC_CMD_PUTS_OUT msgresponse */ #define MC_CMD_PUTS_OUT_LEN 0 @@ -2957,6 +3512,7 @@ * 'zombie' state. 
Locks required: None */ #define MC_CMD_GET_PHY_CFG 0x24 +#undef MC_CMD_0x24_PRIVILEGE_CTG #define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -2968,18 +3524,25 @@ /* flags */ #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0 #define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1 #define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2 #define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3 #define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4 #define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5 #define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1 +#define MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0 #define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6 #define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1 /* ?? */ @@ -2988,46 +3551,67 @@ /* Bitmask of supported capabilities */ #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8 #define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4 +#define MC_CMD_PHY_CAP_10HDX_OFST 8 #define MC_CMD_PHY_CAP_10HDX_LBN 1 #define MC_CMD_PHY_CAP_10HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10FDX_OFST 8 #define MC_CMD_PHY_CAP_10FDX_LBN 2 #define MC_CMD_PHY_CAP_10FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100HDX_OFST 8 #define MC_CMD_PHY_CAP_100HDX_LBN 3 #define MC_CMD_PHY_CAP_100HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_100FDX_OFST 8 #define MC_CMD_PHY_CAP_100FDX_LBN 4 #define MC_CMD_PHY_CAP_100FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000HDX_OFST 8 #define MC_CMD_PHY_CAP_1000HDX_LBN 5 #define MC_CMD_PHY_CAP_1000HDX_WIDTH 1 +#define MC_CMD_PHY_CAP_1000FDX_OFST 8 #define MC_CMD_PHY_CAP_1000FDX_LBN 6 #define MC_CMD_PHY_CAP_1000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_10000FDX_OFST 8 #define MC_CMD_PHY_CAP_10000FDX_LBN 7 #define MC_CMD_PHY_CAP_10000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_PAUSE_OFST 8 #define MC_CMD_PHY_CAP_PAUSE_LBN 8 #define MC_CMD_PHY_CAP_PAUSE_WIDTH 1 +#define MC_CMD_PHY_CAP_ASYM_OFST 8 #define MC_CMD_PHY_CAP_ASYM_LBN 9 #define MC_CMD_PHY_CAP_ASYM_WIDTH 1 +#define MC_CMD_PHY_CAP_AN_OFST 8 #define MC_CMD_PHY_CAP_AN_LBN 10 #define MC_CMD_PHY_CAP_AN_WIDTH 1 +#define MC_CMD_PHY_CAP_40000FDX_OFST 8 #define MC_CMD_PHY_CAP_40000FDX_LBN 11 #define MC_CMD_PHY_CAP_40000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_DDM_OFST 8 #define MC_CMD_PHY_CAP_DDM_LBN 12 #define MC_CMD_PHY_CAP_DDM_WIDTH 1 +#define MC_CMD_PHY_CAP_100000FDX_OFST 8 #define MC_CMD_PHY_CAP_100000FDX_LBN 13 #define MC_CMD_PHY_CAP_100000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_25000FDX_OFST 8 #define MC_CMD_PHY_CAP_25000FDX_LBN 14 #define MC_CMD_PHY_CAP_25000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_50000FDX_OFST 8 #define MC_CMD_PHY_CAP_50000FDX_LBN 15 #define MC_CMD_PHY_CAP_50000FDX_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_OFST 8 #define MC_CMD_PHY_CAP_BASER_FEC_LBN 16 #define MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8 #define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17 #define MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_RS_FEC_OFST 8 #define MC_CMD_PHY_CAP_RS_FEC_LBN 18 #define MC_CMD_PHY_CAP_RS_FEC_WIDTH 1 +#define 
MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8 #define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19 #define MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8 #define MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20 #define MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1 +#define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8 #define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21 #define MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1 /* ?? */ @@ -3084,6 +3668,7 @@ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held) */ #define MC_CMD_START_BIST 0x25 +#undef MC_CMD_0x25_PRIVILEGE_CTG #define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -3123,6 +3708,7 @@ * EACCES (if PHY_LOCK is not held). */ #define MC_CMD_POLL_BIST 0x26 +#undef MC_CMD_0x26_PRIVILEGE_CTG #define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -3295,11 +3881,14 @@ /* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */ #define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4 #define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252 +#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020 #define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num)) +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4) #define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0 #define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4 #define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1 #define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63 +#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255 /* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */ #define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0 @@ -3310,6 +3899,7 @@ * Returns a bitmask of loopback modes available at each speed. */ #define MC_CMD_GET_LOOPBACK_MODES 0x28 +#undef MC_CMD_0x28_PRIVILEGE_CTG #define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -3605,6 +4195,7 @@ * ETIME. */ #define MC_CMD_GET_LINK 0x29 +#undef MC_CMD_0x29_PRIVILEGE_CTG #define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -3635,18 +4226,30 @@ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16 #define MC_CMD_GET_LINK_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16 #define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0 #define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16 #define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1 #define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16 #define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2 #define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16 #define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3 #define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16 #define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6 #define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16 #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7 #define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1 /* This returns the negotiated flow control value. 
*/ #define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20 #define MC_CMD_GET_LINK_OUT_FCNTL_LEN 4 @@ -3654,12 +4257,16 @@ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24 #define MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4 +#define MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 #define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 +#define MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 #define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 #define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 #define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 #define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 +#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 #define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 @@ -3687,18 +4294,30 @@ /* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ #define MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0 #define MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1 #define MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2 #define MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3 #define MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6 #define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16 #define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7 #define MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1 /* This returns the negotiated flow control value. 
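The new MODULE_UP bit comes paired with MODULE_UP_VALID, which suggests the valid bit is checked before the state bit is trusted; both live in the FLAGS word at offset 16 of the GET_LINK response. A short sketch of reading them from a raw response buffer, assuming the usual little-endian MCDI payload layout; offsets and bit numbers are copied from the defines above, and mcdi_le32()/get_link_module_state() are illustrative helpers:

#include <stdint.h>

/* Copied from the MC_CMD_GET_LINK_OUT defines above. */
#define GET_LINK_OUT_FLAGS_OFST          16
#define GET_LINK_OUT_MODULE_UP_VALID_LBN 8
#define GET_LINK_OUT_MODULE_UP_LBN       9

/* Illustrative little-endian dword read from a response buffer. */
static uint32_t mcdi_le32(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

/* Returns 1 if a module is reported up, 0 if reported down, and -1 if the
 * firmware did not report module state (MODULE_UP_VALID clear).
 */
static int get_link_module_state(const uint8_t *resp)
{
        uint32_t flags = mcdi_le32(resp, GET_LINK_OUT_FLAGS_OFST);

        if (!(flags & (1u << GET_LINK_OUT_MODULE_UP_VALID_LBN)))
                return -1;
        return !!(flags & (1u << GET_LINK_OUT_MODULE_UP_LBN));
}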
*/ #define MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20 #define MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4 @@ -3706,12 +4325,16 @@ /* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */ #define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24 #define MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4 +/* MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */ /* MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */ /* MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */ /* MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */ /* MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */ /* MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */ /* MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */ +/* MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */ /* MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */ /* MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */ /* True local device capabilities (taking into account currently used PMD/MDI, @@ -3735,32 +4358,45 @@ /* FEC_TYPE/TYPE */ #define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4 +#define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0 #define MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1 #define MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2 #define MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3 #define MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4 #define MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5 #define MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6 #define MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7 #define MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40 #define MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8 #define MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9 +#define MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1 /***********************************/ /* MC_CMD_SET_LINK * Write the unified MAC/PHY link configuration. Locks required: None. Return - * code: 0, EINVAL, ETIME + * code: 0, EINVAL, ETIME, EAGAIN */ #define MC_CMD_SET_LINK 0x2a +#undef MC_CMD_0x2a_PRIVILEGE_CTG #define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -3774,12 +4410,18 @@ /* Flags */ #define MC_CMD_SET_LINK_IN_FLAGS_OFST 4 #define MC_CMD_SET_LINK_IN_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4 #define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0 #define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_POWEROFF_OFST 4 #define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1 #define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_TXDIS_OFST 4 #define MC_CMD_SET_LINK_IN_TXDIS_LBN 2 #define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1 /* Loopback mode. 
*/ #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8 #define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4 @@ -3791,6 +4433,50 @@ #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12 #define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4 +/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence + * number to ensure this SET_LINK command corresponds to the latest + * MODULECHANGE event. + */ +#define MC_CMD_SET_LINK_IN_V2_LEN 17 +/* Near-side advertised capabilities. Refer to + * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions. + */ +#define MC_CMD_SET_LINK_IN_V2_CAP_OFST 0 +#define MC_CMD_SET_LINK_IN_V2_CAP_LEN 4 +/* Flags */ +#define MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1 +#define MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2 +#define MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3 +#define MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1 +/* Loopback mode. */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */ +/* A loopback speed of "0" is supported, and means (choose any available + * speed). + */ +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12 +#define MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7 +#define MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1 + /* MC_CMD_SET_LINK_OUT msgresponse */ #define MC_CMD_SET_LINK_OUT_LEN 0 @@ -3800,6 +4486,7 @@ * Set identification LED state. Locks required: None. Return code: 0, EINVAL */ #define MC_CMD_SET_ID_LED 0x2b +#undef MC_CMD_0x2b_PRIVILEGE_CTG #define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -3821,6 +4508,7 @@ * Set MAC configuration. Locks required: None. 
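The MODULE_SEQ byte at offset 16 is what ties a V2 SET_LINK to the MODULECHANGE event that prompted it: the low 7 bits carry the sequence number and, going by its name, the IGNORE bit asks the firmware to skip the sequence check. A small sketch of composing that byte (bit layout copied from the defines above; set_link_module_seq() is an illustrative helper, and obtaining the sequence value from the event is left to the caller):

#include <stdint.h>

/* Bit layout of the SET_LINK_IN_V2 MODULE_SEQ byte, copied from above. */
#define SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN   0
#define SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
#define SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN   7

/* seq is the value taken from the most recent MODULECHANGE event; pass a
 * non-zero ignore_seq to request that the sequence check be skipped.
 */
static inline uint8_t set_link_module_seq(uint8_t seq, int ignore_seq)
{
        uint8_t byte = (seq & ((1u << SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH) - 1))
                       << SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN;

        if (ignore_seq)
                byte |= 1u << SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN;
        return byte;
}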
Return code: 0, EINVAL */ #define MC_CMD_SET_MAC 0x2c +#undef MC_CMD_0x2c_PRIVILEGE_CTG #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -3839,8 +4527,10 @@ #define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_IN_REJECT_OFST 16 #define MC_CMD_SET_MAC_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_IN_FCNTL_OFST 20 @@ -3859,6 +4549,7 @@ #define MC_CMD_FCNTL_GENERATE 0x5 #define MC_CMD_SET_MAC_IN_FLAGS_OFST 24 #define MC_CMD_SET_MAC_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 @@ -3877,8 +4568,10 @@ #define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 #define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 #define MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 #define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 #define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 @@ -3897,6 +4590,7 @@ /* MC_CMD_FCNTL_GENERATE 0x5 */ #define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 #define MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 /* Select which parameters to configure. A parameter will only be modified if @@ -3906,14 +4600,19 @@ */ #define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 #define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 #define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 #define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 #define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28 #define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 #define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 @@ -3940,6 +4639,7 @@ * Returns: 0, ETIME */ #define MC_CMD_PHY_STATS 0x2d +#undef MC_CMD_0x2d_PRIVILEGE_CTG #define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -4021,6 +4721,7 @@ * effect. 
Returns: 0, ETIME */ #define MC_CMD_MAC_STATS 0x2e +#undef MC_CMD_0x2e_PRIVILEGE_CTG #define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -4033,18 +4734,25 @@ #define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4 #define MC_CMD_MAC_STATS_IN_CMD_OFST 8 #define MC_CMD_MAC_STATS_IN_CMD_LEN 4 +#define MC_CMD_MAC_STATS_IN_DMA_OFST 8 #define MC_CMD_MAC_STATS_IN_DMA_LBN 0 #define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_CLEAR_OFST 8 #define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1 #define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8 #define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2 #define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8 #define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3 #define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8 #define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4 #define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8 #define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5 #define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1 +#define MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16 #define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16 /* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as @@ -4321,6 +5029,37 @@ /* Other enum values, see field(s): */ /* MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */ +/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0 + +/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */ +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3) +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4 +#define MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4 +/* enum: Start of V4 stats buffer space */ +#define MC_CMD_MAC_V4_DMABUF_START 0x79 +/* enum: RXDP counter: Number of packets truncated because scattering was + * disabled. + */ +#define MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79 +/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting + * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_IDLE 0x7a +/* enum: RXDP counter: Number of times the RXDP timed out while head of line + * blocking. Will be zero unless RXDP_HLB_IDLE capability is set. + */ +#define MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b +/* enum: This includes the space at offset 124 which is the final + * GENERATION_END in a MAC_STATS_V4 response and otherwise unused. 
+ */ +#define MC_CMD_MAC_NSTATS_V4 0x7d +/* Other enum values, see field(s): */ +/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */ + /***********************************/ /* MC_CMD_SRIOV @@ -4403,12 +5142,15 @@ /* MC_CMD_MEMCPY_IN msgrequest */ #define MC_CMD_MEMCPY_IN_LENMIN 32 #define MC_CMD_MEMCPY_IN_LENMAX 224 +#define MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992 #define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num)) +#define MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32) /* see MC_CMD_MEMCPY_RECORD_TYPEDEF */ #define MC_CMD_MEMCPY_IN_RECORD_OFST 0 #define MC_CMD_MEMCPY_IN_RECORD_LEN 32 #define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1 #define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7 +#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31 /* MC_CMD_MEMCPY_OUT msgresponse */ #define MC_CMD_MEMCPY_OUT_LEN 0 @@ -4419,6 +5161,7 @@ * Set a WoL filter. */ #define MC_CMD_WOL_FILTER_SET 0x32 +#undef MC_CMD_0x32_PRIVILEGE_CTG #define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -4515,8 +5258,10 @@ /* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */ #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0 #define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1 +#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8 #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1 #define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1 @@ -4531,6 +5276,7 @@ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS */ #define MC_CMD_WOL_FILTER_REMOVE 0x33 +#undef MC_CMD_0x33_PRIVILEGE_CTG #define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -4549,6 +5295,7 @@ * ENOSYS */ #define MC_CMD_WOL_FILTER_RESET 0x34 +#undef MC_CMD_0x34_PRIVILEGE_CTG #define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -4586,6 +5333,7 @@ * Locks required: none. Returns: 0 */ #define MC_CMD_NVRAM_TYPES 0x36 +#undef MC_CMD_0x36_PRIVILEGE_CTG #define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4647,6 +5395,7 @@ * EINVAL (bad type). 
*/ #define MC_CMD_NVRAM_INFO 0x37 +#undef MC_CMD_0x37_PRIVILEGE_CTG #define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4669,16 +5418,25 @@ #define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12 +#define MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3 +#define MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6 #define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1 +#define MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12 #define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 @@ -4698,14 +5456,19 @@ #define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4 #define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 #define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 #define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5 #define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 #define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 #define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 @@ -4729,6 +5492,7 @@ * EPERM. 
*/ #define MC_CMD_NVRAM_UPDATE_START 0x38 +#undef MC_CMD_0x38_PRIVILEGE_CTG #define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4753,6 +5517,7 @@ /* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 @@ -4767,6 +5532,7 @@ * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_READ 0x39 +#undef MC_CMD_0x39_PRIVILEGE_CTG #define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4820,11 +5586,14 @@ /* MC_CMD_NVRAM_READ_OUT msgresponse */ #define MC_CMD_NVRAM_READ_OUT_LENMIN 1 #define MC_CMD_NVRAM_READ_OUT_LENMAX 252 +#define MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020 #define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1) #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020 /***********************************/ @@ -4834,13 +5603,16 @@ * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_WRITE 0x3a +#undef MC_CMD_0x3a_PRIVILEGE_CTG #define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_NVRAM_WRITE_IN msgrequest */ #define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 #define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020 #define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1) #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 #define MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4 /* Enum values, see field(s): */ @@ -4853,6 +5625,7 @@ #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008 /* MC_CMD_NVRAM_WRITE_OUT msgresponse */ #define MC_CMD_NVRAM_WRITE_OUT_LEN 0 @@ -4865,6 +5638,7 @@ * PHY_LOCK required and not held) */ #define MC_CMD_NVRAM_ERASE 0x3b +#undef MC_CMD_0x3b_PRIVILEGE_CTG #define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4894,6 +5668,7 @@ * the error EPERM. */ #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c +#undef MC_CMD_0x3c_PRIVILEGE_CTG #define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -4922,8 +5697,15 @@ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2 +#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1 /* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH * response. 
Use NVRAM_UPDATE_FINISH_V2_OUT in new code @@ -4946,7 +5728,10 @@ * has completed. */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4 -/* Result of nvram update completion processing */ +/* Result of nvram update completion processing. Result codes that indicate an + * internal build failure and therefore not expected to be seen by customers in + * the field are marked with a prefix 'Internal-error'. + */ #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0 #define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4 /* enum: Invalid return code; only non-zero values are defined. Defined as @@ -4985,6 +5770,51 @@ #define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc /* enum: The image has a lower security level than the current firmware. */ #define MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd +/* enum: Internal-error. The signed image is missing the 'contents' section, + * where the 'contents' section holds the actual image payload to be applied. + */ +#define MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe +/* enum: Internal-error. The bundle header is invalid. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf +/* enum: Internal-error. The bundle does not have a valid reflash image layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10 +/* enum: Internal-error. The bundle has an inconsistent layout of components or + * incorrect checksum. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11 +/* enum: Internal-error. The bundle manifest is inconsistent with components in + * the bundle. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12 +/* enum: Internal-error. The number of components in a bundle do not match the + * number of components advertised by the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13 +/* enum: Internal-error. The bundle contains too many components for the MC + * firmware to process + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14 +/* enum: Internal-error. The bundle manifest has an invalid/inconsistent + * component. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15 +/* enum: Internal-error. The hash of a component does not match the hash stored + * in the bundle manifest. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16 +/* enum: Internal-error. Component hash calculation failed. */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17 +/* enum: Internal-error. The component does not have a valid reflash image + * layout. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18 +/* enum: The bundle processing code failed to copy a component to its target + * partition. + */ +#define MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19 +/* enum: The update operation is in-progress. */ +#define MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a /***********************************/ @@ -5006,6 +5836,7 @@ * DATALEN=0 */ #define MC_CMD_REBOOT 0x3d +#undef MC_CMD_0x3d_PRIVILEGE_CTG #define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5026,6 +5857,7 @@ * thread address. 
*/ #define MC_CMD_SCHEDINFO 0x3e +#undef MC_CMD_0x3e_PRIVILEGE_CTG #define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5035,11 +5867,14 @@ /* MC_CMD_SCHEDINFO_OUT msgresponse */ #define MC_CMD_SCHEDINFO_OUT_LENMIN 4 #define MC_CMD_SCHEDINFO_OUT_LENMAX 252 +#define MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020 #define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4) #define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0 #define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4 #define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1 #define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63 +#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255 /***********************************/ @@ -5048,6 +5883,7 @@ * mode to the specified value. Returns the old mode. */ #define MC_CMD_REBOOT_MODE 0x3f +#undef MC_CMD_0x3f_PRIVILEGE_CTG #define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -5063,6 +5899,7 @@ #define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum: snapper fake POR */ #define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4 +#define MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0 #define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7 #define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1 @@ -5104,6 +5941,7 @@ * Locks required: None Returns: 0 */ #define MC_CMD_SENSOR_INFO 0x41 +#undef MC_CMD_0x41_PRIVILEGE_CTG #define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -5121,10 +5959,29 @@ #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0 #define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4 +/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8 +/* Which page of sensors to report. + * + * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit). + * + * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc. + */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1 + /* MC_CMD_SENSOR_INFO_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020 #define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) #define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0 #define MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4 /* enum: Controller temperature: degC */ @@ -5301,6 +6158,22 @@ #define MC_CMD_SENSOR_IN_1V3 0x55 /* enum: 1.3v power current: mA */ #define MC_CMD_SENSOR_IN_I1V3 0x56 +/* enum: Engineering sensor 1 */ +#define MC_CMD_SENSOR_ENGINEERING_1 0x57 +/* enum: Engineering sensor 2 */ +#define MC_CMD_SENSOR_ENGINEERING_2 0x58 +/* enum: Engineering sensor 3 */ +#define MC_CMD_SENSOR_ENGINEERING_3 0x59 +/* enum: Engineering sensor 4 */ +#define MC_CMD_SENSOR_ENGINEERING_4 0x5a +/* enum: Engineering sensor 5 */ +#define MC_CMD_SENSOR_ENGINEERING_5 0x5b +/* enum: Engineering sensor 6 */ +#define MC_CMD_SENSOR_ENGINEERING_6 0x5c +/* enum: Engineering sensor 7 */ +#define MC_CMD_SENSOR_ENGINEERING_7 0x5d +/* enum: Engineering sensor 8 */ +#define MC_CMD_SENSOR_ENGINEERING_8 0x5e /* enum: Not a sensor: reserved for the next page flag */ #define MC_CMD_SENSOR_PAGE2_NEXT 0x5f /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ @@ -5310,15 +6183,19 @@ #define MC_CMD_SENSOR_ENTRY_HI_OFST 8 #define MC_CMD_SENSOR_ENTRY_MINNUM 0 #define 
MC_CMD_SENSOR_ENTRY_MAXNUM 31 +#define MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 /* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */ #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4 #define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252 +#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020 #define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num)) +#define MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8) #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0 #define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4 /* Enum values, see field(s): */ /* MC_CMD_SENSOR_INFO_OUT */ +#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0 #define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31 #define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ @@ -5328,6 +6205,7 @@ /* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */ /* MC_CMD_SENSOR_ENTRY_MINNUM 0 */ /* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */ +/* MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */ /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */ #define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8 @@ -5367,12 +6245,17 @@ * STATE_WARNING. Otherwise the board should not be expected to function. */ #define MC_CMD_READ_SENSORS 0x42 +#undef MC_CMD_0x42_PRIVILEGE_CTG #define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_READ_SENSORS_IN msgrequest */ #define MC_CMD_READ_SENSORS_IN_LEN 8 -/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0 #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8 #define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0 @@ -5380,7 +6263,11 @@ /* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ #define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 -/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 @@ -5389,6 +6276,27 @@ #define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8 #define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4 +/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16 +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). + * + * If the address is 0xffffffffffffffff send the readings in the response (used + * by cmdclient). + */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4 +/* Size in bytes of host buffer. 
*/ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4 +/* Flags controlling information retrieved */ +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0 +#define MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1 + /* MC_CMD_READ_SENSORS_OUT msgresponse */ #define MC_CMD_READ_SENSORS_OUT_LEN 0 @@ -5432,6 +6340,7 @@ * code: 0 */ #define MC_CMD_GET_PHY_STATE 0x43 +#undef MC_CMD_0x43_PRIVILEGE_CTG #define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -5469,6 +6378,7 @@ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS */ #define MC_CMD_WOL_FILTER_GET 0x45 +#undef MC_CMD_0x45_PRIVILEGE_CTG #define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -5487,13 +6397,16 @@ * Returns: 0, ENOSYS */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46 +#undef MC_CMD_0x46_PRIVILEGE_CTG #define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num)) +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4) #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 #define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */ @@ -5502,6 +6415,7 @@ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62 +#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */ #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14 @@ -5535,6 +6449,7 @@ * None. Returns: 0, ENOSYS */ #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47 +#undef MC_CMD_0x47_PRIVILEGE_CTG #define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK @@ -5569,6 +6484,7 @@ * required: None Returns: 0 */ #define MC_CMD_TESTASSERT 0x49 +#undef MC_CMD_0x49_PRIVILEGE_CTG #define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5611,6 +6527,7 @@ * basis. Locks required: None. Returns: 0, EINVAL . */ #define MC_CMD_WORKAROUND 0x4a +#undef MC_CMD_0x4a_PRIVILEGE_CTG #define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5658,6 +6575,7 @@ #define MC_CMD_WORKAROUND_EXT_OUT_LEN 4 #define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4 +#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0 #define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1 @@ -5672,6 +6590,7 @@ * Anything else: currently undefined. Locks required: None. Return code: 0. 
*/ #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b +#undef MC_CMD_0x4b_PRIVILEGE_CTG #define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5683,7 +6602,9 @@ /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1) /* in bytes */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4 @@ -5691,6 +6612,7 @@ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016 /***********************************/ @@ -5699,6 +6621,7 @@ * on the type of partition). */ #define MC_CMD_NVRAM_TEST 0x4c +#undef MC_CMD_0x4c_PRIVILEGE_CTG #define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5771,6 +6694,7 @@ * of range. */ #define MC_CMD_SENSOR_SET_LIMS 0x4e +#undef MC_CMD_0x4e_PRIVILEGE_CTG #define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -5823,6 +6747,7 @@ * none. Returns: 0, EINVAL (bad type). */ #define MC_CMD_NVRAM_PARTITIONS 0x51 +#undef MC_CMD_0x51_PRIVILEGE_CTG #define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5832,7 +6757,9 @@ /* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */ #define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4 #define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252 +#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020 #define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4) /* total number of partitions */ #define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0 #define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4 @@ -5841,6 +6768,7 @@ #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4 #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0 #define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62 +#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254 /***********************************/ @@ -5849,6 +6777,7 @@ * none. Returns: 0, EINVAL (bad type). 
*/ #define MC_CMD_NVRAM_METADATA 0x52 +#undef MC_CMD_0x52_PRIVILEGE_CTG #define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5861,16 +6790,21 @@ /* MC_CMD_NVRAM_METADATA_OUT msgresponse */ #define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20 #define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252 +#define MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020 #define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num)) +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1) /* Partition type ID code */ #define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0 #define MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4 #define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4 #define MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4 +#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0 #define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1 #define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4 #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2 #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1 /* Subtype ID code for content of this partition */ @@ -5893,6 +6827,7 @@ #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1 #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0 #define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232 +#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000 /***********************************/ @@ -5900,6 +6835,7 @@ * Returns the base MAC, count and stride for the requesting function */ #define MC_CMD_GET_MAC_ADDRESSES 0x55 +#undef MC_CMD_0x55_PRIVILEGE_CTG #define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -5924,9 +6860,13 @@ /***********************************/ /* MC_CMD_CLP - * Perform a CLP related operation + * Perform a CLP related operation, see SF-110495-PS for details of CLP + * processing. This command has been extended to accomodate the requirements of + * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC, + * SF-120509-TC and SF-117282-PS. */ #define MC_CMD_CLP 0x56 +#undef MC_CMD_0x56_PRIVILEGE_CTG #define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -5961,7 +6901,10 @@ #define MC_CMD_CLP_IN_SET_MAC_LEN 12 /* MC_CMD_CLP_IN_OP_OFST 0 */ /* MC_CMD_CLP_IN_OP_LEN 4 */ -/* MAC address assigned to port */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle. + */ #define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4 #define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6 /* Padding */ @@ -5971,11 +6914,40 @@ /* MC_CMD_CLP_OUT_SET_MAC msgresponse */ #define MC_CMD_CLP_OUT_SET_MAC_LEN 0 +/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_SET_MAC_V2_LEN 16 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00 + * restores the permanent (factory-programmed) MAC address associated with the + * port. A non-zero MAC address persists until a PCIe reset or a power cycle. 
+ */ +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6 +/* Padding */ +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10 +#define MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0 +#define MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1 + /* MC_CMD_CLP_IN_GET_MAC msgrequest */ #define MC_CMD_CLP_IN_GET_MAC_LEN 4 /* MC_CMD_CLP_IN_OP_OFST 0 */ /* MC_CMD_CLP_IN_OP_LEN 4 */ +/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */ +#define MC_CMD_CLP_IN_GET_MAC_V2_LEN 8 +/* MC_CMD_CLP_IN_OP_OFST 0 */ +/* MC_CMD_CLP_IN_OP_LEN 4 */ +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0 +#define MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1 + /* MC_CMD_CLP_OUT_GET_MAC msgresponse */ #define MC_CMD_CLP_OUT_GET_MAC_LEN 8 /* MAC address assigned to port */ @@ -6016,6 +6988,7 @@ * Perform a MUM operation */ #define MC_CMD_MUM 0x57 +#undef MC_CMD_0x57_PRIVILEGE_CTG #define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -6023,6 +6996,7 @@ #define MC_CMD_MUM_IN_LEN 4 #define MC_CMD_MUM_IN_OP_HDR_OFST 0 #define MC_CMD_MUM_IN_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_OP_OFST 0 #define MC_CMD_MUM_IN_OP_LBN 0 #define MC_CMD_MUM_IN_OP_WIDTH 8 /* enum: NULL MCDI command to MUM */ @@ -6092,7 +7066,9 @@ /* MC_CMD_MUM_IN_WRITE msgrequest */ #define MC_CMD_MUM_IN_WRITE_LENMIN 16 #define MC_CMD_MUM_IN_WRITE_LENMAX 252 +#define MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020 #define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num)) +#define MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ /* MC_CMD_MUM_IN_CMD_LEN 4 */ @@ -6109,11 +7085,14 @@ #define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4 #define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1 #define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60 +#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252 /* MC_CMD_MUM_IN_RAW_CMD msgrequest */ #define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17 #define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020 #define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num)) +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1) /* MUM cmd header */ /* MC_CMD_MUM_IN_CMD_OFST 0 */ /* MC_CMD_MUM_IN_CMD_LEN 4 */ @@ -6131,6 +7110,7 @@ #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1 #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1 #define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236 +#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004 /* MC_CMD_MUM_IN_LOG msgrequest */ #define MC_CMD_MUM_IN_LOG_LEN 8 @@ -6158,6 +7138,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_HDR_OFST 4 #define MC_CMD_MUM_IN_GPIO_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4 #define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0 #define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */ @@ -6220,12 +7201,14 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8 #define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8 #define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* 
enum */ +#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16 #define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8 @@ -6242,6 +7225,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8 @@ -6251,6 +7235,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8 @@ -6260,6 +7245,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4 +#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24 #define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8 @@ -6270,8 +7256,10 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4 #define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4 +#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0 #define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8 +#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4 #define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8 #define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8 @@ -6289,10 +7277,13 @@ /* Control flags for clock programming */ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 @@ -6318,6 +7309,7 @@ /* MC_CMD_MUM_IN_CMD_LEN 4 */ #define MC_CMD_MUM_IN_QSFP_HDR_OFST 4 #define MC_CMD_MUM_IN_QSFP_HDR_LEN 4 +#define MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4 #define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0 #define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4 #define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */ @@ -6417,21 +7409,27 @@ /* MC_CMD_MUM_OUT_RAW_CMD msgresponse */ #define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1 #define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252 +#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020 #define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num)) +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1) /* returned data */ #define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0 #define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1 #define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1 #define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252 +#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020 /* MC_CMD_MUM_OUT_READ msgresponse */ #define MC_CMD_MUM_OUT_READ_LENMIN 4 #define MC_CMD_MUM_OUT_READ_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020 #define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4) #define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0 #define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4 #define 
MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1 #define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255 /* MC_CMD_MUM_OUT_WRITE msgresponse */ #define MC_CMD_MUM_OUT_WRITE_LEN 0 @@ -6490,15 +7488,21 @@ /* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */ #define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4 #define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252 +#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020 #define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num)) +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4) #define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0 #define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4 #define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1 #define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63 +#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255 +#define MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0 #define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0 #define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0 #define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16 #define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0 #define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24 #define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8 @@ -6524,8 +7528,10 @@ #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1 +#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1 #define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1 @@ -6537,7 +7543,9 @@ /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num)) +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1) /* in bytes */ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4 @@ -6545,6 +7553,7 @@ #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1 #define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248 +#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */ #define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8 @@ -6561,12 +7570,16 @@ /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 #define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016 #define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8) /* Discrete (soldered) DDR resistor strap info */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 
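Nearly every hunk in this patch adds an *_OFST define alongside an existing *_LBN/*_WIDTH pair, so each bitfield is now located by a full OFST/LBN/WIDTH triplet. Purely as an illustration of that convention (the helper below is a hypothetical sketch, not the sfc driver's real MCDI accessor macros, and it assumes a little-endian payload buffer), a field could be extracted like this:

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: read the field described by an *_OFST/*_LBN/*_WIDTH
 * triplet out of a raw MCDI payload buffer.
 */
static inline uint32_t mcdi_sketch_get_field(const uint8_t *payload,
                                             unsigned int ofst,  /* *_OFST: byte offset of the containing field */
                                             unsigned int lbn,   /* *_LBN: first bit within that field */
                                             unsigned int width) /* *_WIDTH: field width in bits */
{
        uint32_t dword;

        /* LBN is relative to the (possibly 64-bit) container at OFST; step to
         * the dword that actually holds the bits, assuming the field does not
         * straddle a 32-bit boundary (true of the definitions above).
         */
        ofst += (lbn / 32) * 4;
        lbn %= 32;

        memcpy(&dword, payload + ofst, sizeof(dword));
        return (dword >> lbn) &
               (width >= 32 ? 0xffffffffu : ((1u << width) - 1u));
}

/* For example, the DDR VRATIO strap bits defined just above could be read as:
 *
 *   uint32_t vratio = mcdi_sketch_get_field(buf,
 *                         MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST,
 *                         MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN,
 *                         MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH);
 */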
/* Number of SODIMM info records */ @@ -6579,6 +7592,8 @@ #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 #define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 /* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ @@ -6587,10 +7602,13 @@ #define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 /* enum: Total number of SODIMM banks */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 #define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 #define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ @@ -6599,10 +7617,13 @@ #define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ /* enum: Values 5-15 are reserved for future usage */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 #define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 #define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 /* enum: No module present */ @@ -6620,14 +7641,314 @@ /* enum: Modules may or may not be present, but cannot establish contact by I2C */ #define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 #define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 -/* MC_CMD_RESOURCE_SPECIFIER enum */ -/* enum: Any */ -#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff -/* enum: None */ -#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe +/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This + * should match the equivalent structure in the sensor_query SPHINX service. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24 +/* A value below this will trigger a warning event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32 +/* A value below this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32 +/* A value below this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32 +/* A value above this will trigger a warning event. 
*/ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32 +/* A value above this will trigger a critical event. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32 +/* A value above this will shut down the card. */ +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160 +#define MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32 + +/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service. + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64 +/* The handle used to identify the sensor in calls to + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32 +/* A human-readable name for the sensor (zero terminated string, max 32 bytes) + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256 +/* The type of the sensor device, and by implication the unit of that the + * values will be reported in + */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4 +/* enum: A voltage sensor. Unit is mV */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0 +/* enum: A current sensor. Unit is mA */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1 +/* enum: A power sensor. Unit is mW */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2 +/* enum: A temperature sensor. Unit is Celsius */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3 +/* enum: A cooling fan sensor. Unit is RPM */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32 +/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */ +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320 +#define MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192 + +/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor. + * This should match the equivalent structure in the sensor_query SPHINX + * service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_READING_LEN 12 +/* The handle used to identify the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0 +#define MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32 +/* The current value of the sensor */ +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32 +#define MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32 +/* The sensor's condition, e.g. good, broken or removed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4 +/* enum: Sensor working normally within limits */ +#define MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0 +/* enum: Warning threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1 +/* enum: Critical threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2 +/* enum: Fatal threshold breached */ +#define MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3 +/* enum: Sensor not working */ +#define MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4 +/* enum: Sensor working but no reading available */ +#define MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5 +/* enum: Sensor initialization failed */ +#define MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64 +#define MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_LIST + * Return a complete list of handles for sensors currently managed by the MC, + * and a generation count for this version of the sensor table. On systems + * advertising the DYNAMIC_SENSORS capability bit, this replaces the + * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors + * added by the NMC. + * + * Sensor handles are persistent for the lifetime of the sensor and are used to + * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and + * MC_CMD_DYNAMIC_SENSORS_GET_VALUES. + * + * The generation count is maintained by the MC, is persistent across reboots + * and will be incremented each time the sensor table is modified. When the + * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated + * containing the new generation count. The driver should compare this against + * the current generation count, and if it is different, call + * MC_CMD_DYNAMIC_SENSORS_LIST again to update it's copy of the sensor table. + * + * The sensor count is provided to allow a future path to supporting more than + * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e. + * the maximum number that will fit in a single response. As this is a fairly + * large number (253) it is not anticipated that this will be needed in the + * near future, so can currently be ignored. + * + * On Riverhead this command is implemented as a a wrapper for `list` in the + * sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66 +#undef MC_CMD_0x66_PRIVILEGE_CTG + +#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0 + +/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4) +/* Generation count, which will be updated each time a sensor is added to or + * removed from the MC sensor table. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4 +/* Number of sensors managed by the MC. Note that in principle, this can be + * larger than the size of the HANDLES array. + */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4 +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61 +#define MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS + * Get descriptions for a set of sensors, specified as an array of sensor + * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a a wrapper for + * `get_descriptions` in the sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67 +#undef MC_CMD_0x67_PRIVILEGE_CTG + +#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64) +/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3 +#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15 + + +/***********************************/ +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS + * Read the state and value for a set of sensors, specified as an array of + * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST. + * + * In the case of a broken sensor, then the state of the response's + * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value + * provided should be treated as erroneous. + * + * Any handles which do not correspond to a sensor currently managed by the MC + * will be dropped from from the response. This may happen when a sensor table + * update is in progress, and effectively means the set of usable sensors is + * the intersection between the sets of sensors known to the driver and the MC. + * + * On Riverhead this command is implemented as a a wrapper for `get_readings` + * in the sensor_query SPHINX service. 
+ */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68 +#undef MC_CMD_0x68_PRIVILEGE_CTG + +#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4) +/* Array of sensor handles */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255 + +/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12) +/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */ +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21 +#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85 + + +/***********************************/ +/* MC_CMD_EVENT_CTRL + * Configure which categories of unsolicited events the driver expects to + * receive (Riverhead). + */ +#define MC_CMD_EVENT_CTRL 0x69 +#undef MC_CMD_0x69_PRIVILEGE_CTG + +#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_EVENT_CTRL_IN msgrequest */ +#define MC_CMD_EVENT_CTRL_IN_LENMIN 0 +#define MC_CMD_EVENT_CTRL_IN_LENMAX 252 +#define MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020 +#define MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num)) +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4) +/* Array of event categories for which the driver wishes to receive events. */ +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63 +#define MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255 +/* enum: Driver wishes to receive LINKCHANGE events. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0 +/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events. + */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1 +/* enum: Driver wishes to receive receive errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2 +/* enum: Driver wishes to receive transmit errors. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3 +/* enum: Driver wishes to receive firmware alerts. */ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4 +/* enum: Driver wishes to receive reboot events. 
*/ +#define MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5 + +/* MC_CMD_EVENT_CTRL_OUT msgrequest */ +#define MC_CMD_EVENT_CTRL_OUT_LEN 0 /* EVB_PORT_ID structuredef */ #define EVB_PORT_ID_LEN 4 @@ -6789,6 +8110,8 @@ #define NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01 /* enum: Bundle update non-volatile log output partition */ #define NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02 +/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */ +#define NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -6846,24 +8169,34 @@ #define LICENSED_FEATURES_MASK_LEN 8 #define LICENSED_FEATURES_MASK_LO_OFST 0 #define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0 #define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 #define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_OFST 0 #define LICENSED_FEATURES_PIO_LBN 1 #define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_OFST 0 #define LICENSED_FEATURES_EVQ_TIMER_LBN 2 #define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_OFST 0 #define LICENSED_FEATURES_CLOCK_LBN 3 #define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0 #define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 #define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0 #define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 #define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_OFST 0 #define LICENSED_FEATURES_RX_SNIFF_LBN 6 #define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_OFST 0 #define LICENSED_FEATURES_TX_SNIFF_LBN 7 #define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0 #define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 #define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0 #define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 #define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 #define LICENSED_FEATURES_MASK_LBN 0 @@ -6876,36 +8209,52 @@ #define LICENSED_V3_APPS_MASK_LEN 8 #define LICENSED_V3_APPS_MASK_LO_OFST 0 #define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_OFST 0 #define LICENSED_V3_APPS_ONLOAD_LBN 0 #define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_OFST 0 #define LICENSED_V3_APPS_PTP_LBN 1 #define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0 #define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 #define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_OFST 0 #define LICENSED_V3_APPS_SOLARSECURE_LBN 3 #define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_OFST 0 #define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 #define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0 #define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 #define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0 #define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 #define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define 
LICENSED_V3_APPS_TCP_DIRECT_OFST 0 #define LICENSED_V3_APPS_TCP_DIRECT_LBN 8 #define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1 +#define LICENSED_V3_APPS_LOW_LATENCY_OFST 0 #define LICENSED_V3_APPS_LOW_LATENCY_LBN 9 #define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0 #define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10 #define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12 #define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1 +#define LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0 #define LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13 #define LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_DSHBRD_OFST 0 #define LICENSED_V3_APPS_DSHBRD_LBN 14 #define LICENSED_V3_APPS_DSHBRD_WIDTH 1 +#define LICENSED_V3_APPS_SCATRD_OFST 0 #define LICENSED_V3_APPS_SCATRD_LBN 15 #define LICENSED_V3_APPS_SCATRD_WIDTH 1 #define LICENSED_V3_APPS_MASK_LBN 0 @@ -6918,24 +8267,34 @@ #define LICENSED_V3_FEATURES_MASK_LEN 8 #define LICENSED_V3_FEATURES_MASK_LO_OFST 0 #define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0 #define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 #define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_OFST 0 #define LICENSED_V3_FEATURES_PIO_LBN 1 #define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0 #define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 #define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_OFST 0 #define LICENSED_V3_FEATURES_CLOCK_LBN 3 #define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0 #define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 #define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0 #define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 #define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_OFST 0 #define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 #define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_OFST 0 #define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 #define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 #define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0 #define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9 #define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 #define LICENSED_V3_FEATURES_MASK_LBN 0 @@ -6988,12 +8347,16 @@ */ #define RSS_MODE_HASH_SELECTOR_OFST 0 #define RSS_MODE_HASH_SELECTOR_LEN 1 +#define RSS_MODE_HASH_SRC_ADDR_OFST 0 #define RSS_MODE_HASH_SRC_ADDR_LBN 0 #define RSS_MODE_HASH_SRC_ADDR_WIDTH 1 +#define RSS_MODE_HASH_DST_ADDR_OFST 0 #define RSS_MODE_HASH_DST_ADDR_LBN 1 #define RSS_MODE_HASH_DST_ADDR_WIDTH 1 +#define RSS_MODE_HASH_SRC_PORT_OFST 0 #define RSS_MODE_HASH_SRC_PORT_LBN 2 #define RSS_MODE_HASH_SRC_PORT_WIDTH 1 +#define RSS_MODE_HASH_DST_PORT_OFST 0 #define RSS_MODE_HASH_DST_PORT_LBN 3 #define RSS_MODE_HASH_DST_PORT_WIDTH 1 #define RSS_MODE_HASH_SELECTOR_LBN 0 @@ -7018,6 +8381,7 @@ * Get a dump of the MCPU registers */ #define MC_CMD_READ_REGS 0x50 +#undef MC_CMD_0x50_PRIVILEGE_CTG #define MC_CMD_0x50_PRIVILEGE_CTG 
SRIOV_CTG_INSECURE @@ -7043,13 +8407,16 @@ * end with an address for each 4k of host memory required to back the EVQ. */ #define MC_CMD_INIT_EVQ 0x80 +#undef MC_CMD_0x80_PRIVILEGE_CTG #define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_INIT_EVQ_IN msgrequest */ #define MC_CMD_INIT_EVQ_IN_LENMIN 44 #define MC_CMD_INIT_EVQ_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548 #define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8) /* Size, in entries */ #define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0 #define MC_CMD_INIT_EVQ_IN_SIZE_LEN 4 @@ -7068,18 +8435,25 @@ /* tbd */ #define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1 #define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2 #define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3 #define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4 #define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16 #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 #define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 @@ -7122,6 +8496,7 @@ #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40 #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64 /* MC_CMD_INIT_EVQ_OUT msgresponse */ #define MC_CMD_INIT_EVQ_OUT_LEN 4 @@ -7132,7 +8507,9 @@ /* MC_CMD_INIT_EVQ_V2_IN msgrequest */ #define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44 #define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548 +#define MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548 #define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num)) +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8) /* Size, in entries */ #define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0 #define MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4 @@ -7151,20 +8528,28 @@ /* tbd */ #define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 
1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7 #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4 /* enum: All initialisation flags specified by host. */ @@ -7186,6 +8571,9 @@ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags. */ #define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11 +#define MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1 #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20 #define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4 /* enum: Disabled */ @@ -7226,6 +8614,7 @@ #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40 #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64 /* MC_CMD_INIT_EVQ_V2_OUT msgresponse */ #define MC_CMD_INIT_EVQ_V2_OUT_LEN 8 @@ -7235,12 +8624,16 @@ /* Actual configuration applied on the card */ #define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3 #define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1 @@ -7271,6 +8664,7 @@ * the RXQ. */ #define MC_CMD_INIT_RXQ 0x81 +#undef MC_CMD_0x81_PRIVILEGE_CTG #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7279,7 +8673,9 @@ */ #define MC_CMD_INIT_RXQ_IN_LENMIN 36 #define MC_CMD_INIT_RXQ_IN_LENMAX 252 +#define MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020 #define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) /* Size, in entries */ #define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0 #define MC_CMD_INIT_RXQ_IN_SIZE_LEN 4 @@ -7298,20 +8694,28 @@ /* There will be more flags here. 
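Much of the churn in this hunk is the new *_OFST companion for each existing *_LBN/*_WIDTH flag definition, so every bitfield now carries its byte offset as well as its bit position and width. A sketch of the raw arithmetic those three macros imply, illustrative only (the in-tree driver uses its own MCDI field helpers) and reusing the hypothetical mcdi_put_dword() from the earlier sketch:

/* Hypothetical helper: read-modify-write a bitfield described by an
 * _OFST/_LBN/_WIDTH triplet inside a little-endian dword.
 */
static inline void mcdi_set_field(u8 *buf, size_t ofst, unsigned int lbn,
				  unsigned int width, u32 value)
{
	u32 mask = (width < 32) ? ((1u << width) - 1) : 0xffffffffu;
	u32 dword = (u32)buf[ofst] |
		    ((u32)buf[ofst + 1] << 8) |
		    ((u32)buf[ofst + 2] << 16) |
		    ((u32)buf[ofst + 3] << 24);

	dword &= ~(mask << lbn);
	dword |= (value & mask) << lbn;
	mcdi_put_dword(buf, ofst, dword);
}

static void init_evq_example_flags(u8 *inbuf)
{
	/* e.g. an interrupting event queue driven by a timer */
	mcdi_set_field(inbuf, MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST,
		       MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN,
		       MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH, 1);
	mcdi_set_field(inbuf, MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST,
		       MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN,
		       MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH, 1);
}
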
*/ #define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1 #define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2 #define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16 #define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3 #define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7 #define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8 #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16 #define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10 #define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ @@ -7327,6 +8731,7 @@ #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 /* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode * flags @@ -7341,7 +8746,7 @@ #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4 #define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. - * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE * == PACKED_STREAM. */ #define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8 @@ -7354,20 +8759,28 @@ /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3 #define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10 #define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4 /* enum: One packet per descriptor (for normal networking) */ @@ -7380,9 +8793,13 @@ * description see SF-119419-TC. 
This mode is only supported by "dpdk" datapath * firmware. */ +#define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ #define MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 #define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */ @@ -7390,10 +8807,15 @@ #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4 @@ -7421,7 +8843,7 @@ #define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4 #define MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4 /* The value to put in the event data. Check hardware spec. for valid range. - * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE * == PACKED_STREAM. */ #define MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8 @@ -7434,20 +8856,28 @@ /* There will be more flags here. */ #define MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3 #define MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10 #define MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4 /* enum: One packet per descriptor (for normal networking) */ @@ -7460,9 +8890,13 @@ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath * firmware. 
*/ +#define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ #define MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 #define MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 #define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */ @@ -7470,10 +8904,15 @@ #define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */ #define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */ #define MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19 #define MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4 @@ -7490,21 +8929,21 @@ #define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540 #define MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4 /* The number of packet buffers that will be contained within each - * EQUAL_STRIDE_PACKED_STREAM format bucket supplied by the driver. This field - * is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. */ #define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 #define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 /* The length in bytes of the area in each packet buffer that can be written to * by the adapter. This is used to store the packet prefix and the packet * payload. This length does not include any end padding added by the driver. - * This field is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. */ #define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548 #define MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4 /* The length in bytes of a single packet buffer within a - * EQUAL_STRIDE_PACKED_STREAM format bucket. This field is ignored unless - * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. */ #define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552 #define MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4 @@ -7512,11 +8951,296 @@ * there are no RX descriptors available. If the timeout is reached and there * are still no descriptors then the packet will be dropped. A timeout of 0 * means the datapath will never be blocked. This field is ignored unless - * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM. + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
*/ #define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 #define MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required + * for systems with a QDMA (currently, Riverhead) + */ +#define MC_CMD_INIT_RXQ_V4_IN_LEN 564 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4 +/* There will be more flags here. */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. 
*/ +#define MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. */ +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. 
+ */ +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4 + +/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a + * different RX packet prefix + */ +#define MC_CMD_INIT_RXQ_V5_IN_LEN 568 +/* Size, in entries */ +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0 +#define MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4 +/* The EVQ to send events to. This is an index originally specified to + * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE. + */ +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4 +/* The value to put in the event data. Check hardware spec. for valid range. + * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE + * == PACKED_STREAM. + */ +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8 +#define MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4 +/* Desired instance. Must be set to a specific instance, which is a function + * local queue index. + */ +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12 +#define MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4 +/* There will be more flags here. 
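Continuing the illustrative sketches (same hypothetical mcdi_put_dword()/mcdi_rpc() stand-ins as above, not part of this patch): the V4 request differs from the earlier INIT_RXQ formats only by the trailing BUFFER_SIZE_BYTES dword, and the command number is still MC_CMD_INIT_RXQ. A minimal fill-in, with flags left at their defaults, might look like this:

static int init_rxq_v4(u32 instance, u32 target_evq, u32 size_entries,
		       const u64 *dma_addrs, unsigned int n_addrs)
{
	u8 inbuf[MC_CMD_INIT_RXQ_V4_IN_LEN] = { 0 };	/* FLAGS left at defaults */
	unsigned int i;

	if (n_addrs > MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM)
		return -EINVAL;

	mcdi_put_dword(inbuf, MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST, size_entries);
	mcdi_put_dword(inbuf, MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST, target_evq);
	mcdi_put_dword(inbuf, MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST, instance);	/* example choice */
	mcdi_put_dword(inbuf, MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST, instance);

	/* 64-bit addresses of the 4k-aligned host buffers backing the queue. */
	for (i = 0; i < n_addrs; i++) {
		size_t ofst = MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST +
			      i * MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN;

		mcdi_put_dword(inbuf, ofst, (u32)dma_addrs[i]);
		mcdi_put_dword(inbuf, ofst + 4, (u32)(dma_addrs[i] >> 32));
	}

	/* New in V4: the QDMA buffer size. 2KB is one of the two sizes the
	 * description says is always available on Riverhead; pass 0 instead
	 * on non-QDMA platforms.
	 */
	mcdi_put_dword(inbuf, MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST, 2048);

	return mcdi_rpc(MC_CMD_INIT_RXQ, inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
}
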
*/ +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3 +#define MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4 +/* enum: One packet per descriptor (for normal networking) */ +#define MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0 +/* enum: Pack multiple packets into large descriptors (for SolarCapture) */ +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1 +/* enum: Pack multiple packets into large descriptors using the format designed + * to maximise packet rate. This mode uses 1 "bucket" per descriptor with + * multiple fixed-size packet buffers within each bucket. For a full + * description see SF-119419-TC. This mode is only supported by "dpdk" datapath + * firmware. + */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2 +/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */ +#define MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15 +#define MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3 +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */ +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20 +#define MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1 +/* Owner ID to use if in buffer mode (zero if physical) */ +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20 +#define MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4 +/* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/ +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24 +#define MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4 +/* 64-bit address of 4k of 4k-aligned host memory buffer */ +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32 +#define MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64 +/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */ +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540 +#define MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4 +/* The number of packet buffers that will be contained within each + * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field + * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4 +/* The length in bytes of the area in each packet buffer that can be written to + * by the adapter. This is used to store the packet prefix and the packet + * payload. This length does not include any end padding added by the driver. + * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548 +#define MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4 +/* The length in bytes of a single packet buffer within a + * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552 +#define MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4 +/* The maximum time in nanoseconds that the datapath will be backpressured if + * there are no RX descriptors available. If the timeout is reached and there + * are still no descriptors then the packet will be dropped. A timeout of 0 + * means the datapath will never be blocked. This field is ignored unless + * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER. + */ +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556 +#define MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4 +/* V4 message data */ +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4 +/* Size in bytes of buffers attached to descriptors posted to this queue. Set + * to zero if using this message on non-QDMA based platforms. Currently in + * Riverhead there is a global limit of eight different buffer sizes across all + * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a + * request for a different buffer size will fail if there are already eight + * other buffer sizes in use. In future Riverhead this limit will go away and + * any size will be accepted. + */ +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560 +#define MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4 +/* Prefix id for the RX prefix format to use on packets delivered this queue. + * Zero is always a valid prefix id and means the default prefix format + * documented for the platform. Other prefix ids can be obtained by calling + * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields. 
+ */ +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564 +#define MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4 + /* MC_CMD_INIT_RXQ_OUT msgresponse */ #define MC_CMD_INIT_RXQ_OUT_LEN 0 @@ -7526,11 +9250,18 @@ /* MC_CMD_INIT_RXQ_V3_OUT msgresponse */ #define MC_CMD_INIT_RXQ_V3_OUT_LEN 0 +/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V4_OUT_LEN 0 + +/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */ +#define MC_CMD_INIT_RXQ_V5_OUT_LEN 0 + /***********************************/ /* MC_CMD_INIT_TXQ */ #define MC_CMD_INIT_TXQ 0x82 +#undef MC_CMD_0x82_PRIVILEGE_CTG #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7539,7 +9270,9 @@ */ #define MC_CMD_INIT_TXQ_IN_LENMIN 36 #define MC_CMD_INIT_TXQ_IN_LENMAX 252 +#define MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020 #define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num)) +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8) /* Size, in entries */ #define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0 #define MC_CMD_INIT_TXQ_IN_SIZE_LEN 4 @@ -7559,22 +9292,31 @@ /* There will be more flags here. */ #define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1 #define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2 #define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3 #define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16 #define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4 #define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8 #define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 #define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ @@ -7590,6 +9332,7 @@ #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28 +#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124 /* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode * flags @@ -7614,30 +9357,48 @@ /* There will be more flags here. 
*/ #define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4 #define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4 @@ -7651,11 +9412,14 @@ #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32 #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1 #define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64 +#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64 /* Flags related to Qbb flow control mode. */ #define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1 #define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3 @@ -7671,6 +9435,7 @@ * or the operation will fail with EBUSY */ #define MC_CMD_FINI_EVQ 0x83 +#undef MC_CMD_0x83_PRIVILEGE_CTG #define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7691,6 +9456,7 @@ * Teardown a RXQ. 
*/ #define MC_CMD_FINI_RXQ 0x84 +#undef MC_CMD_0x84_PRIVILEGE_CTG #define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7709,6 +9475,7 @@ * Teardown a TXQ. */ #define MC_CMD_FINI_TXQ 0x85 +#undef MC_CMD_0x85_PRIVILEGE_CTG #define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7727,6 +9494,7 @@ * Generate an event on an EVQ belonging to the function issuing the command. */ #define MC_CMD_DRIVER_EVENT 0x86 +#undef MC_CMD_0x86_PRIVILEGE_CTG #define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -7753,6 +9521,7 @@ * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated. */ #define MC_CMD_PROXY_CMD 0x5b +#undef MC_CMD_0x5b_PRIVILEGE_CTG #define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -7761,8 +9530,10 @@ /* The handle of the target function. */ #define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0 #define MC_CMD_PROXY_CMD_IN_TARGET_LEN 4 +#define MC_CMD_PROXY_CMD_IN_TARGET_PF_OFST 0 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0 #define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16 +#define MC_CMD_PROXY_CMD_IN_TARGET_VF_OFST 0 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16 #define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16 #define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */ @@ -7818,6 +9589,7 @@ * a designated admin function */ #define MC_CMD_PROXY_CONFIGURE 0x58 +#undef MC_CMD_0x58_PRIVILEGE_CTG #define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -7825,6 +9597,7 @@ #define MC_CMD_PROXY_CONFIGURE_IN_LEN 108 #define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0 #define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -7869,6 +9642,7 @@ #define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4 +#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0 #define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS @@ -7923,6 +9697,7 @@ * MC_CMD_PROXY_CONFIGURE). */ #define MC_CMD_PROXY_COMPLETE 0x5f +#undef MC_CMD_0x5f_PRIVILEGE_CTG #define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -7960,6 +9735,7 @@ * cannot do so). The buffer table entries will initially be zeroed. */ #define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87 +#undef MC_CMD_0x87_PRIVILEGE_CTG #define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -7990,13 +9766,16 @@ * Reprogram a set of buffer table entries in the specified chunk. 
*/ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88 +#undef MC_CMD_0x88_PRIVILEGE_CTG #define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num)) +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8) #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4 /* ID */ @@ -8012,6 +9791,7 @@ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32 +#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0 @@ -8021,6 +9801,7 @@ /* MC_CMD_FREE_BUFTBL_CHUNK */ #define MC_CMD_FREE_BUFTBL_CHUNK 0x89 +#undef MC_CMD_0x89_PRIVILEGE_CTG #define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -8038,6 +9819,7 @@ * Multiplexed MCDI call for filter operations */ #define MC_CMD_FILTER_OP 0x8a +#undef MC_CMD_0x8a_PRIVILEGE_CTG #define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -8070,32 +9852,46 @@ /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1 #define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3 #define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4 #define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5 #define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6 #define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7 #define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8 #define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9 #define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10 #define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11 #define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 #define 
MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 #define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ @@ -8143,8 +9939,10 @@ #define MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ #define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40 #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40 #define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1 #define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1 /* source MAC address to match (as bytes in network order) */ @@ -8210,60 +10008,88 @@ /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define 
MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 #define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ @@ -8311,8 +10137,10 @@ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1 #define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1 /* source MAC address to match (as bytes in network order) */ @@ -8348,8 +10176,10 @@ */ #define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72 #define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8 /* enum: Match VXLAN traffic with this VNI */ @@ -8358,8 +10188,10 @@ #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1 /* enum: Reserved for experimental development use */ #define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72 #define 
MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0 #define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24 #define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8 /* enum: Match NVGRE traffic with this VSID */ @@ -8454,60 +10286,88 @@ /* fields to include in match criteria */ #define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3 #define MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5 #define MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6 #define MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7 #define MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8 #define MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10 #define MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11 #define MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16 #define 
MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25 #define MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30 #define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16 #define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31 #define MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1 /* receive destination */ @@ -8555,8 +10415,10 @@ #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4 /* enum: request default behaviour (based on filter type) */ #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40 #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0 #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1 +#define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40 #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1 #define MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1 /* source MAC address to match (as bytes in network order) */ @@ -8592,8 +10454,10 @@ */ #define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72 #define MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4 +#define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72 #define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0 #define MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72 #define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24 #define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8 /* enum: Match VXLAN traffic with this VNI */ @@ -8602,8 +10466,10 @@ #define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1 /* enum: Reserved for experimental development use */ #define MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe +#define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72 #define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0 #define MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24 +#define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72 #define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24 #define MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8 /* enum: Match NVGRE traffic with this VSID */ @@ -8693,7 +10559,10 @@ * support the DPDK rte_flow "MARK" action. 
*/ #define MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2 -/* the mark value for MATCH_ACTION_MARK */ +/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the + * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX) + * will cause the filter insertion to fail with EINVAL. + */ #define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176 #define MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4 @@ -8741,6 +10610,7 @@ * Get information related to the parser-dispatcher subsystem */ #define MC_CMD_GET_PARSER_DISP_INFO 0xe4 +#undef MC_CMD_0xe4_PRIVILEGE_CTG #define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -8764,11 +10634,17 @@ * frames (Medford only) */ #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4 +/* enum: read the list of supported matches for the encapsulation detection + * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later) + */ +#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) /* identifies the type of operation requested */ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4 @@ -8784,6 +10660,7 @@ #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 /* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8 @@ -8795,9 +10672,38 @@ /* bitfield of filter insertion restrictions */ #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4 +#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0 #define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1 +/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is + * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value + * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the + * supported match types that can be used in the encapsulation detection rules + * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num)) +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4) +/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. 
*/ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */ +/* number of supported match types */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4 +/* array of supported match types (valid MATCH_FLAGS values for + * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order + */ +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61 +#define MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253 + /***********************************/ /* MC_CMD_PARSER_DISP_RW @@ -8809,6 +10715,7 @@ * permitted. */ #define MC_CMD_PARSER_DISP_RW 0xe5 +#undef MC_CMD_0xe5_PRIVILEGE_CTG #define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -8898,6 +10805,7 @@ * Get number of PFs on the device. */ #define MC_CMD_GET_PF_COUNT 0xb6 +#undef MC_CMD_0xb6_PRIVILEGE_CTG #define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -8932,6 +10840,7 @@ * Get port assignment for current PCI function. */ #define MC_CMD_GET_PORT_ASSIGNMENT 0xb8 +#undef MC_CMD_0xb8_PRIVILEGE_CTG #define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -8950,6 +10859,7 @@ * Set port assignment for current PCI function. */ #define MC_CMD_SET_PORT_ASSIGNMENT 0xb9 +#undef MC_CMD_0xb9_PRIVILEGE_CTG #define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -8968,6 +10878,7 @@ * Allocate VIs for current PCI function. */ #define MC_CMD_ALLOC_VIS 0x8b +#undef MC_CMD_0x8b_PRIVILEGE_CTG #define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9014,6 +10925,7 @@ * but not freed. */ #define MC_CMD_FREE_VIS 0x8c +#undef MC_CMD_0x8c_PRIVILEGE_CTG #define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9029,6 +10941,7 @@ * Get SRIOV config for this PF. */ #define MC_CMD_GET_SRIOV_CFG 0xba +#undef MC_CMD_0xba_PRIVILEGE_CTG #define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9045,6 +10958,7 @@ #define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4 #define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8 #define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0 #define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF. */ @@ -9060,6 +10974,7 @@ * Set SRIOV config for this PF. */ #define MC_CMD_SET_SRIOV_CFG 0xbb +#undef MC_CMD_0xbb_PRIVILEGE_CTG #define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -9073,6 +10988,7 @@ #define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4 #define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8 #define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0 #define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1 /* RID offset of first VF from PF, or 0 for no change, or @@ -9096,6 +11012,7 @@ * function. */ #define MC_CMD_GET_VI_ALLOC_INFO 0x8d +#undef MC_CMD_0x8d_PRIVILEGE_CTG #define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9122,6 +11039,7 @@ * For CmdClient use. Dump pertinent information on a specific absolute VI. 
*/ #define MC_CMD_DUMP_VI_STATE 0x8e +#undef MC_CMD_0x8e_PRIVILEGE_CTG #define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9164,10 +11082,13 @@ /* Combined metadata field. */ #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24 #define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8 /* TXDPCPU raw table data for queue. */ @@ -9190,14 +11111,19 @@ #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32 #define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56 #define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40 #define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24 /* RXDPCPU raw table data for queue. */ @@ -9220,12 +11146,16 @@ #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8 +#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32 #define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8 @@ -9235,6 +11165,7 @@ * Allocate a push I/O buffer for later use with a tx queue. */ #define MC_CMD_ALLOC_PIOBUF 0x8f +#undef MC_CMD_0x8f_PRIVILEGE_CTG #define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -9253,6 +11184,7 @@ * Free a push I/O buffer. */ #define MC_CMD_FREE_PIOBUF 0x90 +#undef MC_CMD_0x90_PRIVILEGE_CTG #define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -9271,6 +11203,7 @@ * Get TLP steering and ordering information for a VI. 
*/ #define MC_CMD_GET_VI_TLP_PROCESSING 0xb0 +#undef MC_CMD_0xb0_PRIVILEGE_CTG #define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9309,6 +11242,7 @@ * Set TLP steering and ordering information for a VI. */ #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1 +#undef MC_CMD_0xb1_PRIVILEGE_CTG #define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9347,6 +11281,7 @@ * Get global PCIe steering and transaction processing configuration. */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc +#undef MC_CMD_0xbc_PRIVILEGE_CTG #define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -9372,38 +11307,55 @@ /* Amalgamated TLP info word. */ #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2 #define 
MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23 @@ -9413,6 +11365,7 @@ * Set global PCIe steering and transaction processing configuration. */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd +#undef MC_CMD_0xbd_PRIVILEGE_CTG #define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -9425,32 +11378,46 @@ /* Amalgamated TLP info word. */ #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2 #define 
MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2 +#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22 @@ -9463,6 +11430,7 @@ * Download a new set of images to the satellite CPUs from the host. */ #define MC_CMD_SATELLITE_DOWNLOAD 0x91 +#undef MC_CMD_0x91_PRIVILEGE_CTG #define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -9486,7 +11454,9 @@ */ #define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20 #define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020 #define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num)) +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4) /* Download phase. (Note: the IDLE phase is used internally and is never valid * in a command from the host.) */ @@ -9551,6 +11521,7 @@ #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1 #define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59 +#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251 /* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */ #define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8 @@ -9586,6 +11557,7 @@ * reference inherent device capabilities as opposed to current NVRAM config. */ #define MC_CMD_GET_CAPABILITIES 0xbe +#undef MC_CMD_0xbe_PRIVILEGE_CTG #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -9597,62 +11569,91 @@ /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 #define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 #define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 #define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 #define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 #define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 #define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 #define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 #define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14 #define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16 #define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18 #define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20 #define 
MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21 #define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23 #define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24 #define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25 #define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28 #define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0 #define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31 #define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ @@ -9713,8 +11714,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -9725,6 +11728,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -9759,8 +11765,10 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -9771,6 +11779,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -9811,62 +11822,91 @@ /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 #define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 #define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 #define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 #define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ @@ -9927,8 +11967,10 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -9939,6 +11981,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -9973,8 +12018,10 @@ #define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -9985,6 +12032,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -10019,58 +12069,110 @@ /* Second word of flags. Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7 #define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10 #define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 #define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15 #define MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 #define MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22 #define MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 #define MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24 #define MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1 -/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present - * on older firmware (check the length). +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 #define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 @@ -10130,62 +12232,91 @@ /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7 #define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13 #define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14 #define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18 #define 
MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ @@ -10246,8 +12377,10 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -10258,6 +12391,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -10292,8 +12428,10 @@ #define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -10304,6 +12442,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -10338,58 +12479,110 @@ /* Second word of flags. Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7 #define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10 #define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 #define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15 #define MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 #define MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22 #define MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 #define MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24 #define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1 -/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present - * on older firmware (check the length). +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 #define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 @@ -10474,62 +12667,91 @@ /* First word of flags. */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7 #define MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13 #define MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14 #define MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18 #define 
MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27 #define MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1 /* RxDPCPU firmware id. 
*/ @@ -10590,8 +12812,10 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -10602,6 +12826,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: RX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -10636,8 +12863,10 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 /* enum: reserved value - do not use (may indicate alternative interpretation @@ -10648,6 +12877,9 @@ * development only) */ #define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 /* enum: TX PD firmware with approximately Siena-compatible behaviour * (Huntington development only) */ @@ -10682,58 +12914,110 @@ /* Second word of flags. Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7 #define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10 #define MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 #define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15 #define MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 #define MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 #define MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22 #define MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 #define MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24 #define MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1 -/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present - * on older firmware (check the length). +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
*/ #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 #define MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 @@ -10821,6 +13105,2422 @@ #define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76 #define MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84 +/* First word of flags. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4 + +/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19 
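/*
 * Illustrative sketch, not part of the patch hunk above or below: every
 * capability bit in the GET_CAPABILITIES V3..V6 responses is described by an
 * _OFST/_LBN/_WIDTH triple -- the byte offset of the containing 32-bit flags
 * word, the bit position within that word, and the field width in bits. The
 * helper below shows how a driver could test such a bit; the function name
 * mcdi_cap_flag() and its raw outbuf/outlen arguments are hypothetical, and
 * the code assumes the usual little-endian MCDI wire encoding together with
 * the kernel's <linux/types.h> and <linux/string.h> definitions.
 */
static inline bool mcdi_cap_flag(const u8 *outbuf, size_t outlen,
				 unsigned int ofst, unsigned int lbn,
				 unsigned int width)
{
	__le32 dword;

	/* Older firmware may omit later flags words entirely, so a short
	 * response is treated as "capability absent" (check the length).
	 */
	if (ofst + sizeof(dword) > outlen)
		return false;
	memcpy(&dword, outbuf + ofst, sizeof(dword));
	return (le32_to_cpu(dword) >> lbn) & ((1u << width) - 1);
}
/*
 * Hypothetical usage, e.g. testing FATSOv2 support from a V4 response:
 *   mcdi_cap_flag(outbuf, outlen,
 *		   MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST,
 *		   MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN,
 *		   MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH);
 */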
+#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 + +/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 + +/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156 + +/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19 
+#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: Rules engine RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5 +/* enum: DPDK RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* enum: RXDP Test firmware image 10 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: Rules engine TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5 +/* enum: DPDK TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development 
only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: TX PD firmware for telemetry prototyping (Medford2 development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1 +/* enum: TX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Full featured TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3 +/* enum: (deprecated original name for the FULL_FEATURED variant) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant TX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */ +/* enum: TX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: Rules engine TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8 +/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9 +/* enum: DPDK TX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* Hardware capabilities of NIC */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4 +/* Licensed capabilities */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4 +/* Second word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1 +/* Number of FATSOv2 contexts per datapath supported by this NIC (when + * TX_TSO_V2 == 1). Not present on older firmware (check the length). 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2 +/* One byte per PF containing the number of the external port assigned to this + * PF, indexed by PF number. Special values indicate that a PF is either not + * present or not assigned. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff +/* enum: PF does not exist. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe +/* enum: PF does exist but is not assigned to any external port. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd +/* enum: This value indicates that PF is assigned, but it cannot be expressed + * in this field. It is intended for a possible future situation where a more + * complex scheme of PFs to ports mapping is being used. The future driver + * should look for a new field supporting the new scheme. The current/old + * driver should treat this value as PF_NOT_ASSIGNED. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc +/* One byte per PF containing the number of its VFs, indexed by PF number. A + * special value indicates that a PF is not present. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16 +/* enum: The caller is not permitted to access information on this PF. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */ +/* enum: PF does not exist. */ +/* MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */ +/* Number of VIs available for each external port */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4 +/* Size of RX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ RX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1 +/* Size of TX descriptor cache expressed as binary logarithm The actual size + * equals (2 ^ TX_DESC_CACHE_SIZE) + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1 +/* Total number of available PIO buffers */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2 +/* Size of a single PIO buffer */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2 +/* On chips later than Medford the amount of address space assigned to each VI + * is configurable. This is a global setting that the driver must query to + * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available + * with 8k VI windows. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1 +/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k. + * CTPIO is not mapped. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0 +/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1 +/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2 +/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1 +/* Number of buffers per adapter that can be used for VFIFO Stuffing + * (SF-115995-SW) in the present configuration of firmware and port mode. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2 +/* Entry count in the MAC stats array, including the final GENERATION_END + * entry. For MAC stats DMA, drivers should allocate a buffer large enough to + * hold at least this many 64-bit stats values, if they wish to receive all + * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the + * stats array returned will be truncated. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2 +/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field + * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4 +/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in + * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when + * they create an RX queue. Due to hardware limitations, only a small number of + * different buffer sizes may be available concurrently. Nonzero entries in + * this array are the sizes of buffers which the system guarantees will be + * available for use. If the list is empty, there are no limitations on + * concurrent buffer sizes. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16 +/* Third word of flags. Not present on older firmware (check the length). 
*/ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1 +/* These bits are reserved for communicating test-specific capabilities to + * host-side test software. All production drivers should treat this field as + * opaque. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156 +/* The minimum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum size (in table entries) of indirection table to be allocated + * from the pool for an RSS context. Note that the table size used must be a + * power of 2. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4 +/* The maximum number of queues that can be used by an RSS context in exclusive + * mode. In exclusive mode the context has a configurable indirection table and + * a configurable RSS key. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4 +/* The maximum number of queues that can be used by an RSS context in even- + * spreading mode. In even-spreading mode the context has no indirection table + * but it does have a configurable RSS key. 
+ */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4 +/* The total number of RSS contexts supported. Note that the number of + * available contexts using indirection tables is also limited by the + * availability of indirection table space allocated from a common pool. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4 +/* The total amount of indirection table space that can be shared between RSS + * contexts. + */ +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180 +#define MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4 + /***********************************/ /* MC_CMD_V2_EXTN @@ -10858,6 +15558,7 @@ * Allocate a pacer bucket (for qau rp or a snapper test) */ #define MC_CMD_TCM_BUCKET_ALLOC 0xb2 +#undef MC_CMD_0xb2_PRIVILEGE_CTG #define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -10876,6 +15577,7 @@ * Free a pacer bucket */ #define MC_CMD_TCM_BUCKET_FREE 0xb3 +#undef MC_CMD_0xb3_PRIVILEGE_CTG #define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -10894,6 +15596,7 @@ * Initialise pacer bucket with a given rate */ #define MC_CMD_TCM_BUCKET_INIT 0xb4 +#undef MC_CMD_0xb4_PRIVILEGE_CTG #define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -10927,6 +15630,7 @@ * Initialise txq in pacer with given options or set options */ #define MC_CMD_TCM_TXQ_INIT 0xb5 +#undef MC_CMD_0xb5_PRIVILEGE_CTG #define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -10941,10 +15645,13 @@ /* bitmask of the priority queues this txq is inserted into when inserted. */ #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2 #define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ @@ -10975,10 +15682,13 @@ /* bitmask of the priority queues this txq is inserted into when inserted. */ #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1 +#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2 #define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1 /* the reaction point (RP) bucket */ @@ -11010,6 +15720,7 @@ * Link a push I/O buffer to a TxQ */ #define MC_CMD_LINK_PIOBUF 0x92 +#undef MC_CMD_0x92_PRIVILEGE_CTG #define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -11031,6 +15742,7 @@ * Unlink a push I/O buffer from a TxQ */ #define MC_CMD_UNLINK_PIOBUF 0x93 +#undef MC_CMD_0x93_PRIVILEGE_CTG #define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -11049,6 +15761,7 @@ * allocate and initialise a v-switch. 
*/ #define MC_CMD_VSWITCH_ALLOC 0x94 +#undef MC_CMD_0x94_PRIVILEGE_CTG #define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11073,6 +15786,7 @@ /* Flags controlling v-port creation */ #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators, @@ -11094,6 +15808,7 @@ * de-allocate a v-switch. */ #define MC_CMD_VSWITCH_FREE 0x95 +#undef MC_CMD_0x95_PRIVILEGE_CTG #define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11114,6 +15829,7 @@ * not, then the command returns ENOENT). */ #define MC_CMD_VSWITCH_QUERY 0x63 +#undef MC_CMD_0x63_PRIVILEGE_CTG #define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11132,6 +15848,7 @@ * allocate a v-port. */ #define MC_CMD_VPORT_ALLOC 0x96 +#undef MC_CMD_0x96_PRIVILEGE_CTG #define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11164,8 +15881,10 @@ /* Flags controlling v-port creation */ #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1 +#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8 #define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1 #define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1 /* The number of VLAN tags to insert/remove. An error will be returned if @@ -11177,8 +15896,10 @@ /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16 @@ -11194,6 +15915,7 @@ * de-allocate a v-port. */ #define MC_CMD_VPORT_FREE 0x97 +#undef MC_CMD_0x97_PRIVILEGE_CTG #define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11212,6 +15934,7 @@ * allocate a v-adaptor. 
*/ #define MC_CMD_VADAPTOR_ALLOC 0x98 +#undef MC_CMD_0x98_PRIVILEGE_CTG #define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11223,8 +15946,10 @@ /* Flags controlling v-adaptor creation */ #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1 +#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 /* The number of VLAN tags to strip on receive */ @@ -11236,8 +15961,10 @@ /* The actual VLAN tags to insert/remove */ #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16 /* The MAC address to assign to this v-adaptor */ @@ -11255,6 +15982,7 @@ * de-allocate a v-adaptor. */ #define MC_CMD_VADAPTOR_FREE 0x99 +#undef MC_CMD_0x99_PRIVILEGE_CTG #define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11273,6 +16001,7 @@ * assign a new MAC address to a v-adaptor. */ #define MC_CMD_VADAPTOR_SET_MAC 0x5d +#undef MC_CMD_0x5d_PRIVILEGE_CTG #define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11294,6 +16023,7 @@ * read the MAC address assigned to a v-adaptor. */ #define MC_CMD_VADAPTOR_GET_MAC 0x5e +#undef MC_CMD_0x5e_PRIVILEGE_CTG #define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11315,6 +16045,7 @@ * read some config of v-adaptor. */ #define MC_CMD_VADAPTOR_QUERY 0x61 +#undef MC_CMD_0x61_PRIVILEGE_CTG #define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11342,6 +16073,7 @@ * assign a port to a PCI function. */ #define MC_CMD_EVB_PORT_ASSIGN 0x9a +#undef MC_CMD_0x9a_PRIVILEGE_CTG #define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11353,8 +16085,10 @@ /* The target function to modify. */ #define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4 +#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0 #define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16 +#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4 #define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16 #define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16 @@ -11367,6 +16101,7 @@ * Assign the 64 bit region addresses. */ #define MC_CMD_RDWR_A64_REGIONS 0x9b +#undef MC_CMD_0x9b_PRIVILEGE_CTG #define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -11405,6 +16140,7 @@ * Allocate an Onload stack ID. */ #define MC_CMD_ONLOAD_STACK_ALLOC 0x9c +#undef MC_CMD_0x9c_PRIVILEGE_CTG #define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -11426,6 +16162,7 @@ * Free an Onload stack ID. */ #define MC_CMD_ONLOAD_STACK_FREE 0x9d +#undef MC_CMD_0x9d_PRIVILEGE_CTG #define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD @@ -11444,6 +16181,7 @@ * Allocate an RSS context. */ #define MC_CMD_RSS_CONTEXT_ALLOC 0x9e +#undef MC_CMD_0x9e_PRIVILEGE_CTG #define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11464,12 +16202,68 @@ * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64. 
*/ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1 -/* Number of queues spanned by this context, in the range 1-64; valid offsets - * in the indirection table will be in the range 0 to NUM_QUEUES-1. +/* enum: Allocate a context to spread evenly across an arbitrary number of + * queues. No indirection table space is allocated for this context. (EF100 and + * later) + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2 +/* Number of queues spanned by this context. For exclusive contexts this must + * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where + * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if + * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in + * the indirection table will be in the range 0 to NUM_QUEUES-1. For even- + * spreading contexts this must be in the range 1 to + * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note + * that specifying NUM_QUEUES = 1 will not perform any spreading but may still + * be useful as a way of obtaining the Toeplitz hash. */ #define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8 #define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4 +/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16 +/* The handle of the owning upstream port */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4 +/* The type of context to allocate */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4 +/* enum: Allocate a context for exclusive use. The key and indirection table + * must be explicitly configured. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0 +/* enum: Allocate a context for shared use; this will spread across a range of + * queues, but the key and indirection table are pre-configured and may not be + * changed. For this mode, NUM_QUEUES must 2, 4, 8, 16, 32 or 64. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1 +/* enum: Allocate a context to spread evenly across an arbitrary number of + * queues. No indirection table space is allocated for this context. (EF100 and + * later) + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2 +/* Number of queues spanned by this context. For exclusive contexts this must + * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where + * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if + * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in + * the indirection table will be in the range 0 to NUM_QUEUES-1. For even- + * spreading contexts this must be in the range 1 to + * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note + * that specifying NUM_QUEUES = 1 will not perform any spreading but may still + * be useful as a way of obtaining the Toeplitz hash. + */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4 +/* Size of indirection table to be allocated to this context from the pool. + * Must be a power of 2. The minimum and maximum table size can be queried + * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in + * the common pool to allocate the requested table size, due to allocating + * table space to other RSS contexts, then the command will fail with + * MC_CMD_ERR_ENOSPC. 
+ */ +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12 +#define MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4 + /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */ #define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4 /* The handle of the new RSS context. This should be considered opaque to the @@ -11487,6 +16281,7 @@ * Free an RSS context. */ #define MC_CMD_RSS_CONTEXT_FREE 0x9f +#undef MC_CMD_0x9f_PRIVILEGE_CTG #define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11505,6 +16300,7 @@ * Set the Toeplitz hash key for an RSS context. */ #define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0 +#undef MC_CMD_0xa0_PRIVILEGE_CTG #define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11526,6 +16322,7 @@ * Get the Toeplitz hash key for an RSS context. */ #define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1 +#undef MC_CMD_0xa1_PRIVILEGE_CTG #define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11544,9 +16341,12 @@ /***********************************/ /* MC_CMD_RSS_CONTEXT_SET_TABLE - * Set the indirection table for an RSS context. + * Set the indirection table for an RSS context. This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. */ #define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2 +#undef MC_CMD_0xa2_PRIVILEGE_CTG #define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11565,9 +16365,12 @@ /***********************************/ /* MC_CMD_RSS_CONTEXT_GET_TABLE - * Get the indirection table for an RSS context. + * Get the indirection table for an RSS context. This command should only be + * used with indirection tables containing 128 entries, which is the default + * when the RSS context is allocated without specifying a table size. */ #define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3 +#undef MC_CMD_0xa3_PRIVILEGE_CTG #define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11584,11 +16387,99 @@ #define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128 +/***********************************/ +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE + * Write a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. + */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e +#undef MC_CMD_0x13e_PRIVILEGE_CTG + +#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array of index-value pairs to be written to the table. Structure is + * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY. 
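[Illustrative note] The hunk above defines the full 16-byte MC_CMD_RSS_CONTEXT_ALLOC_V2_IN layout: upstream port, context type, queue count and the optional indirection table size. Purely as a sketch (not the sfc driver's own MCDI plumbing), the plain C below packs that request for an even-spreading context. It assumes mcdi_pcol.h is included and MCDI's little-endian wire format; mcdi_put_dword is a stand-in helper written for this example, not an existing kernel API.

#include <stdint.h>
#include <string.h>

/* Stand-in little-endian dword store for this sketch only. */
static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	buf[ofst + 0] = val & 0xff;
	buf[ofst + 1] = (val >> 8) & 0xff;
	buf[ofst + 2] = (val >> 16) & 0xff;
	buf[ofst + 3] = (val >> 24) & 0xff;
}

/*
 * Pack an MC_CMD_RSS_CONTEXT_ALLOC_V2 request for an even-spreading
 * context: no indirection table space is consumed, so the
 * INDIRECTION_TABLE_SIZE dword at offset 12 is left at zero.
 */
static void build_rss_context_alloc_v2(uint8_t inbuf[MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN],
				       uint32_t upstream_port_id,
				       uint32_t num_queues)
{
	memset(inbuf, 0, MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN);
	mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST,
		       upstream_port_id);
	mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST,
		       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING);
	mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST,
		       num_queues);
}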
+ */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0 + +/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4 +/* The index of the table entry to be written. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16 +/* The value to write into the table entry. */ +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16 +#define MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16 + + +/***********************************/ +/* MC_CMD_RSS_CONTEXT_READ_TABLE + * Read a portion of a selectable-size indirection table for an RSS context. + * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the + * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES. + */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f +#undef MC_CMD_0x13f_PRIVILEGE_CTG + +#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2) +/* The handle of the RSS context */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4 +/* An array containing the indices of the entries to be read. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508 + +/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2) +/* A buffer containing the requested entries read from the table. */ +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126 +#define MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510 + + /***********************************/ /* MC_CMD_RSS_CONTEXT_SET_FLAGS * Set various control flags for an RSS context. 
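[Illustrative note] As a sketch of the index/value pairing described above: each MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY is a 32-bit word with the table index in bits 0-15 and the queue value in bits 16-31, and the ENTRIES array starting at offset 4 holds at most 62 such pairs per classic-MCDI request (254 under MCDI v2). The helper below is illustration only and relies solely on the _LBN values defined above.

#include <stdint.h>

/* Pack one WRITE_TABLE entry: INDEX in bits 0-15, VALUE in bits 16-31. */
static uint32_t rss_write_table_entry(uint16_t index, uint16_t value)
{
	return ((uint32_t)index << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN) |
	       ((uint32_t)value << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN);
}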
*/ #define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1 +#undef MC_CMD_0xe1_PRIVILEGE_CTG #define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11611,26 +16502,37 @@ */ #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4 @@ -11643,6 +16545,7 @@ * Get various control flags for an RSS context. 
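[Illustrative note] The MC_CMD_RSS_CONTEXT_SET_FLAGS_IN bitfield above is laid out as four legacy Toeplitz enable bits (bits 0-3) followed by 4-bit per-traffic-class RSS_MODE nibbles from bit 8 upward. The sketch below only illustrates that packing from the _LBN values and deliberately fills in just two of the mode nibbles.

#include <stdint.h>

/*
 * Compose the FLAGS dword of MC_CMD_RSS_CONTEXT_SET_FLAGS_IN: the four
 * legacy Toeplitz enable bits plus two of the 4-bit RSS mode fields.
 */
static uint32_t rss_context_set_flags(uint32_t tcp_ipv4_mode, uint32_t udp_ipv4_mode)
{
	uint32_t flags = 0;

	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;
	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN;
	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN;
	flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN;
	flags |= (tcp_ipv4_mode & 0xf) << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
	flags |= (udp_ipv4_mode & 0xf) << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN;
	return flags;
}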
*/ #define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2 +#undef MC_CMD_0xe2_PRIVILEGE_CTG #define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11669,26 +16572,37 @@ */ #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4 +#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4 @@ -11698,6 +16612,7 @@ * Allocate a .1p mapping. */ #define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4 +#undef MC_CMD_0xa4_PRIVILEGE_CTG #define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -11730,6 +16645,7 @@ * Free a .1p mapping. */ #define MC_CMD_DOT1P_MAPPING_FREE 0xa5 +#undef MC_CMD_0xa5_PRIVILEGE_CTG #define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -11748,6 +16664,7 @@ * Set the mapping table for a .1p mapping. */ #define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6 +#undef MC_CMD_0xa6_PRIVILEGE_CTG #define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -11771,6 +16688,7 @@ * Get the mapping table for a .1p mapping. */ #define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7 +#undef MC_CMD_0xa7_PRIVILEGE_CTG #define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -11794,6 +16712,7 @@ * Get Interrupt Vector config for this PF. */ #define MC_CMD_GET_VECTOR_CFG 0xbf +#undef MC_CMD_0xbf_PRIVILEGE_CTG #define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11818,6 +16737,7 @@ * Set Interrupt Vector config for this PF. 
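[Illustrative note] The FUNCTION dword of MC_CMD_EVB_PORT_ASSIGN_IN shown above splits into a 16-bit PF number (bits 0-15) and a 16-bit VF number (bits 16-31). A purely illustrative packing, using only the _LBN values from the hunk:

#include <stdint.h>

/* PF occupies bits 0-15 of FUNCTION, VF bits 16-31. */
static uint32_t evb_port_assign_function(uint16_t pf, uint16_t vf)
{
	return ((uint32_t)pf << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
	       ((uint32_t)vf << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);
}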
*/ #define MC_CMD_SET_VECTOR_CFG 0xc0 +#undef MC_CMD_0xc0_PRIVILEGE_CTG #define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11844,6 +16764,7 @@ * Add a MAC address to a v-port */ #define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8 +#undef MC_CMD_0xa8_PRIVILEGE_CTG #define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11865,6 +16786,7 @@ * Delete a MAC address from a v-port */ #define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9 +#undef MC_CMD_0xa9_PRIVILEGE_CTG #define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11886,6 +16808,7 @@ * Delete a MAC address from a v-port */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa +#undef MC_CMD_0xaa_PRIVILEGE_CTG #define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11898,7 +16821,9 @@ /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num)) +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6) /* The number of MAC addresses returned */ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4 @@ -11907,6 +16832,7 @@ #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0 #define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41 +#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169 /***********************************/ @@ -11916,6 +16842,7 @@ * function will be reset before applying the changes. */ #define MC_CMD_VPORT_RECONFIGURE 0xeb +#undef MC_CMD_0xeb_PRIVILEGE_CTG #define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11927,8 +16854,10 @@ /* Flags requesting what should be changed. */ #define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4 #define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1 +#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1 #define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1 /* The number of VLAN tags to insert/remove. An error will be returned if @@ -11940,8 +16869,10 @@ /* The actual VLAN tags to insert/remove */ #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16 +#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16 #define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16 /* The number of MAC addresses to add */ @@ -11956,6 +16887,7 @@ #define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4 #define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4 +#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0 #define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1 @@ -11965,6 +16897,7 @@ * read some config of v-port. */ #define MC_CMD_EVB_PORT_QUERY 0x62 +#undef MC_CMD_0x62_PRIVILEGE_CTG #define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -11994,6 +16927,7 @@ * lifted in future. 
*/ #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab +#undef MC_CMD_0xab_PRIVILEGE_CTG #define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12009,12 +16943,15 @@ /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num)) +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12) /* Raw buffer table entries, layed out as BUFTBL_ENTRY. */ #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1 #define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21 +#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85 /***********************************/ @@ -12022,6 +16959,7 @@ * Set global RXDP configuration settings */ #define MC_CMD_SET_RXDP_CONFIG 0xc1 +#undef MC_CMD_0xc1_PRIVILEGE_CTG #define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -12029,8 +16967,10 @@ #define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 /* enum: pad to 64 bytes */ @@ -12049,6 +16989,7 @@ * Get global RXDP configuration settings */ #define MC_CMD_GET_RXDP_CONFIG 0xc2 +#undef MC_CMD_0xc2_PRIVILEGE_CTG #define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -12059,8 +17000,10 @@ #define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4 #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2 /* Enum values, see field(s): */ @@ -12072,6 +17015,7 @@ * Return the system and PDCPU clock frequencies. */ #define MC_CMD_GET_CLOCK 0xac +#undef MC_CMD_0xac_PRIVILEGE_CTG #define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -12093,6 +17037,7 @@ * Control the system and DPCPU clock frequencies. Changes are lost reboot. */ #define MC_CMD_SET_CLOCK 0xad +#undef MC_CMD_0xad_PRIVILEGE_CTG #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12178,6 +17123,7 @@ * Send an arbitrary DPCPU message. 
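[Illustrative note] A quick consistency check of the _LENMAX_MCDI2 and _NUM() helpers this patch adds throughout the header, taking MC_CMD_DUMP_BUFTBL_ENTRIES_OUT from the hunk above as the example: each entry is 12 bytes, so the classic 252-byte MCDI payload carries 21 entries, the 1020-byte MCDI v2 payload carries 85, and _ENTRY_NUM() is simply the inverse of _LEN(). The C11 static assertions below are an illustration, not part of the header.

#include <assert.h>

static_assert(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM) ==
	      MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX,
	      "classic MCDI: 21 entries * 12 bytes == 252");
static_assert(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2) ==
	      MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2,
	      "MCDI v2: 85 entries * 12 bytes == 1020");
static_assert(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX) ==
	      MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM,
	      "_NUM() inverts _LEN()");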
*/ #define MC_CMD_DPCPU_RPC 0xae +#undef MC_CMD_0xae_PRIVILEGE_CTG #define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12206,6 +17152,7 @@ */ #define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4 #define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8 #define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */ @@ -12217,14 +17164,19 @@ #define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */ #define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */ #define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */ +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48 #define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */ @@ -12232,17 +17184,22 @@ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */ #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80 #define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16 +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */ #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */ #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */ +#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64 #define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16 #define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12 @@ -12261,8 +17218,10 @@ /* DATA */ #define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32 +#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32 #define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16 +#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48 #define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16 #define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12 @@ -12282,6 +17241,7 @@ * Trigger an interrupt by prodding the BIU. 
*/ #define MC_CMD_TRIGGER_INTERRUPT 0xe3 +#undef MC_CMD_0xe3_PRIVILEGE_CTG #define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -12300,6 +17260,7 @@ * Special operations to support (for now) shmboot. */ #define MC_CMD_SHMBOOT_OP 0xe6 +#undef MC_CMD_0xe6_PRIVILEGE_CTG #define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -12320,6 +17281,7 @@ * Read multiple 64bit words from capture block memory */ #define MC_CMD_CAP_BLK_READ 0xe7 +#undef MC_CMD_0xe7_PRIVILEGE_CTG #define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12335,13 +17297,16 @@ /* MC_CMD_CAP_BLK_READ_OUT msgresponse */ #define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8 #define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248 +#define MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016 #define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num)) +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8) #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0 #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8 #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0 #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4 #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1 #define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31 +#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127 /***********************************/ @@ -12349,6 +17314,7 @@ * Take a dump of the DUT state */ #define MC_CMD_DUMP_DO 0xe8 +#undef MC_CMD_0xe8_PRIVILEGE_CTG #define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12428,6 +17394,7 @@ * Configure unsolicited dumps */ #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9 +#undef MC_CMD_0xe9_PRIVILEGE_CTG #define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12496,6 +17463,7 @@ * the parameter is out of range. */ #define MC_CMD_SET_PSU 0xea +#undef MC_CMD_0xea_PRIVILEGE_CTG #define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12521,6 +17489,7 @@ * Get function information. PF and VF number. */ #define MC_CMD_GET_FUNCTION_INFO 0xec +#undef MC_CMD_0xec_PRIVILEGE_CTG #define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -12542,6 +17511,7 @@ * reboot. */ #define MC_CMD_ENABLE_OFFLINE_BIST 0xed +#undef MC_CMD_0xed_PRIVILEGE_CTG #define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -12559,13 +17529,16 @@ * forget. */ #define MC_CMD_UART_SEND_DATA 0xee +#undef MC_CMD_0xee_PRIVILEGE_CTG #define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_UART_SEND_DATA_OUT msgrequest */ #define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16 #define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252 +#define MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020 #define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num)) +#define MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1) /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */ #define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0 #define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4 @@ -12582,6 +17555,7 @@ #define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1 #define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0 #define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236 +#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004 /* MC_CMD_UART_SEND_DATA_IN msgresponse */ #define MC_CMD_UART_SEND_DATA_IN_LEN 0 @@ -12593,6 +17567,7 @@ * subject to change and not currently implemented. 
*/ #define MC_CMD_UART_RECV_DATA 0xef +#undef MC_CMD_0xef_PRIVILEGE_CTG #define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -12614,7 +17589,9 @@ /* MC_CMD_UART_RECV_DATA_IN msgresponse */ #define MC_CMD_UART_RECV_DATA_IN_LENMIN 16 #define MC_CMD_UART_RECV_DATA_IN_LENMAX 252 +#define MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020 #define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num)) +#define MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1) /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */ #define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0 #define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4 @@ -12631,6 +17608,7 @@ #define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1 #define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0 #define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236 +#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004 /***********************************/ @@ -12638,6 +17616,7 @@ * Read data programmed into the device One-Time-Programmable (OTP) Fuses */ #define MC_CMD_READ_FUSES 0xf0 +#undef MC_CMD_0xf0_PRIVILEGE_CTG #define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -12653,7 +17632,9 @@ /* MC_CMD_READ_FUSES_OUT msgresponse */ #define MC_CMD_READ_FUSES_OUT_LENMIN 4 #define MC_CMD_READ_FUSES_OUT_LENMAX 252 +#define MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020 #define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1) /* Length of returned OTP data in bytes */ #define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0 #define MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4 @@ -12662,6 +17643,7 @@ #define MC_CMD_READ_FUSES_OUT_DATA_LEN 1 #define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0 #define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248 +#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016 /***********************************/ @@ -12669,13 +17651,16 @@ * Get or set KR Serdes RXEQ and TX Driver settings */ #define MC_CMD_KR_TUNE 0xf1 +#undef MC_CMD_0xf1_PRIVILEGE_CTG #define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_KR_TUNE_IN msgrequest */ #define MC_CMD_KR_TUNE_IN_LENMIN 4 #define MC_CMD_KR_TUNE_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_IN_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_NUM(len) (((len)-4)/4) /* Requested operation */ #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1 @@ -12712,6 +17697,7 @@ #define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4 #define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0 #define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM_MCDI2 254 /* MC_CMD_KR_TUNE_OUT msgresponse */ #define MC_CMD_KR_TUNE_OUT_LEN 0 @@ -12728,12 +17714,16 @@ /* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) /* RXEQ Parameter */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15, Huntington) */ @@ -12822,6 +17812,45 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20 /* enum: CDR integral loop code 
(Medford2) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21 +/* enum: CTLE Boost stages - retimer lineside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_LS 0x22 +/* enum: DFE Tap1 - retimer lineside (Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_LS 0x23 +/* enum: DFE Tap2 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_LS 0x24 +/* enum: DFE Tap3 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_LS 0x25 +/* enum: DFE Tap4 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_LS 0x26 +/* enum: DFE Tap5 - retimer lineside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_LS 0x27 +/* enum: CTLE Boost stages - retimer hostside (Medford2 with DS250x retimer - 4 + * stages, 2 bits per stage) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_HS 0x28 +/* enum: DFE Tap1 - retimer hostside (Medford2 with DS250x retimer (-31 - 31)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_HS 0x29 +/* enum: DFE Tap2 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_HS 0x2a +/* enum: DFE Tap3 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_HS 0x2b +/* enum: DFE Tap4 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_HS 0x2c +/* enum: DFE Tap5 - retimer hostside (Medford2 with DS250x retimer (-15 - 15)) + */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_HS 0x2d +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -12829,19 +17858,25 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 /* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */ #define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) /* Requested operation */ #define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1 @@ -12853,20 +17888,27 @@ #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define 
MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 /* Enum values, see field(s): */ /* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3 /* Enum values, see field(s): */ /* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 #define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 @@ -12885,12 +17927,16 @@ /* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) /* TXEQ Parameter */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TX Amplitude (Huntington, Medford, Medford2) */ @@ -12915,10 +17961,23 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 /* enum: TX Amplitude Fine control (Medford) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa -/* enum: Pre-shoot Tap (Medford, Medford2) */ +/* enum: Pre-cursor Tap (Medford, Medford2) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb -/* enum: De-emphasis Tap (Medford, Medford2) */ +/* enum: Post-cursor Tap (Medford, Medford2) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc +/* enum: TX Amplitude (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_LS 0xd +/* enum: Pre-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_LS 0xe +/* enum: Post-cursor Tap (Retimer Lineside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_LS 0xf +/* enum: TX Amplitude (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_HS 0x10 +/* enum: Pre-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_HS 0x11 +/* enum: Post-cursor Tap (Retimer Hostside) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_HS 0x12 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -12926,17 +17985,22 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */ +#define 
MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8 /* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */ #define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num)) +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) /* Requested operation */ #define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1 @@ -12948,18 +18012,24 @@ #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_OFST 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8 /* Enum values, see field(s): */ /* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_OFST 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3 /* Enum values, see field(s): */ /* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_OFST 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_OFST 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_OFST 4 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24 #define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8 @@ -13000,8 +18070,10 @@ #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_OFST 4 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_OFST 4 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31 #define MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1 /* Scan duration / cycle count */ @@ -13023,11 +18095,14 @@ /* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */ #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 /* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */ #define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8 @@ -13039,8 +18114,10 @@ #define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3 
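[Illustrative note] Each TXEQ parameter returned by MC_CMD_KR_TUNE_TXEQ_GET_OUT is a packed dword: ID in bits 0-7, lane in bits 8-10 and the initial value in bits 16-23, per the _LBN/_WIDTH pairs above. The decoder below is a sketch only; the masks are hand-derived from those _WIDTH values.

#include <stdint.h>
#include <stdio.h>

/* Decode one MC_CMD_KR_TUNE_TXEQ_GET_OUT parameter dword. */
static void kr_txeq_param_decode(uint32_t param)
{
	unsigned int id      = (param >> MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN) & 0xff;
	unsigned int lane    = (param >> MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN) & 0x7;
	unsigned int initial = (param >> MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN) & 0xff;

	printf("txeq param id=%#x lane=%u initial=%u\n", id, lane, initial);
}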
#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_OFST 4 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8 +#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_OFST 4 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31 #define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1 @@ -13130,13 +18207,16 @@ * Get or set PCIE Serdes RXEQ and TX Driver settings */ #define MC_CMD_PCIE_TUNE 0xf2 +#undef MC_CMD_0xf2_PRIVILEGE_CTG #define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_PCIE_TUNE_IN msgrequest */ #define MC_CMD_PCIE_TUNE_IN_LENMIN 4 #define MC_CMD_PCIE_TUNE_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020 #define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num)) +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4) /* Requested operation */ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1 @@ -13165,6 +18245,7 @@ #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0 #define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254 /* MC_CMD_PCIE_TUNE_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_OUT_LEN 0 @@ -13181,12 +18262,16 @@ /* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) /* RXEQ Parameter */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: Attenuation (0-15) */ @@ -13211,6 +18296,7 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 /* enum: CTLE EQ Resistor (DC Gain) */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -13230,17 +18316,22 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 /* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +#define 
MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4) /* Requested operation */ #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 @@ -13252,20 +18343,27 @@ #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 /* Enum values, see field(s): */ /* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 /* Enum values, see field(s): */ /* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 #define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 @@ -13284,12 +18382,16 @@ /* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4) /* RXEQ Parameter */ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 /* enum: TxMargin (PIPE) */ @@ -13302,12 +18404,15 @@ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3 /* enum: De-emphasis coefficient C(+1) (PIPE) */ #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4 /* Enum values, see field(s): */ /* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 @@ -13337,11 +18442,14 @@ /* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */ #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0 #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020 #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num)) +#define 
MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2) #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0 #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2 #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0 #define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126 +#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510 /* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */ #define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0 @@ -13356,6 +18464,7 @@ * - not used for V3 licensing */ #define MC_CMD_LICENSING 0xf3 +#undef MC_CMD_0xf3_PRIVILEGE_CTG #define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13411,6 +18520,7 @@ * - V3 licensing (Medford) */ #define MC_CMD_LICENSING_V3 0xd0 +#undef MC_CMD_0xd0_PRIVILEGE_CTG #define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13480,6 +18590,7 @@ * partition - V3 licensing (Medford) */ #define MC_CMD_LICENSING_GET_ID_V3 0xd1 +#undef MC_CMD_0xd1_PRIVILEGE_CTG #define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13489,7 +18600,9 @@ /* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num)) +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1) /* type of license (eg 3) */ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4 @@ -13501,6 +18614,7 @@ #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0 #define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244 +#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012 /***********************************/ @@ -13509,6 +18623,7 @@ * This will fail on a single-core system. */ #define MC_CMD_MC2MC_PROXY 0xf4 +#undef MC_CMD_0xf4_PRIVILEGE_CTG #define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13526,6 +18641,7 @@ * or a reboot of the MC.) Not used for V3 licensing */ #define MC_CMD_GET_LICENSED_APP_STATE 0xf5 +#undef MC_CMD_0xf5_PRIVILEGE_CTG #define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13553,6 +18669,7 @@ * operation or a reboot of the MC.) Used for V3 licensing (Medford) */ #define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2 +#undef MC_CMD_0xd2_PRIVILEGE_CTG #define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13579,11 +18696,12 @@ /***********************************/ /* MC_CMD_GET_LICENSED_V3_FEATURE_STATES - * Query the state of one or more licensed features. (Note that the actual + * Query the state of an one or more licensed features. (Note that the actual * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE * operation or a reboot of the MC.) Used for V3 licensing (Medford) */ #define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3 +#undef MC_CMD_0xd3_PRIVILEGE_CTG #define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13612,13 +18730,16 @@ * licensing. 
*/ #define MC_CMD_LICENSED_APP_OP 0xf6 +#undef MC_CMD_0xf6_PRIVILEGE_CTG #define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_LICENSED_APP_OP_IN msgrequest */ #define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8 #define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020 #define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num)) +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4) /* application ID */ #define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0 #define MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4 @@ -13634,16 +18755,20 @@ #define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0 #define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61 +#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253 /* MC_CMD_LICENSED_APP_OP_OUT msgresponse */ #define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0 #define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252 +#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020 #define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4) /* result specific to this particular operation */ #define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0 #define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4 #define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0 #define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63 +#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255 /* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */ #define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72 @@ -13688,6 +18813,7 @@ * (Medford) */ #define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 +#undef MC_CMD_0xd4_PRIVILEGE_CTG #define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13740,6 +18866,7 @@ * Mask features - V3 licensing (Medford) */ #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 +#undef MC_CMD_0xd5_PRIVILEGE_CTG #define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -13771,6 +18898,7 @@ * erased when the adapter is power cycled */ #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6 +#undef MC_CMD_0xd6_PRIVILEGE_CTG #define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -13840,6 +18968,7 @@ * delivered to a specific queue, or a set of queues with RSS. */ #define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7 +#undef MC_CMD_0xf7_PRIVILEGE_CTG #define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -13848,8 +18977,10 @@ /* configuration flags */ #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 +#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1 #define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ @@ -13880,6 +19011,7 @@ * the configuration. 
*/ #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8 +#undef MC_CMD_0xf8_PRIVILEGE_CTG #define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13891,8 +19023,10 @@ /* configuration flags */ #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1 #define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ @@ -13915,13 +19049,16 @@ * Change configuration related to the parser-dispatcher subsystem. */ #define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9 +#undef MC_CMD_0xf9_PRIVILEGE_CTG #define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12 #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020 #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num)) +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4) /* the type of configuration setting to change */ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0 #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4 @@ -13946,6 +19083,7 @@ #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4 #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1 #define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61 +#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253 /* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0 @@ -13956,6 +19094,7 @@ * Read configuration related to the parser-dispatcher subsystem. */ #define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa +#undef MC_CMD_0xfa_PRIVILEGE_CTG #define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -13975,7 +19114,9 @@ /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */ #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4 #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020 #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num)) +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4) /* current value: the details depend on the type of configuration setting being * read */ @@ -13983,6 +19124,7 @@ #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4 #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1 #define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63 +#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255 /***********************************/ @@ -13996,6 +19138,7 @@ * dedicated as TX sniff receivers. */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb +#undef MC_CMD_0xfb_PRIVILEGE_CTG #define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14004,6 +19147,7 @@ /* configuration flags */ #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4 +#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1 /* receive queue handle (for RSS mode, this is the base queue) */ @@ -14034,6 +19178,7 @@ * the configuration. 
*/ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc +#undef MC_CMD_0xfc_PRIVILEGE_CTG #define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14045,6 +19190,7 @@ /* configuration flags */ #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4 +#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1 /* receiving queue handle (for RSS mode, this is the base queue) */ @@ -14067,6 +19213,7 @@ * Per queue rx error stats. */ #define MC_CMD_RMON_STATS_RX_ERRORS 0xfe +#undef MC_CMD_0xfe_PRIVILEGE_CTG #define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14077,6 +19224,7 @@ #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4 +#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0 #define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1 @@ -14097,6 +19245,7 @@ * Find out about available PCIE resources */ #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd +#undef MC_CMD_0xfd_PRIVILEGE_CTG #define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14135,6 +19284,7 @@ * Find out about available port modes */ #define MC_CMD_GET_PORT_MODES 0xff +#undef MC_CMD_0xff_PRIVILEGE_CTG #define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14143,7 +19293,9 @@ /* MC_CMD_GET_PORT_MODES_OUT msgresponse */ #define MC_CMD_GET_PORT_MODES_OUT_LEN 12 -/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */ +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. + */ #define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0 #define MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4 /* Default (canonical) board mode */ @@ -14153,12 +19305,66 @@ #define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8 #define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4 +/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16 +/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) + * that are supported for customer use in production firmware. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0 +#define MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4 +/* Default (canonical) board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4 +#define MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4 +/* Current board mode */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8 +#define MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4 +/* Bitmask of engineering port modes available on the board (indexed by + * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that + * contains all modes implemented in firmware for a particular board. Modes + * listed in MODES are considered production modes and should be exposed in + * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES + * should be considered hidden (not to be exposed in userland tools) and for + * engineering use only. There are no other semantic differences and any mode + * listed in either MODES or ENGINEERING_MODES can be set on the board. + */ +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12 +#define MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4 + + +/***********************************/ +/* MC_CMD_OVERRIDE_PORT_MODE + * Override flash config port mode for subsequent MC reboot(s). 
Override data + * is stored in the presistent data section of DMEM and activated on next MC + * warm reboot. A cold reboot resets the override. It is assumed that a + * sufficient number of PFs are available and that port mapping is valid for + * the new port mode, as the override does not affect PF configuration. + */ +#define MC_CMD_OVERRIDE_PORT_MODE 0x137 +#undef MC_CMD_0x137_PRIVILEGE_CTG + +#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1 +/* New mode (TLV_PORT_MODE_*) to set, if override enabled */ +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4 +#define MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4 + +/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */ +#define MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0 + /***********************************/ /* MC_CMD_READ_ATB * Sample voltages on the ATB */ #define MC_CMD_READ_ATB 0x100 +#undef MC_CMD_0x100_PRIVILEGE_CTG #define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14188,6 +19394,7 @@ * enums here must correspond with those in MC_CMD_WORKAROUND. */ #define MC_CMD_GET_WORKAROUNDS 0x59 +#undef MC_CMD_0x59_PRIVILEGE_CTG #define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14224,6 +19431,7 @@ * Read/set privileges of an arbitrary PCIe function */ #define MC_CMD_PRIVILEGE_MASK 0x5a +#undef MC_CMD_0x5a_PRIVILEGE_CTG #define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14234,8 +19442,10 @@ */ #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16 #define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16 #define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */ @@ -14274,6 +19484,12 @@ * are not permitted on secure adapters regardless of the privilege mask. */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000 +/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for + * administrator-level operations that are not allowed from the local host once + * an adapter has Bound to a remote ServerLock Controller (see doxbox + * SF-117064-DG for background). + */ +#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000 /* enum: Set this bit to indicate that a new privilege mask is to be set, * otherwise the command will only read the existing mask. */ @@ -14291,6 +19507,7 @@ * Read/set link state mode of a VF */ #define MC_CMD_LINK_STATE_MODE 0x5c +#undef MC_CMD_0x5c_PRIVILEGE_CTG #define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14301,8 +19518,10 @@ */ #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16 #define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16 /* New link state mode to be set */ @@ -14327,6 +19546,7 @@ * parameter to MC_CMD_INIT_RXQ. 
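/* Sketch (not part of this patch): consuming the new V2 port-modes response
 * and building an MC_CMD_OVERRIDE_PORT_MODE request.  new_mode would be one
 * of the pre-existing TLV_PORT_MODE_* values; get_le32()/put_le32() come
 * from the first sketch above.
 */

/* Modes present only in ENGINEERING_MODES are implemented in firmware but
 * should stay hidden from userland tools.
 */
static uint32_t hidden_engineering_modes(const uint8_t *outbuf)
{
        uint32_t production = get_le32(outbuf,
                        MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST);
        uint32_t engineering = get_le32(outbuf,
                        MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST);

        return engineering & ~production;
}

/* Request a port-mode override that takes effect on the next MC warm reboot. */
static void build_port_mode_override(uint8_t inbuf[MC_CMD_OVERRIDE_PORT_MODE_IN_LEN],
                                     uint32_t new_mode)
{
        put_le32(inbuf, MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST,
                 1u << MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN);
        put_le32(inbuf, MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST, new_mode);
}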
*/ #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101 +#undef MC_CMD_0x101_PRIVILEGE_CTG #define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14348,6 +19568,7 @@ * Additional fuse diagnostics */ #define MC_CMD_FUSE_DIAGS 0x102 +#undef MC_CMD_0x102_PRIVILEGE_CTG #define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14401,6 +19622,7 @@ * included in one of the masks provided. */ #define MC_CMD_PRIVILEGE_MODIFY 0x60 +#undef MC_CMD_0x60_PRIVILEGE_CTG #define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14418,8 +19640,10 @@ /* For VFS_OF_PF specify the PF, for ONE specify the target function */ #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16 +#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16 #define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16 /* Privileges to be added to the target functions. For privilege definitions @@ -14442,6 +19666,7 @@ * Read XPM memory */ #define MC_CMD_XPM_READ_BYTES 0x103 +#undef MC_CMD_0x103_PRIVILEGE_CTG #define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14457,12 +19682,15 @@ /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */ #define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0 #define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252 +#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020 #define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1) /* Data */ #define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0 #define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1 #define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0 #define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252 +#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020 /***********************************/ @@ -14470,13 +19698,16 @@ * Write XPM memory */ #define MC_CMD_XPM_WRITE_BYTES 0x104 +#undef MC_CMD_0x104_PRIVILEGE_CTG #define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */ #define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8 #define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252 +#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020 #define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num)) +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1) /* Start address (byte) */ #define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0 #define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4 @@ -14488,6 +19719,7 @@ #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1 #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0 #define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244 +#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012 /* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */ #define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0 @@ -14498,6 +19730,7 @@ * Read XPM sector */ #define MC_CMD_XPM_READ_SECTOR 0x105 +#undef MC_CMD_0x105_PRIVILEGE_CTG #define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14513,7 +19746,9 @@ /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4 #define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36 +#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36 #define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1) /* Sector type */ #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0 #define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4 @@ -14527,6 +19762,7 @@ #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1 #define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0 #define 
MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32 +#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32 /***********************************/ @@ -14534,13 +19770,16 @@ * Write XPM sector */ #define MC_CMD_XPM_WRITE_SECTOR 0x106 +#undef MC_CMD_0x106_PRIVILEGE_CTG #define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */ #define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12 #define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44 +#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44 #define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num)) +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1) /* If writing fails due to an uncorrectable error, try up to RETRIES following * sectors (or until no more space available). If 0, only one write attempt is * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair @@ -14563,6 +19802,7 @@ #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1 #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0 #define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32 +#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32 /* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */ #define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4 @@ -14576,6 +19816,7 @@ * Invalidate XPM sector */ #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107 +#undef MC_CMD_0x107_PRIVILEGE_CTG #define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14594,6 +19835,7 @@ * Blank-check XPM memory and report bad locations */ #define MC_CMD_XPM_BLANK_CHECK 0x108 +#undef MC_CMD_0x108_PRIVILEGE_CTG #define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14609,7 +19851,9 @@ /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */ #define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4 #define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252 +#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020 #define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num)) +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2) /* Total number of bad (non-blank) locations */ #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0 #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4 @@ -14620,6 +19864,7 @@ #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2 #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0 #define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124 +#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508 /***********************************/ @@ -14627,6 +19872,7 @@ * Blank-check and repair XPM memory */ #define MC_CMD_XPM_REPAIR 0x109 +#undef MC_CMD_0x109_PRIVILEGE_CTG #define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14649,6 +19895,7 @@ * be performed on an unprogrammed part. */ #define MC_CMD_XPM_DECODER_TEST 0x10a +#undef MC_CMD_0x10a_PRIVILEGE_CTG #define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14668,6 +19915,7 @@ * first available location to use, or fail with ENOSPC if none left. */ #define MC_CMD_XPM_WRITE_TEST 0x10b +#undef MC_CMD_0x10b_PRIVILEGE_CTG #define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE @@ -14688,6 +19936,7 @@ * does match, otherwise it will respond with success before it jumps to IMEM. */ #define MC_CMD_EXEC_SIGNED 0x10c +#undef MC_CMD_0x10c_PRIVILEGE_CTG #define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14717,6 +19966,7 @@ * MC_CMD_EXEC_SIGNED. */ #define MC_CMD_PREPARE_SIGNED 0x10d +#undef MC_CMD_0x10d_PRIVILEGE_CTG #define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14761,16 +20011,20 @@ * cause all functions to see a reset. (Available on Medford only.) 
*/ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 +#undef MC_CMD_0x117_PRIVILEGE_CTG #define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4) /* Flags */ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 /* The number of entries in the ENTRIES array */ @@ -14783,12 +20037,14 @@ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 /* Flags */ #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 @@ -14801,6 +20057,7 @@ * priority. */ #define MC_CMD_RX_BALANCING 0x118 +#undef MC_CMD_0x118_PRIVILEGE_CTG #define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14829,13 +20086,16 @@ * if the tag is already present. */ #define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c +#undef MC_CMD_0x11c_PRIVILEGE_CTG #define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN /* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num)) +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1) /* The tag to be appended */ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4 @@ -14847,6 +20107,7 @@ #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1 #define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244 +#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012 /* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */ #define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0 @@ -14859,6 +20120,7 @@ * correctly at ATE. 
*/ #define MC_CMD_XPM_VERIFY_CONTENTS 0x11b +#undef MC_CMD_0x11b_PRIVILEGE_CTG #define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN @@ -14871,7 +20133,9 @@ /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num)) +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1) /* Number of sectors found (test builds only) */ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4 @@ -14886,6 +20150,7 @@ #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0 #define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240 +#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008 /***********************************/ @@ -14898,6 +20163,7 @@ * and TMR_RELOAD_ACT_NS). */ #define MC_CMD_SET_EVQ_TMR 0x120 +#undef MC_CMD_0x120_PRIVILEGE_CTG #define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -14935,6 +20201,7 @@ * Query properties about the event queue timers. */ #define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122 +#undef MC_CMD_0x122_PRIVILEGE_CTG #define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15003,6 +20270,7 @@ * non used switch buffers. */ #define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d +#undef MC_CMD_0x11d_PRIVILEGE_CTG #define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15054,6 +20322,7 @@ * previously allocated common pools. */ #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e +#undef MC_CMD_0x11e_PRIVILEGE_CTG #define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15106,6 +20375,7 @@ * ready to be re-used. */ #define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f +#undef MC_CMD_0x11f_PRIVILEGE_CTG #define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15125,6 +20395,7 @@ * it ready to be re-used. */ #define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121 +#undef MC_CMD_0x121_PRIVILEGE_CTG #define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15144,6 +20415,7 @@ * not yet assigned. */ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124 +#undef MC_CMD_0x124_PRIVILEGE_CTG #define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL @@ -15160,4 +20432,1537 @@ #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4 +/***********************************/ +/* MC_CMD_SUC_VERSION + * Get the version of the SUC + */ +#define MC_CMD_SUC_VERSION 0x134 +#undef MC_CMD_0x134_PRIVILEGE_CTG + +#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_SUC_VERSION_IN msgrequest */ +#define MC_CMD_SUC_VERSION_IN_LEN 0 + +/* MC_CMD_SUC_VERSION_OUT msgresponse */ +#define MC_CMD_SUC_VERSION_OUT_LEN 24 +/* The SUC firmware version as four numbers - a.b.c.d */ +#define MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4 +#define MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4 +/* The date, in seconds since the Unix epoch, when the firmware image was + * built. + */ +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16 +#define MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4 +/* The ID of the SUC chip. This is specific to the platform but typically + * indicates family, memory sizes etc. See SF-116728-SW for further details. + */ +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20 +#define MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4 + +/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot + * loader. 
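/* Sketch (not part of this patch): decoding MC_CMD_SUC_VERSION_OUT.  The
 * VERSION field is an array of four dwords holding the a.b.c.d components;
 * BUILD_DATE is seconds since the Unix epoch.  Uses get_le32() from the
 * first sketch.
 */
struct suc_version {
        uint32_t version[MC_CMD_SUC_VERSION_OUT_VERSION_NUM];  /* a, b, c, d */
        uint32_t build_date;    /* seconds since the Unix epoch */
        uint32_t chip_id;
};

static void parse_suc_version(const uint8_t *outbuf /* OUT_LEN == 24 bytes */,
                              struct suc_version *v)
{
        size_t i;

        for (i = 0; i < MC_CMD_SUC_VERSION_OUT_VERSION_NUM; i++)
                v->version[i] = get_le32(outbuf,
                        MC_CMD_SUC_VERSION_OUT_VERSION_OFST +
                        i * MC_CMD_SUC_VERSION_OUT_VERSION_LEN);
        v->build_date = get_le32(outbuf, MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST);
        v->chip_id = get_le32(outbuf, MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST);
}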
+ */ +#define MC_CMD_SUC_BOOT_VERSION_IN_LEN 4 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4 +/* enum: Requests the SUC boot version. */ +#define MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b + +/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4 +/* The SUC boot version */ +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0 +#define MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4 + + +/***********************************/ +/* MC_CMD_GET_RX_PREFIX_ID + * This command is part of the mechanism for configuring the format of the RX + * packet prefix. It takes as input a bitmask of the fields the host would like + * to be in the prefix. If the hardware supports RX prefixes with that + * combination of fields, then this command returns a list of prefix-ids, + * opaque identifiers suitable for use in the RX_PREFIX_ID field of a + * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not + * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids + * due to resource constraints, returns ENOSPC. + */ +#define MC_CMD_GET_RX_PREFIX_ID 0x13b +#undef MC_CMD_0x13b_PRIVILEGE_CTG + +#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */ +#define MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8 +/* Field bitmask. */ +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4 +#define MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5 +#define MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6 +#define MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7 +#define MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8 +#define MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9 +#define MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1 + +/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num)) +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4) +/* Number of prefix-ids 
returned */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4 +/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or + * MC_CMD_QUERY_PREFIX_ID + */ +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62 +#define MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254 + +/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix + * field + */ +#define RX_PREFIX_FIELD_INFO_LEN 4 +/* The offset of the field from the start of the prefix, in bits */ +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0 +#define RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16 +/* The width of the field, in bits */ +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16 +#define RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8 +/* The type of the field. These enum values are in the same order as the fields + * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask + */ +#define RX_PREFIX_FIELD_INFO_TYPE_OFST 3 +#define RX_PREFIX_FIELD_INFO_TYPE_LEN 1 +#define RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */ +#define RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */ +#define RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */ +#define RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */ +#define RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */ +#define RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */ +#define RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */ +#define RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */ +#define RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */ +#define RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */ +#define RX_PREFIX_FIELD_INFO_TYPE_LBN 24 +#define RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8 + +/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in + * which every field has a fixed offset and width + */ +#define RX_PREFIX_FIXED_RESPONSE_LENMIN 4 +#define RX_PREFIX_FIXED_RESPONSE_LENMAX 252 +#define RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020 +#define RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num)) +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4) +/* Length of the RX prefix in bytes */ +#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0 +#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1 +#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0 +#define RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8 +/* Number of fields present in the prefix */ +#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1 +#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1 +#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8 +#define RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8 +#define RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2 +#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2 +#define RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16 +#define RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16 +/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */ +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4 +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4 +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0 +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62 +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254 +#define RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32 +#define 
RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32 + + +/***********************************/ +/* MC_CMD_QUERY_RX_PREFIX_ID + * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID) + * and returns a description of the RX prefix of packets delievered to an RXQ + * created with that prefix id + */ +#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c +#undef MC_CMD_0x13c_PRIVILEGE_CTG + +#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */ +#define MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4 +/* Prefix id to query */ +#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0 +#define MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4 + +/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */ +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num)) +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1) +/* An enum describing the structure of this response. */ +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1 +/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */ +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3 +/* The response. Its format is as defined by the RESPONSE_TYPE value */ +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248 +#define MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016 + + +/***********************************/ +/* MC_CMD_BUNDLE + * A command to perform various bundle-related operations on insecure cards. + */ +#define MC_CMD_BUNDLE 0x13d +#undef MC_CMD_0x13d_PRIVILEGE_CTG + +#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE + +/* MC_CMD_BUNDLE_IN msgrequest */ +#define MC_CMD_BUNDLE_IN_LEN 4 +/* Sub-command code */ +#define MC_CMD_BUNDLE_IN_OP_OFST 0 +#define MC_CMD_BUNDLE_IN_OP_LEN 4 +/* enum: Get the current host access mode set on component partitions. */ +#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0 +/* enum: Set the host access mode set on component partitions. */ +#define MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1 + +/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current + * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and + * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On + * secure adapters, this command returns MC_CMD_ERR_EPERM. + */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4 +/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0 +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4 + +/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access + * control mode. + */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4 +/* Access mode of component partitions. */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0 +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4 +/* enum: Component partitions are read-only from the host. */ +#define MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0 +/* enum: Component partitions can read read-from written-to by the host. 
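/* Sketch (not part of this patch): asking for an RX prefix containing the
 * LENGTH, RSS_HASH and USER_MARK fields, then locating RSS_HASH in the fixed
 * layout returned by MC_CMD_QUERY_RX_PREFIX_ID.  Helpers are from the first
 * sketch; error and length checking is omitted for brevity.
 */
static void build_rx_prefix_request(uint8_t inbuf[MC_CMD_GET_RX_PREFIX_ID_IN_LEN])
{
        uint32_t fields_lo = (1u << MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN) |
                             (1u << MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN) |
                             (1u << MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN);

        put_le32(inbuf, MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST, fields_lo);
        put_le32(inbuf, MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST, 0);
}

/* Return the bit offset of RSS_HASH within the prefix, or -1 if absent. */
static int rss_hash_offset_bits(const uint8_t *outbuf /* QUERY_RX_PREFIX_ID_OUT */)
{
        const uint8_t *resp = outbuf + MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST;
        unsigned int count, i;

        if (outbuf[MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST] !=
            MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED)
                return -1;      /* only the FIXED layout is handled here */
        count = resp[RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST];
        for (i = 0; i < count; i++) {
                const uint8_t *fi = resp + RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST +
                                    i * RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN;

                if (fi[RX_PREFIX_FIELD_INFO_TYPE_OFST] == RX_PREFIX_FIELD_INFO_RSS_HASH)
                        return fi[RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST] |
                               (fi[RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST + 1] << 8);
        }
        return -1;
}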
*/ +#define MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1 + +/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component + * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as + * read-only on firmware built with bundle support. This command marks these + * partitions as read/writeable. The access status set by this command does not + * persist across MC reboots. This command only works on engineering (insecure) + * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM. + */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8 +/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0 +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4 +/* Access mode of component partitions. */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4 +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */ + +/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */ +#define MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_VPD + * Read all VPD starting from a given address + */ +#define MC_CMD_GET_VPD 0x165 +#undef MC_CMD_0x165_PRIVILEGE_CTG + +#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_VPD_IN msgresponse */ +#define MC_CMD_GET_VPD_IN_LEN 4 +/* VPD address to start from. In case VPD is longer than MCDI buffer + * (unlikely), user can make multiple calls with different starting addresses. + */ +#define MC_CMD_GET_VPD_IN_ADDR_OFST 0 +#define MC_CMD_GET_VPD_IN_ADDR_LEN 4 + +/* MC_CMD_GET_VPD_OUT msgresponse */ +#define MC_CMD_GET_VPD_OUT_LENMIN 0 +#define MC_CMD_GET_VPD_OUT_LENMAX 252 +#define MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num)) +#define MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1) +/* VPD data returned. */ +#define MC_CMD_GET_VPD_OUT_DATA_OFST 0 +#define MC_CMD_GET_VPD_OUT_DATA_LEN 1 +#define MC_CMD_GET_VPD_OUT_DATA_MINNUM 0 +#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252 +#define MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020 + + +/***********************************/ +/* MC_CMD_GET_NCSI_INFO + * Provide information about the NC-SI stack + */ +#define MC_CMD_GET_NCSI_INFO 0x167 +#undef MC_CMD_0x167_PRIVILEGE_CTG + +#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_NCSI_INFO_IN msgrequest */ +#define MC_CMD_GET_NCSI_INFO_IN_LEN 8 +/* Operation to be performed */ +#define MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0 +#define MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4 +/* enum: Information on the link settings. */ +#define MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0 +/* enum: Statistics associated with the channel */ +#define MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1 +/* The NC-SI channel on which the operation is to be performed */ +#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4 +#define MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4 + +/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */ +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12 +/* Settings as received from BMC. */ +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4 +/* Advertised capabilities applied to channel. 
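/* Sketch (not part of this patch): reading the full VPD by advancing ADDR
 * across multiple MC_CMD_GET_VPD calls, as the IN_ADDR comment suggests.
 * mcdi_rpc() is a hypothetical transport, not a real driver function: it
 * issues the named command and returns the response length (negative on
 * error).  The sketch assumes the MCDIv1 252-byte limit and treats a short
 * response as the end of the VPD.
 */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *inbuf, size_t inlen,
                    uint8_t *outbuf, size_t outlen_max);   /* hypothetical */

static int read_all_vpd(uint8_t *vpd, size_t vpd_max)
{
        uint8_t inbuf[MC_CMD_GET_VPD_IN_LEN];
        uint8_t outbuf[MC_CMD_GET_VPD_OUT_LENMAX];
        size_t copied = 0;
        int rc;

        do {
                put_le32(inbuf, MC_CMD_GET_VPD_IN_ADDR_OFST, (uint32_t)copied);
                rc = mcdi_rpc(MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
                              outbuf, sizeof(outbuf));
                if (rc < 0)
                        return rc;
                if (copied + (size_t)rc > vpd_max)
                        rc = (int)(vpd_max - copied);
                memcpy(vpd + copied, outbuf + MC_CMD_GET_VPD_OUT_DATA_OFST, rc);
                copied += rc;
        } while (rc == MC_CMD_GET_VPD_OUT_LENMAX && copied < vpd_max);

        return (int)copied;
}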
*/ +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4 +/* General status */ +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4 +#define MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1 + +/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28 +/* The number of NC-SI commands received. */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4 +/* The number of NC-SI commands dropped. */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4 +/* The number of invalid NC-SI commands received. */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4 +/* The number of checksum errors seen. */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4 +/* The number of NC-SI requests received. 
*/ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4 +/* The number of NC-SI responses sent (includes AENs) */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4 +/* The number of NC-SI AENs sent */ +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24 +#define MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4 + + +/* CLOCK_INFO structuredef: Information about a single hardware clock */ +#define CLOCK_INFO_LEN 28 +/* Enumeration that uniquely identifies the clock */ +#define CLOCK_INFO_CLOCK_ID_OFST 0 +#define CLOCK_INFO_CLOCK_ID_LEN 2 +/* enum: The Riverhead CMC (card MC) */ +#define CLOCK_INFO_CLOCK_CMC 0x0 +/* enum: The Riverhead NMC (network MC) */ +#define CLOCK_INFO_CLOCK_NMC 0x1 +/* enum: The Riverhead SDNET slice main logic */ +#define CLOCK_INFO_CLOCK_SDNET 0x2 +/* enum: The Riverhead SDNET LUT */ +#define CLOCK_INFO_CLOCK_SDNET_LUT 0x3 +/* enum: The Riverhead SDNET control logic */ +#define CLOCK_INFO_CLOCK_SDNET_CTRL 0x4 +/* enum: The Riverhead Streaming SubSystem */ +#define CLOCK_INFO_CLOCK_SSS 0x5 +/* enum: The Riverhead network MAC and associated CSR registers */ +#define CLOCK_INFO_CLOCK_MAC 0x6 +#define CLOCK_INFO_CLOCK_ID_LBN 0 +#define CLOCK_INFO_CLOCK_ID_WIDTH 16 +/* Assorted flags */ +#define CLOCK_INFO_FLAGS_OFST 2 +#define CLOCK_INFO_FLAGS_LEN 2 +#define CLOCK_INFO_SETTABLE_OFST 2 +#define CLOCK_INFO_SETTABLE_LBN 0 +#define CLOCK_INFO_SETTABLE_WIDTH 1 +#define CLOCK_INFO_FLAGS_LBN 16 +#define CLOCK_INFO_FLAGS_WIDTH 16 +/* The frequency in HZ */ +#define CLOCK_INFO_FREQUENCY_OFST 4 +#define CLOCK_INFO_FREQUENCY_LEN 8 +#define CLOCK_INFO_FREQUENCY_LO_OFST 4 +#define CLOCK_INFO_FREQUENCY_HI_OFST 8 +#define CLOCK_INFO_FREQUENCY_LBN 32 +#define CLOCK_INFO_FREQUENCY_WIDTH 64 +/* Human-readable ASCII name for clock, with NUL termination */ +#define CLOCK_INFO_NAME_OFST 12 +#define CLOCK_INFO_NAME_LEN 1 +#define CLOCK_INFO_NAME_NUM 16 +#define CLOCK_INFO_NAME_LBN 96 +#define CLOCK_INFO_NAME_WIDTH 8 + + +/***********************************/ +/* MC_CMD_GET_CLOCKS_INFO + * Get information about the device clocks + */ +#define MC_CMD_GET_CLOCKS_INFO 0x166 +#undef MC_CMD_0x166_PRIVILEGE_CTG + +#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */ +#define MC_CMD_GET_CLOCKS_INFO_IN_LEN 0 + +/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */ +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008 +#define MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num)) +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28) +/* An array of CLOCK_INFO structures. */ +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9 +#define MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36 + + +/***********************************/ +/* MC_CMD_VNIC_ENCAP_RULE_ADD + * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. 
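/* Sketch (not part of this patch): walking the CLOCK_INFO array returned by
 * MC_CMD_GET_CLOCKS_INFO.  The 64-bit FREQUENCY field is split into LO/HI
 * dwords, as for every 8-byte MCDI field.  Uses get_le32() from the first
 * sketch; outlen is the response length reported by the transport.
 */
#include <stdio.h>

static void print_clocks(const uint8_t *outbuf, size_t outlen)
{
        size_t n = MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(outlen);
        size_t i;

        for (i = 0; i < n; i++) {
                const uint8_t *info = outbuf +
                        MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST +
                        i * MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN;
                unsigned int id = info[CLOCK_INFO_CLOCK_ID_OFST] |
                        (info[CLOCK_INFO_CLOCK_ID_OFST + 1] << 8);
                uint64_t hz = get_le32(info, CLOCK_INFO_FREQUENCY_LO_OFST) |
                        ((uint64_t)get_le32(info, CLOCK_INFO_FREQUENCY_HI_OFST) << 32);

                /* NAME is NUL-terminated ASCII, at most CLOCK_INFO_NAME_NUM bytes */
                printf("clock %u (%.*s): %llu Hz\n", id,
                       CLOCK_INFO_NAME_NUM, info + CLOCK_INFO_NAME_OFST,
                       (unsigned long long)hz);
        }
}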
It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d +#undef MC_CMD_0x16d_PRIVILEGE_CTG + +#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36 +/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4 +/* Any non-zero bits other than the ones named below or an unsupported + * combination will cause the NIC to return EOPNOTSUPP. In the future more + * flags may be added. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1 +/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order. + * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2 +/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order. + * (Deprecated) + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12 +/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12 +/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the + * case of IPv4, the IP should be in the first 4 bytes and all other bytes + * should be zero. + */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16 +/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. 
*/ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1 +/* Actions that should be applied to packets match the rule. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1 +/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2 +/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4 + +/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4 +/* Handle to inserted rule. Used for removing the rule. */ +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4 + + +/***********************************/ +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE + * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule. + */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e +#undef MC_CMD_0x16e_PRIVILEGE_CTG + +#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4 +/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0 +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4 + +/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */ +#define MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0 + +/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are + * defined in SF-120734-TC with more information in SF-122717-TC. + */ +#define FUNCTION_PERSONALITY_LEN 4 +#define FUNCTION_PERSONALITY_ID_OFST 0 +#define FUNCTION_PERSONALITY_ID_LEN 4 +/* enum: Function has no assigned personality */ +#define FUNCTION_PERSONALITY_NULL 0x0 +/* enum: Function has an EF100-style function control window and VI windows + * with both EF100 and vDPA doorbells. + */ +#define FUNCTION_PERSONALITY_EF100 0x1 +/* enum: Function has virtio net device configuration registers and doorbells + * for virtio queue pairs. + */ +#define FUNCTION_PERSONALITY_VIRTIO_NET 0x2 +/* enum: Function has virtio block device configuration registers and a + * doorbell for a single virtqueue. + */ +#define FUNCTION_PERSONALITY_VIRTIO_BLK 0x3 +/* enum: Function is a Xilinx acceleration device - management function */ +#define FUNCTION_PERSONALITY_ACCEL_MGMT 0x4 +/* enum: Function is a Xilinx acceleration device - user function */ +#define FUNCTION_PERSONALITY_ACCEL_USR 0x5 +#define FUNCTION_PERSONALITY_ID_LBN 0 +#define FUNCTION_PERSONALITY_ID_WIDTH 32 + + +/***********************************/ +/* MC_CMD_VIRTIO_GET_FEATURES + * Get a list of the virtio features supported by the device. + */ +#define MC_CMD_VIRTIO_GET_FEATURES 0x168 +#undef MC_CMD_0x168_PRIVILEGE_CTG + +#define MC_CMD_0x168_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_GET_FEATURES_IN msgrequest */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_LEN 4 +/* Type of device to get features for. Matches the device id as defined by the + * virtio spec. 
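/* Sketch (not part of this patch): building an MC_CMD_VNIC_ENCAP_RULE_ADD
 * request that tags IPv4/UDP packets to a given destination port (e.g. a
 * VXLAN port) with an encapsulation type.  encap_type is a value from the
 * MAE_MCDI_ENCAP_TYPE enumeration, and MAE_MPORT_SELECTOR_ASSIGNED is
 * assumed to be defined elsewhere in this header, per the MPORT_SELECTOR
 * comment.  ETHER_TYPE and DST_PORT are stored big-endian because those
 * fields are specified in network order.  Helpers are from the first sketch.
 */
static void build_udp_encap_rule(uint8_t inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN],
                                 uint16_t udp_dst_port, uint32_t encap_type)
{
        uint32_t match = (1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN) |
                         (1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN) |
                         (1u << MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN);

        memset(inbuf, 0, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
        put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST,
                 MAE_MPORT_SELECTOR_ASSIGNED);
        put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST, match);
        /* EtherType 0x0800 (IPv4), network byte order */
        inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST] = 0x08;
        inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST + 1] = 0x00;
        inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST] = 17;        /* UDP */
        inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST] = udp_dst_port >> 8;
        inbuf[MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST + 1] = udp_dst_port & 0xff;
        put_le32(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST, encap_type);
        /* On success the response carries a rule handle at
         * MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST, later passed to
         * MC_CMD_VNIC_ENCAP_RULE_REMOVE.
         */
}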
+ */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_LEN 4 +/* enum: Reserved. Do not use. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_RESERVED 0x0 +/* enum: Net device. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_NET 0x1 +/* enum: Block device. */ +#define MC_CMD_VIRTIO_GET_FEATURES_IN_BLOCK 0x2 + +/* MC_CMD_VIRTIO_GET_FEATURES_OUT msgresponse */ +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN 8 +/* Features supported by the device. The result is a bitfield in the format of + * the feature bits of the specified device type as defined in the virtIO 1.1 + * specification ( https://docs.oasis- + * open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.pdf ) + */ +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0 +#define MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4 + + +/***********************************/ +/* MC_CMD_VIRTIO_TEST_FEATURES + * Query whether a given set of features is supported. Fails with ENOSUP if the + * driver requests a feature the device doesn't support. Fails with EINVAL if + * the driver fails to request a feature which the device requires. + */ +#define MC_CMD_VIRTIO_TEST_FEATURES 0x169 +#undef MC_CMD_0x169_PRIVILEGE_CTG + +#define MC_CMD_0x169_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_TEST_FEATURES_IN msgrequest */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN 16 +/* Type of device to test features for. Matches the device id as defined by the + * virtio spec. + */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_OFST 4 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_LEN 4 +/* Features requested. Same format as the returned value from + * MC_CMD_VIRTIO_GET_FEATURES. + */ +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8 +#define MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12 + +/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */ +#define MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_VIRTIO_INIT_QUEUE + * Create a virtio virtqueue. Fails with EALREADY if the queue already exists. + * Fails with ENOSUP if a feature is requested that isn't supported. Fails with + * EINVAL if a required feature isn't requested, or any other parameter is + * invalid. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE 0x16a +#undef MC_CMD_0x16a_PRIVILEGE_CTG + +#define MC_CMD_0x16a_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_INIT_QUEUE_REQ msgrequest */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN 68 +/* Type of virtqueue to create. A network rxq and a txq can exist at the same + * time on a single VI. 
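/* Sketch (not part of this patch) of the intended GET/TEST feature handshake:
 * the driver reads the device's 64-bit virtio feature bitmap, masks it down
 * to the subset it wants, and asks the MC whether that subset is acceptable
 * (MC_CMD_VIRTIO_TEST_FEATURES fails if a required feature was not
 * requested).  Helpers are from the first sketch.
 */
static uint64_t virtio_features_from_resp(const uint8_t *outbuf)
{
        return (uint64_t)get_le32(outbuf, MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST) |
               ((uint64_t)get_le32(outbuf, MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST) << 32);
}

static void build_test_features(uint8_t inbuf[MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN],
                                uint64_t requested)
{
        memset(inbuf, 0, MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN);
        put_le32(inbuf, MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_OFST,
                 MC_CMD_VIRTIO_GET_FEATURES_IN_NET);    /* net device */
        put_le32(inbuf, MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST,
                 (uint32_t)requested);
        put_le32(inbuf, MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST,
                 (uint32_t)(requested >> 32));
}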
+ */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_OFST 0 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_LEN 1 +/* enum: A network device receive queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ 0x0 +/* enum: A network device transmit queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ 0x1 +/* enum: A block device request queue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_BLOCK 0x2 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_OFST 1 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_LEN 1 +/* If the calling function is a PF and this field is not VF_NULL, create the + * queue on the specified child VF instead of on the PF. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_LEN 2 +/* enum: No VF, create queue on the PF. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL 0xffff +/* Desired instance. This is the function-local index of the associated VI, not + * the virtqueue number as counted by the virtqueue spec. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_OFST 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_LEN 4 +/* Queue size, in entries. Must be a power of two. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_OFST 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_LEN 4 +/* Flags */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_OFST 12 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_LEN 4 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_OFST 12 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_LBN 0 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_WIDTH 1 +/* Address of the descriptor table in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20 +/* Address of the available ring in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28 +/* Address of the used ring in the virtqueue. */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36 +/* PASID to use on PCIe transactions involving this queue. Ignored if the + * USE_PASID flag is not set. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_OFST 40 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_LEN 4 +/* Which MSIX vector to use for this virtqueue, or NO_VECTOR if MSIX should not + * be used. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST 44 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_LEN 2 +/* enum: Do not enable interrupts for this virtqueue */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR 0xffff +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_OFST 46 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_LEN 2 +/* Virtio features to apply to this queue. Same format as the in the virtio + * spec and in the return from MC_CMD_VIRTIO_GET_FEATURES. Must be a subset of + * the features returned from MC_CMD_VIRTIO_GET_FEATURES. Features are per- + * queue because with vDPA multiple queues on the same function can be passed + * through to different virtual hosts as independent devices. 
+ */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */ +/* The inital producer index for this queue's used ring. If this queue is being + * created to be migrated into, this should be the FINAL_PIDX value returned by + * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it + * should be zero. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4 +/* The inital consumer index for this queue's available ring. If this queue is + * being created to be migrated into, this should be the FINAL_CIDX value + * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. + * Otherwise, it should be zero. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4 +/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated + * with. Use MAE_MPORT_SELECTOR_ASSIGNED to request the default mport for the + * function this queue is being created on. + */ +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_OFST 64 +#define MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_LEN 4 + +/* MC_CMD_VIRTIO_INIT_QUEUE_RESP msgresponse */ +#define MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN 0 + + +/***********************************/ +/* MC_CMD_VIRTIO_FINI_QUEUE + * Destroy a virtio virtqueue + */ +#define MC_CMD_VIRTIO_FINI_QUEUE 0x16b +#undef MC_CMD_0x16b_PRIVILEGE_CTG + +#define MC_CMD_0x16b_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_FINI_QUEUE_REQ msgrequest */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN 8 +/* Type of virtqueue to destroy. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_OFST 0 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_INIT_QUEUE/MC_CMD_VIRTIO_INIT_QUEUE_REQ/QUEUE_TYPE */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_OFST 1 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_LEN 1 +/* If the calling function is a PF and this field is not VF_NULL, destroy the + * queue on the specified child VF instead of on the PF. + */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_LEN 2 +/* enum: No VF, destroy the queue on the PF. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_VF_NULL 0xffff +/* Instance to destroy */ +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_OFST 4 +#define MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_LEN 4 + +/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8 +/* The producer index of the used ring when the queue was stopped. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4 +/* The consumer index of the available ring when the queue was stopped. */ +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4 +#define MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4 + + +/***********************************/ +/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET + * Get the offset in the BAR of the doorbells for a VI. Doesn't require the + * queue(s) to be allocated. 
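/* Sketch (not part of this patch): populating MC_CMD_VIRTIO_INIT_QUEUE_REQ
 * for a net TX virtqueue whose state is being migrated in, i.e. the initial
 * producer/consumer indices come from the FINAL_PIDX/FINAL_CIDX of a prior
 * MC_CMD_VIRTIO_FINI_QUEUE on the source queue.  Ring addresses and the
 * 64-bit FEATURES value are split into LO/HI dwords.  Helpers are from the
 * first sketch; MAE_MPORT_SELECTOR_ASSIGNED is assumed to be defined
 * elsewhere in this header.
 */
static void build_init_txq(uint8_t req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN],
                           uint32_t vi_instance, uint32_t size_entries,
                           uint64_t desc_addr, uint64_t avail_addr,
                           uint64_t used_addr, uint64_t features,
                           const uint8_t *fini_resp /* from the old queue */)
{
        memset(req, 0, MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN);
        req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_OFST] =
                MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ;
        /* create on this PF, not on a child VF */
        req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST] =
                MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL & 0xff;
        req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST + 1] =
                MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL >> 8;
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_OFST, vi_instance);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_OFST, size_entries);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST, (uint32_t)desc_addr);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST, (uint32_t)(desc_addr >> 32));
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST, (uint32_t)avail_addr);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST, (uint32_t)(avail_addr >> 32));
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST, (uint32_t)used_addr);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST, (uint32_t)(used_addr >> 32));
        /* no MSIX interrupt in this sketch */
        req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST] =
                MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR & 0xff;
        req[MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST + 1] =
                MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR >> 8;
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST, (uint32_t)features);
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST, (uint32_t)(features >> 32));
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST,
                 get_le32(fini_resp, MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST));
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST,
                 get_le32(fini_resp, MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST));
        put_le32(req, MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_OFST,
                 MAE_MPORT_SELECTOR_ASSIGNED);
}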
+ */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET 0x16c +#undef MC_CMD_0x16c_PRIVILEGE_CTG + +#define MC_CMD_0x16c_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ msgrequest */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN 8 +/* Type of device to get information for. Matches the device id as defined by + * the virtio spec. + */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_OFST 0 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_LEN 1 +/* Enum values, see field(s): */ +/* MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_OFST 1 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_LEN 1 +/* If the calling function is a PF and this field is not VF_NULL, query the VI + * on the specified child VF instead of on the PF. + */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST 2 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_LEN 2 +/* enum: No VF, query the PF. */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL 0xffff +/* VI instance to query */ +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_OFST 4 +#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_LEN 4 + +/* MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP msgresponse */ +#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN 8 +/* Offset of RX doorbell in BAR */ +#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_OFST 0 +#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_LEN 4 +/* Offset of TX doorbell in BAR */ +#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_OFST 4 +#define MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_LEN 4 + +/* MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP msgresponse */ +#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_LEN 4 +/* Offset of request doorbell in BAR */ +#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_OFST 0 +#define MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_LEN 4 + +/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID + * (interface/PF/VF tuple) + */ +#define PCIE_FUNCTION_LEN 8 +/* PCIe PF function number */ +#define PCIE_FUNCTION_PF_OFST 0 +#define PCIE_FUNCTION_PF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_PF_ANY 0xfffe +/* enum: Value representing invalid (null) function */ +#define PCIE_FUNCTION_PF_NULL 0xffff +#define PCIE_FUNCTION_PF_LBN 0 +#define PCIE_FUNCTION_PF_WIDTH 16 +/* PCIe VF Function number (PF relative) */ +#define PCIE_FUNCTION_VF_OFST 2 +#define PCIE_FUNCTION_VF_LEN 2 +/* enum: Wildcard value representing any available function (e.g in resource + * allocation requests) + */ +#define PCIE_FUNCTION_VF_ANY 0xfffe +/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF == + * PF_NULL) + */ +#define PCIE_FUNCTION_VF_NULL 0xffff +#define PCIE_FUNCTION_VF_LBN 16 +#define PCIE_FUNCTION_VF_WIDTH 16 +/* PCIe interface of the function */ +#define PCIE_FUNCTION_INTF_OFST 4 +#define PCIE_FUNCTION_INTF_LEN 4 +/* enum: Host PCIe interface */ +#define PCIE_FUNCTION_INTF_HOST 0x0 +/* enum: Application Processor interface */ +#define PCIE_FUNCTION_INTF_AP 0x1 +#define PCIE_FUNCTION_INTF_LBN 32 +#define PCIE_FUNCTION_INTF_WIDTH 32 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_CREATE + * Descriptor proxy functions are abstract devices that forward all request + * submitted to the host PCIe function 
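/* Sketch (not part of this patch): querying the VI doorbell offsets for a
 * net device on the local PF and decoding the response.  Helpers are from
 * the first sketch.
 */
static void build_doorbell_query(uint8_t req[MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN],
                                 uint32_t vi_instance)
{
        memset(req, 0, MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN);
        req[MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_OFST] =
                MC_CMD_VIRTIO_GET_FEATURES_IN_NET;
        req[MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST] =
                MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL & 0xff;
        req[MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST + 1] =
                MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL >> 8;
        put_le32(req, MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_OFST, vi_instance);
}

static void decode_net_doorbells(const uint8_t *resp, uint32_t *rx_dbl, uint32_t *tx_dbl)
{
        *rx_dbl = get_le32(resp, MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_OFST);
        *tx_dbl = get_le32(resp, MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_OFST);
}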
(descriptors submitted to Virtio or + * EF100 queues) to be handled on another function (most commonly on the + * embedded Application Processor), via EF100 descriptor proxy, memory-to- + * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk + * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy + * function on the host and assigns a user-defined label. The actual function + * configuration is not persisted until the caller configures it with + * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with + * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN. + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172 +#undef MC_CMD_0x172_PRIVILEGE_CTG + +#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52 +/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to + * {PF_ANY,VF_ANY,interface} for "any available function" Set to + * {PF_ANY,VF_NULL,interface} for "any available PF" + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4 +/* The personality to set. The meanings of the personalities are defined in + * SF-120734-TC with more information in SF-122717-TC. At present, we only + * support proxying for VIRTIO_BLK + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40 + +/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12 +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4 +/* Allocated function ID (as struct PCIE_FUNCTION) */ +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_DESTROY + * Remove an existing descriptor proxy function. Underlying function + * personality and configuration reverts back to factory default. Function + * configuration is committed immediately to specified store and any function + * ownership is released. 
+ */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173 +#undef MC_CMD_0x173_PRIVILEGE_CTG + +#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40 +/* Store from which to remove function configuration */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40 +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4 +/* Enum values, see field(s): */ +/* MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */ + +/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0 + +/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See + * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature + * bits. See Virtio specification v1.1, Section 5.2.4 (struct + * virtio_blk_config) for definition of remaining configuration fields + */ +#define VIRTIO_BLK_CONFIG_LEN 68 +/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */ +#define VIRTIO_BLK_CONFIG_FEATURES_OFST 0 +#define VIRTIO_BLK_CONFIG_FEATURES_LEN 8 +#define VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0 +#define VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14 +#define 
VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38 +#define VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1 +#define VIRTIO_BLK_CONFIG_FEATURES_LBN 0 +#define VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64 +/* The capacity of the device (expressed in 512-byte sectors) */ +#define VIRTIO_BLK_CONFIG_CAPACITY_OFST 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_LEN 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8 +#define VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12 +#define VIRTIO_BLK_CONFIG_CAPACITY_LBN 64 +#define VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64 +/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is + * set. + */ +#define VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128 +#define VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32 +/* Maximum number of segments in a request. Only valid when + * VIRTIO_BLK_F_SEG_MAX is set. + */ +#define VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20 +#define VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4 +#define VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160 +#define VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32 +/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is + * set. + */ +#define VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24 +#define VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2 +#define VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192 +#define VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16 +/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set. + */ +#define VIRTIO_BLK_CONFIG_HEADS_OFST 26 +#define VIRTIO_BLK_CONFIG_HEADS_LEN 1 +#define VIRTIO_BLK_CONFIG_HEADS_LBN 208 +#define VIRTIO_BLK_CONFIG_HEADS_WIDTH 8 +/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set. + */ +#define VIRTIO_BLK_CONFIG_SECTORS_OFST 27 +#define VIRTIO_BLK_CONFIG_SECTORS_LEN 1 +#define VIRTIO_BLK_CONFIG_SECTORS_LBN 216 +#define VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8 +/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. 
*/ +#define VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224 +#define VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32 +/* Block topology - number of logical blocks per physical block (log2). Only + * valid when VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256 +#define VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8 +/* Block topology - offset of first aligned logical block. Only valid when + * VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264 +#define VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8 +/* Block topology - suggested minimum I/O size in blocks. Only valid when + * VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272 +#define VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16 +/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid + * when VIRTIO_BLK_F_TOPOLOGY is set. + */ +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288 +#define VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32 +/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and + * not carried in config data. + */ +#define VIRTIO_BLK_CONFIG_UNUSED0_OFST 40 +#define VIRTIO_BLK_CONFIG_UNUSED0_LEN 2 +#define VIRTIO_BLK_CONFIG_UNUSED0_LBN 320 +#define VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16 +/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated. + */ +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336 +#define VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16 +/* Maximum discard sectors size, in 512-byte units. Only valid if + * VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32 +/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384 +#define VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32 +/* Discard sector alignment, in 512-byte units. Only valid if + * VIRTIO_BLK_F_DISCARD is set. + */ +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416 +#define VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32 +/* Maximum write zeroes sectors size, in 512-byte units. Only valid if + * VIRTIO_BLK_F_WRITE_ZEROES is set. + */ +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32 +/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES + * is set. 
+ */ +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480 +#define VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32 +/* Write zeroes request can result in deallocating one or more sectors. Only + * valid if VIRTIO_BLK_F_WRITE_ZEROES is set. + */ +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512 +#define VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8 +/* Unused, set to zero. */ +#define VIRTIO_BLK_CONFIG_UNUSED1_OFST 65 +#define VIRTIO_BLK_CONFIG_UNUSED1_LEN 3 +#define VIRTIO_BLK_CONFIG_UNUSED1_LBN 520 +#define VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET + * Set configuration for an existing descriptor proxy function. Configuration + * data must match function personality. The actual function configuration is + * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN + */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174 +#undef MC_CMD_0x174_PRIVILEGE_CTG + +#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num)) +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1) +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4 +/* Reserved for future extension, set to zero. */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16 +/* Configuration data. Format of configuration data is determined implicitly + * from function personality referred to by HANDLE. Currently, only supported + * format is VIRTIO_BLK_CONFIG. + */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232 +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000 + +/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_COMMIT + * Commit function configuration to non-volatile or volatile store. Once + * configuration is applied to hardware (which may happen immediately or on + * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be + * delivered to callers MCDI event queue. 
+ */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175 +#undef MC_CMD_0x175_PRIVILEGE_CTG + +#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4 +/* enum: Store into non-volatile (dynamic) config */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0 +/* enum: Store into volatile (ephemeral) config */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1 + +/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4 +/* Generation count to be delivered in an event once configuration becomes live + */ +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_OPEN + * Retrieve a handle for an existing descriptor proxy function. Returns an + * integer handle, valid until function is deallocated, MC rebooted or power- + * cycle. Returns ENODEV if no function with given label exists. + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176 +#undef MC_CMD_0x176_PRIVILEGE_CTG + +#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40 + +/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num)) +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1) +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4 +/* PCIe Function ID (as struct PCIE_FUNCTION) */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8 +/* Function personality */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +/* Function configuration state */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4 +/* enum: Function configuration is visible to the host (live) */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0 +/* enum: Function configuration is pending reset */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1 +/* Generation count to be delivered in an event once the configuration becomes + * live (if status is "pending") + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4 +/* Reserved for future extension, set to zero. 
*/ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16 +/* Configuration data corresponding to function personality. Currently, only + * supported format is VIRTIO_BLK_CONFIG + */ +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212 +#define MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_CLOSE + * Releases a handle for an open descriptor proxy function. If proxying was + * enabled on the device, the caller is expected to gracefully stop it using + * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an + * active device without disabling proxying will result in forced close, which + * will put the device into a failed state and signal the host driver of the + * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side) + */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1 +#undef MC_CMD_0x1a1_PRIVILEGE_CTG + +#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4 +/* Handle to the descriptor proxy function */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0 + +/* DESC_PROXY_FUNC_MAP structuredef */ +#define DESC_PROXY_FUNC_MAP_LEN 52 +/* PCIe function ID (as struct PCIE_FUNCTION) */ +#define DESC_PROXY_FUNC_MAP_FUNC_OFST 0 +#define DESC_PROXY_FUNC_MAP_FUNC_LEN 8 +#define DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0 +#define DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4 +#define DESC_PROXY_FUNC_MAP_FUNC_LBN 0 +#define DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64 +/* Function personality */ +#define DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8 +#define DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4 +/* Enum values, see field(s): */ +/* FUNCTION_PERSONALITY/ID */ +#define DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64 +#define DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32 +/* User-defined label (zero-terminated ASCII string) to uniquely identify the + * function + */ +#define DESC_PROXY_FUNC_MAP_LABEL_OFST 12 +#define DESC_PROXY_FUNC_MAP_LABEL_LEN 40 +#define DESC_PROXY_FUNC_MAP_LABEL_LBN 96 +#define DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_ENUM + * Enumerate existing descriptor proxy functions + */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177 +#undef MC_CMD_0x177_PRIVILEGE_CTG + +#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4 +/* Starting index, set to 0 on first request. See + * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS. 
+ */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num)) +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52) +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1 +/* Function map, as array of DESC_PROXY_FUNC_MAP */ +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4 +#define MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_ENABLE + * Enable descriptor proxying for function into target event queue. Returns VI + * allocation info for the proxy source function, so that the caller can map + * absolute VI IDs from descriptor proxy events back to the originating + * function. + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178 +#undef MC_CMD_0x178_PRIVILEGE_CTG + +#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4 +/* Descriptor proxy sink queue (caller function relative). Must be extended + * width event queue + */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8 +/* The number of VIs allocated on the function */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4 +/* The base absolute VI number allocated to the function. */ +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4 +#define MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4 + + +/***********************************/ +/* MC_CMD_DESC_PROXY_FUNC_DISABLE + * Disable descriptor proxying for function + */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179 +#undef MC_CMD_0x179_PRIVILEGE_CTG + +#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4 +/* Handle to descriptor proxy function (as returned by + * MC_CMD_DESC_PROXY_FUNC_OPEN) + */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0 +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4 + +/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */ +#define MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_GET_ADDR_SPC_ID + * Get Address space identifier for use in mem2mem descriptors for a given + * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem + * descriptors. 
+ */ +#define MC_CMD_GET_ADDR_SPC_ID 0x1a0 +#undef MC_CMD_0x1a0_PRIVILEGE_CTG + +#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16 +/* Resource type to get ADDR_SPC_ID for */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4 +/* enum: Address space ID for host/AP memory DMA over the same interface this + * MCDI was called on + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC with PASID value specified by PASID + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2 +/* enum: Address space ID for host/AP memory DMA via PCI interface and function + * specified by FUNC with PASID value of relative VI specified by VI + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3 +/* enum: Address space ID for host/AP memory DMA via PCI interface, function + * and PASID value of absolute VI specified by VI + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4 +/* enum: Address space ID for host memory DMA via PCI interface and function of + * descriptor proxy function specified by HANDLE + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5 +/* enum: Address space ID for DMA to/from MC memory */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6 +/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR) + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7 +/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC, + * PCI_FUNC_PASID or REL_VI. + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8 +/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12 +#define MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4 +/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12 +#define MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4 +/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE. + */ +#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4 +#define MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4 + +/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */ +#define MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8 +/* Address Space ID for the requested target. Only the lower 36 bits are valid + * in the current SmartNIC implementation. + */ +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0 +#define MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4 + + #endif /* MCDI_PCOL_H */ From 6d9b5dcd29a50cec8445c66fc8055476f8e2d057 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:32:46 +0100 Subject: [PATCH 0455/2056] sfc: determine flag word automatically in efx_has_cap() Now that we have an _OFST definition for each individual flag bit, callers of efx_has_cap() don't need to specify which flag word it's in; we can just use the flag name directly in MCDI_CAPABILITY_OFST. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi.h | 5 ++--- drivers/net/ethernet/sfc/mcdi_filters.c | 8 ++++---- drivers/net/ethernet/sfc/ptp.c | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index b107e4c00285..db9746a751d4 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -332,10 +332,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); #define MCDI_CAPABILITY_OFST(field) \ MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST -/* field is FLAGS1 or FLAGS2 */ -#define efx_has_cap(efx, flag, field) \ +#define efx_has_cap(efx, field) \ efx->type->check_caps(efx, \ - MCDI_CAPABILITY(flag), \ + MCDI_CAPABILITY(field), \ MCDI_CAPABILITY_OFST(field)) void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len); diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 455a62814fb9..7b39a3aa3a1a 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -828,7 +828,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx, efx_filter_set_uc_def(&spec); if (encap_type) { - if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1)) + if (efx_has_cap(efx, VXLAN_NVGRE)) efx_filter_set_encap_type(&spec, encap_type); else /* @@ -1304,7 +1304,7 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining) rc = efx_mcdi_filter_table_probe_matches(efx, table, false); if (rc) goto fail; - if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1)) + if (efx_has_cap(efx, VXLAN_NVGRE)) rc = efx_mcdi_filter_table_probe_matches(efx, table, true); if (rc) goto fail; @@ -1927,7 +1927,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive return 0; } - if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1)) + if (efx_has_cap(efx, RX_RSS_LIMITED)) return -EOPNOTSUPP; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, @@ -1948,7 +1948,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive if (context_size) *context_size = rss_spread; - if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1)) + if (efx_has_cap(efx, ADDITIONAL_RSS_MODES)) efx_mcdi_set_rss_context_flags(efx, ctx); return 0; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 04c7283d205e..15c08cae6ae6 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -352,7 +352,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp, bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx) { - return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2); + return efx_has_cap(efx, TX_MAC_TIMESTAMPING); } /* PTP 'extra' channel is still a traffic channel, but we only create TX queues From 08f9912ef01e184b6a0512ca062e4f743671dac2 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:33:03 +0100 Subject: [PATCH 0456/2056] sfc: extend bitfield macros up to POPULATE_DWORD_13 Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/bitfield.h | 34 ++++++++++++++++++++++++----- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h index 1b59e9fe58b4..2590cab53e54 100644 --- a/drivers/net/ethernet/sfc/bitfield.h +++ b/drivers/net/ethernet/sfc/bitfield.h @@ -282,7 +282,10 @@ typedef union efx_oword { field7, value7, \ field8, value8, \ field9, value9, \ - field10, value10) \ + field10, value10, \ + field11, value11, \ + field12, value12, \ + field13, value13) \ (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ @@ -292,7 +295,10 @@ typedef union efx_oword { EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ - EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10))) + EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \ + EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13))) #define EFX_INSERT_FIELDS64(...) \ cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) @@ -334,7 +340,13 @@ typedef union efx_oword { #endif /* Populate an octword field with various numbers of arguments */ -#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_13 EFX_POPULATE_OWORD +#define EFX_POPULATE_OWORD_12(oword, ...) \ + EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_11(oword, ...) \ + EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_OWORD_10(oword, ...) \ + EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_OWORD_9(oword, ...) \ EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_OWORD_8(oword, ...) \ @@ -363,7 +375,13 @@ typedef union efx_oword { EFX_DWORD_3, 0xffffffff) /* Populate a quadword field with various numbers of arguments */ -#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_13 EFX_POPULATE_QWORD +#define EFX_POPULATE_QWORD_12(qword, ...) \ + EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_11(qword, ...) \ + EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_QWORD_10(qword, ...) \ + EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_QWORD_9(qword, ...) \ EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_QWORD_8(qword, ...) \ @@ -390,7 +408,13 @@ typedef union efx_oword { EFX_DWORD_1, 0xffffffff) /* Populate a dword field with various numbers of arguments */ -#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_13 EFX_POPULATE_DWORD +#define EFX_POPULATE_DWORD_12(dword, ...) \ + EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_11(dword, ...) \ + EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) +#define EFX_POPULATE_DWORD_10(dword, ...) \ + EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_DWORD_9(dword, ...) \ EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) #define EFX_POPULATE_DWORD_8(dword, ...) 
\ From de5f32e2b6301c1f6780b3ae9c2740a5e422e2da Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:33:44 +0100 Subject: [PATCH 0457/2056] sfc: don't try to create more channels than we can have VIs Calculate efx->max_vis at probe time, and check against it in efx_allocate_msix_channels() when considering whether to create XDP TX channels. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 18 ++++++++++++++---- drivers/net/ethernet/sfc/efx_channels.c | 7 +++++++ drivers/net/ethernet/sfc/net_driver.h | 1 + drivers/net/ethernet/sfc/siena.c | 1 + 4 files changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 4b0e3695a71a..c99fedb315fe 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -601,10 +601,14 @@ static int efx_ef10_probe(struct efx_nic *efx) * However, until we use TX option descriptors we need two TX queues * per channel. */ - efx->max_channels = min_t(unsigned int, - EFX_MAX_CHANNELS, - efx_ef10_mem_map_size(efx) / - (efx->vi_stride * EFX_TXQ_TYPES)); + efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride; + if (!efx->max_vis) { + netif_err(efx, drv, efx->net_dev, "error determining max VIs\n"); + rc = -EIO; + goto fail5; + } + efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS, + efx->max_vis / EFX_TXQ_TYPES); efx->max_tx_channels = efx->max_channels; if (WARN_ON(efx->max_channels == 0)) { rc = -EIO; @@ -1129,6 +1133,12 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) ((efx->n_tx_channels + efx->n_extra_tx_channels) * EFX_TXQ_TYPES) + efx->n_xdp_channels * efx->xdp_tx_per_channel); + if (efx->max_vis && efx->max_vis < channel_vis) { + netif_dbg(efx, drv, efx->net_dev, + "Reducing channel VIs from %u to %u\n", + channel_vis, efx->max_vis); + channel_vis = efx->max_vis; + } #ifdef EFX_USE_PIO /* Try to allocate PIO buffers if wanted and if the full diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index c492523b986c..2c3510b0524a 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -175,6 +175,13 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; + } else if (n_channels + n_xdp_tx > efx->max_vis) { + netif_err(efx, drv, efx->net_dev, + "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", + n_xdp_tx, n_channels, efx->max_vis); + efx->n_xdp_channels = 0; + efx->xdp_tx_per_channel = 0; + efx->xdp_tx_queue_count = 0; } else { efx->n_xdp_channels = n_xdp_ev; efx->xdp_tx_per_channel = EFX_TXQ_TYPES; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 1afb58feb9ab..7bc4d1cbb398 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1022,6 +1022,7 @@ struct efx_nic { unsigned next_buffer_table; unsigned int max_channels; + unsigned int max_vis; unsigned int max_tx_channels; unsigned n_channels; unsigned n_rx_channels; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 891e9fb6abec..6462bbe2448a 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -276,6 +276,7 @@ static int siena_probe_nic(struct efx_nic *efx) } efx->max_channels = EFX_MAX_CHANNELS; + efx->max_vis = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; 
efx_reado(efx, ®, FR_AZ_CS_DEBUG); From d3142c193dca9a2f6878f4128ce1aaf221bb3f99 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:34:20 +0100 Subject: [PATCH 0458/2056] sfc: refactor EF10 stats handling Separate the generation-count handling from the format conversion, to make it easier to re-use both for EF100. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 68 ++++++++++++++++----------------- drivers/net/ethernet/sfc/nic.c | 45 ++++++++++++++++++++++ drivers/net/ethernet/sfc/nic.h | 3 ++ 3 files changed, 82 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index c99fedb315fe..49af06ba7a8e 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1279,6 +1279,14 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) return 0; } +static void efx_ef10_fini_nic(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + kfree(nic_data->mc_stats); + nic_data->mc_stats = NULL; +} + static int efx_ef10_init_nic(struct efx_nic *efx) { struct efx_ef10_nic_data *nic_data = efx->nic_data; @@ -1300,6 +1308,11 @@ static int efx_ef10_init_nic(struct efx_nic *efx) efx->must_realloc_vis = false; } + nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64), + GFP_KERNEL); + if (!nic_data->mc_stats) + return -ENOMEM; + if (nic_data->must_restore_piobufs && nic_data->n_piobufs) { rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs); if (rc == 0) { @@ -1775,55 +1788,42 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats, return stats_count; } -static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx) +static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) { struct efx_ef10_nic_data *nic_data = efx->nic_data; DECLARE_BITMAP(mask, EF10_STAT_COUNT); - __le64 generation_start, generation_end; u64 *stats = nic_data->stats; - __le64 *dma_stats; efx_ef10_get_stat_mask(efx, mask); - dma_stats = efx->stats_buffer.addr; - - generation_end = dma_stats[efx->num_mac_stats - 1]; - if (generation_end == EFX_MC_STATS_GENERATION_INVALID) - return 0; - rmb(); - efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask, - stats, efx->stats_buffer.addr, false); - rmb(); - generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; - if (generation_end != generation_start) - return -EAGAIN; + efx_nic_copy_stats(efx, nic_data->mc_stats); + efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, + mask, stats, nic_data->mc_stats, false); /* Update derived statistics */ efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_port_rx_nodesc_drops]); + /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC. + * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES. + * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES. + * Here we calculate port_rx_good_bytes. + */ stats[EF10_STAT_port_rx_good_bytes] = stats[EF10_STAT_port_rx_bytes] - stats[EF10_STAT_port_rx_bytes_minus_good_bytes]; + + /* The asynchronous reads used to calculate RX_BAD_BYTES in + * MC Firmware are done such that we should not see an increase in + * RX_BAD_BYTES when a good packet has arrived. Unfortunately this + * does mean that the stat can decrease at times. Here we do not + * update the stat unless it has increased or has gone to zero + * (In the case of the NIC rebooting). 
+ * Please see Bug 33781 for a discussion of why things work this way. + */ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes], stats[EF10_STAT_port_rx_bytes_minus_good_bytes]); efx_update_sw_stats(efx, stats); - return 0; -} - - -static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats, - struct rtnl_link_stats64 *core_stats) -{ - int retry; - - /* If we're unlucky enough to read statistics during the DMA, wait - * up to 10ms for it to finish (typically takes <500us) - */ - for (retry = 0; retry < 100; ++retry) { - if (efx_ef10_try_update_nic_stats_pf(efx) == 0) - break; - udelay(100); - } return efx_ef10_update_stats_common(efx, full_stats, core_stats); } @@ -4033,7 +4033,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .remove = efx_ef10_remove, .dimension_resources = efx_ef10_dimension_resources, .init = efx_ef10_init_nic, - .fini = efx_port_dummy_op_void, + .fini = efx_ef10_fini_nic, .map_reset_reason = efx_ef10_map_reset_reason, .map_reset_flags = efx_ef10_map_reset_flags, .reset = efx_ef10_reset, @@ -4142,7 +4142,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .remove = efx_ef10_remove, .dimension_resources = efx_ef10_dimension_resources, .init = efx_ef10_init_nic, - .fini = efx_port_dummy_op_void, + .fini = efx_ef10_fini_nic, .map_reset_reason = efx_ef10_map_reset_reason, .map_reset_flags = efx_ef10_map_reset_flags, .reset = efx_ef10_reset, diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index b0baa70fbba7..ac6630510324 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -20,6 +20,8 @@ #include "farch_regs.h" #include "io.h" #include "workarounds.h" +#include "mcdi_port_common.h" +#include "mcdi_pcol.h" /************************************************************************** * @@ -470,6 +472,49 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, return visible; } +/** + * efx_nic_copy_stats - Copy stats from the DMA buffer in to an + * intermediate buffer. This is used to get a consistent + * set of stats while the DMA buffer can be written at any time + * by the NIC. + * @efx: The associated NIC. + * @dest: Destination buffer. Must be the same size as the DMA buffer. 
+ */ +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + __le64 generation_start, generation_end; + int rc = 0, retry; + + if (!dest) + return 0; + + if (!dma_stats) + goto return_zeroes; + + /* If we're unlucky enough to read statistics during the DMA, wait + * up to 10ms for it to finish (typically takes <500us) + */ + for (retry = 0; retry < 100; ++retry) { + generation_end = dma_stats[efx->num_mac_stats - 1]; + if (generation_end == EFX_MC_STATS_GENERATION_INVALID) + goto return_zeroes; + rmb(); + memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64)); + rmb(); + generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; + if (generation_end == generation_start) + return 0; /* return good data */ + udelay(100); + } + + rc = -EIO; + +return_zeroes: + memset(dest, 0, efx->num_mac_stats * sizeof(u64)); + return rc; +} + /** * efx_nic_update_stats - Convert statistics DMA buffer to array of u64 * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 8f73c5d996eb..792907aeeb75 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -368,6 +368,7 @@ enum { * @piobuf_size: size of a single PIO buffer * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC * reboot + * @mc_stats: Scratch buffer for converting statistics to the kernel's format * @stats: Hardware statistics * @workaround_35388: Flag: firmware supports workaround for bug 35388 * @workaround_26807: Flag: firmware supports workaround for bug 26807 @@ -404,6 +405,7 @@ struct efx_ef10_nic_data { unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT]; u16 piobuf_size; bool must_restore_piobufs; + __le64 *mc_stats; u64 stats[EF10_STAT_COUNT]; bool workaround_35388; bool workaround_26807; @@ -674,6 +676,7 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf); size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u8 *names); +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u64 *stats, const void *dma_buf, bool accumulate); From 9043f48fd3e302dd3cee49f911ce0fd1494b479f Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:34:39 +0100 Subject: [PATCH 0459/2056] sfc: split up nic.h The new nic_common.h contains the inlines for NIC-type function dispatch, declarations for NIC-generic functions in nic.c, and other similar NIC- generic functionality. Retained in nic.h are NIC-specific declarations such as the siena and ef10 nic_data structs and various farch functions. The EF100 driver will thus include nic_common.h but not nic.h. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 6 +- drivers/net/ethernet/sfc/nic.h | 296 +------------------------- drivers/net/ethernet/sfc/nic_common.h | 273 ++++++++++++++++++++++++ drivers/net/ethernet/sfc/ptp.c | 3 +- drivers/net/ethernet/sfc/ptp.h | 45 ++++ 5 files changed, 322 insertions(+), 301 deletions(-) create mode 100644 drivers/net/ethernet/sfc/nic_common.h create mode 100644 drivers/net/ethernet/sfc/ptp.h diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 49af06ba7a8e..3bdb8606512a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1433,8 +1433,6 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type) { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name } #define EF10_OTHER_STAT(ext_name) \ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 } -#define GENERIC_SW_STAT(ext_name) \ - [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(port_tx_bytes, TX_BYTES), @@ -1478,8 +1476,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = { EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), - GENERIC_SW_STAT(rx_nodesc_trunc), - GENERIC_SW_STAT(rx_noskb_drops), + EFX_GENERIC_SW_STAT(rx_nodesc_trunc), + EFX_GENERIC_SW_STAT(rx_noskb_drops), EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW), EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW), EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL), diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 792907aeeb75..135c43146c13 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -8,133 +8,11 @@ #ifndef EFX_NIC_H #define EFX_NIC_H -#include -#include "net_driver.h" +#include "nic_common.h" #include "efx.h" -#include "efx_common.h" -#include "mcdi.h" - -enum { - /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. - * They are not supported by this driver but these revision numbers - * form part of the ethtool API for register dumping. - */ - EFX_REV_SIENA_A0 = 3, - EFX_REV_HUNT_A0 = 4, -}; - -static inline int efx_nic_rev(struct efx_nic *efx) -{ - return efx->type->revision; -} u32 efx_farch_fpga_ver(struct efx_nic *efx); -/* Read the current event from the event queue */ -static inline efx_qword_t *efx_event(struct efx_channel *channel, - unsigned int index) -{ - return ((efx_qword_t *) (channel->eventq.buf.addr)) + - (index & channel->eventq_mask); -} - -/* See if an event is present - * - * We check both the high and low dword of the event for all ones. We - * wrote all ones when we cleared the event, and no valid event can - * have all ones in either its high or low dwords. This approach is - * robust against reordering. - * - * Note that using a single 64-bit comparison is incorrect; even - * though the CPU read will be atomic, the DMA write may not be. - */ -static inline int efx_event_present(efx_qword_t *event) -{ - return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | - EFX_DWORD_IS_ALL_ONES(event->dword[1])); -} - -/* Returns a pointer to the specified transmit descriptor in the TX - * descriptor queue belonging to the specified channel. 
- */ -static inline efx_qword_t * -efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) -{ - return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; -} - -/* Get partner of a TX queue, seen as part of the same net core queue */ -static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) -{ - if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) - return tx_queue - EFX_TXQ_TYPE_OFFLOAD; - else - return tx_queue + EFX_TXQ_TYPE_OFFLOAD; -} - -/* Report whether this TX queue would be empty for the given write_count. - * May return false negative. - */ -static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, - unsigned int write_count) -{ - unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); - - if (empty_read_count == 0) - return false; - - return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; -} - -/* Report whether the NIC considers this TX queue empty, using - * packet_write_count (the write count recorded for the last completable - * doorbell push). May return false negative. EF10 only, which is OK - * because only EF10 supports PIO. - */ -static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) -{ - EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); - return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); -} - -/* Decide whether we can use TX PIO, ie. write packet data directly into - * a buffer on the device. This can reduce latency at the expense of - * throughput, so we only do this if both hardware and software TX rings - * are empty. This also ensures that only one packet at a time can be - * using the PIO buffer. - */ -static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) -{ - struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); - - return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && - efx_nic_tx_is_empty(partner); -} - -/* Decide whether to push a TX descriptor to the NIC vs merely writing - * the doorbell. This can reduce latency when we are adding a single - * descriptor to an empty queue, but is otherwise pointless. Further, - * Falcon and Siena have hardware bugs (SF bug 33851) that may be - * triggered if we don't check this. - * We use the write_count used for the last doorbell push, to get the - * NIC's view of the tx queue. 
- */ -static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, - unsigned int write_count) -{ - bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count); - - tx_queue->empty_read_count = 0; - return was_empty && tx_queue->write_count - write_count == 1; -} - -/* Returns a pointer to the specified descriptor in the RX descriptor queue */ -static inline efx_qword_t * -efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) -{ - return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; -} - enum { PHY_TYPE_NONE = 0, PHY_TYPE_TXC43128 = 1, @@ -147,18 +25,6 @@ enum { PHY_TYPE_SFT9001B = 10, }; -/* Alignment of PCIe DMA boundaries (4KB) */ -#define EFX_PAGE_SIZE 4096 -/* Size and alignment of buffer table entries (same) */ -#define EFX_BUF_SIZE EFX_PAGE_SIZE - -/* NIC-generic software stats */ -enum { - GENERIC_STAT_rx_noskb_drops, - GENERIC_STAT_rx_nodesc_trunc, - GENERIC_STAT_COUNT -}; - enum { SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT, SIENA_STAT_tx_good_bytes, @@ -434,123 +300,15 @@ struct efx_ef10_nic_data { int efx_init_sriov(void); void efx_fini_sriov(void); -struct ethtool_ts_info; -int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); -void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); -struct efx_channel *efx_ptp_channel(struct efx_nic *efx); -void efx_ptp_remove(struct efx_nic *efx); -int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); -int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); -bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -int efx_ptp_get_mode(struct efx_nic *efx); -int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, - unsigned int new_mode); -int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); -void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); -size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); -size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); -void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); -void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, - struct sk_buff *skb); -static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, - struct sk_buff *skb) -{ - if (channel->sync_events_state == SYNC_EVENTS_VALID) - __efx_rx_skb_attach_timestamp(channel, skb); -} -void efx_ptp_start_datapath(struct efx_nic *efx); -void efx_ptp_stop_datapath(struct efx_nic *efx); -bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx); -ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); - -extern const struct efx_nic_type falcon_a1_nic_type; -extern const struct efx_nic_type falcon_b0_nic_type; extern const struct efx_nic_type siena_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_nic_type; extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; -/************************************************************************** - * - * Externs - * - ************************************************************************** - */ - int falcon_probe_board(struct efx_nic *efx, u16 revision_info); -/* TX data path */ -static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) -{ - return tx_queue->efx->type->tx_probe(tx_queue); -} -static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) -{ - tx_queue->efx->type->tx_init(tx_queue); -} -static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) -{ - tx_queue->efx->type->tx_remove(tx_queue); -} -static inline void 
efx_nic_push_buffers(struct efx_tx_queue *tx_queue) -{ - tx_queue->efx->type->tx_write(tx_queue); -} - int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, bool *data_mapped); -/* RX data path */ -static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) -{ - return rx_queue->efx->type->rx_probe(rx_queue); -} -static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) -{ - rx_queue->efx->type->rx_init(rx_queue); -} -static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) -{ - rx_queue->efx->type->rx_remove(rx_queue); -} -static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) -{ - rx_queue->efx->type->rx_write(rx_queue); -} -static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) -{ - rx_queue->efx->type->rx_defer_refill(rx_queue); -} - -/* Event data path */ -static inline int efx_nic_probe_eventq(struct efx_channel *channel) -{ - return channel->efx->type->ev_probe(channel); -} -static inline int efx_nic_init_eventq(struct efx_channel *channel) -{ - return channel->efx->type->ev_init(channel); -} -static inline void efx_nic_fini_eventq(struct efx_channel *channel) -{ - channel->efx->type->ev_fini(channel); -} -static inline void efx_nic_remove_eventq(struct efx_channel *channel) -{ - channel->efx->type->ev_remove(channel); -} -static inline int -efx_nic_process_eventq(struct efx_channel *channel, int quota) -{ - return channel->efx->type->ev_process(channel, quota); -} -static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) -{ - channel->efx->type->ev_read_ack(channel); -} - -void efx_nic_event_test_start(struct efx_channel *channel); - /* Falcon/Siena queue operations */ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); void efx_farch_tx_init(struct efx_tx_queue *tx_queue); @@ -600,31 +358,6 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, #endif void efx_farch_filter_sync_rx_mode(struct efx_nic *efx); -bool efx_nic_event_present(struct efx_channel *channel); - -/* Some statistics are computed as A - B where A and B each increase - * linearly with some hardware counter(s) and the counters are read - * asynchronously. If the counters contributing to B are always read - * after those contributing to A, the computed value may be lower than - * the true value by some variable amount, and may decrease between - * subsequent computations. - * - * We should never allow statistics to decrease or to exceed the true - * value. Since the computed value will never be greater than the - * true value, we can achieve this by only storing the computed value - * when it increases. 
- */ -static inline void efx_update_diff_stat(u64 *stat, u64 diff) -{ - if ((s64)(diff - *stat) > 0) - *stat = diff; -} - -/* Interrupts */ -int efx_nic_init_interrupt(struct efx_nic *efx); -int efx_nic_irq_test_start(struct efx_nic *efx); -void efx_nic_fini_interrupt(struct efx_nic *efx); - /* Falcon/Siena interrupts */ void efx_farch_irq_enable_master(struct efx_nic *efx); int efx_farch_irq_test_generate(struct efx_nic *efx); @@ -633,17 +366,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id); irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id); irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); -static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) -{ - return READ_ONCE(channel->event_test_cpu); -} -static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) -{ - return READ_ONCE(efx->last_irq_cpu); -} - /* Global Resources */ -int efx_nic_flush_queues(struct efx_nic *efx); void siena_prepare_flush(struct efx_nic *efx); int efx_farch_fini_dmaq(struct efx_nic *efx); void efx_farch_finish_flr(struct efx_nic *efx); @@ -657,10 +380,6 @@ void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); void efx_farch_rx_pull_indir_table(struct efx_nic *efx); -int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, - unsigned int len, gfp_t gfp_flags); -void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); - /* Tests */ struct efx_farch_register_test { unsigned address; @@ -671,19 +390,6 @@ int efx_farch_test_registers(struct efx_nic *efx, const struct efx_farch_register_test *regs, size_t n_regs); -size_t efx_nic_get_regs_len(struct efx_nic *efx); -void efx_nic_get_regs(struct efx_nic *efx, void *buf); - -size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u8 *names); -int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); -void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, - const unsigned long *mask, u64 *stats, - const void *dma_buf, bool accumulate); -void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); - -#define EFX_MAX_FLUSH_TIME 5000 - void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq, efx_qword_t *event); diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h new file mode 100644 index 000000000000..e90ce85359cb --- /dev/null +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + */ + +#ifndef EFX_NIC_COMMON_H +#define EFX_NIC_COMMON_H + +#include "net_driver.h" +#include "efx_common.h" +#include "mcdi.h" +#include "ptp.h" + +enum { + /* Revisions 0-2 were Falcon A0, A1 and B0 respectively. + * They are not supported by this driver but these revision numbers + * form part of the ethtool API for register dumping. 
+ */ + EFX_REV_SIENA_A0 = 3, + EFX_REV_HUNT_A0 = 4, +}; + +static inline int efx_nic_rev(struct efx_nic *efx) +{ + return efx->type->revision; +} + +/* Read the current event from the event queue */ +static inline efx_qword_t *efx_event(struct efx_channel *channel, + unsigned int index) +{ + return ((efx_qword_t *) (channel->eventq.buf.addr)) + + (index & channel->eventq_mask); +} + +/* See if an event is present + * + * We check both the high and low dword of the event for all ones. We + * wrote all ones when we cleared the event, and no valid event can + * have all ones in either its high or low dwords. This approach is + * robust against reordering. + * + * Note that using a single 64-bit comparison is incorrect; even + * though the CPU read will be atomic, the DMA write may not be. + */ +static inline int efx_event_present(efx_qword_t *event) +{ + return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | + EFX_DWORD_IS_ALL_ONES(event->dword[1])); +} + +/* Returns a pointer to the specified transmit descriptor in the TX + * descriptor queue belonging to the specified channel. + */ +static inline efx_qword_t * +efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index; +} + +/* Report whether this TX queue would be empty for the given write_count. + * May return false negative. + */ +static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); + + if (empty_read_count == 0) + return false; + + return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; +} + +/* Report whether the NIC considers this TX queue empty, using + * packet_write_count (the write count recorded for the last completable + * doorbell push). May return false negative. EF10 only, which is OK + * because only EF10 supports PIO. + */ +static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) +{ + EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors); + return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count); +} + +/* Get partner of a TX queue, seen as part of the same net core queue */ +/* XXX is this a thing on EF100? */ +static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + return tx_queue - EFX_TXQ_TYPE_OFFLOAD; + else + return tx_queue + EFX_TXQ_TYPE_OFFLOAD; +} + +/* Decide whether we can use TX PIO, ie. write packet data directly into + * a buffer on the device. This can reduce latency at the expense of + * throughput, so we only do this if both hardware and software TX rings + * are empty. This also ensures that only one packet at a time can be + * using the PIO buffer. + */ +static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) +{ + struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue); + + return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) && + efx_nic_tx_is_empty(partner); +} + +/* Decide whether to push a TX descriptor to the NIC vs merely writing + * the doorbell. This can reduce latency when we are adding a single + * descriptor to an empty queue, but is otherwise pointless. Further, + * Falcon and Siena have hardware bugs (SF bug 33851) that may be + * triggered if we don't check this. + * We use the write_count used for the last doorbell push, to get the + * NIC's view of the tx queue. 
+ */ +static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue, + unsigned int write_count) +{ + bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count); + + tx_queue->empty_read_count = 0; + return was_empty && tx_queue->write_count - write_count == 1; +} + +/* Returns a pointer to the specified descriptor in the RX descriptor queue */ +static inline efx_qword_t * +efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) +{ + return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index; +} + +/* Alignment of PCIe DMA boundaries (4KB) */ +#define EFX_PAGE_SIZE 4096 +/* Size and alignment of buffer table entries (same) */ +#define EFX_BUF_SIZE EFX_PAGE_SIZE + +/* NIC-generic software stats */ +enum { + GENERIC_STAT_rx_noskb_drops, + GENERIC_STAT_rx_nodesc_trunc, + GENERIC_STAT_COUNT +}; + +#define EFX_GENERIC_SW_STAT(ext_name) \ + [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 } + +/* TX data path */ +static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) +{ + return tx_queue->efx->type->tx_probe(tx_queue); +} +static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_init(tx_queue); +} +static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_remove(tx_queue); +} +static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) +{ + tx_queue->efx->type->tx_write(tx_queue); +} + +/* RX data path */ +static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) +{ + return rx_queue->efx->type->rx_probe(rx_queue); +} +static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_init(rx_queue); +} +static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_remove(rx_queue); +} +static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_write(rx_queue); +} +static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) +{ + rx_queue->efx->type->rx_defer_refill(rx_queue); +} + +/* Event data path */ +static inline int efx_nic_probe_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_probe(channel); +} +static inline int efx_nic_init_eventq(struct efx_channel *channel) +{ + return channel->efx->type->ev_init(channel); +} +static inline void efx_nic_fini_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_fini(channel); +} +static inline void efx_nic_remove_eventq(struct efx_channel *channel) +{ + channel->efx->type->ev_remove(channel); +} +static inline int +efx_nic_process_eventq(struct efx_channel *channel, int quota) +{ + return channel->efx->type->ev_process(channel, quota); +} +static inline void efx_nic_eventq_read_ack(struct efx_channel *channel) +{ + channel->efx->type->ev_read_ack(channel); +} + +void efx_nic_event_test_start(struct efx_channel *channel); + +bool efx_nic_event_present(struct efx_channel *channel); + +/* Some statistics are computed as A - B where A and B each increase + * linearly with some hardware counter(s) and the counters are read + * asynchronously. If the counters contributing to B are always read + * after those contributing to A, the computed value may be lower than + * the true value by some variable amount, and may decrease between + * subsequent computations. + * + * We should never allow statistics to decrease or to exceed the true + * value. 
Since the computed value will never be greater than the + * true value, we can achieve this by only storing the computed value + * when it increases. + */ +static inline void efx_update_diff_stat(u64 *stat, u64 diff) +{ + if ((s64)(diff - *stat) > 0) + *stat = diff; +} + +/* Interrupts */ +int efx_nic_init_interrupt(struct efx_nic *efx); +int efx_nic_irq_test_start(struct efx_nic *efx); +void efx_nic_fini_interrupt(struct efx_nic *efx); + +static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) +{ + return READ_ONCE(channel->event_test_cpu); +} +static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) +{ + return READ_ONCE(efx->last_irq_cpu); +} + +/* Global Resources */ +int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, + unsigned int len, gfp_t gfp_flags); +void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); + +size_t efx_nic_get_regs_len(struct efx_nic *efx); +void efx_nic_get_regs(struct efx_nic *efx, void *buf); + +size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u8 *names); +int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); +void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count, + const unsigned long *mask, u64 *stats, + const void *dma_buf, bool accumulate); +void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat); + +#define EFX_MAX_FLUSH_TIME 5000 + +#endif /* EFX_NIC_COMMON_H */ diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 15c08cae6ae6..393b7cbac8b2 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include "net_driver.h" @@ -44,7 +43,7 @@ #include "mcdi_pcol.h" #include "io.h" #include "farch_regs.h" -#include "nic.h" +#include "nic.h" /* indirectly includes ptp.h */ /* Maximum number of events expected to make up a PTP event */ #define MAX_EVENT_FRAGS 3 diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h new file mode 100644 index 000000000000..9855e8c9e544 --- /dev/null +++ b/drivers/net/ethernet/sfc/ptp.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2006 Fen Systems Ltd. + * Copyright 2006-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. 
+ */ + +#ifndef EFX_PTP_H +#define EFX_PTP_H + +#include +#include "net_driver.h" + +struct ethtool_ts_info; +int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); +void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); +struct efx_channel *efx_ptp_channel(struct efx_nic *efx); +void efx_ptp_remove(struct efx_nic *efx); +int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr); +int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr); +void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +int efx_ptp_get_mode(struct efx_nic *efx); +int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode); +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings); +size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats); +void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev); +void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb); +static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel, + struct sk_buff *skb) +{ + if (channel->sync_events_state == SYNC_EVENTS_VALID) + __efx_rx_skb_attach_timestamp(channel, skb); +} +void efx_ptp_start_datapath(struct efx_nic *efx); +void efx_ptp_stop_datapath(struct efx_nic *efx); +bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx); +ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue); + +#endif /* EFX_PTP_H */ From bdccfd2d4ea7a0676045b16e83a1138deff20923 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:34:50 +0100 Subject: [PATCH 0460/2056] sfc: commonise ethtool link handling functions Link speeds, FEC, and autonegotiation are all things EF100 will share. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ethtool.c | 148 ---------------------- drivers/net/ethernet/sfc/ethtool_common.c | 146 +++++++++++++++++++++ drivers/net/ethernet/sfc/ethtool_common.h | 12 +- 3 files changed, 157 insertions(+), 149 deletions(-) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 04e88d05e8ff..55413d451ac3 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -54,58 +54,6 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, return 0; } -/* This must be called with rtnl_lock held. */ -static int -efx_ethtool_get_link_ksettings(struct net_device *net_dev, - struct ethtool_link_ksettings *cmd) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_link_state *link_state = &efx->link_state; - u32 supported; - - mutex_lock(&efx->mac_lock); - efx->phy_op->get_link_ksettings(efx, cmd); - mutex_unlock(&efx->mac_lock); - - /* Both MACs support pause frames (bidirectional and respond-only) */ - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - - supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - - if (LOOPBACK_INTERNAL(efx)) { - cmd->base.speed = link_state->speed; - cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; - } - - return 0; -} - -/* This must be called with rtnl_lock held. 
*/ -static int -efx_ethtool_set_link_ksettings(struct net_device *net_dev, - const struct ethtool_link_ksettings *cmd) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - /* GMAC does not support 1000Mbps HD */ - if ((cmd->base.speed == SPEED_1000) && - (cmd->base.duplex != DUPLEX_FULL)) { - netif_dbg(efx, drv, efx->net_dev, - "rejecting unsupported 1000Mbps HD setting\n"); - return -EINVAL; - } - - mutex_lock(&efx->mac_lock); - rc = efx->phy_op->set_link_ksettings(efx, cmd); - mutex_unlock(&efx->mac_lock); - return rc; -} - static int efx_ethtool_get_regs_len(struct net_device *net_dev) { return efx_nic_get_regs_len(netdev_priv(net_dev)); @@ -168,14 +116,6 @@ static void efx_ethtool_self_test(struct net_device *net_dev, test->flags |= ETH_TEST_FL_FAILED; } -/* Restart autonegotiation */ -static int efx_ethtool_nway_reset(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - return mdio45_nway_restart(&efx->mdio); -} - /* * Each channel has a single IRQ and moderation timer, started by any * completion (or other event). Unless the module parameter @@ -300,64 +240,6 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } -static int efx_ethtool_set_pauseparam(struct net_device *net_dev, - struct ethtool_pauseparam *pause) -{ - struct efx_nic *efx = netdev_priv(net_dev); - u8 wanted_fc, old_fc; - u32 old_adv; - int rc = 0; - - mutex_lock(&efx->mac_lock); - - wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | - (pause->tx_pause ? EFX_FC_TX : 0) | - (pause->autoneg ? EFX_FC_AUTO : 0)); - - if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { - netif_dbg(efx, drv, efx->net_dev, - "Flow control unsupported: tx ON rx OFF\n"); - rc = -EINVAL; - goto out; - } - - if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) { - netif_dbg(efx, drv, efx->net_dev, - "Autonegotiation is disabled\n"); - rc = -EINVAL; - goto out; - } - - /* Hook for Falcon bug 11482 workaround */ - if (efx->type->prepare_enable_fc_tx && - (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) - efx->type->prepare_enable_fc_tx(efx); - - old_adv = efx->link_advertising[0]; - old_fc = efx->wanted_fc; - efx_link_set_wanted_fc(efx, wanted_fc); - if (efx->link_advertising[0] != old_adv || - (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { - rc = efx->phy_op->reconfigure(efx); - if (rc) { - netif_err(efx, drv, efx->net_dev, - "Unable to advertise requested flow " - "control setting\n"); - goto out; - } - } - - /* Reconfigure the MAC. 
The PHY *may* generate a link state change event - * if the user just changed the advertised capabilities, but there's no - * harm doing this twice */ - efx_mac_reconfigure(efx); - -out: - mutex_unlock(&efx->mac_lock); - - return rc; -} - static void efx_ethtool_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) { @@ -1104,36 +986,6 @@ static int efx_ethtool_get_module_info(struct net_device *net_dev, return ret; } -static int efx_ethtool_get_fecparam(struct net_device *net_dev, - struct ethtool_fecparam *fecparam) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - if (!efx->phy_op || !efx->phy_op->get_fecparam) - return -EOPNOTSUPP; - mutex_lock(&efx->mac_lock); - rc = efx->phy_op->get_fecparam(efx, fecparam); - mutex_unlock(&efx->mac_lock); - - return rc; -} - -static int efx_ethtool_set_fecparam(struct net_device *net_dev, - struct ethtool_fecparam *fecparam) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - if (!efx->phy_op || !efx->phy_op->get_fecparam) - return -EOPNOTSUPP; - mutex_lock(&efx->mac_lock); - rc = efx->phy_op->set_fecparam(efx, fecparam); - mutex_unlock(&efx->mac_lock); - - return rc; -} - const struct ethtool_ops efx_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index b8d281ab6c7a..b91961126eeb 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -124,6 +124,14 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) efx->msg_enable = msg_enable; } +/* Restart autonegotiation */ +int efx_ethtool_nway_reset(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return mdio45_nway_restart(&efx->mdio); +} + void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { @@ -134,6 +142,64 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev, pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); } +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u8 wanted_fc, old_fc; + u32 old_adv; + int rc = 0; + + mutex_lock(&efx->mac_lock); + + wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | + (pause->tx_pause ? EFX_FC_TX : 0) | + (pause->autoneg ? EFX_FC_AUTO : 0)); + + if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { + netif_dbg(efx, drv, efx->net_dev, + "Flow control unsupported: tx ON rx OFF\n"); + rc = -EINVAL; + goto out; + } + + if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) { + netif_dbg(efx, drv, efx->net_dev, + "Autonegotiation is disabled\n"); + rc = -EINVAL; + goto out; + } + + /* Hook for Falcon bug 11482 workaround */ + if (efx->type->prepare_enable_fc_tx && + (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX)) + efx->type->prepare_enable_fc_tx(efx); + + old_adv = efx->link_advertising[0]; + old_fc = efx->wanted_fc; + efx_link_set_wanted_fc(efx, wanted_fc); + if (efx->link_advertising[0] != old_adv || + (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { + rc = efx->phy_op->reconfigure(efx); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "Unable to advertise requested flow " + "control setting\n"); + goto out; + } + } + + /* Reconfigure the MAC. 
The PHY *may* generate a link state change event + * if the user just changed the advertised capabilities, but there's no + * harm doing this twice */ + efx_mac_reconfigure(efx); + +out: + mutex_unlock(&efx->mac_lock); + + return rc; +} + /** * efx_fill_test - fill in an individual self-test entry * @test_index: Index of the test @@ -455,3 +521,83 @@ void efx_ethtool_get_stats(struct net_device *net_dev, efx_ptp_update_stats(efx, data); } + +/* This must be called with rtnl_lock held. */ +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_link_state *link_state = &efx->link_state; + u32 supported; + + mutex_lock(&efx->mac_lock); + efx->phy_op->get_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + + /* Both MACs support pause frames (bidirectional and respond-only) */ + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + + supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + + if (LOOPBACK_INTERNAL(efx)) { + cmd->base.speed = link_state->speed; + cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; + } + + return 0; +} + +/* This must be called with rtnl_lock held. */ +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *cmd) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* GMAC does not support 1000Mbps HD */ + if ((cmd->base.speed == SPEED_1000) && + (cmd->base.duplex != DUPLEX_FULL)) { + netif_dbg(efx, drv, efx->net_dev, + "rejecting unsupported 1000Mbps HD setting\n"); + return -EINVAL; + } + + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->set_link_ksettings(efx, cmd); + mutex_unlock(&efx->mac_lock); + return rc; +} + +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + if (!efx->phy_op || !efx->phy_op->get_fecparam) + return -EOPNOTSUPP; + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->get_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} + +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + if (!efx->phy_op || !efx->phy_op->get_fecparam) + return -EOPNOTSUPP; + mutex_lock(&efx->mac_lock); + rc = efx->phy_op->set_fecparam(efx, fecparam); + mutex_unlock(&efx->mac_lock); + + return rc; +} diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index fa624313f330..eaa1fd9157f8 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -15,8 +15,11 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info); u32 efx_ethtool_get_msglevel(struct net_device *net_dev); void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +int efx_ethtool_nway_reset(struct net_device *net_dev); void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause); +int efx_ethtool_set_pauseparam(struct net_device *net_dev, + struct ethtool_pauseparam *pause); int efx_ethtool_fill_self_tests(struct efx_nic *efx, struct efx_self_tests *tests, u8 *strings, u64 *data); @@ -26,5 +29,12 @@ void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set, void efx_ethtool_get_stats(struct net_device 
*net_dev, struct ethtool_stats *stats __attribute__ ((unused)), u64 *data); - +int efx_ethtool_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *out); +int efx_ethtool_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *settings); +int efx_ethtool_get_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); +int efx_ethtool_set_fecparam(struct net_device *net_dev, + struct ethtool_fecparam *fecparam); #endif From cdec457b7afe475d1735a5083e68a34f7d766dad Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:35:05 +0100 Subject: [PATCH 0461/2056] sfc: commonise ethtool NFC and RXFH/RSS functions EF100 will share EF10's model of filtering, hashing and spreading. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ethtool.c | 672 ---------------------- drivers/net/ethernet/sfc/ethtool_common.c | 672 ++++++++++++++++++++++ drivers/net/ethernet/sfc/ethtool_common.h | 16 + 3 files changed, 688 insertions(+), 672 deletions(-) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 55413d451ac3..5e0051b94ae7 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -267,678 +267,6 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) return efx_reset(efx, rc); } -/* MAC address mask including only I/G bit */ -static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; - -#define IP4_ADDR_FULL_MASK ((__force __be32)~0) -#define IP_PROTO_FULL_MASK 0xFF -#define PORT_FULL_MASK ((__force __be16)~0) -#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) - -static inline void ip6_fill_mask(__be32 *mask) -{ - mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; -} - -static int efx_ethtool_get_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule, - u32 *rss_context) -{ - struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; - struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; - struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; - struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; - struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; - struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; - struct ethhdr *mac_entry = &rule->h_u.ether_spec; - struct ethhdr *mac_mask = &rule->m_u.ether_spec; - struct efx_filter_spec spec; - int rc; - - rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, - rule->location, &spec); - if (rc) - return rc; - - if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) - rule->ring_cookie = RX_CLS_FLOW_DISC; - else - rule->ring_cookie = spec.dmaq_id; - - if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && - spec.ether_type == htons(ETH_P_IP) && - (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && - (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && - !(spec.match_flags & - ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { - rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
- TCP_V4_FLOW : UDP_V4_FLOW); - if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { - ip_entry->ip4dst = spec.loc_host[0]; - ip_mask->ip4dst = IP4_ADDR_FULL_MASK; - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { - ip_entry->ip4src = spec.rem_host[0]; - ip_mask->ip4src = IP4_ADDR_FULL_MASK; - } - if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { - ip_entry->pdst = spec.loc_port; - ip_mask->pdst = PORT_FULL_MASK; - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { - ip_entry->psrc = spec.rem_port; - ip_mask->psrc = PORT_FULL_MASK; - } - } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && - spec.ether_type == htons(ETH_P_IPV6) && - (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && - (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && - !(spec.match_flags & - ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { - rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? - TCP_V6_FLOW : UDP_V6_FLOW); - if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { - memcpy(ip6_entry->ip6dst, spec.loc_host, - sizeof(ip6_entry->ip6dst)); - ip6_fill_mask(ip6_mask->ip6dst); - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { - memcpy(ip6_entry->ip6src, spec.rem_host, - sizeof(ip6_entry->ip6src)); - ip6_fill_mask(ip6_mask->ip6src); - } - if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { - ip6_entry->pdst = spec.loc_port; - ip6_mask->pdst = PORT_FULL_MASK; - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { - ip6_entry->psrc = spec.rem_port; - ip6_mask->psrc = PORT_FULL_MASK; - } - } else if (!(spec.match_flags & - ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | - EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | - EFX_FILTER_MATCH_OUTER_VID))) { - rule->flow_type = ETHER_FLOW; - if (spec.match_flags & - (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { - ether_addr_copy(mac_entry->h_dest, spec.loc_mac); - if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) - eth_broadcast_addr(mac_mask->h_dest); - else - ether_addr_copy(mac_mask->h_dest, - mac_addr_ig_mask); - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { - ether_addr_copy(mac_entry->h_source, spec.rem_mac); - eth_broadcast_addr(mac_mask->h_source); - } - if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { - mac_entry->h_proto = spec.ether_type; - mac_mask->h_proto = ETHER_TYPE_FULL_MASK; - } - } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && - spec.ether_type == htons(ETH_P_IP) && - !(spec.match_flags & - ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_IP_PROTO))) { - rule->flow_type = IPV4_USER_FLOW; - uip_entry->ip_ver = ETH_RX_NFC_IP4; - if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { - uip_mask->proto = IP_PROTO_FULL_MASK; - uip_entry->proto = spec.ip_proto; - } - if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { - uip_entry->ip4dst = spec.loc_host[0]; - uip_mask->ip4dst = IP4_ADDR_FULL_MASK; - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { - uip_entry->ip4src = spec.rem_host[0]; - uip_mask->ip4src = IP4_ADDR_FULL_MASK; - } - } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && - spec.ether_type == htons(ETH_P_IPV6) && - !(spec.match_flags & - ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | - EFX_FILTER_MATCH_IP_PROTO))) { - 
rule->flow_type = IPV6_USER_FLOW; - if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { - uip6_mask->l4_proto = IP_PROTO_FULL_MASK; - uip6_entry->l4_proto = spec.ip_proto; - } - if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { - memcpy(uip6_entry->ip6dst, spec.loc_host, - sizeof(uip6_entry->ip6dst)); - ip6_fill_mask(uip6_mask->ip6dst); - } - if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { - memcpy(uip6_entry->ip6src, spec.rem_host, - sizeof(uip6_entry->ip6src)); - ip6_fill_mask(uip6_mask->ip6src); - } - } else { - /* The above should handle all filters that we insert */ - WARN_ON(1); - return -EINVAL; - } - - if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { - rule->flow_type |= FLOW_EXT; - rule->h_ext.vlan_tci = spec.outer_vid; - rule->m_ext.vlan_tci = htons(0xfff); - } - - if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { - rule->flow_type |= FLOW_RSS; - *rss_context = spec.rss_context; - } - - return rc; -} - -static int -efx_ethtool_get_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info, u32 *rule_locs) -{ - struct efx_nic *efx = netdev_priv(net_dev); - u32 rss_context = 0; - s32 rc = 0; - - switch (info->cmd) { - case ETHTOOL_GRXRINGS: - info->data = efx->n_rx_channels; - return 0; - - case ETHTOOL_GRXFH: { - struct efx_rss_context *ctx = &efx->rss_context; - __u64 data; - - mutex_lock(&efx->rss_lock); - if (info->flow_type & FLOW_RSS && info->rss_context) { - ctx = efx_find_rss_context_entry(efx, info->rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - } - - data = 0; - if (!efx_rss_active(ctx)) /* No RSS */ - goto out_setdata_unlock; - - switch (info->flow_type & ~FLOW_RSS) { - case UDP_V4_FLOW: - case UDP_V6_FLOW: - if (ctx->rx_hash_udp_4tuple) - data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | - RXH_IP_SRC | RXH_IP_DST); - else - data = RXH_IP_SRC | RXH_IP_DST; - break; - case TCP_V4_FLOW: - case TCP_V6_FLOW: - data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | - RXH_IP_SRC | RXH_IP_DST); - break; - case SCTP_V4_FLOW: - case SCTP_V6_FLOW: - case AH_ESP_V4_FLOW: - case AH_ESP_V6_FLOW: - case IPV4_FLOW: - case IPV6_FLOW: - data = RXH_IP_SRC | RXH_IP_DST; - break; - default: - break; - } -out_setdata_unlock: - info->data = data; -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; - } - - case ETHTOOL_GRXCLSRLCNT: - info->data = efx_filter_get_rx_id_limit(efx); - if (info->data == 0) - return -EOPNOTSUPP; - info->data |= RX_CLS_LOC_SPECIAL; - info->rule_cnt = - efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); - return 0; - - case ETHTOOL_GRXCLSRULE: - if (efx_filter_get_rx_id_limit(efx) == 0) - return -EOPNOTSUPP; - rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); - if (rc < 0) - return rc; - if (info->fs.flow_type & FLOW_RSS) - info->rss_context = rss_context; - return 0; - - case ETHTOOL_GRXCLSRLALL: - info->data = efx_filter_get_rx_id_limit(efx); - if (info->data == 0) - return -EOPNOTSUPP; - rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, - rule_locs, info->rule_cnt); - if (rc < 0) - return rc; - info->rule_cnt = rc; - return 0; - - default: - return -EOPNOTSUPP; - } -} - -static inline bool ip6_mask_is_full(__be32 mask[4]) -{ - return !~(mask[0] & mask[1] & mask[2] & mask[3]); -} - -static inline bool ip6_mask_is_empty(__be32 mask[4]) -{ - return !(mask[0] | mask[1] | mask[2] | mask[3]); -} - -static int efx_ethtool_set_class_rule(struct efx_nic *efx, - struct ethtool_rx_flow_spec *rule, - u32 rss_context) -{ - struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; - struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; - 
struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; - struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; - struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; - struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; - struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; - struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; - u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); - struct ethhdr *mac_entry = &rule->h_u.ether_spec; - struct ethhdr *mac_mask = &rule->m_u.ether_spec; - enum efx_filter_flags flags = 0; - struct efx_filter_spec spec; - int rc; - - /* Check that user wants us to choose the location */ - if (rule->location != RX_CLS_LOC_ANY) - return -EINVAL; - - /* Range-check ring_cookie */ - if (rule->ring_cookie >= efx->n_rx_channels && - rule->ring_cookie != RX_CLS_FLOW_DISC) - return -EINVAL; - - /* Check for unsupported extensions */ - if ((rule->flow_type & FLOW_EXT) && - (rule->m_ext.vlan_etype || rule->m_ext.data[0] || - rule->m_ext.data[1])) - return -EINVAL; - - if (efx->rx_scatter) - flags |= EFX_FILTER_FLAG_RX_SCATTER; - if (rule->flow_type & FLOW_RSS) - flags |= EFX_FILTER_FLAG_RX_RSS; - - efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, - (rule->ring_cookie == RX_CLS_FLOW_DISC) ? - EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); - - if (rule->flow_type & FLOW_RSS) - spec.rss_context = rss_context; - - switch (flow_type) { - case TCP_V4_FLOW: - case UDP_V4_FLOW: - spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | - EFX_FILTER_MATCH_IP_PROTO); - spec.ether_type = htons(ETH_P_IP); - spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP - : IPPROTO_UDP; - if (ip_mask->ip4dst) { - if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; - spec.loc_host[0] = ip_entry->ip4dst; - } - if (ip_mask->ip4src) { - if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; - spec.rem_host[0] = ip_entry->ip4src; - } - if (ip_mask->pdst) { - if (ip_mask->pdst != PORT_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; - spec.loc_port = ip_entry->pdst; - } - if (ip_mask->psrc) { - if (ip_mask->psrc != PORT_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; - spec.rem_port = ip_entry->psrc; - } - if (ip_mask->tos) - return -EINVAL; - break; - - case TCP_V6_FLOW: - case UDP_V6_FLOW: - spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | - EFX_FILTER_MATCH_IP_PROTO); - spec.ether_type = htons(ETH_P_IPV6); - spec.ip_proto = flow_type == TCP_V6_FLOW ? 
IPPROTO_TCP - : IPPROTO_UDP; - if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { - if (!ip6_mask_is_full(ip6_mask->ip6dst)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; - memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); - } - if (!ip6_mask_is_empty(ip6_mask->ip6src)) { - if (!ip6_mask_is_full(ip6_mask->ip6src)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; - memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); - } - if (ip6_mask->pdst) { - if (ip6_mask->pdst != PORT_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; - spec.loc_port = ip6_entry->pdst; - } - if (ip6_mask->psrc) { - if (ip6_mask->psrc != PORT_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; - spec.rem_port = ip6_entry->psrc; - } - if (ip6_mask->tclass) - return -EINVAL; - break; - - case IPV4_USER_FLOW: - if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || - uip_entry->ip_ver != ETH_RX_NFC_IP4) - return -EINVAL; - spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; - spec.ether_type = htons(ETH_P_IP); - if (uip_mask->ip4dst) { - if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; - spec.loc_host[0] = uip_entry->ip4dst; - } - if (uip_mask->ip4src) { - if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; - spec.rem_host[0] = uip_entry->ip4src; - } - if (uip_mask->proto) { - if (uip_mask->proto != IP_PROTO_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; - spec.ip_proto = uip_entry->proto; - } - break; - - case IPV6_USER_FLOW: - if (uip6_mask->l4_4_bytes || uip6_mask->tclass) - return -EINVAL; - spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; - spec.ether_type = htons(ETH_P_IPV6); - if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { - if (!ip6_mask_is_full(uip6_mask->ip6dst)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; - memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); - } - if (!ip6_mask_is_empty(uip6_mask->ip6src)) { - if (!ip6_mask_is_full(uip6_mask->ip6src)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; - memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); - } - if (uip6_mask->l4_proto) { - if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; - spec.ip_proto = uip6_entry->l4_proto; - } - break; - - case ETHER_FLOW: - if (!is_zero_ether_addr(mac_mask->h_dest)) { - if (ether_addr_equal(mac_mask->h_dest, - mac_addr_ig_mask)) - spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; - else if (is_broadcast_ether_addr(mac_mask->h_dest)) - spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; - else - return -EINVAL; - ether_addr_copy(spec.loc_mac, mac_entry->h_dest); - } - if (!is_zero_ether_addr(mac_mask->h_source)) { - if (!is_broadcast_ether_addr(mac_mask->h_source)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; - ether_addr_copy(spec.rem_mac, mac_entry->h_source); - } - if (mac_mask->h_proto) { - if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; - spec.ether_type = mac_entry->h_proto; - } - break; - - default: - return -EINVAL; - } - - if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { - if (rule->m_ext.vlan_tci != htons(0xfff)) - return -EINVAL; - spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; - spec.outer_vid = rule->h_ext.vlan_tci; - } - - rc = 
efx_filter_insert_filter(efx, &spec, true); - if (rc < 0) - return rc; - - rule->location = rc; - return 0; -} - -static int efx_ethtool_set_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *info) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx_filter_get_rx_id_limit(efx) == 0) - return -EOPNOTSUPP; - - switch (info->cmd) { - case ETHTOOL_SRXCLSRLINS: - return efx_ethtool_set_class_rule(efx, &info->fs, - info->rss_context); - - case ETHTOOL_SRXCLSRLDEL: - return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, - info->fs.location); - - default: - return -EOPNOTSUPP; - } -} - -static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->n_rx_channels == 1) - return 0; - return ARRAY_SIZE(efx->rss_context.rx_indir_table); -} - -static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - return efx->type->rx_hash_key_size; -} - -static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - rc = efx->type->rx_pull_rss_config(efx); - if (rc) - return rc; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (indir) - memcpy(indir, efx->rss_context.rx_indir_table, - sizeof(efx->rss_context.rx_indir_table)); - if (key) - memcpy(key, efx->rss_context.rx_hash_key, - efx->type->rx_hash_key_size); - return 0; -} - -static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - /* Hash function is Toeplitz, cannot be changed */ - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) - return -EOPNOTSUPP; - if (!indir && !key) - return 0; - - if (!key) - key = efx->rss_context.rx_hash_key; - if (!indir) - indir = efx->rss_context.rx_indir_table; - - return efx->type->rx_push_rss_config(efx, true, indir, key); -} - -static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, - u8 *key, u8 *hfunc, u32 rss_context) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_rss_context *ctx; - int rc = 0; - - if (!efx->type->rx_pull_rss_context_config) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - ctx = efx_find_rss_context_entry(efx, rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - rc = efx->type->rx_pull_rss_context_config(efx, ctx); - if (rc) - goto out_unlock; - - if (hfunc) - *hfunc = ETH_RSS_HASH_TOP; - if (indir) - memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); - if (key) - memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; -} - -static int efx_ethtool_set_rxfh_context(struct net_device *net_dev, - const u32 *indir, const u8 *key, - const u8 hfunc, u32 *rss_context, - bool delete) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_rss_context *ctx; - bool allocated = false; - int rc; - - if (!efx->type->rx_push_rss_context_config) - return -EOPNOTSUPP; - /* Hash function is Toeplitz, cannot be changed */ - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - - if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { - if (delete) { - /* alloc + delete == Nothing to do */ - rc = -EINVAL; - goto out_unlock; - } - ctx = efx_alloc_rss_context_entry(efx); - if (!ctx) { - rc = -ENOMEM; - goto out_unlock; - } - ctx->context_id = 
EFX_MCDI_RSS_CONTEXT_INVALID; - /* Initialise indir table and key to defaults */ - efx_set_default_rx_indir_table(efx, ctx); - netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); - allocated = true; - } else { - ctx = efx_find_rss_context_entry(efx, *rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - } - - if (delete) { - /* delete this context */ - rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); - if (!rc) - efx_free_rss_context_entry(ctx); - goto out_unlock; - } - - if (!key) - key = ctx->rx_hash_key; - if (!indir) - indir = ctx->rx_indir_table; - - rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); - if (rc && allocated) - efx_free_rss_context_entry(ctx); - else - *rss_context = ctx->user_id; -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; -} - static int efx_ethtool_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) { diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index b91961126eeb..d7d8795eb1d3 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -13,6 +13,7 @@ #include "mcdi.h" #include "nic.h" #include "selftest.h" +#include "rx_common.h" #include "ethtool_common.h" struct efx_sw_stat_desc { @@ -601,3 +602,674 @@ int efx_ethtool_set_fecparam(struct net_device *net_dev, return rc; } + +/* MAC address mask including only I/G bit */ +static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; + +#define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF +#define PORT_FULL_MASK ((__force __be16)~0) +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) + +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + +static int efx_ethtool_get_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 *rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + struct efx_filter_spec spec; + int rc; + + rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL, + rule->location, &spec); + if (rc) + return rc; + + if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP) + rule->ring_cookie = RX_CLS_FLOW_DISC; + else + rule->ring_cookie = spec.dmaq_id; + + if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IP) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V4_FLOW : UDP_V4_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + ip_entry->ip4dst = spec.loc_host[0]; + ip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + ip_entry->ip4src = spec.rem_host[0]; + ip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip_entry->pdst = spec.loc_port; + ip_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip_entry->psrc = spec.rem_port; + ip_mask->psrc = PORT_FULL_MASK; + } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? + TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } + } else if (!(spec.match_flags & + ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | + EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_OUTER_VID))) { + rule->flow_type = ETHER_FLOW; + if (spec.match_flags & + (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { + ether_addr_copy(mac_entry->h_dest, spec.loc_mac); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) + eth_broadcast_addr(mac_mask->h_dest); + else + ether_addr_copy(mac_mask->h_dest, + mac_addr_ig_mask); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { + ether_addr_copy(mac_entry->h_source, spec.rem_mac); + eth_broadcast_addr(mac_mask->h_source); + } + if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { + mac_entry->h_proto = spec.ether_type; + mac_mask->h_proto = ETHER_TYPE_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + 
rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } + } else { + /* The above should handle all filters that we insert */ + WARN_ON(1); + return -EINVAL; + } + + if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) { + rule->flow_type |= FLOW_EXT; + rule->h_ext.vlan_tci = spec.outer_vid; + rule->m_ext.vlan_tci = htons(0xfff); + } + + if (spec.flags & EFX_FILTER_FLAG_RX_RSS) { + rule->flow_type |= FLOW_RSS; + *rss_context = spec.rss_context; + } + + return rc; +} + +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs) +{ + struct efx_nic *efx = netdev_priv(net_dev); + u32 rss_context = 0; + s32 rc = 0; + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = efx->n_rx_channels; + return 0; + + case ETHTOOL_GRXFH: { + struct efx_rss_context *ctx = &efx->rss_context; + __u64 data; + + mutex_lock(&efx->rss_lock); + if (info->flow_type & FLOW_RSS && info->rss_context) { + ctx = efx_find_rss_context_entry(efx, info->rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + data = 0; + if (!efx_rss_active(ctx)) /* No RSS */ + goto out_setdata_unlock; + + switch (info->flow_type & ~FLOW_RSS) { + case UDP_V4_FLOW: + case UDP_V6_FLOW: + if (ctx->rx_hash_udp_4tuple) + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + else + data = RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V4_FLOW: + case TCP_V6_FLOW: + data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 | + RXH_IP_SRC | RXH_IP_DST); + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V4_FLOW: + case AH_ESP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + data = RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } +out_setdata_unlock: + info->data = data; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; + } + + case ETHTOOL_GRXCLSRLCNT: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + info->data |= RX_CLS_LOC_SPECIAL; + info->rule_cnt = + efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL); + return 0; + + case ETHTOOL_GRXCLSRULE: + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context); + if (rc < 0) + return rc; + if (info->fs.flow_type & FLOW_RSS) + info->rss_context = rss_context; + return 0; + + case ETHTOOL_GRXCLSRLALL: + info->data = efx_filter_get_rx_id_limit(efx); + if (info->data == 0) + return -EOPNOTSUPP; + rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL, + rule_locs, info->rule_cnt); + if (rc < 0) + return rc; + info->rule_cnt = rc; + return 0; + + default: + return -EOPNOTSUPP; + } +} + +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + +static int efx_ethtool_set_class_rule(struct efx_nic *efx, + struct ethtool_rx_flow_spec *rule, + u32 rss_context) +{ + struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; + struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct 
ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; + u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS); + struct ethhdr *mac_entry = &rule->h_u.ether_spec; + struct ethhdr *mac_mask = &rule->m_u.ether_spec; + enum efx_filter_flags flags = 0; + struct efx_filter_spec spec; + int rc; + + /* Check that user wants us to choose the location */ + if (rule->location != RX_CLS_LOC_ANY) + return -EINVAL; + + /* Range-check ring_cookie */ + if (rule->ring_cookie >= efx->n_rx_channels && + rule->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + /* Check for unsupported extensions */ + if ((rule->flow_type & FLOW_EXT) && + (rule->m_ext.vlan_etype || rule->m_ext.data[0] || + rule->m_ext.data[1])) + return -EINVAL; + + if (efx->rx_scatter) + flags |= EFX_FILTER_FLAG_RX_SCATTER; + if (rule->flow_type & FLOW_RSS) + flags |= EFX_FILTER_FLAG_RX_RSS; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags, + (rule->ring_cookie == RX_CLS_FLOW_DISC) ? + EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie); + + if (rule->flow_type & FLOW_RSS) + spec.rss_context = rss_context; + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IP); + spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP + : IPPROTO_UDP; + if (ip_mask->ip4dst) { + if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = ip_entry->ip4dst; + } + if (ip_mask->ip4src) { + if (ip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = ip_entry->ip4src; + } + if (ip_mask->pdst) { + if (ip_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip_entry->pdst; + } + if (ip_mask->psrc) { + if (ip_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip_entry->psrc; + } + if (ip_mask->tos) + return -EINVAL; + break; + + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = flow_type == TCP_V6_FLOW ? 
IPPROTO_TCP + : IPPROTO_UDP; + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + + case ETHER_FLOW: + if (!is_zero_ether_addr(mac_mask->h_dest)) { + if (ether_addr_equal(mac_mask->h_dest, + mac_addr_ig_mask)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG; + else if (is_broadcast_ether_addr(mac_mask->h_dest)) + spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; + else + return -EINVAL; + ether_addr_copy(spec.loc_mac, mac_entry->h_dest); + } + if (!is_zero_ether_addr(mac_mask->h_source)) { + if (!is_broadcast_ether_addr(mac_mask->h_source)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; + ether_addr_copy(spec.rem_mac, mac_entry->h_source); + } + if (mac_mask->h_proto) { + if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = mac_entry->h_proto; + } + break; + + default: + return -EINVAL; + } + + if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) { + if (rule->m_ext.vlan_tci != htons(0xfff)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID; + spec.outer_vid = rule->h_ext.vlan_tci; + } + + rc = 
efx_filter_insert_filter(efx, &spec, true); + if (rc < 0) + return rc; + + rule->location = rc; + return 0; +} + +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx_filter_get_rx_id_limit(efx) == 0) + return -EOPNOTSUPP; + + switch (info->cmd) { + case ETHTOOL_SRXCLSRLINS: + return efx_ethtool_set_class_rule(efx, &info->fs, + info->rss_context); + + case ETHTOOL_SRXCLSRLDEL: + return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL, + info->fs.location); + + default: + return -EOPNOTSUPP; + } +} + +u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->n_rx_channels == 1) + return 0; + return ARRAY_SIZE(efx->rss_context.rx_indir_table); +} + +u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + return efx->type->rx_hash_key_size; +} + +int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->rx_pull_rss_config(efx); + if (rc) + return rc; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, efx->rss_context.rx_indir_table, + sizeof(efx->rss_context.rx_indir_table)); + if (key) + memcpy(key, efx->rss_context.rx_hash_key, + efx->type->rx_hash_key_size); + return 0; +} + +int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + if (!indir && !key) + return 0; + + if (!key) + key = efx->rss_context.rx_hash_key; + if (!indir) + indir = efx->rss_context.rx_indir_table; + + return efx->type->rx_push_rss_config(efx, true, indir, key); +} + +int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + int rc = 0; + + if (!efx->type->rx_pull_rss_context_config) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + ctx = efx_find_rss_context_entry(efx, rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + rc = efx->type->rx_pull_rss_context_config(efx, ctx); + if (rc) + goto out_unlock; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + if (indir) + memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table)); + if (key) + memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size); +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} + +int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_rss_context *ctx; + bool allocated = false; + int rc; + + if (!efx->type->rx_push_rss_context_config) + return -EOPNOTSUPP; + /* Hash function is Toeplitz, cannot be changed */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + mutex_lock(&efx->rss_lock); + + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + if (delete) { + /* alloc + delete == Nothing to do */ + rc = -EINVAL; + goto out_unlock; + } + ctx = efx_alloc_rss_context_entry(efx); + if (!ctx) { + rc = -ENOMEM; + goto out_unlock; + } + ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + /* Initialise indir table and key 
to defaults */ + efx_set_default_rx_indir_table(efx, ctx); + netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); + allocated = true; + } else { + ctx = efx_find_rss_context_entry(efx, *rss_context); + if (!ctx) { + rc = -ENOENT; + goto out_unlock; + } + } + + if (delete) { + /* delete this context */ + rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); + if (!rc) + efx_free_rss_context_entry(ctx); + goto out_unlock; + } + + if (!key) + key = ctx->rx_hash_key; + if (!indir) + indir = ctx->rx_indir_table; + + rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); + if (rc && allocated) + efx_free_rss_context_entry(ctx); + else + *rss_context = ctx->user_id; +out_unlock: + mutex_unlock(&efx->rss_lock); + return rc; +} diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index eaa1fd9157f8..024a78ce0905 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -37,4 +37,20 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev, struct ethtool_fecparam *fecparam); int efx_ethtool_set_fecparam(struct net_device *net_dev, struct ethtool_fecparam *fecparam); +int efx_ethtool_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info, u32 *rule_locs); +int efx_ethtool_set_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *info); +u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev); +u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev); +int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key, + u8 *hfunc); +int efx_ethtool_set_rxfh(struct net_device *net_dev, + const u32 *indir, const u8 *key, const u8 hfunc); +int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context); +int efx_ethtool_set_rxfh_context(struct net_device *net_dev, + const u32 *indir, const u8 *key, + const u8 hfunc, u32 *rss_context, + bool delete); #endif From 5671dd5565d443185cdc325e8bea0cdd77f3911b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:35:15 +0100 Subject: [PATCH 0462/2056] sfc: commonise other ethtool bits A few more ethtool handlers which EF100 will share. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ethtool.c | 93 ----------------------- drivers/net/ethernet/sfc/ethtool_common.c | 93 +++++++++++++++++++++++ drivers/net/ethernet/sfc/ethtool_common.h | 8 ++ 3 files changed, 101 insertions(+), 93 deletions(-) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 5e0051b94ae7..48a96ed6b7d0 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -68,54 +68,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev, efx_nic_get_regs(efx, buf); } -static void efx_ethtool_self_test(struct net_device *net_dev, - struct ethtool_test *test, u64 *data) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_self_tests *efx_tests; - bool already_up; - int rc = -ENOMEM; - - efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); - if (!efx_tests) - goto fail; - - if (efx->state != STATE_READY) { - rc = -EBUSY; - goto out; - } - - netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", - (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - - /* We need rx buffers and interrupts. 
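
The ETHTOOL_SRXCLSRLINS handler moved into ethtool_common.c above expects userspace to pass RX_CLS_LOC_ANY so that the driver picks the filter location itself and reports it back. The sketch below shows roughly how such a request is issued from userspace; the interface name, addresses, port and queue index are placeholders, error handling is minimal, and the copy-back of the chosen location assumes a reasonably recent kernel. The same structure is read back with ETHTOOL_GRXCLSRULE, which is what efx_ethtool_get_class_rule() services.

/* Minimal userspace sketch: insert a TCP/IPv4 steering rule with
 * ETHTOOL_SRXCLSRLINS and let the driver choose the slot (RX_CLS_LOC_ANY),
 * which is what efx_ethtool_set_class_rule() requires.  "eth0", the
 * address, port and queue index are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.location = RX_CLS_LOC_ANY;	/* driver picks the slot */
	nfc.fs.ring_cookie = 3;			/* steer to RX queue 3 */
	/* all-ones masks request an exact match on dst IP and dst port */
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(4242);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	else	/* recent kernels copy the driver-chosen location back */
		printf("rule inserted at location %u\n", nfc.fs.location);
	return 0;
}
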
*/ - already_up = (efx->net_dev->flags & IFF_UP); - if (!already_up) { - rc = dev_open(efx->net_dev, NULL); - if (rc) { - netif_err(efx, drv, efx->net_dev, - "failed opening device.\n"); - goto out; - } - } - - rc = efx_selftest(efx, efx_tests, test->flags); - - if (!already_up) - dev_close(efx->net_dev); - - netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", - rc == 0 ? "passed" : "failed", - (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); - -out: - efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); - kfree(efx_tests); -fail: - if (rc) - test->flags |= ETH_TEST_FL_FAILED; -} - /* * Each channel has a single IRQ and moderation timer, started by any * completion (or other event). Unless the module parameter @@ -255,18 +207,6 @@ static int efx_ethtool_set_wol(struct net_device *net_dev, return efx->type->set_wol(efx, wol->wolopts); } -static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - rc = efx->type->map_reset_flags(flags); - if (rc < 0) - return rc; - - return efx_reset(efx, rc); -} - static int efx_ethtool_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) { @@ -281,39 +221,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, return 0; } -static int efx_ethtool_get_module_eeprom(struct net_device *net_dev, - struct ethtool_eeprom *ee, - u8 *data) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int ret; - - if (!efx->phy_op || !efx->phy_op->get_module_eeprom) - return -EOPNOTSUPP; - - mutex_lock(&efx->mac_lock); - ret = efx->phy_op->get_module_eeprom(efx, ee, data); - mutex_unlock(&efx->mac_lock); - - return ret; -} - -static int efx_ethtool_get_module_info(struct net_device *net_dev, - struct ethtool_modinfo *modinfo) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int ret; - - if (!efx->phy_op || !efx->phy_op->get_module_info) - return -EOPNOTSUPP; - - mutex_lock(&efx->mac_lock); - ret = efx->phy_op->get_module_info(efx, modinfo); - mutex_unlock(&efx->mac_lock); - - return ret; -} - const struct ethtool_ops efx_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index d7d8795eb1d3..c96595e50234 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -125,6 +125,54 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) efx->msg_enable = msg_enable; } +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_self_tests *efx_tests; + bool already_up; + int rc = -ENOMEM; + + efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); + if (!efx_tests) + goto fail; + + if (efx->state != STATE_READY) { + rc = -EBUSY; + goto out; + } + + netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", + (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); + + /* We need rx buffers and interrupts. */ + already_up = (efx->net_dev->flags & IFF_UP); + if (!already_up) { + rc = dev_open(efx->net_dev, NULL); + if (rc) { + netif_err(efx, drv, efx->net_dev, + "failed opening device.\n"); + goto out; + } + } + + rc = efx_selftest(efx, efx_tests, test->flags); + + if (!already_up) + dev_close(efx->net_dev); + + netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n", + rc == 0 ? "passed" : "failed", + (test->flags & ETH_TEST_FL_OFFLINE) ? 
"off" : "on"); + +out: + efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); + kfree(efx_tests); +fail: + if (rc) + test->flags |= ETH_TEST_FL_FAILED; +} + /* Restart autonegotiation */ int efx_ethtool_nway_reset(struct net_device *net_dev) { @@ -1273,3 +1321,48 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, mutex_unlock(&efx->rss_lock); return rc; } + +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + rc = efx->type->map_reset_flags(flags); + if (rc < 0) + return rc; + + return efx_reset(efx, rc); +} + +int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_eeprom) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_eeprom(efx, ee, data); + mutex_unlock(&efx->mac_lock); + + return ret; +} + +int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int ret; + + if (!efx->phy_op || !efx->phy_op->get_module_info) + return -EOPNOTSUPP; + + mutex_lock(&efx->mac_lock); + ret = efx->phy_op->get_module_info(efx, modinfo); + mutex_unlock(&efx->mac_lock); + + return ret; +} diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index 024a78ce0905..7bfbbd08a1ef 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -15,6 +15,8 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info); u32 efx_ethtool_get_msglevel(struct net_device *net_dev); void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); +void efx_ethtool_self_test(struct net_device *net_dev, + struct ethtool_test *test, u64 *data); int efx_ethtool_nway_reset(struct net_device *net_dev); void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause); @@ -53,4 +55,10 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, const u32 *indir, const u8 *key, const u8 hfunc, u32 *rss_context, bool delete); +int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); +int efx_ethtool_get_module_eeprom(struct net_device *net_dev, + struct ethtool_eeprom *ee, + u8 *data); +int efx_ethtool_get_module_info(struct net_device *net_dev, + struct ethtool_modinfo *modinfo); #endif From 53e1f21abd89bde46ed30061c58370b8a079f6f5 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:35:25 +0100 Subject: [PATCH 0463/2056] sfc: commonise FC advertising Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 24 ------------------------ drivers/net/ethernet/sfc/efx.h | 3 --- drivers/net/ethernet/sfc/efx_common.c | 24 ++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx_common.h | 3 +++ 4 files changed, 27 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 256807c28ff7..474cfce5c042 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -133,30 +133,6 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs, * **************************************************************************/ -/* Equivalent to efx_link_set_advertising with all-zeroes, except does not - * force the Autoneg bit on. 
- */ -void efx_link_clear_advertising(struct efx_nic *efx) -{ - bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); - efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); -} - -void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) -{ - efx->wanted_fc = wanted_fc; - if (efx->link_advertising[0]) { - if (wanted_fc & EFX_FC_RX) - efx->link_advertising[0] |= (ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - else - efx->link_advertising[0] &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - if (wanted_fc & EFX_FC_TX) - efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; - } -} - static void efx_fini_port(struct efx_nic *efx); static int efx_probe_port(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 66dcab140449..8aadec02407c 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -216,9 +216,6 @@ static inline void efx_schedule_channel_irq(struct efx_channel *channel) efx_schedule_channel(channel); } -void efx_link_clear_advertising(struct efx_nic *efx); -void efx_link_set_wanted_fc(struct efx_nic *efx, u8); - static inline void efx_device_detach_sync(struct efx_nic *efx) { struct net_device *dev = efx->net_dev; diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 1799ff9a45d9..02459d90afb0 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -383,6 +383,30 @@ static void efx_stop_datapath(struct efx_nic *efx) * **************************************************************************/ +/* Equivalent to efx_link_set_advertising with all-zeroes, except does not + * force the Autoneg bit on. + */ +void efx_link_clear_advertising(struct efx_nic *efx) +{ + bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS); + efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); +} + +void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc) +{ + efx->wanted_fc = wanted_fc; + if (efx->link_advertising[0]) { + if (wanted_fc & EFX_FC_RX) + efx->link_advertising[0] |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else + efx->link_advertising[0] &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + if (wanted_fc & EFX_FC_TX) + efx->link_advertising[0] ^= ADVERTISED_Asym_Pause; + } +} + static void efx_start_port(struct efx_nic *efx) { netif_dbg(efx, ifup, efx->net_dev, "start port\n"); diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index fa2fc681e7f9..c522a5be43d2 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -18,6 +18,9 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev); void efx_fini_struct(struct efx_nic *efx); +void efx_link_clear_advertising(struct efx_nic *efx); +void efx_link_set_wanted_fc(struct efx_nic *efx, u8); + void efx_start_all(struct efx_nic *efx); void efx_stop_all(struct efx_nic *efx); From 66a65128d4a585aff4baf123d710107cbd31c3a7 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:35:33 +0100 Subject: [PATCH 0464/2056] sfc: track which BAR is mapped EF100 needs to map multiple BARs (sequentially, not concurrently) in order to read the Function Control Window during probe. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 4 ++-- drivers/net/ethernet/sfc/efx_common.c | 19 ++++++++++++------- drivers/net/ethernet/sfc/efx_common.h | 2 +- drivers/net/ethernet/sfc/net_driver.h | 3 +++ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 474cfce5c042..86639b1e4e5c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1074,7 +1074,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev) efx_pci_remove_main(efx); - efx_fini_io(efx, efx->type->mem_bar(efx)); + efx_fini_io(efx); netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); efx_fini_struct(efx); @@ -1342,7 +1342,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return 0; fail3: - efx_fini_io(efx, efx->type->mem_bar(efx)); + efx_fini_io(efx); fail2: efx_fini_struct(efx); fail1: diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 02459d90afb0..36c0ab57d3bd 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -953,6 +953,8 @@ int efx_init_struct(struct efx_nic *efx, INIT_WORK(&efx->mac_work, efx_mac_work); init_waitqueue_head(&efx->flush_wq); + efx->mem_bar = UINT_MAX; + rc = efx_init_channels(efx); if (rc) goto fail; @@ -996,7 +998,9 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, struct pci_dev *pci_dev = efx->pci_dev; int rc; - netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); + efx->mem_bar = UINT_MAX; + + netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar); rc = pci_enable_device(pci_dev); if (rc) { @@ -1038,21 +1042,21 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, rc = pci_request_region(pci_dev, bar, "sfc"); if (rc) { netif_err(efx, probe, efx->net_dev, - "request for memory BAR failed\n"); + "request for memory BAR[%d] failed\n", bar); rc = -EIO; goto fail3; } - + efx->mem_bar = bar; efx->membase = ioremap(efx->membase_phys, mem_map_size); if (!efx->membase) { netif_err(efx, probe, efx->net_dev, - "could not map memory BAR at %llx+%x\n", + "could not map memory BAR[%d] at %llx+%x\n", bar, (unsigned long long)efx->membase_phys, mem_map_size); rc = -ENOMEM; goto fail4; } netif_dbg(efx, probe, efx->net_dev, - "memory BAR at %llx+%x (virtual %p)\n", + "memory BAR[%d] at %llx+%x (virtual %p)\n", bar, (unsigned long long)efx->membase_phys, mem_map_size, efx->membase); @@ -1068,7 +1072,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, return rc; } -void efx_fini_io(struct efx_nic *efx, int bar) +void efx_fini_io(struct efx_nic *efx) { netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); @@ -1078,8 +1082,9 @@ void efx_fini_io(struct efx_nic *efx, int bar) } if (efx->membase_phys) { - pci_release_region(efx->pci_dev, bar); + pci_release_region(efx->pci_dev, efx->mem_bar); efx->membase_phys = 0; + efx->mem_bar = UINT_MAX; } /* Don't disable bus-mastering if VFs are assigned */ diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index c522a5be43d2..93a017aafb9f 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -13,7 +13,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask, unsigned int mem_map_size); -void efx_fini_io(struct efx_nic *efx, int bar); +void efx_fini_io(struct efx_nic *efx); int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev); void efx_fini_struct(struct 
efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 7bc4d1cbb398..e0b84b2e3bd2 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -961,6 +961,7 @@ struct efx_async_filter_insertion { * @vpd_sn: Serial number read from VPD * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their * xdp_rxq_info structures? + * @mem_bar: The BAR that is mapped into membase. * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. This @@ -1137,6 +1138,8 @@ struct efx_nic { char *vpd_sn; bool xdp_rxq_info_failed; + unsigned int mem_bar; + /* The following fields may be written more often */ struct delayed_work monitor_work ____cacheline_aligned_in_smp; From 21ea21252eddb3cea56845f58f87208062799bef Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:35:41 +0100 Subject: [PATCH 0465/2056] sfc: commonise PCI error handlers EF100 will use the same mechanisms for PCI error recovery. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 91 --------------------------- drivers/net/ethernet/sfc/efx_common.c | 91 +++++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx_common.h | 1 + 3 files changed, 92 insertions(+), 91 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 86639b1e4e5c..4c2d305089ce 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1490,97 +1490,6 @@ static const struct dev_pm_ops efx_pm_ops = { .restore = efx_pm_resume, }; -/* A PCI error affecting this device was detected. - * At this point MMIO and DMA may be disabled. - * Stop the software path and request a slot reset. - */ -static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) -{ - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - struct efx_nic *efx = pci_get_drvdata(pdev); - - if (state == pci_channel_io_perm_failure) - return PCI_ERS_RESULT_DISCONNECT; - - rtnl_lock(); - - if (efx->state != STATE_DISABLED) { - efx->state = STATE_RECOVERY; - efx->reset_pending = 0; - - efx_device_detach_sync(efx); - - efx_stop_all(efx); - efx_disable_interrupts(efx); - - status = PCI_ERS_RESULT_NEED_RESET; - } else { - /* If the interface is disabled we don't want to do anything - * with it. - */ - status = PCI_ERS_RESULT_RECOVERED; - } - - rtnl_unlock(); - - pci_disable_device(pdev); - - return status; -} - -/* Fake a successful reset, which will be performed later in efx_io_resume. */ -static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) -{ - struct efx_nic *efx = pci_get_drvdata(pdev); - pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; - - if (pci_enable_device(pdev)) { - netif_err(efx, hw, efx->net_dev, - "Cannot re-enable PCI device after reset.\n"); - status = PCI_ERS_RESULT_DISCONNECT; - } - - return status; -} - -/* Perform the actual reset and resume I/O operations. 
*/ -static void efx_io_resume(struct pci_dev *pdev) -{ - struct efx_nic *efx = pci_get_drvdata(pdev); - int rc; - - rtnl_lock(); - - if (efx->state == STATE_DISABLED) - goto out; - - rc = efx_reset(efx, RESET_TYPE_ALL); - if (rc) { - netif_err(efx, hw, efx->net_dev, - "efx_reset failed after PCI error (%d)\n", rc); - } else { - efx->state = STATE_READY; - netif_dbg(efx, hw, efx->net_dev, - "Done resetting and resuming IO after PCI error.\n"); - } - -out: - rtnl_unlock(); -} - -/* For simplicity and reliability, we always require a slot reset and try to - * reset the hardware when a pci error affecting the device is detected. - * We leave both the link_reset and mmio_enabled callback unimplemented: - * with our request for slot reset the mmio_enabled callback will never be - * called, and the link_reset callback is not used by AER or EEH mechanisms. - */ -static const struct pci_error_handlers efx_err_handlers = { - .error_detected = efx_io_error_detected, - .slot_reset = efx_io_slot_reset, - .resume = efx_io_resume, -}; - static struct pci_driver efx_pci_driver = { .name = KBUILD_MODNAME, .id_table = efx_pci_table, diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 36c0ab57d3bd..88ade7f41819 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1130,3 +1130,94 @@ void efx_fini_mcdi_logging(struct efx_nic *efx) device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging); } #endif + +/* A PCI error affecting this device was detected. + * At this point MMIO and DMA may be disabled. + * Stop the software path and request a slot reset. + */ +static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, + enum pci_channel_state state) +{ + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + struct efx_nic *efx = pci_get_drvdata(pdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + rtnl_lock(); + + if (efx->state != STATE_DISABLED) { + efx->state = STATE_RECOVERY; + efx->reset_pending = 0; + + efx_device_detach_sync(efx); + + efx_stop_all(efx); + efx_disable_interrupts(efx); + + status = PCI_ERS_RESULT_NEED_RESET; + } else { + /* If the interface is disabled we don't want to do anything + * with it. + */ + status = PCI_ERS_RESULT_RECOVERED; + } + + rtnl_unlock(); + + pci_disable_device(pdev); + + return status; +} + +/* Fake a successful reset, which will be performed later in efx_io_resume. */ +static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; + + if (pci_enable_device(pdev)) { + netif_err(efx, hw, efx->net_dev, + "Cannot re-enable PCI device after reset.\n"); + status = PCI_ERS_RESULT_DISCONNECT; + } + + return status; +} + +/* Perform the actual reset and resume I/O operations. */ +static void efx_io_resume(struct pci_dev *pdev) +{ + struct efx_nic *efx = pci_get_drvdata(pdev); + int rc; + + rtnl_lock(); + + if (efx->state == STATE_DISABLED) + goto out; + + rc = efx_reset(efx, RESET_TYPE_ALL); + if (rc) { + netif_err(efx, hw, efx->net_dev, + "efx_reset failed after PCI error (%d)\n", rc); + } else { + efx->state = STATE_READY; + netif_dbg(efx, hw, efx->net_dev, + "Done resetting and resuming IO after PCI error.\n"); + } + +out: + rtnl_unlock(); +} + +/* For simplicity and reliability, we always require a slot reset and try to + * reset the hardware when a pci error affecting the device is detected. 
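
The policy spelled out in this comment (always request a slot reset, defer the real work to the resume callback) can be followed end to end in the three handlers being moved here. The standalone sketch below walks through that sequence with local stand-ins for pci_ers_result_t and the driver state field; it illustrates the control flow only and is not kernel code.

/* Standalone walk-through of the recovery flow implemented by
 * efx_io_error_detected() / efx_io_slot_reset() / efx_io_resume():
 * quiesce and ask for a slot reset, fake the reset, then do a full
 * driver reset on resume.  The enums are local stand-ins only. */
#include <stdio.h>

enum drv_state { STATE_DISABLED, STATE_READY, STATE_RECOVERY };
enum ers_result { ERS_RECOVERED, ERS_NEED_RESET, ERS_DISCONNECT };

static enum drv_state state = STATE_READY;

static enum ers_result io_error_detected(int perm_failure)
{
	if (perm_failure)
		return ERS_DISCONNECT;
	if (state != STATE_DISABLED) {
		state = STATE_RECOVERY;	/* detach, stop TX/RX, mask IRQs */
		return ERS_NEED_RESET;	/* always ask for a slot reset */
	}
	return ERS_RECOVERED;		/* disabled interface: nothing to do */
}

static enum ers_result io_slot_reset(int reenable_ok)
{
	/* the real handler only re-enables the PCI device; the actual
	 * reset is deferred to the resume callback */
	return reenable_ok ? ERS_RECOVERED : ERS_DISCONNECT;
}

static void io_resume(void)
{
	if (state == STATE_DISABLED)
		return;
	/* the driver performs efx_reset(efx, RESET_TYPE_ALL) here */
	state = STATE_READY;
}

int main(void)
{
	if (io_error_detected(0) == ERS_NEED_RESET &&
	    io_slot_reset(1) == ERS_RECOVERED)
		io_resume();
	printf("driver state after recovery: %s\n",
	       state == STATE_READY ? "READY" : "not ready");
	return 0;
}
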
+ * We leave both the link_reset and mmio_enabled callback unimplemented: + * with our request for slot reset the mmio_enabled callback will never be + * called, and the link_reset callback is not used by AER or EEH mechanisms. + */ +const struct pci_error_handlers efx_err_handlers = { + .error_detected = efx_io_error_detected, + .slot_reset = efx_io_slot_reset, + .resume = efx_io_resume, +}; diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index 93a017aafb9f..68af2af3b5da 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -73,4 +73,5 @@ void efx_link_status_changed(struct efx_nic *efx); unsigned int efx_xdp_max_mtu(struct efx_nic *efx); int efx_change_mtu(struct net_device *net_dev, int new_mtu); +extern const struct pci_error_handlers efx_err_handlers; #endif From 850b722756d6886b3269e548398934e81a3c7503 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:39:32 +0100 Subject: [PATCH 0466/2056] sfc: commonise drain event handling Avoids a call from generic MCDI code into ef10.c. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 8 -------- drivers/net/ethernet/sfc/mcdi.c | 10 +++++++++- drivers/net/ethernet/sfc/nic.h | 1 - 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 3bdb8606512a..efc49869320f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3117,14 +3117,6 @@ static void efx_ef10_ev_test_generate(struct efx_channel *channel) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); } -void efx_ef10_handle_drain_event(struct efx_nic *efx) -{ - if (atomic_dec_and_test(&efx->active_queues)) - wake_up(&efx->flush_wq); - - WARN_ON(atomic_read(&efx->active_queues) < 0); -} - static int efx_ef10_fini_dmaq(struct efx_nic *efx) { struct efx_tx_queue *tx_queue; diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index a8cc3881edce..244fb621d17b 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1299,6 +1299,14 @@ static void efx_mcdi_abandon(struct efx_nic *efx) efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT); } +static void efx_handle_drain_event(struct efx_nic *efx) +{ + if (atomic_dec_and_test(&efx->active_queues)) + wake_up(&efx->flush_wq); + + WARN_ON(atomic_read(&efx->active_queues) < 0); +} + /* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */ void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event) @@ -1371,7 +1379,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN != MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN); if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER)) - efx_ef10_handle_drain_event(efx); + efx_handle_drain_event(efx); break; case MCDI_EVENT_CODE_TX_ERR: case MCDI_EVENT_CODE_RX_ERR: diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 135c43146c13..c24dc55532c2 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -376,7 +376,6 @@ void falcon_stop_nic_stats(struct efx_nic *efx); int falcon_reset_xaui(struct efx_nic *efx); void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); void efx_farch_init_common(struct efx_nic *efx); -void efx_ef10_handle_drain_event(struct efx_nic *efx); void efx_farch_rx_push_indir_table(struct efx_nic *efx); void 
efx_farch_rx_pull_indir_table(struct efx_nic *efx); From 28abe8251b118116fac85f5169effcede547f26e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:36:33 +0100 Subject: [PATCH 0467/2056] sfc: commonise ARFS handling EF100 will use the same approach to ARFS as EF10. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.h | 5 - drivers/net/ethernet/sfc/rx.c | 234 --------------------------- drivers/net/ethernet/sfc/rx_common.c | 234 +++++++++++++++++++++++++++ drivers/net/ethernet/sfc/rx_common.h | 4 + 4 files changed, 238 insertions(+), 239 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 8aadec02407c..e8be786582c0 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -147,11 +147,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, { return efx->type->filter_get_rx_ids(efx, priority, buf, size); } -#ifdef CONFIG_RFS_ACCEL -int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id); -bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); -#endif /* RSS contexts */ static inline bool efx_rss_active(struct efx_rss_context *ctx) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c01916cff507..a201a30b5d29 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -417,237 +417,3 @@ void __efx_rx_packet(struct efx_channel *channel) out: channel->rx_pkt_n_frags = 0; } - -#ifdef CONFIG_RFS_ACCEL - -static void efx_filter_rfs_work(struct work_struct *data) -{ - struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, - work); - struct efx_nic *efx = netdev_priv(req->net_dev); - struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); - int slot_idx = req - efx->rps_slot; - struct efx_arfs_rule *rule; - u16 arfs_id = 0; - int rc; - - rc = efx->type->filter_insert(efx, &req->spec, true); - if (rc >= 0) - /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ - rc %= efx->type->max_rx_ip_filters; - if (efx->rps_hash_table) { - spin_lock_bh(&efx->rps_hash_lock); - rule = efx_rps_hash_find(efx, &req->spec); - /* The rule might have already gone, if someone else's request - * for the same spec was already worked and then expired before - * we got around to our work. In that case we have nothing - * tying us to an arfs_id, meaning that as soon as the filter - * is considered for expiry it will be removed. - */ - if (rule) { - if (rc < 0) - rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; - else - rule->filter_id = rc; - arfs_id = rule->arfs_id; - } - spin_unlock_bh(&efx->rps_hash_lock); - } - if (rc >= 0) { - /* Remember this so we can check whether to expire the filter - * later. - */ - mutex_lock(&efx->rps_mutex); - if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) - channel->rfs_filter_count++; - channel->rps_flow_id[rc] = req->flow_id; - mutex_unlock(&efx->rps_mutex); - - if (req->spec.ether_type == htons(ETH_P_IP)) - netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", - (req->spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", - req->spec.rem_host, ntohs(req->spec.rem_port), - req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc, arfs_id); - else - netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", - (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - req->spec.rem_host, ntohs(req->spec.rem_port), - req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc, arfs_id); - channel->n_rfs_succeeded++; - } else { - if (req->spec.ether_type == htons(ETH_P_IP)) - netif_dbg(efx, rx_status, efx->net_dev, - "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", - (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - req->spec.rem_host, ntohs(req->spec.rem_port), - req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc, arfs_id); - else - netif_dbg(efx, rx_status, efx->net_dev, - "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", - (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", - req->spec.rem_host, ntohs(req->spec.rem_port), - req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc, arfs_id); - channel->n_rfs_failed++; - /* We're overloading the NIC's filter tables, so let's do a - * chunk of extra expiry work. - */ - __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, - 100u)); - } - - /* Release references */ - clear_bit(slot_idx, &efx->rps_slot_map); - dev_put(req->net_dev); -} - -int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, - u16 rxq_index, u32 flow_id) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct efx_async_filter_insertion *req; - struct efx_arfs_rule *rule; - struct flow_keys fk; - int slot_idx; - bool new; - int rc; - - /* find a free slot */ - for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) - if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) - break; - if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) - return -EBUSY; - - if (flow_id == RPS_FLOW_ID_INVALID) { - rc = -EINVAL; - goto out_clear; - } - - if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { - rc = -EPROTONOSUPPORT; - goto out_clear; - } - - if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { - rc = -EPROTONOSUPPORT; - goto out_clear; - } - if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { - rc = -EPROTONOSUPPORT; - goto out_clear; - } - - req = efx->rps_slot + slot_idx; - efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, - efx->rx_scatter ? 
EFX_FILTER_FLAG_RX_SCATTER : 0, - rxq_index); - req->spec.match_flags = - EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | - EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | - EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; - req->spec.ether_type = fk.basic.n_proto; - req->spec.ip_proto = fk.basic.ip_proto; - - if (fk.basic.n_proto == htons(ETH_P_IP)) { - req->spec.rem_host[0] = fk.addrs.v4addrs.src; - req->spec.loc_host[0] = fk.addrs.v4addrs.dst; - } else { - memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, - sizeof(struct in6_addr)); - memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, - sizeof(struct in6_addr)); - } - - req->spec.rem_port = fk.ports.src; - req->spec.loc_port = fk.ports.dst; - - if (efx->rps_hash_table) { - /* Add it to ARFS hash table */ - spin_lock(&efx->rps_hash_lock); - rule = efx_rps_hash_add(efx, &req->spec, &new); - if (!rule) { - rc = -ENOMEM; - goto out_unlock; - } - if (new) - rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; - rc = rule->arfs_id; - /* Skip if existing or pending filter already does the right thing */ - if (!new && rule->rxq_index == rxq_index && - rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) - goto out_unlock; - rule->rxq_index = rxq_index; - rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; - spin_unlock(&efx->rps_hash_lock); - } else { - /* Without an ARFS hash table, we just use arfs_id 0 for all - * filters. This means if multiple flows hash to the same - * flow_id, all but the most recently touched will be eligible - * for expiry. - */ - rc = 0; - } - - /* Queue the request */ - dev_hold(req->net_dev = net_dev); - INIT_WORK(&req->work, efx_filter_rfs_work); - req->rxq_index = rxq_index; - req->flow_id = flow_id; - schedule_work(&req->work); - return rc; -out_unlock: - spin_unlock(&efx->rps_hash_lock); -out_clear: - clear_bit(slot_idx, &efx->rps_slot_map); - return rc; -} - -bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) -{ - bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); - struct efx_nic *efx = channel->efx; - unsigned int index, size, start; - u32 flow_id; - - if (!mutex_trylock(&efx->rps_mutex)) - return false; - expire_one = efx->type->filter_rfs_expire_one; - index = channel->rfs_expire_index; - start = index; - size = efx->type->max_rx_ip_filters; - while (quota) { - flow_id = channel->rps_flow_id[index]; - - if (flow_id != RPS_FLOW_ID_INVALID) { - quota--; - if (expire_one(efx, flow_id, index)) { - netif_info(efx, rx_status, efx->net_dev, - "expired filter %d [channel %u flow %u]\n", - index, channel->channel, flow_id); - channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; - channel->rfs_filter_count--; - } - } - if (++index == size) - index = 0; - /* If we were called with a quota that exceeds the total number - * of filters in the table (which shouldn't happen, but could - * if two callers race), ensure that we don't loop forever - - * stop when we've examined every row of the table. 
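
__efx_filter_rfs_expire() treats the per-channel flow table as a ring: it resumes from the saved index, charges at most quota live entries against the caller, wraps at the table size, and stops after one full lap so a racing caller cannot make it spin forever. A self-contained sketch of that scan pattern follows, with a trivial always-expire predicate standing in for the driver's filter_rfs_expire_one() callback and arbitrary table contents.

/* Self-contained sketch of the quota-bounded ring scan used by
 * __efx_filter_rfs_expire().  expire_one() is a stand-in for the
 * driver's filter_rfs_expire_one() callback. */
#include <stdio.h>

#define TABLE_SIZE   8
#define FLOW_INVALID 0xffffffffu

static unsigned int flow_id[TABLE_SIZE] = {
	5, FLOW_INVALID, 7, FLOW_INVALID, 9, 11, FLOW_INVALID, 13
};

static int expire_one(unsigned int flow, unsigned int index)
{
	(void)flow;
	(void)index;
	return 1;	/* pretend every candidate has gone idle */
}

/* Examine at most 'quota' live entries starting at *expire_index,
 * wrapping at TABLE_SIZE and stopping after one full lap; remember
 * where we got to so the next call resumes from there. */
static void expire_scan(unsigned int *expire_index, unsigned int quota)
{
	unsigned int index = *expire_index, start = index;

	while (quota) {
		if (flow_id[index] != FLOW_INVALID) {
			quota--;
			if (expire_one(flow_id[index], index)) {
				printf("expired filter %u (flow %u)\n",
				       index, flow_id[index]);
				flow_id[index] = FLOW_INVALID;
			}
		}
		if (++index == TABLE_SIZE)
			index = 0;
		if (index == start)	/* full lap: stop even with quota left */
			break;
	}
	*expire_index = index;
}

int main(void)
{
	unsigned int pos = 0;

	expire_scan(&pos, 3);	/* expire up to three filters per call */
	expire_scan(&pos, 3);
	return 0;
}
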
- */ - if (index == start) - break; - } - - channel->rfs_expire_index = index; - mutex_unlock(&efx->rps_mutex); - return true; -} - -#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index e10c23833515..9927e9ecbbb4 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -849,3 +849,237 @@ void efx_remove_filters(struct efx_nic *efx) efx->type->filter_table_remove(efx); up_write(&efx->filter_sem); } + +#ifdef CONFIG_RFS_ACCEL + +static void efx_filter_rfs_work(struct work_struct *data) +{ + struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion, + work); + struct efx_nic *efx = netdev_priv(req->net_dev); + struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); + int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; + int rc; + + rc = efx->type->filter_insert(efx, &req->spec, true); + if (rc >= 0) + /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */ + rc %= efx->type->max_rx_ip_filters; + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } + if (rc >= 0) { + /* Remember this so we can check whether to expire the filter + * later. + */ + mutex_lock(&efx->rps_mutex); + if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID) + channel->rfs_filter_count++; + channel->rps_flow_id[rc] = req->flow_id; + mutex_unlock(&efx->rps_mutex); + + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_info(efx, rx_status, efx->net_dev, + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_info(efx, rx_status, efx->net_dev, + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_succeeded++; + } else { + if (req->spec.ether_type == htons(ETH_P_IP)) + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + else + netif_dbg(efx, rx_status, efx->net_dev, + "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n", + (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", + req->spec.rem_host, ntohs(req->spec.rem_port), + req->spec.loc_host, ntohs(req->spec.loc_port), + req->rxq_index, req->flow_id, rc, arfs_id); + channel->n_rfs_failed++; + /* We're overloading the NIC's filter tables, so let's do a + * chunk of extra expiry work. 
+ */ + __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, + 100u)); + } + + /* Release references */ + clear_bit(slot_idx, &efx->rps_slot_map); + dev_put(req->net_dev); +} + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; + struct flow_keys fk; + int slot_idx; + bool new; + int rc; + + /* find a free slot */ + for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++) + if (!test_and_set_bit(slot_idx, &efx->rps_slot_map)) + break; + if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT) + return -EBUSY; + + if (flow_id == RPS_FLOW_ID_INVALID) { + rc = -EINVAL; + goto out_clear; + } + + if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) { + rc = -EPROTONOSUPPORT; + goto out_clear; + } + + req = efx->rps_slot + slot_idx; + efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT, + efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0, + rxq_index); + req->spec.match_flags = + EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | + EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; + req->spec.ether_type = fk.basic.n_proto; + req->spec.ip_proto = fk.basic.ip_proto; + + if (fk.basic.n_proto == htons(ETH_P_IP)) { + req->spec.rem_host[0] = fk.addrs.v4addrs.src; + req->spec.loc_host[0] = fk.addrs.v4addrs.dst; + } else { + memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src, + sizeof(struct in6_addr)); + memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst, + sizeof(struct in6_addr)); + } + + req->spec.rem_port = fk.ports.src; + req->spec.loc_port = fk.ports.dst; + + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. 
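
efx_filter_rfs() bounds the number of in-flight asynchronous insertions by claiming one of EFX_RPS_MAX_IN_FLIGHT request slots with test_and_set_bit() and releasing it once the work item completes, returning -EBUSY when every slot is taken. Below is a rough userspace analogue of that claim/release pattern, using C11 atomics in place of the kernel bit operations; the slot count is arbitrary.

/* Sketch of the bounded in-flight slot allocation used by efx_filter_rfs():
 * claim a slot with an atomic test-and-set, release it when the work is
 * done.  C11 atomics stand in for test_and_set_bit()/clear_bit(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_IN_FLIGHT 8

static atomic_bool slot_map[MAX_IN_FLIGHT];	/* false = free */

static int claim_slot(void)
{
	int i;

	for (i = 0; i < MAX_IN_FLIGHT; i++)
		if (!atomic_exchange(&slot_map[i], true))
			return i;	/* we now own slot i */
	return -1;			/* table full: caller would see -EBUSY */
}

static void release_slot(int i)
{
	atomic_store(&slot_map[i], false);
}

int main(void)
{
	int a = claim_slot();
	int b = claim_slot();

	printf("claimed slots %d and %d\n", a, b);
	release_slot(a);
	release_slot(b);
	return 0;
}
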
+ */ + rc = 0; + } + + /* Queue the request */ + dev_hold(req->net_dev = net_dev); + INIT_WORK(&req->work, efx_filter_rfs_work); + req->rxq_index = rxq_index; + req->flow_id = flow_id; + schedule_work(&req->work); + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); +out_clear: + clear_bit(slot_idx, &efx->rps_slot_map); + return rc; +} + +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota) +{ + bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); + struct efx_nic *efx = channel->efx; + unsigned int index, size, start; + u32 flow_id; + + if (!mutex_trylock(&efx->rps_mutex)) + return false; + expire_one = efx->type->filter_rfs_expire_one; + index = channel->rfs_expire_index; + start = index; + size = efx->type->max_rx_ip_filters; + while (quota) { + flow_id = channel->rps_flow_id[index]; + + if (flow_id != RPS_FLOW_ID_INVALID) { + quota--; + if (expire_one(efx, flow_id, index)) { + netif_info(efx, rx_status, efx->net_dev, + "expired filter %d [channel %u flow %u]\n", + index, channel->channel, flow_id); + channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; + channel->rfs_filter_count--; + } + } + if (++index == size) + index = 0; + /* If we were called with a quota that exceeds the total number + * of filters in the table (which shouldn't happen, but could + * if two callers race), ensure that we don't loop forever - + * stop when we've examined every row of the table. + */ + if (index == start) + break; + } + + channel->rfs_expire_index = index; + mutex_unlock(&efx->rps_mutex); + return true; +} + +#endif /* CONFIG_RFS_ACCEL */ diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h index c41f12a89477..3508dd316570 100644 --- a/drivers/net/ethernet/sfc/rx_common.h +++ b/drivers/net/ethernet/sfc/rx_common.h @@ -89,6 +89,10 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, const struct efx_filter_spec *spec, bool *new); void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); + +int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota); #endif int efx_probe_filters(struct efx_nic *efx); From 4d9c0a2d64551f33638dd566bb82edb3c3cc782c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 29 Jun 2020 14:36:56 +0100 Subject: [PATCH 0468/2056] sfc: extend common GRO interface to support CHECKSUM_COMPLETE EF100 will use CHECKSUM_COMPLETE, but will also make use of efx_rx_packet_gro(), thus needs to be able to pass the checksum value into that function. Drivers for older NICs pass in a csum of 0 to get the old semantics (use the RX flags for CHECKSUM_UNNECESSARY marking). Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/rx.c | 2 +- drivers/net/ethernet/sfc/rx_common.c | 11 ++++++++--- drivers/net/ethernet/sfc/rx_common.h | 2 +- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index a201a30b5d29..c73b933a9101 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -411,7 +411,7 @@ void __efx_rx_packet(struct efx_channel *channel) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb) - efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh); + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0); else efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags); out: diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 9927e9ecbbb4..fb77c7bbe4af 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -510,7 +510,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) */ void efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, - unsigned int n_frags, u8 *eh) + unsigned int n_frags, u8 *eh, __wsum csum) { struct napi_struct *napi = &channel->napi_str; struct efx_nic *efx = channel->efx; @@ -528,8 +528,13 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, if (efx->net_dev->features & NETIF_F_RXHASH) skb_set_hash(skb, efx_rx_buf_hash(efx, eh), PKT_HASH_TYPE_L3); - skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? - CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + if (csum) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } else { + skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? + CHECKSUM_UNNECESSARY : CHECKSUM_NONE); + } skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL); for (;;) { diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h index 3508dd316570..1672d74f30e2 100644 --- a/drivers/net/ethernet/sfc/rx_common.h +++ b/drivers/net/ethernet/sfc/rx_common.h @@ -67,7 +67,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); void efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, - unsigned int n_frags, u8 *eh); + unsigned int n_frags, u8 *eh, __wsum csum); struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); From d70446ee1f406ef4c6dd299a8482dc98ef07ed0e Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Mon, 29 Jun 2020 21:47:11 +0300 Subject: [PATCH 0469/2056] dpaa2-eth: send a scatter-gather FD instead of realloc-ing Instead of realloc-ing the skb on the Tx path when the provided headroom is smaller than the HW requirements, create a Scatter/Gather frame descriptor with only one entry. Remove the '[drv] tx realloc frames' counter exposed previously through ethtool since it is no longer used. Signed-off-by: Ioana Ciornei Signed-off-by: David S. 
Miller --- .../freescale/dpaa2/dpaa2-eth-debugfs.c | 7 +- .../net/ethernet/freescale/dpaa2/dpaa2-eth.c | 177 +++++++++++++++--- .../net/ethernet/freescale/dpaa2/dpaa2-eth.h | 9 +- .../ethernet/freescale/dpaa2/dpaa2-ethtool.c | 1 - 4 files changed, 160 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index 2880ca02d7e7..5cb357c74dec 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -19,14 +19,14 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) int i; seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); - seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n", + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", - "Tx SG", "Tx realloc", "Enq busy"); + "Tx SG", "Enq busy"); for_each_online_cpu(i) { stats = per_cpu_ptr(priv->percpu_stats, i); extras = per_cpu_ptr(priv->percpu_extras, i); - seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", i, stats->rx_packets, stats->rx_errors, @@ -35,7 +35,6 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) stats->tx_errors, extras->tx_conf_frames, extras->tx_sg_frames, - extras->tx_reallocs, extras->tx_portal_busy); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 712bbfdbe7d7..4a264b75c035 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -685,6 +685,86 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, return err; } +/* Create a SG frame descriptor based on a linear skb. + * + * This function is used on the Tx path when the skb headroom is not large + * enough for the HW requirements, thus instead of realloc-ing the skb we + * create a SG frame descriptor with only one entry. 
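+ *
+ * As a descriptive note on the layout built below: the SGT buffer starts
+ * with the software annotation (an skb backpointer of type
+ * DPAA2_ETH_SWA_SINGLE) and carries a single dpaa2_sg_entry at
+ * priv->tx_data_offset that points at the linear skb data and is marked
+ * as the final entry. SGT buffers are recycled through the per-CPU
+ * sgt_cache (up to DPAA2_ETH_SGT_CACHE_SIZE entries) instead of being
+ * freed on every transmit.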
+ */ +static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_eth_sgt_cache *sgt_cache; + struct dpaa2_sg_entry *sgt; + struct dpaa2_eth_swa *swa; + dma_addr_t addr, sgt_addr; + void *sgt_buf = NULL; + int sgt_buf_size; + int err; + + /* Prepare the HW SGT structure */ + sgt_cache = this_cpu_ptr(priv->sgt_cache); + sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry); + + if (sgt_cache->count == 0) + sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, + GFP_ATOMIC); + else + sgt_buf = sgt_cache->buf[--sgt_cache->count]; + if (unlikely(!sgt_buf)) + return -ENOMEM; + + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + err = -ENOMEM; + goto data_map_failed; + } + + /* Fill in the HW SGT structure */ + dpaa2_sg_set_addr(sgt, addr); + dpaa2_sg_set_len(sgt, skb->len); + dpaa2_sg_set_final(sgt, true); + + /* Store the skb backpointer in the SGT buffer */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->type = DPAA2_ETH_SWA_SINGLE; + swa->single.skb = skb; + swa->sg.sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, sgt_addr))) { + err = -ENOMEM; + goto sgt_map_failed; + } + + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, sgt_addr); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); + + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, sgt_buf); + + return 0; + +sgt_map_failed: + dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL); +data_map_failed: + if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) + kfree(sgt_buf); + else + sgt_cache->buf[sgt_cache->count++] = sgt_buf; + + return err; +} + /* Create a frame descriptor based on a linear skb */ static int build_single_fd(struct dpaa2_eth_priv *priv, struct sk_buff *skb, @@ -743,13 +823,16 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, const struct dpaa2_fd *fd, bool in_napi) { struct device *dev = priv->net_dev->dev.parent; - dma_addr_t fd_addr; + dma_addr_t fd_addr, sg_addr; struct sk_buff *skb = NULL; unsigned char *buffer_start; struct dpaa2_eth_swa *swa; u8 fd_format = dpaa2_fd_get_format(fd); u32 fd_len = dpaa2_fd_get_len(fd); + struct dpaa2_eth_sgt_cache *sgt_cache; + struct dpaa2_sg_entry *sgt; + fd_addr = dpaa2_fd_get_addr(fd); buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); swa = (struct dpaa2_eth_swa *)buffer_start; @@ -769,16 +852,29 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, DMA_BIDIRECTIONAL); } } else if (fd_format == dpaa2_fd_sg) { - skb = swa->sg.skb; + if (swa->type == DPAA2_ETH_SWA_SG) { + skb = swa->sg.skb; - /* Unmap the scatterlist */ - dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, - DMA_BIDIRECTIONAL); - kfree(swa->sg.scl); + /* Unmap the scatterlist */ + dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg, + DMA_BIDIRECTIONAL); + kfree(swa->sg.scl); - /* Unmap the SGT buffer */ - dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, - DMA_BIDIRECTIONAL); + /* Unmap the SGT buffer */ + dma_unmap_single(dev, fd_addr, swa->sg.sgt_size, + DMA_BIDIRECTIONAL); + } else { + skb = swa->single.skb; + + /* Unmap 
the SGT Buffer */ + dma_unmap_single(dev, fd_addr, swa->single.sgt_size, + DMA_BIDIRECTIONAL); + + sgt = (struct dpaa2_sg_entry *)(buffer_start + + priv->tx_data_offset); + sg_addr = dpaa2_sg_get_addr(sgt); + dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL); + } } else { netdev_dbg(priv->net_dev, "Invalid FD format\n"); return; @@ -808,8 +904,17 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, } /* Free SGT buffer allocated on tx */ - if (fd_format != dpaa2_fd_single) - skb_free_frag(buffer_start); + if (fd_format != dpaa2_fd_single) { + sgt_cache = this_cpu_ptr(priv->sgt_cache); + if (swa->type == DPAA2_ETH_SWA_SG) { + skb_free_frag(buffer_start); + } else { + if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE) + kfree(buffer_start); + else + sgt_cache->buf[sgt_cache->count++] = buffer_start; + } + } /* Move on with skb release */ napi_consume_skb(skb, in_napi); @@ -833,22 +938,6 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) percpu_extras = this_cpu_ptr(priv->percpu_extras); needed_headroom = dpaa2_eth_needed_headroom(priv, skb); - if (skb_headroom(skb) < needed_headroom) { - struct sk_buff *ns; - - ns = skb_realloc_headroom(skb, needed_headroom); - if (unlikely(!ns)) { - percpu_stats->tx_dropped++; - goto err_alloc_headroom; - } - percpu_extras->tx_reallocs++; - - if (skb->sk) - skb_set_owner_w(ns, skb->sk); - - dev_kfree_skb(skb); - skb = ns; - } /* We'll be holding a back-reference to the skb until Tx Confirmation; * we don't want that overwritten by a concurrent Tx with a cloned skb. @@ -867,6 +956,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) err = build_sg_fd(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; + } else if (skb_headroom(skb) < needed_headroom) { + err = build_sg_fd_single_buf(priv, skb, &fd); + percpu_extras->tx_sg_frames++; + percpu_extras->tx_sg_bytes += skb->len; } else { err = build_single_fd(priv, skb, &fd); } @@ -924,7 +1017,6 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) return NETDEV_TX_OK; err_build_fd: -err_alloc_headroom: dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -1161,6 +1253,22 @@ static int refill_pool(struct dpaa2_eth_priv *priv, return 0; } +static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_sgt_cache *sgt_cache; + u16 count; + int k, i; + + for_each_online_cpu(k) { + sgt_cache = per_cpu_ptr(priv->sgt_cache, k); + count = sgt_cache->count; + + for (i = 0; i < count; i++) + kfree(sgt_cache->buf[i]); + sgt_cache->count = 0; + } +} + static int pull_channel(struct dpaa2_eth_channel *ch) { int err; @@ -1562,6 +1670,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev) /* Empty the buffer pool */ drain_pool(priv); + /* Empty the Scatter-Gather Buffer cache */ + dpaa2_eth_sgt_cache_drain(priv); + return 0; } @@ -3846,6 +3957,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) goto err_alloc_percpu_extras; } + priv->sgt_cache = alloc_percpu(*priv->sgt_cache); + if (!priv->sgt_cache) { + dev_err(dev, "alloc_percpu(sgt_cache) failed\n"); + err = -ENOMEM; + goto err_alloc_sgt_cache; + } + err = netdev_init(net_dev); if (err) goto err_netdev_init; @@ -3914,6 +4032,8 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) err_alloc_rings: err_csum: err_netdev_init: + free_percpu(priv->sgt_cache); +err_alloc_sgt_cache: free_percpu(priv->percpu_extras); err_alloc_percpu_extras: free_percpu(priv->percpu_stats); @@ -3959,6 +4079,7 @@ 
static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) fsl_mc_free_irqs(ls_dev); free_rings(priv); + free_percpu(priv->sgt_cache); free_percpu(priv->percpu_stats); free_percpu(priv->percpu_extras); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 2d7ada0f0dbd..9e4ceb92f240 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -125,6 +125,7 @@ struct dpaa2_eth_swa { union { struct { struct sk_buff *skb; + int sgt_size; } single; struct { struct sk_buff *skb; @@ -282,7 +283,6 @@ struct dpaa2_eth_drv_stats { __u64 tx_conf_bytes; __u64 tx_sg_frames; __u64 tx_sg_bytes; - __u64 tx_reallocs; __u64 rx_sg_frames; __u64 rx_sg_bytes; /* Enqueues retried due to portal busy */ @@ -395,6 +395,12 @@ struct dpaa2_eth_cls_rule { u8 in_use; }; +#define DPAA2_ETH_SGT_CACHE_SIZE 256 +struct dpaa2_eth_sgt_cache { + void *buf[DPAA2_ETH_SGT_CACHE_SIZE]; + u16 count; +}; + /* Driver private data */ struct dpaa2_eth_priv { struct net_device *net_dev; @@ -409,6 +415,7 @@ struct dpaa2_eth_priv { u8 num_channels; struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; + struct dpaa2_eth_sgt_cache __percpu *sgt_cache; struct dpni_attr dpni_attrs; u16 dpni_ver_major; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index e88269fe3de7..c4cbbcaa9a3f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -43,7 +43,6 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { "[drv] tx conf bytes", "[drv] tx sg frames", "[drv] tx sg bytes", - "[drv] tx realloc frames", "[drv] rx sg frames", "[drv] rx sg bytes", "[drv] enqueue portal busy", From 4c96c0ac16e046ad3625d7b7228664ba478541a5 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Mon, 29 Jun 2020 21:47:12 +0300 Subject: [PATCH 0470/2056] dpaa2-eth: add software counter for Tx frames converted to S/G With the previous commit, in case of insufficient SKB headroom on the Tx path instead of reallocing the SKB we now send a S/G frame descriptor. Export the number of occurences of this case as a per CPU counter (in debugfs) and a total number in the ethtool statistics - "tx converted sg frames'. Signed-off-by: Ioana Ciornei Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 7 ++++--- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 ++ drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 3 +++ drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 2 ++ 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c index 5cb357c74dec..56d9927fbfda 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c @@ -19,14 +19,14 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) int i; seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); - seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", + seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n", "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", - "Tx SG", "Enq busy"); + "Tx SG", "Tx converted to SG", "Enq busy"); for_each_online_cpu(i) { stats = per_cpu_ptr(priv->percpu_stats, i); extras = per_cpu_ptr(priv->percpu_extras, i); - seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", + seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", i, stats->rx_packets, stats->rx_errors, @@ -35,6 +35,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) stats->tx_errors, extras->tx_conf_frames, extras->tx_sg_frames, + extras->tx_converted_sg_frames, extras->tx_portal_busy); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 4a264b75c035..bc1f1e0117b6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -960,6 +960,8 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) err = build_sg_fd_single_buf(priv, skb, &fd); percpu_extras->tx_sg_frames++; percpu_extras->tx_sg_bytes += skb->len; + percpu_extras->tx_converted_sg_frames++; + percpu_extras->tx_converted_sg_bytes += skb->len; } else { err = build_single_fd(priv, skb, &fd); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 9e4ceb92f240..9138a35a68f9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -285,6 +285,9 @@ struct dpaa2_eth_drv_stats { __u64 tx_sg_bytes; __u64 rx_sg_frames; __u64 rx_sg_bytes; + /* Linear skbs sent as a S/G FD due to insufficient headroom */ + __u64 tx_converted_sg_frames; + __u64 tx_converted_sg_bytes; /* Enqueues retried due to portal busy */ __u64 tx_portal_busy; }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index c4cbbcaa9a3f..8356f1fbbee1 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -45,6 +45,8 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { "[drv] tx sg bytes", "[drv] rx sg frames", "[drv] rx sg bytes", + "[drv] tx converted sg frames", + "[drv] tx converted sg bytes", "[drv] enqueue portal busy", /* Channel stats */ "[drv] dequeue portal busy", From 93ec439abeefe2e205657ae2b98a7fee4fbd4a0b Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Wed, 27 May 2020 13:51:32 -0700 Subject: [PATCH 0471/2056] igc: Add initial EEE support IEEE802.3az-2010 Energy Efficient Ethernet has been approved as standard (September 
2010) and the driver can enable and disable it via ethtool. Disable the feature by default on parts which support it. Add enable/disable eee options. tx-lpi, tx-timer and advertise not supported yet. Signed-off-by: Sasha Neftin Reviewed-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc.h | 4 + drivers/net/ethernet/intel/igc/igc_defines.h | 10 +++ drivers/net/ethernet/intel/igc/igc_ethtool.c | 95 ++++++++++++++++++++ drivers/net/ethernet/intel/igc/igc_hw.h | 1 + drivers/net/ethernet/intel/igc/igc_i225.c | 56 ++++++++++++ drivers/net/ethernet/intel/igc/igc_i225.h | 2 + drivers/net/ethernet/intel/igc/igc_main.c | 16 ++++ drivers/net/ethernet/intel/igc/igc_regs.h | 5 ++ 8 files changed, 189 insertions(+) diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index a2d260165df3..9c57afad6afe 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -117,6 +117,9 @@ struct igc_ring { struct igc_adapter { struct net_device *netdev; + struct ethtool_eee eee; + u16 eee_advert; + unsigned long state; unsigned int flags; unsigned int num_q_vectors; @@ -255,6 +258,7 @@ extern char igc_driver_name[]; #define IGC_FLAG_MEDIA_RESET BIT(10) #define IGC_FLAG_MAS_ENABLE BIT(12) #define IGC_FLAG_HAS_MSIX BIT(13) +#define IGC_FLAG_EEE BIT(14) #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 186deb1d9375..ee7fa1c062a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -511,4 +511,14 @@ /* Maximum size of the MTA register table in all supported adapters */ #define MAX_MTA_REG 128 +/* EEE defines */ +#define IGC_IPCNFG_EEE_2_5G_AN 0x00000010 /* IPCNFG EEE Ena 2.5G AN */ +#define IGC_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define IGC_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define IGC_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define IGC_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define IGC_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +#define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 735f3fb47dca..ac331116ea08 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -4,6 +4,7 @@ /* ethtool support for igc */ #include #include +#include #include "igc.h" #include "igc_diag.h" @@ -1548,6 +1549,98 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags) return 0; } +static int igc_ethtool_get_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + u32 eeer; + + if (hw->dev_spec._base.eee_enable) + edata->advertised = + mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + *edata = adapter->eee; + edata->supported = SUPPORTED_Autoneg; + + eeer = rd32(IGC_EEER); + + /* EEE status on negotiated link */ + if (eeer & IGC_EEER_EEE_NEG) + edata->eee_active = true; + + if (eeer & IGC_EEER_TX_LPI_EN) + edata->tx_lpi_enabled = true; + + edata->eee_enabled = hw->dev_spec._base.eee_enable; + + edata->advertised 
= SUPPORTED_Autoneg; + edata->lp_advertised = SUPPORTED_Autoneg; + + /* Report correct negotiated EEE status for devices that + * wrongly report EEE at half-duplex + */ + if (adapter->link_duplex == HALF_DUPLEX) { + edata->eee_enabled = false; + edata->eee_active = false; + edata->tx_lpi_enabled = false; + edata->advertised &= ~edata->advertised; + } + + return 0; +} + +static int igc_ethtool_set_eee(struct net_device *netdev, + struct ethtool_eee *edata) +{ + struct igc_adapter *adapter = netdev_priv(netdev); + struct igc_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + memset(&eee_curr, 0, sizeof(struct ethtool_eee)); + + ret_val = igc_ethtool_get_eee(netdev, &eee_curr); + if (ret_val) { + netdev_err(netdev, + "Problem setting EEE advertisement options\n"); + return -EINVAL; + } + + if (eee_curr.eee_enabled) { + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + netdev_err(netdev, + "Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + /* Tx LPI timer is not implemented currently */ + if (edata->tx_lpi_timer) { + netdev_err(netdev, + "Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + } else if (!edata->eee_enabled) { + netdev_err(netdev, + "Setting EEE options are not supported with EEE disabled\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { + hw->dev_spec._base.eee_enable = edata->eee_enabled; + adapter->flags |= IGC_FLAG_EEE; + + /* reset link */ + if (netif_running(netdev)) + igc_reinit_locked(adapter); + else + igc_reset(adapter); + } + + return 0; +} + static int igc_ethtool_begin(struct net_device *netdev) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -1829,6 +1922,8 @@ static const struct ethtool_ops igc_ethtool_ops = { .set_channels = igc_ethtool_set_channels, .get_priv_flags = igc_ethtool_get_priv_flags, .set_priv_flags = igc_ethtool_set_priv_flags, + .get_eee = igc_ethtool_get_eee, + .set_eee = igc_ethtool_set_eee, .begin = igc_ethtool_begin, .complete = igc_ethtool_complete, .get_link_ksettings = igc_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index af34ae310327..2ab7d9fab6af 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -191,6 +191,7 @@ struct igc_fc_info { struct igc_dev_spec_base { bool clear_semaphore_once; + bool eee_enable; }; struct igc_hw { diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index c25f555aaf82..3a4e982edb67 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -488,3 +488,59 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw) } return 0; } + +/** + * igc_set_eee_i225 - Enable/disable EEE support + * @hw: pointer to the HW structure + * @adv2p5G: boolean flag enabling 2.5G EEE advertisement + * @adv1G: boolean flag enabling 1G EEE advertisement + * @adv100M: boolean flag enabling 100M EEE advertisement + * + * Enable/disable EEE based on setting in dev_spec structure. 
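+ *
+ * When dev_spec._base.eee_enable is set, the per-speed advertisement bits
+ * in IPCNFG and the Tx/Rx LPI and LPI-on-flow-control bits in EEER are
+ * programmed from the adv2p5G/adv1G/adv100M flags; when it is clear, all
+ * of those bits are cleared. Both registers are read back after the
+ * update.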
+ **/ +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M) +{ + u32 ipcnfg, eeer; + + ipcnfg = rd32(IGC_IPCNFG); + eeer = rd32(IGC_EEER); + + /* enable or disable per user setting */ + if (hw->dev_spec._base.eee_enable) { + u32 eee_su = rd32(IGC_EEE_SU); + + if (adv100M) + ipcnfg |= IGC_IPCNFG_EEE_100M_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN; + + if (adv1G) + ipcnfg |= IGC_IPCNFG_EEE_1G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN; + + if (adv2p5G) + ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN; + else + ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN; + + eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + + /* This bit should not be set in normal operation. */ + if (eee_su & IGC_EEE_SU_LPI_CLK_STP) + hw_dbg("LPI Clock Stop Bit should not be set!\n"); + } else { + ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN | + IGC_IPCNFG_EEE_100M_AN); + eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN | + IGC_EEER_LPI_FC); + } + wr32(IGC_IPCNFG, ipcnfg); + wr32(IGC_EEER, eeer); + rd32(IGC_IPCNFG); + rd32(IGC_EEER); + + return IGC_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h index 7b66e1f9c0e6..04759e076a9e 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.h +++ b/drivers/net/ethernet/intel/igc/igc_i225.h @@ -9,5 +9,7 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask); s32 igc_init_nvm_params_i225(struct igc_hw *hw); bool igc_get_flash_presence_i225(struct igc_hw *hw); +s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, + bool adv100M); #endif diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index c2f41a558fd6..7e4d56c7b4c4 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -102,6 +102,9 @@ void igc_reset(struct igc_adapter *adapter) if (hw->mac.ops.init_hw(hw)) netdev_err(dev, "Error on hardware initialization\n"); + /* Re-establish EEE setting */ + igc_set_eee_i225(hw, true, true, true); + if (!netif_running(adapter->netdev)) igc_power_down_link(adapter); @@ -4252,6 +4255,15 @@ static void igc_watchdog_task(struct work_struct *work) (ctrl & IGC_CTRL_RFCE) ? "RX" : (ctrl & IGC_CTRL_TFCE) ? "TX" : "None"); + /* disable EEE if enabled */ + if ((adapter->flags & IGC_FLAG_EEE) && + adapter->link_duplex == HALF_DUPLEX) { + netdev_info(netdev, + "EEE Disabled: unsupported at half duplex. 
Re-enable using ethtool when at full duplex\n"); + adapter->hw.dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + } + /* check if SmartSpeed worked */ igc_check_downshift(hw); if (phy->speed_downgraded) @@ -5182,6 +5194,10 @@ static int igc_probe(struct pci_dev *pdev, netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* Disable EEE for internal PHY devices */ + hw->dev_spec._base.eee_enable = false; + adapter->flags &= ~IGC_FLAG_EEE; + igc_set_eee_i225(hw, false, false, false); pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 232e82dec62e..75e040a5d46f 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -248,6 +248,11 @@ /* Wake Up packet memory */ #define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +/* Energy Efficient Ethernet "EEE" registers */ +#define IGC_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define IGC_EEE_SU 0x0E34 /* EEE Setup */ + /* forward declaration */ struct igc_hw; u32 igc_rd32(struct igc_hw *hw, u32 reg); From a2af44b64c8a60f9aba90e72b1ff872ab6a6dd6b Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:12 +0300 Subject: [PATCH 0472/2056] mlxsw: spectrum_dcb: Rename mlxsw_sp_port_headroom_set() mlxsw_sp_port_headroom_set() is defined twice - in spectrum.c and in spectrum_dcb.c, with different arguments and different implementation but the name is same. Rename mlxsw_sp_port_headroom_set() to mlxsw_sp_port_headroom_ets_set() in order to allow using the second function in several files, and not only as static function in spectrum.c. Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index f8e3d635b9e2..0d3fb2e51ea5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -110,8 +110,8 @@ static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } -static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct ieee_ets *ets) +static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) { bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; @@ -180,7 +180,7 @@ static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port, } /* Ingress configuration. */ - err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets); + err = mlxsw_sp_port_headroom_ets_set(mlxsw_sp_port, ets); if (err) goto err_port_headroom_set; From 614d509aa1e7854381465e9645aa5ee565a6c890 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:13 +0300 Subject: [PATCH 0473/2056] mlxsw: Move ethtool_ops to spectrum_ethtool.c Add spectrum_ethtool.c file for ethtool code. Move ethtool_ops and the relevant code from spectrum.c to spectrum_ethtool.c. 
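(A minimal sketch of the resulting layout, with the long stats tables and the remaining callbacks elided. The extern declaration matches the one this patch adds to spectrum.h; the spectrum.c hookup shown is an assumption that the existing assignment made at port creation stays as it was.)

/* spectrum.h */
extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops;

/* spectrum_ethtool.c */
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo	= mlxsw_sp_port_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	/* ... remaining callbacks, moved verbatim from spectrum.c ... */
};

/* spectrum.c, port creation (assumed unchanged by this patch) */
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;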
Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 880 +----------------- .../net/ethernet/mellanox/mlxsw/spectrum.h | 43 + .../mellanox/mlxsw/spectrum_ethtool.c | 841 +++++++++++++++++ 4 files changed, 892 insertions(+), 875 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 4aeabb35c943..3709983fbd77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -30,7 +30,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_mr_tcam.o spectrum_mr.o \ spectrum_qdisc.o spectrum_span.o \ spectrum_nve.o spectrum_nve_vxlan.o \ - spectrum_dpipe.o spectrum_trap.o + spectrum_dpipe.o spectrum_trap.o \ + spectrum_ethtool.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a5c5363915dc..d508edccc54b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -94,7 +94,6 @@ static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = { static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3"; -static const char mlxsw_sp_driver_version[] = "1.0"; static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 @@ -176,39 +175,6 @@ struct mlxsw_sp_mlxfw_dev { struct mlxsw_sp *mlxsw_sp; }; -struct mlxsw_sp_ptp_ops { - struct mlxsw_sp_ptp_clock * - (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); - void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); - - struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); - void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); - - /* Notify a driver that a packet that might be PTP was received. Driver - * is responsible for freeing the passed-in SKB. - */ - void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); - - /* Notify a driver that a timestamped packet was transmitted. Driver - * is responsible for freeing the passed-in SKB. 
- */ - void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, - u8 local_port); - - int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); - int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, - struct hwtstamp_config *config); - void (*shaper_work)(struct work_struct *work); - int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); - int (*get_stats_count)(void); - void (*get_stats_strings)(u8 **p); - void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, - u64 *data, int data_index); -}; - struct mlxsw_sp_span_ops { u32 (*buffsize_get)(int mtu, u32 speed); }; @@ -595,8 +561,8 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) return 0; } -static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, - bool is_up) +int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool is_up) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char paos_pl[MLXSW_REG_PAOS_LEN]; @@ -1010,8 +976,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } -static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, - int mtu, bool pause_en) +int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + int mtu, bool pause_en) { u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; bool dcb_en = !!mlxsw_sp_port->dcb.ets; @@ -1103,8 +1069,8 @@ static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device return -EINVAL; } -static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, - int prio, char *ppcnt_pl) +int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, + int prio, char *ppcnt_pl) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -1562,635 +1528,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_do_ioctl = mlxsw_sp_port_ioctl, }; -static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - - strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, - sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, mlxsw_sp_driver_version, - sizeof(drvinfo->version)); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d", - mlxsw_sp->bus_info->fw_rev.major, - mlxsw_sp->bus_info->fw_rev.minor, - mlxsw_sp->bus_info->fw_rev.subminor); - strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, - sizeof(drvinfo->bus_info)); -} - -static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - - pause->rx_pause = mlxsw_sp_port->link.rx_pause; - pause->tx_pause = mlxsw_sp_port->link.tx_pause; -} - -static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct ethtool_pauseparam *pause) -{ - char pfcc_pl[MLXSW_REG_PFCC_LEN]; - - mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); - mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); - mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); - - return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), - pfcc_pl); -} - -static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - bool pause_en = 
pause->tx_pause || pause->rx_pause; - int err; - - if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { - netdev_err(dev, "PFC already enabled on port\n"); - return -EINVAL; - } - - if (pause->autoneg) { - netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); - return -EINVAL; - } - - err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); - if (err) { - netdev_err(dev, "Failed to configure port's headroom\n"); - return err; - } - - err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); - if (err) { - netdev_err(dev, "Failed to set PAUSE parameters\n"); - goto err_port_pause_configure; - } - - mlxsw_sp_port->link.rx_pause = pause->rx_pause; - mlxsw_sp_port->link.tx_pause = pause->tx_pause; - - return 0; - -err_port_pause_configure: - pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); - mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); - return err; -} - -struct mlxsw_sp_port_hw_stats { - char str[ETH_GSTRING_LEN]; - u64 (*getter)(const char *payload); - bool cells_bytes; -}; - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { - { - .str = "a_frames_transmitted_ok", - .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, - }, - { - .str = "a_frames_received_ok", - .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, - }, - { - .str = "a_frame_check_sequence_errors", - .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, - }, - { - .str = "a_alignment_errors", - .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, - }, - { - .str = "a_octets_transmitted_ok", - .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, - }, - { - .str = "a_octets_received_ok", - .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, - }, - { - .str = "a_multicast_frames_xmitted_ok", - .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, - }, - { - .str = "a_broadcast_frames_xmitted_ok", - .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, - }, - { - .str = "a_multicast_frames_received_ok", - .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, - }, - { - .str = "a_broadcast_frames_received_ok", - .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, - }, - { - .str = "a_in_range_length_errors", - .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, - }, - { - .str = "a_out_of_range_length_field", - .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, - }, - { - .str = "a_frame_too_long_errors", - .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, - }, - { - .str = "a_symbol_error_during_carrier", - .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, - }, - { - .str = "a_mac_control_frames_transmitted", - .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, - }, - { - .str = "a_mac_control_frames_received", - .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, - }, - { - .str = "a_unsupported_opcodes_received", - .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, - }, - { - .str = "a_pause_mac_ctrl_frames_received", - .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, - }, - { - .str = "a_pause_mac_ctrl_frames_xmitted", - .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, - }, -}; - -#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { - { - .str = "if_in_discards", - .getter = mlxsw_reg_ppcnt_if_in_discards_get, - }, - { - .str = "if_out_discards", - .getter = mlxsw_reg_ppcnt_if_out_discards_get, - }, - { - .str = "if_out_errors", - 
.getter = mlxsw_reg_ppcnt_if_out_errors_get, - }, -}; - -#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ - ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { - { - .str = "ether_stats_undersize_pkts", - .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, - }, - { - .str = "ether_stats_oversize_pkts", - .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, - }, - { - .str = "ether_stats_fragments", - .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, - }, - { - .str = "ether_pkts64octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, - }, - { - .str = "ether_pkts65to127octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, - }, - { - .str = "ether_pkts128to255octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, - }, - { - .str = "ether_pkts256to511octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, - }, - { - .str = "ether_pkts512to1023octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, - }, - { - .str = "ether_pkts1024to1518octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, - }, - { - .str = "ether_pkts1519to2047octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, - }, - { - .str = "ether_pkts2048to4095octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, - }, - { - .str = "ether_pkts4096to8191octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, - }, - { - .str = "ether_pkts8192to10239octets", - .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, - }, -}; - -#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ - ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { - { - .str = "dot3stats_fcs_errors", - .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, - }, - { - .str = "dot3stats_symbol_errors", - .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, - }, - { - .str = "dot3control_in_unknown_opcodes", - .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, - }, - { - .str = "dot3in_pause_frames", - .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, - }, -}; - -#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ - ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = { - { - .str = "ecn_marked", - .getter = mlxsw_reg_ppcnt_ecn_marked_get, - }, -}; - -#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { - { - .str = "discard_ingress_general", - .getter = mlxsw_reg_ppcnt_ingress_general_get, - }, - { - .str = "discard_ingress_policy_engine", - .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, - }, - { - .str = "discard_ingress_vlan_membership", - .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, - }, - { - .str = "discard_ingress_tag_frame_type", - .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, - }, - { - .str = "discard_egress_vlan_membership", - .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, - }, - { - .str = "discard_loopback_filter", - .getter = mlxsw_reg_ppcnt_loopback_filter_get, - }, - { - .str = "discard_egress_general", - .getter = mlxsw_reg_ppcnt_egress_general_get, - }, - { - .str = "discard_egress_hoq", - .getter = mlxsw_reg_ppcnt_egress_hoq_get, - }, - { - .str = "discard_egress_policy_engine", - .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, - }, - { - .str 
= "discard_ingress_tx_link_down", - .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, - }, - { - .str = "discard_egress_stp_filter", - .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, - }, - { - .str = "discard_egress_sll", - .getter = mlxsw_reg_ppcnt_egress_sll_get, - }, -}; - -#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ - ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { - { - .str = "rx_octets_prio", - .getter = mlxsw_reg_ppcnt_rx_octets_get, - }, - { - .str = "rx_frames_prio", - .getter = mlxsw_reg_ppcnt_rx_frames_get, - }, - { - .str = "tx_octets_prio", - .getter = mlxsw_reg_ppcnt_tx_octets_get, - }, - { - .str = "tx_frames_prio", - .getter = mlxsw_reg_ppcnt_tx_frames_get, - }, - { - .str = "rx_pause_prio", - .getter = mlxsw_reg_ppcnt_rx_pause_get, - }, - { - .str = "rx_pause_duration_prio", - .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, - }, - { - .str = "tx_pause_prio", - .getter = mlxsw_reg_ppcnt_tx_pause_get, - }, - { - .str = "tx_pause_duration_prio", - .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, - }, -}; - -#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) - -static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { - { - .str = "tc_transmit_queue_tc", - .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, - .cells_bytes = true, - }, - { - .str = "tc_no_buffer_discard_uc_tc", - .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, - }, -}; - -#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) - -#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ - MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ - MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ - MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ - MLXSW_SP_PORT_HW_EXT_STATS_LEN + \ - MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ - (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ - IEEE_8021QAZ_MAX_TCS) + \ - (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ - TC_MAX_QUEUE)) - -static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) -{ - int i; - - for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", - mlxsw_sp_port_hw_prio_stats[i].str, prio); - *p += ETH_GSTRING_LEN; - } -} - -static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) -{ - int i; - - for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { - snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", - mlxsw_sp_port_hw_tc_stats[i].str, tc); - *p += ETH_GSTRING_LEN; - } -} - -static void mlxsw_sp_port_get_strings(struct net_device *dev, - u32 stringset, u8 *data) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { - memcpy(p, mlxsw_sp_port_hw_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { - memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { - memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { - memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) { - memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { 
- memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - mlxsw_sp_port_get_prio_strings(&p, i); - - for (i = 0; i < TC_MAX_QUEUE; i++) - mlxsw_sp_port_get_tc_strings(&p, i); - - mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); - break; - } -} - -static int mlxsw_sp_port_set_phys_id(struct net_device *dev, - enum ethtool_phys_id_state state) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char mlcr_pl[MLXSW_REG_MLCR_LEN]; - bool active; - - switch (state) { - case ETHTOOL_ID_ACTIVE: - active = true; - break; - case ETHTOOL_ID_INACTIVE: - active = false; - break; - default: - return -EOPNOTSUPP; - } - - mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); -} - -static int -mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, - int *p_len, enum mlxsw_reg_ppcnt_grp grp) -{ - switch (grp) { - case MLXSW_REG_PPCNT_IEEE_8023_CNT: - *p_hw_stats = mlxsw_sp_port_hw_stats; - *p_len = MLXSW_SP_PORT_HW_STATS_LEN; - break; - case MLXSW_REG_PPCNT_RFC_2863_CNT: - *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; - *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; - break; - case MLXSW_REG_PPCNT_RFC_2819_CNT: - *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; - *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; - break; - case MLXSW_REG_PPCNT_RFC_3635_CNT: - *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; - *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; - break; - case MLXSW_REG_PPCNT_EXT_CNT: - *p_hw_stats = mlxsw_sp_port_hw_ext_stats; - *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; - break; - case MLXSW_REG_PPCNT_DISCARD_CNT: - *p_hw_stats = mlxsw_sp_port_hw_discard_stats; - *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; - break; - case MLXSW_REG_PPCNT_PRIO_CNT: - *p_hw_stats = mlxsw_sp_port_hw_prio_stats; - *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; - break; - case MLXSW_REG_PPCNT_TC_CNT: - *p_hw_stats = mlxsw_sp_port_hw_tc_stats; - *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; - break; - default: - WARN_ON(1); - return -EOPNOTSUPP; - } - return 0; -} - -static void __mlxsw_sp_port_get_stats(struct net_device *dev, - enum mlxsw_reg_ppcnt_grp grp, int prio, - u64 *data, int data_index) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port_hw_stats *hw_stats; - char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; - int i, len; - int err; - - err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); - if (err) - return; - mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); - for (i = 0; i < len; i++) { - data[data_index + i] = hw_stats[i].getter(ppcnt_pl); - if (!hw_stats[i].cells_bytes) - continue; - data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, - data[data_index + i]); - } -} - -static void mlxsw_sp_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - int i, data_index = 0; - - /* IEEE 802.3 Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, - data, data_index); - data_index = MLXSW_SP_PORT_HW_STATS_LEN; - - /* RFC 2863 Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, - data, data_index); - data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; - - /* RFC 2819 Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, - data, 
data_index); - data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; - - /* RFC 3635 Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, - data, data_index); - data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; - - /* Extended Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, - data, data_index); - data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN; - - /* Discard Counters */ - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, - data, data_index); - data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; - - /* Per-Priority Counters */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, - data, data_index); - data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; - } - - /* Per-TC Counters */ - for (i = 0; i < TC_MAX_QUEUE; i++) { - __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, - data, data_index); - data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; - } - - /* PTP counters */ - mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, - data, data_index); - data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); -} - -static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - - switch (sset) { - case ETH_SS_STATS: - return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + - mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); - default: - return -EOPNOTSUPP; - } -} - struct mlxsw_sp1_port_link_mode { enum ethtool_link_mode_bit_indices mask_ethtool; u32 mask; @@ -2851,211 +2188,6 @@ mlxsw_sp2_port_type_speed_ops = { .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, }; -static void -mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, - u8 width, struct ethtool_link_ksettings *cmd) -{ - const struct mlxsw_sp_port_type_speed_ops *ops; - - ops = mlxsw_sp->port_type_speed_ops; - - ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); - ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); - - ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); - ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, - cmd->link_modes.supported); -} - -static void -mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, - u32 eth_proto_admin, bool autoneg, u8 width, - struct ethtool_link_ksettings *cmd) -{ - const struct mlxsw_sp_port_type_speed_ops *ops; - - ops = mlxsw_sp->port_type_speed_ops; - - if (!autoneg) - return; - - ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); - ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, - cmd->link_modes.advertising); -} - -static u8 -mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) -{ - switch (connector_type) { - case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: - return PORT_OTHER; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: - return PORT_NONE; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: - return PORT_TP; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: - return PORT_AUI; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: - return PORT_BNC; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: - return PORT_MII; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: - return PORT_FIBRE; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: - return PORT_DA; - case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: - return PORT_OTHER; - default: - WARN_ON_ONCE(1); - return PORT_OTHER; - } -} - -static int mlxsw_sp_port_get_link_ksettings(struct 
net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - const struct mlxsw_sp_port_type_speed_ops *ops; - char ptys_pl[MLXSW_REG_PTYS_LEN]; - u8 connector_type; - bool autoneg; - int err; - - ops = mlxsw_sp->port_type_speed_ops; - - autoneg = mlxsw_sp_port->link.autoneg; - ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, - 0, false); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); - if (err) - return err; - ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, - ð_proto_admin, ð_proto_oper); - - mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, - mlxsw_sp_port->mapping.width, cmd); - - mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, - mlxsw_sp_port->mapping.width, cmd); - - cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; - connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); - cmd->base.port = mlxsw_sp_port_connector_port(connector_type); - ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), - eth_proto_oper, cmd); - - return 0; -} - -static int -mlxsw_sp_port_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - const struct mlxsw_sp_port_type_speed_ops *ops; - char ptys_pl[MLXSW_REG_PTYS_LEN]; - u32 eth_proto_cap, eth_proto_new; - bool autoneg; - int err; - - ops = mlxsw_sp->port_type_speed_ops; - - ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, - 0, false); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); - if (err) - return err; - ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, ð_proto_cap, NULL, NULL); - - autoneg = cmd->base.autoneg == AUTONEG_ENABLE; - eth_proto_new = autoneg ? 
- ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, - cmd) : - ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, - cmd->base.speed); - - eth_proto_new = eth_proto_new & eth_proto_cap; - if (!eth_proto_new) { - netdev_err(dev, "No supported speed requested\n"); - return -EINVAL; - } - - ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, - eth_proto_new, autoneg); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); - if (err) - return err; - - mlxsw_sp_port->link.autoneg = autoneg; - - if (!netif_running(dev)) - return 0; - - mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); - mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); - - return 0; -} - -static int mlxsw_sp_get_module_info(struct net_device *netdev, - struct ethtool_modinfo *modinfo) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - int err; - - err = mlxsw_env_get_module_info(mlxsw_sp->core, - mlxsw_sp_port->mapping.module, - modinfo); - - return err; -} - -static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, - struct ethtool_eeprom *ee, - u8 *data) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - int err; - - err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, - mlxsw_sp_port->mapping.module, ee, - data); - - return err; -} - -static int -mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) -{ - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - - return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); -} - -static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { - .get_drvinfo = mlxsw_sp_port_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_pauseparam = mlxsw_sp_port_get_pauseparam, - .set_pauseparam = mlxsw_sp_port_set_pauseparam, - .get_strings = mlxsw_sp_port_get_strings, - .set_phys_id = mlxsw_sp_port_set_phys_id, - .get_ethtool_stats = mlxsw_sp_port_get_stats, - .get_sset_count = mlxsw_sp_port_get_sset_count, - .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, - .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, - .get_module_info = mlxsw_sp_get_module_info, - .get_module_eeprom = mlxsw_sp_get_module_eeprom, - .get_ts_info = mlxsw_sp_get_ts_info, -}; - static int mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3e794d58184b..e29678b65a8a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -178,6 +178,39 @@ struct mlxsw_sp { u32 lowest_shaper_bs; }; +struct mlxsw_sp_ptp_ops { + struct mlxsw_sp_ptp_clock * + (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev); + void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock); + + struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp); + void (*fini)(struct mlxsw_sp_ptp_state *ptp_state); + + /* Notify a driver that a packet that might be PTP was received. Driver + * is responsible for freeing the passed-in SKB. + */ + void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, + u8 local_port); + + /* Notify a driver that a timestamped packet was transmitted. Driver + * is responsible for freeing the passed-in SKB. 
+ */ + void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, + u8 local_port); + + int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port, + struct hwtstamp_config *config); + int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port, + struct hwtstamp_config *config); + void (*shaper_work)(struct work_struct *work); + int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, + struct ethtool_ts_info *info); + int (*get_stats_count)(void); + void (*get_stats_strings)(u8 **p); + void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, + u64 *data, int data_index); +}; + static inline struct mlxsw_sp_upper * mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id) { @@ -393,6 +426,13 @@ enum mlxsw_sp_flood_type { MLXSW_SP_FLOOD_TYPE_MC, }; +int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + int mtu, bool pause_en); +int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp, + int prio, char *ppcnt_pl); +int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool is_up); + /* spectrum_buffers.c */ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp); @@ -1126,4 +1166,7 @@ static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp) return mlxsw_core_net(mlxsw_sp->core); } +/* spectrum_ethtool.c */ +extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops; + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c new file mode 100644 index 000000000000..7c03c749b563 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */ + +#include "reg.h" +#include "spectrum.h" +#include "core_env.h" + +static const char mlxsw_sp_driver_version[] = "1.0"; + +static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mlxsw_sp_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + mlxsw_sp->bus_info->fw_rev.major, + mlxsw_sp->bus_info->fw_rev.minor, + mlxsw_sp->bus_info->fw_rev.subminor); + strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, + sizeof(drvinfo->bus_info)); +} + +static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + pause->rx_pause = mlxsw_sp_port->link.rx_pause; + pause->tx_pause = mlxsw_sp_port->link.tx_pause; +} + +static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ethtool_pauseparam *pause) +{ + char pfcc_pl[MLXSW_REG_PFCC_LEN]; + + mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); + + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), + pfcc_pl); +} + +static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = pause->tx_pause || pause->rx_pause; + int err; + + if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { + 
netdev_err(dev, "PFC already enabled on port\n"); + return -EINVAL; + } + + if (pause->autoneg) { + netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + if (err) { + netdev_err(dev, "Failed to configure port's headroom\n"); + return err; + } + + err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); + if (err) { + netdev_err(dev, "Failed to set PAUSE parameters\n"); + goto err_port_pause_configure; + } + + mlxsw_sp_port->link.rx_pause = pause->rx_pause; + mlxsw_sp_port->link.tx_pause = pause->tx_pause; + + return 0; + +err_port_pause_configure: + pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + return err; +} + +struct mlxsw_sp_port_hw_stats { + char str[ETH_GSTRING_LEN]; + u64 (*getter)(const char *payload); + bool cells_bytes; +}; + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { + { + .str = "a_frames_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, + }, + { + .str = "a_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, + }, + { + .str = "a_frame_check_sequence_errors", + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, + }, + { + .str = "a_alignment_errors", + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, + }, + { + .str = "a_octets_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, + }, + { + .str = "a_octets_received_ok", + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, + }, + { + .str = "a_multicast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, + }, + { + .str = "a_broadcast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, + }, + { + .str = "a_multicast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, + }, + { + .str = "a_broadcast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, + }, + { + .str = "a_in_range_length_errors", + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, + }, + { + .str = "a_out_of_range_length_field", + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, + }, + { + .str = "a_frame_too_long_errors", + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, + }, + { + .str = "a_symbol_error_during_carrier", + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, + }, + { + .str = "a_mac_control_frames_transmitted", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, + }, + { + .str = "a_mac_control_frames_received", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, + }, + { + .str = "a_unsupported_opcodes_received", + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_received", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_xmitted", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, + }, +}; + +#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { + { + .str = "if_in_discards", + .getter = mlxsw_reg_ppcnt_if_in_discards_get, + }, + { + .str = "if_out_discards", + .getter = mlxsw_reg_ppcnt_if_out_discards_get, + }, + { + .str = "if_out_errors", + .getter = mlxsw_reg_ppcnt_if_out_errors_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ + 
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { + { + .str = "ether_stats_undersize_pkts", + .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, + }, + { + .str = "ether_stats_oversize_pkts", + .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, + }, + { + .str = "ether_stats_fragments", + .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, + }, + { + .str = "ether_pkts64octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, + }, + { + .str = "ether_pkts65to127octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get, + }, + { + .str = "ether_pkts128to255octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get, + }, + { + .str = "ether_pkts256to511octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get, + }, + { + .str = "ether_pkts512to1023octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get, + }, + { + .str = "ether_pkts1024to1518octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get, + }, + { + .str = "ether_pkts1519to2047octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get, + }, + { + .str = "ether_pkts2048to4095octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get, + }, + { + .str = "ether_pkts4096to8191octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get, + }, + { + .str = "ether_pkts8192to10239octets", + .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { + { + .str = "dot3stats_fcs_errors", + .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, + }, + { + .str = "dot3stats_symbol_errors", + .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, + }, + { + .str = "dot3control_in_unknown_opcodes", + .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, + }, + { + .str = "dot3in_pause_frames", + .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = { + { + .str = "ecn_marked", + .getter = mlxsw_reg_ppcnt_ecn_marked_get, + }, +}; + +#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = { + { + .str = "discard_ingress_general", + .getter = mlxsw_reg_ppcnt_ingress_general_get, + }, + { + .str = "discard_ingress_policy_engine", + .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, + }, + { + .str = "discard_ingress_vlan_membership", + .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, + }, + { + .str = "discard_ingress_tag_frame_type", + .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, + }, + { + .str = "discard_egress_vlan_membership", + .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, + }, + { + .str = "discard_loopback_filter", + .getter = mlxsw_reg_ppcnt_loopback_filter_get, + }, + { + .str = "discard_egress_general", + .getter = mlxsw_reg_ppcnt_egress_general_get, + }, + { + .str = "discard_egress_hoq", + .getter = mlxsw_reg_ppcnt_egress_hoq_get, + }, + { + .str = "discard_egress_policy_engine", + .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, + }, + { + .str = "discard_ingress_tx_link_down", + .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, + }, + { + .str 
= "discard_egress_stp_filter", + .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, + }, + { + .str = "discard_egress_sll", + .getter = mlxsw_reg_ppcnt_egress_sll_get, + }, +}; + +#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { + { + .str = "rx_octets_prio", + .getter = mlxsw_reg_ppcnt_rx_octets_get, + }, + { + .str = "rx_frames_prio", + .getter = mlxsw_reg_ppcnt_rx_frames_get, + }, + { + .str = "tx_octets_prio", + .getter = mlxsw_reg_ppcnt_tx_octets_get, + }, + { + .str = "tx_frames_prio", + .getter = mlxsw_reg_ppcnt_tx_frames_get, + }, + { + .str = "rx_pause_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_get, + }, + { + .str = "rx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, + }, + { + .str = "tx_pause_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_get, + }, + { + .str = "tx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, + }, +}; + +#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { + { + .str = "tc_transmit_queue_tc", + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get, + .cells_bytes = true, + }, + { + .str = "tc_no_buffer_discard_uc_tc", + .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, + }, +}; + +#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) + +#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ + MLXSW_SP_PORT_HW_EXT_STATS_LEN + \ + MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ + (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ + IEEE_8021QAZ_MAX_TCS) + \ + (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ + TC_MAX_QUEUE)) + +static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", + mlxsw_sp_port_hw_prio_stats[i].str, prio); + *p += ETH_GSTRING_LEN; + } +} + +static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d", + mlxsw_sp_port_hw_tc_stats[i].str, tc); + *p += ETH_GSTRING_LEN; + } +} + +static void mlxsw_sp_port_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + 
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_prio_strings(&p, i); + + for (i = 0; i < TC_MAX_QUEUE; i++) + mlxsw_sp_port_get_tc_strings(&p, i); + + mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p); + break; + } +} + +static int mlxsw_sp_port_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char mlcr_pl[MLXSW_REG_MLCR_LEN]; + bool active; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + active = true; + break; + case ETHTOOL_ID_INACTIVE: + active = false; + break; + default: + return -EOPNOTSUPP; + } + + mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); +} + +static int +mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, + int *p_len, enum mlxsw_reg_ppcnt_grp grp) +{ + switch (grp) { + case MLXSW_REG_PPCNT_IEEE_8023_CNT: + *p_hw_stats = mlxsw_sp_port_hw_stats; + *p_len = MLXSW_SP_PORT_HW_STATS_LEN; + break; + case MLXSW_REG_PPCNT_RFC_2863_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; + break; + case MLXSW_REG_PPCNT_RFC_2819_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + break; + case MLXSW_REG_PPCNT_RFC_3635_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; + break; + case MLXSW_REG_PPCNT_EXT_CNT: + *p_hw_stats = mlxsw_sp_port_hw_ext_stats; + *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN; + break; + case MLXSW_REG_PPCNT_DISCARD_CNT: + *p_hw_stats = mlxsw_sp_port_hw_discard_stats; + *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; + break; + case MLXSW_REG_PPCNT_PRIO_CNT: + *p_hw_stats = mlxsw_sp_port_hw_prio_stats; + *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + break; + case MLXSW_REG_PPCNT_TC_CNT: + *p_hw_stats = mlxsw_sp_port_hw_tc_stats; + *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; + break; + default: + WARN_ON(1); + return -EOPNOTSUPP; + } + return 0; +} + +static void __mlxsw_sp_port_get_stats(struct net_device *dev, + enum mlxsw_reg_ppcnt_grp grp, int prio, + u64 *data, int data_index) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_hw_stats *hw_stats; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int i, len; + int err; + + err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); + if (err) + return; + mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); + for (i = 0; i < len; i++) { + data[data_index + i] = hw_stats[i].getter(ppcnt_pl); + if (!hw_stats[i].cells_bytes) + continue; + data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp, + data[data_index + i]); + } +} + +static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int i, data_index = 0; + + /* IEEE 802.3 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, + data, data_index); + data_index = MLXSW_SP_PORT_HW_STATS_LEN; + + /* RFC 2863 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; + + /* RFC 2819 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + + /* RFC 3635 Counters */ + 
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; + + /* Extended Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN; + + /* Discard Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; + + /* Per-Priority Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + } + + /* Per-TC Counters */ + for (i = 0; i < TC_MAX_QUEUE; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; + } + + /* PTP counters */ + mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port, + data, data_index); + data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); +} + +static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + switch (sset) { + case ETH_SS_STATS: + return MLXSW_SP_PORT_ETHTOOL_STATS_LEN + + mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count(); + default: + return -EOPNOTSUPP; + } +} + +static void +mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap, + u8 width, struct ethtool_link_ksettings *cmd) +{ + const struct mlxsw_sp_port_type_speed_ops *ops; + + ops = mlxsw_sp->port_type_speed_ops; + + ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + + ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd); + ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width, + cmd->link_modes.supported); +} + +static void +mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp, + u32 eth_proto_admin, bool autoneg, u8 width, + struct ethtool_link_ksettings *cmd) +{ + const struct mlxsw_sp_port_type_speed_ops *ops; + + ops = mlxsw_sp->port_type_speed_ops; + + if (!autoneg) + return; + + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width, + cmd->link_modes.advertising); +} + +static u8 +mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type) +{ + switch (connector_type) { + case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR: + return PORT_OTHER; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE: + return PORT_NONE; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP: + return PORT_TP; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI: + return PORT_AUI; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC: + return PORT_BNC; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII: + return PORT_MII; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE: + return PORT_FIBRE; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA: + return PORT_DA; + case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER: + return PORT_OTHER; + default: + WARN_ON_ONCE(1); + return PORT_OTHER; + } +} + +static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + u32 eth_proto_cap, eth_proto_admin, eth_proto_oper; + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + const struct mlxsw_sp_port_type_speed_ops *ops; + char 
ptys_pl[MLXSW_REG_PTYS_LEN]; + u8 connector_type; + bool autoneg; + int err; + + ops = mlxsw_sp->port_type_speed_ops; + + autoneg = mlxsw_sp_port->link.autoneg; + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + 0, false); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) + return err; + ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, + &eth_proto_admin, &eth_proto_oper); + + mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, + mlxsw_sp_port->mapping.width, cmd); + + mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, + mlxsw_sp_port->mapping.width, cmd); + + cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl); + cmd->base.port = mlxsw_sp_port_connector_port(connector_type); + ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev), + eth_proto_oper, cmd); + + return 0; +} + +static int +mlxsw_sp_port_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + const struct mlxsw_sp_port_type_speed_ops *ops; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_cap, eth_proto_new; + bool autoneg; + int err; + + ops = mlxsw_sp->port_type_speed_ops; + + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + 0, false); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) + return err; + ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL); + + autoneg = cmd->base.autoneg == AUTONEG_ENABLE; + eth_proto_new = autoneg ? + ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, + cmd) : + ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, + cmd->base.speed); + + eth_proto_new = eth_proto_new & eth_proto_cap; + if (!eth_proto_new) { + netdev_err(dev, "No supported speed requested\n"); + return -EINVAL; + } + + ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, + eth_proto_new, autoneg); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); + if (err) + return err; + + mlxsw_sp_port->link.autoneg = autoneg; + + if (!netif_running(dev)) + return 0; + + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); + mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + + return 0; +} + +static int mlxsw_sp_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err; + + err = mlxsw_env_get_module_info(mlxsw_sp->core, + mlxsw_sp_port->mapping.module, + modinfo); + + return err; +} + +static int mlxsw_sp_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, u8 *data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err; + + err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core, + mlxsw_sp_port->mapping.module, ee, + data); + + return err; +} + +static int +mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info); +} + +const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { + .get_drvinfo = mlxsw_sp_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_pauseparam = 
mlxsw_sp_port_get_pauseparam, + .set_pauseparam = mlxsw_sp_port_set_pauseparam, + .get_strings = mlxsw_sp_port_get_strings, + .set_phys_id = mlxsw_sp_port_set_phys_id, + .get_ethtool_stats = mlxsw_sp_port_get_stats, + .get_sset_count = mlxsw_sp_port_get_sset_count, + .get_link_ksettings = mlxsw_sp_port_get_link_ksettings, + .set_link_ksettings = mlxsw_sp_port_set_link_ksettings, + .get_module_info = mlxsw_sp_get_module_info, + .get_module_eeprom = mlxsw_sp_get_module_eeprom, + .get_ts_info = mlxsw_sp_get_ts_info, +}; From 2be5c8a963192ecfa6c00acd687fc00e4d3b5296 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:14 +0300 Subject: [PATCH 0474/2056] mlxsw: spectrum_ethtool: Move mlxsw_sp_port_type_speed_ops structs Move mlxsw_sp1_port_type_speed_ops and mlxsw_sp2_port_type_speed_ops with the relevant code from spectrum.c to spectrum_ethtool.c. Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 660 ------------------ .../net/ethernet/mellanox/mlxsw/spectrum.h | 2 + .../mellanox/mlxsw/spectrum_ethtool.c | 658 +++++++++++++++++ 3 files changed, 660 insertions(+), 660 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d508edccc54b..81bb9ea1479b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1528,666 +1528,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_do_ioctl = mlxsw_sp_port_ioctl, }; -struct mlxsw_sp1_port_link_mode { - enum ethtool_link_mode_bit_indices mask_ethtool; - u32 mask; - u32 speed; -}; - -static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, - .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, - .speed = SPEED_100, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | - MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, - .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - .speed = SPEED_1000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, - .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - .speed = SPEED_10000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, - .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - .speed = SPEED_10000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, - .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - .speed = SPEED_10000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, - .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, - .speed = SPEED_20000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, - .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - .speed = SPEED_40000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, - .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - .speed = SPEED_40000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, - .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - .speed = SPEED_40000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, - .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, - .speed = SPEED_40000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, - .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - 
.speed = SPEED_25000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, - .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - .speed = SPEED_25000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, - .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, - .speed = SPEED_25000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, - .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - .speed = SPEED_50000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, - .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - .speed = SPEED_50000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, - .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, - .speed = SPEED_50000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, - .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - .speed = SPEED_100000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, - .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - .speed = SPEED_100000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, - .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - .speed = SPEED_100000, - }, - { - .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, - .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, - .speed = SPEED_100000, - }, -}; - -#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) - -static void -mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) -{ - if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | - MLXSW_REG_PTYS_ETH_SPEED_SGMII)) - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - - if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | - MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | - MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | - MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | - MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) - ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); -} - -static void -mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, - u8 width, unsigned long *mode) -{ - int i; - - for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) - __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, - mode); - } -} - -static u32 -mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) -{ - int i; - - for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) - return mlxsw_sp1_port_link_mode[i].speed; - } - - return SPEED_UNKNOWN; -} - -static void -mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) -{ - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - - if (!carrier_ok) - return; - - cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); - if (cmd->base.speed != SPEED_UNKNOWN) - cmd->base.duplex = DUPLEX_FULL; -} - -static u32 -mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, - const struct ethtool_link_ksettings *cmd) -{ - u32 ptys_proto = 0; - int i; - - for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { - if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, - cmd->link_modes.advertising)) - ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; - } - return ptys_proto; -} - 
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, - u32 speed) -{ - u32 ptys_proto = 0; - int i; - - for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { - if (speed == mlxsw_sp1_port_link_mode[i].speed) - ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; - } - return ptys_proto; -} - -static void -mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, bool autoneg) -{ - mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); -} - -static void -mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, - u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, - u32 *p_eth_proto_oper) -{ - mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, - p_eth_proto_oper); -} - -static const struct mlxsw_sp_port_type_speed_ops -mlxsw_sp1_port_type_speed_ops = { - .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, - .from_ptys_link = mlxsw_sp1_from_ptys_link, - .from_ptys_speed = mlxsw_sp1_from_ptys_speed, - .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, - .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, - .to_ptys_speed = mlxsw_sp1_to_ptys_speed, - .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, - .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, -}; - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_sgmii_100m[] = { - ETHTOOL_LINK_MODE_100baseT_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { - ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { - ETHTOOL_LINK_MODE_2500baseX_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_5gbase_r[] = { - ETHTOOL_LINK_MODE_5000baseT_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { - ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, - ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, - ETHTOOL_LINK_MODE_10000baseER_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { - ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { - ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ - 
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { - ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { - ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, - ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { - ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { - ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, - ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { - ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, - ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) - -static const enum ethtool_link_mode_bit_indices -mlxsw_sp2_mask_ethtool_400gaui_8[] = { - ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, - ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, - ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, - ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, - ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, -}; - -#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ - ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) - -#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) -#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) -#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) -#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3) - -static u8 mlxsw_sp_port_mask_width_get(u8 width) -{ - switch (width) { - case 1: - return MLXSW_SP_PORT_MASK_WIDTH_1X; - case 2: - return MLXSW_SP_PORT_MASK_WIDTH_2X; - case 4: - return MLXSW_SP_PORT_MASK_WIDTH_4X; - case 8: - return MLXSW_SP_PORT_MASK_WIDTH_8X; - default: - WARN_ON_ONCE(1); - return 0; - } -} - -struct mlxsw_sp2_port_link_mode { - const enum ethtool_link_mode_bit_indices *mask_ethtool; - int m_ethtool_len; - u32 mask; - u32 speed; - u8 mask_width; -}; - -static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, - .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, - .m_ethtool_len = 
MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_100, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, - .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_1000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, - .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_2500, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, - .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_5000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, - .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_10000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, - .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_40000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, - .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | - MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_25000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, - .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | - MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_50000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, - .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, - .speed = SPEED_50000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, - .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_100000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, - .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, - .speed = SPEED_100000, - }, - { - .mask = 
MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, - .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | - MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_200000, - }, - { - .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, - .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, - .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, - .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, - .speed = SPEED_400000, - }, -}; - -#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) - -static void -mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) -{ - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); -} - -static void -mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, - unsigned long *mode) -{ - int i; - - for (i = 0; i < link_mode->m_ethtool_len; i++) - __set_bit(link_mode->mask_ethtool[i], mode); -} - -static void -mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, - u8 width, unsigned long *mode) -{ - u8 mask_width = mlxsw_sp_port_mask_width_get(width); - int i; - - for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && - (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) - mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], - mode); - } -} - -static u32 -mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) -{ - int i; - - for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) - return mlxsw_sp2_port_link_mode[i].speed; - } - - return SPEED_UNKNOWN; -} - -static void -mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, - u32 ptys_eth_proto, - struct ethtool_link_ksettings *cmd) -{ - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - - if (!carrier_ok) - return; - - cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); - if (cmd->base.speed != SPEED_UNKNOWN) - cmd->base.duplex = DUPLEX_FULL; -} - -static bool -mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, - const unsigned long *mode) -{ - int cnt = 0; - int i; - - for (i = 0; i < link_mode->m_ethtool_len; i++) { - if (test_bit(link_mode->mask_ethtool[i], mode)) - cnt++; - } - - return cnt == link_mode->m_ethtool_len; -} - -static u32 -mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, - const struct ethtool_link_ksettings *cmd) -{ - u8 mask_width = mlxsw_sp_port_mask_width_get(width); - u32 ptys_proto = 0; - int i; - - for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && - mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], - cmd->link_modes.advertising)) - ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; - } - return ptys_proto; -} - -static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, - u8 width, u32 speed) -{ - u8 mask_width = mlxsw_sp_port_mask_width_get(width); - u32 ptys_proto = 0; - int i; - - for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { - if ((speed == mlxsw_sp2_port_link_mode[i].speed) && - (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) - ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; - } - return ptys_proto; -} - -static void 
-mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, - u8 local_port, u32 proto_admin, - bool autoneg) -{ - mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); -} - -static void -mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, - u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, - u32 *p_eth_proto_oper) -{ - mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, - p_eth_proto_admin, p_eth_proto_oper); -} - -static const struct mlxsw_sp_port_type_speed_ops -mlxsw_sp2_port_type_speed_ops = { - .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, - .from_ptys_link = mlxsw_sp2_from_ptys_link, - .from_ptys_speed = mlxsw_sp2_from_ptys_speed, - .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, - .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, - .to_ptys_speed = mlxsw_sp2_to_ptys_speed, - .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, - .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, -}; - static int mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e29678b65a8a..1d6b2bc2774c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1168,5 +1168,7 @@ static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp) /* spectrum_ethtool.c */ extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops; +extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops; +extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops; #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 7c03c749b563..04e1db604c69 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -839,3 +839,661 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_module_eeprom = mlxsw_sp_get_module_eeprom, .get_ts_info = mlxsw_sp_get_ts_info, }; + +struct mlxsw_sp1_port_link_mode { + enum ethtool_link_mode_bit_indices mask_ethtool; + u32 mask; + u32 speed; +}; + +static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, + .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + .speed = SPEED_100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, + .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + .speed = SPEED_1000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, + .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + .speed = SPEED_10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, + .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + .speed = SPEED_10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, + .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + .speed = SPEED_10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, + .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, + .speed = SPEED_20000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, + .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, + .mask_ethtool = 
ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, + .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, + .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR, + .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + .speed = SPEED_25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR, + .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + .speed = SPEED_25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, + .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + .speed = SPEED_25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2, + .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, + .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2, + .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4, + .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4, + .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4, + .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + .speed = SPEED_100000, + }, +}; + +#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode) + +static void +mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) + ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); +} + +static void +mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, + u8 width, unsigned long *mode) +{ + int i; + + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) + __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, + mode); + } +} + +static u32 +mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) +{ + int i; + + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask) + return mlxsw_sp1_port_link_mode[i].speed; + } + + return SPEED_UNKNOWN; +} + +static void +mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (!carrier_ok) + return; + + cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto); 
+ if (cmd->base.speed != SPEED_UNKNOWN) + cmd->base.duplex = DUPLEX_FULL; +} + +static u32 +mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, + const struct ethtool_link_ksettings *cmd) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool, + cmd->link_modes.advertising)) + ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; + } + return ptys_proto; +} + +static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width, + u32 speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) { + if (speed == mlxsw_sp1_port_link_mode[i].speed) + ptys_proto |= mlxsw_sp1_port_link_mode[i].mask; + } + return ptys_proto; +} + +static void +mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, + u8 local_port, u32 proto_admin, bool autoneg) +{ + mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg); +} + +static void +mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, + u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper) +{ + mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin, + p_eth_proto_oper); +} + +const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = { + .from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port, + .from_ptys_link = mlxsw_sp1_from_ptys_link, + .from_ptys_speed = mlxsw_sp1_from_ptys_speed, + .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex, + .to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link, + .to_ptys_speed = mlxsw_sp1_to_ptys_speed, + .reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack, + .reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack, +}; + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_sgmii_100m[] = { + ETHTOOL_LINK_MODE_100baseT_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = { + ETHTOOL_LINK_MODE_2500baseX_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_5gbase_r[] = { + ETHTOOL_LINK_MODE_5000baseT_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseER_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = { + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, +}; + +#define 
MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) + +static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_400gaui_8[] = { + ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, + ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) + +#define MLXSW_SP_PORT_MASK_WIDTH_1X BIT(0) +#define MLXSW_SP_PORT_MASK_WIDTH_2X BIT(1) +#define MLXSW_SP_PORT_MASK_WIDTH_4X BIT(2) +#define MLXSW_SP_PORT_MASK_WIDTH_8X BIT(3) + +static u8 mlxsw_sp_port_mask_width_get(u8 width) +{ + switch (width) { + case 1: + return MLXSW_SP_PORT_MASK_WIDTH_1X; + case 2: + return MLXSW_SP_PORT_MASK_WIDTH_2X; + case 4: + return MLXSW_SP_PORT_MASK_WIDTH_4X; + case 8: + return MLXSW_SP_PORT_MASK_WIDTH_8X; + default: + 
WARN_ON_ONCE(1); + return 0; + } +} + +struct mlxsw_sp2_port_link_mode { + const enum ethtool_link_mode_bit_indices *mask_ethtool; + int m_ethtool_len; + u32 mask; + u32 speed; + u8 mask_width; +}; + +static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M, + .mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_100, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII, + .mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_1000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII, + .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_2500, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R, + .mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_5000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G, + .mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_10000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G, + .mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_40000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X | + MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_25000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X | + MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X, + .speed = SPEED_50000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN, + .mask_width 
= MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X, + .speed = SPEED_100000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X | + MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_200000, + }, + { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, + .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, + .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X, + .speed = SPEED_400000, + }, +}; + +#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode) + +static void +mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane); +} + +static void +mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, + unsigned long *mode) +{ + int i; + + for (i = 0; i < link_mode->m_ethtool_len; i++) + __set_bit(link_mode->mask_ethtool[i], mode); +} + +static void +mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto, + u8 width, unsigned long *mode) +{ + u8 mask_width = mlxsw_sp_port_mask_width_get(width); + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) && + (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) + mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + mode); + } +} + +static u32 +mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto) +{ + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) + return mlxsw_sp2_port_link_mode[i].speed; + } + + return SPEED_UNKNOWN; +} + +static void +mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok, + u32 ptys_eth_proto, + struct ethtool_link_ksettings *cmd) +{ + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + + if (!carrier_ok) + return; + + cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto); + if (cmd->base.speed != SPEED_UNKNOWN) + cmd->base.duplex = DUPLEX_FULL; +} + +static bool +mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode, + const unsigned long *mode) +{ + int cnt = 0; + int i; + + for (i = 0; i < link_mode->m_ethtool_len; i++) { + if (test_bit(link_mode->mask_ethtool[i], mode)) + cnt++; + } + + return cnt == link_mode->m_ethtool_len; +} + +static u32 +mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width, + const struct ethtool_link_ksettings *cmd) +{ + u8 mask_width = mlxsw_sp_port_mask_width_get(width); + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) && + mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i], + cmd->link_modes.advertising)) + ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; + } + return ptys_proto; +} + +static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, + u8 
width, u32 speed) +{ + u8 mask_width = mlxsw_sp_port_mask_width_get(width); + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) { + if ((speed == mlxsw_sp2_port_link_mode[i].speed) && + (mask_width & mlxsw_sp2_port_link_mode[i].mask_width)) + ptys_proto |= mlxsw_sp2_port_link_mode[i].mask; + } + return ptys_proto; +} + +static void +mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload, + u8 local_port, u32 proto_admin, + bool autoneg) +{ + mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg); +} + +static void +mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload, + u32 *p_eth_proto_cap, u32 *p_eth_proto_admin, + u32 *p_eth_proto_oper) +{ + mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap, + p_eth_proto_admin, p_eth_proto_oper); +} + +const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = { + .from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port, + .from_ptys_link = mlxsw_sp2_from_ptys_link, + .from_ptys_speed = mlxsw_sp2_from_ptys_speed, + .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex, + .to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link, + .to_ptys_speed = mlxsw_sp2_to_ptys_speed, + .reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack, + .reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack, +}; From e120c801b8c93bd9bd5d2a61f43f1a45a9ce4f23 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:15 +0300 Subject: [PATCH 0475/2056] Documentation: networking: ethtool-netlink: Add link extended state Add link extended state attributes. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Reviewed-by: Jacob Keller Reviewed-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- Documentation/networking/ethtool-netlink.rst | 128 ++++++++++++++++++- 1 file changed, 124 insertions(+), 4 deletions(-) diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 82470c36c27a..396390f4936b 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -443,10 +443,11 @@ supports. LINKSTATE_GET ============= -Requests link state information. At the moment, only link up/down flag (as -provided by ``ETHTOOL_GLINK`` ioctl command) is provided but some future -extensions are planned (e.g. link down reason). This request does not have any -attributes. +Requests link state information. Link up/down flag (as provided by +``ETHTOOL_GLINK`` ioctl command) is provided. Optionally, extended state might +be provided as well. In general, extended state describes reasons for why a port +is down, or why it operates in some non-obvious mode. This request does not have +any attributes. Request contents: @@ -461,16 +462,135 @@ Kernel response contents: ``ETHTOOL_A_LINKSTATE_LINK`` bool link state (up/down) ``ETHTOOL_A_LINKSTATE_SQI`` u32 Current Signal Quality Index ``ETHTOOL_A_LINKSTATE_SQI_MAX`` u32 Max support SQI value + ``ETHTOOL_A_LINKSTATE_EXT_STATE`` u8 link extended state + ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE`` u8 link extended substate ==================================== ====== ============================ For most NIC drivers, the value of ``ETHTOOL_A_LINKSTATE_LINK`` returns carrier flag provided by ``netif_carrier_ok()`` but there are drivers which define their own handler. +``ETHTOOL_A_LINKSTATE_EXT_STATE`` and ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE`` are +optional values. 
ethtool core can provide either both +``ETHTOOL_A_LINKSTATE_EXT_STATE`` and ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE``, +or only ``ETHTOOL_A_LINKSTATE_EXT_STATE``, or none of them. + ``LINKSTATE_GET`` allows dump requests (kernel returns reply messages for all devices supporting the request). +Link extended states: + + ================================================ ============================================ + ``ETHTOOL_LINK_EXT_STATE_AUTONEG`` States relating to the autonegotiation or + issues therein + + ``ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE`` Failure during link training + + ``ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH`` Logical mismatch in physical coding sublayer + or forward error correction sublayer + + ``ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY`` Signal integrity issues + + ``ETHTOOL_LINK_EXT_STATE_NO_CABLE`` No cable connected + + ``ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE`` Failure is related to cable, + e.g., unsupported cable + + ``ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE`` Failure is related to EEPROM, e.g., failure + during reading or parsing the data + + ``ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE`` Failure during calibration algorithm + + ``ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED`` The hardware is not able to provide the + power required from cable or module + + ``ETHTOOL_LINK_EXT_STATE_OVERHEAT`` The module is overheated + ================================================= ============================================ + +Link extended substates: + + Autoneg substates: + + ============================================================== ================================ + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED`` Peer side is down + + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED`` Ack not received from peer side + + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED`` Next page exchange failed + + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE`` Peer side is down during force + mode or there is no agreement of + speed + + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE`` Forward error correction modes + in both sides are mismatched + + ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD`` No Highest Common Denominator + ============================================================== ================================ + + Link training substates: + + ========================================================================== ==================== + ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED`` Frames were not + recognized, the + lock failed + + ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT`` The lock did not + occur before + timeout + + ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY`` Peer side did not + send ready signal + after training + process + + ``ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT`` Remote side is not + ready yet + ========================================================================== ==================== + + Link logical mismatch substates: + + =============================================================== =============================== + ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK`` Physical coding sublayer was + not locked in first phase - + block lock + + ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK`` Physical coding sublayer was + not locked in second phase - + alignment markers lock + + ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS`` Physical coding sublayer did + not get align status + + 
``ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED`` FC forward error correction is + not locked + + ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED`` RS forward error correction is + not locked + =============================================================== =============================== + + Bad signal integrity substates: + + ================================================================= ============================= + ``ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS`` Large number of physical + errors + + ``ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE`` The system attempted to + operate the cable at a rate + that is not formally + supported, which led to + signal integrity issues + ================================================================= ============================= + + Cable issue substates: + + ================================================== ============================================ + ``ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE`` Unsupported cable + + ``ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE`` Cable test failure + ================================================== ============================================ + DEBUG_GET ========= From ecc31c60240b9808a274befc5db6b8a249a6ade1 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:16 +0300 Subject: [PATCH 0476/2056] ethtool: Add link extended state Currently, drivers can only tell whether the link is up/down using LINKSTATE_GET, but no additional information is given. Add attributes to LINKSTATE_GET command in order to allow drivers to expose the user more information in addition to link state to ease the debug process, for example, reason for link down state. Extended state consists of two attributes - link_ext_state and link_ext_substate. The idea is to avoid 'vendor specific' states in order to prevent drivers to use specific link_ext_state that can be in the future common link_ext_state. The substates allows drivers to add more information to the common link_ext_state. For example, vendor can expose 'Autoneg' as link_ext_state and add 'No partner detected during force mode' as link_ext_substate. If a driver cannot pinpoint the extended state with the substate accuracy, it is free to expose only the extended state and omit the substate attribute. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/ethtool.h | 23 +++++++++ include/uapi/linux/ethtool.h | 70 ++++++++++++++++++++++++++++ include/uapi/linux/ethtool_netlink.h | 2 + net/ethtool/linkstate.c | 52 +++++++++++++++++++-- 4 files changed, 143 insertions(+), 4 deletions(-) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index a23b26eab479..48ad3b6a0150 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -86,6 +86,22 @@ struct net_device; u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); + +/** + * struct ethtool_link_ext_state_info - link extended state and substate. 
+ */ +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + u8 __link_ext_substate; + }; +}; + /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table @@ -245,6 +261,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * @get_link: Report whether physical link is up. Will only be called if * the netdev is up. Should usually be set to ethtool_op_get_link(), * which uses netif_carrier_ok(). + * @get_link_ext_state: Report link extended state. Should set link_ext_state and + * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown, + * do not attach ext_substate attribute to netlink message). If link_ext_state + * and link_ext_substate are unknown, return -ENODATA. If not implemented, + * link_ext_state and link_ext_substate will not be sent to userspace. * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. Fill in the data argument with the eeprom values @@ -384,6 +405,8 @@ struct ethtool_ops { void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, + struct ethtool_link_ext_state_info *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index f4662b3a9e1e..d1413538ef30 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -579,6 +579,76 @@ struct ethtool_pauseparam { __u32 tx_pause; }; +/** + * enum ethtool_link_ext_state - link extended state + */ +enum ethtool_link_ext_state { + ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_STATE_NO_CABLE, + ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, + ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, + ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, + ETHTOOL_LINK_EXT_STATE_OVERHEAT, +}; + +/** + * enum ethtool_link_ext_substate_autoneg - more information in addition to + * ETHTOOL_LINK_EXT_STATE_AUTONEG. + */ +enum ethtool_link_ext_substate_autoneg { + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD, +}; + +/** + * enum ethtool_link_ext_substate_link_training - more information in addition to + * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE. 
+ */ +enum ethtool_link_ext_substate_link_training { + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT, +}; + +/** + * enum ethtool_link_ext_substate_logical_mismatch - more information in addition + * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH. + */ +enum ethtool_link_ext_substate_link_logical_mismatch { + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED, +}; + +/** + * enum ethtool_link_ext_substate_bad_signal_integrity - more information in + * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY. + */ +enum ethtool_link_ext_substate_bad_signal_integrity { + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE, +}; + +/** + * enum ethtool_link_ext_substate_cable_issue - more information in + * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE. + */ +enum ethtool_link_ext_substate_cable_issue { + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1, + ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE, +}; + #define ETH_GSTRING_LEN 32 /** diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 4dda5e4244a7..c12ce4df4b6b 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -236,6 +236,8 @@ enum { ETHTOOL_A_LINKSTATE_LINK, /* u8 */ ETHTOOL_A_LINKSTATE_SQI, /* u32 */ ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */ + ETHTOOL_A_LINKSTATE_EXT_STATE, /* u8 */ + ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, /* u8 */ /* add new constants above here */ __ETHTOOL_A_LINKSTATE_CNT, diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c index afe5ac8a0f00..4834091ec24c 100644 --- a/net/ethtool/linkstate.c +++ b/net/ethtool/linkstate.c @@ -9,10 +9,12 @@ struct linkstate_req_info { }; struct linkstate_reply_data { - struct ethnl_reply_data base; - int link; - int sqi; - int sqi_max; + struct ethnl_reply_data base; + int link; + int sqi; + int sqi_max; + bool link_ext_state_provided; + struct ethtool_link_ext_state_info ethtool_link_ext_state_info; }; #define LINKSTATE_REPDATA(__reply_base) \ @@ -25,6 +27,8 @@ linkstate_get_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = { [ETHTOOL_A_LINKSTATE_LINK] = { .type = NLA_REJECT }, [ETHTOOL_A_LINKSTATE_SQI] = { .type = NLA_REJECT }, [ETHTOOL_A_LINKSTATE_SQI_MAX] = { .type = NLA_REJECT }, + [ETHTOOL_A_LINKSTATE_EXT_STATE] = { .type = NLA_REJECT }, + [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = { .type = NLA_REJECT }, }; static int linkstate_get_sqi(struct net_device *dev) @@ -61,6 +65,23 @@ static int linkstate_get_sqi_max(struct net_device *dev) mutex_unlock(&phydev->lock); return ret; +}; + +static int linkstate_get_link_ext_state(struct net_device *dev, + struct linkstate_reply_data *data) +{ + int err; + + if (!dev->ethtool_ops->get_link_ext_state) + return -EOPNOTSUPP; + + err = dev->ethtool_ops->get_link_ext_state(dev, &data->ethtool_link_ext_state_info); + if (err) + return err; + + data->link_ext_state_provided = true; + + return 0; } static int linkstate_prepare_data(const struct ethnl_req_info *req_base, @@ -86,6 +107,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base, goto out; 
data->sqi_max = ret; + if (dev->flags & IFF_UP) { + ret = linkstate_get_link_ext_state(dev, data); + if (ret < 0 && ret != -EOPNOTSUPP && ret != -ENODATA) + goto out; + } + ret = 0; out: ethnl_ops_complete(dev); @@ -107,6 +134,12 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base, if (data->sqi_max != -EOPNOTSUPP) len += nla_total_size(sizeof(u32)); + if (data->link_ext_state_provided) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */ + + if (data->ethtool_link_ext_state_info.__link_ext_substate) + len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */ + return len; } @@ -128,6 +161,17 @@ static int linkstate_fill_reply(struct sk_buff *skb, nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max)) return -EMSGSIZE; + if (data->link_ext_state_provided) { + if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE, + data->ethtool_link_ext_state_info.link_ext_state)) + return -EMSGSIZE; + + if (data->ethtool_link_ext_state_info.__link_ext_substate && + nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE, + data->ethtool_link_ext_state_info.__link_ext_substate)) + return -EMSGSIZE; + } + return 0; } From 1bd06938dfccd87420178e7b3ea7bd13b9e11994 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:17 +0300 Subject: [PATCH 0477/2056] mlxsw: reg: Port Diagnostics Database Register The PDDR register enables to read the Phy debug database. Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 51 +++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fcb88d4271bf..b76c839223b5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5438,6 +5438,56 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port, MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0); } +/* PDDR - Port Diagnostics Database Register + * ----------------------------------------- + * The PDDR enables to read the Phy debug database + */ +#define MLXSW_REG_PDDR_ID 0x5031 +#define MLXSW_REG_PDDR_LEN 0x100 + +MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN); + +/* reg_pddr_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8); + +enum mlxsw_reg_pddr_page_select { + MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1, +}; + +/* reg_pddr_page_select + * Page select index. + * Access: Index + */ +MLXSW_ITEM32(reg, pddr, page_select, 0x04, 0, 8); + +enum mlxsw_reg_pddr_trblsh_group_opcode { + /* Monitor opcodes */ + MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR, +}; + +/* reg_pddr_group_opcode + * Group selector. + * Access: Index + */ +MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16); + +/* reg_pddr_status_opcode + * Group selector. + * Access: RO + */ +MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16); + +static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port, + u8 page_select) +{ + MLXSW_REG_ZERO(pddr, payload); + mlxsw_reg_pddr_local_port_set(payload, local_port); + mlxsw_reg_pddr_page_select_set(payload, page_select); +} + /* PMTM - Port Module Type Mapping Register * ---------------------------------------- * The PMTM allows query or configuration of module types. 
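For context, a minimal sketch of how a caller is expected to drive the new PDDR helpers (this mirrors the spectrum_ethtool change later in this series; "local_port" and "mlxsw_core" stand for whatever port number and core handle the caller already holds, and error handling is abbreviated):

	char pddr_pl[MLXSW_REG_PDDR_LEN];
	u32 status_opcode;
	int err;

	/* Select the troubleshooting page and the monitor opcode group. */
	mlxsw_reg_pddr_pack(pddr_pl, local_port,
			    MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO);
	mlxsw_reg_pddr_trblsh_group_opcode_set(pddr_pl,
					       MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR);

	/* Query the register and read back the status opcode the firmware
	 * reports for this port; an opcode of 0 means nothing to report.
	 */
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pddr), pddr_pl);
	if (err)
		return err;
	status_opcode = mlxsw_reg_pddr_trblsh_status_opcode_get(pddr_pl);

The status opcode obtained this way is what the driver then maps to an ethtool link extended state.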
@@ -10758,6 +10808,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(pbmc), MLXSW_REG(pspa), MLXSW_REG(pplr), + MLXSW_REG(pddr), MLXSW_REG(pmtm), MLXSW_REG(htgt), MLXSW_REG(hpkt), From 60f30cd6c24afc66c3e227af1363a40c07ff0307 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:18 +0300 Subject: [PATCH 0478/2056] mlxsw: spectrum_ethtool: Add link extended state Implement .get_down_ext_state() as part of ethtool_ops. Query link down reason from PDDR register and convert it to ethtool link_ext_state. In case that more information than common link_ext_state is provided, fill link_ext_substate also with the appropriate value. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mellanox/mlxsw/spectrum_ethtool.c | 145 ++++++++++++++++++ 1 file changed, 145 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 04e1db604c69..14c78f73bb65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -26,6 +26,150 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } +struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping { + u32 status_opcode; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +static const struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping +mlxsw_sp_link_ext_state_opcode_map[] = { + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED}, + {3, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + {4, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED}, + {36, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE}, + {38, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE}, + {39, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + + {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED}, + {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0}, + {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK}, + {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS}, + {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0}, + {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE}, + + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + + {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + 
ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0}, + + {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, + + {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0}, + + {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0}, + + {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0}, +}; + +static void +mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping + link_ext_state_mapping, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + switch (link_ext_state_mapping.link_ext_state) { + case ETHTOOL_LINK_EXT_STATE_AUTONEG: + link_ext_state_info->autoneg = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE: + link_ext_state_info->link_training = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH: + link_ext_state_info->link_logical_mismatch = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY: + link_ext_state_info->bad_signal_integrity = + link_ext_state_mapping.link_ext_substate; + break; + case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE: + link_ext_state_info->cable_issue = + link_ext_state_mapping.link_ext_substate; + break; + default: + break; + } + + link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state; +} + +static int +mlxsw_sp_port_get_link_ext_state(struct net_device *dev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping; + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + char pddr_pl[MLXSW_REG_PDDR_LEN]; + int opcode, err, i; + u32 status_opcode; + + if (netif_carrier_ok(dev)) + return -ENODATA; + + mlxsw_reg_pddr_pack(pddr_pl, mlxsw_sp_port->local_port, + MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO); + + opcode = MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR; + mlxsw_reg_pddr_trblsh_group_opcode_set(pddr_pl, opcode); + + err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pddr), + pddr_pl); + if (err) + return err; + + status_opcode = mlxsw_reg_pddr_trblsh_status_opcode_get(pddr_pl); + if (!status_opcode) + return -ENODATA; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_link_ext_state_opcode_map); i++) { + link_ext_state_mapping = mlxsw_sp_link_ext_state_opcode_map[i]; + if (link_ext_state_mapping.status_opcode == status_opcode) { + mlxsw_sp_port_set_link_ext_state(link_ext_state_mapping, + link_ext_state_info); + return 0; + } + } + + return -ENODATA; +} + static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) { @@ -827,6 +971,7 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ext_state = mlxsw_sp_port_get_link_ext_state, .get_pauseparam = mlxsw_sp_port_get_pauseparam, .set_pauseparam = mlxsw_sp_port_set_pauseparam, .get_strings = mlxsw_sp_port_get_strings, From dd9e67ff8086adeaf257a17eee9ee73dcb2c1c13 Mon Sep 17 00:00:00 2001 From: Amit Cohen 
Date: Mon, 29 Jun 2020 23:46:19 +0300 Subject: [PATCH 0479/2056] selftests: forwarding: ethtool: Move different_speeds_get() to ethtool_lib Currently different_speeds_get() is used only by ethtool.sh tests. The function can be useful for another tests that check ethtool configurations. Move the function to ethtool_lib in order to allow other tests to use it. Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../testing/selftests/net/forwarding/ethtool.sh | 17 ----------------- .../selftests/net/forwarding/ethtool_lib.sh | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index eb8e2a23bbb4..ea7a11a9f788 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -50,23 +50,6 @@ cleanup() h1_destroy } -different_speeds_get() -{ - local dev1=$1; shift - local dev2=$1; shift - local with_mode=$1; shift - local adver=$1; shift - - local -a speeds_arr - - speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver)) - if [[ ${#speeds_arr[@]} < 2 ]]; then - check_err 1 "cannot check different speeds. There are not enough speeds" - fi - - echo ${speeds_arr[0]} ${speeds_arr[1]} -} - same_speeds_autoneg_off() { # Check that when each of the reported speeds is forced, the links come diff --git a/tools/testing/selftests/net/forwarding/ethtool_lib.sh b/tools/testing/selftests/net/forwarding/ethtool_lib.sh index 925d229a59d8..9188e624dec0 100644 --- a/tools/testing/selftests/net/forwarding/ethtool_lib.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_lib.sh @@ -67,3 +67,20 @@ common_speeds_get() <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \ <(printf '%s\n' "${dev2_speeds[@]}" | sort -u) } + +different_speeds_get() +{ + local dev1=$1; shift + local dev2=$1; shift + local with_mode=$1; shift + local adver=$1; shift + + local -a speeds_arr + + speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver)) + if [[ ${#speeds_arr[@]} < 2 ]]; then + check_err 1 "cannot check different speeds. There are not enough speeds" + fi + + echo ${speeds_arr[0]} ${speeds_arr[1]} +} From 0433045c27bf9ae71e1c0300c278582407dd0e0b Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:20 +0300 Subject: [PATCH 0480/2056] selftests: forwarding: forwarding.config.sample: Add port with no cable connected Add NETIF_NO_CABLE port to tests topology. The port can also be declared as an environment variable and tests can be run like that: NETIF_NO_CABLE=eth9 ./test.sh eth{1..8} The NETIF_NO_CABLE port will be used by ethtool_extended_state test. Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../testing/selftests/net/forwarding/forwarding.config.sample | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample index e2adb533c8fc..b802c14d2950 100644 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample +++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample @@ -14,6 +14,9 @@ NETIFS[p6]=veth5 NETIFS[p7]=veth6 NETIFS[p8]=veth7 +# Port that does not have a cable connected. 
+NETIF_NO_CABLE=eth8 + ############################################################################## # Defines From 7d10bcce98cd44ea7c040380397114e0ac94422f Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 29 Jun 2020 23:46:21 +0300 Subject: [PATCH 0481/2056] selftests: forwarding: Add tests for ethtool extended state Add tests to check ethtool report about extended state. The tests configure several states and verify that the correct extended state is reported by ethtool. Check extended state with substate (Autoneg) and extended state without substate (No cable). Signed-off-by: Amit Cohen Reviewed-by: Petr Machata Reviewed-by: Florian Fainelli Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/forwarding/ethtool_extended_state.sh | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/ethtool_extended_state.sh diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh new file mode 100755 index 000000000000..4b42dfd4efd1 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +ALL_TESTS=" + autoneg + autoneg_force_mode + no_cable +" + +NUM_NETIFS=2 +source lib.sh +source ethtool_lib.sh + +setup_prepare() +{ + swp1=${NETIFS[p1]} + swp2=${NETIFS[p2]} + swp3=$NETIF_NO_CABLE +} + +ethtool_extended_state_check() +{ + local dev=$1; shift + local expected_ext_state=$1; shift + local expected_ext_substate=${1:-""}; shift + + local ext_state=$(ethtool $dev | grep "Link detected" \ + | cut -d "(" -f2 | cut -d ")" -f1) + local ext_substate=$(echo $ext_state | cut -sd "," -f2 \ + | sed -e 's/^[[:space:]]*//') + ext_state=$(echo $ext_state | cut -d "," -f1) + + [[ $ext_state == $expected_ext_state ]] + check_err $? "Expected \"$expected_ext_state\", got \"$ext_state\"" + + [[ $ext_substate == $expected_ext_substate ]] + check_err $? "Expected \"$expected_ext_substate\", got \"$ext_substate\"" +} + +autoneg() +{ + RET=0 + + ip link set dev $swp1 up + + sleep 4 + ethtool_extended_state_check $swp1 "Autoneg" "No partner detected" + + log_test "Autoneg, No partner detected" + + ip link set dev $swp1 down +} + +autoneg_force_mode() +{ + RET=0 + + ip link set dev $swp1 up + ip link set dev $swp2 up + + local -a speeds_arr=($(different_speeds_get $swp1 $swp2 0 0)) + local speed1=${speeds_arr[0]} + local speed2=${speeds_arr[1]} + + ethtool_set $swp1 speed $speed1 autoneg off + ethtool_set $swp2 speed $speed2 autoneg off + + sleep 4 + ethtool_extended_state_check $swp1 "Autoneg" \ + "No partner detected during force mode" + + ethtool_extended_state_check $swp2 "Autoneg" \ + "No partner detected during force mode" + + log_test "Autoneg, No partner detected during force mode" + + ethtool -s $swp2 autoneg on + ethtool -s $swp1 autoneg on + + ip link set dev $swp2 down + ip link set dev $swp1 down +} + +no_cable() +{ + RET=0 + + ip link set dev $swp3 up + + sleep 1 + ethtool_extended_state_check $swp3 "No cable" + + log_test "No cable" + + ip link set dev $swp3 down +} + +setup_prepare + +tests_run + +exit $EXIT_STATUS From 707abf0695481ad19b0b74af65f30c71123d6154 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Tue, 2 Jun 2020 10:50:47 +0300 Subject: [PATCH 0482/2056] igc: Add initial LTR support The LTR message on the PCIe inform the requested latency on which the PCIe must become active to the downstream PCIe port of the system. 
This patch provide recommended LTR parameters by i225 specification. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_defines.h | 27 +++++ drivers/net/ethernet/intel/igc/igc_i225.c | 100 +++++++++++++++++++ drivers/net/ethernet/intel/igc/igc_i225.h | 1 + drivers/net/ethernet/intel/igc/igc_mac.c | 5 + drivers/net/ethernet/intel/igc/igc_regs.h | 6 ++ 5 files changed, 139 insertions(+) diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index ee7fa1c062a0..ed0e560daaae 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -521,4 +521,31 @@ #define IGC_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ #define IGC_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +/* LTR defines */ +#define IGC_LTRC_EEEMS_EN 0x00000020 /* Enable EEE LTR max send */ +#define IGC_RXPBS_SIZE_I225_MASK 0x0000003F /* Rx packet buffer size */ +#define IGC_TW_SYSTEM_1000_MASK 0x000000FF +/* Minimum time for 100BASE-T where no data will be transmit following move out + * of EEE LPI Tx state + */ +#define IGC_TW_SYSTEM_100_MASK 0x0000FF00 +#define IGC_TW_SYSTEM_100_SHIFT 8 +#define IGC_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +#define IGC_DMACR_DMACTHR_MASK 0x00FF0000 +#define IGC_DMACR_DMACTHR_SHIFT 16 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMINV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMINV_SCALE_32768 3 +/* Reg val to set scale to 1024 nsec */ +#define IGC_LTRMAXV_SCALE_1024 2 +/* Reg val to set scale to 32768 nsec */ +#define IGC_LTRMAXV_SCALE_32768 3 +#define IGC_LTRMINV_LTRV_MASK 0x000003FF /* LTR minimum value */ +#define IGC_LTRMAXV_LTRV_MASK 0x000003FF /* LTR maximum value */ +#define IGC_LTRMINV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMINV_SCALE_SHIFT 10 +#define IGC_LTRMAXV_LSNP_REQ 0x00008000 /* LTR Snoop Requirement */ +#define IGC_LTRMAXV_SCALE_SHIFT 10 + #endif /* _IGC_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 3a4e982edb67..8b67d9b49a83 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -544,3 +544,103 @@ s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, return IGC_SUCCESS; } + +/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC + * settings, otherwise specify that there is no LTR requirement. + */ +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link) +{ + u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max; + u16 speed, duplex; + s32 size; + + /* If we do not have link, LTR thresholds are zero. */ + if (link) { + hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex); + + /* Check if using copper interface with EEE enabled or if the + * link speed is 10 Mbps. + */ + if (hw->dev_spec._base.eee_enable && + speed != SPEED_10) { + /* EEE enabled, so send LTRMAX threshold. */ + ltrc = rd32(IGC_LTRC) | + IGC_LTRC_EEEMS_EN; + wr32(IGC_LTRC, ltrc); + + /* Calculate tw_system (nsec). 
*/ + if (speed == SPEED_100) { + tw_system = ((rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_100_MASK) >> + IGC_TW_SYSTEM_100_SHIFT) * 500; + } else { + tw_system = (rd32(IGC_EEE_SU) & + IGC_TW_SYSTEM_1000_MASK) * 500; + } + } else { + tw_system = 0; + } + + /* Get the Rx packet buffer size. */ + size = rd32(IGC_RXPBS) & + IGC_RXPBS_SIZE_I225_MASK; + + /* Calculations vary based on DMAC settings. */ + if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) { + size -= (rd32(IGC_DMACR) & + IGC_DMACR_DMACTHR_MASK) >> + IGC_DMACR_DMACTHR_SHIFT; + /* Convert size to bits. */ + size *= 1024 * 8; + } else { + /* Convert size to bytes, subtract the MTU, and then + * convert the size to bits. + */ + size *= 1024; + size *= 8; + } + + if (size < 0) { + hw_dbg("Invalid effective Rx buffer size %d\n", + size); + return -IGC_ERR_CONFIG; + } + + /* Calculate the thresholds. Since speed is in Mbps, simplify + * the calculation by multiplying size/speed by 1000 for result + * to be in nsec before dividing by the scale in nsec. Set the + * scale such that the LTR threshold fits in the register. + */ + ltr_min = (1000 * size) / speed; + ltr_max = ltr_min + tw_system; + scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 : + IGC_LTRMINV_SCALE_32768; + scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 : + IGC_LTRMAXV_SCALE_32768; + ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768; + ltr_min -= 1; + ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768; + ltr_max -= 1; + + /* Only write the LTR thresholds if they differ from before. */ + ltrv = rd32(IGC_LTRMINV); + if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) { + ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min | + (scale_min << IGC_LTRMINV_SCALE_SHIFT); + wr32(IGC_LTRMINV, ltrv); + } + + ltrv = rd32(IGC_LTRMAXV); + if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) { + ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max | + (scale_min << IGC_LTRMAXV_SCALE_SHIFT); + wr32(IGC_LTRMAXV, ltrv); + } + } + + return IGC_SUCCESS; +} diff --git a/drivers/net/ethernet/intel/igc/igc_i225.h b/drivers/net/ethernet/intel/igc/igc_i225.h index 04759e076a9e..dae47e4f16b0 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.h +++ b/drivers/net/ethernet/intel/igc/igc_i225.h @@ -11,5 +11,6 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw); bool igc_get_flash_presence_i225(struct igc_hw *hw); s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G, bool adv100M); +s32 igc_set_ltr_i225(struct igc_hw *hw, bool link); #endif diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 410aeb01de5c..bc077f230f17 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -417,6 +417,11 @@ s32 igc_check_for_copper_link(struct igc_hw *hw) hw_dbg("Error configuring flow control\n"); out: + /* Now that we are aware of our link settings, we can set the LTR + * thresholds. 
+ */ + ret_val = igc_set_ltr_i225(hw, link); + return ret_val; } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 75e040a5d46f..97f9b928509f 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -253,6 +253,12 @@ #define IGC_IPCNFG 0x0E38 /* Internal PHY Configuration */ #define IGC_EEE_SU 0x0E34 /* EEE Setup */ +/* LTR registers */ +#define IGC_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define IGC_DMACR 0x02508 /* DMA Coalescing Control Register */ +#define IGC_LTRMINV 0x5BB0 /* LTR Minimum Value */ +#define IGC_LTRMAXV 0x5BB4 /* LTR Maximum Value */ + /* forward declaration */ struct igc_hw; u32 igc_rd32(struct igc_hw *hw, u32 reg); From 3a66abe903e54b83099df70d1ecc67cab02d8241 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:00 -0700 Subject: [PATCH 0483/2056] igc: Clean up Rx timestamping logic Differently from I210, I225 doesn't report Rx timestamps via the TS bit Rx descriptor + RXSTMPL/RXSTMPH registers mechanism. Rx timestamps are reported in the packet buffer only, which is implemented by igc_ptp_rx_ pktstamp(). So this patch removes igc_ptp_rx_rgtstamp() and all code related to it, copied from igb driver. Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc.h | 3 -- drivers/net/ethernet/intel/igc/igc_defines.h | 2 - drivers/net/ethernet/intel/igc/igc_main.c | 12 ++---- drivers/net/ethernet/intel/igc/igc_ptp.c | 44 +------------------- drivers/net/ethernet/intel/igc/igc_regs.h | 2 - 5 files changed, 5 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 9c57afad6afe..3070dfdb7eb4 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -210,8 +210,6 @@ struct igc_adapter { struct sk_buff *ptp_tx_skb; struct hwtstamp_config tstamp_config; unsigned long ptp_tx_start; - unsigned long last_rx_ptp_check; - unsigned long last_rx_timestamp; unsigned int ptp_flags; /* System time value lock */ spinlock_t tmreg_lock; @@ -549,7 +547,6 @@ void igc_ptp_init(struct igc_adapter *adapter); void igc_ptp_reset(struct igc_adapter *adapter); void igc_ptp_suspend(struct igc_adapter *adapter); void igc_ptp_stop(struct igc_adapter *adapter); -void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb); void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, struct sk_buff *skb); int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index ed0e560daaae..f1f464967f87 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -323,7 +323,6 @@ /* Advanced Receive Descriptor bit definitions */ #define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ -#define IGC_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ #define IGC_RXDEXT_STATERR_CE 0x01000000 #define IGC_RXDEXT_STATERR_SE 0x02000000 @@ -384,7 +383,6 @@ #define IGC_FTQF_MASK_PROTO_BP 0x10000000 /* Time Sync Receive Control bit definitions */ -#define IGC_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ #define IGC_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ #define IGC_TSYNCRXCTL_TYPE_L2_V2 0x00 #define IGC_TSYNCRXCTL_TYPE_L4_V1 0x02 diff --git a/drivers/net/ethernet/intel/igc/igc_main.c 
b/drivers/net/ethernet/intel/igc/igc_main.c index 7e4d56c7b4c4..1b71f63d0e86 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -1479,9 +1479,9 @@ static inline void igc_rx_hash(struct igc_ring *ring, * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * - * This function checks the ring, descriptor, and packet information in - * order to populate the hash, checksum, VLAN, timestamp, protocol, and - * other fields within the skb. + * This function checks the ring, descriptor, and packet information in order + * to populate the hash, checksum, VLAN, protocol, and other fields within the + * skb. */ static void igc_process_skb_fields(struct igc_ring *rx_ring, union igc_adv_rx_desc *rx_desc, @@ -1491,10 +1491,6 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring, igc_rx_checksum(rx_ring, rx_desc, skb); - if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) && - !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) - igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb); - skb_record_rx_queue(skb, rx_ring->queue_index); skb->protocol = eth_type_trans(skb, rx_ring->netdev); @@ -1975,7 +1971,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) /* probably a little skewed due to removing CRC */ total_bytes += skb->len; - /* populate checksum, timestamp, VLAN, and protocol */ + /* populate checksum, VLAN, and protocol */ igc_process_skb_fields(rx_ring, rx_desc, skb); napi_gro_receive(&q_vector->napi, skb); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 0d746f8588c8..82e6c6c962d5 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -205,46 +205,6 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); } -/** - * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register - * @q_vector: Pointer to interrupt specific structure - * @skb: Buffer containing timestamp and packet - * - * This function is meant to retrieve a timestamp from the internal registers - * of the adapter and store it in the skb. - */ -void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, - struct sk_buff *skb) -{ - struct igc_adapter *adapter = q_vector->adapter; - struct igc_hw *hw = &adapter->hw; - u64 regval; - - /* If this bit is set, then the RX registers contain the time - * stamp. No other packet will be time stamped until we read - * these registers, so read the registers to make them - * available again. Because only one packet can be time - * stamped at a time, we know that the register values must - * belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a shared - * tx_flags that we can turn into a skb_shared_hwtstamps. - */ - if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID)) - return; - - regval = rd32(IGC_RXSTMPL); - regval |= (u64)rd32(IGC_RXSTMPH) << 32; - - igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); - - /* Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. 
- */ - adapter->last_rx_timestamp = jiffies; -} - /** * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue * @rx_ring: Pointer to RX queue @@ -419,11 +379,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, } wrfl(); - /* clear TX/RX time stamp registers, just to be sure */ + /* clear TX time stamp registers, just to be sure */ regval = rd32(IGC_TXSTMPL); regval = rd32(IGC_TXSTMPH); - regval = rd32(IGC_RXSTMPL); - regval = rd32(IGC_RXSTMPH); return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 97f9b928509f..d53f49833db5 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -228,8 +228,6 @@ #define IGC_SYSTIMR 0x0B6F8 /* System time register Residue */ #define IGC_TIMINCA 0x0B608 /* Increment attributes register - RW */ -#define IGC_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ -#define IGC_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ #define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ From 29b821fe976910bed20e375f9f3a77454dfcf9bc Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:01 -0700 Subject: [PATCH 0484/2056] igc: Remove duplicate code in Tx timestamp handling The functions igc_ptp_tx_hang() and igc_ptp_tx_work() have duplicate code which handles Tx timestamp timeouts. This patch does a trivial refactoring by moving that code to its own function and reusing it. Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_ptp.c | 34 +++++++++++------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 82e6c6c962d5..b1b23c6bf689 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -386,11 +386,23 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, return 0; } +static void igc_ptp_tx_timeout(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); + /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. 
*/ + rd32(IGC_TXSTMPH); + netdev_warn(adapter->netdev, "Tx timestamp timeout\n"); +} + void igc_ptp_tx_hang(struct igc_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT); - struct igc_hw *hw = &adapter->hw; if (!adapter->ptp_tx_skb) return; @@ -404,15 +416,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter) */ if (timeout) { cancel_work_sync(&adapter->ptp_tx_work); - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - adapter->tx_hwtstamp_timeouts++; - /* Clear the Tx valid bit in TSYNCTXCTL register to enable - * interrupt - */ - rd32(IGC_TXSTMPH); - netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n"); + igc_ptp_tx_timeout(adapter); } } @@ -467,15 +471,7 @@ static void igc_ptp_tx_work(struct work_struct *work) if (time_is_before_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT)) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - adapter->tx_hwtstamp_timeouts++; - /* Clear the tx valid bit in TSYNCTXCTL register to enable - * interrupt - */ - rd32(IGC_TXSTMPH); - netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n"); + igc_ptp_tx_timeout(adapter); return; } From 1801f8d9292fdbcf8b0fc6d6455eb05d508845a3 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:02 -0700 Subject: [PATCH 0485/2056] igc: Check __IGC_PTP_TX_IN_PROGRESS instead of ptp_tx_skb The __IGC_PTP_TX_IN_PROGRESS flag indicates we have a pending Tx timestamp. In some places, instead of checking that flag, we check adapter->ptp_tx_skb. This patch fixes those places to use the flag. Quick note about igc_ptp_tx_hwtstamp() change: when that function is called, adapter->ptp_tx_skb is expected to be valid always so we WARN_ON_ONCE() in case it is not. Quick note about igc_ptp_suspend() change: when suspending, we don't really need to check if there is a pending timestamp. We can simply clear it unconditionally. 
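The unconditional cleanup in igc_ptp_suspend() works because dev_kfree_skb_any() appears to tolerate a NULL skb and clearing an already-clear state bit is harmless, so the teardown path is idempotent. A minimal userspace sketch of that idea, using free() and a plain atomic flag as stand-ins for dev_kfree_skb_any() and the __IGC_PTP_TX_IN_PROGRESS bit (illustration only, none of this is driver code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct tstamp_ctx {
	void *pending_skb;        /* stands in for adapter->ptp_tx_skb */
	atomic_bool in_progress;  /* stands in for __IGC_PTP_TX_IN_PROGRESS */
};

/* Safe whether or not a timestamp is pending: free(NULL) is a no-op and
 * storing false into an already-false flag changes nothing.
 */
static void tstamp_cleanup(struct tstamp_ctx *ctx)
{
	free(ctx->pending_skb);
	ctx->pending_skb = NULL;
	atomic_store(&ctx->in_progress, false);
}

int main(void)
{
	struct tstamp_ctx ctx;

	ctx.pending_skb = NULL;
	atomic_init(&ctx.in_progress, false);

	tstamp_cleanup(&ctx);                 /* nothing pending: still safe */

	ctx.pending_skb = malloc(64);
	atomic_store(&ctx.in_progress, true);
	tstamp_cleanup(&ctx);                 /* pending: freed, flag cleared */
	return 0;
}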
Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_ptp.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index b1b23c6bf689..e65fdcf966b2 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -404,9 +404,6 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter) bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + IGC_PTP_TX_TIMEOUT); - if (!adapter->ptp_tx_skb) - return; - if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) return; @@ -435,6 +432,9 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u64 regval; + if (WARN_ON_ONCE(!skb)) + return; + regval = rd32(IGC_TXSTMPL); regval |= (u64)rd32(IGC_TXSTMPH) << 32; igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); @@ -466,7 +466,7 @@ static void igc_ptp_tx_work(struct work_struct *work) struct igc_hw *hw = &adapter->hw; u32 tsynctxctl; - if (!adapter->ptp_tx_skb) + if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state)) return; if (time_is_before_jiffies(adapter->ptp_tx_start + @@ -588,11 +588,9 @@ void igc_ptp_suspend(struct igc_adapter *adapter) return; cancel_work_sync(&adapter->ptp_tx_work); - if (adapter->ptp_tx_skb) { - dev_kfree_skb_any(adapter->ptp_tx_skb); - adapter->ptp_tx_skb = NULL; - clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - } + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); } /** From 3b44d4c10c798d7d20c46e5364959558b6b2f73b Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:03 -0700 Subject: [PATCH 0486/2056] igc: Remove UDP filter setup in PTP code As implemented in igc_ethtool_get_ts_info(), igc only supports HWTSTAMP_ FILTER_ALL so any HWTSTAMP_FILTER_* option the user may set falls back to HWTSTAMP_FILTER_ALL. HWTSTAMP_FILTER_ALL is implemented via Rx Time Sync Control (TSYNCRXCTL) configuration which timestamps all incoming packets. Configuring a UDP filter, in addition to TSYNCRXCTL, doesn't add much so this patch removes that code. It also takes this opportunity to remove some non-applicable comments. Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_ptp.c | 51 +----------------------- 1 file changed, 1 insertion(+), 50 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index e65fdcf966b2..bdf934377abb 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -244,18 +244,7 @@ static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter, * @adapter: networking device structure * @config: hwtstamp configuration * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. 
Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * + * Return: 0 in case of success, negative errno code otherwise. */ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct hwtstamp_config *config) @@ -263,8 +252,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED; u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED; struct igc_hw *hw = &adapter->hw; - u32 tsync_rx_cfg = 0; - bool is_l4 = false; u32 regval; /* reserved for future extensions */ @@ -285,15 +272,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, tsync_rx_ctl = 0; break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = true; - break; case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = true; - break; case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: @@ -303,32 +282,22 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2; - config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l4 = true; - break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL; config->rx_filter = HWTSTAMP_FILTER_ALL; break; - /* fall through */ default: config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } - /* Per-packet timestamping only works if all packets are - * timestamped, so enable timestamping in all packets as long - * as one Rx filter was configured. - */ if (tsync_rx_ctl) { tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED; tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL; tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG; config->rx_filter = HWTSTAMP_FILTER_ALL; - is_l4 = true; if (hw->mac.type == igc_i225) { regval = rd32(IGC_RXPBS); @@ -359,24 +328,6 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, regval |= tsync_rx_ctl; wr32(IGC_TSYNCRXCTL, regval); - /* define which PTP packets are time stamped */ - wr32(IGC_TSYNCRXCFG, tsync_rx_cfg); - - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ - | IGC_FTQF_VF_BP /* VF not compared */ - | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */ - | IGC_FTQF_MASK); /* mask all inputs */ - ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */ - - wr32(IGC_IMIR(3), htons(PTP_EV_PORT)); - wr32(IGC_IMIREXT(3), - (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP)); - wr32(IGC_FTQF(3), ftqf); - } else { - wr32(IGC_FTQF(3), IGC_FTQF_MASK); - } wrfl(); /* clear TX time stamp registers, just to be sure */ From 3df7fd799b1d70a7d3afeee12784f208b47e97a7 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:04 -0700 Subject: [PATCH 0487/2056] igc: Refactor igc_ptp_set_timestamp_mode() Current igc_ptp_set_timestamp_mode() logic is a bit tangled since it handles many different hardware configurations in one single place, making it harder to follow. This patch untangles that code by breaking it into helper functions. 
Quick note about the hw->mac.type check which was removed in this refactoring: this check it not really needed since igc_i225 is the only type supported by the IGC driver. Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_ptp.c | 103 ++++++++++++----------- 1 file changed, 53 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index bdf934377abb..0251e6bedac4 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -239,6 +239,54 @@ static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter, } } +static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + + wr32(IGC_TSYNCRXCTL, 0); + + val = rd32(IGC_RXPBS); + val &= ~IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); +} + +static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 val; + + val = rd32(IGC_RXPBS); + val |= IGC_RXPBS_CFG_TS_EN; + wr32(IGC_RXPBS, val); + + /* FIXME: For now, only support retrieving RX timestamps from timer 0 + */ + igc_ptp_enable_tstamp_all_rxqueues(adapter, 0); + + val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | + IGC_TSYNCRXCTL_RXSYNSIG; + wr32(IGC_TSYNCRXCTL, val); +} + +static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, 0); +} + +static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + + wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG); + + /* Read TXSTMP registers to discard any timestamp previously stored. 
*/ + rd32(IGC_TXSTMPL); + rd32(IGC_TXSTMPH); +} + /** * igc_ptp_set_timestamp_mode - setup hardware for timestamping * @adapter: networking device structure @@ -249,19 +297,16 @@ static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter, static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, struct hwtstamp_config *config) { - u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED; - struct igc_hw *hw = &adapter->hw; - u32 regval; - /* reserved for future extensions */ if (config->flags) return -EINVAL; switch (config->tx_type) { case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; + igc_ptp_disable_tx_timestamp(adapter); + break; case HWTSTAMP_TX_ON: + igc_ptp_enable_tx_timestamp(adapter); break; default: return -ERANGE; @@ -269,7 +314,7 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, switch (config->rx_filter) { case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; + igc_ptp_disable_rx_timestamp(adapter); break; case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: @@ -285,55 +330,13 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter, case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: - tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL; + igc_ptp_enable_rx_timestamp(adapter); config->rx_filter = HWTSTAMP_FILTER_ALL; break; default: - config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } - if (tsync_rx_ctl) { - tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED; - tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL; - tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG; - config->rx_filter = HWTSTAMP_FILTER_ALL; - - if (hw->mac.type == igc_i225) { - regval = rd32(IGC_RXPBS); - regval |= IGC_RXPBS_CFG_TS_EN; - wr32(IGC_RXPBS, regval); - - /* FIXME: For now, only support retrieving RX - * timestamps from timer 0 - */ - igc_ptp_enable_tstamp_all_rxqueues(adapter, 0); - } - } - - if (tsync_tx_ctl) { - tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED; - tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG; - } - - /* enable/disable TX */ - regval = rd32(IGC_TSYNCTXCTL); - regval &= ~IGC_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - wr32(IGC_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = rd32(IGC_TSYNCRXCTL); - regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - wr32(IGC_TSYNCRXCTL, regval); - - wrfl(); - - /* clear TX time stamp registers, just to be sure */ - regval = rd32(IGC_TXSTMPL); - regval = rd32(IGC_TXSTMPH); - return 0; } From 1cbedabffdb1737fc40136de9b72e4c69c3e29ce Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Wed, 3 Jun 2020 17:01:05 -0700 Subject: [PATCH 0488/2056] igc: Fix Rx timestamp disabling When Rx timestamping is enabled, we set the timestamp bit in SRRCTL register for each queue, but we don't clear it when disabling. This patch fixes igc_ptp_disable_rx_timestamp() accordingly. Also, this patch gets rid of igc_ptp_enable_tstamp_rxqueue() and igc_ptp_enable_tstamp_all_rxqueues() and move their logic into igc_ptp_enable_rx_timestamp() to keep the enable and disable helpers symmetric. 
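These enable/disable helpers are driven by the standard SIOCSHWTSTAMP ioctl. A hedged userspace sketch of how an application would exercise them (the interface name "eth0" is an arbitrary example; the ioctl, structures and constants are the regular Linux hardware timestamping API, not igc-specific code):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,      /* Tx timestamping on */
		.rx_filter = HWTSTAMP_FILTER_ALL, /* timestamp every Rx packet */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* assumed NIC name */
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter: %d\n", cfg.rx_filter);

	close(fd);
	return 0;
}

With the patches above, any rx_filter other than HWTSTAMP_FILTER_NONE that the driver accepts is granted as HWTSTAMP_FILTER_ALL, and that granted value is what the kernel copies back into cfg.rx_filter.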
Signed-off-by: Andre Guedes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_ptp.c | 54 ++++++++---------------- 1 file changed, 17 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 0251e6bedac4..e67d4655b47e 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -205,47 +205,20 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va, ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); } -/** - * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue - * @rx_ring: Pointer to RX queue - * @timer: Index for timer - * - * This function enables RX timestamping for a queue, and selects - * which 1588 timer will provide the timestamp. - */ -static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter, - struct igc_ring *rx_ring, u8 timer) -{ - struct igc_hw *hw = &adapter->hw; - int reg_idx = rx_ring->reg_idx; - u32 srrctl = rd32(IGC_SRRCTL(reg_idx)); - - srrctl |= IGC_SRRCTL_TIMESTAMP; - srrctl |= IGC_SRRCTL_TIMER1SEL(timer); - srrctl |= IGC_SRRCTL_TIMER0SEL(timer); - - wr32(IGC_SRRCTL(reg_idx), srrctl); -} - -static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter, - u8 timer) -{ - int i; - - for (i = 0; i < adapter->num_rx_queues; i++) { - struct igc_ring *ring = adapter->rx_ring[i]; - - igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer); - } -} - static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; u32 val; + int i; wr32(IGC_TSYNCRXCTL, 0); + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + val &= ~IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } + val = rd32(IGC_RXPBS); val &= ~IGC_RXPBS_CFG_TS_EN; wr32(IGC_RXPBS, val); @@ -255,14 +228,21 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; u32 val; + int i; val = rd32(IGC_RXPBS); val |= IGC_RXPBS_CFG_TS_EN; wr32(IGC_RXPBS, val); - /* FIXME: For now, only support retrieving RX timestamps from timer 0 - */ - igc_ptp_enable_tstamp_all_rxqueues(adapter, 0); + for (i = 0; i < adapter->num_rx_queues; i++) { + val = rd32(IGC_SRRCTL(i)); + /* FIXME: For now, only support retrieving RX timestamps from + * timer 0. + */ + val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) | + IGC_SRRCTL_TIMESTAMP; + wr32(IGC_SRRCTL(i), val); + } val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL | IGC_TSYNCRXCTL_RXSYNSIG; From 900d1e8b346b6f3f33718f5db8d3ee8b192860ba Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Thu, 4 Jun 2020 14:25:16 +0300 Subject: [PATCH 0489/2056] igc: Add LPI counters Add EEE TX LPI and EEE RX LPI counters. A EEE TX LPI event occurs when the transmitter enters EEE (IEEE 802.3az) LPI state. A EEE RX LPI event occurs when the receiver detect link partner entry into EEE(IEEE 802.3az) LPI state. 
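Like the neighbouring statistics registers marked R/clr, these counters are cleared by the act of reading them, which is why igc_clear_hw_cntrs_base() only reads them and why igc_update_stats() accumulates into 64-bit software counters instead of overwriting. A small illustrative sketch of that accumulate-on-read pattern, with an invented read_reg32() standing in for a clear-on-read hardware register (not driver code; the clear-on-read behaviour of TLPIC/RLPIC is inferred from the surrounding counters):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_lpi_reg = 7;	/* pretend hardware counter */

static uint32_t read_reg32(void)
{
	uint32_t val = fake_lpi_reg;

	fake_lpi_reg = 0;		/* the read clears the counter */
	return val;
}

struct sw_stats {
	uint64_t tx_lpi;		/* 64-bit software copy */
};

static void update_stats(struct sw_stats *s)
{
	s->tx_lpi += read_reg32();	/* accumulate, never overwrite */
}

int main(void)
{
	struct sw_stats stats = { 0 };

	update_stats(&stats);
	fake_lpi_reg = 3;		/* more LPI events occur */
	update_stats(&stats);
	printf("tx_lpi = %llu\n", (unsigned long long)stats.tx_lpi);
	return 0;
}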
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_mac.c | 2 ++ drivers/net/ethernet/intel/igc/igc_regs.h | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index bc077f230f17..f3f7717b6233 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -307,6 +307,8 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_ICRXDMTC); rd32(IGC_RPTHC); + rd32(IGC_TLPIC); + rd32(IGC_RLPIC); rd32(IGC_HGPTC); rd32(IGC_HGORCL); rd32(IGC_HGORCH); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index d53f49833db5..eb3e8e70501d 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -188,6 +188,8 @@ #define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ #define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ +#define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ +#define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ #define IGC_HGPTC 0x04118 /* Host Good Packets Tx Count */ #define IGC_RXDMTC 0x04120 /* Rx Descriptor Minimum Threshold Count */ #define IGC_HGORCL 0x04128 /* Host Good Octets Received Count Low */ From 725fa16d3622a3e9e21f37a4b79ce866afca5ef0 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sun, 7 Jun 2020 11:51:27 +0300 Subject: [PATCH 0490/2056] igc: Remove TCP segmentation TX fail counter TCP segmentation TX context fail counter is not applicable for i225 devices. This patch comes to clean up this counter. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_mac.c | 1 - drivers/net/ethernet/intel/igc/igc_main.c | 1 - drivers/net/ethernet/intel/igc/igc_regs.h | 1 - 3 files changed, 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index f3f7717b6233..9a5e44ef45f4 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -289,7 +289,6 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_TNCRS); rd32(IGC_HTDPMC); rd32(IGC_TSCTC); - rd32(IGC_TSCTFC); rd32(IGC_MGTPRC); rd32(IGC_MGTPDC); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1b71f63d0e86..6a11f897aa62 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3740,7 +3740,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.algnerrc += rd32(IGC_ALGNERRC); adapter->stats.tsctc += rd32(IGC_TSCTC); - adapter->stats.tsctfc += rd32(IGC_TSCTFC); adapter->stats.iac += rd32(IGC_IAC); adapter->stats.icrxoc += rd32(IGC_ICRXOC); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index eb3e8e70501d..1c46cec5a799 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -181,7 +181,6 @@ #define IGC_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ #define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ #define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ -#define IGC_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ #define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire 
Count */ #define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ From a0beb3c1b1acbe80a851a9892f72094ea27504be Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 8 Jun 2020 18:49:39 +0300 Subject: [PATCH 0491/2056] igc: Refactor the igc_power_down_link() Currently the implementation of igc_power_down_link() method was just calling igc_power_down_phy_copper_base() method. We can just call igc_power_down_phy_copper_base() method directly. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_main.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6a11f897aa62..555c6633f1c3 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -61,16 +61,6 @@ enum latency_range { latency_invalid = 255 }; -/** - * igc_power_down_link - Power down the phy/serdes link - * @adapter: address of board private structure - */ -static void igc_power_down_link(struct igc_adapter *adapter) -{ - if (adapter->hw.phy.media_type == igc_media_type_copper) - igc_power_down_phy_copper_base(&adapter->hw); -} - void igc_reset(struct igc_adapter *adapter) { struct net_device *dev = adapter->netdev; @@ -106,7 +96,7 @@ void igc_reset(struct igc_adapter *adapter) igc_set_eee_i225(hw, true, true, true); if (!netif_running(adapter->netdev)) - igc_power_down_link(adapter); + igc_power_down_phy_copper_base(&adapter->hw); /* Re-enable PTP, where applicable. */ igc_ptp_reset(adapter); @@ -4615,7 +4605,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) igc_free_irq(adapter); err_req_irq: igc_release_hw_control(adapter); - igc_power_down_link(adapter); + igc_power_down_phy_copper_base(&adapter->hw); igc_free_all_rx_resources(adapter); err_setup_rx: igc_free_all_tx_resources(adapter); @@ -5313,7 +5303,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake, wake = wufc || adapter->en_mng_pt; if (!wake) - igc_power_down_link(adapter); + igc_power_down_phy_copper_base(&adapter->hw); else igc_power_up_link(adapter); From 2b374e373807a7c3ce071de1a011acbfa94cf0d7 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Wed, 10 Jun 2020 15:43:08 +0300 Subject: [PATCH 0492/2056] igc: Remove unneeded check for copper media type PHY of the i225 device support only copper mode. There is no point to check media type in the igc_power_up_link() method. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 555c6633f1c3..e544f0599dcf 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -115,8 +115,7 @@ static void igc_power_up_link(struct igc_adapter *adapter) { igc_reset_phy(&adapter->hw); - if (adapter->hw.phy.media_type == igc_media_type_copper) - igc_power_up_phy_copper(&adapter->hw); + igc_power_up_phy_copper(&adapter->hw); igc_setup_link(&adapter->hw); } From f637471d33a7a786c455877361e70e0b2da3c56a Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Wed, 17 Jun 2020 15:01:31 +0300 Subject: [PATCH 0493/2056] igc: Remove checking media type during MAC initialization i225 device support only copper mode. There is no point to check media type in the igc_config_fc_after_link_up() method. 
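For context on the hunk that follows: once autonegotiation has completed, igc_config_fc_after_link_up() reads the PHY status register twice because the MII BMSR has latched ("sticky") bits such as link status, so the first read can return stale state. A hedged, self-contained sketch of that classic idiom, with mdio_read() invented as a stand-in for a real MDIO access (not igc code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MII_BMSR	0x01	/* basic mode status register */
#define BMSR_LSTATUS	0x0004	/* link status bit, latched low */

/* Invented stub for an MDIO bus read; it pretends the link dropped at some
 * point in the past but is up now.
 */
static uint16_t mdio_read(int reg)
{
	static const uint16_t reads[] = { 0x0000, BMSR_LSTATUS };
	static int i;

	(void)reg;
	return reads[i < 1 ? i++ : 1];
}

static bool phy_link_is_up(void)
{
	uint16_t bmsr;

	/* Read once to flush the latched value, again for the live state. */
	bmsr = mdio_read(MII_BMSR);
	bmsr = mdio_read(MII_BMSR);
	return bmsr & BMSR_LSTATUS;
}

int main(void)
{
	printf("link %s\n", phy_link_is_up() ? "up" : "down");
	return 0;
}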
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igc/igc_mac.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 9a5e44ef45f4..b47e7b0a6398 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -468,10 +468,8 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) * so we had to force link. In this case, we need to force the * configuration of the MAC to match the "fc" parameter. */ - if (mac->autoneg_failed) { - if (hw->phy.media_type == igc_media_type_copper) - ret_val = igc_force_mac_fc(hw); - } + if (mac->autoneg_failed) + ret_val = igc_force_mac_fc(hw); if (ret_val) { hw_dbg("Error forcing flow control settings\n"); @@ -483,7 +481,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) * has completed, and if so, how the PHY and link partner has * flow control configured. */ - if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) { + if (mac->autoneg) { /* Read the MII Status Register and check to see if AutoNeg * has completed. We read this twice because this reg has * some "sticky" (latched) bits. From f53b9b0bdc59c0823679f2e3214e0d538f5951b9 Mon Sep 17 00:00:00 2001 From: Laura Garcia Liebana Date: Sun, 31 May 2020 22:26:23 +0200 Subject: [PATCH 0494/2056] netfilter: introduce support for reject at prerouting stage REJECT statement can be only used in INPUT, FORWARD and OUTPUT chains. This patch adds support of REJECT, both icmp and tcp reset, at PREROUTING stage. The need for this patch comes from the requirement of some forwarding devices to reject traffic before the natting and routing decisions. The main use case is to be able to send a graceful termination to legitimate clients that, under any circumstances, the NATed endpoints are not available. This option allows clients to decide either to perform a reconnection or manage the error in their side, instead of just dropping the connection and let them die due to timeout. It is supported ipv4, ipv6 and inet families for nft infrastructure. 
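With this in place, a ruleset can cut traffic off before the DNAT and routing steps, which is the load-balancer use case described above. A hedged usage example (table and chain names, the port, and the priority value are arbitrary choices, and exact syntax depends on the nft userspace version):

nft add table inet filter
nft add chain inet filter prerouting '{ type filter hook prerouting priority -150; }'
nft add rule inet filter prerouting tcp dport 8080 reject with tcp reset

Picking a priority below -100 matters because that is where the dstnat hook traditionally sits, so the reject is evaluated before any NAT rewrite takes place.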
Signed-off-by: Laura Garcia Liebana Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nf_reject_ipv4.c | 21 +++++++++++++++++++++ net/ipv6/netfilter/nf_reject_ipv6.c | 26 ++++++++++++++++++++++++++ net/netfilter/nft_reject.c | 3 ++- 3 files changed, 49 insertions(+), 1 deletion(-) diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 2361fdac2c43..9dcfa4e461b6 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -96,6 +96,21 @@ void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, } EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put); +static int nf_reject_fill_skb_dst(struct sk_buff *skb_in) +{ + struct dst_entry *dst = NULL; + struct flowi fl; + + memset(&fl, 0, sizeof(struct flowi)); + fl.u.ip4.daddr = ip_hdr(skb_in)->saddr; + nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false); + if (!dst) + return -1; + + skb_dst_set(skb_in, dst); + return 0; +} + /* Send RST reply */ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) { @@ -109,6 +124,9 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) if (!oth) return; + if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb)) + return; + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) return; @@ -175,6 +193,9 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook) if (iph->frag_off & htons(IP_OFFSET)) return; + if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in)) + return; + if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) { icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0); return; diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5fae66f66671..4aef6baaa55e 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -126,6 +126,21 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, } EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); +static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in) +{ + struct dst_entry *dst = NULL; + struct flowi fl; + + memset(&fl, 0, sizeof(struct flowi)); + fl.u.ip6.daddr = ipv6_hdr(skb_in)->saddr; + nf_ip6_route(dev_net(skb_in->dev), &dst, &fl, false); + if (!dst) + return -1; + + skb_dst_set(skb_in, dst); + return 0; +} + void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) { struct net_device *br_indev __maybe_unused; @@ -154,6 +169,14 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) fl6.daddr = oip6h->saddr; fl6.fl6_sport = otcph->dest; fl6.fl6_dport = otcph->source; + + if (hook == NF_INET_PRE_ROUTING) { + nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false); + if (!dst) + return; + skb_dst_set(oldskb, dst); + } + fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev); fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark); security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); @@ -245,6 +268,9 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in, if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL) skb_in->dev = net->loopback_dev; + if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in)) + return; + icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0); } EXPORT_SYMBOL_GPL(nf_send_unreach6); diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 86eafbb0fdd0..61fb7e8afbf0 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c @@ -30,7 +30,8 @@ int nft_reject_validate(const struct nft_ctx *ctx, return 
nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) | - (1 << NF_INET_LOCAL_OUT)); + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_PRE_ROUTING)); } EXPORT_SYMBOL_GPL(nft_reject_validate); From d61d2e902aa0561e5f4b6348514fb35de544aa1f Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Sun, 14 Jun 2020 23:42:07 +0200 Subject: [PATCH 0495/2056] netfilter: nft_set_pipapo: Drop useless assignment of scratch map index on insert In nft_pipapo_insert(), we need to reallocate scratch maps that will be used for matching by lookup functions, if they have never been allocated or if the bucket size changes as a result of the insertion. As pipapo_realloc_scratch() provides a pair of fresh, zeroed out maps, there's no need to select a particular one after reallocation. Other than being useless, the existing assignment was also troubled by the fact that the index was set only on the CPU performing the actual insertion, as spotted by Florian. Simply drop the assignment. Reported-by: Florian Westphal Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges") Signed-off-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_pipapo.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 8c04388296b0..313de1d73168 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1249,8 +1249,6 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, if (err) return err; - this_cpu_write(nft_pipapo_scratch_index, false); - m->bsize_max = bsize_max; } else { put_cpu_ptr(m->scratch); From 857ca89711de3dbcc674d58a6d7d297ee0bd34e1 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sun, 21 Jun 2020 18:40:30 +0300 Subject: [PATCH 0496/2056] ipvs: register hooks only with services Keep the IPVS hooks registered in Netfilter only while there are configured virtual services. This saves CPU cycles while IPVS is loaded but not used. Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 5 +++ net/netfilter/ipvs/ip_vs_core.c | 80 ++++++++++++++++++++++++++------- net/netfilter/ipvs/ip_vs_ctl.c | 23 +++++++++- 3 files changed, 89 insertions(+), 19 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 83be2d93b407..0c9881241323 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -874,6 +874,7 @@ struct netns_ipvs { struct ip_vs_stats tot_stats; /* Statistics & est. */ int num_services; /* no of virtual services */ + int num_services6; /* IPv6 virtual services */ /* Trash for destinations */ struct list_head dest_trash; @@ -960,6 +961,7 @@ struct netns_ipvs { * are not supported when synchronization is enabled. 
*/ unsigned int mixed_address_family_dests; + unsigned int hooks_afmask; /* &1=AF_INET, &2=AF_INET6 */ }; #define DEFAULT_SYNC_THRESHOLD 3 @@ -1670,6 +1672,9 @@ static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc) #endif } +int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af); +void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af); + static inline int ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) { diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index aa6a603a2425..ca3670152565 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2256,7 +2256,7 @@ ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb, #endif -static const struct nf_hook_ops ip_vs_ops[] = { +static const struct nf_hook_ops ip_vs_ops4[] = { /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_reply4, @@ -2302,7 +2302,10 @@ static const struct nf_hook_ops ip_vs_ops[] = { .hooknum = NF_INET_FORWARD, .priority = 100, }, +}; + #ifdef CONFIG_IP_VS_IPV6 +static const struct nf_hook_ops ip_vs_ops6[] = { /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_reply6, @@ -2348,8 +2351,64 @@ static const struct nf_hook_ops ip_vs_ops[] = { .hooknum = NF_INET_FORWARD, .priority = 100, }, -#endif }; +#endif + +int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af) +{ + const struct nf_hook_ops *ops; + unsigned int count; + unsigned int afmask; + int ret = 0; + + if (af == AF_INET6) { +#ifdef CONFIG_IP_VS_IPV6 + ops = ip_vs_ops6; + count = ARRAY_SIZE(ip_vs_ops6); + afmask = 2; +#else + return -EINVAL; +#endif + } else { + ops = ip_vs_ops4; + count = ARRAY_SIZE(ip_vs_ops4); + afmask = 1; + } + + if (!(ipvs->hooks_afmask & afmask)) { + ret = nf_register_net_hooks(ipvs->net, ops, count); + if (ret >= 0) + ipvs->hooks_afmask |= afmask; + } + return ret; +} + +void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af) +{ + const struct nf_hook_ops *ops; + unsigned int count; + unsigned int afmask; + + if (af == AF_INET6) { +#ifdef CONFIG_IP_VS_IPV6 + ops = ip_vs_ops6; + count = ARRAY_SIZE(ip_vs_ops6); + afmask = 2; +#else + return; +#endif + } else { + ops = ip_vs_ops4; + count = ARRAY_SIZE(ip_vs_ops4); + afmask = 1; + } + + if (ipvs->hooks_afmask & afmask) { + nf_unregister_net_hooks(ipvs->net, ops, count); + ipvs->hooks_afmask &= ~afmask; + } +} + /* * Initialize IP Virtual Server netns mem. 
*/ @@ -2425,19 +2484,6 @@ static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list) } } -static int __net_init __ip_vs_dev_init(struct net *net) -{ - int ret; - - ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); - if (ret < 0) - goto hook_fail; - return 0; - -hook_fail: - return ret; -} - static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) { struct netns_ipvs *ipvs; @@ -2446,7 +2492,8 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) EnterFunction(2); list_for_each_entry(net, net_list, exit_list) { ipvs = net_ipvs(net); - nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); + ip_vs_unregister_hooks(ipvs, AF_INET); + ip_vs_unregister_hooks(ipvs, AF_INET6); ipvs->enable = 0; /* Disable packet reception */ smp_wmb(); ip_vs_sync_net_cleanup(ipvs); @@ -2462,7 +2509,6 @@ static struct pernet_operations ipvs_core_ops = { }; static struct pernet_operations ipvs_core_dev_ops = { - .init = __ip_vs_dev_init, .exit_batch = __ip_vs_dev_cleanup_batch, }; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 412656c34f20..0eed388c960b 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1272,6 +1272,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, struct ip_vs_scheduler *sched = NULL; struct ip_vs_pe *pe = NULL; struct ip_vs_service *svc = NULL; + int ret_hooks = -1; /* increase the module use count */ if (!ip_vs_use_count_inc()) @@ -1313,6 +1314,14 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, } #endif + if ((u->af == AF_INET && !ipvs->num_services) || + (u->af == AF_INET6 && !ipvs->num_services6)) { + ret = ip_vs_register_hooks(ipvs, u->af); + if (ret < 0) + goto out_err; + ret_hooks = ret; + } + svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL); if (svc == NULL) { IP_VS_DBG(1, "%s(): no memory\n", __func__); @@ -1374,6 +1383,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, /* Count only IPv4 services for old get/setsockopt interface */ if (svc->af == AF_INET) ipvs->num_services++; + else if (svc->af == AF_INET6) + ipvs->num_services6++; /* Hash the service into the service table */ ip_vs_svc_hash(svc); @@ -1385,6 +1396,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, out_err: + if (ret_hooks >= 0) + ip_vs_unregister_hooks(ipvs, u->af); if (svc != NULL) { ip_vs_unbind_scheduler(svc, sched); ip_vs_service_free(svc); @@ -1500,9 +1513,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) struct ip_vs_pe *old_pe; struct netns_ipvs *ipvs = svc->ipvs; - /* Count only IPv4 services for old get/setsockopt interface */ - if (svc->af == AF_INET) + if (svc->af == AF_INET) { ipvs->num_services--; + if (!ipvs->num_services) + ip_vs_unregister_hooks(ipvs, svc->af); + } else if (svc->af == AF_INET6) { + ipvs->num_services6--; + if (!ipvs->num_services6) + ip_vs_unregister_hooks(ipvs, svc->af); + } ip_vs_stop_estimator(svc->ipvs, &svc->stats); From 9d9f95a94097a21ee7a910db35c9b15ac1426724 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Tue, 30 Jun 2020 10:30:34 +0800 Subject: [PATCH 0497/2056] hinic: remove unused but set variable remove unused but set variable to avoid auto build test WARNING Signed-off-by: Luo bin Reported-by: kernel test robot Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 18 ------------------ .../net/ethernet/huawei/hinic/hinic_sriov.c | 2 -- 2 files changed, 20 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index a4a2a2d68f5c..cb5ebae54f73 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -749,12 +749,9 @@ static int __set_hw_coal_param(struct hinic_dev *nic_dev, static int __hinic_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal, u16 queue) { - struct hinic_intr_coal_info *ori_rx_intr_coal = NULL; - struct hinic_intr_coal_info *ori_tx_intr_coal = NULL; struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_intr_coal_info rx_intr_coal = {0}; struct hinic_intr_coal_info tx_intr_coal = {0}; - char obj_str[OBJ_STR_MAX_LEN] = {0}; bool set_rx_coal = false; bool set_tx_coal = false; int err; @@ -779,21 +776,6 @@ static int __hinic_set_coalesce(struct net_device *netdev, set_tx_coal = true; } - if (queue == COALESCE_ALL_QUEUE) { - ori_rx_intr_coal = &nic_dev->rx_intr_coalesce[0]; - ori_tx_intr_coal = &nic_dev->tx_intr_coalesce[0]; - err = snprintf(obj_str, OBJ_STR_MAX_LEN, "for netdev"); - } else { - ori_rx_intr_coal = &nic_dev->rx_intr_coalesce[queue]; - ori_tx_intr_coal = &nic_dev->tx_intr_coalesce[queue]; - err = snprintf(obj_str, OBJ_STR_MAX_LEN, "for queue %d", queue); - } - if (err <= 0 || err >= OBJ_STR_MAX_LEN) { - netif_err(nic_dev, drv, netdev, "Failed to snprintf string, function return(%d) and dest_len(%d)\n", - err, OBJ_STR_MAX_LEN); - return -EFAULT; - } - /* setting coalesce timer or pending limit to zero will disable * coalesce */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index f5c7c1f48542..caf7e81e3f62 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1060,9 +1060,7 @@ static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id) static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id) { struct vf_data_storage *vf_infos; - u16 func_id; - func_id = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + vf_id; vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id); if (vf_infos->pf_set_mac) hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0); From 65951a9eb65e79b5a056fa6912682d91136507a1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 29 Jun 2020 21:43:13 -0700 Subject: [PATCH 0498/2056] net: dsa: Improve subordinate PHY error message It is not very informative to know the DSA master device when a subordinate network device fails to get its PHY setup. Provide the device name and capitalize PHY while we are it. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 4c7f086a047b..e147e10b411c 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1795,7 +1795,8 @@ int dsa_slave_create(struct dsa_port *port) ret = dsa_slave_phy_setup(slave_dev); if (ret) { - netdev_err(master, "error %d setting up slave phy\n", ret); + netdev_err(master, "error %d setting up slave PHY for %s\n", + ret, slave_dev->name); goto out_gcells; } From 3cad1c8b49e93c62f27f4eb34e0ccfde92f4cdc0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Jun 2020 11:25:00 +0100 Subject: [PATCH 0499/2056] net: dsa/b53: change b53_force_port_config() pause argument Replace the b53_force_port_config() pause argument, which is based on phylink's MLO_PAUSE_* definitions, to use a pair of booleans. This will allow us to move b53_force_port_config() from b53_phylink_mac_config() to b53_phylink_mac_link_up(). Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 1df05841ab6b..8a7fa6092b01 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1037,7 +1037,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link) } static void b53_force_port_config(struct b53_device *dev, int port, - int speed, int duplex, int pause) + int speed, int duplex, + bool tx_pause, bool rx_pause) { u8 reg, val, off; @@ -1075,9 +1076,9 @@ static void b53_force_port_config(struct b53_device *dev, int port, return; } - if (pause & MLO_PAUSE_RX) + if (rx_pause) reg |= PORT_OVERRIDE_RX_FLOW; - if (pause & MLO_PAUSE_TX) + if (tx_pause) reg |= PORT_OVERRIDE_TX_FLOW; b53_write8(dev, B53_CTRL_PAGE, off, reg); @@ -1089,22 +1090,24 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, struct b53_device *dev = ds->priv; struct ethtool_eee *p = &dev->ports[port].eee; u8 rgmii_ctrl = 0, reg = 0, off; - int pause = 0; + bool tx_pause = false; + bool rx_pause = false; if (!phy_is_pseudo_fixed_link(phydev)) return; /* Enable flow control on BCM5301x's CPU port */ if (is5301x(dev) && port == dev->cpu_port) - pause = MLO_PAUSE_TXRX_MASK; + tx_pause = rx_pause = true; if (phydev->pause) { if (phydev->asym_pause) - pause |= MLO_PAUSE_TX; - pause |= MLO_PAUSE_RX; + tx_pause = true; + rx_pause = true; } - b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause); + b53_force_port_config(dev, port, phydev->speed, phydev->duplex, + tx_pause, rx_pause); b53_force_link(dev, port, phydev->link); if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { @@ -1166,7 +1169,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, } else if (is5301x(dev)) { if (port != dev->cpu_port) { b53_force_port_config(dev, dev->cpu_port, 2000, - DUPLEX_FULL, MLO_PAUSE_TXRX_MASK); + DUPLEX_FULL, true, true); b53_force_link(dev, dev->cpu_port, 1); } } @@ -1256,7 +1259,9 @@ void b53_phylink_mac_config(struct dsa_switch *ds, int port, if (mode == MLO_AN_FIXED) { b53_force_port_config(dev, port, state->speed, - state->duplex, state->pause); + state->duplex, + !!(state->pause & MLO_PAUSE_TX), + !!(state->pause & MLO_PAUSE_RX)); return; } From ab017b7921d3242dd2fb04fce77680e88d97477e Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Jun 2020 11:25:06 +0100 Subject: [PATCH 0500/2056] net: dsa/b53: use resolved 
link config in mac_link_up() Convert the B53 driver to use the finalised link parameters in mac_link_up() rather than the parameters in mac_config(). This is just a matter of moving the call to b53_force_port_config(). Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 8a7fa6092b01..6500179c2ca2 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1254,17 +1254,9 @@ void b53_phylink_mac_config(struct dsa_switch *ds, int port, { struct b53_device *dev = ds->priv; - if (mode == MLO_AN_PHY) + if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED) return; - if (mode == MLO_AN_FIXED) { - b53_force_port_config(dev, port, state->speed, - state->duplex, - !!(state->pause & MLO_PAUSE_TX), - !!(state->pause & MLO_PAUSE_RX)); - return; - } - if ((phy_interface_mode_is_8023z(state->interface) || state->interface == PHY_INTERFACE_MODE_SGMII) && dev->ops->serdes_config) @@ -1314,6 +1306,8 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, return; if (mode == MLO_AN_FIXED) { + b53_force_port_config(dev, port, speed, duplex, + tx_pause, rx_pause); b53_force_link(dev, port, true); return; } From 2d1f90f9ba8348ebc85fe57a0034509b21570a85 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Jun 2020 11:28:08 +0100 Subject: [PATCH 0501/2056] net: dsa/bcm_sf2: fix incorrect usage of state->link state->link has never been valid in mac_config() implementations - while it may be correct in some calls, it is not true that it can be relied upon. Fix bcm_sf2 to use the correct method of handling forced link status. Reviewed-by: Florian Fainelli Tested-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 946e41f020a5..5a8759d2de6c 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -618,8 +618,6 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, break; } - if (state->link) - reg |= LINK_STS; if (state->duplex == DUPLEX_FULL) reg |= DUPLX_MODE; @@ -650,6 +648,20 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface) { + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + u32 reg, offset; + + if (port != core_readl(priv, CORE_IMP0_PRT_ID)) { + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + + reg = core_readl(priv, offset); + reg &= ~LINK_STS; + core_writel(priv, reg, offset); + } + bcm_sf2_sw_mac_link_set(ds, port, interface, false); } @@ -662,9 +674,21 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->dev->ports[port].eee; + u32 reg, offset; bcm_sf2_sw_mac_link_set(ds, port, interface, true); + if (port != core_readl(priv, CORE_IMP0_PRT_ID)) { + if (priv->type == BCM7445_DEVICE_ID) + offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); + else + offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + + reg = core_readl(priv, offset); + reg |= LINK_STS; + core_writel(priv, reg, offset); + } + if (mode == MLO_AN_PHY && phydev) p->eee_enabled = b53_eee_init(ds, port, phydev); } From 50cc2020a8055491534929abccda5047731f24b8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Jun 2020 11:28:13 +0100 Subject: [PATCH 0502/2056] net: dsa/bcm_sf2: move speed/duplex forcing to mac_link_up() Convert the bcm_sf2 to use the finalised speed and duplex in its mac_link_up() call rather than the parameters in mac_config(). Reviewed-by: Florian Fainelli Tested-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 43 +++++++++++++++------------------------ 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 5a8759d2de6c..062e6efad53f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -558,16 +558,11 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); u32 id_mode_dis = 0, port_mode; - u32 reg, offset; + u32 reg; if (port == core_readl(priv, CORE_IMP0_PRT_ID)) return; - if (priv->type == BCM7445_DEVICE_ID) - offset = CORE_STS_OVERRIDE_GMIIP_PORT(port); - else - offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); - switch (state->interface) { case PHY_INTERFACE_MODE_RGMII: id_mode_dis = 1; @@ -582,8 +577,8 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, port_mode = EXT_REVMII; break; default: - /* all other PHYs: internal and MoCA */ - goto force_link; + /* Nothing required for all other PHYs: internal and MoCA */ + return; } /* Clear id_mode_dis bit, and the existing port mode, let @@ -605,23 +600,6 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, } reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); - -force_link: - /* Force link settings detected from the PHY */ - reg = SW_OVERRIDE; - switch (state->speed) { - case SPEED_1000: - reg |= SPDSTS_1000 << SPEED_SHIFT; - break; - case SPEED_100: - reg |= SPDSTS_100 << SPEED_SHIFT; - break; - } - - if (state->duplex == DUPLEX_FULL) - reg |= DUPLX_MODE; - - core_writel(priv, reg, offset); } static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port, @@ -684,8 +662,19 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, else offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); - reg = core_readl(priv, offset); - reg |= LINK_STS; + reg = SW_OVERRIDE | LINK_STS; + switch (speed) { + case SPEED_1000: + reg |= SPDSTS_1000 << SPEED_SHIFT; + break; + case SPEED_100: + reg |= SPDSTS_100 << SPEED_SHIFT; + break; + } + + if (duplex == DUPLEX_FULL) + reg |= DUPLX_MODE; + core_writel(priv, reg, offset); } From 981015ac60dc5e073081e17a46189ebcfffeea19 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 30 Jun 2020 11:28:18 +0100 Subject: [PATCH 0503/2056] net: dsa/bcm_sf2: move pause mode setting into mac_link_up() bcm_sf2 only appears to support pause modes on RGMII interfaces (the enable bits are in the RGMII control register.) Setup the pause modes for RGMII connections. Reviewed-by: Florian Fainelli Tested-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 062e6efad53f..bafddb35f3a9 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -587,18 +587,11 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port, reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); reg &= ~ID_MODE_DIS; reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT); - reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); reg |= port_mode; if (id_mode_dis) reg |= ID_MODE_DIS; - if (state->pause & MLO_PAUSE_TXRX_MASK) { - if (state->pause & MLO_PAUSE_TX) - reg |= TX_PAUSE_EN; - reg |= RX_PAUSE_EN; - } - reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); } @@ -662,6 +655,21 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port, else offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port); + if (interface == PHY_INTERFACE_MODE_RGMII || + interface == PHY_INTERFACE_MODE_RGMII_TXID || + interface == PHY_INTERFACE_MODE_MII || + interface == PHY_INTERFACE_MODE_REVMII) { + reg = reg_readl(priv, REG_RGMII_CNTRL_P(port)); + reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN); + + if (tx_pause) + reg |= TX_PAUSE_EN; + if (rx_pause) + reg |= RX_PAUSE_EN; + + reg_writel(priv, reg, REG_RGMII_CNTRL_P(port)); + } + reg = SW_OVERRIDE | LINK_STS; switch (speed) { case SPEED_1000: From 83d00531cbc837ab354e9ab429d49539797c7f1c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:02:24 +0100 Subject: [PATCH 0504/2056] sfc: move NIC-specific mcdi_port declarations out of common header These functions are implemented in mcdi_port.c, which will not be linked into the EF100 driver; thus their prototypes should not be visible in common header files. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 1 + drivers/net/ethernet/sfc/mcdi.h | 4 ---- drivers/net/ethernet/sfc/mcdi_port.c | 1 + drivers/net/ethernet/sfc/mcdi_port.h | 18 ++++++++++++++++++ drivers/net/ethernet/sfc/siena.c | 1 + 5 files changed, 21 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/sfc/mcdi_port.h diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index efc49869320f..a3bf9d8023d7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -10,6 +10,7 @@ #include "io.h" #include "mcdi.h" #include "mcdi_pcol.h" +#include "mcdi_port.h" #include "mcdi_port_common.h" #include "mcdi_functions.h" #include "nic.h" diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index db9746a751d4..10f064f761a5 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -354,15 +354,11 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); int efx_mcdi_wol_filter_reset(struct efx_nic *efx); int efx_mcdi_flush_rxqs(struct efx_nic *efx); -int efx_mcdi_port_probe(struct efx_nic *efx); -void efx_mcdi_port_remove(struct efx_nic *efx); int efx_mcdi_port_reconfigure(struct efx_nic *efx); -u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); void efx_mcdi_mac_start_stats(struct efx_nic *efx); void efx_mcdi_mac_stop_stats(struct efx_nic *efx); void efx_mcdi_mac_pull_stats(struct efx_nic *efx); -bool efx_mcdi_mac_check_fault(struct efx_nic *efx); enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason); int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method); int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled, diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index b807871d8f69..212ff80e923b 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -10,6 +10,7 @@ #include #include "efx.h" +#include "mcdi_port.h" #include "mcdi.h" #include "mcdi_pcol.h" #include "nic.h" diff --git a/drivers/net/ethernet/sfc/mcdi_port.h b/drivers/net/ethernet/sfc/mcdi_port.h new file mode 100644 index 000000000000..07863ddbe740 --- /dev/null +++ b/drivers/net/ethernet/sfc/mcdi_port.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2008-2013 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. 
+ */ + +#ifndef EFX_MCDI_PORT_H +#define EFX_MCDI_PORT_H + +#include "net_driver.h" + +u32 efx_mcdi_phy_get_caps(struct efx_nic *efx); +bool efx_mcdi_mac_check_fault(struct efx_nic *efx); +int efx_mcdi_port_probe(struct efx_nic *efx); +void efx_mcdi_port_remove(struct efx_nic *efx); + +#endif /* EFX_MCDI_PORT_H */ diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 6462bbe2448a..ffe193f03352 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -21,6 +21,7 @@ #include "workarounds.h" #include "mcdi.h" #include "mcdi_pcol.h" +#include "mcdi_port.h" #include "mcdi_port_common.h" #include "selftest.h" #include "siena_sriov.h" From 272e53aa5c1648bdb1a11831d4ce96c17d69d99a Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:02:56 +0100 Subject: [PATCH 0505/2056] sfc: commonise MCDI MAC stats handling Most of it was already declared in mcdi_port_common.h, so just move the implementations to mcdi_port_common.c. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi_port.c | 91 +---------------- drivers/net/ethernet/sfc/mcdi_port_common.c | 105 ++++++++++++++++++++ drivers/net/ethernet/sfc/mcdi_port_common.h | 2 + 3 files changed, 109 insertions(+), 89 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 212ff80e923b..133f8b8ec3b3 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -692,80 +692,6 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx) return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0; } -enum efx_stats_action { - EFX_STATS_ENABLE, - EFX_STATS_DISABLE, - EFX_STATS_PULL, -}; - -static int efx_mcdi_mac_stats(struct efx_nic *efx, - enum efx_stats_action action, int clear) -{ - MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); - int rc; - int change = action == EFX_STATS_PULL ? 0 : 1; - int enable = action == EFX_STATS_ENABLE ? 1 : 0; - int period = action == EFX_STATS_ENABLE ? 1000 : 0; - dma_addr_t dma_addr = efx->stats_buffer.dma_addr; - u32 dma_len = action != EFX_STATS_DISABLE ? 
- efx->num_mac_stats * sizeof(u64) : 0; - - BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); - - MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); - MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, - MAC_STATS_IN_DMA, !!enable, - MAC_STATS_IN_CLEAR, clear, - MAC_STATS_IN_PERIODIC_CHANGE, change, - MAC_STATS_IN_PERIODIC_ENABLE, enable, - MAC_STATS_IN_PERIODIC_CLEAR, 0, - MAC_STATS_IN_PERIODIC_NOEVENT, 1, - MAC_STATS_IN_PERIOD_MS, period); - MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); - - if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) - MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id); - - rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), - NULL, 0, NULL); - /* Expect ENOENT if DMA queues have not been set up */ - if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) - efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), - NULL, 0, rc); - return rc; -} - -void efx_mcdi_mac_start_stats(struct efx_nic *efx) -{ - __le64 *dma_stats = efx->stats_buffer.addr; - - dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; - - efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); -} - -void efx_mcdi_mac_stop_stats(struct efx_nic *efx) -{ - efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); -} - -#define EFX_MAC_STATS_WAIT_US 100 -#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 - -void efx_mcdi_mac_pull_stats(struct efx_nic *efx) -{ - __le64 *dma_stats = efx->stats_buffer.addr; - int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; - - dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; - efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); - - while (dma_stats[efx->num_mac_stats - 1] == - EFX_MC_STATS_GENERATION_INVALID && - attempts-- != 0) - udelay(EFX_MAC_STATS_WAIT_US); -} - int efx_mcdi_port_probe(struct efx_nic *efx) { int rc; @@ -783,24 +709,11 @@ int efx_mcdi_port_probe(struct efx_nic *efx) if (rc != 0) return rc; - /* Allocate buffer for stats */ - rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, - efx->num_mac_stats * sizeof(u64), GFP_KERNEL); - if (rc) - return rc; - netif_dbg(efx, probe, efx->net_dev, - "stats buffer at %llx (virt %p phys %llx)\n", - (u64)efx->stats_buffer.dma_addr, - efx->stats_buffer.addr, - (u64)virt_to_phys(efx->stats_buffer.addr)); - - efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1); - - return 0; + return efx_mcdi_mac_init_stats(efx); } void efx_mcdi_port_remove(struct efx_nic *efx) { efx->phy_op->remove(efx); - efx_nic_free_buffer(efx, &efx->stats_buffer); + efx_mcdi_mac_fini_stats(efx); } diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index a6a072ba46d3..e0608d0d961b 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -10,6 +10,7 @@ #include "mcdi_port_common.h" #include "efx_common.h" +#include "nic.h" int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) { @@ -520,6 +521,110 @@ int efx_mcdi_set_mac(struct efx_nic *efx) NULL, 0, NULL); } +enum efx_stats_action { + EFX_STATS_ENABLE, + EFX_STATS_DISABLE, + EFX_STATS_PULL, +}; + +static int efx_mcdi_mac_stats(struct efx_nic *efx, + enum efx_stats_action action, int clear) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN); + int rc; + int change = action == EFX_STATS_PULL ? 0 : 1; + int enable = action == EFX_STATS_ENABLE ? 1 : 0; + int period = action == EFX_STATS_ENABLE ? 1000 : 0; + dma_addr_t dma_addr = efx->stats_buffer.dma_addr; + u32 dma_len = action != EFX_STATS_DISABLE ? 
+ efx->num_mac_stats * sizeof(u64) : 0; + + BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0); + + MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr); + MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD, + MAC_STATS_IN_DMA, !!enable, + MAC_STATS_IN_CLEAR, clear, + MAC_STATS_IN_PERIODIC_CHANGE, change, + MAC_STATS_IN_PERIODIC_ENABLE, enable, + MAC_STATS_IN_PERIODIC_CLEAR, 0, + MAC_STATS_IN_PERIODIC_NOEVENT, 1, + MAC_STATS_IN_PERIOD_MS, period); + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); + + if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) + MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id); + + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), + NULL, 0, NULL); + /* Expect ENOENT if DMA queues have not been set up */ + if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues))) + efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf), + NULL, 0, rc); + return rc; +} + +void efx_mcdi_mac_start_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + + efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0); +} + +void efx_mcdi_mac_stop_stats(struct efx_nic *efx) +{ + efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0); +} + +#define EFX_MAC_STATS_WAIT_US 100 +#define EFX_MAC_STATS_WAIT_ATTEMPTS 10 + +void efx_mcdi_mac_pull_stats(struct efx_nic *efx) +{ + __le64 *dma_stats = efx->stats_buffer.addr; + int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS; + + dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID; + efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0); + + while (dma_stats[efx->num_mac_stats - 1] == + EFX_MC_STATS_GENERATION_INVALID && + attempts-- != 0) + udelay(EFX_MAC_STATS_WAIT_US); +} + +int efx_mcdi_mac_init_stats(struct efx_nic *efx) +{ + int rc; + + if (!efx->num_mac_stats) + return 0; + + /* Allocate buffer for stats */ + rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, + efx->num_mac_stats * sizeof(u64), GFP_KERNEL); + if (rc) { + netif_warn(efx, probe, efx->net_dev, + "failed to allocate DMA buffer: %d\n", rc); + return rc; + } + + netif_dbg(efx, probe, efx->net_dev, + "stats buffer at %llx (virt %p phys %llx)\n", + (u64) efx->stats_buffer.dma_addr, + efx->stats_buffer.addr, + (u64) virt_to_phys(efx->stats_buffer.addr)); + + return 0; +} + +void efx_mcdi_mac_fini_stats(struct efx_nic *efx) +{ + efx_nic_free_buffer(efx, &efx->stats_buffer); +} + /* Get physical port number (EF10 only; on Siena it is same as PF number) */ int efx_mcdi_port_get_number(struct efx_nic *efx) { diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h index b16f11265269..54c0acf8e243 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.h +++ b/drivers/net/ethernet/sfc/mcdi_port_common.h @@ -51,6 +51,8 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec); int efx_mcdi_phy_test_alive(struct efx_nic *efx); int efx_mcdi_set_mac(struct efx_nic *efx); +int efx_mcdi_mac_init_stats(struct efx_nic *efx); +void efx_mcdi_mac_fini_stats(struct efx_nic *efx); int efx_mcdi_port_get_number(struct efx_nic *efx); void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev); From 2c6c1e3cfda528f203bf5655760c508a43e0c822 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:03:21 +0100 Subject: [PATCH 0506/2056] sfc: add missing licence info to mcdi_filters.c Both the licence notice and the SPDX tag were missing from this file. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_filters.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 7b39a3aa3a1a..74ee06fe0996 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -1,3 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + #include "mcdi_filters.h" #include "mcdi.h" #include "nic.h" From 2d73515a1ce4ef8bc9ee931f2f61f0b2c88bde33 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:03:47 +0100 Subject: [PATCH 0507/2056] sfc: commonise miscellaneous efx functions Various left-over bits and pieces from efx.c that are needed by ef100. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 103 -------------------------- drivers/net/ethernet/sfc/efx.h | 26 ------- drivers/net/ethernet/sfc/efx_common.c | 102 +++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx_common.h | 36 +++++++++ 4 files changed, 138 insertions(+), 129 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 4c2d305089ce..41a2c972323e 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -593,109 +593,6 @@ int efx_net_stop(struct net_device *net_dev) return 0; } -/* Context: netif_tx_lock held, BHs disabled. */ -static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - netif_err(efx, tx_err, efx->net_dev, - "TX stuck with port_enabled=%d: resetting channels\n", - efx->port_enabled); - - efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); -} - -static int efx_set_mac_address(struct net_device *net_dev, void *data) -{ - struct efx_nic *efx = netdev_priv(net_dev); - struct sockaddr *addr = data; - u8 *new_addr = addr->sa_data; - u8 old_addr[6]; - int rc; - - if (!is_valid_ether_addr(new_addr)) { - netif_err(efx, drv, efx->net_dev, - "invalid ethernet MAC address requested: %pM\n", - new_addr); - return -EADDRNOTAVAIL; - } - - /* save old address */ - ether_addr_copy(old_addr, net_dev->dev_addr); - ether_addr_copy(net_dev->dev_addr, new_addr); - if (efx->type->set_mac_address) { - rc = efx->type->set_mac_address(efx); - if (rc) { - ether_addr_copy(net_dev->dev_addr, old_addr); - return rc; - } - } - - /* Reconfigure the MAC */ - mutex_lock(&efx->mac_lock); - efx_mac_reconfigure(efx); - mutex_unlock(&efx->mac_lock); - - return 0; -} - -/* Context: netif_addr_lock held, BHs disabled. 
*/ -static void efx_set_rx_mode(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->port_enabled) - queue_work(efx->workqueue, &efx->mac_work); - /* Otherwise efx_start_port() will do this */ -} - -static int efx_set_features(struct net_device *net_dev, netdev_features_t data) -{ - struct efx_nic *efx = netdev_priv(net_dev); - int rc; - - /* If disabling RX n-tuple filtering, clear existing filters */ - if (net_dev->features & ~data & NETIF_F_NTUPLE) { - rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); - if (rc) - return rc; - } - - /* If Rx VLAN filter is changed, update filters via mac_reconfigure. - * If rx-fcs is changed, mac_reconfigure updates that too. - */ - if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXFCS)) { - /* efx_set_rx_mode() will schedule MAC work to update filters - * when a new features are finally set in net_dev. - */ - efx_set_rx_mode(net_dev); - } - - return 0; -} - -static int efx_get_phys_port_id(struct net_device *net_dev, - struct netdev_phys_item_id *ppid) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (efx->type->get_phys_port_id) - return efx->type->get_phys_port_id(efx, ppid); - else - return -EOPNOTSUPP; -} - -static int efx_get_phys_port_name(struct net_device *net_dev, - char *name, size_t len) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - if (snprintf(name, len, "p%u", efx->port_num) >= len) - return -EINVAL; - return 0; -} - static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index e8be786582c0..e7e7d8d1a07b 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -36,13 +36,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel) __efx_rx_packet(channel); } -#define EFX_MAX_DMAQ_SIZE 4096UL -#define EFX_DEFAULT_DMAQ_SIZE 1024UL -#define EFX_MIN_DMAQ_SIZE 512UL - -#define EFX_MAX_EVQ_SIZE 16384UL -#define EFX_MIN_EVQ_SIZE 512UL - /* Maximum number of TCP segments we support for soft-TSO */ #define EFX_TSO_MAX_SEGS 100 @@ -166,10 +159,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs, void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs, unsigned int *rx_usecs, bool *rx_adaptive); -/* Dummy PHY ops for PHY drivers */ -int efx_port_dummy_op_int(struct efx_nic *efx); -void efx_port_dummy_op_void(struct efx_nic *efx); - /* Update the generic software stats in the passed stats array */ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats); @@ -196,21 +185,6 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx) } #endif -static inline void efx_schedule_channel(struct efx_channel *channel) -{ - netif_vdbg(channel->efx, intr, channel->efx->net_dev, - "channel %d scheduling NAPI poll on CPU%d\n", - channel->channel, raw_smp_processor_id()); - - napi_schedule(&channel->napi_str); -} - -static inline void efx_schedule_channel_irq(struct efx_channel *channel) -{ - channel->event_test_cpu = raw_smp_processor_id(); - efx_schedule_channel(channel); -} - static inline void efx_device_detach_sync(struct efx_nic *efx) { struct net_device *dev = efx->net_dev; diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 88ade7f41819..251e37bc7048 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -162,6 +162,76 @@ static void efx_mac_work(struct 
work_struct *data) mutex_unlock(&efx->mac_lock); } +int efx_set_mac_address(struct net_device *net_dev, void *data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct sockaddr *addr = data; + u8 *new_addr = addr->sa_data; + u8 old_addr[6]; + int rc; + + if (!is_valid_ether_addr(new_addr)) { + netif_err(efx, drv, efx->net_dev, + "invalid ethernet MAC address requested: %pM\n", + new_addr); + return -EADDRNOTAVAIL; + } + + /* save old address */ + ether_addr_copy(old_addr, net_dev->dev_addr); + ether_addr_copy(net_dev->dev_addr, new_addr); + if (efx->type->set_mac_address) { + rc = efx->type->set_mac_address(efx); + if (rc) { + ether_addr_copy(net_dev->dev_addr, old_addr); + return rc; + } + } + + /* Reconfigure the MAC */ + mutex_lock(&efx->mac_lock); + efx_mac_reconfigure(efx); + mutex_unlock(&efx->mac_lock); + + return 0; +} + +/* Context: netif_addr_lock held, BHs disabled. */ +void efx_set_rx_mode(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->port_enabled) + queue_work(efx->workqueue, &efx->mac_work); + /* Otherwise efx_start_port() will do this */ +} + +int efx_set_features(struct net_device *net_dev, netdev_features_t data) +{ + struct efx_nic *efx = netdev_priv(net_dev); + int rc; + + /* If disabling RX n-tuple filtering, clear existing filters */ + if (net_dev->features & ~data & NETIF_F_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + /* If Rx VLAN filter is changed, update filters via mac_reconfigure. + * If rx-fcs is changed, mac_reconfigure updates that too. + */ + if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXFCS)) { + /* efx_set_rx_mode() will schedule MAC work to update filters + * when a new features are finally set in net_dev. + */ + efx_set_rx_mode(net_dev); + } + + return 0; +} + /* This ensures that the kernel is kept informed (via * netif_carrier_on/off) of the link status, and also maintains the * link status's stop on the port's TX queue. @@ -650,6 +720,18 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) efx->type->fini(efx); } +/* Context: netif_tx_lock held, BHs disabled. */ +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_err(efx, tx_err, efx->net_dev, + "TX stuck with port_enabled=%d: resetting channels\n", + efx->port_enabled); + + efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); +} + /* This function will always ensure that the locks acquired in * efx_reset_down() are released. 
A failure return code indicates * that we were unable to reinitialise the hardware, and the @@ -1221,3 +1303,23 @@ const struct pci_error_handlers efx_err_handlers = { .slot_reset = efx_io_slot_reset, .resume = efx_io_resume, }; + +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->get_phys_port_id) + return efx->type->get_phys_port_id(efx, ppid); + else + return -EOPNOTSUPP; +} + +int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (snprintf(name, len, "p%u", efx->port_num) >= len) + return -EINVAL; + return 0; +} diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index 68af2af3b5da..73c355fc2590 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -18,6 +18,13 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev); void efx_fini_struct(struct efx_nic *efx); +#define EFX_MAX_DMAQ_SIZE 4096UL +#define EFX_DEFAULT_DMAQ_SIZE 1024UL +#define EFX_MIN_DMAQ_SIZE 512UL + +#define EFX_MAX_EVQ_SIZE 16384UL +#define EFX_MIN_EVQ_SIZE 512UL + void efx_link_clear_advertising(struct efx_nic *efx); void efx_link_set_wanted_fc(struct efx_nic *efx, u8); @@ -46,10 +53,15 @@ int efx_reconfigure_port(struct efx_nic *efx); int efx_try_recovery(struct efx_nic *efx); void efx_reset_down(struct efx_nic *efx, enum reset_type method); +void efx_watchdog(struct net_device *net_dev, unsigned int txqueue); int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); int efx_reset(struct efx_nic *efx, enum reset_type method); void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); +/* Dummy PHY ops for PHY drivers */ +int efx_port_dummy_op_int(struct efx_nic *efx); +void efx_port_dummy_op_void(struct efx_nic *efx); + static inline int efx_check_disabled(struct efx_nic *efx) { if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) { @@ -60,6 +72,21 @@ static inline int efx_check_disabled(struct efx_nic *efx) return 0; } +static inline void efx_schedule_channel(struct efx_channel *channel) +{ + netif_vdbg(channel->efx, intr, channel->efx->net_dev, + "channel %d scheduling NAPI poll on CPU%d\n", + channel->channel, raw_smp_processor_id()); + + napi_schedule(&channel->napi_str); +} + +static inline void efx_schedule_channel_irq(struct efx_channel *channel) +{ + channel->event_test_cpu = raw_smp_processor_id(); + efx_schedule_channel(channel); +} + #ifdef CONFIG_SFC_MCDI_LOGGING void efx_init_mcdi_logging(struct efx_nic *efx); void efx_fini_mcdi_logging(struct efx_nic *efx); @@ -69,9 +96,18 @@ static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} #endif void efx_mac_reconfigure(struct efx_nic *efx); +int efx_set_mac_address(struct net_device *net_dev, void *data); +void efx_set_rx_mode(struct net_device *net_dev); +int efx_set_features(struct net_device *net_dev, netdev_features_t data); void efx_link_status_changed(struct efx_nic *efx); unsigned int efx_xdp_max_mtu(struct efx_nic *efx); int efx_change_mtu(struct net_device *net_dev, int new_mtu); extern const struct pci_error_handlers efx_err_handlers; + +int efx_get_phys_port_id(struct net_device *net_dev, + struct netdev_phys_item_id *ppid); + +int efx_get_phys_port_name(struct net_device *net_dev, + char *name, size_t len); #endif From f7e55550a38ded95eca13e13b7e76b87e4814260 Mon Sep 17 00:00:00 2001 From: Edward 
Cree Date: Tue, 30 Jun 2020 13:11:35 +0100 Subject: [PATCH 0508/2056] sfc: commonise some MAC configuration code Refactor it a little as we go, and introduce efx_mcdi_set_mtu() which we will later use for ef100 to change MTU without touching other MAC settings. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi_port.c | 13 -------- drivers/net/ethernet/sfc/mcdi_port_common.c | 36 +++++++++++++++++++-- drivers/net/ethernet/sfc/mcdi_port_common.h | 1 + 3 files changed, 35 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 133f8b8ec3b3..98eeb404f68d 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -176,19 +176,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx) return rc; } -int efx_mcdi_port_reconfigure(struct efx_nic *efx) -{ - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; - u32 caps = (efx->link_advertising[0] ? - ethtool_linkset_to_mcdi_cap(efx->link_advertising) : - phy_cfg->forced_cap); - - caps |= ethtool_fec_caps_to_mcdi(efx->fec_config); - - return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), - efx->loopback_mode, 0); -} - static void efx_mcdi_phy_remove(struct efx_nic *efx) { struct efx_mcdi_phy_data *phy_data = efx->phy_data; diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c index e0608d0d961b..56af8b54a864 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.c +++ b/drivers/net/ethernet/sfc/mcdi_port_common.c @@ -476,6 +476,24 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx) return 0; } +int efx_mcdi_port_reconfigure(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + u32 caps = (efx->link_advertising[0] ? 
+ ethtool_linkset_to_mcdi_cap(efx->link_advertising) : + phy_cfg->forced_cap); + + caps |= ethtool_fec_caps_to_mcdi(efx->fec_config); + + return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), + efx->loopback_mode, 0); +} + +static unsigned int efx_calc_mac_mtu(struct efx_nic *efx) +{ + return EFX_MAX_FRAME_LEN(efx->net_dev->mtu); +} + int efx_mcdi_set_mac(struct efx_nic *efx) { u32 fcntl; @@ -487,8 +505,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx) ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), efx->net_dev->dev_addr); - MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, - EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); + MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx)); MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0); /* Set simple MAC filter for Siena */ @@ -521,6 +538,21 @@ int efx_mcdi_set_mac(struct efx_nic *efx) NULL, 0, NULL); } +int efx_mcdi_set_mtu(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx)); + + MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL, + SET_MAC_EXT_IN_CFG_MTU, 1); + + return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + enum efx_stats_action { EFX_STATS_ENABLE, EFX_STATS_DISABLE, diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h index 54c0acf8e243..f6f81cbeb07e 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.h +++ b/drivers/net/ethernet/sfc/mcdi_port_common.h @@ -51,6 +51,7 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec); int efx_mcdi_phy_test_alive(struct efx_nic *efx); int efx_mcdi_set_mac(struct efx_nic *efx); +int efx_mcdi_set_mtu(struct efx_nic *efx); int efx_mcdi_mac_init_stats(struct efx_nic *efx); void efx_mcdi_mac_fini_stats(struct efx_nic *efx); int efx_mcdi_port_get_number(struct efx_nic *efx); From 80a0074e6aee3657784744ed66ca2c0cc918ecf8 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:11:52 +0100 Subject: [PATCH 0509/2056] sfc: commonise efx_sync_rx_buffer() The ef100 RX path will also need to DMA-sync RX buffers. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/rx.c | 8 -------- drivers/net/ethernet/sfc/rx_common.h | 9 +++++++++ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index c73b933a9101..59a43d586967 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -40,14 +40,6 @@ #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \ EFX_RX_USR_BUF_SIZE) -static inline void efx_sync_rx_buffer(struct efx_nic *efx, - struct efx_rx_buffer *rx_buf, - unsigned int len) -{ - dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, - DMA_FROM_DEVICE); -} - static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, int len) diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h index 1672d74f30e2..207ccd8ba062 100644 --- a/drivers/net/ethernet/sfc/rx_common.h +++ b/drivers/net/ethernet/sfc/rx_common.h @@ -57,6 +57,15 @@ void efx_init_rx_buffer(struct efx_rx_queue *rx_queue, unsigned int page_offset, u16 flags); void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf); + +static inline void efx_sync_rx_buffer(struct efx_nic *efx, + struct efx_rx_buffer *rx_buf, + unsigned int len) +{ + dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len, + DMA_FROM_DEVICE); +} + void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf, unsigned int num_bufs); From 740acc15c8a52c959111a9fbad974e9b0e5b4eb7 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:12:17 +0100 Subject: [PATCH 0510/2056] sfc: commonise TSO fallback code ef100 will need this if it gets GSO skbs it can't handle (e.g. too long header length). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/tx.c | 28 ---------------------------- drivers/net/ethernet/sfc/tx_common.c | 27 +++++++++++++++++++++++++++ drivers/net/ethernet/sfc/tx_common.h | 2 +- 3 files changed, 28 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 19b58563cb78..ed20f6aef435 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -268,34 +268,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, } #endif /* EFX_USE_PIO */ -/* - * Fallback to software TSO. - * - * This is used if we are unable to send a GSO packet through hardware TSO. - * This should only ever happen due to per-queue restrictions - unsupported - * packets should first be filtered by the feature flags. - * - * Returns 0 on success, error code otherwise. - */ -static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, - struct sk_buff *skb) -{ - struct sk_buff *segments, *next; - - segments = skb_gso_segment(skb, 0); - if (IS_ERR(segments)) - return PTR_ERR(segments); - - dev_consume_skb_any(skb); - - skb_list_walk_safe(segments, skb, next) { - skb_mark_not_on_list(skb); - efx_enqueue_skb(tx_queue, skb); - } - - return 0; -} - /* * Add a socket buffer to a TX queue * diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 70876df1da69..9a005e7c2c68 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -405,3 +405,30 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) return max_descs; } + +/* + * Fallback to software TSO. + * + * This is used if we are unable to send a GSO packet through hardware TSO. 
+ * This should only ever happen due to per-queue restrictions - unsupported + * packets should first be filtered by the feature flags. + * + * Returns 0 on success, error code otherwise. + */ +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct sk_buff *segments, *next; + + segments = skb_gso_segment(skb, 0); + if (IS_ERR(segments)) + return PTR_ERR(segments); + + dev_consume_skb_any(skb); + + skb_list_walk_safe(segments, skb, next) { + skb_mark_not_on_list(skb); + efx_enqueue_skb(tx_queue, skb); + } + + return 0; +} diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index 99cf7ce2f36c..82e2e291317d 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -38,5 +38,5 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); - +int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); #endif From 93841000ed9fc86dd9615a1048abb31c07792e0b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:12:49 +0100 Subject: [PATCH 0511/2056] sfc: remove duplicate declaration of efx_enqueue_skb_tso() Define it in nic_common.h, even though the ef100 driver will have a different implementation backing it (actually a WARN_ON_ONCE as it should never get called by ef100. But it needs to still exist because common TX path code references it). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/nic.h | 3 --- drivers/net/ethernet/sfc/nic_common.h | 3 +++ drivers/net/ethernet/sfc/tx.h | 3 --- drivers/net/ethernet/sfc/tx_common.c | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index c24dc55532c2..724e2776b585 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -306,9 +306,6 @@ extern const struct efx_nic_type efx_hunt_a0_vf_nic_type; int falcon_probe_board(struct efx_nic *efx, u16 revision_info); -int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - bool *data_mapped); - /* Falcon/Siena queue operations */ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue); void efx_farch_tx_init(struct efx_tx_queue *tx_queue); diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index e90ce85359cb..8d0d163afc0d 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -110,6 +110,9 @@ static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue) efx_nic_tx_is_empty(partner); } +int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, + bool *data_mapped); + /* Decide whether to push a TX descriptor to the NIC vs merely writing * the doorbell. This can reduce latency when we are adding a single * descriptor to an empty queue, but is otherwise pointless. 
Further, diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h index e04d5ddeb32c..a3cf06c5570d 100644 --- a/drivers/net/ethernet/sfc/tx.h +++ b/drivers/net/ethernet/sfc/tx.h @@ -18,7 +18,4 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue, u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, size_t len); -int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb, - bool *data_mapped); - #endif /* EFX_TX_H */ diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 9a005e7c2c68..6ac19daa891a 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -10,7 +10,7 @@ #include "net_driver.h" #include "efx.h" -#include "nic.h" +#include "nic_common.h" #include "tx_common.h" static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) From e7a256858f5fb58949c6f03c52e70cc98a4ad542 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:13:15 +0100 Subject: [PATCH 0512/2056] sfc: factor out efx_tx_tso_header_length() and understand encapsulation ef100 will need to check this against NIC limits. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/tx_common.c | 17 +++++++++++++++-- drivers/net/ethernet/sfc/tx_common.h | 1 + 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 6ac19daa891a..2a058b76d1f0 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -311,6 +311,20 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, return buffer; } +int efx_tx_tso_header_length(struct sk_buff *skb) +{ + size_t header_len; + + if (skb->encapsulation) + header_len = skb_inner_transport_header(skb) - + skb->data + + (inner_tcp_hdr(skb)->doff << 2u); + else + header_len = skb_transport_header(skb) - skb->data + + (tcp_hdr(skb)->doff << 2u); + return header_len; +} + /* Map all data from an SKB for DMA and create descriptors on the queue. */ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count) @@ -339,8 +353,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, /* For TSO we need to put the header in to a separate * descriptor. Map this separately if necessary. */ - size_t header_len = skb_transport_header(skb) - skb->data + - (tcp_hdr(skb)->doff << 2u); + size_t header_len = efx_tx_tso_header_length(skb); if (header_len != len) { tx_queue->tso_long_headers++; diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index 82e2e291317d..cbe995b024a6 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -34,6 +34,7 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, size_t len); +int efx_tx_tso_header_length(struct sk_buff *skb); int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count); From 20e1026cbed4af3fcf15779ec3edecf990ee8d4e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:13:47 +0100 Subject: [PATCH 0513/2056] sfc: move definition of EFX_MC_STATS_GENERATION_INVALID Saves a whole #include from nic.c. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_port_common.h | 2 -- drivers/net/ethernet/sfc/nic.c | 1 - drivers/net/ethernet/sfc/nic_common.h | 2 ++ 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h index f6f81cbeb07e..9dbeee83266f 100644 --- a/drivers/net/ethernet/sfc/mcdi_port_common.h +++ b/drivers/net/ethernet/sfc/mcdi_port_common.h @@ -28,8 +28,6 @@ struct efx_mcdi_phy_data { u32 forced_cap; }; -#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) - int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg); void efx_link_set_advertising(struct efx_nic *efx, const unsigned long *advertising); diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index ac6630510324..d994d136bb03 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -20,7 +20,6 @@ #include "farch_regs.h" #include "io.h" #include "workarounds.h" -#include "mcdi_port_common.h" #include "mcdi_pcol.h" /************************************************************************** diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 8d0d163afc0d..197ecac5e005 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -263,6 +263,8 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); size_t efx_nic_get_regs_len(struct efx_nic *efx); void efx_nic_get_regs(struct efx_nic *efx, void *buf); +#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1)) + size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count, const unsigned long *mask, u8 *names); int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest); From 937aa3ae4d14e2184b5ce5a801003723712e96bd Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:14:13 +0100 Subject: [PATCH 0514/2056] sfc: initialise max_[tx_]channels in efx_init_channels() Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_channels.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 2c3510b0524a..2f9db219513a 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -566,6 +566,9 @@ int efx_init_channels(struct efx_nic *efx) efx->interrupt_mode = min(efx->type->min_interrupt_mode, interrupt_mode); + efx->max_channels = EFX_MAX_CHANNELS; + efx->max_tx_channels = EFX_MAX_CHANNELS; + return 0; } From d4adc5162b9731df8ad95808c088e8ca4a5b1aaa Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:14:45 +0100 Subject: [PATCH 0515/2056] sfc: commonise efx->[rt]xq_entries initialisation Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 1 - drivers/net/ethernet/sfc/efx_common.c | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 41a2c972323e..028d826ab147 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -385,7 +385,6 @@ static int efx_probe_all(struct efx_nic *efx) rc = -EINVAL; goto fail3; } - efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; #ifdef CONFIG_SFC_SRIOV rc = efx->type->vswitching_probe(efx); diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 251e37bc7048..822e9e147404 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1035,6 +1035,9 @@ int efx_init_struct(struct efx_nic *efx, INIT_WORK(&efx->mac_work, efx_mac_work); init_waitqueue_head(&efx->flush_wq); + efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; + efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; + efx->mem_bar = UINT_MAX; rc = efx_init_channels(efx); From f07cb4128abb2bbb69bdc7d37a3d69147cb13dcd Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:15:10 +0100 Subject: [PATCH 0516/2056] sfc: commonise initialisation of efx->vport_id Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 -- drivers/net/ethernet/sfc/efx_common.c | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a3bf9d8023d7..5faf2f0e4d62 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -554,8 +554,6 @@ static int efx_ef10_probe(struct efx_nic *efx) efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - efx->vport_id = EVB_PORT_ID_ASSIGNED; - /* In case we're recovering from a crash (kexec), we want to * cancel any outstanding request by the previous user of this * function. We send a special message using the least diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 822e9e147404..a2f744377aaa 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1018,6 +1018,7 @@ int efx_init_struct(struct efx_nic *efx, efx->type->rx_ts_offset - efx->type->rx_prefix_size; INIT_LIST_HEAD(&efx->rss_context.list); mutex_init(&efx->rss_lock); + efx->vport_id = EVB_PORT_ID_ASSIGNED; spin_lock_init(&efx->stats_lock); efx->vi_stride = EFX_DEFAULT_VI_STRIDE; efx->num_mac_stats = MC_CMD_MAC_NSTATS; From c72ae701ee34385c9d4a17b59d127ca6a428300c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 30 Jun 2020 13:15:34 +0100 Subject: [PATCH 0517/2056] sfc: don't call tx_remove if there isn't one EF100 won't have an efx->type->tx_remove method, because there's nothing for it to do. So make the call conditional. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/nic_common.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 197ecac5e005..fd474d9e55e4 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -163,7 +163,8 @@ static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue) } static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) { - tx_queue->efx->type->tx_remove(tx_queue); + if (tx_queue->efx->type->tx_remove) + tx_queue->efx->type->tx_remove(tx_queue); } static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) {
From a6ed3ebca49b62d7a917287b9986feff4e9fa7b1 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 30 Jun 2020 15:27:46 +0100 Subject: [PATCH 0518/2056] net/tls: fix sign extension issue when left shifting u16 value Left shifting the u16 value promotes it to an int, and the result then gets sign-extended to a u64. If len << 16 is greater than 0x7fffffff then the upper bits get set to 1 because of the implicit sign extension. Fix this by casting len to u64 before shifting it. Addresses-Coverity: ("integer handling issues") Fixes: ed9b7646b06a ("net/tls: Add asynchronous resync") Signed-off-by: Colin Ian King Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- include/net/tls.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/tls.h b/include/net/tls.h index c875c0a445a6..e5dac7e74e79 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -637,7 +637,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | - (len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); + ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); rx_ctx->resync_async->loglen = 0; }
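For illustration only (not part of the patch above): a minimal userspace sketch of the promotion behaviour the cast fixes. The value 0x9000 and the demo program are hypothetical; only the widening cast mirrors the tls.h change.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t len = 0x9000;               /* any len whose << 16 exceeds 0x7fffffff */
		uint64_t bad = len << 16;            /* len is promoted to (signed) int; with 32-bit int
		                                      * the result goes negative and converts to
		                                      * 0xffffffff90000000 (the overflowing signed shift
		                                      * is also formally undefined in ISO C) */
		uint64_t good = (uint64_t)len << 16; /* widen first: 0x0000000090000000 */

		printf("bad=%#llx good=%#llx\n",
		       (unsigned long long)bad, (unsigned long long)good);
		return 0;
	}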
From 6bad912b7e5ab51c23d8fa8362ca2d4ceeebdb74 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 30 Jun 2020 16:38:26 +0200 Subject: [PATCH 0519/2056] mptcp: do nonce initialization at subflow creation time This cleans up the code a bit, reduces the number of hooks and indirect calls required, and allows better error reporting from __mptcp_subflow_connect(). Signed-off-by: Paolo Abeni Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 54 +++++++++++++++++---------------------- 1 file changed, 20 insertions(+), 34 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 548f9e347ff5..664aa9158363 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -29,34 +29,6 @@ static void SUBFLOW_REQ_INC_STATS(struct request_sock *req, MPTCP_INC_STATS(sock_net(req_to_sk(req)), field); } -static int subflow_rebuild_header(struct sock *sk) -{ - struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); - int local_id; - - if (subflow->request_join && !subflow->local_nonce) { - struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn; - - pr_debug("subflow=%p", sk); - - do { - get_random_bytes(&subflow->local_nonce, sizeof(u32)); - } while (!subflow->local_nonce); - - if (subflow->local_id) - goto out; - - local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk); - if (local_id < 0) - return -EINVAL; - - subflow->local_id = local_id; - } - -out: - return subflow->icsk_af_ops->rebuild_header(sk); -} - static void subflow_req_destructor(struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); @@ -984,7 +956,9 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex, struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_subflow_context *subflow; struct sockaddr_storage addr; + int local_id = loc->id; struct socket *sf; + struct sock *ssk; u32 remote_token; int addrlen; int err; @@ -996,7 +970,20 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex, if (err) return err; - subflow = mptcp_subflow_ctx(sf->sk); + ssk = sf->sk; + subflow = mptcp_subflow_ctx(ssk); + do { + get_random_bytes(&subflow->local_nonce, sizeof(u32)); + } while (!subflow->local_nonce); + + if (!local_id) { + err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk); + if (err < 0) + goto failed; + + local_id = err; + } + subflow->remote_key = msk->remote_key; subflow->local_key = msk->local_key; subflow->token = msk->token; @@ -1007,15 +994,16 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex, if (loc->family == AF_INET6) addrlen = sizeof(struct sockaddr_in6); #endif - sf->sk->sk_bound_dev_if = ifindex; + ssk->sk_bound_dev_if = ifindex; err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen); if (err) goto failed; mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); - pr_debug("msk=%p remote_token=%u", msk, remote_token); + pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token, + local_id); subflow->remote_token = remote_token; - subflow->local_id = loc->id; + subflow->local_id = local_id; subflow->request_join = 1; subflow->request_bkup = 1; mptcp_info2sockaddr(remote, &addr); @@ -1288,7 +1276,6 @@ void __init mptcp_subflow_init(void) subflow_specific.conn_request = subflow_v4_conn_request; subflow_specific.syn_recv_sock = subflow_syn_recv_sock; subflow_specific.sk_rx_dst_set = subflow_finish_connect; - subflow_specific.rebuild_header = subflow_rebuild_header; #if IS_ENABLED(CONFIG_MPTCP_IPV6) subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops; @@ -1298,7 +1285,6 @@ void __init mptcp_subflow_init(void) subflow_v6_specific.conn_request = subflow_v6_conn_request; subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock; subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect; - subflow_v6_specific.rebuild_header = subflow_rebuild_header; subflow_v6m_specific = subflow_v6_specific; subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit; From 5831b33362fdb60d56314a237b764e24ee1a538e Mon Sep 17
00:00:00 2001 From: Colin Ian King Date: Tue, 30 Jun 2020 16:16:46 +0100 Subject: [PATCH 0520/2056] net/mlx5e: fix memory leak of tls The error return path when create_singlethread_workqueue fails currently does not kfree tls and leads to a memory leak. Fix this by kfree'ing tls before returning -ENOMEM. Addresses-Coverity: ("Resource leak") Fixes: 1182f3659357 ("net/mlx5e: kTLS, Add kTLS RX HW offload support") Signed-off-by: Colin Ian King Reviewed-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c index 99beb928feff..fee991f5ee7c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c @@ -232,8 +232,10 @@ int mlx5e_tls_init(struct mlx5e_priv *priv) return -ENOMEM; tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx"); - if (!tls->rx_wq) + if (!tls->rx_wq) { + kfree(tls); return -ENOMEM; + } priv->tls = tls; return 0; From ff91e9292fc5aafd9ee1dc44c03cff69a3b0f39f Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Tue, 30 Jun 2020 09:49:33 -0700 Subject: [PATCH 0521/2056] tcp: call tcp_ack_tstamp() when not fully acked When skb is coalesced tcp_ack_tstamp() still needs to be called when not fully acked in tcp_clean_rtx_queue(), otherwise SCM_TSTAMP_ACK timestamps may never be fired. Since the original patch series had dependent commits, this patch fixes the issue instead of reverting by restoring calls to tcp_ack_tstamp() when skb is not fully acked. Fixes: fdb7eb21ddd3 ("tcp: stamp SCM_TSTAMP_ACK later in tcp_clean_rtx_queue()") Signed-off-by: Yousuk Seung Signed-off-by: Willem de Bruijn Signed-off-by: Eric Dumazet Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8479b84f0a7f..12c26c9565b7 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3172,8 +3172,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack, if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) tp->snd_up = tp->snd_una; - if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) - flag |= FLAG_SACK_RENEGING; + if (skb) { + tcp_ack_tstamp(sk, skb, prior_snd_una); + if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) + flag |= FLAG_SACK_RENEGING; + } if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) { seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); From ec23eb705620234421fd48fc2382490fcfbafc37 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 29 Jun 2020 17:47:58 -0700 Subject: [PATCH 0522/2056] tools/bpftool: Allow substituting custom vmlinux.h for the build In some build contexts (e.g., Travis CI build for outdated kernel), vmlinux.h, generated from available kernel, doesn't contain all the types necessary for BPF program compilation. For such set up, the most maintainable way to deal with this problem is to keep pre-generated (almost up-to-date) vmlinux.h checked in and use it for compilation purposes. bpftool after that can deal with kernel missing some of the features in runtime with no problems. To that effect, allow to specify path to custom vmlinux.h to bpftool's Makefile with VMLINUX_H variable. 
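As an illustration only (not part of the patch), pointing the build at a pre-generated header could look something like: make -C tools/bpf/bpftool VMLINUX_H=/path/to/pregenerated/vmlinux.h, where the path is hypothetical; when VMLINUX_H is unset, the Makefile keeps generating vmlinux.h from VMLINUX_BTF as before.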
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630004759.521530-1-andriin@fb.com --- tools/bpf/bpftool/Makefile | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 8c6563e56ffc..273da1615503 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -122,20 +122,24 @@ BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstr BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o) OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o -VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \ +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ ../../../vmlinux \ /sys/kernel/btf/vmlinux \ /boot/vmlinux-$(shell uname -r) -VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) -ifneq ($(VMLINUX_BTF),) +ifneq ($(VMLINUX_BTF)$(VMLINUX_H),) ifeq ($(feature-clang-bpf-co-re),1) BUILD_BPF_SKELS := 1 $(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP) +ifeq ($(VMLINUX_H),) $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@ +else + $(Q)cp "$(VMLINUX_H)" $@ +endif $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) $(QUIET_CLANG)$(CLANG) \ From ca4db6389d611eee2eb7c1dfe710b62d8ea06772 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 29 Jun 2020 17:47:59 -0700 Subject: [PATCH 0523/2056] selftests/bpf: Allow substituting custom vmlinux.h for selftests build Similarly to bpftool Makefile, allow to specify custom location of vmlinux.h to be used during the build. This allows simpler testing setups with checked-in pre-generated vmlinux.h. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630004759.521530-2-andriin@fb.com --- tools/testing/selftests/bpf/Makefile | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 22aaec74ea0a..1f9c696b3edf 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -134,12 +134,12 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ) $(call msg,CC,,$@) $(CC) -c $(CFLAGS) -o $@ $< -VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \ +VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \ ../../../../vmlinux \ /sys/kernel/btf/vmlinux \ /boot/vmlinux-$(shell uname -r) -VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) +VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS)))) $(OUTPUT)/runqslower: $(BPFOBJ) $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \ @@ -182,8 +182,13 @@ $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): mkdir -p $@ $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR) +ifeq ($(VMLINUX_H),) $(call msg,GEN,,$@) $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@ +else + $(call msg,CP,,$@) + cp "$(VMLINUX_H)" $@ +endif # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. 
This fixes "missing" files on some architectures/distros, From ab81e23cf77905896a5e51422eb58e8763507b85 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:07 +0300 Subject: [PATCH 0524/2056] net: qed: correct existing SPDX tags QLogic QED drivers source code is dual licensed under GPL-2.0/BSD-3-Clause. Correct already existing but wrong SPDX tags to match the actual license. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 3 ++- drivers/net/ethernet/qlogic/qed/qed_debug.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_debug.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 3 ++- drivers/net/ethernet/qlogic/qed/qed_selftest.h | 3 ++- include/linux/qed/fcoe_common.h | 2 +- include/linux/qed/qed_fcoe_if.h | 3 ++- 7 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index a0acb94d65f0..cee566faba2f 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,5 @@ -# SPDX-License-Identifier: GPL-2.0 +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 81e8fbe4a05b..8b14e6852daf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index edf99d296bd1..685696878ec2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 6c16158d8090..56b7567d7a60 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1,4 +1,5 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index ad00d082fec8..d8121fd39bc1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -1,4 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + #ifndef _QED_SELFTEST_API_H #define _QED_SELFTEST_API_H #include diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 98cfc195abe2..a669d7d84284 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation */ diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 46082480a2c3..65d0317ef67e 100644 --- 
a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -1,4 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ + #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H #include From 1f4d4ed6acc5b74974fa038e5e9d2e19f3b532f3 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:08 +0300 Subject: [PATCH 0525/2056] net: qed: convert to SPDX License Identifiers QLogic QED drivers source code is dual licensed under GPL-2.0/BSD-3-Clause. Remove all the boilerplates in the existing code and replace it with the correct SPDX tag. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_dev.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_hw.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_hw.h | 29 +----------------- .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 29 +----------------- .../net/ethernet/qlogic/qed/qed_init_ops.c | 29 +----------------- .../net/ethernet/qlogic/qed/qed_init_ops.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_int.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_int.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_l2.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_l2.h | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_main.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 30 ++----------------- .../net/ethernet/qlogic/qed/qed_reg_addr.h | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_roce.c | 30 ++----------------- drivers/net/ethernet/qlogic/qed/qed_roce.h | 30 ++----------------- .../net/ethernet/qlogic/qed/qed_selftest.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_sp.h | 29 +----------------- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 29 +----------------- 
drivers/net/ethernet/qlogic/qed/qed_vf.c | 29 +----------------- drivers/net/ethernet/qlogic/qed/qed_vf.h | 29 +----------------- include/linux/qed/common_hsi.h | 29 +----------------- include/linux/qed/eth_common.h | 29 +----------------- include/linux/qed/iscsi_common.h | 29 +----------------- include/linux/qed/iwarp_common.h | 29 +----------------- include/linux/qed/qed_chain.h | 29 +----------------- include/linux/qed/qed_eth_if.h | 29 +----------------- include/linux/qed/qed_if.h | 29 +----------------- include/linux/qed/qed_iov_if.h | 29 +----------------- include/linux/qed/qed_iscsi_if.h | 29 +----------------- include/linux/qed/qed_ll2_if.h | 29 +----------------- include/linux/qed/qed_rdma_if.h | 30 ++----------------- include/linux/qed/qede_rdma.h | 30 ++----------------- include/linux/qed/rdma_common.h | 29 +----------------- include/linux/qed/roce_common.h | 29 +----------------- include/linux/qed/storage_common.h | 29 +----------------- include/linux/qed/tcp_common.h | 29 +----------------- 60 files changed, 70 insertions(+), 1680 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a49743d56b9c..52130dcbfe4c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 08ba9d54ab63..3985dd746ca2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index ce08ae8d8498..05b28919653a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 5c6a276f69ac..a72523298307 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 01f253ea4b22..537d60de4e2b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 3aa51374e727..fa7c10e8aa7a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index eb4808b3bf67..1f2122c699cc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 4c7fa391fd33..91d6cdf4abe8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 027a76ac839a..bf324736c7cb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_FCOE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index f00460d00cab..ebbca7d999a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 5fa251489536..bdbb8fa8d399 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index f5b109b04b66..68f44b747565 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 2f1049b0b93a..72ff2e5c5f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 5a6e4ac4fef4..74c425640d67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index e9e8ade50ed3..cf33c41e0952 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b7b974f0ef21..be336d47c934 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index e09db3386367..6fca82f6c7fa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 7245a615517a..164b4d953b67 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 225c75b02a06..8b6518a31b7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 5409a2da6106..7fac39744275 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index c1b2057d23b8..83ca05cd74d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_IWARP_H #define _QED_IWARP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 29810a1aa210..750098e60c64 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 7127d5aaac42..dce39f5a87ca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_L2_H #define _QED_L2_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 4afd8572ada6..47da6d4e226c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 288642d526b7..c0d13bd6c3a6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 11367a248d55..d00335cc145b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9624616806e7..c17b140aa7ae 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5750b4c5ef63..351a13215ddd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index ffac4ac87394..d01f91f7f661 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 49c4e75b15b1..2731c392a3f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 3e613058e225..f10ddf9d1704 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include "qed.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 19c0c8864da1..9a3541f159dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index 1e69d5bb0a70..a20397a395cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_RDMA_H #define _QED_RDMA_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3dcb6ff58e73..bdfd90748042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 7271dd7166e5..1e03d66e33d1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index f801f39fde61..9178904bf0e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_ROCE_H #define _QED_ROCE_H #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index cf1d4476f9d8..d24ee1ea8d3c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index b7b4fbbbccfe..4f646e101074 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 900bc603e30a..23d630b37199 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 790c28d696a0..18c59981cab7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 20679fd4204b..6d3c6d4f6308 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 368e88565783..43dfaf410332 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_SRIOV_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index adc2c8f3d48e..c800f8812492 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,33 +1,6 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 033409db86ae..60d2bb64e65f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_VF_H diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 2c4737e6694a..294d01eae5cb 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _COMMON_HSI_H diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 95f5fd615852..a9566ef3c2ce 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 2f0a771a9176..7ca89fb9247f 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index c6cfd39cd910..23583e644257 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __IWARP_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 6d15040c642c..e6e25197f7cb 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index a1310482c4ed..7803dedbcb52 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8cb76405cbce..4a36608ff3a8 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_IF_H diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index ac2e6a3199a3..c2ca8196def9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index d0df1bec5357..89912c6c440c 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 1313c34d9a68..79cac277e38b 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 2d3ddd2b85e0..041a5b005a82 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QED_RDMA_IF_H #define _QED_RDMA_IF_H #include diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 5a00c7a473bf..20ed7f2c55bb 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef QEDE_ROCE_H #define QEDE_ROCE_H diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 480a57eb36cc..3e3f01136c06 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 473fba76aa77..89065f023813 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 9a973ffbbff5..34f069c79067 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 4a4845193539..925e7cb7a582 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,33 +1,6 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef __TCP_COMMON__
From 663eacd899ac131dcfc2279184c1ce3c4fd2815f Mon Sep 17 00:00:00 2001
From: Alexander Lobakin
Date: Mon, 29 Jun 2020 14:05:09 +0300
Subject: [PATCH 0526/2056] net: qed: update copyright years

Set the actual copyright holder and years in all qed source files.

Signed-off-by: Alexander Lobakin
Signed-off-by: Igor Russkikh
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/qlogic/qed/Makefile | 1 +
 drivers/net/ethernet/qlogic/qed/qed.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_cxt.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_debug.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_debug.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dev.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hw.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_hw.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_int.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_int.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_l2.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_l2.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ll2.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_main.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mcp.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ooo.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_ptp.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_rdma.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_roce.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_roce.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_selftest.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_selftest.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_sp.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_spq.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_sriov.c | 1 +
 drivers/net/ethernet/qlogic/qed/qed_sriov.h | 1 +
 drivers/net/ethernet/qlogic/qed/qed_vf.c | 1 +
 include/linux/qed/common_hsi.h | 1 +
 include/linux/qed/eth_common.h | 1 +
 include/linux/qed/fcoe_common.h | 1 +
 include/linux/qed/iscsi_common.h | 1 +
 include/linux/qed/iwarp_common.h | 1 +
 include/linux/qed/qed_chain.h | 1 +
 include/linux/qed/qed_eth_if.h | 1 +
 include/linux/qed/qed_fcoe_if.h | 1 +
 include/linux/qed/qed_if.h | 1 +
 include/linux/qed/qed_iov_if.h | 1 +
 include/linux/qed/qed_iscsi_if.h | 1 +
 include/linux/qed/qed_ll2_if.h | 1 +
 include/linux/qed/qed_rdma_if.h | 1 +
 include/linux/qed/qede_rdma.h | 1 +
 include/linux/qed/rdma_common.h | 1 +
 include/linux/qed/roce_common.h | 1 +
 include/linux/qed/storage_common.h | 1 +
 include/linux/qed/tcp_common.h | 1 +
 66 files changed, 66 insertions(+)

diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index cee566faba2f..4176bbf2a22b 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +# Copyright (c) 2019-2020 Marvell International Ltd. obj-$(CONFIG_QED) := qed.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 52130dcbfe4c..68fe745c9b99 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 3985dd746ca2..e72d25854d79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 05b28919653a..8b64495f8745 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_CXT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index a72523298307..9f16a3a66007 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd.
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index 537d60de4e2b..ba5f3927034c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DCBX_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 8b14e6852daf..45cbe1c87106 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h index 685696878ec2..e71af82d3200 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.h +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DEBUGFS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index fa7c10e8aa7a..74142896b707 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 1f2122c699cc..395d4932c262 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_DEV_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index 91d6cdf4abe8..a10e57bba6b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index bf324736c7cb..13c5ccfe06e7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_FCOE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index ebbca7d999a4..d7e70625bdc4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_HSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index bdbb8fa8d399..e8c777f30207 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 68f44b747565..6a61b88b544d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_HW_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 72ff2e5c5f24..1eb48ea80484 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 74c425640d67..736702fb81b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index cf33c41e0952..a573c8921982 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_INIT_OPS_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index be336d47c934..297983d66e2f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 6fca82f6c7fa..aea04b121586 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_INT_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 164b4d953b67..f9d9e21cb66b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index 8b6518a31b7e..d6af7ea19bbb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 7fac39744275..b7a0a717ee6d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 83ca05cd74d7..c3872cd9457f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IWARP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 750098e60c64..03dc804c92a9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index dce39f5a87ca..8eceeebb1a7b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_L2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 47da6d4e226c..cce6fd27c042 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index c0d13bd6c3a6..8356c7d4a193 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_LL2_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d00335cc145b..0cd6b8bf023a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index c17b140aa7ae..25433d162a54 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 351a13215ddd..d77b4c262cff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_MCP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 56b7567d7a60..1dd01e0373ab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #include #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index d01f91f7f661..88353aa404dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index 2731c392a3f4..3a7e1b59d6fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_OOO_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index f10ddf9d1704..3bd2f8c961c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 9a3541f159dc..59d916693654 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index a20397a395cf..fba43adbb68e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_RDMA_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index bdfd90748042..9db22be42476 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef REG_ADDR_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 1e03d66e33d1..d5db07db65b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h index 9178904bf0e9..3a4a2d72a826 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.h +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_ROCE_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c index d24ee1ea8d3c..6e70781ab87c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.c +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index d8121fd39bc1..e27dd9a4547e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_SELFTEST_API_H #define _QED_SELFTEST_API_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4f646e101074..35539e951bee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_SP_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 23d630b37199..745d76d13732 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 18c59981cab7..ee89a8f4f585 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 6d3c6d4f6308..fcf4d82da161 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 43dfaf410332..552892c45670 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_SRIOV_H diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index c800f8812492..72a38d53d33f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 294d01eae5cb..977807e1be53 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _COMMON_HSI_H diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index a9566ef3c2ce..cd1207ad4ada 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index a669d7d84284..68eda1c21cde 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __FCOE_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 7ca89fb9247f..157019f716f1 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index 23583e644257..14f9e4c0ddd9 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __IWARP_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index e6e25197f7cb..92cdc79e5019 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_CHAIN_H diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 7803dedbcb52..812a4d751163 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 65d0317ef67e..16752eca5cbd 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4a36608ff3a8..5ca081cd2ed9 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IF_H diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index c2ca8196def9..8e31a28e51b9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 89912c6c440c..04180d9af560 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 79cac277e38b..2f64ed79cee9 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 041a5b005a82..f464d85e88a4 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_RDMA_IF_H diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 20ed7f2c55bb..072da2f6da37 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef QEDE_ROCE_H diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 3e3f01136c06..bab078b25834 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 89065f023813..ccddd7a96b67 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 34f069c79067..91896e8793bf 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 925e7cb7a582..2b2c87d10e0a 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __TCP_COMMON__ From 090efe00ab077a669ff65a48b0b749723979b228 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:10 +0300 Subject: [PATCH 0527/2056] net: qede: correct existing SPDX tags QLogic QED drivers source code is dual licensed under GPL-2.0/BSD-3-Clause. Correct already existing but wrong SPDX tags to match the actual license. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/Makefile | 3 ++- drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 3fc91d12413f..1d26ffc82290 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,4 +1,5 @@ -# SPDX-License-Identifier: GPL-2.0-only +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) + obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c index e6e844a8cbc7..f6b7ee51e056 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c +++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015 QLogic Corporation */ From 7268f33e5504c2db337ce5a375b44b70ec85a451 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:11 +0300 Subject: [PATCH 0528/2056] net: qede: convert to SPDX License Identifiers QLogic QED drivers source code is dual licensed under GPL-2.0/BSD-3-Clause. Remove all the boilerplates in the existing code and replace it with the correct SPDX tag. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 30 ++----------------- .../net/ethernet/qlogic/qede/qede_ethtool.c | 30 ++----------------- .../net/ethernet/qlogic/qede/qede_filter.c | 30 ++----------------- drivers/net/ethernet/qlogic/qede/qede_fp.c | 30 ++----------------- drivers/net/ethernet/qlogic/qede/qede_main.c | 30 ++----------------- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 30 ++----------------- drivers/net/ethernet/qlogic/qede/qede_ptp.h | 30 ++----------------- drivers/net/ethernet/qlogic/qede/qede_rdma.c | 30 ++----------------- 8 files changed, 16 insertions(+), 224 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 8857da1208d7..8567f295a12f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QEDE_H_ #define _QEDE_H_ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 24cc68391ac4..26acfb380dc3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index fe72bb6c9455..e03d6a957ceb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 7598ebe0962a..c21d4c53121b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 29e285430f99..01c20b2d81e2 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index cd5841a9415e..797ce187d683 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. 
- * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include "qede_ptp.h" #define QEDE_PTP_TX_TIMEOUT (2 * HZ) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 89c7f3cf3ee2..00ff981efa66 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -1,34 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #ifndef _QEDE_PTP_H_ #define _QEDE_PTP_H_ diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 668ccc9d49f8..2228f1d1166f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -1,34 +1,8 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ + #include #include #include From c4fad2a5323d991eb80fbd1197e9bb57162eb776 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 29 Jun 2020 14:05:12 +0300 Subject: [PATCH 0529/2056] net: qede: update copyright years Set the actual copyright holder and years in all qede source files. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/Makefile | 1 + drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 5 +++-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 1 + drivers/net/ethernet/qlogic/qede/qede_filter.c | 1 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 1 + drivers/net/ethernet/qlogic/qede/qede_main.c | 1 + drivers/net/ethernet/qlogic/qede/qede_ptp.c | 1 + drivers/net/ethernet/qlogic/qede/qede_ptp.h | 1 + drivers/net/ethernet/qlogic/qede/qede_rdma.c | 1 + 10 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 1d26ffc82290..a6e8d9fc8832 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +# Copyright (c) 2019-2020 Marvell International Ltd. obj-$(CONFIG_QEDE) := qede.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 8567f295a12f..591dd4051d06 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QEDE_H_ diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c index f6b7ee51e056..2763369bbc61 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c +++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver -* Copyright (c) 2015 QLogic Corporation -*/ + * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ #include #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 26acfb380dc3..d3fc7403d095 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index e03d6a957ceb..db17a675bd02 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index c21d4c53121b..1c4ece0713f8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 01c20b2d81e2..8cd27f8f1b3a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 797ce187d683..60a29a937a94 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include "qede_ptp.h" diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 00ff981efa66..1db0f021c645 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qede NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QEDE_PTP_H_ diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 2228f1d1166f..769ec2f4d0b7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #include From e0cdac65ba269767d5531cc40ab7fe858e2650d9 Mon Sep 17 00:00:00 2001 From: Nirranjan Kirubaharan Date: Mon, 29 Jun 2020 17:25:13 +0530 Subject: [PATCH 0530/2056] cxgb4vf: configure ports accessible by the VF Find ports accessible by the VF, based on the index of the mac address stored for the VF in the adapter. If no mac address is stored for the VF, use the port mask provided by firmware. Signed-off-by: Nirranjan Kirubaharan Signed-off-by: David S. Miller --- .../ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 47 +++++++++++++++---- .../ethernet/chelsio/cxgb4vf/t4vf_common.h | 2 +- .../net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 6 +-- 3 files changed, 43 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index a7641be9094f..dbe8ee7e0e21 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2916,6 +2916,39 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { #endif }; +/** + * cxgb4vf_get_port_mask - Get port mask for the VF based on mac + * address stored on the adapter + * @adapter: The adapter + * + * Find the port mask for the VF based on the index of the mac + * address stored in the adapter. If no mac address is stored on + * the adapter for the VF, use the port mask received from the + * firmware. + */ +static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter) +{ + unsigned int naddr = 1, pidx = 0; + unsigned int pmask, rmask = 0; + u8 mac[ETH_ALEN]; + int err; + + pmask = adapter->params.vfres.pmask; + while (pmask) { + if (pmask & 1) { + err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac); + if (!err && !is_zero_ether_addr(mac)) + rmask |= (1 << pidx); + } + pmask >>= 1; + pidx++; + } + if (!rmask) + rmask = adapter->params.vfres.pmask; + + return rmask; +} + /* * "Probe" a device: initialize a device and construct all kernel and driver * state needed to manage the device. This routine is called "init_one" in @@ -2924,13 +2957,12 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { static int cxgb4vf_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct adapter *adapter; + struct net_device *netdev; + struct port_info *pi; + unsigned int pmask; int pci_using_dac; int err, pidx; - unsigned int pmask; - struct adapter *adapter; - struct port_info *pi; - struct net_device *netdev; - unsigned int pf; /* * Initialize generic PCI device state. @@ -3073,8 +3105,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, /* * Allocate our "adapter ports" and stitch everything together.
*/ - pmask = adapter->params.vfres.pmask; - pf = t4vf_get_pf_from_vf(adapter); + pmask = cxgb4vf_get_port_mask(adapter); for_each_port(adapter, pidx) { int port_id, viid; u8 mac[ETH_ALEN]; @@ -3157,7 +3188,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, goto err_free_dev; } - err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac); + err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac); if (err) { dev_err(&pdev->dev, "unable to determine MAC ACL address, " diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 57cfd10a99ec..03777145efec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -415,7 +415,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int); int t4vf_update_port_info(struct port_info *pi); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); -int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port, unsigned int *naddr, u8 *addr); int t4vf_get_vf_vlan_acl(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index a31b87390b50..cd8f9a481d73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -2187,14 +2187,14 @@ int t4vf_prep_adapter(struct adapter *adapter) * t4vf_get_vf_mac_acl - Get the MAC address to be set to * the VI of this VF. * @adapter: The adapter - * @pf: The pf associated with vf + * @port: The port associated with vf * @naddr: the number of ACL MAC addresses returned in addr * @addr: Placeholder for MAC addresses * * Find the MAC address to be set to the VF's VI. The requested MAC address * is from the host OS via callback in the PF driver. */ -int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, +int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port, unsigned int *naddr, u8 *addr) { struct fw_acl_mac_cmd cmd; @@ -2212,7 +2212,7 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, if (cmd.nmac < *naddr) *naddr = cmd.nmac; - switch (pf) { + switch (port) { case 3: memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3)); break; From 09ef193fef7efb0175a04634853862d717adbb95 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:37 +0200 Subject: [PATCH 0531/2056] net: ethernet: ixgbe: check the return value of ixgbe_mii_bus_init() This function may fail. Check its return value and propagate the error code. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 254486e6b09d..fabdca9aadae 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11173,10 +11173,14 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); - ixgbe_mii_bus_init(hw); + err = ixgbe_mii_bus_init(hw); + if (err) + goto err_netdev; return 0; +err_netdev: + unregister_netdev(netdev); err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); From d10d607f504bc7d0a6f69867a8eef0daa453dab7 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:38 +0200 Subject: [PATCH 0532/2056] net: ethernet: ixgbe: don't call devm_mdiobus_free() The idea behind devres is that the release callbacks are called if probe fails. As we now check the return value of ixgbe_mii_bus_init(), we can drop the call to devm_mdiobus_free() in the error path, as the release callback will be called automatically. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 2fb97967961c..7980d7265e10 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -905,7 +905,6 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) struct pci_dev *pdev = adapter->pdev; struct device *dev = &adapter->netdev->dev; struct mii_bus *bus; - int err = -ENODEV; bus = devm_mdiobus_alloc(dev); if (!bus) @@ -923,7 +922,7 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: if (!ixgbe_x550em_a_has_mii(hw)) - goto ixgbe_no_mii_bus; + return -ENODEV; bus->read = &ixgbe_x550em_a_mii_bus_read; bus->write = &ixgbe_x550em_a_mii_bus_write; break; @@ -948,15 +947,8 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) */ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; - err = mdiobus_register(bus); - if (!err) { - adapter->mii_bus = bus; - return 0; - } - -ixgbe_no_mii_bus: - devm_mdiobus_free(dev, bus); - return err; + adapter->mii_bus = bus; + return mdiobus_register(bus); } /** From fe189519e4d3ce655efa0bc64a6bed3f6f1ad1c2 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:39 +0200 Subject: [PATCH 0533/2056] net: devres: rename the release callback of devm_register_netdev() Make it an explicit counterpart to devm_register_netdev() just like we do with devm_free_netdev() for better clarity. Signed-off-by: Bartosz Golaszewski Reviewed-by: Florian Fainelli Signed-off-by: David S.
Miller --- net/devres.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/devres.c b/net/devres.c index 57a6a88d11f6..1f9be2133787 100644 --- a/net/devres.c +++ b/net/devres.c @@ -39,7 +39,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv, } EXPORT_SYMBOL(devm_alloc_etherdev_mqs); -static void devm_netdev_release(struct device *dev, void *this) +static void devm_unregister_netdev(struct device *dev, void *this) { struct net_device_devres *res = this; @@ -77,7 +77,7 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev) netdev_devres_match, ndev))) return -EINVAL; - dr = devres_alloc(devm_netdev_release, sizeof(*dr), GFP_KERNEL); + dr = devres_alloc(devm_unregister_netdev, sizeof(*dr), GFP_KERNEL); if (!dr) return -ENOMEM; From bd8ff6de0cf5b05bc7e1822aec79edac05b02a35 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:40 +0200 Subject: [PATCH 0534/2056] Documentation: devres: add missing mdio helper We have a devres variant of mdiobus_register() but it's not listed in devres.rst. Add it under other mdio devm functions. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. Miller --- Documentation/driver-api/driver-model/devres.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index e0b58c392e4f..5463fc8a60c1 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -343,6 +343,7 @@ MDIO devm_mdiobus_alloc() devm_mdiobus_alloc_size() devm_mdiobus_free() + devm_mdiobus_register() MEM devm_free_pages() From 8b11c20a658de159fcf18ebce4f2acfdf747ff25 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:41 +0200 Subject: [PATCH 0535/2056] phy: un-inline devm_mdiobus_register() Functions should only be static inline if they're very short. This devres helper is already over 10 lines and it will grow soon as we'll be improving upon its approach. Pull it into mdio_devres.c. Signed-off-by: Bartosz Golaszewski Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/Makefile | 2 +- drivers/net/phy/mdio_devres.c | 18 ++++++++++++++++++ include/linux/phy.h | 15 ++------------- 3 files changed, 21 insertions(+), 14 deletions(-) create mode 100644 drivers/net/phy/mdio_devres.c diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index dc9e53b511d6..896afdcac437 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,7 +3,7 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o -mdio-bus-y += mdio_bus.o mdio_device.o +mdio-bus-y += mdio_bus.o mdio_device.o mdio_devres.o ifdef CONFIG_MDIO_DEVICE obj-y += mdio-boardinfo.o diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c new file mode 100644 index 000000000000..f0b4b6cfe5e3 --- /dev/null +++ b/drivers/net/phy/mdio_devres.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include + +int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner) +{ + int ret; + + if (!bus->is_managed) + return -EPERM; + + ret = __mdiobus_register(bus, owner); + if (!ret) + bus->is_managed_registered = 1; + + return ret; +} +EXPORT_SYMBOL(__devm_mdiobus_register); diff --git a/include/linux/phy.h b/include/linux/phy.h index ecc7640705b9..7c75e1c90141 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -322,20 +322,9 @@ static inline struct mii_bus *mdiobus_alloc(void) } int __mdiobus_register(struct mii_bus *bus, struct module *owner); +int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) -static inline int devm_mdiobus_register(struct mii_bus *bus) -{ - int ret; - - if (!bus->is_managed) - return -EPERM; - - ret = mdiobus_register(bus); - if (!ret) - bus->is_managed_registered = 1; - - return ret; -} +#define devm_mdiobus_register(bus) __devm_mdiobus_register(bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); From 6a9a5723cb2ef3cdf3604e9f0c63c8e68cbb08d5 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:42 +0200 Subject: [PATCH 0536/2056] phy: mdio: add kerneldoc for __devm_mdiobus_register() This function is not documented. Add a short kerneldoc description. Signed-off-by: Bartosz Golaszewski Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio_devres.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index f0b4b6cfe5e3..3ee887733d4a 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -2,6 +2,13 @@ #include +/** + * __devm_mdiobus_register - Resource-managed variant of mdiobus_register() + * @bus: MII bus structure to register + * @owner: Owning module + * + * Returns 0 on success, negative error number on failure. + */ int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner) { int ret; From ac3a68d56651c3dad2c12c7afce065fe15267f44 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:43 +0200 Subject: [PATCH 0537/2056] net: phy: don't abuse devres in devm_mdiobus_register() We currently have two managed helpers for mdiobus - devm_mdiobus_alloc() and devm_mdiobus_register(). The idea behind devres is that the release callback releases whatever resource the devm function allocates. In the mdiobus case however there's no devres associated with the device by devm_mdiobus_register(). 
Instead the release callback for devm_mdiobus_alloc(): _devm_mdiobus_free() unregisters the device if it is marked as managed. This all seems wrong. The managed structure shouldn't need to know or care about whether it's managed or not - and this is the case now for struct mii_bus. The devres wrapper should be opaque to the managed resource. This changeset makes devm_mdiobus_alloc() and devm_mdiobus_register() conform to common devres standards: devm_mdiobus_alloc() allocates a devres structure and registers a callback that will call mdiobus_free(). __devm_mdiobus_register() allocated another devres and registers a callback that will unregister the bus. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. Miller --- .../driver-api/driver-model/devres.rst | 1 - drivers/net/ethernet/realtek/r8169_main.c | 2 +- drivers/net/phy/mdio_bus.c | 73 ---------------- drivers/net/phy/mdio_devres.c | 83 +++++++++++++++++-- include/linux/phy.h | 10 +-- 5 files changed, 82 insertions(+), 87 deletions(-) diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 5463fc8a60c1..e0333d66a7f4 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -342,7 +342,6 @@ LED MDIO devm_mdiobus_alloc() devm_mdiobus_alloc_size() - devm_mdiobus_free() devm_mdiobus_register() MEM diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 07a33af1f64e..745e1739e9c8 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5012,7 +5012,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; - ret = devm_mdiobus_register(new_bus); + ret = devm_mdiobus_register(&pdev->dev, new_bus); if (ret) return ret; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 134f82d72da8..46b33701ad4b 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -165,79 +165,6 @@ struct mii_bus *mdiobus_alloc_size(size_t size) } EXPORT_SYMBOL(mdiobus_alloc_size); -static void _devm_mdiobus_free(struct device *dev, void *res) -{ - struct mii_bus *bus = *(struct mii_bus **)res; - - if (bus->is_managed_registered && bus->state == MDIOBUS_REGISTERED) - mdiobus_unregister(bus); - - mdiobus_free(bus); -} - -static int devm_mdiobus_match(struct device *dev, void *res, void *data) -{ - struct mii_bus **r = res; - - if (WARN_ON(!r || !*r)) - return 0; - - return *r == data; -} - -/** - * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() - * @dev: Device to allocate mii_bus for - * @sizeof_priv: Space to allocate for private structure. - * - * Managed mdiobus_alloc_size. mii_bus allocated with this function is - * automatically freed on driver detach. - * - * If an mii_bus allocated with this function needs to be freed separately, - * devm_mdiobus_free() must be used. - * - * RETURNS: - * Pointer to allocated mii_bus on success, NULL on failure. 
- */ -struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) -{ - struct mii_bus **ptr, *bus; - - ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return NULL; - - /* use raw alloc_dr for kmalloc caller tracing */ - bus = mdiobus_alloc_size(sizeof_priv); - if (bus) { - *ptr = bus; - devres_add(dev, ptr); - bus->is_managed = 1; - } else { - devres_free(ptr); - } - - return bus; -} -EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size); - -/** - * devm_mdiobus_free - Resource-managed mdiobus_free() - * @dev: Device this mii_bus belongs to - * @bus: the mii_bus associated with the device - * - * Free mii_bus allocated with devm_mdiobus_alloc_size(). - */ -void devm_mdiobus_free(struct device *dev, struct mii_bus *bus) -{ - int rc; - - rc = devres_release(dev, _devm_mdiobus_free, - devm_mdiobus_match, bus); - WARN_ON(rc); -} -EXPORT_SYMBOL_GPL(devm_mdiobus_free); - /** * mdiobus_release - mii_bus device release callback * @d: the target struct device that contains the mii_bus diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 3ee887733d4a..0b9bd9a61378 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -1,25 +1,96 @@ // SPDX-License-Identifier: GPL-2.0-or-later +#include #include +#include + +struct mdiobus_devres { + struct mii_bus *mii; +}; + +static void devm_mdiobus_free(struct device *dev, void *this) +{ + struct mdiobus_devres *dr = this; + + mdiobus_free(dr->mii); +} + +/** + * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size() + * @dev: Device to allocate mii_bus for + * @sizeof_priv: Space to allocate for private structure + * + * Managed mdiobus_alloc_size. mii_bus allocated with this function is + * automatically freed on driver detach. + * + * RETURNS: + * Pointer to allocated mii_bus on success, NULL on out-of-memory error. + */ +struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv) +{ + struct mdiobus_devres *dr; + + dr = devres_alloc(devm_mdiobus_free, sizeof(*dr), GFP_KERNEL); + if (!dr) + return NULL; + + dr->mii = mdiobus_alloc_size(sizeof_priv); + if (!dr->mii) { + devres_free(dr); + return NULL; + } + + devres_add(dev, dr); + return dr->mii; +} +EXPORT_SYMBOL(devm_mdiobus_alloc_size); + +static void devm_mdiobus_unregister(struct device *dev, void *this) +{ + struct mdiobus_devres *dr = this; + + mdiobus_unregister(dr->mii); +} + +static int mdiobus_devres_match(struct device *dev, + void *this, void *match_data) +{ + struct mdiobus_devres *res = this; + struct mii_bus *mii = match_data; + + return mii == res->mii; +} /** * __devm_mdiobus_register - Resource-managed variant of mdiobus_register() + * @dev: Device to register mii_bus for * @bus: MII bus structure to register * @owner: Owning module * * Returns 0 on success, negative error number on failure. 
*/ -int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner) +int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, + struct module *owner) { + struct mdiobus_devres *dr; int ret; - if (!bus->is_managed) - return -EPERM; + if (WARN_ON(!devres_find(dev, devm_mdiobus_free, + mdiobus_devres_match, bus))) + return -EINVAL; + + dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; ret = __mdiobus_register(bus, owner); - if (!ret) - bus->is_managed_registered = 1; + if (ret) { + devres_free(dr); + return ret; + } - return ret; + dr->mii = bus; + devres_add(dev, dr); + return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); diff --git a/include/linux/phy.h b/include/linux/phy.h index 7c75e1c90141..101a48fa6750 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -261,9 +261,6 @@ struct mii_bus { int (*reset)(struct mii_bus *bus); struct mdio_bus_stats stats[PHY_MAX_ADDR]; - unsigned int is_managed:1; /* is device-managed */ - unsigned int is_managed_registered:1; - /* * A lock to ensure that only one thing can read/write * the MDIO bus at a time @@ -322,9 +319,11 @@ static inline struct mii_bus *mdiobus_alloc(void) } int __mdiobus_register(struct mii_bus *bus, struct module *owner); -int __devm_mdiobus_register(struct mii_bus *bus, struct module *owner); +int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, + struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) -#define devm_mdiobus_register(bus) __devm_mdiobus_register(bus, THIS_MODULE) +#define devm_mdiobus_register(dev, bus) \ + __devm_mdiobus_register(dev, bus, THIS_MODULE) void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); @@ -335,7 +334,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) } struct mii_bus *mdio_find_bus(const char *mdio_name); -void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); #define PHY_INTERRUPT_DISABLED false From a0bd96f5aed2108505386733abe76f37362c2f50 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:44 +0200 Subject: [PATCH 0538/2056] of: mdio: remove the 'extern' keyword from function declarations The 'extern' keyword in headers doesn't have any benefit. Remove them all from the of_mdio.h header. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. 
Miller --- include/linux/of_mdio.h | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 0f61a4ac6bcf..ba8e157f24ad 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -12,27 +12,26 @@ #include #if IS_ENABLED(CONFIG_OF_MDIO) -extern bool of_mdiobus_child_is_phy(struct device_node *child); -extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); -extern struct phy_device *of_phy_find_device(struct device_node *phy_np); -extern struct phy_device *of_phy_connect(struct net_device *dev, - struct device_node *phy_np, - void (*hndlr)(struct net_device *), - u32 flags, phy_interface_t iface); -extern struct phy_device * +bool of_mdiobus_child_is_phy(struct device_node *child); +int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +struct phy_device *of_phy_find_device(struct device_node *phy_np); +struct phy_device * +of_phy_connect(struct net_device *dev, struct device_node *phy_np, + void (*hndlr)(struct net_device *), u32 flags, + phy_interface_t iface); +struct phy_device * of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)); -struct phy_device *of_phy_attach(struct net_device *dev, - struct device_node *phy_np, u32 flags, - phy_interface_t iface); +struct phy_device * +of_phy_attach(struct net_device *dev, struct device_node *phy_np, + u32 flags, phy_interface_t iface); -extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -extern int of_phy_register_fixed_link(struct device_node *np); -extern void of_phy_deregister_fixed_link(struct device_node *np); -extern bool of_phy_is_fixed_link(struct device_node *np); -extern int of_mdiobus_phy_device_register(struct mii_bus *mdio, - struct phy_device *phy, - struct device_node *child, u32 addr); +struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +int of_phy_register_fixed_link(struct device_node *np); +void of_phy_deregister_fixed_link(struct device_node *np); +bool of_phy_is_fixed_link(struct device_node *np); +int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, + struct device_node *child, u32 addr); static inline int of_mdio_parse_addr(struct device *dev, const struct device_node *np) From 14eeb6e086d6b9004c600e5f0f62bacb458ecfba Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:45 +0200 Subject: [PATCH 0539/2056] of: mdio: provide devm_of_mdiobus_register() Implement a managed variant of of_mdiobus_register(). We need to make mdio_devres into its own module because otherwise we'd hit circular symbol dependencies between phylib and of_mdio. Signed-off-by: Bartosz Golaszewski Signed-off-by: David S.
Miller --- .../driver-api/driver-model/devres.rst | 1 + drivers/net/phy/Makefile | 4 +- drivers/net/phy/mdio_devres.c | 37 +++++++++++++++++++ include/linux/of_mdio.h | 3 ++ 4 files changed, 44 insertions(+), 1 deletion(-) diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index e0333d66a7f4..eaaaafc21134 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -343,6 +343,7 @@ MDIO devm_mdiobus_alloc() devm_mdiobus_alloc_size() devm_mdiobus_register() + devm_of_mdiobus_register() MEM devm_free_pages() diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 896afdcac437..c9a9adf194d5 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -3,7 +3,8 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o -mdio-bus-y += mdio_bus.o mdio_device.o mdio_devres.o +mdio-bus-y += mdio_bus.o mdio_device.o +mdio-devres-y += mdio_devres.o ifdef CONFIG_MDIO_DEVICE obj-y += mdio-boardinfo.o @@ -17,6 +18,7 @@ libphy-y += $(mdio-bus-y) else obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o endif +obj-$(CONFIG_MDIO_DEVICE) += mdio-devres.o libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c index 0b9bd9a61378..b560e99695df 100644 --- a/drivers/net/phy/mdio_devres.c +++ b/drivers/net/phy/mdio_devres.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include +#include #include #include @@ -94,3 +95,39 @@ int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus, return 0; } EXPORT_SYMBOL(__devm_mdiobus_register); + +#if IS_ENABLED(CONFIG_OF_MDIO) +/** + * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register() + * @dev: Device to register mii_bus for + * @mdio: MII bus structure to register + * @np: Device node to parse + */ +int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np) +{ + struct mdiobus_devres *dr; + int ret; + + if (WARN_ON(!devres_find(dev, devm_mdiobus_free, + mdiobus_devres_match, mdio))) + return -EINVAL; + + dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL); + if (!dr) + return -ENOMEM; + + ret = of_mdiobus_register(mdio, np); + if (ret) { + devres_free(dr); + return ret; + } + + dr->mii = mdio; + devres_add(dev, dr); + return 0; +} +EXPORT_SYMBOL(devm_of_mdiobus_register); +#endif /* CONFIG_OF_MDIO */ + +MODULE_LICENSE("GPL"); diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index ba8e157f24ad..1efb88d9f892 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -8,12 +8,15 @@ #ifndef __LINUX_OF_MDIO_H #define __LINUX_OF_MDIO_H +#include #include #include #if IS_ENABLED(CONFIG_OF_MDIO) bool of_mdiobus_child_is_phy(struct device_node *child); int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np); struct phy_device *of_phy_find_device(struct device_node *phy_np); struct phy_device * of_phy_connect(struct net_device *dev, struct device_node *phy_np, From 9ed0a3fac08bddaf104038b6371eaa6494c6fd83 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 29 Jun 2020 14:03:46 +0200 Subject: [PATCH 0540/2056] net: ethernet: mtk-star-emac: use devm_of_mdiobus_register() Shrink the code by using the managed variant of of_mdiobus_register(). 
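A minimal sketch of the probe-path pattern this series converges on, assuming a platform driver; the my_* names and the trivial register accessors below are hypothetical and not taken from these patches:

#include <linux/kernel.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

/* Hypothetical accessors; a real driver talks to its MDIO hardware here. */
static int my_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;
}

static int my_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mii_bus *bus;

	/* Freed automatically by devres when the driver detaches. */
	bus = devm_mdiobus_alloc(dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "my-mdio";
	bus->parent = dev;
	bus->read = my_mdio_read;
	bus->write = my_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));

	/*
	 * Unregistered automatically on detach, before the devres that
	 * frees the bus runs; no explicit cleanup is needed in remove().
	 */
	return devm_of_mdiobus_register(dev, bus, dev->of_node);
}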
Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_star_emac.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 3e765bdcf9e1..13250553263b 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1389,7 +1389,7 @@ static int mtk_star_mdio_init(struct net_device *ndev) priv->mii->write = mtk_star_mdio_write; priv->mii->priv = priv; - ret = of_mdiobus_register(priv->mii, mdio_node); + ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node); out_put_node: of_node_put(mdio_node); @@ -1441,13 +1441,6 @@ static void mtk_star_clk_disable_unprepare(void *data) clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); } -static void mtk_star_mdiobus_unregister(void *data) -{ - struct mtk_star_priv *priv = data; - - mdiobus_unregister(priv->mii); -} - static int mtk_star_probe(struct platform_device *pdev) { struct device_node *of_node; @@ -1549,10 +1542,6 @@ static int mtk_star_probe(struct platform_device *pdev) if (ret) return ret; - ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv); - if (ret) - return ret; - ret = eth_platform_get_mac_address(dev, ndev->dev_addr); if (ret || !is_valid_ether_addr(ndev->dev_addr)) eth_hw_addr_random(ndev); From 0adcd2981ddddba4c1330078ea12e89fd782e79f Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 30 Jun 2020 14:00:33 -0700 Subject: [PATCH 0541/2056] amd8111e: Mark PM functions as __maybe_unused In certain configurations without power management support, the following warnings happen: ../drivers/net/ethernet/amd/amd8111e.c:1623:12: warning: 'amd8111e_resume' defined but not used [-Wunused-function] 1623 | static int amd8111e_resume(struct device *dev_d) | ^~~~~~~~~~~~~~~ ../drivers/net/ethernet/amd/amd8111e.c:1584:12: warning: 'amd8111e_suspend' defined but not used [-Wunused-function] 1584 | static int amd8111e_suspend(struct device *dev_d) | ^~~~~~~~~~~~~~~~ Mark these functions as __maybe_unused to make it clear to the compiler that this is going to happen based on the configuration, which is the standard for these types of functions. Fixes: 2caf751fe080 ("amd8111e: Convert to generic power management") Signed-off-by: Nathan Chancellor Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index c6591b33abcc..5d389a984394 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1581,7 +1581,7 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue) netif_wake_queue(dev); } -static int amd8111e_suspend(struct device *dev_d) +static int __maybe_unused amd8111e_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct amd8111e_priv *lp = netdev_priv(dev); @@ -1620,7 +1620,7 @@ static int amd8111e_suspend(struct device *dev_d) return 0; } -static int amd8111e_resume(struct device *dev_d) +static int __maybe_unused amd8111e_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct amd8111e_priv *lp = netdev_priv(dev); From 75603a3112664cb1f6b1508e2b489f16863bf153 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 30 Jun 2020 14:00:34 -0700 Subject: [PATCH 0542/2056] pcnet32: Mark PM functions as __maybe_unused In certain configurations without power management support, the following warnings happen: ../drivers/net/ethernet/amd/pcnet32.c:2928:12: warning: 'pcnet32_pm_resume' defined but not used [-Wunused-function] 2928 | static int pcnet32_pm_resume(struct device *device_d) | ^~~~~~~~~~~~~~~~~ ../drivers/net/ethernet/amd/pcnet32.c:2916:12: warning: 'pcnet32_pm_suspend' defined but not used [-Wunused-function] 2916 | static int pcnet32_pm_suspend(struct device *device_d) | ^~~~~~~~~~~~~~~~~~ Mark these functions as __maybe_unused to make it clear to the compiler that this is going to happen based on the configuration, which is the standard for these types of functions. Fixes: a86688fbef1b ("pcnet32: Convert to generic power management") Signed-off-by: Nathan Chancellor Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index d32f54d760e7..f47140391f67 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -2913,7 +2913,7 @@ static void pcnet32_watchdog(struct timer_list *t) mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT)); } -static int pcnet32_pm_suspend(struct device *device_d) +static int __maybe_unused pcnet32_pm_suspend(struct device *device_d) { struct net_device *dev = dev_get_drvdata(device_d); @@ -2925,7 +2925,7 @@ static int pcnet32_pm_suspend(struct device *device_d) return 0; } -static int pcnet32_pm_resume(struct device *device_d) +static int __maybe_unused pcnet32_pm_resume(struct device *device_d) { struct net_device *dev = dev_get_drvdata(device_d); From fd2261d8ed6f1375f18341e959d4bbcd94d748a3 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 30 Jun 2020 18:41:28 +0530 Subject: [PATCH 0543/2056] cxgb4: add mirror action to TC-MATCHALL offload Add mirror Virtual Interface (VI) support to receive all ingress mirror traffic from the underlying device. The mirror VI is created dynamically, if the TC-MATCHALL rule has a corresponding mirror action. Also request MSI-X vectors needed for the mirror VI Rxqs. If no vectors are available, then disable mirror VI support. v3: - Replace mirror VI refcount_t with normal u32 variable. v2: - Add mutex to protect all mirror VI data, instead of just mirror Rxqs. 
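A minimal sketch of the calling contract these helpers are designed for (the example_* functions are hypothetical; only cxgb4_port_mirror_alloc() and cxgb4_port_mirror_free() come from this patch): every offloaded rule that carries a mirror action takes one reference on the per-port mirror VI, and the VI is freed only when the last such rule goes away.

#include <linux/netdevice.h>
#include "cxgb4.h"

/* Hypothetical stand-in for installing the actual matchall filter. */
static int example_install_filter(struct net_device *dev)
{
	return 0;
}

static int example_offload_mirror_rule(struct net_device *dev)
{
	int ret;

	/*
	 * The first user allocates the mirror VI, later users only bump
	 * the reference count; returns -EOPNOTSUPP when no mirror queue
	 * sets were reserved during MSI-X allocation.
	 */
	ret = cxgb4_port_mirror_alloc(dev);
	if (ret)
		return ret;

	ret = example_install_filter(dev);
	if (ret)
		cxgb4_port_mirror_free(dev);	/* drop the reference on failure */

	return ret;
}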
Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 11 ++ .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 103 +++++++++++++++++- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 16 ++- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3 +- .../chelsio/cxgb4/cxgb4_tc_matchall.c | 57 +++++++++- .../chelsio/cxgb4/cxgb4_tc_matchall.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 16 +++ 7 files changed, 197 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d811df1b93d9..2239d19a2ac4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -679,6 +679,12 @@ struct port_info { u8 rx_cchan; bool tc_block_shared; + + /* Mirror VI information */ + u16 viid_mirror; + u16 nmirrorqsets; + u32 vi_mirror_count; + struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */ }; struct dentry; @@ -960,6 +966,7 @@ struct sge { u16 ofldqsets; /* # of active ofld queue sets */ u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 eoqsets; /* # of ETHOFLD queues */ + u16 mirrorqsets; /* # of Mirror queues */ u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; @@ -1857,6 +1864,8 @@ int t4_init_rss_mode(struct adapter *adap, int mbox); int t4_init_portinfo(struct port_info *pi, int mbox, int port, int pf, int vf, u8 mac[]); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf, + u16 *mirror_viid); void t4_fatal_err(struct adapter *adapter); unsigned int t4_chip_rss_size(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, @@ -2141,6 +2150,8 @@ int cxgb_open(struct net_device *dev); int cxgb_close(struct net_device *dev); void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q); void cxgb4_quiesce_rx(struct sge_rspq *q); +int cxgb4_port_mirror_alloc(struct net_device *dev); +void cxgb4_port_mirror_free(struct net_device *dev); #ifdef CONFIG_CHELSIO_TLS_DEVICE int cxgb4_set_ktls_feature(struct adapter *adap, bool enable); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 234f01389cb1..e659af08e352 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1285,6 +1285,55 @@ static int setup_debugfs(struct adapter *adap) return 0; } +int cxgb4_port_mirror_alloc(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret = 0; + + if (!pi->nmirrorqsets) + return -EOPNOTSUPP; + + mutex_lock(&pi->vi_mirror_mutex); + if (pi->viid_mirror) { + pi->vi_mirror_count++; + goto out_unlock; + } + + ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0, + &pi->viid_mirror); + if (ret) + goto out_unlock; + + pi->vi_mirror_count = 1; + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); + return ret; +} + +void cxgb4_port_mirror_free(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + mutex_lock(&pi->vi_mirror_mutex); + if (!pi->viid_mirror) + goto out_unlock; + + if (pi->vi_mirror_count > 1) { + pi->vi_mirror_count--; + goto out_unlock; + } + + pi->vi_mirror_count = 0; + t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); + pi->viid_mirror = 0; + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); +} + /* * 
upper-layer driver support */ @@ -5504,6 +5553,19 @@ static int cfg_queues(struct adapter *adap) avail_qsets -= s->eoqsets; } + /* Mirror queues must follow same scheme as normal Ethernet + * Queues, when there are enough queues available. Otherwise, + * allocate at least 1 queue per port. If even 1 queue is not + * available, then disable mirror queues support. + */ + if (avail_qsets >= s->max_ethqsets) + s->mirrorqsets = s->max_ethqsets; + else if (avail_qsets >= adap->params.nports) + s->mirrorqsets = adap->params.nports; + else + s->mirrorqsets = 0; + avail_qsets -= s->mirrorqsets; + for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { struct sge_eth_rxq *r = &s->ethrxq[i]; @@ -5617,8 +5679,8 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, static int enable_msix(struct adapter *adap) { - u32 eth_need, uld_need = 0, ethofld_need = 0; - u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0; + u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0; + u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0; u8 num_uld = 0, nchan = adap->params.nports; u32 i, want, need, num_vec; struct sge *s = &adap->sge; @@ -5649,6 +5711,12 @@ static int enable_msix(struct adapter *adap) need += ethofld_need; } + if (s->mirrorqsets) { + want += s->mirrorqsets; + mirror_need = nchan; + need += mirror_need; + } + want += EXTRA_VECS; need += EXTRA_VECS; @@ -5682,8 +5750,10 @@ static int enable_msix(struct adapter *adap) adap->params.ethofld = 0; s->ofldqsets = 0; s->eoqsets = 0; + s->mirrorqsets = 0; uld_need = 0; ethofld_need = 0; + mirror_need = 0; } num_vec = allocated; @@ -5697,6 +5767,8 @@ static int enable_msix(struct adapter *adap) ofldqsets = nchan; if (is_ethofld(adap)) eoqsets = ethofld_need; + if (s->mirrorqsets) + mirrorqsets = mirror_need; num_vec -= need; while (num_vec) { @@ -5728,12 +5800,25 @@ static int enable_msix(struct adapter *adap) num_vec -= uld_need; } } + + if (s->mirrorqsets) { + while (num_vec) { + if (num_vec < mirror_need || + mirrorqsets > s->mirrorqsets) + break; + + mirrorqsets++; + num_vec -= mirror_need; + } + } } else { ethqsets = s->max_ethqsets; if (is_uld(adap)) ofldqsets = s->ofldqsets; if (is_ethofld(adap)) eoqsets = s->eoqsets; + if (s->mirrorqsets) + mirrorqsets = s->mirrorqsets; } if (ethqsets < s->max_ethqsets) { @@ -5749,6 +5834,15 @@ static int enable_msix(struct adapter *adap) if (is_ethofld(adap)) s->eoqsets = eoqsets; + if (s->mirrorqsets) { + s->mirrorqsets = mirrorqsets; + for_each_port(adap, i) { + pi = adap2pinfo(adap, i); + pi->nmirrorqsets = s->mirrorqsets / nchan; + mutex_init(&pi->vi_mirror_mutex); + } + } + /* map for msix */ ret = alloc_msix_info(adap, allocated); if (ret) @@ -5760,8 +5854,9 @@ static int enable_msix(struct adapter *adap) } dev_info(adap->pdev_dev, - "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n", - allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld); + "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n", + allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld, + s->mirrorqsets); kfree(entries); return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index ae0e998d9338..6251444ffa58 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -372,6 +372,7 @@ void cxgb4_process_flow_actions(struct net_device *in, case FLOW_ACTION_DROP: fs->action = FILTER_DROP; break; + case FLOW_ACTION_MIRRED: case FLOW_ACTION_REDIRECT: { struct net_device 
*out = act->dev; struct port_info *pi = netdev_priv(out); @@ -529,7 +530,8 @@ static bool valid_pedit_action(struct net_device *dev, int cxgb4_validate_flow_actions(struct net_device *dev, struct flow_action *actions, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + u8 matchall_filter) { struct flow_action_entry *act; bool act_redir = false; @@ -546,11 +548,19 @@ int cxgb4_validate_flow_actions(struct net_device *dev, case FLOW_ACTION_DROP: /* Do nothing */ break; + case FLOW_ACTION_MIRRED: case FLOW_ACTION_REDIRECT: { struct adapter *adap = netdev2adap(dev); struct net_device *n_dev, *target_dev; - unsigned int i; bool found = false; + unsigned int i; + + if (act->id == FLOW_ACTION_MIRRED && + !matchall_filter) { + NL_SET_ERR_MSG_MOD(extack, + "Egress mirror action is only supported for tc-matchall"); + return -EOPNOTSUPP; + } target_dev = act->dev; for_each_port(adap, i) { @@ -689,7 +699,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule, u8 inet_family; int fidx, ret; - if (cxgb4_validate_flow_actions(dev, &rule->action, extack)) + if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0)) return -EOPNOTSUPP; if (cxgb4_validate_flow_match(dev, rule)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index befa459324fb..6296e1d5a12b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -113,7 +113,8 @@ void cxgb4_process_flow_actions(struct net_device *in, struct ch_filter_specification *fs); int cxgb4_validate_flow_actions(struct net_device *dev, struct flow_action *actions, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + u8 matchall_filter); int cxgb4_tc_flower_replace(struct net_device *dev, struct flow_cls_offload *cls); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index c439b5bce9c9..e377e50c2492 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -188,6 +188,49 @@ static void cxgb4_matchall_free_tc(struct net_device *dev) tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED; } +static int cxgb4_matchall_mirror_alloc(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct flow_action_entry *act; + int ret; + u32 i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + flow_action_for_each(i, act, &cls->rule->action) { + if (act->id == FLOW_ACTION_MIRRED) { + ret = cxgb4_port_mirror_alloc(dev); + if (ret) { + NL_SET_ERR_MSG_MOD(extack, + "Couldn't allocate mirror"); + return ret; + } + + tc_port_matchall->ingress.viid_mirror = pi->viid_mirror; + break; + } + } + + return 0; +} + +static void cxgb4_matchall_mirror_free(struct net_device *dev) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + if (!tc_port_matchall->ingress.viid_mirror) + return; + + cxgb4_port_mirror_free(dev); + tc_port_matchall->ingress.viid_mirror = 0; +} + static int cxgb4_matchall_alloc_filter(struct net_device *dev, struct 
tc_cls_matchall_offload *cls) { @@ -211,6 +254,10 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, return -ENOMEM; } + ret = cxgb4_matchall_mirror_alloc(dev, cls); + if (ret) + return ret; + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; fs = &tc_port_matchall->ingress.fs; memset(fs, 0, sizeof(*fs)); @@ -229,11 +276,15 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, ret = cxgb4_set_filter(dev, fidx, fs); if (ret) - return ret; + goto out_free; tc_port_matchall->ingress.tid = fidx; tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; + +out_free: + cxgb4_matchall_mirror_free(dev); + return ret; } static int cxgb4_matchall_free_filter(struct net_device *dev) @@ -250,6 +301,8 @@ static int cxgb4_matchall_free_filter(struct net_device *dev) if (ret) return ret; + cxgb4_matchall_mirror_free(dev); + tc_port_matchall->ingress.packets = 0; tc_port_matchall->ingress.bytes = 0; tc_port_matchall->ingress.last_used = 0; @@ -279,7 +332,7 @@ int cxgb4_tc_matchall_replace(struct net_device *dev, ret = cxgb4_validate_flow_actions(dev, &cls_matchall->rule->action, - extack); + extack, 1); if (ret) return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h index ab6b5683dfd3..e264b6e606c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h @@ -21,6 +21,7 @@ struct cxgb4_matchall_ingress_entry { enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ u32 tid; /* Index to hardware filter entry */ struct ch_filter_specification fs; /* Filter entry */ + u16 viid_mirror; /* Identifier for allocated Mirror VI */ u64 bytes; /* # of bytes hitting the filter */ u64 packets; /* # of packets hitting the filter */ u64 last_used; /* Last updated jiffies time */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 70fe189202be..7876aa392aae 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -9711,6 +9711,22 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) return 0; } +int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf, + u16 *mirror_viid) +{ + int ret; + + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL, + NULL, NULL); + if (ret < 0) + return ret; + + if (mirror_viid) + *mirror_viid = ret; + + return 0; +} + /** * t4_read_cimq_cfg - read CIM queue configuration * @adap: the adapter From 2b465ed00f7db9c5b2aca95a91671f86282b1822 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 30 Jun 2020 18:41:29 +0530 Subject: [PATCH 0544/2056] cxgb4: add support for mirror Rxqs When mirror VI is enabled, allocate the mirror Rxqs and setup the mirror VI RSS table. The mirror Rxqs are allocated/freed when the mirror VI is created/destroyed or when underlying port is brought up/down, respectively. v3: - Replace mirror VI refcount_t with normal u32 variable. v2: - Use mutex to protect all mirror VI data, instead of just mirror Rxqs. - Remove the un-needed mirror Rxq mutex. Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 + .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 66 ++++- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 235 ++++++++++++++++-- 3 files changed, 288 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 2239d19a2ac4..980b64c7a411 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -711,6 +711,13 @@ enum { ULP_CRYPTO_KTLS_INLINE = 1 << 3, }; +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024 +#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64 +#define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5 +#define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8 + +#define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72 + struct rx_sw_desc; struct sge_fl { /* SGE free-buffer queue state */ @@ -960,6 +967,8 @@ struct sge { struct sge_eohw_txq *eohw_txq; struct sge_ofld_rxq *eohw_rxq; + struct sge_eth_rxq *mirror_rxq[NCHAN]; + u16 max_ethqsets; /* # of available Ethernet queue sets */ u16 ethqsets; /* # of active Ethernet queue sets */ u16 ethtxq_rover; /* Tx queue to clean up next */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index b477b8842905..05f33b7e3677 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2742,6 +2742,58 @@ do { \ } r -= eth_entries; + for_each_port(adap, j) { + struct port_info *pi = adap2pinfo(adap, j); + const struct sge_eth_rxq *rx; + + mutex_lock(&pi->vi_mirror_mutex); + if (!pi->vi_mirror_count) { + mutex_unlock(&pi->vi_mirror_mutex); + continue; + } + + if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) { + r -= DIV_ROUND_UP(pi->nmirrorqsets, 4); + mutex_unlock(&pi->vi_mirror_mutex); + continue; + } + + rx = &s->mirror_rxq[j][r * 4]; + n = min(4, pi->nmirrorqsets - 4 * r); + + S("QType:", "Mirror-Rxq"); + S("Interface:", + rx[i].rspq.netdev ? 
rx[i].rspq.netdev->name : "N/A"); + R("RspQ ID:", rspq.abs_id); + R("RspQ size:", rspq.size); + R("RspQE size:", rspq.iqe_len); + R("RspQ CIDX:", rspq.cidx); + R("RspQ Gen:", rspq.gen); + S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq)); + S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]); + R("FL ID:", fl.cntxt_id); + R("FL size:", fl.size - 8); + R("FL pend:", fl.pend_cred); + R("FL avail:", fl.avail); + R("FL PIDX:", fl.pidx); + R("FL CIDX:", fl.cidx); + RL("RxPackets:", stats.pkts); + RL("RxCSO:", stats.rx_cso); + RL("VLANxtract:", stats.vlan_ex); + RL("LROmerged:", stats.lro_merged); + RL("LROpackets:", stats.lro_pkts); + RL("RxDrops:", stats.rx_drops); + RL("RxBadPkts:", stats.bad_rx_pkts); + RL("FLAllocErr:", fl.alloc_failed); + RL("FLLrgAlcErr:", fl.large_alloc_failed); + RL("FLMapErr:", fl.mapping_err); + RL("FLLow:", fl.low); + RL("FLStarving:", fl.starving); + + mutex_unlock(&pi->vi_mirror_mutex); + goto out; + } + if (!adap->tc_mqprio) goto skip_mqprio; @@ -3098,9 +3150,10 @@ do { \ return 0; } -static int sge_queue_entries(const struct adapter *adap) +static int sge_queue_entries(struct adapter *adap) { int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0; + int mirror_rxq_entries = 0; if (adap->tc_mqprio) { struct cxgb4_tc_port_mqprio *port_mqprio; @@ -3123,6 +3176,15 @@ static int sge_queue_entries(const struct adapter *adap) mutex_unlock(&adap->tc_mqprio->mqprio_mutex); } + for_each_port(adap, i) { + struct port_info *pi = adap2pinfo(adap, i); + + mutex_lock(&pi->vi_mirror_mutex); + if (pi->vi_mirror_count) + mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4); + mutex_unlock(&pi->vi_mirror_mutex); + } + if (!is_uld(adap)) goto lld_only; @@ -3137,7 +3199,7 @@ static int sge_queue_entries(const struct adapter *adap) mutex_unlock(&uld_mutex); lld_only: - return DIV_ROUND_UP(adap->sge.ethqsets, 4) + + return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries + eohw_entries + eosw_entries + tot_uld_entries + DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e659af08e352..c7b9161d4326 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -822,6 +822,31 @@ static void adap_config_hpfilter(struct adapter *adapter) "HP filter region isn't supported by FW\n"); } +static int cxgb4_config_rss(const struct port_info *pi, u16 *rss, + u16 rss_size, u16 viid) +{ + struct adapter *adap = pi->adapter; + int ret; + + ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss, + rss_size); + if (ret) + return ret; + + /* If Tunnel All Lookup isn't specified in the global RSS + * Configuration, then we need to specify a default Ingress + * Queue for any ingress packets which aren't hashed. We'll + * use our first ingress queue ... 
+ */ + return t4_config_vi_rss(adap, adap->mbox, viid, + FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F | + FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F | + FW_RSS_VI_CONFIG_CMD_UDPEN_F, + rss[0]); +} + /** * cxgb4_write_rss - write the RSS table for a given port * @pi: the port @@ -833,10 +858,10 @@ static void adap_config_hpfilter(struct adapter *adapter) */ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) { - u16 *rss; - int i, err; struct adapter *adapter = pi->adapter; const struct sge_eth_rxq *rxq; + int i, err; + u16 *rss; rxq = &adapter->sge.ethrxq[pi->first_qset]; rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL); @@ -847,21 +872,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) for (i = 0; i < pi->rss_size; i++, queues++) rss[i] = rxq[*queues].rspq.abs_id; - err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0, - pi->rss_size, rss, pi->rss_size); - /* If Tunnel All Lookup isn't specified in the global RSS - * Configuration, then we need to specify a default Ingress - * Queue for any ingress packets which aren't hashed. We'll - * use our first ingress queue ... - */ - if (!err) - err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid, - FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F | - FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F | - FW_RSS_VI_CONFIG_CMD_UDPEN_F, - rss[0]); + err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid); kfree(rss); return err; } @@ -1285,6 +1296,151 @@ static int setup_debugfs(struct adapter *adap) return 0; } +static void cxgb4_port_mirror_free_rxq(struct adapter *adap, + struct sge_eth_rxq *mirror_rxq) +{ + if ((adap->flags & CXGB4_FULL_INIT_DONE) && + !(adap->flags & CXGB4_SHUTTING_DOWN)) + cxgb4_quiesce_rx(&mirror_rxq->rspq); + + if (adap->flags & CXGB4_USING_MSIX) { + cxgb4_clear_msix_aff(mirror_rxq->msix->vec, + mirror_rxq->msix->aff_mask); + free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq); + cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); + } + + free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); +} + +static int cxgb4_port_mirror_alloc_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge_eth_rxq *mirror_rxq; + struct sge *s = &adap->sge; + int ret = 0, msix = 0; + u16 i, rxqid; + u16 *rss; + + if (!pi->vi_mirror_count) + return 0; + + if (s->mirror_rxq[pi->port_id]) + return 0; + + mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL); + if (!mirror_rxq) + return -ENOMEM; + + s->mirror_rxq[pi->port_id] = mirror_rxq; + + if (!(adap->flags & CXGB4_USING_MSIX)) + msix = -((int)adap->sge.intrq.abs_id + 1); + + for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) { + mirror_rxq = &s->mirror_rxq[pi->port_id][i]; + + /* Allocate Mirror Rxqs */ + if (msix >= 0) { + msix = cxgb4_get_msix_idx_from_bmap(adap); + if (msix < 0) { + ret = msix; + goto out_free_queues; + } + + mirror_rxq->msix = &adap->msix_info[msix]; + snprintf(mirror_rxq->msix->desc, + sizeof(mirror_rxq->msix->desc), + "%s-mirrorrxq%d", dev->name, i); + } + + init_rspq(adap, &mirror_rxq->rspq, + CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC, + CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT, + CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM, + CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE); + + mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM; + + ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false, + dev, msix, &mirror_rxq->fl, + 
t4_ethrx_handler, NULL, 0); + if (ret) + goto out_free_msix_idx; + + /* Setup MSI-X vectors for Mirror Rxqs */ + if (adap->flags & CXGB4_USING_MSIX) { + ret = request_irq(mirror_rxq->msix->vec, + t4_sge_intr_msix, 0, + mirror_rxq->msix->desc, + &mirror_rxq->rspq); + if (ret) + goto out_free_rxq; + + cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec, + &mirror_rxq->msix->aff_mask, i); + } + + /* Start NAPI for Mirror Rxqs */ + cxgb4_enable_rx(adap, &mirror_rxq->rspq); + } + + /* Setup RSS for Mirror Rxqs */ + rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); + if (!rss) { + ret = -ENOMEM; + goto out_free_queues; + } + + mirror_rxq = &s->mirror_rxq[pi->port_id][0]; + for (i = 0; i < pi->rss_size; i++) + rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id; + + ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror); + kfree(rss); + if (ret) + goto out_free_queues; + + return 0; + +out_free_rxq: + free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl); + +out_free_msix_idx: + cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx); + +out_free_queues: + while (rxqid-- > 0) + cxgb4_port_mirror_free_rxq(adap, + &s->mirror_rxq[pi->port_id][rxqid]); + + kfree(s->mirror_rxq[pi->port_id]); + s->mirror_rxq[pi->port_id] = NULL; + return ret; +} + +static void cxgb4_port_mirror_free_queues(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + struct sge *s = &adap->sge; + u16 i; + + if (!pi->vi_mirror_count) + return; + + if (!s->mirror_rxq[pi->port_id]) + return; + + for (i = 0; i < pi->nmirrorqsets; i++) + cxgb4_port_mirror_free_rxq(adap, + &s->mirror_rxq[pi->port_id][i]); + + kfree(s->mirror_rxq[pi->port_id]); + s->mirror_rxq[pi->port_id] = NULL; +} + int cxgb4_port_mirror_alloc(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); @@ -1307,6 +1463,20 @@ int cxgb4_port_mirror_alloc(struct net_device *dev) pi->vi_mirror_count = 1; + if (adap->flags & CXGB4_FULL_INIT_DONE) { + ret = cxgb4_port_mirror_alloc_queues(dev); + if (ret) + goto out_free_vi; + } + + mutex_unlock(&pi->vi_mirror_mutex); + return 0; + +out_free_vi: + pi->vi_mirror_count = 0; + t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); + pi->viid_mirror = 0; + out_unlock: mutex_unlock(&pi->vi_mirror_mutex); return ret; @@ -1326,6 +1496,8 @@ void cxgb4_port_mirror_free(struct net_device *dev) goto out_unlock; } + cxgb4_port_mirror_free_queues(dev); + pi->vi_mirror_count = 0; t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); pi->viid_mirror = 0; @@ -2606,8 +2778,22 @@ int cxgb_open(struct net_device *dev) return err; err = link_start(dev); - if (!err) - netif_tx_start_all_queues(dev); + if (err) + return err; + + if (pi->nmirrorqsets) { + mutex_lock(&pi->vi_mirror_mutex); + err = cxgb4_port_mirror_alloc_queues(dev); + if (err) + goto out_unlock; + mutex_unlock(&pi->vi_mirror_mutex); + } + + netif_tx_start_all_queues(dev); + return 0; + +out_unlock: + mutex_unlock(&pi->vi_mirror_mutex); return err; } @@ -2625,7 +2811,16 @@ int cxgb_close(struct net_device *dev) cxgb4_dcb_reset(dev); dcb_tx_queue_prio_enable(dev, false); #endif - return ret; + if (ret) + return ret; + + if (pi->nmirrorqsets) { + mutex_lock(&pi->vi_mirror_mutex); + cxgb4_port_mirror_free_queues(dev); + mutex_unlock(&pi->vi_mirror_mutex); + } + + return 0; } int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, From 696c278fdfd8dd852af286fcebaacc11adbf20f0 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 30 Jun 2020 18:41:30 +0530 
Subject: [PATCH 0545/2056] cxgb4: add main VI to mirror VI config replication When mirror VI is enabled, replicate various VI config params enabled on main VI to mirror VI. These include replicating MTU, promiscuous mode, all-multicast mode, and enabled netdev Rx feature offloads. v3: - Replace mirror VI refcount_t with normal u32 variable. - Add back calling cxgb4_port_mirror_start() in cxgb_open(), which was there in v1, but got missed in v2 during refactoring. v2: - Simplify the replication code by refactoring t4_set_rxmode() to handle mirror VI, instead of duplicating the t4_set_rxmode() calls in multiple places. Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 +- .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 107 ++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 27 ++++- 3 files changed, 121 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 980b64c7a411..ff53c78307c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1984,8 +1984,8 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid); int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok); + unsigned int viid_mirror, int mtu, int promisc, int all_multi, + int bcast, int vlanex, bool sleep_ok); int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid, const u8 *addr, const u8 *mask, unsigned int idx, u8 lookup_type, u8 port_id, bool sleep_ok); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c7b9161d4326..0991631f3a91 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -435,8 +435,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); - return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, - (dev->flags & IFF_PROMISC) ? 1 : 0, + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror, + mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, sleep_ok); } @@ -503,15 +503,16 @@ int cxgb4_change_mac(struct port_info *pi, unsigned int viid, */ static int link_start(struct net_device *dev) { - int ret; struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->pf; + unsigned int mb = pi->adapter->mbox; + int ret; /* * We do not set address filters and promiscuity here, the stack does * that step explicitly. 
*/ - ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, + ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror, + dev->mtu, -1, -1, -1, !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); if (ret == 0) ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt, @@ -1270,15 +1271,15 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, static int cxgb_set_features(struct net_device *dev, netdev_features_t features) { - const struct port_info *pi = netdev_priv(dev); netdev_features_t changed = dev->features ^ features; + const struct port_info *pi = netdev_priv(dev); int err; if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) return 0; - err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1, - -1, -1, -1, + err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, + pi->viid_mirror, -1, -1, -1, -1, !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); if (unlikely(err)) dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; @@ -1441,6 +1442,74 @@ static void cxgb4_port_mirror_free_queues(struct net_device *dev) s->mirror_rxq[pi->port_id] = NULL; } +static int cxgb4_port_mirror_start(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret, idx = -1; + + if (!pi->vi_mirror_count) + return 0; + + /* Mirror VIs can be created dynamically after stack had + * already setup Rx modes like MTU, promisc, allmulti, etc. + * on main VI. So, parse what the stack had setup on the + * main VI and update the same on the mirror VI. + */ + ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror, + dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, + !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); + if (ret) { + dev_err(adap->pdev_dev, + "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + return ret; + } + + /* Enable replication bit for the device's MAC address + * in MPS TCAM, so that the packets for the main VI are + * replicated to mirror VI. + */ + ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx, + dev->dev_addr, true, NULL); + if (ret) { + dev_err(adap->pdev_dev, + "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + return ret; + } + + /* Enabling a Virtual Interface can result in an interrupt + * during the processing of the VI Enable command and, in some + * paths, result in an attempt to issue another command in the + * interrupt context. Thus, we disable interrupts during the + * course of the VI Enable command ... 
+ */ + local_bh_disable(); + ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, + false); + local_bh_enable(); + if (ret) + dev_err(adap->pdev_dev, + "Failed starting Mirror VI 0x%x, ret: %d\n", + pi->viid_mirror, ret); + + return ret; +} + +static void cxgb4_port_mirror_stop(struct net_device *dev) +{ + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + + if (!pi->vi_mirror_count) + return; + + t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false, + false); +} + int cxgb4_port_mirror_alloc(struct net_device *dev) { struct port_info *pi = netdev2pinfo(dev); @@ -1467,11 +1536,18 @@ int cxgb4_port_mirror_alloc(struct net_device *dev) ret = cxgb4_port_mirror_alloc_queues(dev); if (ret) goto out_free_vi; + + ret = cxgb4_port_mirror_start(dev); + if (ret) + goto out_free_queues; } mutex_unlock(&pi->vi_mirror_mutex); return 0; +out_free_queues: + cxgb4_port_mirror_free_queues(dev); + out_free_vi: pi->vi_mirror_count = 0; t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror); @@ -1496,6 +1572,7 @@ void cxgb4_port_mirror_free(struct net_device *dev) goto out_unlock; } + cxgb4_port_mirror_stop(dev); cxgb4_port_mirror_free_queues(dev); pi->vi_mirror_count = 0; @@ -2786,12 +2863,19 @@ int cxgb_open(struct net_device *dev) err = cxgb4_port_mirror_alloc_queues(dev); if (err) goto out_unlock; + + err = cxgb4_port_mirror_start(dev); + if (err) + goto out_free_queues; mutex_unlock(&pi->vi_mirror_mutex); } netif_tx_start_all_queues(dev); return 0; +out_free_queues: + cxgb4_port_mirror_free_queues(dev); + out_unlock: mutex_unlock(&pi->vi_mirror_mutex); return err; @@ -2816,6 +2900,7 @@ int cxgb_close(struct net_device *dev) if (pi->nmirrorqsets) { mutex_lock(&pi->vi_mirror_mutex); + cxgb4_port_mirror_stop(dev); cxgb4_port_mirror_free_queues(dev); mutex_unlock(&pi->vi_mirror_mutex); } @@ -3086,11 +3171,11 @@ static void cxgb_set_rxmode(struct net_device *dev) static int cxgb_change_mtu(struct net_device *dev, int new_mtu) { - int ret; struct port_info *pi = netdev_priv(dev); + int ret; - ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1, - -1, -1, -1, true); + ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, + pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); if (!ret) dev->mtu = new_mtu; return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 7876aa392aae..0af5ee9975df 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -7711,6 +7711,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, * @adap: the adapter * @mbox: mailbox to use for the FW command * @viid: the VI id + * @viid_mirror: the mirror VI id * @mtu: the new MTU or -1 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change @@ -7721,10 +7722,11 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, * Sets Rx properties of a virtual interface. 
*/ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, - int mtu, int promisc, int all_multi, int bcast, int vlanex, - bool sleep_ok) + unsigned int viid_mirror, int mtu, int promisc, int all_multi, + int bcast, int vlanex, bool sleep_ok) { - struct fw_vi_rxmode_cmd c; + struct fw_vi_rxmode_cmd c, c_mirror; + int ret; /* convert to FW values */ if (mtu < 0) @@ -7749,7 +7751,24 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) | FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) | FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex)); - return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + + if (viid_mirror) { + memcpy(&c_mirror, &c, sizeof(c_mirror)); + c_mirror.op_to_viid = + cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) | + FW_CMD_REQUEST_F | FW_CMD_WRITE_F | + FW_VI_RXMODE_CMD_VIID_V(viid_mirror)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); + if (ret) + return ret; + + if (viid_mirror) + ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror), + NULL, sleep_ok); + + return ret; } /** From 30ad688094bcfe8721bfd4003f6a20c9b6ddf964 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 30 Jun 2020 08:21:24 -0700 Subject: [PATCH 0546/2056] libbpf: Make bpf_endian co-exist with vmlinux.h Make bpf_endian.h compatible with vmlinux.h. It is a frequent request from users wanting to use bpf_endian.h in their BPF applications using CO-RE and vmlinux.h. To achieve that, re-implement byte swap macros and drop all the header includes. This way it can be used both with linux header includes, as well as with a vmlinux.h. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630152125.3631920-2-andriin@fb.com --- tools/lib/bpf/bpf_endian.h | 43 +++++++++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/tools/lib/bpf/bpf_endian.h b/tools/lib/bpf/bpf_endian.h index fbe28008450f..ec9db4feca9f 100644 --- a/tools/lib/bpf/bpf_endian.h +++ b/tools/lib/bpf/bpf_endian.h @@ -2,8 +2,35 @@ #ifndef __BPF_ENDIAN__ #define __BPF_ENDIAN__ -#include -#include +/* + * Isolate byte #n and put it into byte #m, for __u##b type. 
+ * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64: + * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx + * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000 + * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn + * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000 + */ +#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8)) + +#define ___bpf_swab16(x) ((__u16)( \ + ___bpf_mvb(x, 16, 0, 1) | \ + ___bpf_mvb(x, 16, 1, 0))) + +#define ___bpf_swab32(x) ((__u32)( \ + ___bpf_mvb(x, 32, 0, 3) | \ + ___bpf_mvb(x, 32, 1, 2) | \ + ___bpf_mvb(x, 32, 2, 1) | \ + ___bpf_mvb(x, 32, 3, 0))) + +#define ___bpf_swab64(x) ((__u64)( \ + ___bpf_mvb(x, 64, 0, 7) | \ + ___bpf_mvb(x, 64, 1, 6) | \ + ___bpf_mvb(x, 64, 2, 5) | \ + ___bpf_mvb(x, 64, 3, 4) | \ + ___bpf_mvb(x, 64, 4, 3) | \ + ___bpf_mvb(x, 64, 5, 2) | \ + ___bpf_mvb(x, 64, 6, 1) | \ + ___bpf_mvb(x, 64, 7, 0))) /* LLVM's BPF target selects the endianness of the CPU * it compiles on, or the user specifies (bpfel/bpfeb), @@ -23,16 +50,16 @@ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ # define __bpf_ntohs(x) __builtin_bswap16(x) # define __bpf_htons(x) __builtin_bswap16(x) -# define __bpf_constant_ntohs(x) ___constant_swab16(x) -# define __bpf_constant_htons(x) ___constant_swab16(x) +# define __bpf_constant_ntohs(x) ___bpf_swab16(x) +# define __bpf_constant_htons(x) ___bpf_swab16(x) # define __bpf_ntohl(x) __builtin_bswap32(x) # define __bpf_htonl(x) __builtin_bswap32(x) -# define __bpf_constant_ntohl(x) ___constant_swab32(x) -# define __bpf_constant_htonl(x) ___constant_swab32(x) +# define __bpf_constant_ntohl(x) ___bpf_swab32(x) +# define __bpf_constant_htonl(x) ___bpf_swab32(x) # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) -# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x) -# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x) +# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x) +# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x) #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ # define __bpf_ntohs(x) (x) # define __bpf_htons(x) (x) From 8c18311067d0f0d5f332b9e1f3859eb15e23332d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 30 Jun 2020 08:21:25 -0700 Subject: [PATCH 0547/2056] selftests/bpf: Add byte swapping selftest Add simple selftest validating byte swap built-ins and compile-time macros. 
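As a rough illustration of how the reworked header gets used (this sketch is not part of the series; the hook point, the field access and the port check are arbitrary), a CO-RE program that includes only vmlinux.h plus bpf_endian.h could look like:

  // SPDX-License-Identifier: GPL-2.0
  /* Illustrative sketch only: bpf_endian.h now coexists with vmlinux.h,
   * so a tracing program can byte-swap fields read from kernel structs.
   */
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>
  #include <bpf/bpf_core_read.h>
  #include <bpf/bpf_endian.h>

  char _license[] SEC("license") = "GPL";

  SEC("fentry/tcp_connect")
  int BPF_PROG(trace_connect, struct sock *sk)
  {
  	/* skc_dport is kept in network byte order by the kernel */
  	__u16 dport = bpf_ntohs(BPF_CORE_READ(sk, __sk_common.skc_dport));

  	if (dport == 443)
  		bpf_printk("connect to port 443\n");
  	return 0;
  }

On little-endian targets bpf_ntohs() expands to __builtin_bswap16() for runtime values and to the new ___bpf_swab16() for compile-time constants, with no dependency on linux/swab.h.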
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630152125.3631920-3-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/endian.c | 53 +++++++++++++++++++ .../testing/selftests/bpf/progs/test_endian.c | 37 +++++++++++++ 2 files changed, 90 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/endian.c create mode 100644 tools/testing/selftests/bpf/progs/test_endian.c diff --git a/tools/testing/selftests/bpf/prog_tests/endian.c b/tools/testing/selftests/bpf/prog_tests/endian.c new file mode 100644 index 000000000000..1a11612ace6c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/endian.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include +#include "test_endian.skel.h" + +static int duration; + +#define IN16 0x1234 +#define IN32 0x12345678U +#define IN64 0x123456789abcdef0ULL + +#define OUT16 0x3412 +#define OUT32 0x78563412U +#define OUT64 0xf0debc9a78563412ULL + +void test_endian(void) +{ + struct test_endian* skel; + struct test_endian__bss *bss; + int err; + + skel = test_endian__open_and_load(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + bss = skel->bss; + + bss->in16 = IN16; + bss->in32 = IN32; + bss->in64 = IN64; + + err = test_endian__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + usleep(1); + + CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out16, (__u64)OUT16); + CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out32, (__u64)OUT32); + CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->out64, (__u64)OUT64); + + CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const16, (__u64)OUT16); + CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const32, (__u64)OUT32); + CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n", + (__u64)bss->const64, (__u64)OUT64); +cleanup: + test_endian__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_endian.c b/tools/testing/selftests/bpf/progs/test_endian.c new file mode 100644 index 000000000000..ddb687c5d125 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_endian.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ + +#include "vmlinux.h" +#include +#include + +#define IN16 0x1234 +#define IN32 0x12345678U +#define IN64 0x123456789abcdef0ULL + +__u16 in16 = 0; +__u32 in32 = 0; +__u64 in64 = 0; + +__u16 out16 = 0; +__u32 out32 = 0; +__u64 out64 = 0; + +__u16 const16 = 0; +__u32 const32 = 0; +__u64 const64 = 0; + +SEC("raw_tp/sys_enter") +int sys_enter(const void *ctx) +{ + out16 = __builtin_bswap16(in16); + out32 = __builtin_bswap32(in32); + out64 = __builtin_bswap64(in64); + const16 = ___bpf_swab16(IN16); + const32 = ___bpf_swab32(IN32); + const64 = ___bpf_swab64(IN64); + + return 0; +} + +char _license[] SEC("license") = "GPL"; From f9200a52eedfe96ae8d9cbf68a363f5409a46117 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sat, 20 Jun 2020 13:03:55 +0300 Subject: [PATCH 0548/2056] ipvs: avoid expiring many connections from timer Add new functions ip_vs_conn_del() and ip_vs_conn_del_put() to release many IPVS connections in process context. They are suitable for connections found in table when we do not want to overload the timers. 
Currently, the change is useful for the dropentry delayed work but it will be used also in following patch when flushing connections to failed destinations. Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_conn.c | 55 +++++++++++++++++++++++---------- net/netfilter/ipvs/ip_vs_ctl.c | 6 ++-- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 02f2f636798d..b3921ae92740 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -807,6 +807,31 @@ static void ip_vs_conn_rcu_free(struct rcu_head *head) kmem_cache_free(ip_vs_conn_cachep, cp); } +/* Try to delete connection while not holding reference */ +static void ip_vs_conn_del(struct ip_vs_conn *cp) +{ + if (del_timer(&cp->timer)) { + /* Drop cp->control chain too */ + if (cp->control) + cp->timeout = 0; + ip_vs_conn_expire(&cp->timer); + } +} + +/* Try to delete connection while holding reference */ +static void ip_vs_conn_del_put(struct ip_vs_conn *cp) +{ + if (del_timer(&cp->timer)) { + /* Drop cp->control chain too */ + if (cp->control) + cp->timeout = 0; + __ip_vs_conn_put(cp); + ip_vs_conn_expire(&cp->timer); + } else { + __ip_vs_conn_put(cp); + } +} + static void ip_vs_conn_expire(struct timer_list *t) { struct ip_vs_conn *cp = from_timer(cp, t, timer); @@ -827,14 +852,17 @@ static void ip_vs_conn_expire(struct timer_list *t) /* does anybody control me? */ if (ct) { + bool has_ref = !cp->timeout && __ip_vs_conn_get(ct); + ip_vs_control_del(cp); /* Drop CTL or non-assured TPL if not used anymore */ - if (!cp->timeout && !atomic_read(&ct->n_control) && + if (has_ref && !atomic_read(&ct->n_control) && (!(ct->flags & IP_VS_CONN_F_TEMPLATE) || !(ct->state & IP_VS_CTPL_S_ASSURED))) { IP_VS_DBG(4, "drop controlling connection\n"); - ct->timeout = 0; - ip_vs_conn_expire_now(ct); + ip_vs_conn_del_put(ct); + } else if (has_ref) { + __ip_vs_conn_put(ct); } } @@ -1317,8 +1345,7 @@ void ip_vs_random_dropentry(struct netns_ipvs *ipvs) drop: IP_VS_DBG(4, "drop connection\n"); - cp->timeout = 0; - ip_vs_conn_expire_now(cp); + ip_vs_conn_del(cp); } cond_resched_rcu(); } @@ -1341,19 +1368,15 @@ static void ip_vs_conn_flush(struct netns_ipvs *ipvs) hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { if (cp->ipvs != ipvs) continue; - /* As timers are expired in LIFO order, restart - * the timer of controlling connection first, so - * that it is expired after us. 
- */ + if (atomic_read(&cp->n_control)) + continue; cp_c = cp->control; - /* cp->control is valid only with reference to cp */ - if (cp_c && __ip_vs_conn_get(cp)) { - IP_VS_DBG(4, "del controlling connection\n"); - ip_vs_conn_expire_now(cp_c); - __ip_vs_conn_put(cp); - } IP_VS_DBG(4, "del connection\n"); - ip_vs_conn_expire_now(cp); + ip_vs_conn_del(cp); + if (cp_c && !atomic_read(&cp_c->n_control)) { + IP_VS_DBG(4, "del controlling connection\n"); + ip_vs_conn_del(cp_c); + } } cond_resched_rcu(); } diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 0eed388c960b..4af83f466dfc 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -224,7 +224,8 @@ static void defense_work_handler(struct work_struct *work) update_defense_level(ipvs); if (atomic_read(&ipvs->dropentry)) ip_vs_random_dropentry(ipvs); - schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD); + queue_delayed_work(system_long_wq, &ipvs->defense_work, + DEFENSE_TIMER_PERIOD); } #endif @@ -4082,7 +4083,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_tbl = tbl; /* Schedule defense work */ INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler); - schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD); + queue_delayed_work(system_long_wq, &ipvs->defense_work, + DEFENSE_TIMER_PERIOD); return 0; } From bba1dc0b55ac462d24ed1228ad49800c238cd6d7 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 29 Jun 2020 21:33:39 -0700 Subject: [PATCH 0549/2056] bpf: Remove redundant synchronize_rcu. bpf_free_used_maps() or close(map_fd) will trigger map_free callback. bpf_free_used_maps() is called after bpf prog is no longer executing: bpf_prog_put->call_rcu->bpf_prog_free->bpf_free_used_maps. Hence there is no need to call synchronize_rcu() to protect map elements. Note that hash_of_maps and array_of_maps update/delete inner maps via sys_bpf() that calls maybe_wait_bpf_programs() and synchronize_rcu(). Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Paul E. McKenney Link: https://lore.kernel.org/bpf/20200630043343.53195-2-alexei.starovoitov@gmail.com --- kernel/bpf/arraymap.c | 9 --------- kernel/bpf/hashtab.c | 8 +++----- kernel/bpf/lpm_trie.c | 5 ----- kernel/bpf/queue_stack_maps.c | 7 ------- kernel/bpf/reuseport_array.c | 2 -- kernel/bpf/ringbuf.c | 7 ------- kernel/bpf/stackmap.c | 3 --- 7 files changed, 3 insertions(+), 38 deletions(-) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index ec5cd11032aa..c66e8273fccd 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -386,13 +386,6 @@ static void array_map_free(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); - /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, - * so the programs (can be more than one that used this map) were - * disconnected from events. 
Wait for outstanding programs to complete - * and free the array - */ - synchronize_rcu(); - if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) bpf_array_free_percpu(array); @@ -546,8 +539,6 @@ static void fd_array_map_free(struct bpf_map *map) struct bpf_array *array = container_of(map, struct bpf_array, map); int i; - synchronize_rcu(); - /* make sure it's empty */ for (i = 0; i < array->map.max_entries; i++) BUG_ON(array->ptrs[i] != NULL); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index acd06081d81d..d4378d7d442b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1290,12 +1290,10 @@ static void htab_map_free(struct bpf_map *map) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, - * so the programs (can be more than one that used this map) were - * disconnected from events. Wait for outstanding critical sections in - * these programs to complete + /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback. + * bpf_free_used_maps() is called after bpf prog is no longer executing. + * There is no need to synchronize_rcu() here to protect map elements. */ - synchronize_rcu(); /* some of free_htab_elem() callbacks for elements of this map may * not have executed. Wait for them. diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 1abd4e3f906d..44474bf3ab7a 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -589,11 +589,6 @@ static void trie_free(struct bpf_map *map) struct lpm_trie_node __rcu **slot; struct lpm_trie_node *node; - /* Wait for outstanding programs to complete - * update/lookup/delete/get_next_key and free the trie. - */ - synchronize_rcu(); - /* Always start at the root and walk down to a node that has no * children. Then free that node, nullify its reference in the parent * and start over. diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c index 80c66a6d7c54..44184f82916a 100644 --- a/kernel/bpf/queue_stack_maps.c +++ b/kernel/bpf/queue_stack_maps.c @@ -101,13 +101,6 @@ static void queue_stack_map_free(struct bpf_map *map) { struct bpf_queue_stack *qs = bpf_queue_stack(map); - /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, - * so the programs (can be more than one that used this map) were - * disconnected from events. Wait for outstanding critical sections in - * these programs to complete - */ - synchronize_rcu(); - bpf_map_area_free(qs); } diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c index a09922f656e4..3625c4fcc65c 100644 --- a/kernel/bpf/reuseport_array.c +++ b/kernel/bpf/reuseport_array.c @@ -96,8 +96,6 @@ static void reuseport_array_free(struct bpf_map *map) struct sock *sk; u32 i; - synchronize_rcu(); - /* * ops->map_*_elem() will not be able to access this * array now. Hence, this function only races with diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index dbf37aff4827..13a8d3967e07 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -215,13 +215,6 @@ static void ringbuf_map_free(struct bpf_map *map) { struct bpf_ringbuf_map *rb_map; - /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, - * so the programs (can be more than one that used this map) were - * disconnected from events. 
Wait for outstanding critical sections in - * these programs to complete - */ - synchronize_rcu(); - rb_map = container_of(map, struct bpf_ringbuf_map, map); bpf_ringbuf_free(rb_map->rb); kfree(rb_map); diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 27dc9b1b08a5..071f98d0f7c6 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -604,9 +604,6 @@ static void stack_map_free(struct bpf_map *map) { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); - /* wait for bpf programs to complete before freeing stack map */ - synchronize_rcu(); - bpf_map_area_free(smap->elems); pcpu_freelist_destroy(&smap->freelist); bpf_map_area_free(smap); From d141b8bc5773cbbaf5b8530f08f94fc10fff9e8c Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:43 -0700 Subject: [PATCH 0550/2056] perf: Expose get/put_callchain_entry() Sanitize and expose get/put_callchain_entry(). This would be used by bpf stack map. Suggested-by: Peter Zijlstra Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200630062846.664389-2-songliubraving@fb.com --- include/linux/perf_event.h | 2 ++ kernel/events/callchain.c | 13 ++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b4bb32082342..00ab5efa3833 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1244,6 +1244,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); +extern struct perf_callchain_entry *get_callchain_entry(int *rctx); +extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 334d48b16c36..c6ce894e4ce9 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -149,7 +149,7 @@ void put_callchain_buffers(void) } } -static struct perf_callchain_entry *get_callchain_entry(int *rctx) +struct perf_callchain_entry *get_callchain_entry(int *rctx) { int cpu; struct callchain_cpus_entries *entries; @@ -159,8 +159,10 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) return NULL; entries = rcu_dereference(callchain_cpus_entries); - if (!entries) + if (!entries) { + put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx); return NULL; + } cpu = smp_processor_id(); @@ -168,7 +170,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx) (*rctx * perf_callchain_entry__sizeof())); } -static void +void put_callchain_entry(int rctx) { put_recursion_context(this_cpu_ptr(callchain_recursion), rctx); @@ -183,11 +185,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, int rctx; entry = get_callchain_entry(&rctx); - if (rctx == -1) - return NULL; - if (!entry) - goto exit_put; + return NULL; ctx.entry = entry; ctx.max_stack = max_stack; From fa28dcb82a38f8e3993b0fae9106b1a80b59e4f0 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:44 -0700 Subject: [PATCH 0551/2056] bpf: Introduce helper bpf_get_task_stack() Introduce helper bpf_get_task_stack(), which dumps stack trace of given task. This is different to bpf_get_stack(), which gets stack track of current task. 
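For quick reference, the prototype this patch documents in the uapi header (further down in the diff) is:

  long bpf_get_task_stack(struct task_struct *task, void *buf,
                          u32 size, u64 flags);

where *flags* carries an optional number of frames to skip (masked with BPF_F_SKIP_FIELD_MASK) and may additionally set BPF_F_USER_STACK or BPF_F_USER_BUILD_ID, mirroring bpf_get_stack().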
One potential use case of bpf_get_task_stack() is to call it from bpf_iter__task and dump all /proc//stack to a seq_file. bpf_get_task_stack() uses stack_trace_save_tsk() instead of get_perf_callchain() for kernel stack. The benefit of this choice is that stack_trace_save_tsk() doesn't require changes in arch/. The downside of using stack_trace_save_tsk() is that stack_trace_save_tsk() dumps the stack trace to unsigned long array. For 32-bit systems, we need to translate it to u64 array. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200630062846.664389-3-songliubraving@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 37 +++++++++++++++- kernel/bpf/stackmap.c | 77 ++++++++++++++++++++++++++++++++-- kernel/bpf/verifier.c | 4 +- kernel/trace/bpf_trace.c | 2 + scripts/bpf_helpers_doc.py | 2 + tools/include/uapi/linux/bpf.h | 37 +++++++++++++++- 7 files changed, 153 insertions(+), 7 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3d2ade703a35..0cd7f6884c5c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1627,6 +1627,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_proto; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0cb8ec948816..da9bf35a26f8 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3285,6 +3285,39 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) + * Description + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to struct task_struct. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * Return + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. 
+ * */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3427,7 +3460,9 @@ union bpf_attr { FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), + FN(skc_to_udp6_sock), \ + FN(get_task_stack), \ + /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 071f98d0f7c6..5ad72ab2276b 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -348,6 +348,40 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, } } +static struct perf_callchain_entry * +get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) +{ + struct perf_callchain_entry *entry; + int rctx; + + entry = get_callchain_entry(&rctx); + + if (!entry) + return NULL; + + entry->nr = init_nr + + stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr), + sysctl_perf_event_max_stack - init_nr, 0); + + /* stack_trace_save_tsk() works on unsigned long array, while + * perf_callchain_entry uses u64 array. For 32-bit systems, it is + * necessary to fix this mismatch. + */ + if (__BITS_PER_LONG != 64) { + unsigned long *from = (unsigned long *) entry->ip; + u64 *to = entry->ip; + int i; + + /* copy data from the end to avoid using extra buffer */ + for (i = entry->nr - 1; i >= (int)init_nr; i--) + to[i] = (u64)(from[i]); + } + + put_callchain_entry(rctx); + + return entry; +} + BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u64, flags) { @@ -448,8 +482,8 @@ const struct bpf_func_proto bpf_get_stackid_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, - u64, flags) +static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, + void *buf, u32 size, u64 flags) { u32 init_nr, trace_nr, copy_len, elem_size, num_elem; bool user_build_id = flags & BPF_F_USER_BUILD_ID; @@ -471,13 +505,22 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, if (unlikely(size % elem_size)) goto clear; + /* cannot get valid user stack for task without user_mode regs */ + if (task && user && !user_mode(regs)) + goto err_fault; + num_elem = size / elem_size; if (sysctl_perf_event_max_stack < num_elem) init_nr = 0; else init_nr = sysctl_perf_event_max_stack - num_elem; - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); + + if (kernel && task) + trace = get_callchain_entry_for_task(task, init_nr); + else + trace = get_perf_callchain(regs, init_nr, kernel, user, + sysctl_perf_event_max_stack, + false, false); if (unlikely(!trace)) goto err_fault; @@ -505,6 +548,12 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, return err; } +BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, + u64, flags) +{ + return __bpf_get_stack(regs, NULL, buf, size, flags); +} + const struct bpf_func_proto bpf_get_stack_proto = { .func = bpf_get_stack, .gpl_only = true, @@ -515,6 +564,26 @@ const struct bpf_func_proto bpf_get_stack_proto = { .arg4_type = ARG_ANYTHING, }; +BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, + u32, size, u64, flags) +{ + struct pt_regs *regs = task_pt_regs(task); + + return __bpf_get_stack(regs, task, buf, size, flags); +} + +static int bpf_get_task_stack_btf_ids[5]; +const struct bpf_func_proto bpf_get_task_stack_proto = { + .func = bpf_get_task_stack, + .gpl_only = false, + 
.ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_BTF_ID, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, + .btf_id = bpf_get_task_stack_btf_ids, +}; + /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7de98906ddf4..b608185e1ffd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4864,7 +4864,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn if (err) return err; - if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { + if ((func_id == BPF_FUNC_get_stack || + func_id == BPF_FUNC_get_task_stack) && + !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 5d59dda5f661..977ba3b6f6c6 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1137,6 +1137,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_ringbuf_query_proto; case BPF_FUNC_jiffies64: return &bpf_jiffies64_proto; + case BPF_FUNC_get_task_stack: + return &bpf_get_task_stack_proto; default: return NULL; } diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6bab40ff442e..6843376733df 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -426,6 +426,7 @@ class PrinterHelpers(Printer): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct task_struct', 'struct __sk_buff', 'struct sk_msg_md', @@ -468,6 +469,7 @@ class PrinterHelpers(Printer): 'struct tcp_timewait_sock', 'struct tcp_request_sock', 'struct udp6_sock', + 'struct task_struct', } mapped_types = { 'u8': '__u8', diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 0cb8ec948816..da9bf35a26f8 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3285,6 +3285,39 @@ union bpf_attr { * Dynamically cast a *sk* pointer to a *udp6_sock* pointer. * Return * *sk* if casting is valid, or NULL otherwise. + * + * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags) + * Description + * Return a user or a kernel stack in bpf program provided buffer. + * To achieve this, the helper needs *task*, which is a valid + * pointer to struct task_struct. To store the stacktrace, the + * bpf program provides *buf* with a nonnegative *size*. + * + * The last argument, *flags*, holds the number of stack frames to + * skip (from 0 to 255), masked with + * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set + * the following flags: + * + * **BPF_F_USER_STACK** + * Collect a user space stack instead of a kernel stack. + * **BPF_F_USER_BUILD_ID** + * Collect buildid+offset instead of ips for user stack, + * only valid if **BPF_F_USER_STACK** is also specified. + * + * **bpf_get_task_stack**\ () can collect up to + * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject + * to sufficient large buffer size. Note that + * this limit can be controlled with the **sysctl** program, and + * that it should be manually increased in order to profile long + * user stacks (such as stacks for Java programs). To do so, use: + * + * :: + * + * # sysctl kernel.perf_event_max_stack= + * Return + * A non-negative value equal to or less than *size* on success, + * or a negative error in case of failure. 
+ * */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3427,7 +3460,9 @@ union bpf_attr { FN(skc_to_tcp_sock), \ FN(skc_to_tcp_timewait_sock), \ FN(skc_to_tcp_request_sock), \ - FN(skc_to_udp6_sock), + FN(skc_to_udp6_sock), \ + FN(get_task_stack), \ + /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 2df6bb5493f83c7e818f66c384ea337c1b3da228 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:45 -0700 Subject: [PATCH 0552/2056] bpf: Allow %pB in bpf_seq_printf() and bpf_trace_printk() This makes it easy to dump stack trace in text. Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200630062846.664389-4-songliubraving@fb.com --- kernel/trace/bpf_trace.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 977ba3b6f6c6..1d874d8e4384 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -376,7 +376,7 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, /* * Only limited trace_printk() conversion specifiers allowed: - * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s + * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s */ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, u64, arg2, u64, arg3) @@ -420,6 +420,11 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, goto fmt_str; } + if (fmt[i + 1] == 'B') { + i++; + goto fmt_next; + } + /* disallow any further format extensions */ if (fmt[i + 1] != 0 && !isspace(fmt[i + 1]) && @@ -636,7 +641,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, if (fmt[i] == 'p') { if (fmt[i + 1] == 0 || fmt[i + 1] == 'K' || - fmt[i + 1] == 'x') { + fmt[i + 1] == 'x' || + fmt[i + 1] == 'B') { /* just kernel pointers */ params[fmt_cnt] = args[fmt_cnt]; fmt_cnt++; From c7568114bc56cf3ec0bd9eb117bbe7cad3d30e11 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Mon, 29 Jun 2020 23:28:46 -0700 Subject: [PATCH 0553/2056] selftests/bpf: Add bpf_iter test with bpf_get_task_stack() The new test is similar to other bpf_iter tests. It dumps all /proc//stack to a seq_file. Here is some example output: pid: 2873 num_entries: 3 [<0>] worker_thread+0xc6/0x380 [<0>] kthread+0x135/0x150 [<0>] ret_from_fork+0x22/0x30 pid: 2874 num_entries: 9 [<0>] __bpf_get_stack+0x15e/0x250 [<0>] bpf_prog_22a400774977bb30_dump_task_stack+0x4a/0xb3c [<0>] bpf_iter_run_prog+0x81/0x170 [<0>] __task_seq_show+0x58/0x80 [<0>] bpf_seq_read+0x1c3/0x3b0 [<0>] vfs_read+0x9e/0x170 [<0>] ksys_read+0xa7/0xe0 [<0>] do_syscall_64+0x4c/0xa0 [<0>] entry_SYSCALL_64_after_hwframe+0x44/0xa9 Note: bpf_iter test as-is doesn't print the contents of the seq_file. To see the example above, it is necessary to add printf() to do_dummy_read. 
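To actually see this output outside the selftest harness, user space needs to create an iterator instance from the attached program and read it. A rough sketch follows (error handling trimmed; it assumes the generated bpf_iter_task_stack.skel.h skeleton and the usual libbpf iter APIs such as bpf_program__attach_iter() and bpf_iter_create()):

  // SPDX-License-Identifier: GPL-2.0
  #include <stdio.h>
  #include <unistd.h>
  #include <bpf/libbpf.h>
  #include <bpf/bpf.h>
  #include "bpf_iter_task_stack.skel.h"

  int main(void)
  {
  	struct bpf_iter_task_stack *skel;
  	struct bpf_link *link;
  	char buf[4096];
  	ssize_t len;
  	int iter_fd;

  	skel = bpf_iter_task_stack__open_and_load();
  	if (!skel)
  		return 1;

  	link = bpf_program__attach_iter(skel->progs.dump_task_stack, NULL);
  	if (libbpf_get_error(link))
  		goto out;

  	iter_fd = bpf_iter_create(bpf_link__fd(link));
  	if (iter_fd < 0)
  		goto out_link;

  	/* stream whatever dump_task_stack() wrote into the seq_file */
  	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
  		fwrite(buf, 1, len, stdout);

  	close(iter_fd);
  out_link:
  	bpf_link__destroy(link);
  out:
  	bpf_iter_task_stack__destroy(skel);
  	return 0;
  }

This mirrors what do_dummy_read() in prog_tests/bpf_iter.c does, except that it prints the data instead of discarding it.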
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200630062846.664389-5-songliubraving@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 17 +++++++++ .../selftests/bpf/progs/bpf_iter_task_stack.c | 37 +++++++++++++++++++ 2 files changed, 54 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 1e2e0fced6e8..fed42755416d 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -5,6 +5,7 @@ #include "bpf_iter_netlink.skel.h" #include "bpf_iter_bpf_map.skel.h" #include "bpf_iter_task.skel.h" +#include "bpf_iter_task_stack.skel.h" #include "bpf_iter_task_file.skel.h" #include "bpf_iter_tcp4.skel.h" #include "bpf_iter_tcp6.skel.h" @@ -110,6 +111,20 @@ static void test_task(void) bpf_iter_task__destroy(skel); } +static void test_task_stack(void) +{ + struct bpf_iter_task_stack *skel; + + skel = bpf_iter_task_stack__open_and_load(); + if (CHECK(!skel, "bpf_iter_task_stack__open_and_load", + "skeleton open_and_load failed\n")) + return; + + do_dummy_read(skel->progs.dump_task_stack); + + bpf_iter_task_stack__destroy(skel); +} + static void test_task_file(void) { struct bpf_iter_task_file *skel; @@ -452,6 +467,8 @@ void test_bpf_iter(void) test_bpf_map(); if (test__start_subtest("task")) test_task(); + if (test__start_subtest("task_stack")) + test_task_stack(); if (test__start_subtest("task_file")) test_task_file(); if (test__start_subtest("tcp4")) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c new file mode 100644 index 000000000000..e40d32a2ed93 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +#define MAX_STACK_TRACE_DEPTH 64 +unsigned long entries[MAX_STACK_TRACE_DEPTH]; +#define SIZE_OF_ULONG (sizeof(unsigned long)) + +SEC("iter/task") +int dump_task_stack(struct bpf_iter__task *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct task_struct *task = ctx->task; + long i, retlen; + + if (task == (void *)0) + return 0; + + retlen = bpf_get_task_stack(task, entries, + MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0); + if (retlen < 0) + return 0; + + BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid, + retlen / SIZE_OF_ULONG); + for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) { + if (retlen > i * SIZE_OF_ULONG) + BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]); + } + BPF_SEQ_PRINTF(seq, "\n"); + + return 0; +} From 6b207d66aa9fad0deed13d5f824e1ea193b0a777 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Tue, 30 Jun 2020 10:29:10 -0700 Subject: [PATCH 0554/2056] bpf: Fix net/core/filter build errors when INET is not enabled Fix build errors when CONFIG_INET is not set/enabled. 
(.text+0x2b1b): undefined reference to `tcp_prot' (.text+0x2b3b): undefined reference to `tcp_prot' Signed-off-by: Randy Dunlap Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/b1a858ec-7e04-56bc-248a-62cb9bbee726@infradead.org --- net/core/filter.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/core/filter.c b/net/core/filter.c index c796e141ea8e..c5e696e6c315 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9328,8 +9328,10 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) { +#ifdef CONFIG_INET if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) return (unsigned long)sk; +#endif #if IS_BUILTIN(CONFIG_IPV6) if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) @@ -9350,8 +9352,10 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) { +#ifdef CONFIG_INET if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) return (unsigned long)sk; +#endif #if IS_BUILTIN(CONFIG_IPV6) if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) From 2a6d6c31f136c136c05455788579586874a0525c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 1 Jul 2020 16:04:33 +0100 Subject: [PATCH 0555/2056] net/packet: remove redundant initialization of variable err The variable err is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- net/packet/af_packet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 29bd405adbbd..7b436ebde61d 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4293,7 +4293,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, struct packet_ring_buffer *rb; struct sk_buff_head *rb_queue; __be16 num; - int err = -EINVAL; + int err; /* Added to avoid minimal code churn */ struct tpacket_req *req = &req_u->req; From 4f195d280347c00bc4e397c8a8cbffb2611ebeed Mon Sep 17 00:00:00 2001 From: Hulk Robot Date: Wed, 1 Jul 2020 23:38:03 +0800 Subject: [PATCH 0556/2056] qed: Make symbol 'qed_hw_err_type_descr' static Fix sparse build warning: drivers/net/ethernet/qlogic/qed/qed_main.c:2480:6: warning: symbol 'qed_hw_err_type_descr' was not declared. Should it be static? Signed-off-by: Hulk Robot Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0cd6b8bf023a..98527e42f918 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2451,7 +2451,7 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) ops->schedule_recovery_handler(cookie); } -char *qed_hw_err_type_descr[] = { +static char *qed_hw_err_type_descr[] = { [QED_HW_ERR_FAN_FAIL] = "Fan Failure", [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", [QED_HW_ERR_HW_ATTN] = "HW Attention", From 7b46681cf4b24cd686e24ca4a3d456fef20121eb Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:47 +0530 Subject: [PATCH 0557/2056] typhoon: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. And they use PCI helper functions to do it. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. In this driver: typhoon_resume() calls typhoon_wakeup() which then calls PCI helper functions pci_set_power_state() and pci_restore_state(). The only other function, using typhoon_wakeup() is typhoon_open(). Thus remove the pci_*() calls from tyhpoon_wakeup() and place them in typhoon_open(), maintaining the order, to retain the normal behavior of the function Now, typhoon_suspend() calls typhoon_sleep() which then calls PCI helper functions pci_enable_wake(), pci_disable_device() and pci_set_power_state(). Other functions: - typhoon_open() - typhoon_close() - typhoon_init_one() are also invoking typhoon_sleep(). Thus, in this case, cannot simply move PCI helper functions call. Hence, define a new function typhoon_sleep_early() which will do all the operations, which typhoon_sleep() was doing before calling PCI helper functions. Now typhoon_sleep() will call typhoon_sleep_early() to do those tasks, hence, the behavior for _open(), _close and _init_one() remain unchanged. And typhon_suspend() only requires typhoon_sleep_early(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/typhoon.c | 53 +++++++++++++++++------------ 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c index 5ed33c2c4742..d3b30bacc94e 100644 --- a/drivers/net/ethernet/3com/typhoon.c +++ b/drivers/net/ethernet/3com/typhoon.c @@ -1801,9 +1801,8 @@ typhoon_free_rx_rings(struct typhoon *tp) } static int -typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) +typhoon_sleep_early(struct typhoon *tp, __le16 events) { - struct pci_dev *pdev = tp->pdev; void __iomem *ioaddr = tp->ioaddr; struct cmd_desc xp_cmd; int err; @@ -1832,20 +1831,29 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) */ netif_carrier_off(tp->dev); + return 0; +} + +static int +typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events) +{ + int err; + + err = typhoon_sleep_early(tp, events); + + if (err) + return err; + pci_enable_wake(tp->pdev, state, 1); - pci_disable_device(pdev); - return pci_set_power_state(pdev, state); + pci_disable_device(tp->pdev); + return pci_set_power_state(tp->pdev, state); } static int typhoon_wakeup(struct typhoon *tp, int wait_type) { - struct pci_dev *pdev = tp->pdev; void __iomem *ioaddr = tp->ioaddr; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* Post 2.x.x versions of the Sleep Image require a reset before * we can download the Runtime Image. But let's not make users of * the old firmware pay for the reset. @@ -2049,6 +2057,9 @@ typhoon_open(struct net_device *dev) if (err) goto out; + pci_set_power_state(tp->pdev, PCI_D0); + pci_restore_state(tp->pdev); + err = typhoon_wakeup(tp, WaitSleep); if (err < 0) { netdev_err(dev, "unable to wakeup device\n"); @@ -2114,11 +2125,10 @@ typhoon_close(struct net_device *dev) return 0; } -#ifdef CONFIG_PM -static int -typhoon_resume(struct pci_dev *pdev) +static int __maybe_unused +typhoon_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct typhoon *tp = netdev_priv(dev); /* If we're down, resume when we are upped. 
@@ -2144,9 +2154,10 @@ typhoon_resume(struct pci_dev *pdev) return -EBUSY; } -static int -typhoon_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused +typhoon_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *dev = pci_get_drvdata(pdev); struct typhoon *tp = netdev_priv(dev); struct cmd_desc xp_cmd; @@ -2190,18 +2201,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state) goto need_resume; } - if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) { + if (typhoon_sleep_early(tp, tp->wol_events) < 0) { netdev_err(dev, "unable to put card to sleep\n"); goto need_resume; } + device_wakeup_enable(dev_d); + return 0; need_resume: - typhoon_resume(pdev); + typhoon_resume(dev_d); return -EBUSY; } -#endif static int typhoon_test_mmio(struct pci_dev *pdev) @@ -2533,15 +2545,14 @@ typhoon_remove_one(struct pci_dev *pdev) free_netdev(dev); } +static SIMPLE_DEV_PM_OPS(typhoon_pm_ops, typhoon_suspend, typhoon_resume); + static struct pci_driver typhoon_driver = { .name = KBUILD_MODNAME, .id_table = typhoon_pci_tbl, .probe = typhoon_init_one, .remove = typhoon_remove_one, -#ifdef CONFIG_PM - .suspend = typhoon_suspend, - .resume = typhoon_resume, -#endif + .driver.pm = &typhoon_pm_ops, }; static int __init From 33b7a252c8dc66ad4abd01a50f23a0d59d95d06d Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:48 +0530 Subject: [PATCH 0558/2056] ne2k-pci: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_enable/disable_device(), pci_save/restore_sate() and pci_set_power_state(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ne2k-pci.c | 29 ++++++---------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 77d78b4c59c4..e500f0c05725 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -699,30 +699,18 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ne2k_pci_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); netif_device_detach(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } -static int ne2k_pci_resume(struct pci_dev *pdev) +static int __maybe_unused ne2k_pci_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - int rc; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - rc = pci_enable_device(pdev); - if (rc) - return rc; + struct net_device *dev = dev_get_drvdata(dev_d); NS8390_init(dev, 1); netif_device_attach(dev); @@ -730,19 +718,14 @@ static int ne2k_pci_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - +static SIMPLE_DEV_PM_OPS(ne2k_pci_pm_ops, ne2k_pci_suspend, ne2k_pci_resume); static struct pci_driver ne2k_driver = { .name = DRV_NAME, .probe = ne2k_pci_init_one, .remove = ne2k_pci_remove_one, .id_table = ne2k_pci_tbl, -#ifdef CONFIG_PM - .suspend = ne2k_pci_suspend, - .resume = ne2k_pci_resume, -#endif - + .driver.pm = &ne2k_pci_pm_ops, }; From a7c48c721109edf664555d47c67676ee362cde2f Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:49 +0530 Subject: [PATCH 0559/2056] starfire: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_save/restore_sate() and pci_set_power_state(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/adaptec/starfire.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index a64191fc2af9..ba0055bb1614 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1984,28 +1984,21 @@ static int netdev_close(struct net_device *dev) return 0; } -#ifdef CONFIG_PM -static int starfire_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused starfire_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); if (netif_running(dev)) { netif_device_detach(dev); netdev_close(dev); } - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev,state)); - return 0; } -static int starfire_resume(struct pci_dev *pdev) +static int __maybe_unused starfire_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); if (netif_running(dev)) { netdev_open(dev); @@ -2014,8 +2007,6 @@ static int starfire_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - static void starfire_remove_one(struct pci_dev *pdev) { @@ -2040,15 +2031,13 @@ static void starfire_remove_one(struct pci_dev *pdev) free_netdev(dev); /* Will also free np!! */ } +static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume); static struct pci_driver starfire_driver = { .name = DRV_NAME, .probe = starfire_init_one, .remove = starfire_remove_one, -#ifdef CONFIG_PM - .suspend = starfire_suspend, - .resume = starfire_resume, -#endif /* CONFIG_PM */ + .driver.pm = &starfire_pm_ops, .id_table = starfire_pci_tbl, }; From 817a89ae10a7fa78449d7f6270550e0d40236924 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:50 +0530 Subject: [PATCH 0560/2056] ena_netdev: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 22 ++++++++------------ 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index dda4b8fc9525..91be3ffa1c5c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4420,13 +4420,12 @@ static void ena_shutdown(struct pci_dev *pdev) __ena_shutoff(pdev, true); } -#ifdef CONFIG_PM /* ena_suspend - PM suspend callback - * @pdev: PCI device information struct - * @state:power state + * @dev_d: Device information struct */ -static int ena_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ena_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct ena_adapter *adapter = pci_get_drvdata(pdev); u64_stats_update_begin(&adapter->syncp); @@ -4445,12 +4444,11 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state) } /* ena_resume - PM resume callback - * @pdev: PCI device information struct - * + * @dev_d: Device information struct */ -static int ena_resume(struct pci_dev *pdev) +static int __maybe_unused ena_resume(struct device *dev_d) { - struct ena_adapter *adapter = pci_get_drvdata(pdev); + struct ena_adapter *adapter = dev_get_drvdata(dev_d); int rc; u64_stats_update_begin(&adapter->syncp); @@ -4462,7 +4460,8 @@ static int ena_resume(struct pci_dev *pdev) rtnl_unlock(); return rc; } -#endif + +static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume); static struct pci_driver ena_pci_driver = { .name = DRV_MODULE_NAME, @@ -4470,10 +4469,7 @@ static struct pci_driver ena_pci_driver = { .probe = ena_probe, .remove = ena_remove, .shutdown = ena_shutdown, -#ifdef CONFIG_PM - .suspend = ena_suspend, - .resume = ena_resume, -#endif + .driver.pm = &ena_pm_ops, .sriov_configure = pci_sriov_configure_simple, }; From 1c2e4839eca20fe0b512cb7014a8a6a1ca83ba99 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:51 +0530 Subject: [PATCH 0561/2056] liquidio: use generic power management Drivers should not use legacy power management as they have to manage power states and related operations, for the device, themselves. This driver was handling them with the help of PCI helper functions. With generic PM, all essentials will be handled by the PCI core. Driver needs to do only device-specific operations. The driver defined empty-body .suspend() and .resume() callbacks earlier. They can now be define NULL and bind with "struct dev_pm_ops" variable. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- .../net/ethernet/cavium/liquidio/lio_main.c | 31 +++---------------- 1 file changed, 5 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 66d31c018c7e..19689d72bc4e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -405,27 +405,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused))) /* Nothing to be done here. 
*/ } -#ifdef CONFIG_PM -/** - * \brief called when suspending - * @param pdev Pointer to PCI device - * @param state state to suspend to - */ -static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)), - pm_message_t state __attribute__((unused))) -{ - return 0; -} - -/** - * \brief called when resuming - * @param pdev Pointer to PCI device - */ -static int liquidio_resume(struct pci_dev *pdev __attribute__((unused))) -{ - return 0; -} -#endif +#define liquidio_suspend NULL +#define liquidio_resume NULL /* For PCI-E Advanced Error Recovery (AER) Interface */ static const struct pci_error_handlers liquidio_err_handler = { @@ -451,17 +432,15 @@ static const struct pci_device_id liquidio_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); +static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); + static struct pci_driver liquidio_pci_driver = { .name = "LiquidIO", .id_table = liquidio_pci_tbl, .probe = liquidio_probe, .remove = liquidio_remove, .err_handler = &liquidio_err_handler, /* For AER */ - -#ifdef CONFIG_PM - .suspend = liquidio_suspend, - .resume = liquidio_resume, -#endif + .driver.pm = &liquidio_pm_ops, #ifdef CONFIG_PCI_IOV .sriov_configure = liquidio_enable_sriov, #endif From 78cad4cec6601d891e9fe6936b56a2571abfecb1 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:52 +0530 Subject: [PATCH 0562/2056] sundance: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_enable/disable_device(), pci_save/restore_sate() and pci_set_power_state(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dlink/sundance.c | 27 ++++++++------------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index dc566fcc3ba9..ca97e321082d 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1928,11 +1928,9 @@ static void sundance_remove1(struct pci_dev *pdev) } } -#ifdef CONFIG_PM - -static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int __maybe_unused sundance_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; @@ -1942,30 +1940,24 @@ static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state) netdev_close(dev); netif_device_detach(dev); - pci_save_state(pci_dev); if (np->wol_enabled) { iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); iowrite16(RxEnable, ioaddr + MACCtrl1); } - pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state), - np->wol_enabled); - pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); + + device_set_wakeup_enable(dev_d, np->wol_enabled); return 0; } -static int sundance_resume(struct pci_dev *pci_dev) +static int __maybe_unused sundance_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pci_dev); + struct net_device *dev = dev_get_drvdata(dev_d); int err = 0; if (!netif_running(dev)) return 0; - pci_set_power_state(pci_dev, PCI_D0); - pci_restore_state(pci_dev); - pci_enable_wake(pci_dev, PCI_D0, 0); - err = netdev_open(dev); if (err) { printk(KERN_ERR "%s: Can't resume interface!\n", @@ -1979,17 +1971,14 @@ static int sundance_resume(struct pci_dev *pci_dev) return err; } -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume); static struct pci_driver sundance_driver = { .name = DRV_NAME, .id_table = sundance_pci_tbl, .probe = sundance_probe1, .remove = sundance_remove1, -#ifdef CONFIG_PM - .suspend = sundance_suspend, - .resume = sundance_resume, -#endif /* CONFIG_PM */ + .driver.pm = &sundance_pm_ops, }; static int __init sundance_init(void) From e9a7f8c5865d877e96f19dc7e6c457e42d05c030 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:53 +0530 Subject: [PATCH 0563/2056] benet: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_enable/disable_device(), pci_save/restore_sate() and pci_set_power_state(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 22 +++++++-------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a7ac23a6862b..e26f59336cfd 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -6037,32 +6037,23 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) return status; } -static int be_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused be_suspend(struct device *dev_d) { - struct be_adapter *adapter = pci_get_drvdata(pdev); + struct be_adapter *adapter = dev_get_drvdata(dev_d); be_intr_set(adapter, false); be_cancel_err_detection(adapter); be_cleanup(adapter); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } -static int be_pci_resume(struct pci_dev *pdev) +static int __maybe_unused be_pci_resume(struct device *dev_d) { - struct be_adapter *adapter = pci_get_drvdata(pdev); + struct be_adapter *adapter = dev_get_drvdata(dev_d); int status = 0; - status = pci_enable_device(pdev); - if (status) - return status; - - pci_restore_state(pdev); - status = be_resume(adapter); if (status) return status; @@ -6234,13 +6225,14 @@ static const struct pci_error_handlers be_eeh_handlers = { .resume = be_eeh_resume, }; +static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume); + static struct pci_driver be_driver = { .name = DRV_NAME, .id_table = be_dev_ids, .probe = be_probe, .remove = be_remove, - .suspend = be_suspend, - .resume = be_pci_resume, + .driver.pm = &be_pci_pm_ops, .shutdown = be_shutdown, .sriov_configure = be_pci_sriov_configure, .err_handler = &be_eeh_handlers From 0e3e206a3e127417d492cb959b3b53522255dfd0 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:54 +0530 Subject: [PATCH 0564/2056] mlx4: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Use "struct dev_pm_ops" variable to bind the callbacks. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 3d9aa7da95e9..4cae7db8d49c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4370,8 +4370,9 @@ static const struct pci_error_handlers mlx4_err_handler = { .resume = mlx4_pci_resume, }; -static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state) +static int mlx4_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; @@ -4384,8 +4385,9 @@ static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int mlx4_resume(struct pci_dev *pdev) +static int mlx4_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); @@ -4414,14 +4416,15 @@ static int mlx4_resume(struct pci_dev *pdev) return ret; } +static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume); + static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, .shutdown = mlx4_shutdown, .remove = mlx4_remove_one, - .suspend = mlx4_suspend, - .resume = mlx4_resume, + .driver.pm = &mlx4_pm_ops, .err_handler = &mlx4_err_handler, }; From 64120615d140c88d110c6ffc964d9e6cd8bf5395 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:55 +0530 Subject: [PATCH 0565/2056] ksz884x: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_enable_wake(), pci_save/restore_sate() and pci_set_power_state(). Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ksz884x.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 4fe6aedca22f..24901342ecc0 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -7155,17 +7155,14 @@ static void pcidev_exit(struct pci_dev *pdev) kfree(info); } -#ifdef CONFIG_PM -static int pcidev_resume(struct pci_dev *pdev) +static int __maybe_unused pcidev_resume(struct device *dev_d) { int i; - struct platform_info *info = pci_get_drvdata(pdev); + struct platform_info *info = dev_get_drvdata(dev_d); struct dev_info *hw_priv = &info->dev_info; struct ksz_hw *hw = &hw_priv->hw; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D0, 0); + device_wakeup_disable(dev_d); if (hw_priv->wol_enable) hw_cfg_wol_pme(hw, 0); @@ -7182,10 +7179,10 @@ static int pcidev_resume(struct pci_dev *pdev) return 0; } -static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state) +static int pcidev_suspend(struct device *dev_d) { int i; - struct platform_info *info = pci_get_drvdata(pdev); + struct platform_info *info = dev_get_drvdata(dev_d); struct dev_info *hw_priv = &info->dev_info; struct ksz_hw *hw = &hw_priv->hw; @@ -7207,12 +7204,9 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state) hw_cfg_wol_pme(hw, 1); } - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + device_wakeup_enable(dev_d); return 0; } -#endif static char pcidev_name[] = "ksz884xp"; @@ -7226,11 +7220,10 @@ static const struct pci_device_id pcidev_table[] = { MODULE_DEVICE_TABLE(pci, pcidev_table); +static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume); + static struct pci_driver pci_device_driver = { -#ifdef CONFIG_PM - .suspend = pcidev_suspend, - .resume = pcidev_resume, -#endif + .driver.pm = &pcidev_pm_ops, .name = pcidev_name, .id_table = pcidev_table, .probe = pcidev_init, From 4c2ad1263b4dd2bfdc805afdbf9b27cf7f10895e Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:56 +0530 Subject: [PATCH 0566/2056] vxge: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Use "struct dev_pm_ops" variable to bind the callbacks. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 9b63574b6202..5de85b9e9e35 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3999,12 +3999,11 @@ static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask) } } -#ifdef CONFIG_PM /** * vxge_pm_suspend - vxge power management suspend entry point * */ -static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused vxge_pm_suspend(struct device *dev_d) { return -ENOSYS; } @@ -4012,13 +4011,11 @@ static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state) * vxge_pm_resume - vxge power management resume entry point * */ -static int vxge_pm_resume(struct pci_dev *pdev) +static int __maybe_unused vxge_pm_resume(struct device *dev_d) { return -ENOSYS; } -#endif - /** * vxge_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -4796,15 +4793,14 @@ static const struct pci_error_handlers vxge_err_handler = { .resume = vxge_io_resume, }; +static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume); + static struct pci_driver vxge_driver = { .name = VXGE_DRIVER_NAME, .id_table = vxge_id_table, .probe = vxge_probe, .remove = vxge_remove, -#ifdef CONFIG_PM - .suspend = vxge_pm_suspend, - .resume = vxge_pm_resume, -#endif + .driver.pm = &vxge_pm_ops, .err_handler = &vxge_err_handler, }; From 40c1b1ee5599e3f30416563ebe1568b414febd81 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 1 Jul 2020 22:20:57 +0530 Subject: [PATCH 0567/2056] natsemi: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Thus, there is no need to call the PCI helper functions like pci_enable_device, which is not recommended. Hence, removed. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/natsemi/natsemi.c | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index d21d706b83a7..c2867fe995bc 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -3247,8 +3247,6 @@ static void natsemi_remove1(struct pci_dev *pdev) free_netdev (dev); } -#ifdef CONFIG_PM - /* * The ns83815 chip doesn't have explicit RxStop bits. * Kicking the Rx or Tx process for a new packet reenables the Rx process @@ -3275,9 +3273,9 @@ static void natsemi_remove1(struct pci_dev *pdev) * Interrupts must be disabled, otherwise hands_off can cause irq storms. 
*/ -static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused natsemi_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); void __iomem * ioaddr = ns_ioaddr(dev); @@ -3326,11 +3324,10 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state) } -static int natsemi_resume (struct pci_dev *pdev) +static int __maybe_unused natsemi_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata (pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct netdev_private *np = netdev_priv(dev); - int ret = 0; rtnl_lock(); if (netif_device_present(dev)) @@ -3339,12 +3336,6 @@ static int natsemi_resume (struct pci_dev *pdev) const int irq = np->pci_dev->irq; BUG_ON(!np->hands_off); - ret = pci_enable_device(pdev); - if (ret < 0) { - dev_err(&pdev->dev, - "pci_enable_device() failed: %d\n", ret); - goto out; - } /* pci_power_on(pdev); */ napi_enable(&np->napi); @@ -3364,20 +3355,17 @@ static int natsemi_resume (struct pci_dev *pdev) netif_device_attach(dev); out: rtnl_unlock(); - return ret; + return 0; } -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume); static struct pci_driver natsemi_driver = { .name = DRV_NAME, .id_table = natsemi_pci_tbl, .probe = natsemi_probe1, .remove = natsemi_remove1, -#ifdef CONFIG_PM - .suspend = natsemi_suspend, - .resume = natsemi_resume, -#endif + .driver.pm = &natsemi_pm_ops, }; static int __init natsemi_init_mod (void) From 5463fce643e8d041f54378b28b35940fd5e5a5a4 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Wed, 3 Jun 2020 20:07:26 -0700 Subject: [PATCH 0568/2056] ethernet/intel: Convert fallthrough code comments Convert all the remaining 'fall through" code comments to the newer 'fallthrough;' keyword. 
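Editorial aside (not part of the original patch): the conversion replaces comment-style fall-through markers with the fallthrough pseudo-keyword, which the kernel defines as __attribute__((__fallthrough__)) when the compiler supports it. A comment is invisible to -Wimplicit-fallthrough (Clang in particular does not recognize the comment form, unlike GCC), while the attribute lets the compiler verify that every unannotated fall-through is a bug. The snippet below is a minimal standalone sketch of the pattern, not kernel code; flags_for_speed() and its bit values are invented purely for illustration.

#include <stdio.h>

/* Sketch of the keyword definition: prefer the statement attribute when the
 * compiler understands it, otherwise fall back to a harmless no-op statement. */
#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)
#endif

/* Hypothetical helper: accumulate capability bits, where each link speed
 * deliberately inherits the bits of the slower speeds below it. */
static int flags_for_speed(int speed)
{
	int flags = 0;

	switch (speed) {
	case 1000:
		flags |= 0x4;
		fallthrough;	/* was a comment-style marker before the conversion */
	case 100:
		flags |= 0x2;
		fallthrough;
	case 10:
		flags |= 0x1;
		break;
	default:
		break;
	}
	return flags;
}

int main(void)
{
	printf("1000 Mb/s -> 0x%x, 100 Mb/s -> 0x%x\n",
	       flags_for_speed(1000), flags_for_speed(100));
	return 0;
}

With -Wimplicit-fallthrough enabled, deleting either fallthrough statement above produces a warning at the unannotated case boundary, which is exactly the checking the comment form could not provide.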
Suggested-by: Joe Perches Signed-off-by: Jeff Kirsher Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000/e1000_hw.c | 4 +-- drivers/net/ethernet/intel/e1000/e1000_main.c | 3 +- .../net/ethernet/intel/e1000/e1000_param.c | 2 +- drivers/net/ethernet/intel/e1000e/82571.c | 4 +-- drivers/net/ethernet/intel/e1000e/ethtool.c | 11 ++++--- drivers/net/ethernet/intel/e1000e/ich8lan.c | 14 ++++----- drivers/net/ethernet/intel/e1000e/netdev.c | 30 +++++++++---------- drivers/net/ethernet/intel/e1000e/param.c | 2 +- drivers/net/ethernet/intel/e1000e/phy.c | 2 +- drivers/net/ethernet/intel/e1000e/ptp.c | 3 +- .../net/ethernet/intel/fm10k/fm10k_ethtool.c | 4 +-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4 +-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 6 ++-- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 8 ++--- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 2 +- .../net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++-- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 4 +-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2 +- drivers/net/ethernet/intel/igb/e1000_82575.c | 4 +-- drivers/net/ethernet/intel/igb/e1000_nvm.c | 2 +- drivers/net/ethernet/intel/igb/e1000_phy.c | 4 +-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 8 ++--- drivers/net/ethernet/intel/igb/igb_main.c | 26 ++++++++-------- drivers/net/ethernet/intel/igb/igb_ptp.c | 2 +- drivers/net/ethernet/intel/igbvf/netdev.c | 4 +-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 16 +++------- drivers/net/ethernet/intel/igc/igc_main.c | 5 ++-- .../net/ethernet/intel/ixgbe/ixgbe_82598.c | 2 +- .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 6 ++-- .../net/ethernet/intel/ixgbe/ixgbe_common.c | 4 +-- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 10 +++---- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 30 +++++++++---------- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 4 +-- .../net/ethernet/intel/ixgbe/ixgbe_sriov.c | 4 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 10 +++---- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +-- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 10 +++---- drivers/net/ethernet/intel/ixgbevf/vf.c | 6 ++-- 43 files changed, 135 insertions(+), 151 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index 623e516a9630..4e7a0810eaeb 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -4526,7 +4526,7 @@ s32 e1000_setup_led(struct e1000_hw *hw) ~IGP01E1000_GMII_SPD)); if (ret_val) return ret_val; - /* Fall Through */ + fallthrough; default: if (hw->media_type == e1000_media_type_fiber) { ledctl = er32(LEDCTL); @@ -4571,7 +4571,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw) hw->phy_spd_default); if (ret_val) return ret_val; - /* Fall Through */ + fallthrough; default: /* Restore LEDCTL settings */ ew32(LEDCTL, hw->ledctl_default); diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 266899c0c933..1e6ec081fd9d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1138,7 +1138,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) EEPROM_INIT_CONTROL3_PORT_B, 1, 
&eeprom_data); break; } - /* Fall Through */ + fallthrough; default: e1000_read_eeprom(hw, EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); @@ -3154,7 +3154,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) break; - /* fall through */ pull_size = min((unsigned int)4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { e_err(drv, "__pskb_pull_tail " diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c index d3f29ffe1e47..4d4f5bf1e516 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_param.c +++ b/drivers/net/ethernet/intel/e1000/e1000_param.c @@ -708,7 +708,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter) goto full_duplex_only; case SPEED_1000 + HALF_DUPLEX: e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); - /* fall through */ + fallthrough; case SPEED_1000 + FULL_DUPLEX: full_duplex_only: e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 2c1bab377b2a..88faf05e23ba 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -154,7 +154,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) ew32(EECD, eecd); break; } - /* Fall Through */ + fallthrough; default: nvm->type = e1000_nvm_eeprom_spi; size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> @@ -1107,7 +1107,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw) switch (mac->type) { case e1000_82573: e1000e_enable_tx_pkt_filtering(hw); - /* fall through */ + fallthrough; case e1000_82574: case e1000_82583: reg_data = er32(GCR); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 11de79e49661..64f684dc6c7a 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -893,7 +893,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - /* fall through */ case e1000_pch_tgp: case e1000_pch_adp: mask |= BIT(18); @@ -1569,7 +1568,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) /* set bit 29 (value of MULR requests is now 0) */ tarc0 &= 0xcfffffff; ew32(TARC(0), tarc0); - /* fall through */ + fallthrough; case e1000_80003es2lan: if (hw->phy.media_type == e1000_media_type_fiber || hw->phy.media_type == e1000_media_type_internal_serdes) { @@ -1577,7 +1576,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) ew32(CTRL_EXT, adapter->tx_fifo_head); adapter->tx_fifo_head = 0; } - /* fall through */ + fallthrough; case e1000_82571: case e1000_82572: if (hw->phy.media_type == e1000_media_type_fiber || @@ -1587,7 +1586,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) usleep_range(10000, 11000); break; } - /* Fall Through */ + fallthrough; default: hw->mac.autoneg = 1; if (hw->phy.type == e1000_phy_gg82563) @@ -2122,7 +2121,7 @@ static int e1000_get_rxnfc(struct net_device *netdev, case TCP_V4_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case UDP_V4_FLOW: case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: @@ -2133,7 +2132,7 @@ static int e1000_get_rxnfc(struct net_device *netdev, case TCP_V6_FLOW: if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; 
case UDP_V6_FLOW: case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index f999cca37a8a..ae0a6332fd30 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -338,12 +338,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) */ msleep(50); - /* fall-through */ + fallthrough; case e1000_pch2lan: if (e1000_phy_is_accessible_pchlan(hw)) break; - /* fall-through */ + fallthrough; case e1000_pchlan: if ((hw->mac.type == e1000_pchlan) && (fwsm & E1000_ICH_FWSM_FW_VALID)) @@ -459,7 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) return ret_val; if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) break; - /* fall-through */ + fallthrough; case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: @@ -704,7 +704,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) case e1000_pch2lan: mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; mac->ops.rar_set = e1000_rar_set_pch2lan; - /* fall-through */ + fallthrough; case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: @@ -1559,7 +1559,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) ret_val = e1000_k1_workaround_lv(hw); if (ret_val) return ret_val; - /* fall-thru */ + fallthrough; case e1000_pchlan: if (hw->phy.type == e1000_phy_82578) { ret_val = e1000_link_stall_workaround_hv(hw); @@ -2096,7 +2096,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break; } - /* Fall-thru */ + fallthrough; case e1000_pchlan: case e1000_pch2lan: case e1000_pch_lpt: @@ -3189,7 +3189,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) return 0; } e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); - /* fall-thru */ + fallthrough; default: /* set bank to 0 in case flash read fails */ *bank = 0; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 75937a48d517..63dde3bcf5bc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2107,7 +2107,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) e1000e_reset_interrupt_capability(adapter); } adapter->int_mode = E1000E_INT_MODE_MSI; - /* Fall through */ + fallthrough; case E1000E_INT_MODE_MSI: if (!pci_enable_msi(adapter->pdev)) { adapter->flags |= FLAG_MSI_ENABLED; @@ -2115,7 +2115,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) adapter->int_mode = E1000E_INT_MODE_LEGACY; e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); } - /* Fall through */ + fallthrough; case E1000E_INT_MODE_LEGACY: /* Don't do anything; this is the system default */ break; @@ -3173,10 +3173,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) switch (adapter->rx_ps_pages) { case 3: psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; - /* fall-through */ + fallthrough; case 2: psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; - /* fall-through */ + fallthrough; case 1: psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; break; @@ -3673,9 +3673,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, is_l2 = true; break; case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - /* Hardware cannot filter just V2 L4 Sync messages; - * fall-through to V2 (both L2 and L4) Sync. 
- */ + /* Hardware cannot filter just V2 L4 Sync messages */ + fallthrough; case HWTSTAMP_FILTER_PTP_V2_SYNC: /* Also time stamps V2 Path Delay Request/Response. */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; @@ -3684,9 +3683,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, is_l4 = true; break; case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - /* Hardware cannot filter just V2 L4 Delay Request messages; - * fall-through to V2 (both L2 and L4) Delay Request. - */ + /* Hardware cannot filter just V2 L4 Delay Request messages */ + fallthrough; case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: /* Also time stamps V2 Path Delay Request/Response. */ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; @@ -3696,9 +3694,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - /* Hardware cannot filter just V2 L4 or L2 Event messages; - * fall-through to all V2 (both L2 and L4) Events. - */ + /* Hardware cannot filter just V2 L4 or L2 Event messages */ + fallthrough; case HWTSTAMP_FILTER_PTP_V2_EVENT: tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; @@ -3710,6 +3707,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, * Delay Request messages but not both so fall-through to * time stamp all packets. */ + fallthrough; case HWTSTAMP_FILTER_NTP_ALL: case HWTSTAMP_FILTER_ALL: is_l2 = true; @@ -4056,7 +4054,7 @@ void e1000e_reset(struct e1000_adapter *adapter) fc->low_water = fc->high_water - 8; break; } - /* fall-through */ + fallthrough; default: hwm = min(((pba << 10) * 9 / 10), ((pba << 10) - adapter->max_frame_size)); @@ -4081,7 +4079,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - /* fall-through */ + fallthrough; case e1000_pch_tgp: case e1000_pch_adp: fc->refresh_time = 0xFFFF; @@ -6764,7 +6762,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) case PCIE_LINK_STATE_L0S: case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; - /* fall-through - can't have L1 without L0s */ + fallthrough; /* can't have L1 without L0s */ case PCIE_LINK_STATE_L1: aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; break; diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 098369fd3e65..ebe121db4307 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -375,7 +375,7 @@ void e1000e_check_options(struct e1000_adapter *adapter) "%s Invalid mode - setting default\n", opt.name); adapter->itr_setting = opt.def; - /* fall-through */ + fallthrough; case 3: dev_info(&adapter->pdev->dev, "%s set to dynamic conservative mode\n", diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index 42233019255a..e11c877595fb 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -607,7 +607,7 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) break; case e1000_ms_auto: phy_data &= ~CTL1000_ENABLE_MASTER; - /* fall-through */ + fallthrough; default: break; } diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 439fda2f5368..34b988d70488 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -295,7 +295,6 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) 
case e1000_pch_lpt: case e1000_pch_spt: case e1000_pch_cnp: - /* fall-through */ case e1000_pch_tgp: case e1000_pch_adp: if ((hw->mac.type < e1000_pch_lpt) || @@ -303,7 +302,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) adapter->ptp_clock_info.max_adj = 24000000 - 1; break; } - /* fall-through */ + fallthrough; case e1000_82574: case e1000_82583: adapter->ptp_clock_info.max_adj = 600000000 - 1; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 30ea2b422678..908fefaa6b85 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -692,12 +692,12 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface, case TCP_V4_FLOW: case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case UDP_V4_FLOW: if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP, interface->flags)) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fall through */ + fallthrough; case SCTP_V4_FLOW: case SCTP_V6_FLOW: case AH_ESP_V4_FLOW: diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 05e9bdb5f4aa..34f1f5350f68 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -856,7 +856,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, case IPPROTO_GRE: if (skb->encapsulation) break; - /* fall through */ + fallthrough; default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, @@ -1554,7 +1554,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) * important, starting with the "most" number of features turned on at once, * and ending with the smallest set of features. This way large combinations * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. + * fall through conditions. 
* **/ static void fm10k_set_num_queues(struct fm10k_intfc *interface) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 75e51f91036c..8e2e92bf3cd4 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -967,7 +967,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) if (tail != mbx->head) return FM10K_MBX_ERR_TAIL; - /* fall through */ + fallthrough; case FM10K_MSG_DATA: /* validate that head is moving correctly */ if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) @@ -987,7 +987,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx) if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1))) return FM10K_MBX_ERR_SIZE; - /* fall through */ + fallthrough; case FM10K_MSG_ERROR: if (!head || (head == FM10K_MSG_HDR_MASK(HEAD))) return FM10K_MBX_ERR_HEAD; @@ -1570,7 +1570,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); break; } - /* fall through */ + fallthrough; default: return FM10K_MBX_ERR_NO_MBX; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 0637ccadee79..1450a9f98c5a 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -853,7 +853,7 @@ void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface, /* Don't free requests for other interfaces */ if (r->mac.glort != glort) break; - /* fall through */ + fallthrough; case FM10K_VLAN_REQUEST: if (vlans) { list_del(&r->list); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index be07bfdb0bb4..c0780c3624c8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1317,19 +1317,19 @@ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info, case FM10K_XCAST_MODE_PROMISC: if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE) return FM10K_XCAST_MODE_PROMISC; - /* fall through */ + fallthrough; case FM10K_XCAST_MODE_ALLMULTI: if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE) return FM10K_XCAST_MODE_ALLMULTI; - /* fall through */ + fallthrough; case FM10K_XCAST_MODE_MULTI: if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE) return FM10K_XCAST_MODE_MULTI; - /* fall through */ + fallthrough; case FM10K_XCAST_MODE_NONE: if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE) return FM10K_XCAST_MODE_NONE; - /* fall through */ + fallthrough; default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 6a089848c857..c897a2863e4f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -541,7 +541,7 @@ static void i40e_set_hw_flags(struct i40e_hw *hw) (aq->api_maj_ver == 1 && aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722)) hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; - /* fall through */ + fallthrough; default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index cbba22a2e449..d09c12795c65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -4102,7 +4102,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi, switch (fsp->flow_type & ~FLOW_EXT) { case SCTP_V4_FLOW: new_mask &= ~I40E_VERIFY_TAG_MASK; - /* Fall through */ + fallthrough; case 
TCP_V4_FLOW: case UDP_V4_FLOW: tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b749b0e1a7d5..37e3bc072b0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1817,7 +1817,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, num_tc_qps); break; } - /* fall through */ + fallthrough; case I40E_VSI_FDIR: case I40E_VSI_SRIOV: case I40E_VSI_VMDQ2: @@ -13686,8 +13686,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, /* Setup DCB netlink interface */ i40e_dcbnl_setup(vsi); #endif /* CONFIG_I40E_DCB */ - /* fall through */ - + fallthrough; case I40E_VSI_FDIR: /* set up vectors and rings if needed */ ret = i40e_vsi_setup_vectors(vsi); @@ -13703,7 +13702,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, i40e_vsi_reset_stats(vsi); break; - default: /* no netdev or rings for the other VSI types */ break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 9bf1ad4319f5..ff7b19c6bc73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -586,7 +586,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE)) return -ERANGE; - /* fall through */ + fallthrough; case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f9555c847f73..7e22a4ef582b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1690,7 +1690,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, case I40E_RX_PTYPE_INNER_PROT_UDP: case I40E_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = CHECKSUM_UNNECESSARY; - /* fall though */ + fallthrough; default: break; } @@ -2210,10 +2210,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring, break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fall through -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = I40E_XDP_CONSUMED; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 7276580cbe64..8e489ed54138 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -168,10 +168,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) break; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = I40E_XDP_CONSUMED; break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index e091bab7e770..ca041b39ffda 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1007,7 +1007,7 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi, case IAVF_RX_PTYPE_INNER_PROT_UDP: case IAVF_RX_PTYPE_INNER_PROT_SCTP: skb->ip_summed = 
CHECKSUM_UNNECESSARY; - /* fall though */ + fallthrough; default: break; } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 438b42ce2cd9..a32391e82762 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) dev_spec->sgmii_active = true; break; } - /* fall through - for I2C based SGMII */ + fallthrough; /* for I2C based SGMII */ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: /* read media type from SFP EEPROM */ ret_val = igb_set_sfp_media_type_82575(hw); @@ -1704,7 +1704,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: /* disable PCS autoneg and support parallel detect only */ pcs_autoneg = false; - /* fall through */ + fallthrough; default: if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) { diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index 09f4dcb09632..fa136e6e9328 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -721,7 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) igb_read_invm_version(hw, fw_vers); return; } - /* fall through */ + fallthrough; case e1000_i350: /* find combo image version */ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index ad2125e5a7f7..8c8eb82e6272 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -659,7 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw) phy_data |= M88E1000_PSCR_AUTO_X_1000T; break; } - /* fall through */ + fallthrough; case 0: default: phy_data |= M88E1000_PSCR_AUTO_X_MODE; @@ -2621,7 +2621,7 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw) break; case e1000_ms_auto: phy_data &= ~CR_1000T_MS_ENABLE; - /* fall-through */ + fallthrough; default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index da60e8d2128f..c2cf414d126b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2517,11 +2517,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V4_FLOW: if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2531,11 +2531,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter, break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V6_FLOW: if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ce611cb02c0a..ae8d64324619 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -711,14 +711,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) adapter->rx_ring[i]->reg_idx = rbase_offset 
+ Q_IDX_82576(i); } - /* Fall through */ + fallthrough; case e1000_82575: case e1000_82580: case e1000_i350: case e1000_i354: case e1000_i210: case e1000_i211: - /* Fall through */ + fallthrough; default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = rbase_offset + i; @@ -2873,7 +2873,7 @@ void igb_set_fw_version(struct igb_adapter *adapter) fw.invm_img_type); break; } - /* fall through */ + fallthrough; default: /* if option is rom valid, display its version too */ if (fw.or_valid) { @@ -3724,13 +3724,13 @@ unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter) max_rss_queues = 1; break; } - /* fall through */ + fallthrough; case e1000_82576: if (!!adapter->vfs_allocated_count) { max_rss_queues = 2; break; } - /* fall through */ + fallthrough; case e1000_82580: case e1000_i354: default: @@ -4869,14 +4869,14 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter) /* VLAN filtering needed for VLAN prio filter */ if (adapter->netdev->features & NETIF_F_NTUPLE) break; - /* fall through */ + fallthrough; case e1000_82576: case e1000_82580: case e1000_i354: /* VLAN filtering needed for pool filtering */ if (adapter->vfs_allocated_count) break; - /* fall through */ + fallthrough; default: return 1; } @@ -5156,7 +5156,7 @@ bool igb_has_link(struct igb_adapter *adapter) case e1000_media_type_copper: if (!hw->mac.get_link_status) return true; - /* fall through */ + fallthrough; case e1000_media_type_internal_serdes: hw->mac.ops.check_for_link(hw); link_active = !hw->mac.get_link_status; @@ -5816,7 +5816,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ + fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): @@ -5828,7 +5828,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } - /* fall through */ + fallthrough; default: skb_checksum_help(skb); goto csum_failed; @@ -6706,7 +6706,7 @@ static int __igb_notify_dca(struct device *dev, void *data) igb_setup_dca(adapter); break; } - /* Fall Through - since DCA is disabled. */ + fallthrough; /* since DCA is disabled. 
*/ case DCA_PROVIDER_REMOVE: if (adapter->flags & IGB_FLAG_DCA_ENABLED) { /* without this a class_device is left @@ -9375,13 +9375,13 @@ static void igb_vmm_control(struct igb_adapter *adapter) reg = rd32(E1000_DTXCTL); reg |= E1000_DTXCTL_VLAN_ADDED; wr32(E1000_DTXCTL, reg); - /* Fall through */ + fallthrough; case e1000_82580: /* enable replication vlan tag stripping */ reg = rd32(E1000_RPLOLR); reg |= E1000_RPLOLR_STRVLAN; wr32(E1000_RPLOLR, reg); - /* Fall through */ + fallthrough; case e1000_i350: /* none of the above registers are supported by i350 */ break; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c39e921757ba..490368d3d03c 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1053,7 +1053,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, config->rx_filter = HWTSTAMP_FILTER_ALL; break; } - /* fall through */ + fallthrough; default: config->rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 07740654df5c..97a065928976 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2091,7 +2091,7 @@ static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ + fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): @@ -2103,7 +2103,7 @@ static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } - /* fall through */ + fallthrough; default: skb_checksum_help(skb); goto csum_failed; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index ac331116ea08..44410c2265d6 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1015,37 +1015,29 @@ static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V4_FLOW: if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V4_FLOW: - /* Fall through */ case AH_ESP_V4_FLOW: - /* Fall through */ case AH_V4_FLOW: - /* Fall through */ case ESP_V4_FLOW: - /* Fall through */ case IPV4_FLOW: cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case UDP_V6_FLOW: if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* Fall through */ + fallthrough; case SCTP_V6_FLOW: - /* Fall through */ case AH_ESP_V6_FLOW: - /* Fall through */ case AH_V6_FLOW: - /* Fall through */ case ESP_V6_FLOW: - /* Fall through */ case IPV6_FLOW: cmd->data |= RXH_IP_SRC | RXH_IP_DST; break; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e544f0599dcf..8d5869dcf798 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -969,7 +969,7 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = 
IGC_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ + fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): @@ -981,7 +981,7 @@ static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP; break; } - /* fall through */ + fallthrough; default: skb_checksum_help(skb); goto csum_failed; @@ -3269,7 +3269,6 @@ static void igc_cache_ring_register(struct igc_adapter *adapter) switch (adapter->hw.mac.type) { case igc_i225: - /* Fall through */ default: for (; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index eee277c1bedf..95c92fe890a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1098,7 +1098,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); /* Setup the last four at 48KB...don't re-init i */ rxpktsize = IXGBE_RXPBSIZE_48KB; - /* Fall Through */ + fallthrough; case PBA_STRATEGY_EQUAL: default: /* Divide the remaining Rx packet buffer evenly among the TCs */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 109f8de5a1c2..8d3798a32f0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1568,7 +1568,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0x0000: /* mask VLAN ID */ fdirm |= IXGBE_FDIRM_VLANID; - /* fall through */ + fallthrough; case 0x0FFF: /* mask VLAN priority */ fdirm |= IXGBE_FDIRM_VLANP; @@ -1576,7 +1576,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0xE000: /* mask VLAN ID only */ fdirm |= IXGBE_FDIRM_VLANID; - /* fall through */ + fallthrough; case 0xEFFF: /* no VLAN fields masked */ break; @@ -1589,7 +1589,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, case 0x0000: /* Mask Flex Bytes */ fdirm |= IXGBE_FDIRM_FLEX; - /* fall through */ + fallthrough; case 0xFFFF: break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 17357a12cbdc..62ddb452f862 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -145,7 +145,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) if (ret_val) return ret_val; - /* fall through - only backplane uses autoc */ + fallthrough; /* only backplane uses autoc */ case ixgbe_media_type_fiber: reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); @@ -3533,7 +3533,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; for (; i < (num_pb / 2); i++) IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); - /* fall through - configure remaining packet buffers */ + fallthrough; /* configure remaining packet buffers */ case (PBA_STRATEGY_EQUAL): /* Divide the remaining Rx packet buffer evenly among the TCs */ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 5da367cb5c93..8ae2c8c2f6a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2507,11 +2507,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, switch (cmd->flow_type) { case TCP_V4_FLOW: cmd->data 
|= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case UDP_V4_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case SCTP_V4_FLOW: case AH_ESP_V4_FLOW: case AH_V4_FLOW: @@ -2521,11 +2521,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, break; case TCP_V6_FLOW: cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case UDP_V6_FLOW: if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + fallthrough; case SCTP_V6_FLOW: case AH_ESP_V6_FLOW: case AH_V6_FLOW: @@ -2657,7 +2657,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; break; } - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 6c5703cdf062..e67b1a59ecb7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -444,7 +444,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, ddp->err = (__force u32)ddp_err; ddp->sgl = NULL; ddp->sgc = 0; - /* fall through */ + fallthrough; /* if DDP length is present pass it through to ULD */ case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP): /* update length of DDPed data */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index fabdca9aadae..4601118f88c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1395,7 +1395,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) IXGBE_DCA_CTRL_DCA_MODE_CB2); break; } - /* fall through - DCA is disabled. */ + fallthrough; /* DCA is disabled. 
*/ case DCA_PROVIDER_REMOVE: if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { dca_remove_requester(dev); @@ -2229,10 +2229,10 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = IXGBE_XDP_CONSUMED; break; @@ -3007,7 +3007,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_82599EB: mask |= IXGBE_EIMS_GPI_SDP1(hw); mask |= IXGBE_EIMS_GPI_SDP2(hw); - /* fall through */ + fallthrough; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: @@ -3313,7 +3313,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) switch (hw->mac.type) { case ixgbe_mac_82599EB: ixgbe_check_sfp_event(adapter, eicr); - /* Fall through */ + fallthrough; case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: @@ -4335,7 +4335,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) case ixgbe_mac_x550em_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; - /* fall through */ + fallthrough; case ixgbe_mac_82599EB: case ixgbe_mac_X540: /* Disable RSC for ACK packets */ @@ -5885,7 +5885,7 @@ void ixgbe_disable_tx(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); - /* fall through */ + fallthrough; default: break; } @@ -6337,7 +6337,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, default: break; } - /* fall through */ + fallthrough; case ixgbe_mac_X550EM_x: #ifdef CONFIG_IXGBE_DCB adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; @@ -6348,7 +6348,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, adapter->fcoe.up = 0; #endif /* IXGBE_DCB */ #endif /* IXGBE_FCOE */ - /* Fall Through */ + fallthrough; case ixgbe_mac_X550: if (hw->mac.type == ixgbe_mac_X550) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; @@ -7168,7 +7168,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); - /* fall through */ + fallthrough; case ixgbe_mac_82599EB: for (i = 0; i < 16; i++) adapter->hw_rx_no_dma_resources += @@ -8077,7 +8077,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ + fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): @@ -8089,7 +8089,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; } - /* fall through */ + fallthrough; default: skb_checksum_help(skb); goto csum_failed; @@ -8532,7 +8532,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) break; - /* fall through */ + fallthrough; default: return netdev_pick_tx(dev, skb, sb_dev); } @@ -8866,7 +8866,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) case SIOCGMIIPHY: if (!adapter->hw.phy.ops.read_reg) return -EOPNOTSUPP; - /* fall through */ + fallthrough; default: return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); } @@ -10656,7 +10656,7 @@ bool ixgbe_wol_supported(struct 
ixgbe_adapter *adapter, u16 device_id, /* only support first port */ if (hw->bus.func != 0) break; - /* fall through */ + fallthrough; case IXGBE_SUBDEV_ID_82599_SP_560FLR: case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 0be13a90ff79..22a874eee2e8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1051,7 +1051,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; break; } - /* fall through */ + fallthrough; default: /* * register RXMTRL must be set in order to do V1 packets, @@ -1242,7 +1242,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.mult = 3; cc.shift = 2; } - /* fallthrough */ + fallthrough; case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d05a5690e66b..23a92656821d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -503,7 +503,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) */ if (pf_max_frame > ETH_FRAME_LEN) break; - /* fall through */ + fallthrough; default: /* If the PF or VF are running w/ jumbo frames enabled * we need to shut down the VF Rx path as we cannot @@ -1141,7 +1141,7 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall through */ + fallthrough; case ixgbe_mbox_api_13: case ixgbe_mbox_api_14: break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 9c42f741ed5e..5e339afa682a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -306,7 +306,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); - /* Fallthrough */ + fallthrough; case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: @@ -325,7 +325,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; else hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; - /* Fallthrough */ + fallthrough; case IXGBE_DEV_ID_X550EM_X_10G_T: return ixgbe_identify_phy_generic(hw); case IXGBE_DEV_ID_X550EM_X_1G_T: @@ -2303,7 +2303,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, break; } } - /* fall through */ + fallthrough; default: *speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; @@ -2885,7 +2885,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) * through to the fc_full statement. Later, we will * disable the adapter's ability to send PAUSE frames. 
*/ - /* Fallthrough */ + fallthrough; case ixgbe_fc_full: pause = true; asm_dir = true; @@ -3284,7 +3284,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_SGMII: case IXGBE_DEV_ID_X550EM_A_SGMII_L: hw->phy.type = ixgbe_phy_sgmii; - /* Fallthrough */ + fallthrough; case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: case IXGBE_DEV_ID_X550EM_X_XFI: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index be9d2a8da515..ec7121f352e2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -120,10 +120,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = IXGBE_XDP_CONSUMED; break; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 635cbc25e2f2..6e9a397db583 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1079,10 +1079,10 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, break; default: bpf_warn_invalid_xdp_action(act); - /* fallthrough */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - /* fallthrough -- handle aborts by dropping packet */ + fallthrough; /* handle aborts by dropping packet */ case XDP_DROP: result = IXGBEVF_XDP_CONSUMED; break; @@ -2602,7 +2602,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, * important, starting with the "most" number of features turned on at once, * and ending with the smallest set of features. This way large combinations * can be allocated if they're turned on, and smaller combinations are the - * fallthrough conditions. + * fall through conditions. 
* **/ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) @@ -3874,7 +3874,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, switch (skb->csum_offset) { case offsetof(struct tcphdr, check): type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - /* fall through */ + fallthrough; case offsetof(struct udphdr, check): break; case offsetof(struct sctphdr, checksum): @@ -3886,7 +3886,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; } - /* fall through */ + fallthrough; default: skb_checksum_help(skb); goto no_csum; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index d5ce49636548..bfe6dfcec4ab 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -314,7 +314,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) break; - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; } @@ -382,7 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) break; - /* fall through */ + fallthrough; default: return -EOPNOTSUPP; } @@ -540,7 +540,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall threw */ + fallthrough; case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: break; From d5ec9e2ce41ac198de2ee18e0e529b7ebbc67408 Mon Sep 17 00:00:00 2001 From: Arkadiusz Kubalewski Date: Wed, 17 Jun 2020 00:05:55 +0000 Subject: [PATCH 0569/2056] i40e: Add support for a new feature Total Port Shutdown After OS requests to down a link on a physical network port, the traffic is no longer being processed but the physical link with a link partner is still established. Currently there is a feature (Link down on close) which allows to physically bring the link down (after OS request). With this patch new feature with similar capability is introduced: TOTAL_PORT_SHUTDOWN Allows to physically disable the link on the NIC's port. If enabled, (after link down request from the OS) no link, traffic or led activity is possible on that port. If I40E_FLAG_TOTAL_PORT_SHUTDOWN is enabled, the I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true and cannot be disabled at that time. 
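Editorial aside (not part of the original commit message): the coupling described above, where total-port-shutdown forces link-down-on-close on and blocks attempts to clear it, can be sketched in a few lines. The flag names below mirror the bits this patch introduces, but init_flags() and set_priv_flags() are simplified, hypothetical stand-ins rather than the driver's actual probe and ethtool handlers; EOPNOTSUPP comes from <errno.h> on Linux.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flag bits named after the ones this patch adds; the bit positions are
 * illustrative only. */
#define FLAG_LINK_DOWN_ON_CLOSE_ENABLED  (1u << 23)
#define FLAG_TOTAL_PORT_SHUTDOWN_ENABLED (1u << 27)

/* At port initialization: if non-volatile config says total-port-shutdown is
 * active, force link-down-on-close as well, mirroring the dependency above. */
static uint32_t init_flags(bool nvm_total_port_shutdown)
{
	uint32_t flags = 0;

	if (nvm_total_port_shutdown)
		flags |= FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
			 FLAG_LINK_DOWN_ON_CLOSE_ENABLED;
	return flags;
}

/* Simplified stand-in for an ethtool set-priv-flags handler: refuse to toggle
 * link-down-on-close while total-port-shutdown is enabled. */
static int set_priv_flags(uint32_t *flags, uint32_t new_flags)
{
	uint32_t changed = *flags ^ new_flags;

	if ((changed & FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
	    (*flags & FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
		return -EOPNOTSUPP;

	*flags = new_flags;
	return 0;
}

int main(void)
{
	uint32_t flags = init_flags(true);
	int err = set_priv_flags(&flags, flags & ~FLAG_LINK_DOWN_ON_CLOSE_ENABLED);

	printf("clearing link-down-on-close: %s\n",
	       err == -EOPNOTSUPP ? "rejected (-EOPNOTSUPP)" : "allowed");
	return 0;
}

The equivalent check in the real patch lives in the i40e_set_priv_flags() hunk shown later in this patch, which returns -EOPNOTSUPP when link-down-on-close would be toggled while total-port-shutdown is enabled, with the feature itself detected by probing the NVM at port initialization.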
The functionalities are exclusive in terms of configuration, but they also have similar behavior (allowing to disable physical link of the port), with following differences: - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is supported by whole family of 7xx Intel Ethernet Controllers - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS) only if motherboard's BIOS and NIC's FW has support of it - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down by sending phy_type=0 to NIC's FW - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK) in abilities field of i40e_aq_set_phy_config structure Introduced changes: - new private flag I40E_FLAG_TOTAL_PORT_SHUTDOWN for handling the feature - probe of NVM if the feature was enabled at driver's port initialization - special handling on link-down procedure to let FW physically shutdown the port if the feature was enabled Signed-off-by: Arkadiusz Kubalewski Signed-off-by: Aleksandr Loktionov Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e.h | 22 ++++ .../net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1 + .../net/ethernet/intel/i40e/i40e_ethtool.c | 9 ++ drivers/net/ethernet/intel/i40e/i40e_main.c | 122 +++++++++++++++--- 4 files changed, 133 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7618834b149b..a7e212d1caa2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -543,6 +543,28 @@ struct i40e_pf { #define I40E_FLAG_DISABLE_FW_LLDP BIT(24) #define I40E_FLAG_RS_FEC BIT(25) #define I40E_FLAG_BASE_R_FEC BIT(26) +/* TOTAL_PORT_SHUTDOWN + * Allows to physically disable the link on the NIC's port. + * If enabled, (after link down request from the OS) + * no link, traffic or led activity is possible on that port. + * + * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the + * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true + * and cannot be disabled by system admin at that time. 
+ * The functionalities are exclusive in terms of configuration, but they also + * have similar behavior (allowing to disable physical link of the port), + * with following differences: + * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is + * supported by whole family of 7xx Intel Ethernet Controllers + * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS) + * only if motherboard's BIOS and NIC's FW has support of it + * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down + * by sending phy_type=0 to NIC's FW + * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead + * the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK) + * in abilities field of i40e_aq_set_phy_config structure + */ +#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27) struct i40e_client_instance *cinst; bool stat_offsets_loaded; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index c52910f03cfb..a62ddd626929 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1676,6 +1676,7 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ u8 link_speed; u8 abilities; /* bits 0-2 use the values from get_phy_abilities_resp */ +#define I40E_AQ_PHY_ENABLE_LINK 0x08 #define I40E_AQ_PHY_ENABLE_AN 0x10 #define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 __le16 eee_capability; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index d09c12795c65..825c104ecba1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -428,6 +428,8 @@ struct i40e_priv_flags { static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { /* NOTE: MFP setting cannot be changed */ I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1), + I40E_PRIV_FLAG("total-port-shutdown", + I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1), I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), @@ -5007,6 +5009,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); } + if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && + (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) { + dev_err(&pf->pdev->dev, + "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"); + return -EOPNOTSUPP; + } + if ((changed_flags & new_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) && (new_flags & I40E_FLAG_MFP_ENABLED)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 37e3bc072b0e..c559b9c4514c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); static int i40e_get_capabilities(struct i40e_pf *pf, enum i40e_admin_queue_opc list_type); - +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf); /* i40e_pci_tbl - PCI Device ID Table * @@ -6679,21 +6679,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) clear_bit(__I40E_CONFIG_BUSY, pf->state); } -/** - * i40e_up - Bring the connection back up after being down - * @vsi: the VSI being configured 
- **/ -int i40e_up(struct i40e_vsi *vsi) -{ - int err; - - err = i40e_vsi_configure(vsi); - if (!err) - err = i40e_up_complete(vsi); - - return err; -} - /** * i40e_force_link_state - Force the link status * @pf: board private structure @@ -6703,6 +6688,7 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) { struct i40e_aq_get_phy_abilities_resp abilities; struct i40e_aq_set_phy_config config = {0}; + bool non_zero_phy_type = is_up; struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; @@ -6738,8 +6724,11 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) /* If link needs to go up, but was not forced to go down, * and its speed values are OK, no need for a flap + * if non_zero_phy_type was set, still need to force up */ - if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) + non_zero_phy_type = true; + else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, @@ -6747,10 +6736,18 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) * across two fields. */ mask = I40E_PHY_TYPES_BITMASK; - config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; - config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; + config.phy_type = + non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0; + config.phy_type_ext = + non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; + if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) { + if (is_up) + config.abilities |= I40E_AQ_PHY_ENABLE_LINK; + else + config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK); + } if (abilities.link_speed != 0) config.link_speed = abilities.link_speed; else @@ -6781,11 +6778,31 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) i40e_update_link_info(hw); } - i40e_aq_set_link_restart_an(hw, true, NULL); + i40e_aq_set_link_restart_an(hw, is_up, NULL); return I40E_SUCCESS; } +/** + * i40e_up - Bring the connection back up after being down + * @vsi: the VSI being configured + **/ +int i40e_up(struct i40e_vsi *vsi) +{ + int err; + + if (vsi->type == I40E_VSI_MAIN && + (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || + vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) + i40e_force_link_state(vsi->back, true); + + err = i40e_vsi_configure(vsi); + if (!err) + err = i40e_up_complete(vsi); + + return err; +} + /** * i40e_down - Shutdown the connection processing * @vsi: the VSI being stopped @@ -6804,7 +6821,8 @@ void i40e_down(struct i40e_vsi *vsi) i40e_vsi_disable_irq(vsi); i40e_vsi_stop_rings(vsi); if (vsi->type == I40E_VSI_MAIN && - vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) + (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED || + vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) i40e_force_link_state(vsi->back, false); i40e_napi_disable_all(vsi); @@ -11837,6 +11855,58 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf) return ret; } +/** + * i40e_is_total_port_shutdown_enabled - read NVM and return value + * if total port shutdown feature is enabled for this PF + * @pf: board private structure + **/ +static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf) +{ +#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED BIT(4) +#define I40E_FEATURES_ENABLE_PTR 0x2A +#define I40E_CURRENT_SETTING_PTR 0x2B +#define 
I40E_LINK_BEHAVIOR_WORD_OFFSET 0x2D +#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1 +#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0) +#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4 + i40e_status read_status = I40E_SUCCESS; + u16 sr_emp_sr_settings_ptr = 0; + u16 features_enable = 0; + u16 link_behavior = 0; + bool ret = false; + + read_status = i40e_read_nvm_word(&pf->hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + &sr_emp_sr_settings_ptr); + if (read_status) + goto err_nvm; + read_status = i40e_read_nvm_word(&pf->hw, + sr_emp_sr_settings_ptr + + I40E_FEATURES_ENABLE_PTR, + &features_enable); + if (read_status) + goto err_nvm; + if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) { + read_status = i40e_read_nvm_module_data(&pf->hw, + I40E_SR_EMP_SR_SETTINGS_PTR, + I40E_CURRENT_SETTING_PTR, + I40E_LINK_BEHAVIOR_WORD_OFFSET, + I40E_LINK_BEHAVIOR_WORD_LENGTH, + &link_behavior); + if (read_status) + goto err_nvm; + link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); + ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior; + } + return ret; + +err_nvm: + dev_warn(&pf->pdev->dev, + "total-port-shutdown feature is off due to read nvm error: %s\n", + i40e_stat_str(&pf->hw, read_status)); + return ret; +} + /** * i40e_sw_init - Initialize general software structures (struct i40e_pf) * @pf: board private structure to initialize @@ -12012,6 +12082,16 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->tx_timeout_recovery_level = 1; + if (pf->hw.mac.type != I40E_MAC_X722 && + i40e_is_total_port_shutdown_enabled(pf)) { + /* Link down on close must be on when total port shutdown + * is enabled for a given port + */ + pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED | + I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED); + dev_info(&pf->pdev->dev, + "total-port-shutdown was enabled, link-down-on-close is forced on\n"); + } mutex_init(&pf->switch_mutex); sw_init_done: From 753f3884f253de6b6d3a516e6651bda0baf4aede Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 18 Jun 2020 14:19:53 +0000 Subject: [PATCH 0570/2056] iavf: fix error return code in iavf_init_get_resources() Fix to return negative error code -ENOMEM from the error handling case instead of 0, as done elsewhere in this function. Fixes: b66c7bc1cd4d ("iavf: Refactor init state machine") Signed-off-by: Wei Yongjun Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/iavf/iavf_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 78bd9e3df3ac..b90ad1abbabb 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1852,8 +1852,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter) adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); - if (!adapter->rss_key || !adapter->rss_lut) + if (!adapter->rss_key || !adapter->rss_lut) { + err = -ENOMEM; goto err_mem; + } if (RSS_AQ(adapter)) adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; else From 5574ff7b7b3d864556173bf822796593451a6b8c Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 23 Jun 2020 11:44:16 +0200 Subject: [PATCH 0571/2056] i40e: optimize AF_XDP Tx completion path Improve the performance of the AF_XDP zero-copy Tx completion path. 
When there are no XDP buffers being sent using XDP_TX or XDP_REDIRECT, we do not have go through the SW ring to clean up any entries since the AF_XDP path does not use these. In these cases, just fast forward the next-to-use counter and skip going through the SW ring. The limit on the maximum number of entries to complete is also removed since the algorithm is now O(1). To simplify the code path, the maximum number of entries to complete for the XDP path is therefore also increased from 256 to 512 (the default number of Tx HW descriptors). This should be fine since the completion in the XDP path is faster than in the SKB path that has 256 as the maximum number. This patch provides around 4% throughput improvement for the l2fwd application in xdpsock on my machine. Signed-off-by: Magnus Karlsson Reviewed-by: Sridhar Samudrala Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 +- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 + drivers/net/ethernet/intel/i40e/i40e_xsk.c | 43 +++++++++++---------- drivers/net/ethernet/intel/i40e/i40e_xsk.h | 3 +- 4 files changed, 27 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 7e22a4ef582b..aad4326ab0c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2580,7 +2580,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) */ i40e_for_each_ring(ring, q_vector->tx) { bool wd = ring->xsk_umem ? - i40e_clean_xdp_tx_irq(vsi, ring, budget) : + i40e_clean_xdp_tx_irq(vsi, ring) : i40e_clean_tx_irq(vsi, ring, budget); if (!wd) { @@ -3538,6 +3538,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, */ smp_wmb(); + xdp_ring->xdp_tx_active++; i++; if (i == xdp_ring->count) i = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 8d3c9d37e42e..4036893d6825 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -347,6 +347,7 @@ struct i40e_ring { /* used in interrupt processing */ u16 next_to_use; u16 next_to_clean; + u16 xdp_tx_active; u8 atr_sample_rate; u8 atr_count; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 8e489ed54138..744861dc7acd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -378,6 +378,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) **/ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) { + unsigned int sent_frames = 0, total_bytes = 0; struct i40e_tx_desc *tx_desc = NULL; struct i40e_tx_buffer *tx_bi; bool work_done = true; @@ -408,6 +409,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) | I40E_TX_DESC_CMD_EOP, 0, desc.len, 0); + sent_frames++; + total_bytes += tx_bi->bytecount; + xdp_ring->next_to_use++; if (xdp_ring->next_to_use == xdp_ring->count) xdp_ring->next_to_use = 0; @@ -420,6 +424,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes); } return !!budget && work_done; @@ -434,6 +439,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, struct i40e_tx_buffer *tx_bi) { xdp_return_frame(tx_bi->xdpf); + tx_ring->xdp_tx_active--; 
dma_unmap_single(tx_ring->dev, dma_unmap_addr(tx_bi, dma), dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); @@ -447,27 +453,25 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, * * Returns true if cleanup/tranmission is done. **/ -bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget) +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) { - unsigned int ntc, total_bytes = 0, budget = vsi->work_limit; - u32 i, completed_frames, frames_ready, xsk_frames = 0; + unsigned int ntc, budget = vsi->work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; + u32 i, completed_frames, xsk_frames = 0; u32 head_idx = i40e_get_head(tx_ring); - bool work_done = true, xmit_done; struct i40e_tx_buffer *tx_bi; + bool xmit_done; if (head_idx < tx_ring->next_to_clean) head_idx += tx_ring->count; - frames_ready = head_idx - tx_ring->next_to_clean; + completed_frames = head_idx - tx_ring->next_to_clean; - if (frames_ready == 0) { + if (completed_frames == 0) goto out_xmit; - } else if (frames_ready > budget) { - completed_frames = budget; - work_done = false; - } else { - completed_frames = frames_ready; + + if (likely(!tx_ring->xdp_tx_active)) { + xsk_frames = completed_frames; + goto skip; } ntc = tx_ring->next_to_clean; @@ -475,18 +479,18 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, for (i = 0; i < completed_frames; i++) { tx_bi = &tx_ring->tx_bi[ntc]; - if (tx_bi->xdpf) + if (tx_bi->xdpf) { i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); - else + tx_bi->xdpf = NULL; + } else { xsk_frames++; - - tx_bi->xdpf = NULL; - total_bytes += tx_bi->bytecount; + } if (++ntc >= tx_ring->count) ntc = 0; } +skip: tx_ring->next_to_clean += completed_frames; if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) tx_ring->next_to_clean -= tx_ring->count; @@ -494,8 +498,7 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, if (xsk_frames) xsk_umem_complete_tx(umem, xsk_frames); - i40e_arm_wb(tx_ring, vsi, budget); - i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); + i40e_arm_wb(tx_ring, vsi, completed_frames); out_xmit: if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) @@ -503,7 +506,7 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, xmit_done = i40e_xmit_zc(tx_ring, budget); - return work_done && xmit_done; + return xmit_done; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index ea919a7d60ec..c524c142127f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -15,8 +15,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count); int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); -bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, - struct i40e_ring *tx_ring, int napi_budget); +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); From 4b5539c01ddf838ea6f5b67d00ee035231c8248a Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 23 Jun 2020 11:44:17 +0200 Subject: [PATCH 0572/2056] i40e: eliminate division in napi_poll data path Eliminate a division in the napi_poll data path. 
This division is executed even though it is only needed in the rare case when there are not enough interrupt lines so they have to be shared between queue pairs. Instead, just test for this case and only execute the division if needed. The code has been lifted from the ice driver. Signed-off-by: Magnus Karlsson Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index aad4326ab0c9..3e5c566ceb01 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2595,10 +2595,16 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) if (budget <= 0) goto tx_only; - /* We attempt to distribute budget to each Rx queue fairly, but don't - * allow the budget to go below 1 because that would exit polling early. - */ - budget_per_ring = max(budget/q_vector->num_ringpairs, 1); + /* normally we have 1 Rx ring per q_vector */ + if (unlikely(q_vector->num_ringpairs > 1)) + /* We attempt to distribute budget to each Rx queue fairly, but + * don't allow the budget to go below 1 because that would exit + * polling early. + */ + budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); + else + /* Max of 1 Rx ring in this q_vector so give it the budget */ + budget_per_ring = budget; i40e_for_each_ring(ring, q_vector->rx) { int cleaned = ring->xsk_umem ? From 1fd972ebe5230eb0585642f2172c9dd2b86ea381 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Tue, 23 Jun 2020 11:44:18 +0200 Subject: [PATCH 0573/2056] i40e: move check of full Tx ring to outside of send loop Move the check if the HW Tx ring is full to outside the send loop. Currently it is checked for every single descriptor that we send. Instead, tell the send loop to only process a maximum number of packets equal to the number of available slots in the Tx ring. This way, we can remove the check inside the send loop to and gain some performance. Suggested-by: Sridhar Samudrala Signed-off-by: Magnus Karlsson Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 744861dc7acd..8ce57b507a21 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -381,17 +381,10 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) unsigned int sent_frames = 0, total_bytes = 0; struct i40e_tx_desc *tx_desc = NULL; struct i40e_tx_buffer *tx_bi; - bool work_done = true; struct xdp_desc desc; dma_addr_t dma; while (budget-- > 0) { - if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { - xdp_ring->tx_stats.tx_busy++; - work_done = false; - break; - } - if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc)) break; @@ -427,7 +420,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes); } - return !!budget && work_done; + return !!budget; } /** @@ -448,19 +441,18 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, /** * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries + * @vsi: Current VSI * @tx_ring: XDP Tx ring - * @tx_bi: Tx buffer info to clean * * Returns true if cleanup/tranmission is done. 
**/ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) { - unsigned int ntc, budget = vsi->work_limit; struct xdp_umem *umem = tx_ring->xsk_umem; u32 i, completed_frames, xsk_frames = 0; u32 head_idx = i40e_get_head(tx_ring); struct i40e_tx_buffer *tx_bi; - bool xmit_done; + unsigned int ntc; if (head_idx < tx_ring->next_to_clean) head_idx += tx_ring->count; @@ -504,9 +496,7 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring) if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) xsk_set_tx_need_wakeup(tx_ring->xsk_umem); - xmit_done = i40e_xmit_zc(tx_ring, budget); - - return xmit_done; + return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring)); } /** @@ -570,7 +560,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring) /** * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown - * @xdp_ring: XDP Tx ring + * @tx_ring: XDP Tx ring **/ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring) { From e2968260e169e11b5fa13db05792eb3142b78723 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Tue, 23 Jun 2020 13:06:55 +0000 Subject: [PATCH 0574/2056] i40e: add XDP ring statistics to VSI stats Prior to this, only Rx and Tx ring statistics were accounted for. Signed-off-by: Ciara Loftus Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_main.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c559b9c4514c..dadbfb3d2a2b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -811,6 +811,25 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_p += packets; rx_buf += p->rx_stats.alloc_buff_failed; rx_page += p->rx_stats.alloc_page_failed; + + if (i40e_enabled_xdp_vsi(vsi)) { + /* locate XDP ring */ + p = READ_ONCE(vsi->xdp_rings[q]); + if (!p) + continue; + + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + packets = p->stats.packets; + bytes = p->stats.bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + tx_b += bytes; + tx_p += packets; + tx_restart += p->tx_stats.restart_queue; + tx_busy += p->tx_stats.tx_busy; + tx_linearize += p->tx_stats.tx_linearize; + tx_force_wb += p->tx_stats.tx_force_wb; + } } rcu_read_unlock(); vsi->tx_restart = tx_restart; From 890c402c7b113137c485367d5651f09a39c7d893 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Tue, 23 Jun 2020 13:06:56 +0000 Subject: [PATCH 0575/2056] i40e: add XDP ring statistics to dump VSI debug output Prior to this, only the Rx and Tx ring statistics were dumped. The XDP ring statistics are now dumped as well. Signed-off-by: Ciara Loftus Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/i40e/i40e_debugfs.c | 41 +++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 9cb9b781451c..41a4cf8e3d61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -319,6 +319,47 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) i, tx_ring->itr_setting, ITR_IS_DYNAMIC(tx_ring->itr_setting) ? 
"dynamic" : "fixed"); } + if (i40e_enabled_xdp_vsi(vsi)) { + for (i = 0; i < vsi->num_queue_pairs; i++) { + struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]); + + if (!xdp_ring) + continue; + + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", + i, *xdp_ring->state, + xdp_ring->queue_index, + xdp_ring->reg_idx); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, + xdp_ring->next_to_use, + xdp_ring->next_to_clean, + xdp_ring->ring_active); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n", + i, xdp_ring->stats.packets, + xdp_ring->stats.bytes, + xdp_ring->tx_stats.restart_queue); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n", + i, + xdp_ring->tx_stats.tx_busy, + xdp_ring->tx_stats.tx_done_old); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: size = %i\n", + i, xdp_ring->size); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: DCB tc = %d\n", + i, xdp_ring->dcb_tc); + dev_info(&pf->pdev->dev, + " xdp_rings[%i]: itr_setting = %d (%s)\n", + i, xdp_ring->itr_setting, + ITR_IS_DYNAMIC(xdp_ring->itr_setting) ? + "dynamic" : "fixed"); + } + } rcu_read_unlock(); dev_info(&pf->pdev->dev, " work_limit = %d\n", From 44ea803e2fa7e12adb5d6260da4e4956e784effb Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Tue, 23 Jun 2020 13:06:57 +0000 Subject: [PATCH 0576/2056] i40e: introduce new dump desc XDP command Interfaces already exist for dumping Rx and Tx descriptor information. Introduce another for doing the same for XDP descriptors. Signed-off-by: Ciara Loftus Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/i40e/i40e_debugfs.c | 59 +++++++++++++++---- 1 file changed, 48 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 41a4cf8e3d61..d3ad2e3aa838 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -10,6 +10,12 @@ static struct dentry *i40e_dbg_root; +enum ring_type { + RING_TYPE_RX, + RING_TYPE_TX, + RING_TYPE_XDP +}; + /** * i40e_dbg_find_vsi - searches for the vsi with the given seid * @pf: the PF structure to search for the vsi @@ -530,11 +536,12 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) * @ring_id: ring id entered by user * @desc_n: descriptor number entered by user * @pf: the i40e_pf created in command write - * @is_rx_ring: true if rx, false if tx + * @type: enum describing whether ring is RX, TX or XDP **/ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, - struct i40e_pf *pf, bool is_rx_ring) + struct i40e_pf *pf, enum ring_type type) { + bool is_rx_ring = type == RING_TYPE_RX; struct i40e_tx_desc *txd; union i40e_rx_desc *rxd; struct i40e_ring *ring; @@ -546,6 +553,10 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); return; } + if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) { + dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid); + return; + } if (ring_id >= vsi->num_queue_pairs || ring_id < 0) { dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); return; @@ -557,15 +568,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, return; } - ring = kmemdup(is_rx_ring - ? 
vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id], - sizeof(*ring), GFP_KERNEL); + switch (type) { + case RING_TYPE_RX: + ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL); + break; + case RING_TYPE_TX: + ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL); + break; + case RING_TYPE_XDP: + ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL); + break; + } if (!ring) return; if (cnt == 2) { - dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", - vsi_seid, is_rx_ring ? "rx" : "tx", ring_id); + switch (type) { + case RING_TYPE_RX: + dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id); + break; + case RING_TYPE_TX: + dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id); + break; + case RING_TYPE_XDP: + dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id); + break; + } for (i = 0; i < ring->count; i++) { if (!is_rx_ring) { txd = I40E_TX_DESC(ring, i); @@ -603,7 +631,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, rxd->read.rsvd1, rxd->read.rsvd2); } } else { - dev_info(&pf->pdev->dev, "dump desc rx/tx []\n"); + dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp []\n"); } out: @@ -960,13 +988,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, - desc_n, pf, true); + desc_n, pf, RING_TYPE_RX); } else if (strncmp(&cmd_buf[10], "tx", 2) == 0) { cnt = sscanf(&cmd_buf[12], "%i %i %i", &vsi_seid, &ring_id, &desc_n); i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, - desc_n, pf, false); + desc_n, pf, RING_TYPE_TX); + } else if (strncmp(&cmd_buf[10], "xdp", 3) + == 0) { + cnt = sscanf(&cmd_buf[13], "%i %i %i", + &vsi_seid, &ring_id, &desc_n); + i40e_dbg_dump_desc(cnt, vsi_seid, ring_id, + desc_n, pf, RING_TYPE_XDP); } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) { i40e_dbg_dump_aq_desc(pf); } else { @@ -974,6 +1008,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, "dump desc tx []\n"); dev_info(&pf->pdev->dev, "dump desc rx []\n"); + dev_info(&pf->pdev->dev, + "dump desc xdp []\n"); dev_info(&pf->pdev->dev, "dump desc aq\n"); } } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) { @@ -1144,7 +1180,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, buff = NULL; } else { dev_info(&pf->pdev->dev, - "dump desc tx [], dump desc rx [],\n"); + "dump desc tx [], dump desc rx [], dump desc xdp [],\n"); dev_info(&pf->pdev->dev, "dump switch\n"); dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); dev_info(&pf->pdev->dev, "dump reset stats\n"); @@ -1560,6 +1596,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); dev_info(&pf->pdev->dev, " dump desc tx []\n"); dev_info(&pf->pdev->dev, " dump desc rx []\n"); + dev_info(&pf->pdev->dev, " dump desc xdp []\n"); dev_info(&pf->pdev->dev, " dump desc aq\n"); dev_info(&pf->pdev->dev, " dump reset stats\n"); dev_info(&pf->pdev->dev, " dump debug fwdata \n"); From 9358076642f14cec8c414850d5a909cafca3a9d6 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 24 Jun 2020 09:04:22 -0700 Subject: [PATCH 0577/2056] iavf: Fix updating statistics Commit bac8486116b0 ("iavf: Refactor the watchdog state machine") inverted the logic for when to update statistics. Statistics should be updated when no other commands are pending, instead they were only requested when a command was processed. 
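In sketch form, the intended behaviour is that an idle watchdog pass (one where no admin-queue command was processed) is what triggers a statistics refresh. A standalone C illustration of that ordering follows; the helpers are hypothetical stand-ins that only mirror the described return convention, not the real iavf functions:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in: returns 0 when it issued a pending admin-queue
 * command, non-zero when nothing was pending. */
static int process_aq_command(int *pending)
{
        if (*pending > 0) {
                (*pending)--;
                return 0;       /* a command was sent this pass */
        }
        return -1;              /* nothing pending */
}

static void request_stats(void)
{
        printf("stats update requested\n");
}

int main(void)
{
        int pending = 2;
        bool running = true;
        int i;

        /* Watchdog passes: only when no command was processed (non-zero
         * return) is the idle slot used to refresh statistics. */
        for (i = 0; i < 4; i++) {
                if (process_aq_command(&pending) && running)
                        request_stats();
        }
        return 0;
}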
iavf_request_stats() would see a pending request and not request statistics to be updated. This caused statistics to never be updated; fix the logic. Fixes: bac8486116b0 ("iavf: Refactor the watchdog state machine") Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers --- drivers/net/ethernet/intel/iavf/iavf_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index b90ad1abbabb..48c956d90b90 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1937,7 +1937,10 @@ static void iavf_watchdog_task(struct work_struct *work) iavf_send_api_ver(adapter); } } else { - if (!iavf_process_aq_command(adapter) && + /* An error will be returned if no commands were + * processed; use this opportunity to update stats + */ + if (iavf_process_aq_command(adapter) && adapter->state == __IAVF_RUNNING) iavf_request_stats(adapter); } From bb0967c04e8bed3428e4c54f925cdd1890f8d2c6 Mon Sep 17 00:00:00 2001 From: Jeff Kirsher Date: Thu, 25 Jun 2020 22:21:41 -0700 Subject: [PATCH 0578/2056] ixgbe: Cleanup unneeded delay in ethtool test There is a 4 seconds delay in ixgbe_diag_test() that is holding up other ioctls such as SIOCGIFCONF that Oracle database applications use. One of Oracle's product runs "ethtool -t ethX online" periodically for system monitoring and that is impacting database applications that use SIOCGIFCONF at that same time. This 4 second delay was needed in out early 1GbE parts to give the PHY time to recover from a reset. This code was carried forward to the 10 GbE driver even it was not needed for the supported PHYs in the ixgbe driver. CC: Aleksandr Loktionov CC: Jack Vogel Reported-by: Venkat Venkatsubra Signed-off-by: Jeff Kirsher Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 8ae2c8c2f6a1..fd6784952766 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2084,7 +2084,7 @@ static void ixgbe_diag_test(struct net_device *netdev, eth_test->flags |= ETH_TEST_FL_FAILED; clear_bit(__IXGBE_TESTING, &adapter->state); - goto skip_ol_tests; + return; } } } @@ -2156,9 +2156,6 @@ static void ixgbe_diag_test(struct net_device *netdev, clear_bit(__IXGBE_TESTING, &adapter->state); } - -skip_ol_tests: - msleep_interruptible(4 * 1000); } static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, From a296d665eae1e8ec6445683bfb999c884058426a Mon Sep 17 00:00:00 2001 From: Radoslaw Tyl Date: Fri, 26 Jun 2020 15:28:14 +0200 Subject: [PATCH 0579/2056] ixgbe: Add ethtool support to enable 2.5 and 5.0 Gbps support Added full support for new version Ethtool API. New API allow use 2500Gbase-T and 5000base-T supported and advertised link speed modes. 
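The move to the ksettings API matters here because the legacy u32 SUPPORTED_*/ADVERTISED_* masks have no bits for 2500baseT_Full or 5000baseT_Full; those modes only exist as link-mode bitmap indices above 31. A minimal, self-contained C sketch of the bitmap idea and of the advertising-must-be-a-subset-of-supported check follows; the helpers are simplified stand-ins, not the kernel's ethtool_link_ksettings_*() or bitmap_subset() implementations, and the mode indices are used for illustration only:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define MODE_10000baseT_Full 12
#define MODE_2500baseT_Full  47
#define MODE_5000baseT_Full  48
#define MODE_MAX             96
#define LONGS(bits) (((bits) + 8 * sizeof(unsigned long) - 1) / (8 * sizeof(unsigned long)))

struct link_modes { unsigned long map[LONGS(MODE_MAX)]; };

/* Set one link-mode bit; indices above 31 need the multi-word bitmap. */
static void add_mode(struct link_modes *m, int bit)
{
        m->map[bit / (8 * sizeof(unsigned long))] |=
                1UL << (bit % (8 * sizeof(unsigned long)));
}

/* Is every advertised mode also supported? (same idea as bitmap_subset()) */
static bool subset(const struct link_modes *adv, const struct link_modes *sup)
{
        size_t i;

        for (i = 0; i < LONGS(MODE_MAX); i++)
                if (adv->map[i] & ~sup->map[i])
                        return false;
        return true;
}

int main(void)
{
        struct link_modes supported = {{0}}, advertising = {{0}};

        add_mode(&supported, MODE_10000baseT_Full);
        add_mode(&supported, MODE_5000baseT_Full);
        add_mode(&advertising, MODE_2500baseT_Full);    /* not supported here */

        printf("advertising within supported: %s\n",
               subset(&advertising, &supported) ? "yes" : "no");
        return 0;
}

The same subset test is what replaces the old "advertising & ~supported" check once the speeds no longer fit in a single u32.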
Signed-off-by: Radoslaw Tyl Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 260 ++++++++++++------ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 +- 2 files changed, 182 insertions(+), 84 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index fd6784952766..6725d892336e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -142,32 +142,71 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) -/* currently supported speeds for 10G */ -#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \ - SUPPORTED_10000baseKX4_Full | \ - SUPPORTED_10000baseKR_Full) - #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane) -static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw) +static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw, + struct ethtool_link_ksettings *cmd) { - if (!ixgbe_isbackplane(hw->phy.media_type)) - return SUPPORTED_10000baseT_Full; + if (!ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + return; + } switch (hw->device_id) { case IXGBE_DEV_ID_82598: case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_X550EM_X_KX4: - return SUPPORTED_10000baseKX4_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKX4_Full); + break; case IXGBE_DEV_ID_82598_BX: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_XFI: - return SUPPORTED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKR_Full); + break; default: - return SUPPORTED_10000baseKX4_Full | - SUPPORTED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode + (cmd, supported, 10000baseKR_Full); + break; + } +} + +static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw, + struct ethtool_link_ksettings *cmd) +{ + if (!ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + return; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_X550EM_X_KX4: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKX4_Full); + break; + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_XFI: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKR_Full); + break; + default: + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseKR_Full); + break; } } @@ -178,52 +217,88 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; ixgbe_link_speed supported_link; bool autoneg = false; - u32 supported, advertising; - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); /* set the supported link speeds */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - supported |= 
ixgbe_get_supported_10gtypes(hw); - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? - SUPPORTED_1000baseKX_Full : - SUPPORTED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - supported |= SUPPORTED_100baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_10_FULL) - supported |= SUPPORTED_10baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) { + ixgbe_set_supported_10gtypes(hw, cmd); + ixgbe_set_advertising_10gtypes(hw, cmd); + } + if (supported_link & IXGBE_LINK_SPEED_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 5000baseT_Full); + + if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, supported, + 2500baseT_Full); + + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) { + if (ixgbe_isbackplane(hw->phy.media_type)) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseKX_Full); + } else { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + } + } + if (supported_link & IXGBE_LINK_SPEED_100_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + } + if (supported_link & IXGBE_LINK_SPEED_10_FULL) { + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + } - /* default advertised speed if phy.autoneg_advertised isn't set */ - advertising = supported; /* set the advertised speeds */ if (hw->phy.autoneg_advertised) { - advertising = 0; + ethtool_link_ksettings_zero_link_mode(cmd, advertising); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) - advertising |= ADVERTISED_10baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - advertising |= ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - advertising |= supported & ADVRTSD_MSK_10G; + ixgbe_set_advertising_10gtypes(hw, cmd); if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { - if (supported & SUPPORTED_1000baseKX_Full) - advertising |= ADVERTISED_1000baseKX_Full; + if (ethtool_link_ksettings_test_link_mode + (cmd, supported, 1000baseKX_Full)) + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 1000baseKX_Full); else - advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 1000baseT_Full); } + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 5000baseT_Full); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 2500baseT_Full); } else { if (hw->phy.multispeed_fiber && !autoneg) { if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - advertising = ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode + (cmd, advertising, 10000baseT_Full); } } if (autoneg) { - supported |= SUPPORTED_Autoneg; - advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); cmd->base.autoneg = AUTONEG_ENABLE; } else 
cmd->base.autoneg = AUTONEG_DISABLE; @@ -235,13 +310,13 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_phy_x550em_ext_t: case ixgbe_phy_fw: case ixgbe_phy_cu_unknown: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); cmd->base.port = PORT_TP; break; case ixgbe_phy_qt: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_phy_nl: @@ -260,8 +335,10 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_DA; break; case ixgbe_sfp_type_sr: @@ -272,61 +349,76 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: - supported |= SUPPORTED_TP; - advertising |= ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, + TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + TP); cmd->base.port = PORT_TP; break; case ixgbe_sfp_type_unknown: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: - supported |= SUPPORTED_FIBRE; - advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + FIBRE); cmd->base.port = PORT_OTHER; break; } /* Indicate pause support */ - supported |= SUPPORTED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); switch (hw->fc.requested_mode) { case ixgbe_fc_full: - advertising |= ADVERTISED_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); break; case ixgbe_fc_rx_pause: - advertising |= ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; case ixgbe_fc_tx_pause: - advertising |= ADVERTISED_Asym_Pause; + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); break; default: - advertising &= ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause); + ethtool_link_ksettings_del_link_mode(cmd, advertising, + Asym_Pause); } if (netif_carrier_ok(netdev)) { @@ -358,11 +450,6 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); - return 0; } @@ -373,12 +460,6 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; u32 advertised, old; s32 err = 0; - u32 supported, advertising; - - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); if ((hw->phy.media_type == ixgbe_media_type_copper) || (hw->phy.multispeed_fiber)) { @@ -386,29 +467,41 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, * this function does not support duplex forcing, but can * limit the advertising of the adapter to the specified speed */ - if (advertising & ~supported) + if (!bitmap_subset(cmd->link_modes.advertising, + cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) return -EINVAL; /* only allow one speed at a time if no autoneg */ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { - if (advertising == - (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full)) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full) && + ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) return -EINVAL; } old = hw->phy.autoneg_advertised; advertised = 0; - if (advertising & ADVERTISED_10000baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) advertised |= IXGBE_LINK_SPEED_10GB_FULL; - - if (advertising & ADVERTISED_1000baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 5000baseT_Full)) + advertised |= IXGBE_LINK_SPEED_5GB_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 2500baseT_Full)) + advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 1000baseT_Full)) advertised |= IXGBE_LINK_SPEED_1GB_FULL; - if (advertising & ADVERTISED_100baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100baseT_Full)) advertised |= IXGBE_LINK_SPEED_100_FULL; - if (advertising & ADVERTISED_10baseT_Full) + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10baseT_Full)) advertised |= IXGBE_LINK_SPEED_10_FULL; if (old == advertised) @@ -429,7 +522,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev, u32 speed = cmd->base.speed; if ((cmd->base.autoneg == AUTONEG_ENABLE) || - (advertising != ADVERTISED_10000baseT_Full) || + (!ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) || (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) return -EINVAL; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4601118f88c9..f5d3d6230786 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5501,9 +5501,13 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw) return ret; speed = hw->phy.autoneg_advertised; - if 
((!speed) && (hw->mac.ops.get_link_capabilities)) + if (!speed && hw->mac.ops.get_link_capabilities) { ret = hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); + speed &= ~(IXGBE_LINK_SPEED_5GB_FULL | + IXGBE_LINK_SPEED_2_5GB_FULL); + } + if (ret) return ret; From 8d821b5db70723d27ee749b4870de90760606918 Mon Sep 17 00:00:00 2001 From: Hao Luo Date: Wed, 1 Jul 2020 10:53:15 -0700 Subject: [PATCH 0580/2056] selftests/bpf: Switch test_vmlinux to use hrtimer_range_start_ns. The test_vmlinux test uses hrtimer_nanosleep as hook to test tracing programs. But in a kernel built by clang, which performs more aggresive inlining, that function gets inlined into its caller SyS_nanosleep. Therefore, even though fentry and kprobe do hook on the function, they aren't triggered by the call to nanosleep in the test. A possible fix is switching to use a function that is less likely to be inlined, such as hrtimer_range_start_ns. The EXPORT_SYMBOL functions shouldn't be inlined based on the description of [1], therefore safe to use for this test. Also the arguments of this function include the duration of sleep, therefore suitable for test verification. [1] af3b56289be1 time: don't inline EXPORT_SYMBOL functions Tested: In a clang build kernel, before this change, the test fails: test_vmlinux:PASS:skel_open 0 nsec test_vmlinux:PASS:skel_attach 0 nsec test_vmlinux:PASS:tp 0 nsec test_vmlinux:PASS:raw_tp 0 nsec test_vmlinux:PASS:tp_btf 0 nsec test_vmlinux:FAIL:kprobe not called test_vmlinux:FAIL:fentry not called After switching to hrtimer_range_start_ns, the test passes: test_vmlinux:PASS:skel_open 0 nsec test_vmlinux:PASS:skel_attach 0 nsec test_vmlinux:PASS:tp 0 nsec test_vmlinux:PASS:raw_tp 0 nsec test_vmlinux:PASS:tp_btf 0 nsec test_vmlinux:PASS:kprobe 0 nsec test_vmlinux:PASS:fentry 0 nsec Signed-off-by: Hao Luo Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200701175315.1161242-1-haoluo@google.com --- tools/testing/selftests/bpf/progs/test_vmlinux.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c index 5611b564d3b1..29fa09d6a6c6 100644 --- a/tools/testing/selftests/bpf/progs/test_vmlinux.c +++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c @@ -63,20 +63,20 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id) return 0; } -SEC("kprobe/hrtimer_nanosleep") -int BPF_KPROBE(handle__kprobe, - ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid) +SEC("kprobe/hrtimer_start_range_ns") +int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns, + const enum hrtimer_mode mode) { - if (rqtp == MY_TV_NSEC) + if (tim == MY_TV_NSEC) kprobe_called = true; return 0; } -SEC("fentry/hrtimer_nanosleep") -int BPF_PROG(handle__fentry, - ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid) +SEC("fentry/hrtimer_start_range_ns") +int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns, + const enum hrtimer_mode mode) { - if (rqtp == MY_TV_NSEC) + if (tim == MY_TV_NSEC) fentry_called = true; return 0; } From 17bbf925c6f86be8c08bc473cb5327c173154596 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 1 Jul 2020 14:28:16 -0700 Subject: [PATCH 0581/2056] tools/bpftool: Turn off -Wnested-externs warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turn off -Wnested-externs to avoid annoying warnings in 
BUILD_BUG_ON macro when compiling bpftool: In file included from /data/users/andriin/linux/tools/include/linux/build_bug.h:5, from /data/users/andriin/linux/tools/include/linux/kernel.h:8, from /data/users/andriin/linux/kernel/bpf/disasm.h:10, from /data/users/andriin/linux/kernel/bpf/disasm.c:8: /data/users/andriin/linux/kernel/bpf/disasm.c: In function ‘__func_get_name’: /data/users/andriin/linux/tools/include/linux/compiler.h:37:38: warning: nested extern declaration of ‘__compiletime_assert_0’ [-Wnested-externs] _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) ^~~~~~~~~~~~~~~~~~~~~ /data/users/andriin/linux/tools/include/linux/compiler.h:16:15: note: in definition of macro ‘__compiletime_assert’ extern void prefix ## suffix(void) __compiletime_error(msg); \ ^~~~~~ /data/users/andriin/linux/tools/include/linux/compiler.h:37:2: note: in expansion of macro ‘_compiletime_assert’ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) ^~~~~~~~~~~~~~~~~~~ /data/users/andriin/linux/tools/include/linux/build_bug.h:39:37: note: in expansion of macro ‘compiletime_assert’ #define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) ^~~~~~~~~~~~~~~~~~ /data/users/andriin/linux/tools/include/linux/build_bug.h:50:2: note: in expansion of macro ‘BUILD_BUG_ON_MSG’ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) ^~~~~~~~~~~~~~~~ /data/users/andriin/linux/kernel/bpf/disasm.c:20:2: note: in expansion of macro ‘BUILD_BUG_ON’ BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); ^~~~~~~~~~~~ Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200701212816.2072340-1-andriin@fb.com --- tools/bpf/bpftool/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 273da1615503..51bd520ed437 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -40,7 +40,7 @@ bash_compdir ?= /usr/share/bash-completion/completions CFLAGS += -O2 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers -CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS)) +CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS)) CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ -I$(if $(OUTPUT),$(OUTPUT),.) \ -I$(srctree)/kernel/bpf/ \ From 6c92bd5cd4650c39dd929565ee172984c680fead Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 1 Jul 2020 23:44:07 +0200 Subject: [PATCH 0582/2056] selftests/bpf: Test_progs indicate to shell on non-actions When a user selects a non-existing test the summary is printed with indication 0 for all info types, and shell "success" (EXIT_SUCCESS) is indicated. This can be understood by a human end-user, but for shell scripting is it useful to indicate a shell failure (EXIT_FAILURE). 
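A minimal C sketch of the resulting exit-code policy, as a standalone illustration rather than the selftest harness itself:

#include <stdio.h>
#include <stdlib.h>

/* Exit policy: selecting zero tests is a failure for calling scripts,
 * otherwise the exit code reflects whether any selected test failed. */
static int summary_exit_code(int succ, int skip, int fail)
{
        if (succ + skip + fail == 0)
                return EXIT_FAILURE;    /* nothing matched the selection */
        return fail ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(void)
{
        printf("no tests selected -> %d\n", summary_exit_code(0, 0, 0));
        printf("all passed        -> %d\n", summary_exit_code(5, 1, 0));
        printf("one failed        -> %d\n", summary_exit_code(4, 0, 1));
        return 0;
}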
Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159363984736.930467.17956007131403952343.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 54fa5fa688ce..da70a4f72f54 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -687,5 +687,8 @@ int main(int argc, char **argv) free_str_set(&env.subtest_selector.whitelist); free(env.subtest_selector.num_set); + if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) + return EXIT_FAILURE; + return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } From 643e7233aa948901dce81d4573c91ed99fdd272e Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 1 Jul 2020 23:44:12 +0200 Subject: [PATCH 0583/2056] selftests/bpf: Test_progs option for getting number of tests It can be practial to get the number of tests that test_progs contain. This could for example be used to create a shell for-loop construct that runs the individual tests. Like: for N in $(seq 1 $(./test_progs -c)); do ./test_progs -n $N 2>&1 > result_test_${N}.log & done ; wait V2: Add the ability to return the count for the selected tests. This is useful for getting a count e.g. after excluding some tests with option -b. The current beakers test script like to report the max test count upfront. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159363985244.930467.12617117873058936829.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 18 ++++++++++++++++++ tools/testing/selftests/bpf/test_progs.h | 1 + 2 files changed, 19 insertions(+) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index da70a4f72f54..a5dba14b2025 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -366,6 +366,7 @@ enum ARG_KEYS { ARG_TEST_NAME_BLACKLIST = 'b', ARG_VERIFIER_STATS = 's', ARG_VERBOSE = 'v', + ARG_GET_TEST_CNT = 'c', }; static const struct argp_option opts[] = { @@ -379,6 +380,8 @@ static const struct argp_option opts[] = { "Output verifier statistics", }, { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL, "Verbose output (use -vv or -vvv for progressively verbose output)" }, + { "count", ARG_GET_TEST_CNT, NULL, 0, + "Get number of selected top-level tests " }, {}, }; @@ -511,6 +514,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) } } break; + case ARG_GET_TEST_CNT: + env->get_test_cnt = true; + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -654,6 +660,11 @@ int main(int argc, char **argv) test->test_num, test->test_name)) continue; + if (env.get_test_cnt) { + env.succ_cnt++; + continue; + } + test->run_test(); /* ensure last sub-test is finalized properly */ if (test->subtest_name) @@ -677,9 +688,16 @@ int main(int argc, char **argv) cleanup_cgroup_environment(); } stdio_restore(); + + if (env.get_test_cnt) { + printf("%d\n", env.succ_cnt); + goto out; + } + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); +out: free_str_set(&env.test_selector.blacklist); free_str_set(&env.test_selector.whitelist); free(env.test_selector.num_set); diff --git a/tools/testing/selftests/bpf/test_progs.h 
b/tools/testing/selftests/bpf/test_progs.h index f4503c926aca..0030584619c3 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -66,6 +66,7 @@ struct test_env { enum verbosity verbosity; bool jit_enabled; + bool get_test_cnt; struct prog_test_def *test; FILE *stdout; From c1f1f3656eee3a59a40e1805699041ec1c14ab83 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Wed, 1 Jul 2020 23:44:17 +0200 Subject: [PATCH 0584/2056] selftests/bpf: Test_progs option for listing test names The program test_progs have some very useful ability to specify a list of test name substrings for selecting which tests to run. This patch add the ability to list the selected test names without running them. This is practical for seeing which tests gets selected with given select arguments (which can also contain a exclude list via --name-blacklist). This output can also be used by shell-scripts in a for-loop: for N in $(./test_progs --list -t xdp); do \ ./test_progs -t $N 2>&1 > result_test_${N}.log & \ done ; wait This features can also be used for looking up a test number and returning a testname. If the selection was empty then a shell EXIT_FAILURE is returned. This is useful for scripting. e.g. like this: n=1; while [ $(./test_progs --list -n $n) ] ; do \ ./test_progs -n $n ; n=$(( n+1 )); \ done Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159363985751.930467.9610992940793316982.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 15 +++++++++++++++ tools/testing/selftests/bpf/test_progs.h | 1 + 2 files changed, 16 insertions(+) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index a5dba14b2025..ef05d2f0e7bb 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -367,6 +367,7 @@ enum ARG_KEYS { ARG_VERIFIER_STATS = 's', ARG_VERBOSE = 'v', ARG_GET_TEST_CNT = 'c', + ARG_LIST_TEST_NAMES = 'l', }; static const struct argp_option opts[] = { @@ -382,6 +383,8 @@ static const struct argp_option opts[] = { "Verbose output (use -vv or -vvv for progressively verbose output)" }, { "count", ARG_GET_TEST_CNT, NULL, 0, "Get number of selected top-level tests " }, + { "list", ARG_LIST_TEST_NAMES, NULL, 0, + "List test names that would run (without running them) " }, {}, }; @@ -517,6 +520,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) case ARG_GET_TEST_CNT: env->get_test_cnt = true; break; + case ARG_LIST_TEST_NAMES: + env->list_test_names = true; + break; case ARGP_KEY_ARG: argp_usage(state); break; @@ -665,6 +671,12 @@ int main(int argc, char **argv) continue; } + if (env.list_test_names) { + fprintf(env.stdout, "%s\n", test->test_name); + env.succ_cnt++; + continue; + } + test->run_test(); /* ensure last sub-test is finalized properly */ if (test->subtest_name) @@ -694,6 +706,9 @@ int main(int argc, char **argv) goto out; } + if (env.list_test_names) + goto out; + fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 0030584619c3..ec31f382e7fd 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -67,6 +67,7 @@ struct test_env { bool jit_enabled; bool get_test_cnt; + bool list_test_names; struct prog_test_def *test; FILE 
*stdout; From 2cef30d7bd8b8fbddeb74e3753c29d4248c094e0 Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 29 Jun 2020 16:13:27 +0300 Subject: [PATCH 0585/2056] xen: netif.h: add a new extra type for XDP The patch adds a new extra type to be able to diffirentiate between RX responses on xen-netfront side with the adjusted offset required for XDP processing. The offset value from a guest is passed via xenstore. Signed-off-by: Denis Kirjanov Signed-off-by: David S. Miller --- include/xen/interface/io/netif.h | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/include/xen/interface/io/netif.h b/include/xen/interface/io/netif.h index 4f20dbc42910..2194322c3c7f 100644 --- a/include/xen/interface/io/netif.h +++ b/include/xen/interface/io/netif.h @@ -160,6 +160,19 @@ * be applied if it is set. */ +/* + * "xdp-headroom" is used to request that extra space is added + * for XDP processing. The value is measured in bytes and passed by + * the frontend to be consistent between both ends. + * If the value is greater than zero that means that + * an RX response is going to be passed to an XDP program for processing. + * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes + * + * "feature-xdp-headroom" is set to "1" by the netback side like other features + * so a guest can check if an XDP program can be processed. + */ +#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF + /* * Control ring * ============ @@ -846,7 +859,8 @@ struct xen_netif_tx_request { #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */ #define XEN_NETIF_EXTRA_TYPE_HASH (4) /* u.hash */ -#define XEN_NETIF_EXTRA_TYPE_MAX (5) +#define XEN_NETIF_EXTRA_TYPE_XDP (5) /* u.xdp */ +#define XEN_NETIF_EXTRA_TYPE_MAX (6) /* xen_netif_extra_info_t flags. */ #define _XEN_NETIF_EXTRA_FLAG_MORE (0) @@ -879,6 +893,10 @@ struct xen_netif_extra_info { uint8_t algorithm; uint8_t value[4]; } hash; + struct { + uint16_t headroom; + uint16_t pad[2]; + } xdp; uint16_t pad[3]; } u; }; From 6c5aa6fc4defc2a0977a2c59e4710d50fa1e834c Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 29 Jun 2020 16:13:28 +0300 Subject: [PATCH 0586/2056] xen networking: add basic XDP support for xen-netfront The patch adds a basic XDP processing to xen-netfront driver. We ran an XDP program for an RX response received from netback driver. Also we request xen-netback to adjust data offset for bpf_xdp_adjust_head() header space for custom headers. synchronization between frontend and backend parts is done by using xenbus state switching: Reconfiguring -> Reconfigured- > Connected UDP packets drop rate using xdp program is around 310 kpps using ./pktgen_sample04_many_flows.sh and 160 kpps without the patch. Signed-off-by: Denis Kirjanov Signed-off-by: David S. 
Miller --- drivers/net/Kconfig | 1 + drivers/net/xen-netfront.c | 336 +++++++++++++++++++++++++++++++++++-- 2 files changed, 327 insertions(+), 10 deletions(-) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 7dcb465824be..1368d1d6a114 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -495,6 +495,7 @@ config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN select XEN_XENBUS_FRONTEND + select PAGE_POOL default y help This driver provides support for Xen paravirtual network diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 482c6c8b0fb7..468f3f6f1425 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -44,6 +44,9 @@ #include #include #include +#include +#include +#include #include #include @@ -102,6 +105,8 @@ struct netfront_queue { char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */ struct netfront_info *info; + struct bpf_prog __rcu *xdp_prog; + struct napi_struct napi; /* Split event channels support, tx_* == rx_* when using @@ -144,6 +149,9 @@ struct netfront_queue { struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; + + struct page_pool *page_pool; + struct xdp_rxq_info xdp_rxq; }; struct netfront_info { @@ -159,6 +167,10 @@ struct netfront_info { struct netfront_stats __percpu *rx_stats; struct netfront_stats __percpu *tx_stats; + /* XDP state */ + bool netback_has_xdp_headroom; + bool netfront_xdp_enabled; + atomic_t rx_gso_checksum_fixup; }; @@ -265,8 +277,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) if (unlikely(!skb)) return NULL; - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); - if (!page) { + page = page_pool_dev_alloc_pages(queue->page_pool); + if (unlikely(!page)) { kfree_skb(skb); return NULL; } @@ -560,6 +572,66 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb, return queue_idx; } +static int xennet_xdp_xmit_one(struct net_device *dev, + struct netfront_queue *queue, + struct xdp_frame *xdpf) +{ + struct netfront_info *np = netdev_priv(dev); + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); + int notify; + + xennet_make_first_txreq(queue, NULL, + virt_to_page(xdpf->data), + offset_in_page(xdpf->data), + xdpf->len); + + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); + if (notify) + notify_remote_via_irq(queue->tx_irq); + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->bytes += xdpf->len; + tx_stats->packets++; + u64_stats_update_end(&tx_stats->syncp); + + xennet_tx_buf_gc(queue); + + return 0; +} + +static int xennet_xdp_xmit(struct net_device *dev, int n, + struct xdp_frame **frames, u32 flags) +{ + unsigned int num_queues = dev->real_num_tx_queues; + struct netfront_info *np = netdev_priv(dev); + struct netfront_queue *queue = NULL; + unsigned long irq_flags; + int drops = 0; + int i, err; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + queue = &np->queues[smp_processor_id() % num_queues]; + + spin_lock_irqsave(&queue->tx_lock, irq_flags); + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + + if (!xdpf) + continue; + err = xennet_xdp_xmit_one(dev, queue, xdpf); + if (err) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + spin_unlock_irqrestore(&queue->tx_lock, irq_flags); + + return n - drops; +} + + #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -778,23 +850,82 @@ static int 
xennet_get_extras(struct netfront_queue *queue, return err; } +static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, + struct xen_netif_rx_response *rx, struct bpf_prog *prog, + struct xdp_buff *xdp, bool *need_xdp_flush) +{ + struct xdp_frame *xdpf; + u32 len = rx->status; + u32 act = XDP_PASS; + int err; + + xdp->data_hard_start = page_address(pdata); + xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; + xdp_set_data_meta_invalid(xdp); + xdp->data_end = xdp->data + len; + xdp->rxq = &queue->xdp_rxq; + xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_TX: + get_page(pdata); + xdpf = xdp_convert_buff_to_frame(xdp); + err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); + if (unlikely(err < 0)) + trace_xdp_exception(queue->info->netdev, prog, act); + break; + case XDP_REDIRECT: + get_page(pdata); + err = xdp_do_redirect(queue->info->netdev, xdp, prog); + *need_xdp_flush = true; + if (unlikely(err)) + trace_xdp_exception(queue->info->netdev, prog, act); + break; + case XDP_PASS: + case XDP_DROP: + break; + + case XDP_ABORTED: + trace_xdp_exception(queue->info->netdev, prog, act); + break; + + default: + bpf_warn_invalid_xdp_action(act); + } + + return act; +} + static int xennet_get_responses(struct netfront_queue *queue, struct netfront_rx_info *rinfo, RING_IDX rp, - struct sk_buff_head *list) + struct sk_buff_head *list, + bool *need_xdp_flush) { struct xen_netif_rx_response *rx = &rinfo->rx; - struct xen_netif_extra_info *extras = rinfo->extras; - struct device *dev = &queue->info->netdev->dev; + int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); RING_IDX cons = queue->rx.rsp_cons; struct sk_buff *skb = xennet_get_rx_skb(queue, cons); + struct xen_netif_extra_info *extras = rinfo->extras; grant_ref_t ref = xennet_get_rx_ref(queue, cons); - int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); + struct device *dev = &queue->info->netdev->dev; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + unsigned long ret; int slots = 1; int err = 0; - unsigned long ret; + u32 verdict; if (rx->flags & XEN_NETRXF_extra_info) { err = xennet_get_extras(queue, extras, rp); + if (!err) { + if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) { + struct xen_netif_extra_info *xdp; + + xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1]; + rx->offset = xdp->u.xdp.headroom; + } + } cons = queue->rx.rsp_cons; } @@ -827,9 +958,24 @@ static int xennet_get_responses(struct netfront_queue *queue, gnttab_release_grant_reference(&queue->gref_rx_head, ref); - __skb_queue_tail(list, skb); - + rcu_read_lock(); + xdp_prog = rcu_dereference(queue->xdp_prog); + if (xdp_prog) { + if (!(rx->flags & XEN_NETRXF_more_data)) { + /* currently only a single page contains data */ + verdict = xennet_run_xdp(queue, + skb_frag_page(&skb_shinfo(skb)->frags[0]), + rx, xdp_prog, &xdp, need_xdp_flush); + if (verdict != XDP_PASS) + err = -EINVAL; + } else { + /* drop the frame */ + err = -EINVAL; + } + } + rcu_read_unlock(); next: + __skb_queue_tail(list, skb); if (!(rx->flags & XEN_NETRXF_more_data)) break; @@ -998,6 +1144,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) struct sk_buff_head errq; struct sk_buff_head tmpq; int err; + bool need_xdp_flush = false; spin_lock(&queue->rx_lock); @@ -1014,7 +1161,8 @@ static int xennet_poll(struct napi_struct *napi, int budget) memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); memset(extras, 0, sizeof(rinfo.extras)); - err = 
xennet_get_responses(queue, &rinfo, rp, &tmpq); + err = xennet_get_responses(queue, &rinfo, rp, &tmpq, + &need_xdp_flush); if (unlikely(err)) { err: @@ -1060,6 +1208,8 @@ static int xennet_poll(struct napi_struct *napi, int budget) i = ++queue->rx.rsp_cons; work_done++; } + if (need_xdp_flush) + xdp_do_flush(); __skb_queue_purge(&errq); @@ -1261,6 +1411,101 @@ static void xennet_poll_controller(struct net_device *dev) } #endif +#define NETBACK_XDP_HEADROOM_DISABLE 0 +#define NETBACK_XDP_HEADROOM_ENABLE 1 + +static int talk_to_netback_xdp(struct netfront_info *np, int xdp) +{ + int err; + unsigned short headroom; + + headroom = xdp ? XDP_PACKET_HEADROOM : 0; + err = xenbus_printf(XBT_NIL, np->xbdev->nodename, + "xdp-headroom", "%hu", + headroom); + if (err) + pr_warn("Error writing xdp-headroom\n"); + + return err; +} + +static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM; + struct netfront_info *np = netdev_priv(dev); + struct bpf_prog *old_prog; + unsigned int i, err; + + if (dev->mtu > max_mtu) { + netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu); + return -EINVAL; + } + + if (!np->netback_has_xdp_headroom) + return 0; + + xenbus_switch_state(np->xbdev, XenbusStateReconfiguring); + + err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE : + NETBACK_XDP_HEADROOM_DISABLE); + if (err) + return err; + + /* avoid the race with XDP headroom adjustment */ + wait_event(module_wq, + xenbus_read_driver_state(np->xbdev->otherend) == + XenbusStateReconfigured); + np->netfront_xdp_enabled = true; + + old_prog = rtnl_dereference(np->queues[0].xdp_prog); + + if (prog) + bpf_prog_add(prog, dev->real_num_tx_queues); + + for (i = 0; i < dev->real_num_tx_queues; ++i) + rcu_assign_pointer(np->queues[i].xdp_prog, prog); + + if (old_prog) + for (i = 0; i < dev->real_num_tx_queues; ++i) + bpf_prog_put(old_prog); + + xenbus_switch_state(np->xbdev, XenbusStateConnected); + + return 0; +} + +static u32 xennet_xdp_query(struct net_device *dev) +{ + unsigned int num_queues = dev->real_num_tx_queues; + struct netfront_info *np = netdev_priv(dev); + const struct bpf_prog *xdp_prog; + struct netfront_queue *queue; + unsigned int i; + + for (i = 0; i < num_queues; ++i) { + queue = &np->queues[i]; + xdp_prog = rtnl_dereference(queue->xdp_prog); + if (xdp_prog) + return xdp_prog->aux->id; + } + + return 0; +} + +static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return xennet_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = xennet_xdp_query(dev); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops xennet_netdev_ops = { .ndo_open = xennet_open, .ndo_stop = xennet_close, @@ -1272,6 +1517,8 @@ static const struct net_device_ops xennet_netdev_ops = { .ndo_fix_features = xennet_fix_features, .ndo_set_features = xennet_set_features, .ndo_select_queue = xennet_select_queue, + .ndo_bpf = xennet_xdp, + .ndo_xdp_xmit = xennet_xdp_xmit, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xennet_poll_controller, #endif @@ -1331,6 +1578,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) SET_NETDEV_DEV(netdev, &dev->dev); np->netdev = netdev; + np->netfront_xdp_enabled = false; netif_carrier_off(netdev); @@ -1419,6 +1667,8 @@ static void xennet_disconnect_backend(struct netfront_info *info) queue->rx_ring_ref = GRANT_INVALID_REF; 
queue->tx.sring = NULL; queue->rx.sring = NULL; + + page_pool_destroy(queue->page_pool); } } @@ -1754,6 +2004,51 @@ static void xennet_destroy_queues(struct netfront_info *info) info->queues = NULL; } + + +static int xennet_create_page_pool(struct netfront_queue *queue) +{ + int err; + struct page_pool_params pp_params = { + .order = 0, + .flags = 0, + .pool_size = NET_RX_RING_SIZE, + .nid = NUMA_NO_NODE, + .dev = &queue->info->netdev->dev, + .offset = XDP_PACKET_HEADROOM, + .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM, + }; + + queue->page_pool = page_pool_create(&pp_params); + if (IS_ERR(queue->page_pool)) { + err = PTR_ERR(queue->page_pool); + queue->page_pool = NULL; + return err; + } + + err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev, + queue->id); + if (err) { + netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n"); + goto err_free_pp; + } + + err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq, + MEM_TYPE_PAGE_POOL, queue->page_pool); + if (err) { + netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n"); + goto err_unregister_rxq; + } + return 0; + +err_unregister_rxq: + xdp_rxq_info_unreg(&queue->xdp_rxq); +err_free_pp: + page_pool_destroy(queue->page_pool); + queue->page_pool = NULL; + return err; +} + static int xennet_create_queues(struct netfront_info *info, unsigned int *num_queues) { @@ -1779,6 +2074,14 @@ static int xennet_create_queues(struct netfront_info *info, break; } + /* use page pool recycling instead of buddy allocator */ + ret = xennet_create_page_pool(queue); + if (ret < 0) { + dev_err(&info->xbdev->dev, "can't allocate page pool\n"); + *num_queues = i; + return ret; + } + netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64); if (netif_running(info->netdev)) @@ -1825,6 +2128,17 @@ static int talk_to_netback(struct xenbus_device *dev, goto out_unlocked; } + info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend, + "feature-xdp-headroom", 0); + if (info->netback_has_xdp_headroom) { + /* set the current xen-netfront xdp state */ + err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ? + NETBACK_XDP_HEADROOM_ENABLE : + NETBACK_XDP_HEADROOM_DISABLE); + if (err) + goto out_unlocked; + } + rtnl_lock(); if (info->queues) xennet_destroy_queues(info); @@ -1959,6 +2273,8 @@ static int xennet_connect(struct net_device *dev) err = talk_to_netback(np->xbdev, np); if (err) return err; + if (np->netback_has_xdp_headroom) + pr_info("backend supports XDP headroom\n"); /* talk_to_netback() sets the correct number of queues */ num_queues = dev->real_num_tx_queues; From 1c9535c701fb287c2f292bc7fb39a07c5f8a25fd Mon Sep 17 00:00:00 2001 From: Denis Kirjanov Date: Mon, 29 Jun 2020 16:13:29 +0300 Subject: [PATCH 0587/2056] xen networking: add XDP offset adjustment to xen-netback the patch basically adds the offset adjustment and netfront state reading to make XDP work on netfront side. Reviewed-by: Paul Durrant Signed-off-by: Denis Kirjanov Signed-off-by: David S. 
Miller --- drivers/net/xen-netback/common.h | 4 ++++ drivers/net/xen-netback/interface.c | 2 ++ drivers/net/xen-netback/netback.c | 7 ++++++ drivers/net/xen-netback/rx.c | 15 ++++++++++++- drivers/net/xen-netback/xenbus.c | 34 +++++++++++++++++++++++++++++ 5 files changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 05847eb91a1b..ae477f7756af 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -281,6 +281,9 @@ struct xenvif { u8 ipv6_csum:1; u8 multicast_control:1; + /* headroom requested by xen-netfront */ + u16 xdp_headroom; + /* Is this interface disabled? True when backend discovers * frontend is rogue. */ @@ -395,6 +398,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) irqreturn_t xenvif_interrupt(int irq, void *dev_id); extern bool separate_tx_rx_irq; +extern bool provides_xdp_headroom; extern unsigned int rx_drain_timeout_msecs; extern unsigned int rx_stall_timeout_msecs; diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0c8a02a1ead7..8af497285691 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -483,6 +483,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, vif->queues = NULL; vif->num_queues = 0; + vif->xdp_headroom = 0; + spin_lock_init(&vif->lock); INIT_LIST_HEAD(&vif->fe_mcast_addr); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 315dfc6ea297..6dfca7265644 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -96,6 +96,13 @@ unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT; module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644); MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache"); +/* The module parameter tells that we have to put data + * for xen-netfront with the XDP_PACKET_HEADROOM offset + * needed for XDP processing + */ +bool provides_xdp_headroom = true; +module_param(provides_xdp_headroom, bool, 0644); + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, u8 status); diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index ef5887037b22..ac034f69a170 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -258,6 +258,19 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue, pkt->extra_count++; } + if (queue->vif->xdp_headroom) { + struct xen_netif_extra_info *extra; + + extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1]; + + memset(extra, 0, sizeof(struct xen_netif_extra_info)); + extra->u.xdp.headroom = queue->vif->xdp_headroom; + extra->type = XEN_NETIF_EXTRA_TYPE_XDP; + extra->flags = 0; + + pkt->extra_count++; + } + if (skb->sw_hash) { struct xen_netif_extra_info *extra; @@ -356,7 +369,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue, struct xen_netif_rx_request *req, struct xen_netif_rx_response *rsp) { - unsigned int offset = 0; + unsigned int offset = queue->vif->xdp_headroom; unsigned int flags; do { diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 286054b60d47..7e62a6ee7622 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -393,6 +393,24 @@ static void set_backend_state(struct backend_info *be, } } +static void read_xenbus_frontend_xdp(struct backend_info *be, + struct xenbus_device *dev) +{ + struct xenvif *vif = 
be->vif; + u16 headroom; + int err; + + err = xenbus_scanf(XBT_NIL, dev->otherend, + "xdp-headroom", "%hu", &headroom); + if (err != 1) { + vif->xdp_headroom = 0; + return; + } + if (headroom > XEN_NETIF_MAX_XDP_HEADROOM) + headroom = XEN_NETIF_MAX_XDP_HEADROOM; + vif->xdp_headroom = headroom; +} + /** * Callback received when the frontend's state changes. */ @@ -417,6 +435,11 @@ static void frontend_changed(struct xenbus_device *dev, set_backend_state(be, XenbusStateConnected); break; + case XenbusStateReconfiguring: + read_xenbus_frontend_xdp(be, dev); + xenbus_switch_state(dev, XenbusStateReconfigured); + break; + case XenbusStateClosing: set_backend_state(be, XenbusStateClosing); break; @@ -947,6 +970,8 @@ static int read_xenbus_vif_flags(struct backend_info *be) vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend, "feature-ipv6-csum-offload", 0); + read_xenbus_frontend_xdp(be, dev); + return 0; } @@ -1036,6 +1061,15 @@ static int netback_probe(struct xenbus_device *dev, goto abort_transaction; } + /* we can adjust a headroom for netfront XDP processing */ + err = xenbus_printf(xbt, dev->nodename, + "feature-xdp-headroom", "%d", + provides_xdp_headroom); + if (err) { + message = "writing feature-xdp-headroom"; + goto abort_transaction; + } + /* We don't support rx-flip path (except old guests who * don't grok this feature flag). */ From 317a5740b70568f71ecdcf0db1aefad2e4b74811 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 29 Jun 2020 16:55:21 -0500 Subject: [PATCH 0588/2056] net: ipa: rework ipa_aggr_granularity_val() The timer used for aggregation makes use of an internal 32 KHz clock. The granularity of the timer is programmed by a field whose value is computed by ipa_aggr_granularity_val(). Redefine the way that value is computed by using a new TIMER_FREQUENCY constant representing the underlying clock frequency. Add two BUILD_BUG_ON() calls to ensure the value used is valid. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_main.c | 5 +++++ drivers/net/ipa/ipa_reg.h | 17 ++++++++--------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 76d5108b8403..27a869df3a4b 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -674,6 +674,11 @@ static void ipa_validate_build(void) /* This is used as a divisor */ BUILD_BUG_ON(!IPA_AGGR_GRANULARITY); + + /* Aggregation granularity value can't be 0, and must fit */ + BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY)); + BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) > + field_max(AGGR_GRANULARITY)); #endif /* IPA_VALIDATE */ } diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 0a688d8c1d7c..7ae8015798ae 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -190,24 +190,23 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version) return 0x00000000; } - #define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET 0x000001e8 #define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec /* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */ +/* The internal inactivity timer clock is used for the aggregation timer */ +#define TIMER_FREQUENCY 32000 /* 32 KHz inactivity timer clock */ + #define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0 #define AGGR_GRANULARITY GENMASK(8, 4) -/* Compute the value to use in the AGGR_GRANULARITY field representing - * the given number of microseconds (up to 1 millisecond). 
- * x = (32 * usec) / 1000 - 1 +/* Compute the value to use in the AGGR_GRANULARITY field representing the + * given number of microseconds. The value is one less than the number of + * timer ticks in the requested period. Zero not a valid granularity value. */ -static inline u32 ipa_aggr_granularity_val(u32 microseconds) +static inline u32 ipa_aggr_granularity_val(u32 usec) { - /* assert(microseconds >= 16); (?) */ - /* assert(microseconds <= 1015); */ - - return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1; + return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1; } #define IPA_REG_TX_CFG_OFFSET 0x000001fc From 1d86652b13e8404ee42b6575d97ed228e92547d6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 29 Jun 2020 16:55:22 -0500 Subject: [PATCH 0589/2056] net: ipa: reduce aggregation time limit Halve the time limit used when aggregation is enabled on an RX endpoint, to half a millisecond. Use DIV_ROUND_CLOSEST() to compute the value that represents the time period, to get better accuracy in the event the time limit is not an even multiple of the granularity. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f50d0d11704..93449366d7d8 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -36,7 +36,7 @@ #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 -#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */ +#define IPA_AGGR_TIME_LIMIT_DEFAULT 500 /* microseconds */ /** enum ipa_status_opcode - status element opcode hardware values */ enum ipa_status_opcode { @@ -583,9 +583,11 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); val |= u32_encode_bits(aggr_size, AGGR_BYTE_LIMIT_FMASK); + limit = IPA_AGGR_TIME_LIMIT_DEFAULT; - val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY, - AGGR_TIME_LIMIT_FMASK); + limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); + val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK); + val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK); if (endpoint->data->rx.aggr_close_eof) val |= AGGR_SW_EOF_ACTIVE_FMASK; From 9e88cb5ff713bcacfe216c89d614eb505286ad6d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 29 Jun 2020 16:55:23 -0500 Subject: [PATCH 0590/2056] net: ipa: reuse a local variable in ipa_endpoint_init_aggr() Reuse the "limit" local variable in ipa_endpoint_init_aggr() when setting the aggregation size limit. Simple cleanup. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 93449366d7d8..2f56981b051e 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -576,19 +576,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) if (endpoint->data->aggregation) { if (!endpoint->toward_ipa) { - u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); u32 limit; val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); - val |= u32_encode_bits(aggr_size, - AGGR_BYTE_LIMIT_FMASK); + + limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); + val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK); limit = IPA_AGGR_TIME_LIMIT_DEFAULT; limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK); - val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK); + /* AGGR_PKT_LIMIT is 0 (unlimited) */ + if (endpoint->data->rx.aggr_close_eof) val |= AGGR_SW_EOF_ACTIVE_FMASK; /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ From a442b3c7554898ffafbc77ff49b08ec224caabd8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 07:58:44 -0500 Subject: [PATCH 0591/2056] net: ipa: always report GSI state errors We check the state of an event ring or channel both before and after any GSI command issued that will change that state. In most--but not all--cases, if the state is something different than expected we report an error message. Add error messages where missing, so that all unexpected states provide information about what went wrong. Drop the parentheses around the state value shown in all cases. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi.c | 54 ++++++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 55226b264e3c..7e4e54ee09b1 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -358,13 +358,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id) /* Get initial event ring state */ evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id); - - if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) + if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) { + dev_err(gsi->dev, "bad event ring state %u before alloc\n", + evt_ring->state); return -EINVAL; + } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad event ring state (%u) after alloc\n", + dev_err(gsi->dev, "bad event ring state %u after alloc\n", evt_ring->state); ret = -EIO; } @@ -381,14 +383,14 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id) if (state != GSI_EVT_RING_STATE_ALLOCATED && state != GSI_EVT_RING_STATE_ERROR) { - dev_err(gsi->dev, "bad event ring state (%u) before reset\n", + dev_err(gsi->dev, "bad event ring state %u before reset\n", evt_ring->state); return; } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) - dev_err(gsi->dev, "bad event ring state (%u) after reset\n", + dev_err(gsi->dev, "bad event ring state %u after reset\n", evt_ring->state); } @@ -399,14 +401,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) int ret; if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n", + dev_err(gsi->dev, "bad event ring state %u before dealloc\n", evt_ring->state); return; } ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC); if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) - dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n", + dev_err(gsi->dev, "bad event ring state %u after dealloc\n", evt_ring->state); } @@ -448,21 +450,23 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; + struct device *dev = gsi->dev; enum gsi_channel_state state; int ret; /* Get initial channel state */ state = gsi_channel_state(channel); - if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) + if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) { + dev_err(dev, "bad channel state %u before alloc\n", state); return -EINVAL; + } ret = gsi_channel_command(channel, GSI_CH_ALLOCATE); /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) { - dev_err(gsi->dev, "bad channel state (%u) after alloc\n", - state); + dev_err(dev, "bad channel state %u after alloc\n", state); ret = -EIO; } @@ -472,21 +476,23 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id) /* Start an ALLOCATED channel */ static int gsi_channel_start_command(struct gsi_channel *channel) { + struct device *dev = channel->gsi->dev; enum gsi_channel_state state; int ret; state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_ALLOCATED && - state != GSI_CHANNEL_STATE_STOPPED) + state != GSI_CHANNEL_STATE_STOPPED) { + dev_err(dev, "bad channel state %u before start\n", state); return 
-EINVAL; + } ret = gsi_channel_command(channel, GSI_CH_START); /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_STARTED) { - dev_err(channel->gsi->dev, - "bad channel state (%u) after start\n", state); + dev_err(dev, "bad channel state %u after start\n", state); ret = -EIO; } @@ -496,13 +502,16 @@ static int gsi_channel_start_command(struct gsi_channel *channel) /* Stop a GSI channel in STARTED state */ static int gsi_channel_stop_command(struct gsi_channel *channel) { + struct device *dev = channel->gsi->dev; enum gsi_channel_state state; int ret; state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_STARTED && - state != GSI_CHANNEL_STATE_STOP_IN_PROC) + state != GSI_CHANNEL_STATE_STOP_IN_PROC) { + dev_err(dev, "bad channel state %u before stop\n", state); return -EINVAL; + } ret = gsi_channel_command(channel, GSI_CH_STOP); @@ -515,8 +524,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) if (state == GSI_CHANNEL_STATE_STOP_IN_PROC) return -EAGAIN; - dev_err(channel->gsi->dev, - "bad channel state (%u) after stop\n", state); + dev_err(dev, "bad channel state %u after stop\n", state); return -EIO; } @@ -524,6 +532,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel) /* Reset a GSI channel in ALLOCATED or ERROR state. */ static void gsi_channel_reset_command(struct gsi_channel *channel) { + struct device *dev = channel->gsi->dev; enum gsi_channel_state state; int ret; @@ -532,8 +541,7 @@ static void gsi_channel_reset_command(struct gsi_channel *channel) state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_STOPPED && state != GSI_CHANNEL_STATE_ERROR) { - dev_err(channel->gsi->dev, - "bad channel state (%u) before reset\n", state); + dev_err(dev, "bad channel state %u before reset\n", state); return; } @@ -542,21 +550,20 @@ static void gsi_channel_reset_command(struct gsi_channel *channel) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) - dev_err(channel->gsi->dev, - "bad channel state (%u) after reset\n", state); + dev_err(dev, "bad channel state %u after reset\n", state); } /* Deallocate an ALLOCATED GSI channel */ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) { struct gsi_channel *channel = &gsi->channel[channel_id]; + struct device *dev = gsi->dev; enum gsi_channel_state state; int ret; state = gsi_channel_state(channel); if (state != GSI_CHANNEL_STATE_ALLOCATED) { - dev_err(gsi->dev, - "bad channel state (%u) before dealloc\n", state); + dev_err(dev, "bad channel state %u before dealloc\n", state); return; } @@ -565,8 +572,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id) /* Channel state will normally have been updated */ state = gsi_channel_state(channel); if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED) - dev_err(gsi->dev, - "bad channel state (%u) after dealloc\n", state); + dev_err(dev, "bad channel state %u after dealloc\n", state); } /* Ring an event ring doorbell, reporting the last entry processed by the AP. 
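Stepping back to the ipa_aggr_granularity_val() rework a few patches above, the arithmetic is easy to sanity-check in isolation. Below is a minimal user-space sketch that assumes only the 32 KHz TIMER_FREQUENCY and round-to-nearest division from that patch; div_round_closest() is a stand-in for the kernel's DIV_ROUND_CLOSEST(), and nothing else is taken from the driver.

#include <stdio.h>
#include <stdint.h>

#define TIMER_FREQUENCY	32000		/* 32 KHz aggregation timer clock */
#define USEC_PER_SEC	1000000ULL

/* Stand-in for the kernel's DIV_ROUND_CLOSEST(): divide, rounding to nearest */
static uint32_t div_round_closest(uint64_t n, uint64_t d)
{
	return (uint32_t)((n + d / 2) / d);
}

/* AGGR_GRANULARITY field value: one less than the tick count for the period */
static uint32_t aggr_granularity_val(uint32_t usec)
{
	return div_round_closest((uint64_t)usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
}

int main(void)
{
	/* 500 us -> 16 ticks -> 15;  1000 us -> 32 ticks -> 31 */
	printf("%u %u\n", aggr_granularity_val(500), aggr_granularity_val(1000));
	return 0;
}

Both results agree with the (32 * usec) / 1000 - 1 form that the rework replaces, since the new expression only scales numerator and denominator by 1000.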
From 8463488af4bebd87cc70270c413b4679cec6e378 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 07:58:45 -0500 Subject: [PATCH 0592/2056] net: ipa: standarize more GSI error messages Make minor updates to error messages reported in "gsi.c": - Use local variables to reduce multi-line function calls - Don't use parentheses in messages - Do some slight rewording in a few cases Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/gsi.c | 57 ++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 28 deletions(-) diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 7e4e54ee09b1..c84b8d92caa9 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -336,6 +336,7 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id, { struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id]; struct completion *completion = &evt_ring->completion; + struct device *dev = gsi->dev; u32 val; val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK); @@ -344,8 +345,8 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id, if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion)) return 0; /* Success! */ - dev_err(gsi->dev, "GSI command %u to event ring %u timed out " - "(state is %u)\n", opcode, evt_ring_id, evt_ring->state); + dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n", + opcode, evt_ring_id, evt_ring->state); return -ETIMEDOUT; } @@ -431,6 +432,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) struct completion *completion = &channel->completion; u32 channel_id = gsi_channel_id(channel); struct gsi *gsi = channel->gsi; + struct device *dev = gsi->dev; u32 val; val = u32_encode_bits(channel_id, CH_CHID_FMASK); @@ -439,8 +441,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode) if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion)) return 0; /* Success! */ - dev_err(gsi->dev, - "GSI command %u to channel %u timed out (state is %u)\n", + dev_err(dev, "GSI command %u for channel %u timed out, state %u\n", opcode, channel_id, gsi_channel_state(channel)); return -ETIMEDOUT; @@ -1154,8 +1155,8 @@ static irqreturn_t gsi_isr(int irq, void *dev_id) break; default: dev_err(gsi->dev, - "%s: unrecognized type 0x%08x\n", - __func__, gsi_intr); + "unrecognized interrupt type 0x%08x\n", + gsi_intr); break; } } while (intr_mask); @@ -1259,7 +1260,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) if (ring->virt && addr % size) { dma_free_coherent(dev, size, ring->virt, ring->addr); dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", - size); + size); return -EINVAL; /* Not a good error value, but distinct */ } else if (!ring->virt) { return -ENOMEM; @@ -1650,12 +1651,13 @@ static void gsi_channel_teardown(struct gsi *gsi) /* Setup function for GSI. 
GSI firmware must be loaded and initialized */ int gsi_setup(struct gsi *gsi, bool legacy) { + struct device *dev = gsi->dev; u32 val; /* Here is where we first touch the GSI hardware */ val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET); if (!(val & ENABLED_FMASK)) { - dev_err(gsi->dev, "GSI has not been enabled\n"); + dev_err(dev, "GSI has not been enabled\n"); return -EIO; } @@ -1663,24 +1665,24 @@ int gsi_setup(struct gsi *gsi, bool legacy) gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK); if (!gsi->channel_count) { - dev_err(gsi->dev, "GSI reports zero channels supported\n"); + dev_err(dev, "GSI reports zero channels supported\n"); return -EINVAL; } if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) { - dev_warn(gsi->dev, - "limiting to %u channels (hardware supports %u)\n", + dev_warn(dev, + "limiting to %u channels; hardware supports %u\n", GSI_CHANNEL_COUNT_MAX, gsi->channel_count); gsi->channel_count = GSI_CHANNEL_COUNT_MAX; } gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK); if (!gsi->evt_ring_count) { - dev_err(gsi->dev, "GSI reports zero event rings supported\n"); + dev_err(dev, "GSI reports zero event rings supported\n"); return -EINVAL; } if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) { - dev_warn(gsi->dev, - "limiting to %u event rings (hardware supports %u)\n", + dev_warn(dev, + "limiting to %u event rings; hardware supports %u\n", GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count); gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX; } @@ -1766,19 +1768,19 @@ static bool gsi_channel_data_valid(struct gsi *gsi, /* Make sure channel ids are in the range driver supports */ if (channel_id >= GSI_CHANNEL_COUNT_MAX) { - dev_err(dev, "bad channel id %u (must be less than %u)\n", + dev_err(dev, "bad channel id %u; must be less than %u\n", channel_id, GSI_CHANNEL_COUNT_MAX); return false; } if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) { - dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id); + dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id); return false; } if (!data->channel.tlv_count || data->channel.tlv_count > GSI_TLV_MAX) { - dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n", + dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n", channel_id, data->channel.tlv_count, GSI_TLV_MAX); return false; } @@ -1796,13 +1798,13 @@ static bool gsi_channel_data_valid(struct gsi *gsi, } if (!is_power_of_2(data->channel.tre_count)) { - dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n", + dev_err(dev, "channel %u bad tre_count %u; not power of 2\n", channel_id, data->channel.tre_count); return false; } if (!is_power_of_2(data->channel.event_count)) { - dev_err(dev, "channel %u bad event_count %u (not power of 2)\n", + dev_err(dev, "channel %u bad event_count %u; not power of 2\n", channel_id, data->channel.event_count); return false; } @@ -1956,6 +1958,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, u32 count, const struct ipa_gsi_endpoint_data *data, bool modem_alloc) { + struct device *dev = &pdev->dev; struct resource *res; resource_size_t size; unsigned int irq; @@ -1963,7 +1966,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, gsi_validate_build(); - gsi->dev = &pdev->dev; + gsi->dev = dev; /* The GSI layer performs NAPI on all endpoints. 
NAPI requires a * network device structure, but the GSI layer does not have one, @@ -1974,43 +1977,41 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, /* Get the GSI IRQ and request for it to wake the system */ ret = platform_get_irq_byname(pdev, "gsi"); if (ret <= 0) { - dev_err(gsi->dev, - "DT error %d getting \"gsi\" IRQ property\n", ret); + dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret); return ret ? : -EINVAL; } irq = ret; ret = request_irq(irq, gsi_isr, 0, "gsi", gsi); if (ret) { - dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret); + dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret); return ret; } gsi->irq = irq; ret = enable_irq_wake(gsi->irq); if (ret) - dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret); + dev_warn(dev, "error %d enabling gsi wake irq\n", ret); gsi->irq_wake_enabled = !ret; /* Get GSI memory range and map it */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); if (!res) { - dev_err(gsi->dev, - "DT error getting \"gsi\" memory property\n"); + dev_err(dev, "DT error getting \"gsi\" memory property\n"); ret = -ENODEV; goto err_disable_irq_wake; } size = resource_size(res); if (res->start > U32_MAX || size > U32_MAX - res->start) { - dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n"); + dev_err(dev, "DT memory resource \"gsi\" out of range\n"); ret = -EINVAL; goto err_disable_irq_wake; } gsi->virt = ioremap(res->start, size); if (!gsi->virt) { - dev_err(gsi->dev, "unable to remap \"gsi\" memory\n"); + dev_err(dev, "unable to remap \"gsi\" memory\n"); ret = -ENOMEM; goto err_disable_irq_wake; } From 722208ea3e2a19f927cd507366547b2546444697 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 07:58:46 -0500 Subject: [PATCH 0593/2056] net: ipa: kill IPA_MEM_UC_OFFSET The microcontroller shared memory area is at the beginning of the IPA resident memory. IPA_MEM_UC_OFFSET was defined as the offset within that region where it's found, but it's 0, and it's never actually used. Just get rid of the definition, and move some of the description it had to be above the definition of the ipa_uc_mem_area structure. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_uc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index a1f8db00d55a..9f9980ec2ed3 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -35,12 +35,6 @@ */ /* Supports hardware interface version 0x2000 */ -/* Offset relative to the base of the IPA shared address space of the - * shared region used for communication with the microcontroller. The - * region is 128 bytes in size, but only the first 40 bytes are used. - */ -#define IPA_MEM_UC_OFFSET 0x0000 - /* Delay to allow a the microcontroller to save state when crashing */ #define IPA_SEND_DELAY 100 /* microseconds */ @@ -60,6 +54,10 @@ * @hw_state: state of hardware (including error type information) * @warning_counter: counter of non-fatal hardware errors * @interface_version: hardware-reported interface version + * + * A shared memory area at the base of IPA resident memory is used for + * communication with the microcontroller. The region is 128 bytes in + * size, but only the first 40 bytes (structured this way) are used. 
*/ struct ipa_uc_mem_area { u8 command; /* enum ipa_uc_command */ From f8d34dfdf3f32a40a18df3c160cb5ec8394e0fb8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 08:33:00 -0500 Subject: [PATCH 0594/2056] net: ipa: head-of-line block registers are RX only The INIT_HOL_BLOCK_EN and INIT_HOL_BLOCK_TIMER endpoint registers are only valid for RX endpoints. Have ipa_endpoint_modem_hol_block_clear_all() skip writing these registers for TX endpoints. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 2f56981b051e..d414328b35c9 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -686,7 +686,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) for (i = 0; i < IPA_ENDPOINT_MAX; i++) { struct ipa_endpoint *endpoint = &ipa->endpoint[i]; - if (endpoint->ee_id != GSI_EE_MODEM) + if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; (void)ipa_endpoint_init_hol_block_timer(endpoint, 0); From 9b63f09378ff9df220d154a89910a3c0f8e036e6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 08:33:01 -0500 Subject: [PATCH 0595/2056] net: ipa: metadata_mask register is RX only The INIT_HDR_METADATA_MASK endpoint configuration register is only valid for RX endpoints. Rather than writing a zero to that register for TX endpoints, avoid writing the register at all. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index d414328b35c9..89878ac3c789 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -530,7 +530,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); /* Note that HDR_ENDIANNESS indicates big endian header fields */ - if (!endpoint->toward_ipa && endpoint->data->qmap) + if (endpoint->data->qmap) val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); @@ -1305,10 +1305,10 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) (void)ipa_endpoint_program_suspend(endpoint, false); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_aggr(endpoint); + ipa_endpoint_init_hdr_metadata_mask(endpoint); } ipa_endpoint_init_cfg(endpoint); ipa_endpoint_init_hdr(endpoint); - ipa_endpoint_init_hdr_metadata_mask(endpoint); ipa_endpoint_init_mode(endpoint); ipa_endpoint_status(endpoint); } From 00b9102afadf08af6cd283400809163f7d4680af Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 08:33:02 -0500 Subject: [PATCH 0596/2056] net: ipa: mode register is TX only The INIT_MODE endpoint configuration register is only valid for TX endpoints. Rather than writing a zero to that register for RX endpoints, avoid writing the register at all. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 89878ac3c789..eab8409a8be9 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -541,7 +541,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); u32 val; - if (endpoint->toward_ipa && endpoint->data->dma_mode) { + if (endpoint->data->dma_mode) { enum ipa_endpoint_name name = endpoint->data->dma_endpoint; u32 dma_endpoint_id; @@ -552,7 +552,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) } else { val = u32_encode_bits(IPA_BASIC, MODE_FMASK); } - /* Other bitfields unspecified (and 0) */ + /* All other bits unspecified (and 0) */ iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -1300,6 +1300,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_aggr(endpoint); ipa_endpoint_init_deaggr(endpoint); ipa_endpoint_init_seq(endpoint); + ipa_endpoint_init_mode(endpoint); } else { if (endpoint->ipa->version == IPA_VERSION_3_5_1) (void)ipa_endpoint_program_suspend(endpoint, false); @@ -1309,7 +1310,6 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) } ipa_endpoint_init_cfg(endpoint); ipa_endpoint_init_hdr(endpoint); - ipa_endpoint_init_mode(endpoint); ipa_endpoint_status(endpoint); } From 8b97bcb7bb2629e0840c2bebdfff050f890caac9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 08:33:03 -0500 Subject: [PATCH 0597/2056] net: ipa: clarify endpoint register macro constraints A handful of registers are valid only for RX endpoints, and some others are valid only for TX endpoints. For these endpoints, add a comment above their defined offset macro that indicates the endpoints to which they apply. Extend the endpoint parameter naming convention as well, to make these constraints more explicit. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_reg.h | 43 +++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 7ae8015798ae..eb4e39fa7d4b 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -32,10 +32,12 @@ struct ipa; * parameter is supplied to the offset macro. The "ee" value is a member of * the gsi_ee enumerated type. * - * The offset of a register dependent on endpoint id is computed by a macro - * that is supplied a parameter "ep". The "ep" value is assumed to be less - * than the maximum endpoint value for the current hardware, and that will - * not exceed IPA_ENDPOINT_MAX. + * The offset of a register dependent on endpoint ID is computed by a macro + * that is supplied a parameter "ep", "txep", or "rxep". A register with an + * "ep" parameter is valid for any endpoint; a register with a "txep" or + * "rxep" parameter is valid only for TX or RX endpoints, respectively. The + * "*ep" value is assumed to be less than the maximum valid endpoint ID + * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX. * * The offset of registers related to filter and route tables is computed * by a macro that is supplied a parameter "er". 
The "er" represents an @@ -292,11 +294,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4) #define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10) -#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \ - (0x00000818 + 0x0070 * (ep)) +/* Valid only for RX (IPA producer) endpoints */ +#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \ + (0x00000818 + 0x0070 * (rxep)) -#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \ - (0x00000820 + 0x0070 * (ep)) +/* Valid only for TX (IPA consumer) endpoints */ +#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \ + (0x00000820 + 0x0070 * (txep)) #define MODE_FMASK GENMASK(2, 0) #define DEST_PIPE_INDEX_FMASK GENMASK(8, 4) #define BYTE_THRESHOLD_FMASK GENMASK(27, 12) @@ -315,19 +319,21 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) #define AGGR_FORCE_CLOSE_FMASK GENMASK(22, 22) #define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK GENMASK(24, 24) -#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \ - (0x0000082c + 0x0070 * (ep)) +/* Valid only for RX (IPA producer) endpoints */ +#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \ + (0x0000082c + 0x0070 * (rxep)) #define HOL_BLOCK_EN_FMASK GENMASK(0, 0) -/* The next register is valid only for RX (IPA producer) endpoints */ -#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \ - (0x00000830 + 0x0070 * (ep)) +/* Valid only for RX (IPA producer) endpoints */ +#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \ + (0x00000830 + 0x0070 * (rxep)) /* The next fields are present for IPA v4.2 only */ #define BASE_VALUE_FMASK GENMASK(4, 0) #define SCALE_FMASK GENMASK(12, 8) -#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \ - (0x00000834 + 0x0070 * (ep)) +/* Valid only for TX (IPA consumer) endpoints */ +#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \ + (0x00000834 + 0x0070 * (txep)) #define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0) #define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7) #define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8) @@ -337,8 +343,9 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) (0x00000838 + 0x0070 * (ep)) #define RSRC_GRP_FMASK GENMASK(1, 0) -#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \ - (0x0000083c + 0x0070 * (ep)) +/* Valid only for TX (IPA consumer) endpoints */ +#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \ + (0x0000083c + 0x0070 * (txep)) #define HPS_SEQ_TYPE_FMASK GENMASK(3, 0) #define DPS_SEQ_TYPE_FMASK GENMASK(7, 4) #define HPS_REP_SEQ_TYPE_FMASK GENMASK(11, 8) @@ -352,7 +359,7 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version) /* The next field is present for IPA v4.0 and above */ #define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9) -/* "er" is either an endpoint id (for filters) or a route id (for routes) */ +/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */ #define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \ (0x0000085c + 0x0070 * (er)) #define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0) From 547c8788549460149f3b9fbacdc6a7f8ccecc27f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Jun 2020 08:33:04 -0500 Subject: [PATCH 0598/2056] net: ipa: HOL_BLOCK_EN_FMASK is a 1-bit mask The convention throughout the IPA driver is to directly use single-bit field mask values, rather than using (for example) u32_encode_bits() to set or clear them. Fix the one place that doesn't follow that convention, which sets HOL_BLOCK_EN_FMASK in ipa_endpoint_init_hol_block_enable(). 
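As a reference for the convention described above, here is a minimal stand-alone C sketch (not driver code); GENMASK() and u32_encode_bits() are re-implemented as simplified stand-ins for the kernel helpers from <linux/bits.h> and <linux/bitfield.h>:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel's GENMASK() and u32_encode_bits(). */
    #define GENMASK(h, l)       (((~0u) >> (31 - (h))) & ((~0u) << (l)))
    #define HOL_BLOCK_EN_FMASK  GENMASK(0, 0)

    static uint32_t u32_encode_bits(uint32_t v, uint32_t mask)
    {
        /* Shift the value into the field described by mask. */
        return (v << __builtin_ctz(mask)) & mask;
    }

    int main(void)
    {
        int enable = 1;

        /* Old style: encode 0 or 1 into the one-bit field. */
        uint32_t encoded = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
        /* Driver convention: use the single-bit mask value directly. */
        uint32_t direct = enable ? HOL_BLOCK_EN_FMASK : 0;

        printf("encoded=0x%x direct=0x%x\n", encoded, direct); /* both 0x1 */
        return 0;
    }

Both expressions produce the same register value; the direct form simply avoids the encode helper for a field that is a single bit wide.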
Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index eab8409a8be9..a7b5a6407e8f 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -674,7 +674,7 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) u32 offset; u32 val; - val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK); + val = enable ? HOL_BLOCK_EN_FMASK : 0; offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); iowrite32(val, endpoint->ipa->reg_virt + offset); } From 8d7aab3515fa1f9774939537419fecf5247689a9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 18 Jun 2020 11:46:11 -0700 Subject: [PATCH 0599/2056] ice: implement snapshot for device capabilities Add a new devlink region used for capturing a snapshot of the device capabilities buffer which is reported by the firmware over the AdminQ. This information can be useful in debugging driver and firmware interactions. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 66 +++++++++++++++----- drivers/net/ethernet/intel/ice/ice_common.h | 3 + drivers/net/ethernet/intel/ice/ice_devlink.c | 59 +++++++++++++++++ 4 files changed, 115 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index a4cda8212e64..7486d010a619 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -373,6 +373,7 @@ struct ice_pf { struct devlink_port devlink_port; struct devlink_region *nvm_region; + struct devlink_region *devcaps_region; /* OS reserved IRQ details */ struct msix_entry *msix_entries; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index bce0e1281168..e3cb7bd1ecab 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1823,20 +1823,27 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, } /** - * ice_aq_discover_caps - query function/device capabilities + * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct - * @buf: a virtual buffer to hold the capabilities - * @buf_size: Size of the virtual buffer - * @cap_count: cap count needed if AQ err==ENOMEM - * @opc: capabilities type to discover - pass in the command opcode + * @buf: a buffer to hold the capabilities + * @buf_size: size of the buffer + * @cap_count: if not NULL, set to the number of capabilities reported + * @opc: capabilities type to discover, device or function * @cd: pointer to command details structure or NULL * - * Get the function(0x000a)/device(0x000b) capabilities description from - * the firmware. + * Get the function (0x000A) or device (0x000B) capabilities description from + * firmware and store it in the buffer. + * + * If the cap_count pointer is not NULL, then it is set to the number of + * capabilities firmware will report. Note that if the buffer size is too + * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The + * cap_count will still be updated in this case. It is recommended that the + * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that + * firmware could return) to avoid this. 
*/ -static enum ice_status -ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, - enum ice_adminq_opc opc, struct ice_sq_cd *cd) +enum ice_status +ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aqc_list_caps *cmd; struct ice_aq_desc desc; @@ -1849,12 +1856,43 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, return ICE_ERR_PARAM; ice_fill_dflt_direct_cmd_desc(&desc, opc); - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); - if (!status) - ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc); - else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) + + if (cap_count) *cap_count = le32_to_cpu(cmd->count); + + return status; +} + +/** + * ice_aq_discover_caps - query function/device capabilities + * @hw: pointer to the HW struct + * @buf: a virtual buffer to hold the capabilities + * @buf_size: Size of the virtual buffer + * @cap_count: cap count needed if AQ err==ENOMEM + * @opc: capabilities type to discover - pass in the command opcode + * @cd: pointer to command details structure or NULL + * + * Get the function(0x000a)/device(0x000b) capabilities description from + * the firmware. + * + * NOTE: this function has the side effect of updating the hw->dev_caps or + * hw->func_caps by way of calling ice_parse_caps. + */ +static enum ice_status +ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, + enum ice_adminq_opc opc, struct ice_sq_cd *cd) +{ + u32 local_cap_count = 0; + enum ice_status status; + + status = ice_aq_list_caps(hw, buf, buf_size, &local_cap_count, + opc, cd); + if (!status) + ice_parse_caps(hw, buf, local_cap_count, opc); + else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) + *cap_count = local_cap_count; + return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9b9e50d2398b..e72529e9f022 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -87,6 +87,9 @@ enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); +enum ice_status +ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, + enum ice_adminq_opc opc, struct ice_sq_cd *cd); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index a73d06e06b5d..3ea470e8cfa2 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -397,12 +397,60 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink, return 0; } +/** + * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities + * @devlink: the devlink instance + * @extack: extended ACK response structure + * @data: on exit points to snapshot data buffer + * + * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for + * the device-caps devlink region. It captures a snapshot of the device + * capabilities reported by firmware. + * + * @returns zero on success, and updates the data pointer. Returns a non-zero + * error code on failure. 
+ */ +static int +ice_devlink_devcaps_snapshot(struct devlink *devlink, + struct netlink_ext_ack *extack, u8 **data) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + enum ice_status status; + void *devcaps; + + devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN); + if (!devcaps) + return -ENOMEM; + + status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL, + ice_aqc_opc_list_dev_caps, NULL); + if (status) { + dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n", + status, hw->adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities"); + vfree(devcaps); + return -EIO; + } + + *data = (u8 *)devcaps; + + return 0; +} + static const struct devlink_region_ops ice_nvm_region_ops = { .name = "nvm-flash", .destructor = vfree, .snapshot = ice_devlink_nvm_snapshot, }; +static const struct devlink_region_ops ice_devcaps_region_ops = { + .name = "device-caps", + .destructor = vfree, + .snapshot = ice_devlink_devcaps_snapshot, +}; + /** * ice_devlink_init_regions - Initialize devlink regions * @pf: the PF device structure @@ -424,6 +472,15 @@ void ice_devlink_init_regions(struct ice_pf *pf) PTR_ERR(pf->nvm_region)); pf->nvm_region = NULL; } + + pf->devcaps_region = devlink_region_create(devlink, + &ice_devcaps_region_ops, 10, + ICE_AQ_MAX_BUF_LEN); + if (IS_ERR(pf->devcaps_region)) { + dev_err(dev, "failed to create device-caps devlink region, err %ld\n", + PTR_ERR(pf->devcaps_region)); + pf->devcaps_region = NULL; + } } /** @@ -436,4 +493,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) devlink_region_destroy(pf->nvm_region); + if (pf->devcaps_region) + devlink_region_destroy(pf->devcaps_region); } From a3b658cfb66497525278cbf852913a04dbaae992 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Tue, 30 Jun 2020 14:49:41 -0400 Subject: [PATCH 0600/2056] bonding: allow xfrm offload setup post-module-load At the moment, bonding xfrm crypto offload can only be set up if the bonding module is loaded with active-backup mode already set. We need to be able to make this work with bonds set to AB after the bonding driver has already been loaded. So what's done here is: 1) move #define BOND_XFRM_FEATURES to net/bonding.h so it can be used by both bond_main.c and bond_options.c 2) set BOND_XFRM_FEATURES in bond_dev->hw_features universally, rather than only when loading in AB mode 3) wire up xfrmdev_ops universally too 4) disable BOND_XFRM_FEATURES in bond_dev->features if not AB 5) exit early (non-AB case) from bond_ipsec_offload_ok, to prevent a performance hit from traversing into the underlying drivers 6) toggle BOND_XFRM_FEATURES in bond_dev->wanted_features and call netdev_change_features() from bond_option_mode_set() In my local testing, I can change bonding modes back and forth on the fly, have hardware offload work when I'm in AB, and see no performance penalty to non-AB software encryption, despite having xfrm bits all wired up for all modes now. Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Reported-by: Huy Nguyen CC: Saeed Mahameed CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 19 ++++++++++--------- drivers/net/bonding/bond_options.c | 8 ++++++++ include/net/bonding.h | 5 +++++ 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b3479584cc16..2adf6ce20a38 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -434,6 +434,9 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); struct net_device *slave_dev = curr_active->dev; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + return true; + if (!(slave_dev->xfrmdev_ops && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); @@ -1218,11 +1221,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_ALL_TSO) -#ifdef CONFIG_XFRM_OFFLOAD -#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ - NETIF_F_GSO_ESP) -#endif /* CONFIG_XFRM_OFFLOAD */ - #define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_ALL_TSO) @@ -4654,8 +4652,7 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ - if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) - bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; + bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; bond->xs = NULL; #endif /* CONFIG_XFRM_OFFLOAD */ @@ -4678,11 +4675,15 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4; #ifdef CONFIG_XFRM_OFFLOAD - if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) - bond_dev->hw_features |= BOND_XFRM_FEATURES; + bond_dev->hw_features |= BOND_XFRM_FEATURES; #endif /* CONFIG_XFRM_OFFLOAD */ bond_dev->features |= bond_dev->hw_features; bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; +#ifdef CONFIG_XFRM_OFFLOAD + /* Disable XFRM features if this isn't an active-backup config */ + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) + bond_dev->features &= ~BOND_XFRM_FEATURES; +#endif /* CONFIG_XFRM_OFFLOAD */ } /* Destroy a bonding device. 
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ddb3916d3506..9abfaae1c6f7 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -767,6 +767,14 @@ static int bond_option_mode_set(struct bonding *bond, if (newval->value == BOND_MODE_ALB) bond->params.tlb_dynamic_lb = 1; +#ifdef CONFIG_XFRM_OFFLOAD + if (newval->value == BOND_MODE_ACTIVEBACKUP) + bond->dev->wanted_features |= BOND_XFRM_FEATURES; + else + bond->dev->wanted_features &= ~BOND_XFRM_FEATURES; + netdev_change_features(bond->dev); +#endif /* CONFIG_XFRM_OFFLOAD */ + /* don't cache arp_validate between modes */ bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; bond->params.mode = newval->value; diff --git a/include/net/bonding.h b/include/net/bonding.h index a00e1764e9b1..7d132cc1e584 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -86,6 +86,11 @@ #define bond_for_each_slave_rcu(bond, pos, iter) \ netdev_for_each_lower_private_rcu((bond)->dev, pos, iter) +#ifdef CONFIG_XFRM_OFFLOAD +#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) +#endif /* CONFIG_XFRM_OFFLOAD */ + #ifdef CONFIG_NET_POLL_CONTROLLER extern atomic_t netpoll_block_tx; From b3c3890489f6d3794916958d1663ab6aecb0290b Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Mon, 29 Jun 2020 17:27:45 -0700 Subject: [PATCH 0601/2056] ice: avoid unnecessary single-member variable-length structs There are a number of structures that consist of a one-element array as the only struct member. Some of those are unused so remove them. Others are used to index into a buffer/array consisting of a variable number of a different data or structure type. Those are unnecessary since we can use simple pointer arithmetic or index directly into the buffer to access individual elements of the buffer/array. Additional code cleanups were done near areas affected by this change. Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 39 +----------- drivers/net/ethernet/intel/ice/ice_common.c | 29 ++++----- drivers/net/ethernet/intel/ice/ice_common.h | 2 +- drivers/net/ethernet/intel/ice/ice_dcb.c | 4 +- drivers/net/ethernet/intel/ice/ice_sched.c | 61 ++++++++----------- drivers/net/ethernet/intel/ice/ice_sched.h | 2 +- drivers/net/ethernet/intel/ice/ice_switch.c | 14 ++--- 7 files changed, 51 insertions(+), 100 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 92f82f2a8af4..a55dc1594daa 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -215,13 +215,6 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; -/* The response buffer is as follows. Note that the length of the - * elements array varies with the length of the command response. 
- */ -struct ice_aqc_get_sw_cfg_resp { - struct ice_aqc_get_sw_cfg_resp_elem elements[1]; -}; - /* These resource type defines are used for all switch resource * commands where a resource type is required, such as: * Get Resource Allocation command (indirect 0x0204) @@ -695,14 +688,6 @@ struct ice_aqc_sched_elem_cmd { __le32 addr_low; }; -/* This is the buffer for: - * Suspend Nodes (indirect 0x0409) - * Resume Nodes (indirect 0x040A) - */ -struct ice_aqc_suspend_resume_elem { - __le32 teid[1]; -}; - struct ice_aqc_elem_info_bw { __le16 bw_profile_idx; __le16 bw_alloc; @@ -756,14 +741,6 @@ struct ice_aqc_add_elem { struct ice_aqc_txsched_elem_data generic[1]; }; -struct ice_aqc_conf_elem { - struct ice_aqc_txsched_elem_data generic[1]; -}; - -struct ice_aqc_get_elem { - struct ice_aqc_txsched_elem_data generic[1]; -}; - struct ice_aqc_get_topo_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; struct ice_aqc_txsched_elem_data @@ -835,10 +812,6 @@ struct ice_aqc_rl_profile_elem { __le16 rl_encode; }; -struct ice_aqc_rl_profile_generic_elem { - struct ice_aqc_rl_profile_elem generic[1]; -}; - /* Query Scheduler Resource Allocation (indirect 0x0412) * This indirect command retrieves the scheduler resources allocated by * EMP Firmware to the given PF. @@ -1584,10 +1557,6 @@ struct ice_aqc_dis_txq_item { (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) }; -struct ice_aqc_dis_txq { - struct ice_aqc_dis_txq_item qgrps[1]; -}; - /* Configure Firmware Logging Command (indirect 0xFF09) * Logging Information Read Response (indirect 0xFF10) * Note: The 0xFF10 command has no input parameters. @@ -1636,12 +1605,7 @@ enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_MAX, }; -/* This is the buffer for both of the logging commands. - * The entry array size depends on the datalen parameter in the descriptor. - * There will be a total of datalen / 2 entries. 
- */ -struct ice_aqc_fw_logging_data { - __le16 entry[1]; +/* Defines for both above FW logging command/response buffers */ #define ICE_AQC_FW_LOG_ID_S 0 #define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) @@ -1654,7 +1618,6 @@ struct ice_aqc_fw_logging_data { #define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ #define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ #define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ -}; /* Get/Clear FW Log (indirect 0xFF11) */ struct ice_aqc_get_clear_fw_log { diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e3cb7bd1ecab..622ab1f0e18f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -440,30 +440,24 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), sw); } -#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ - (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) -#define ICE_FW_LOG_DESC_SIZE_MAX \ - ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) - /** * ice_get_fw_log_cfg - get FW logging configuration * @hw: pointer to the HW struct */ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) { - struct ice_aqc_fw_logging_data *config; struct ice_aq_desc desc; enum ice_status status; + __le16 *config; u16 size; - size = ICE_FW_LOG_DESC_SIZE_MAX; + size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX; config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL); if (!config) return ICE_ERR_NO_MEMORY; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF); desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); status = ice_aq_send_cmd(hw, &desc, config, size, NULL); @@ -474,7 +468,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { u16 v, m, flgs; - v = le16_to_cpu(config->entry[i]); + v = le16_to_cpu(config[i]); m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S; @@ -526,11 +520,11 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) */ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) { - struct ice_aqc_fw_logging_data *data = NULL; struct ice_aqc_fw_logging *cmd; enum ice_status status = 0; u16 i, chgs = 0, len = 0; struct ice_aq_desc desc; + __le16 *data = NULL; u8 actv_evnts = 0; void *buf = NULL; @@ -571,8 +565,9 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) continue; if (!data) { - data = devm_kzalloc(ice_hw_to_dev(hw), - ICE_FW_LOG_DESC_SIZE_MAX, + data = devm_kcalloc(ice_hw_to_dev(hw), + sizeof(*data), + ICE_AQC_FW_LOG_ID_MAX, GFP_KERNEL); if (!data) return ICE_ERR_NO_MEMORY; @@ -580,7 +575,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) val = i << ICE_AQC_FW_LOG_ID_S; val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; - data->entry[chgs++] = cpu_to_le16(val); + data[chgs++] = cpu_to_le16(val); } /* Only enable FW logging if at least one module is specified. 
@@ -599,7 +594,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; buf = data; - len = ICE_FW_LOG_DESC_SIZE(chgs); + len = sizeof(*data) * chgs; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); } } @@ -629,7 +624,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) continue; } - v = le16_to_cpu(data->entry[i]); + v = le16_to_cpu(data[i]); m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; } @@ -3690,14 +3685,14 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, */ enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, - struct ice_aqc_get_elem *buf) + struct ice_aqc_txsched_elem_data *buf) { u16 buf_size, num_elem_ret = 0; enum ice_status status; buf_size = sizeof(*buf); memset(buf, 0, buf_size); - buf->generic[0].node_teid = cpu_to_le32(node_teid); + buf->node_teid = cpu_to_le32(node_teid); status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, NULL); if (status || num_elem_ret != 1) diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index e72529e9f022..d1238f43e872 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -155,5 +155,5 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, - struct ice_aqc_get_elem *buf); + struct ice_aqc_txsched_elem_data *buf); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index adb8dab765c8..2cecc9d08005 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -1362,7 +1362,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, struct ice_aqc_port_ets_elem *buf) { struct ice_sched_node *node, *tc_node; - struct ice_aqc_get_elem elem; + struct ice_aqc_txsched_elem_data elem; enum ice_status status = 0; u32 teid1, teid2; u8 i, j; @@ -1404,7 +1404,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi, /* new TC */ status = ice_sched_query_elem(pi->hw, teid2, &elem); if (!status) - status = ice_sched_add_node(pi, 1, &elem.generic[0]); + status = ice_sched_add_node(pi, 1, &elem); if (status) break; /* update the TC number */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 0475134295e4..294257b4d138 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -129,7 +129,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, */ enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_get_elem *buf, u16 buf_size, + struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, @@ -149,8 +149,8 @@ enum ice_status ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_aqc_txsched_elem_data *info) { + struct ice_aqc_txsched_elem_data elem; struct ice_sched_node *parent; - struct ice_aqc_get_elem elem; struct ice_sched_node *node; enum ice_status status; struct ice_hw *hw; @@ -195,7 +195,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node->parent = parent; node->tx_sched_layer = layer; parent->children[parent->num_children++] = node; - 
memcpy(&node->info, &elem.generic[0], sizeof(node->info)); + node->info = elem; return 0; } @@ -423,7 +423,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req, */ static enum ice_status ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_conf_elem *buf, u16 buf_size, + struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_cfgd, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, @@ -443,8 +443,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, * Suspend scheduling elements (0x0409) */ static enum ice_status -ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_suspend_resume_elem *buf, +ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, @@ -464,8 +463,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, * resume scheduling elements (0x040A) */ static enum ice_status -ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_suspend_resume_elem *buf, +ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) { return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, @@ -506,9 +504,9 @@ static enum ice_status ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, bool suspend) { - struct ice_aqc_suspend_resume_elem *buf; u16 i, buf_size, num_elem_ret = 0; enum ice_status status; + __le32 *buf; buf_size = sizeof(*buf) * num_nodes; buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); @@ -516,7 +514,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, return ICE_ERR_NO_MEMORY; for (i = 0; i < num_nodes; i++) - buf->teid[i] = cpu_to_le32(node_teids[i]); + buf[i] = cpu_to_le32(node_teids[i]); if (suspend) status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, @@ -591,7 +589,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) */ static enum ice_status ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, - u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf, + u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) { struct ice_aqc_rl_profile *cmd; @@ -622,13 +620,11 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, */ static enum ice_status ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, - struct ice_aqc_rl_profile_generic_elem *buf, - u16 buf_size, u16 *num_profiles_added, - struct ice_sq_cd *cd) + struct ice_aqc_rl_profile_elem *buf, u16 buf_size, + u16 *num_profiles_added, struct ice_sq_cd *cd) { - return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, - num_profiles, buf, - buf_size, num_profiles_added, cd); + return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles, + buf, buf_size, num_profiles_added, cd); } /** @@ -644,13 +640,12 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, */ static enum ice_status ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, - struct ice_aqc_rl_profile_generic_elem *buf, - u16 buf_size, u16 *num_profiles_removed, - struct ice_sq_cd *cd) + struct ice_aqc_rl_profile_elem *buf, u16 buf_size, + u16 *num_profiles_removed, struct ice_sq_cd *cd) { return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, - num_profiles, buf, - buf_size, 
num_profiles_removed, cd); + num_profiles, buf, buf_size, + num_profiles_removed, cd); } /** @@ -666,7 +661,7 @@ static enum ice_status ice_sched_del_rl_profile(struct ice_hw *hw, struct ice_aqc_rl_profile_info *rl_info) { - struct ice_aqc_rl_profile_generic_elem *buf; + struct ice_aqc_rl_profile_elem *buf; u16 num_profiles_removed; enum ice_status status; u16 num_profiles = 1; @@ -675,8 +670,7 @@ ice_sched_del_rl_profile(struct ice_hw *hw, return ICE_ERR_IN_USE; /* Safe to remove profile ID */ - buf = (struct ice_aqc_rl_profile_generic_elem *) - &rl_info->profile; + buf = &rl_info->profile; status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), &num_profiles_removed, NULL); if (status || num_profiles_removed != num_profiles) @@ -1867,7 +1861,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) * @node: pointer to node * @info: node info to update * - * It updates the HW DB, and local SW DB of node. It updates the scheduling + * Update the HW DB, and local SW DB of node. Update the scheduling * parameters of node from argument info data buffer (Info->data buf) and * returns success or error on config sched element failure. The caller * needs to hold scheduler lock. @@ -1876,18 +1870,18 @@ static enum ice_status ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, struct ice_aqc_txsched_elem_data *info) { - struct ice_aqc_conf_elem buf; + struct ice_aqc_txsched_elem_data buf; enum ice_status status; u16 elem_cfgd = 0; u16 num_elems = 1; - buf.generic[0] = *info; + buf = *info; /* Parent TEID is reserved field in this aq call */ - buf.generic[0].parent_teid = 0; + buf.parent_teid = 0; /* Element type is reserved field in this aq call */ - buf.generic[0].data.elem_type = 0; + buf.data.elem_type = 0; /* Flags is reserved field in this aq call */ - buf.generic[0].data.flags = 0; + buf.data.flags = 0; /* Update HW DB */ /* Configure element node */ @@ -2131,9 +2125,9 @@ static struct ice_aqc_rl_profile_info * ice_sched_add_rl_profile(struct ice_port_info *pi, enum ice_rl_type rl_type, u32 bw, u8 layer_num) { - struct ice_aqc_rl_profile_generic_elem *buf; struct ice_aqc_rl_profile_info *rl_prof_elem; u16 profiles_added = 0, num_profiles = 1; + struct ice_aqc_rl_profile_elem *buf; enum ice_status status; struct ice_hw *hw; u8 profile_type; @@ -2182,8 +2176,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); /* Create new entry in HW DB */ - buf = (struct ice_aqc_rl_profile_generic_elem *) - &rl_prof_elem->profile; + buf = &rl_prof_elem->profile; status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), &profiles_added, NULL); if (status || profiles_added != num_profiles) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index f0593cfb6521..0e55ae0d446f 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -56,7 +56,7 @@ struct ice_sched_agg_info { /* FW AQ command calls */ enum ice_status ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, - struct ice_aqc_get_elem *buf, u16 buf_size, + struct ice_aqc_txsched_elem_data *buf, u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd); enum ice_status ice_sched_init_port(struct ice_port_info *pi); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index ff7d16ac693e..0414be3f47dd 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -87,7 +87,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) * @num_elems: pointer to number of elements * @cd: pointer to command details structure or NULL * - * Get switch configuration (0x0200) to be placed in 'buff'. + * Get switch configuration (0x0200) to be placed in buf. * This admin command returns information such as initial VSI/port number * and switch ID it belongs to. * @@ -104,13 +104,13 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) * parsing the response buffer. */ static enum ice_status -ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, +ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf, u16 buf_size, u16 *req_desc, u16 *num_elems, struct ice_sq_cd *cd) { struct ice_aqc_get_sw_cfg *cmd; - enum ice_status status; struct ice_aq_desc desc; + enum ice_status status; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg); cmd = &desc.params.get_sw_conf; @@ -550,7 +550,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type, */ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) { - struct ice_aqc_get_sw_cfg_resp *rbuf; + struct ice_aqc_get_sw_cfg_resp_elem *rbuf; enum ice_status status; u16 req_desc = 0; u16 num_elems; @@ -568,19 +568,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) * writing a non-zero value in req_desc */ do { + struct ice_aqc_get_sw_cfg_resp_elem *ele; + status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN, &req_desc, &num_elems, NULL); if (status) break; - for (i = 0; i < num_elems; i++) { - struct ice_aqc_get_sw_cfg_resp_elem *ele; + for (i = 0, ele = rbuf; i < num_elems; i++, ele++) { u16 pf_vf_num, swid, vsi_port_num; bool is_vf = false; u8 res_type; - ele = rbuf[i].elements; vsi_port_num = le16_to_cpu(ele->vsi_port_num) & ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M; From 66486d8943bac36821f66b7e2d33e4c7c691e113 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Mon, 29 Jun 2020 17:27:46 -0700 Subject: [PATCH 0602/2056] ice: replace single-element array used for C struct hack Convert the pre-C90-extension "C struct hack" method (using a single- element array at the end of a structure for implementing variable-length types) to the preferred use of C99 flexible array member. Additional code cleanups were done near areas affected by this change. 
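As a hedged illustration of the conversion described above (a stand-alone sketch, not driver code; struct_size() here is a simplified stand-in for the overflow-checking helper the kernel provides in <linux/overflow.h>):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Old "struct hack": a one-element array ends the struct, and allocations
     * are sized with an error-prone "+ (n - 1) * sizeof(elem)" adjustment. */
    struct old_elem_list {
        uint16_t count;
        uint32_t elem[1];
    };

    /* C99 flexible array member: no placeholder element counted by sizeof(). */
    struct new_elem_list {
        uint16_t count;
        uint32_t elem[];
    };

    /* Simplified stand-in for the kernel's struct_size() helper. */
    #define struct_size(p, member, n) \
        (sizeof(*(p)) + (size_t)(n) * sizeof((p)->member[0]))

    int main(void)
    {
        size_t n = 4;
        struct new_elem_list *list;

        /* Old sizing: sizeof(*list) + (n - 1) * sizeof(list->elem[0]) */
        list = calloc(1, struct_size(list, elem, n));
        if (!list)
            return 1;

        list->count = (uint16_t)n;
        memset(list->elem, 0, n * sizeof(list->elem[0]));
        free(list);
        return 0;
    }

The sizing expression no longer needs the "n - 1" adjustment because the flexible array member contributes nothing to sizeof(struct new_elem_list).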
Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 23 +++--- drivers/net/ethernet/intel/ice/ice_base.c | 2 +- drivers/net/ethernet/intel/ice/ice_common.c | 74 ++++++++++--------- drivers/net/ethernet/intel/ice/ice_dcb.h | 4 +- .../net/ethernet/intel/ice/ice_flex_pipe.c | 35 +++++---- .../net/ethernet/intel/ice/ice_flex_type.h | 39 +++++----- drivers/net/ethernet/intel/ice/ice_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_sched.c | 4 +- drivers/net/ethernet/intel/ice/ice_switch.c | 36 ++++----- drivers/net/ethernet/intel/ice/ice_xsk.c | 6 +- 10 files changed, 111 insertions(+), 114 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index a55dc1594daa..99c39249613a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -267,7 +267,7 @@ struct ice_aqc_alloc_free_res_elem { #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M \ (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S) __le16 num_elems; - struct ice_aqc_res_elem elem[1]; + struct ice_aqc_res_elem elem[]; }; /* Add VSI (indirect 0x0210) @@ -561,8 +561,8 @@ struct ice_sw_rule_lkup_rx_tx { * lookup-type */ __le16 hdr_len; - u8 hdr[1]; -} __packed; + u8 hdr[]; +}; /* Add/Update/Remove large action command/response entry * "index" is returned as part of a response to a successful Add command, and @@ -571,7 +571,6 @@ struct ice_sw_rule_lkup_rx_tx { struct ice_sw_rule_lg_act { __le16 index; /* Index in large action table */ __le16 size; - __le32 act[1]; /* array of size for actions */ /* Max number of large actions */ #define ICE_MAX_LG_ACT 4 /* Bit 0:1 - Action type */ @@ -622,6 +621,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_STAT_COUNT 0x7 #define ICE_LG_ACT_STAT_COUNT_S 3 #define ICE_LG_ACT_STAT_COUNT_M (0x7F << ICE_LG_ACT_STAT_COUNT_S) + __le32 act[]; /* array of size for actions */ }; /* Add/Update/Remove VSI list command/response entry @@ -631,7 +631,7 @@ struct ice_sw_rule_lg_act { struct ice_sw_rule_vsi_list { __le16 index; /* Index of VSI/Prune list */ __le16 number_vsi; - __le16 vsi[1]; /* Array of number_vsi VSI numbers */ + __le16 vsi[]; /* Array of number_vsi VSI numbers */ }; /* Query VSI list command/response entry */ @@ -738,7 +738,7 @@ struct ice_aqc_txsched_topo_grp_info_hdr { struct ice_aqc_add_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; - struct ice_aqc_txsched_elem_data generic[1]; + struct ice_aqc_txsched_elem_data generic[]; }; struct ice_aqc_get_topo_elem { @@ -749,7 +749,7 @@ struct ice_aqc_get_topo_elem { struct ice_aqc_delete_elem { struct ice_aqc_txsched_topo_grp_info_hdr hdr; - __le32 teid[1]; + __le32 teid[]; }; /* Query Port ETS (indirect 0x040E) @@ -1510,7 +1510,7 @@ struct ice_aqc_add_tx_qgrp { __le32 parent_teid; u8 num_txqs; u8 rsvd[3]; - struct ice_aqc_add_txqs_perq txqs[1]; + struct ice_aqc_add_txqs_perq txqs[]; }; /* Disable Tx LAN Queues (indirect 0x0C31) */ @@ -1548,14 +1548,13 @@ struct ice_aqc_dis_txq_item { u8 num_qs; u8 rsvd; /* The length of the q_id array varies according to num_qs */ - __le16 q_id[1]; - /* This only applies from F8 onward */ #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S 15 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q \ (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET \ (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S) -}; + __le16 q_id[]; +} __packed; /* Configure Firmware Logging Command (indirect 0xFF09) * Logging Information Read Response (indirect 
0xFF10) @@ -1679,7 +1678,7 @@ struct ice_aqc_get_pkg_info { /* Get Package Info List response buffer format (0x0C43) */ struct ice_aqc_get_pkg_info_resp { __le32 count; - struct ice_aqc_get_pkg_info pkg_info[1]; + struct ice_aqc_get_pkg_info pkg_info[]; }; /* Lan Queue Overflow Event (direct, 0x1001) */ diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index d620d26d42ed..87008476d8fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -635,10 +635,10 @@ int ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, struct ice_aqc_add_tx_qgrp *qg_buf) { + u8 buf_len = struct_size(qg_buf, txqs, 1); struct ice_tlan_ctx tlan_ctx = { 0 }; struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; - u8 buf_len = sizeof(*qg_buf); struct ice_hw *hw = &pf->hw; enum ice_status status; u16 pf_q; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 622ab1f0e18f..57fd815c41bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1536,7 +1536,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) enum ice_status status; u16 buf_len; - buf_len = struct_size(buf, elem, num - 1); + buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; @@ -1553,7 +1553,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) if (status) goto ice_alloc_res_exit; - memcpy(res, buf->elem, sizeof(buf->elem) * num); + memcpy(res, buf->elem, sizeof(*buf->elem) * num); ice_alloc_res_exit: kfree(buf); @@ -1574,7 +1574,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) enum ice_status status; u16 buf_len; - buf_len = struct_size(buf, elem, num - 1); + buf_len = struct_size(buf, elem, num); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; @@ -1582,7 +1582,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) /* Prepare buffer to free resource. 
*/ buf->num_elems = cpu_to_le16(num); buf->res_type = cpu_to_le16(type); - memcpy(buf->elem, res, sizeof(buf->elem) * num); + memcpy(buf->elem, res, sizeof(*buf->elem) * num); status = ice_aq_alloc_free_res(hw, num, buf, buf_len, ice_aqc_opc_free_res, NULL); @@ -2922,10 +2922,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size, struct ice_sq_cd *cd) { - u16 i, sum_header_size, sum_q_size = 0; struct ice_aqc_add_tx_qgrp *list; struct ice_aqc_add_txqs *cmd; struct ice_aq_desc desc; + u16 i, sum_size = 0; cmd = &desc.params.add_txqs; @@ -2937,18 +2937,13 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps, if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS) return ICE_ERR_PARAM; - sum_header_size = num_qgrps * - (sizeof(*qg_list) - sizeof(*qg_list->txqs)); - - list = qg_list; - for (i = 0; i < num_qgrps; i++) { - struct ice_aqc_add_txqs_perq *q = list->txqs; - - sum_q_size += list->num_txqs * sizeof(*q); - list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs); + for (i = 0, list = qg_list; i < num_qgrps; i++) { + sum_size += struct_size(list, txqs, list->num_txqs); + list = (struct ice_aqc_add_tx_qgrp *)(list->txqs + + list->num_txqs); } - if (buf_size != (sum_header_size + sum_q_size)) + if (buf_size != sum_size) return ICE_ERR_PARAM; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); @@ -2976,6 +2971,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, enum ice_disq_rst_src rst_src, u16 vmvf_num, struct ice_sq_cd *cd) { + struct ice_aqc_dis_txq_item *item; struct ice_aqc_dis_txqs *cmd; struct ice_aq_desc desc; enum ice_status status; @@ -3025,16 +3021,16 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, */ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - for (i = 0; i < num_qgrps; ++i) { - /* Calculate the size taken up by the queue IDs in this group */ - sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id); - - /* Add the size of the group header */ - sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id); + for (i = 0, item = qg_list; i < num_qgrps; i++) { + u16 item_size = struct_size(item, q_id, item->num_qs); /* If the num of queues is even, add 2 bytes of padding */ - if ((qg_list[i].num_qs % 2) == 0) - sz += 2; + if ((item->num_qs % 2) == 0) + item_size += 2; + + sz += item_size; + + item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size); } if (buf_size != sz) @@ -3423,24 +3419,32 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, struct ice_sq_cd *cd) { enum ice_status status = ICE_ERR_DOES_NOT_EXIST; - struct ice_aqc_dis_txq_item qg_list; + struct ice_aqc_dis_txq_item *qg_list; struct ice_q_ctx *q_ctx; - u16 i; + struct ice_hw *hw; + u16 i, buf_size; if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return ICE_ERR_CFG; + hw = pi->hw; + if (!num_queues) { /* if queue is disabled already yet the disable queue command * has to be sent to complete the VF reset, then call * ice_aq_dis_lan_txq without any queue information */ if (rst_src) - return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, + return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src, vmvf_num, NULL); return ICE_ERR_CFG; } + buf_size = struct_size(qg_list, q_id, 1); + qg_list = kzalloc(buf_size, GFP_KERNEL); + if (!qg_list) + return ICE_ERR_NO_MEMORY; + mutex_lock(&pi->sched_lock); for (i = 0; i < num_queues; i++) { @@ -3449,23 +3453,22 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, node = ice_sched_find_node_by_teid(pi->root, q_teids[i]); if (!node) continue; - q_ctx = ice_get_lan_q_ctx(pi->hw, 
vsi_handle, tc, q_handles[i]); + q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]); if (!q_ctx) { - ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n", + ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n", q_handles[i]); continue; } if (q_ctx->q_handle != q_handles[i]) { - ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n", + ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n", q_ctx->q_handle, q_handles[i]); continue; } - qg_list.parent_teid = node->info.parent_teid; - qg_list.num_qs = 1; - qg_list.q_id[0] = cpu_to_le16(q_ids[i]); - status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list, - sizeof(qg_list), rst_src, vmvf_num, - cd); + qg_list->parent_teid = node->info.parent_teid; + qg_list->num_qs = 1; + qg_list->q_id[0] = cpu_to_le16(q_ids[i]); + status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src, + vmvf_num, cd); if (status) break; @@ -3473,6 +3476,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues, q_ctx->q_handle = ICE_INVAL_Q_HANDLE; } mutex_unlock(&pi->sched_lock); + kfree(qg_list); return status; } diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h index ee138f9bdc7c..d7e5e6178a21 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb.h @@ -87,7 +87,7 @@ struct ice_lldp_org_tlv { __be16 typelen; __be32 ouisubtype; - u8 tlvinfo[1]; + u8 tlvinfo[]; } __packed; struct ice_cee_tlv_hdr { @@ -109,7 +109,7 @@ struct ice_cee_feat_tlv { #define ICE_CEE_FEAT_TLV_WILLING_M 0x40 #define ICE_CEE_FEAT_TLV_ERR_M 0x20 u8 subtype; - u8 tlvinfo[1]; + u8 tlvinfo[]; }; struct ice_cee_app_prio { diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 4420fc02f7e7..3c217e51b27e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1121,8 +1121,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw) u16 size; u32 i; - size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) * - (ICE_PKG_CNT - 1)); + size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT); pkg_info = kzalloc(size, GFP_KERNEL); if (!pkg_info) return ICE_ERR_NO_MEMORY; @@ -1180,7 +1179,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) u32 seg_count; u32 i; - if (len < sizeof(*pkg)) + if (len < struct_size(pkg, seg_offset, 1)) return ICE_ERR_BUF_TOO_SHORT; if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ || @@ -1195,7 +1194,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) return ICE_ERR_CFG; /* make sure segment array fits in package length */ - if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset))) + if (len < struct_size(pkg, seg_offset, seg_count)) return ICE_ERR_BUF_TOO_SHORT; /* all segments must fit within length */ @@ -1300,7 +1299,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg, } /* Check if FW is compatible with the OS package */ - size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1); + size = struct_size(pkg, pkg_info, ICE_PKG_CNT); pkg = kzalloc(size, GFP_KERNEL); if (!pkg) return ICE_ERR_NO_MEMORY; @@ -1764,13 +1763,13 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port) goto ice_create_tunnel_err; sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM, - sizeof(*sect_rx)); + struct_size(sect_rx, tcam, 1)); if (!sect_rx) goto ice_create_tunnel_err; sect_rx->count = cpu_to_le16(1); sect_tx = ice_pkg_buf_alloc_section(bld, 
ICE_SID_TXPARSER_BOOST_TCAM, - sizeof(*sect_tx)); + struct_size(sect_tx, tcam, 1)); if (!sect_tx) goto ice_create_tunnel_err; sect_tx->count = cpu_to_le16(1); @@ -1847,7 +1846,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all) } /* size of section - there is at least one entry */ - size = struct_size(sect_rx, tcam, count - 1); + size = struct_size(sect_rx, tcam, count); bld = ice_pkg_buf_alloc(hw); if (!bld) { @@ -3324,10 +3323,10 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk, u32 id; id = ice_sect_id(blk, ICE_VEC_TBL); - p = (struct ice_pkg_es *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p) + - vec_size - - sizeof(p->es[0])); + p = ice_pkg_buf_alloc_section(bld, id, + struct_size(p, es, 1) + + vec_size - + sizeof(p->es[0])); if (!p) return ICE_ERR_MAX_LIMIT; @@ -3360,8 +3359,8 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk, u32 id; id = ice_sect_id(blk, ICE_PROF_TCAM); - p = (struct ice_prof_id_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + p = ice_pkg_buf_alloc_section(bld, id, + struct_size(p, entry, 1)); if (!p) return ICE_ERR_MAX_LIMIT; @@ -3396,8 +3395,8 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld, u32 id; id = ice_sect_id(blk, ICE_XLT1); - p = (struct ice_xlt1_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + p = ice_pkg_buf_alloc_section(bld, id, + struct_size(p, value, 1)); if (!p) return ICE_ERR_MAX_LIMIT; @@ -3431,8 +3430,8 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld, case ICE_VSI_MOVE: case ICE_VSIG_REM: id = ice_sect_id(blk, ICE_XLT2); - p = (struct ice_xlt2_section *) - ice_pkg_buf_alloc_section(bld, id, sizeof(*p)); + p = ice_pkg_buf_alloc_section(bld, id, + struct_size(p, value, 1)); if (!p) return ICE_ERR_MAX_LIMIT; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index a6f391eac8ff..c1c99a267a98 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -22,7 +22,7 @@ struct ice_fv { struct ice_pkg_hdr { struct ice_pkg_ver pkg_format_ver; __le32 seg_count; - __le32 seg_offset[1]; + __le32 seg_offset[]; }; /* generic segment */ @@ -53,12 +53,12 @@ struct ice_device_id_entry { struct ice_seg { struct ice_generic_seg_hdr hdr; __le32 device_table_count; - struct ice_device_id_entry device_table[1]; + struct ice_device_id_entry device_table[]; }; struct ice_nvm_table { __le32 table_count; - __le32 vers[1]; + __le32 vers[]; }; struct ice_buf { @@ -68,7 +68,7 @@ struct ice_buf { struct ice_buf_table { __le32 buf_count; - struct ice_buf buf_array[1]; + struct ice_buf buf_array[]; }; /* global metadata specific segment */ @@ -101,11 +101,12 @@ struct ice_section_entry { struct ice_buf_hdr { __le16 section_count; __le16 data_end; - struct ice_section_entry section_entry[1]; + struct ice_section_entry section_entry[]; }; #define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \ - sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz)) + struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\ + (ent_sz)) /* ice package section IDs */ #define ICE_SID_XLT0_SW 10 @@ -198,17 +199,17 @@ struct ice_label { struct ice_label_section { __le16 count; - struct ice_label label[1]; + struct ice_label label[]; }; #define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - sizeof(struct ice_label_section) - sizeof(struct ice_label), \ - sizeof(struct ice_label)) + struct_size((struct ice_label_section *)0, label, 1) - \ + sizeof(struct 
ice_label), sizeof(struct ice_label)) struct ice_sw_fv_section { __le16 count; __le16 base_offset; - struct ice_fv fv[1]; + struct ice_fv fv[]; }; /* The BOOST TCAM stores the match packet header in reverse order, meaning @@ -245,30 +246,30 @@ struct ice_boost_tcam_entry { struct ice_boost_tcam_section { __le16 count; __le16 reserved; - struct ice_boost_tcam_entry tcam[1]; + struct ice_boost_tcam_entry tcam[]; }; #define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \ - sizeof(struct ice_boost_tcam_section) - \ + struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \ sizeof(struct ice_boost_tcam_entry), \ sizeof(struct ice_boost_tcam_entry)) struct ice_xlt1_section { __le16 count; __le16 offset; - u8 value[1]; -} __packed; + u8 value[]; +}; struct ice_xlt2_section { __le16 count; __le16 offset; - __le16 value[1]; + __le16 value[]; }; struct ice_prof_redir_section { __le16 count; __le16 offset; - u8 redir_value[1]; + u8 redir_value[]; }; /* package buffer building */ @@ -327,7 +328,7 @@ struct ice_tunnel_table { struct ice_pkg_es { __le16 count; __le16 offset; - struct ice_fv_word es[1]; + struct ice_fv_word es[]; }; struct ice_es { @@ -461,8 +462,8 @@ struct ice_prof_tcam_entry { struct ice_prof_id_section { __le16 count; - struct ice_prof_tcam_entry entry[1]; -} __packed; + struct ice_prof_tcam_entry entry[]; +}; struct ice_prof_tcam { u32 sid; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 2e3a39cea2c0..8a4c7b8b95df 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1667,7 +1667,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings) u16 q_idx = 0; int err = 0; - qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL); + qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL); if (!qg_buf) return -ENOMEM; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 294257b4d138..1c29cfa1cf33 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -238,7 +238,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, enum ice_status status; u16 buf_size; - buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1); + buf_size = struct_size(buf, teid, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; @@ -825,7 +825,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, size_t buf_size; u32 teid; - buf_size = struct_size(buf, generic, num_nodes - 1); + buf_size = struct_size(buf, generic, num_nodes); buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 0414be3f47dd..ccbe1cc64295 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -29,25 +29,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0, 0x81, 0, 0, 0}; #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \ - (sizeof(struct ice_aqc_sw_rules_elem) - \ - sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ - sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1) + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \ + (DUMMY_ETH_HDR_LEN * \ + sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0]))) #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \ - (sizeof(struct ice_aqc_sw_rules_elem) 
- \ - sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ - sizeof(struct ice_sw_rule_lkup_rx_tx) - 1) + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr)) #define ICE_SW_RULE_LG_ACT_SIZE(n) \ - (sizeof(struct ice_aqc_sw_rules_elem) - \ - sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ - sizeof(struct ice_sw_rule_lg_act) - \ - sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \ - ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act))) + (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \ + ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0]))) #define ICE_SW_RULE_VSI_LIST_SIZE(n) \ - (sizeof(struct ice_aqc_sw_rules_elem) - \ - sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \ - sizeof(struct ice_sw_rule_vsi_list) - \ - sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \ - ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi))) + (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \ + ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0]))) /** * ice_init_def_sw_recp - initialize the recipe book keeping tables @@ -449,7 +441,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, enum ice_status status; u16 buf_len; - buf_len = sizeof(*sw_buf); + buf_len = struct_size(sw_buf, elem, 1); sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL); if (!sw_buf) return ICE_ERR_NO_MEMORY; @@ -856,8 +848,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, m_ent->fltr_info.fwd_id.hw_vsi_id; act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT; - act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & - ICE_LG_ACT_VSI_LIST_ID_M; + act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M; if (m_ent->vsi_count > 1) act |= ICE_LG_ACT_VSI_LIST; lg_act->pdata.lg_act.act[0] = cpu_to_le32(act); @@ -2037,7 +2028,8 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction) hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle); s_rule_size = set ? 
ICE_SW_RULE_RX_TX_ETH_HDR_SIZE : - ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + ICE_SW_RULE_RX_TX_NO_HDR_SIZE; + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); if (!s_rule) return ICE_ERR_NO_MEMORY; @@ -2691,7 +2683,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 buf_len; /* Allocate resource */ - buf_len = sizeof(*buf); + buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; @@ -2729,7 +2721,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items, u16 buf_len; /* Free resource */ - buf_len = sizeof(*buf); + buf_len = struct_size(buf, elem, 1); buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index b6f928c9e9c9..6badfd62dc63 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -206,12 +206,14 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_ring *tx_ring, *rx_ring; struct ice_q_vector *q_vector; + u16 size; int err; if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) return -EINVAL; - qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL); + size = struct_size(qg_buf, txqs, 1); + qg_buf = kzalloc(size, GFP_KERNEL); if (!qg_buf) return -ENOMEM; @@ -228,7 +230,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (ice_is_xdp_ena_vsi(vsi)) { struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx]; - memset(qg_buf, 0, sizeof(*qg_buf)); + memset(qg_buf, 0, size); qg_buf->num_txqs = 1; err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf); if (err) From b97e9d9d67c88bc413a3c27734d45d98d8d52b00 Mon Sep 17 00:00:00 2001 From: Danny Lin Date: Wed, 1 Jul 2020 16:01:52 -0700 Subject: [PATCH 0603/2056] net: sched: Allow changing default qdisc to FQ-PIE Similar to fq_codel and the other qdiscs that can set as default, fq_pie is also suitable for general use without explicit configuration, which makes it a valid choice for this. This is useful in situations where a painless out-of-the-box solution for reducing bufferbloat is desired but fq_codel is not necessarily the best choice. For example, fq_pie can be better for DASH streaming, but there could be more cases where it's the better choice of the two simple AQMs available in the kernel. Signed-off-by: Danny Lin Signed-off-by: David S. Miller --- net/sched/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 84badf00647e..a3b37d88800e 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -468,6 +468,9 @@ choice config DEFAULT_FQ_CODEL bool "Fair Queue Controlled Delay" if NET_SCH_FQ_CODEL + config DEFAULT_FQ_PIE + bool "Flow Queue Proportional Integral controller Enhanced" if NET_SCH_FQ_PIE + config DEFAULT_SFQ bool "Stochastic Fair Queue" if NET_SCH_SFQ @@ -480,6 +483,7 @@ config DEFAULT_NET_SCH default "pfifo_fast" if DEFAULT_PFIFO_FAST default "fq" if DEFAULT_FQ default "fq_codel" if DEFAULT_FQ_CODEL + default "fq_pie" if DEFAULT_FQ_PIE default "sfq" if DEFAULT_SFQ default "pfifo_fast" endif From 767659f65000e6a19fee3b9bf6cda2839968329a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 30 Jun 2020 21:24:44 +0200 Subject: [PATCH 0604/2056] selftests: mptcp: add option to specify size of file to transfer The script generates two random files that are then sent via tcp and mptcp connections. 
In order to compare throughput over consecutive runs add an option to provide the file size on the command line: "-f 128000". Also add an option, -t, to enable tcp tests. This is useful to compare throughput of mptcp connections and tcp connections. Example: run tests with a 4mb file size, 300ms delay 0.01% loss, default gso/tso/gro settings and with large write/blocking io: mptcp_connect.sh -t -f $((4 * 1024 * 1024)) -d 300 -l 0.01% -r 0 -e "" -m mmap Signed-off-by: Florian Westphal Reviewed-by: Matthieu Baerts Signed-off-by: David S. Miller --- .../selftests/net/mptcp/mptcp_connect.sh | 52 ++++++++++++++----- 1 file changed, 39 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index acf02e156d20..8f7145c413b9 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -3,7 +3,7 @@ time_start=$(date +%s) -optstring="S:R:d:e:l:r:h4cm:" +optstring="S:R:d:e:l:r:h4cm:f:t" ret=0 sin="" sout="" @@ -21,6 +21,8 @@ testmode="" sndbuf=0 rcvbuf=0 options_log=true +do_tcp=0 +filesize=0 if [ $tc_loss -eq 100 ];then tc_loss=1% @@ -40,9 +42,11 @@ usage() { echo -e "\t-e: ethtool features to disable, e.g.: \"-e tso -e gso\" (default: randomly disable any of tso/gso/gro)" echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)" echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)" + echo -e "\t-f: size of file to transfer in bytes (default random)" echo -e "\t-S: set sndbuf value (default: use kernel default)" echo -e "\t-R: set rcvbuf value (default: use kernel default)" echo -e "\t-m: test mode (poll, sendfile; default: poll)" + echo -e "\t-t: also run tests with TCP (use twice to non-fallback tcp)" } while getopts "$optstring" option;do @@ -94,6 +98,12 @@ while getopts "$optstring" option;do "m") testmode="$OPTARG" ;; + "f") + filesize="$OPTARG" + ;; + "t") + do_tcp=$((do_tcp+1)) + ;; "?") usage $0 exit 1 @@ -449,20 +459,25 @@ make_file() { local name=$1 local who=$2 + local SIZE=$filesize + local ksize + local rem - local SIZE TSIZE - SIZE=$((RANDOM % (1024 * 8))) - TSIZE=$((SIZE * 1024)) + if [ $SIZE -eq 0 ]; then + local MAXSIZE=$((1024 * 1024 * 8)) + local MINSIZE=$((1024 * 256)) - dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null + SIZE=$(((RANDOM * RANDOM + MINSIZE) % MAXSIZE)) + fi - SIZE=$((RANDOM % 1024)) - SIZE=$((SIZE + 128)) - TSIZE=$((TSIZE + SIZE)) - dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null + ksize=$((SIZE / 1024)) + rem=$((SIZE - (ksize * 1024))) + + dd if=/dev/urandom of="$name" bs=1024 count=$ksize 2> /dev/null + dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$rem 2> /dev/null echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name" - echo "Created $name (size $TSIZE) containing data sent by $who" + echo "Created $name (size $(du -b "$name")) containing data sent by $who" } run_tests_lo() @@ -497,9 +512,11 @@ run_tests_lo() return 1 fi - # don't bother testing fallback tcp except for loopback case. - if [ ${listener_ns} != ${connector_ns} ]; then - return 0 + if [ $do_tcp -eq 0 ]; then + # don't bother testing fallback tcp except for loopback case. 
+ if [ ${listener_ns} != ${connector_ns} ]; then + return 0 + fi fi do_transfer ${listener_ns} ${connector_ns} MPTCP TCP ${connect_addr} ${local_addr} @@ -516,6 +533,15 @@ run_tests_lo() return 1 fi + if [ $do_tcp -gt 1 ] ;then + do_transfer ${listener_ns} ${connector_ns} TCP TCP ${connect_addr} ${local_addr} + lret=$? + if [ $lret -ne 0 ]; then + ret=$lret + return 1 + fi + fi + return 0 } From a6b118febbab3f6454057612b355d0b667c1fafa Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 30 Jun 2020 21:24:45 +0200 Subject: [PATCH 0605/2056] mptcp: add receive buffer auto-tuning When mptcp is used, userspace doesn't read from the tcp (subflow) socket but from the parent (mptcp) socket receive queue. skbs are moved from the subflow socket to the mptcp rx queue either from 'data_ready' callback (if mptcp socket can be locked), a work queue, or the socket receive function. This means tcp_rcv_space_adjust() is never called and thus no receive buffer size auto-tuning is done. An earlier (not merged) patch added tcp_rcv_space_adjust() calls to the function that moves skbs from subflow to mptcp socket. While this enabled autotuning, it also meant tuning was done even if userspace was reading the mptcp socket very slowly. This adds mptcp_rcv_space_adjust() and calls it after userspace has read data from the mptcp socket rx queue. Its very similar to tcp_rcv_space_adjust, with two differences: 1. The rtt estimate is the largest one observed on a subflow 2. The rcvbuf size and window clamp of all subflows is adjusted to the mptcp-level rcvbuf. Otherwise, we get spurious drops at tcp (subflow) socket level if the skbs are not moved to the mptcp socket fast enough. Before: time mptcp_connect.sh -t -f $((4*1024*1024)) -d 300 -l 0.01% -r 0 -e "" -m mmap [..] ns4 MPTCP -> ns3 (10.0.3.2:10108 ) MPTCP (duration 40823ms) [ OK ] ns4 MPTCP -> ns3 (10.0.3.2:10109 ) TCP (duration 23119ms) [ OK ] ns4 TCP -> ns3 (10.0.3.2:10110 ) MPTCP (duration 5421ms) [ OK ] ns4 MPTCP -> ns3 (dead:beef:3::2:10111) MPTCP (duration 41446ms) [ OK ] ns4 MPTCP -> ns3 (dead:beef:3::2:10112) TCP (duration 23427ms) [ OK ] ns4 TCP -> ns3 (dead:beef:3::2:10113) MPTCP (duration 5426ms) [ OK ] Time: 1396 seconds After: ns4 MPTCP -> ns3 (10.0.3.2:10108 ) MPTCP (duration 5417ms) [ OK ] ns4 MPTCP -> ns3 (10.0.3.2:10109 ) TCP (duration 5427ms) [ OK ] ns4 TCP -> ns3 (10.0.3.2:10110 ) MPTCP (duration 5422ms) [ OK ] ns4 MPTCP -> ns3 (dead:beef:3::2:10111) MPTCP (duration 5415ms) [ OK ] ns4 MPTCP -> ns3 (dead:beef:3::2:10112) TCP (duration 5422ms) [ OK ] ns4 TCP -> ns3 (dead:beef:3::2:10113) MPTCP (duration 5423ms) [ OK ] Time: 296 seconds Signed-off-by: Florian Westphal Reviewed-by: Matthieu Baerts Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 123 ++++++++++++++++++++++++++++++++++++++++--- net/mptcp/protocol.h | 7 +++ net/mptcp/subflow.c | 5 +- 3 files changed, 127 insertions(+), 8 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 28ec26d97f96..fa137a9c42d1 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -179,13 +179,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, return false; } - if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { - int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf); - - if (rcvbuf > sk->sk_rcvbuf) - sk->sk_rcvbuf = rcvbuf; - } - tp = tcp_sk(ssk); do { u32 map_remaining, offset; @@ -916,6 +909,100 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, return copied; } +/* receive buffer autotuning. See tcp_rcv_space_adjust for more information. 
+ * + * Only difference: Use highest rtt estimate of the subflows in use. + */ +static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) +{ + struct mptcp_subflow_context *subflow; + struct sock *sk = (struct sock *)msk; + u32 time, advmss = 1; + u64 rtt_us, mstamp; + + sock_owned_by_me(sk); + + if (copied <= 0) + return; + + msk->rcvq_space.copied += copied; + + mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC); + time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); + + rtt_us = msk->rcvq_space.rtt_us; + if (rtt_us && time < (rtt_us >> 3)) + return; + + rtt_us = 0; + mptcp_for_each_subflow(msk, subflow) { + const struct tcp_sock *tp; + u64 sf_rtt_us; + u32 sf_advmss; + + tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); + + sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us); + sf_advmss = READ_ONCE(tp->advmss); + + rtt_us = max(sf_rtt_us, rtt_us); + advmss = max(sf_advmss, advmss); + } + + msk->rcvq_space.rtt_us = rtt_us; + if (time < (rtt_us >> 3) || rtt_us == 0) + return; + + if (msk->rcvq_space.copied <= msk->rcvq_space.space) + goto new_measure; + + if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && + !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { + int rcvmem, rcvbuf; + u64 rcvwin, grow; + + rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; + + grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); + + do_div(grow, msk->rcvq_space.space); + rcvwin += (grow << 1); + + rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER); + while (tcp_win_from_space(sk, rcvmem) < advmss) + rcvmem += 128; + + do_div(rcvwin, advmss); + rcvbuf = min_t(u64, rcvwin * rcvmem, + sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); + + if (rcvbuf > sk->sk_rcvbuf) { + u32 window_clamp; + + window_clamp = tcp_win_from_space(sk, rcvbuf); + WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); + + /* Make subflows follow along. If we do not do this, we + * get drops at subflow level if skbs can't be moved to + * the mptcp rx queue fast enough (announced rcv_win can + * exceed ssk->sk_rcvbuf). 
+ */ + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk; + + ssk = mptcp_subflow_tcp_sock(subflow); + WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); + tcp_sk(ssk)->window_clamp = window_clamp; + } + } + } + + msk->rcvq_space.space = msk->rcvq_space.copied; +new_measure: + msk->rcvq_space.copied = 0; + msk->rcvq_space.time = mstamp; +} + static bool __mptcp_move_skbs(struct mptcp_sock *msk) { unsigned int moved = 0; @@ -1028,6 +1115,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, set_bit(MPTCP_DATA_READY, &msk->flags); } out_err: + mptcp_rcv_space_adjust(msk, copied); + release_sock(sk); return copied; } @@ -1241,6 +1330,7 @@ static int mptcp_init_sock(struct sock *sk) return ret; sk_sockets_allocated_inc(sk); + sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2]; return 0; @@ -1423,6 +1513,22 @@ struct sock *mptcp_sk_clone(const struct sock *sk, return nsk; } +void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) +{ + const struct tcp_sock *tp = tcp_sk(ssk); + + msk->rcvq_space.copied = 0; + msk->rcvq_space.rtt_us = 0; + + msk->rcvq_space.time = tp->tcp_mstamp; + + /* initial rcv_space offering made to peer */ + msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, + TCP_INIT_CWND * tp->advmss); + if (msk->rcvq_space.space == 0) + msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; +} + static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, bool kern) { @@ -1471,6 +1577,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, list_add(&subflow->node, &msk->conn_list); inet_sk_state_store(newsk, TCP_ESTABLISHED); + mptcp_rcv_space_init(msk, ssk); bh_unlock_sock(new_mptcp_sock); __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); @@ -1631,6 +1738,8 @@ void mptcp_finish_connect(struct sock *ssk) atomic64_set(&msk->snd_una, msk->write_seq); mptcp_pm_new_connection(msk, 0); + + mptcp_rcv_space_init(msk, ssk); } static void mptcp_sock_graft(struct sock *sk, struct socket *parent) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 1d05d9841b5c..a6412ff0fddb 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -209,6 +209,12 @@ struct mptcp_sock { struct socket *subflow; /* outgoing connect/listener/!mp_capable */ struct sock *first; struct mptcp_pm_data pm; + struct { + u32 space; /* bytes copied in last measurement window */ + u32 copied; /* bytes copied in this measurement window */ + u64 time; /* start time of measurement window */ + u64 rtt_us; /* last maximum rtt of subflows */ + } rcvq_space; }; #define mptcp_for_each_subflow(__msk, __subflow) \ @@ -369,6 +375,7 @@ void mptcp_get_options(const struct sk_buff *skb, struct mptcp_options_received *mp_opt); void mptcp_finish_connect(struct sock *sk); +void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk); void mptcp_data_ready(struct sock *sk, struct sock *ssk); bool mptcp_finish_join(struct sock *sk); void mptcp_data_acked(struct sock *sk); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 664aa9158363..e1e19c76e267 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -225,8 +225,10 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) pr_fallback(mptcp_sk(subflow->conn)); } - if (mptcp_check_fallback(sk)) + if (mptcp_check_fallback(sk)) { + mptcp_rcv_space_init(mptcp_sk(parent), sk); return; + } if (subflow->mp_capable) { pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk), @@ -1118,6 +1120,7 
@@ static void subflow_state_change(struct sock *sk) if (subflow_simultaneous_connect(sk)) { mptcp_do_fallback(sk); + mptcp_rcv_space_init(mptcp_sk(parent), sk); pr_fallback(mptcp_sk(parent)); subflow->conn_finished = 1; if (inet_sk_state_load(parent) == TCP_SYN_SENT) { From 99126abec5e5778583b959cb164f1888374917d3 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 1 Jul 2020 17:48:52 -0700 Subject: [PATCH 0606/2056] bpf: selftests: A few improvements to network_helpers.c This patch makes a few changes to the network_helpers.c 1) Enforce SO_RCVTIMEO and SO_SNDTIMEO This patch enforces timeout to the network fds through setsockopt SO_RCVTIMEO and SO_SNDTIMEO. It will remove the need for SOCK_NONBLOCK that requires a more demanding timeout logic with epoll/select, e.g. epoll_create, epoll_ctrl, and then epoll_wait for timeout. That removes the need for connect_wait() from the cgroup_skb_sk_lookup.c. The needed change is made in cgroup_skb_sk_lookup.c. 2) start_server(): Add optional addr_str and port to start_server(). That removes the need of the start_server_with_port(). The caller can pass addr_str==NULL and/or port==0. I have a future tcp-hdr-opt test that will pass a non-NULL addr_str and it is in general useful for other future tests. "int timeout_ms" is also added to control the timeout on the "accept(listen_fd)". 3) connect_to_fd(): Fully use the server_fd. The server sock address has already been obtained from getsockname(server_fd). The sockaddr includes the family, so the "int family" arg is redundant. Since the server address is obtained from server_fd, there is little reason not to get the server's socket type from the server_fd also. getsockopt(server_fd) can be used to do that, so "int type" arg is also removed. "int timeout_ms" is added. 4) connect_fd_to_fd(): "int timeout_ms" is added. Some code is also refactored to connect_fd_to_addr() which is shared with connect_to_fd(). 5) Preserve errno: Some callers need to check errno, e.g. cgroup_skb_sk_lookup.c. Make changes to do it more consistently in save_errno_close() and log_err(). Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200702004852.2103003-1-kafai@fb.com --- tools/testing/selftests/bpf/network_helpers.c | 185 ++++++++++-------- tools/testing/selftests/bpf/network_helpers.h | 9 +- .../bpf/prog_tests/cgroup_skb_sk_lookup.c | 12 +- .../bpf/prog_tests/connect_force_port.c | 10 +- .../bpf/prog_tests/load_bytes_relative.c | 4 +- .../selftests/bpf/prog_tests/tcp_rtt.c | 4 +- 6 files changed, 124 insertions(+), 100 deletions(-) diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index e36dd1a1780d..acd08715be2e 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -7,8 +7,6 @@ #include -#include - #include #include #include @@ -17,8 +15,13 @@ #include "network_helpers.h" #define clean_errno() (errno == 0 ? "None" : strerror(errno)) -#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ - __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) +#define log_err(MSG, ...) 
({ \ + int __save = errno; \ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, clean_errno(), \ + ##__VA_ARGS__); \ + errno = __save; \ +}) struct ipv4_packet pkt_v4 = { .eth.h_proto = __bpf_constant_htons(ETH_P_IP), @@ -37,7 +40,34 @@ struct ipv6_packet pkt_v6 = { .tcp.doff = 5, }; -int start_server_with_port(int family, int type, __u16 port) +static int settimeo(int fd, int timeout_ms) +{ + struct timeval timeout = { .tv_sec = 3 }; + + if (timeout_ms > 0) { + timeout.tv_sec = timeout_ms / 1000; + timeout.tv_usec = (timeout_ms % 1000) * 1000; + } + + if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_RCVTIMEO"); + return -1; + } + + if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout, + sizeof(timeout))) { + log_err("Failed to set SO_SNDTIMEO"); + return -1; + } + + return 0; +} + +#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; }) + +int start_server(int family, int type, const char *addr_str, __u16 port, + int timeout_ms) { struct sockaddr_storage addr = {}; socklen_t len; @@ -48,120 +78,119 @@ int start_server_with_port(int family, int type, __u16 port) sin->sin_family = AF_INET; sin->sin_port = htons(port); + if (addr_str && + inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) { + log_err("inet_pton(AF_INET, %s)", addr_str); + return -1; + } len = sizeof(*sin); } else { struct sockaddr_in6 *sin6 = (void *)&addr; sin6->sin6_family = AF_INET6; sin6->sin6_port = htons(port); + if (addr_str && + inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) { + log_err("inet_pton(AF_INET6, %s)", addr_str); + return -1; + } len = sizeof(*sin6); } - fd = socket(family, type | SOCK_NONBLOCK, 0); + fd = socket(family, type, 0); if (fd < 0) { log_err("Failed to create server socket"); return -1; } + if (settimeo(fd, timeout_ms)) + goto error_close; + if (bind(fd, (const struct sockaddr *)&addr, len) < 0) { log_err("Failed to bind socket"); - close(fd); - return -1; + goto error_close; } if (type == SOCK_STREAM) { if (listen(fd, 1) < 0) { log_err("Failed to listed on socket"); - close(fd); - return -1; + goto error_close; } } return fd; + +error_close: + save_errno_close(fd); + return -1; } -int start_server(int family, int type) +static int connect_fd_to_addr(int fd, + const struct sockaddr_storage *addr, + socklen_t addrlen) { - return start_server_with_port(family, type, 0); -} - -static const struct timeval timeo_sec = { .tv_sec = 3 }; -static const size_t timeo_optlen = sizeof(timeo_sec); - -int connect_to_fd(int family, int type, int server_fd) -{ - int fd, save_errno; - - fd = socket(family, type, 0); - if (fd < 0) { - log_err("Failed to create client socket"); - return -1; - } - - if (connect_fd_to_fd(fd, server_fd) < 0 && errno != EINPROGRESS) { - save_errno = errno; - close(fd); - errno = save_errno; - return -1; - } - - return fd; -} - -int connect_fd_to_fd(int client_fd, int server_fd) -{ - struct sockaddr_storage addr; - socklen_t len = sizeof(addr); - int save_errno; - - if (setsockopt(client_fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec, - timeo_optlen)) { - log_err("Failed to set SO_RCVTIMEO"); - return -1; - } - - if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { - log_err("Failed to get server addr"); - return -1; - } - - if (connect(client_fd, (const struct sockaddr *)&addr, len) < 0) { - if (errno != EINPROGRESS) { - save_errno = errno; - log_err("Failed to connect to server"); - errno = save_errno; - } + if (connect(fd, (const struct sockaddr *)addr, addrlen)) { + 
log_err("Failed to connect to server"); return -1; } return 0; } -int connect_wait(int fd) +int connect_to_fd(int server_fd, int timeout_ms) { - struct epoll_event ev = {}, events[2]; - int timeout_ms = 1000; - int efd, nfd; + struct sockaddr_storage addr; + struct sockaddr_in *addr_in; + socklen_t addrlen, optlen; + int fd, type; - efd = epoll_create1(EPOLL_CLOEXEC); - if (efd < 0) { - log_err("Failed to open epoll fd"); + optlen = sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); return -1; } - ev.events = EPOLLRDHUP | EPOLLOUT; - ev.data.fd = fd; - - if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev) < 0) { - log_err("Failed to register fd=%d on epoll fd=%d", fd, efd); - close(efd); + addrlen = sizeof(addr); + if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { + log_err("Failed to get server addr"); return -1; } - nfd = epoll_wait(efd, events, ARRAY_SIZE(events), timeout_ms); - if (nfd < 0) - log_err("Failed to wait for I/O event on epoll fd=%d", efd); + addr_in = (struct sockaddr_in *)&addr; + fd = socket(addr_in->sin_family, type, 0); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } - close(efd); - return nfd; + if (settimeo(fd, timeout_ms)) + goto error_close; + + if (connect_fd_to_addr(fd, &addr, addrlen)) + goto error_close; + + return fd; + +error_close: + save_errno_close(fd); + return -1; +} + +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) +{ + struct sockaddr_storage addr; + socklen_t len = sizeof(addr); + + if (settimeo(client_fd, timeout_ms)) + return -1; + + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { + log_err("Failed to get server addr"); + return -1; + } + + if (connect_fd_to_addr(client_fd, &addr, len)) + return -1; + + return 0; } diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 6a8009605670..f580e82fda58 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -33,10 +33,9 @@ struct ipv6_packet { } __packed; extern struct ipv6_packet pkt_v6; -int start_server(int family, int type); -int start_server_with_port(int family, int type, __u16 port); -int connect_to_fd(int family, int type, int server_fd); -int connect_fd_to_fd(int client_fd, int server_fd); -int connect_wait(int client_fd); +int start_server(int family, int type, const char *addr, __u16 port, + int timeout_ms); +int connect_to_fd(int server_fd, int timeout_ms); +int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); #endif diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c index 059047af7df3..464edc1c1708 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c @@ -13,7 +13,7 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk) socklen_t addr_len = sizeof(addr); __u32 duration = 0; - serv_sk = start_server(AF_INET6, SOCK_STREAM); + serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); if (CHECK(serv_sk < 0, "start_server", "failed to start server\n")) return; @@ -24,17 +24,13 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk) *g_serv_port = addr.sin6_port; /* Client outside of test cgroup should fail to connect by timeout. 
*/ - err = connect_fd_to_fd(out_sk, serv_sk); + err = connect_fd_to_fd(out_sk, serv_sk, 1000); if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd", "unexpected result err %d errno %d\n", err, errno)) goto cleanup; - err = connect_wait(out_sk); - if (CHECK(err, "connect_wait", "unexpected result %d\n", err)) - goto cleanup; - /* Client inside test cgroup should connect just fine. */ - in_sk = connect_to_fd(AF_INET6, SOCK_STREAM, serv_sk); + in_sk = connect_to_fd(serv_sk, 0); if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno)) goto cleanup; @@ -85,7 +81,7 @@ void test_cgroup_skb_sk_lookup(void) * differs from that of testing cgroup. Moving selftests process to * testing cgroup won't change cgroup id of an already created socket. */ - out_sk = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0); + out_sk = socket(AF_INET6, SOCK_STREAM, 0); if (CHECK_FAIL(out_sk < 0)) return; diff --git a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c index 17bbf76812ca..9229db2f5ca5 100644 --- a/tools/testing/selftests/bpf/prog_tests/connect_force_port.c +++ b/tools/testing/selftests/bpf/prog_tests/connect_force_port.c @@ -114,7 +114,7 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type) goto close_bpf_object; } - fd = connect_to_fd(family, type, server_fd); + fd = connect_to_fd(server_fd, 0); if (fd < 0) { err = -1; goto close_bpf_object; @@ -137,25 +137,25 @@ void test_connect_force_port(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server_with_port(AF_INET, SOCK_STREAM, 60123); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET6, SOCK_STREAM, 60124); + server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET, SOCK_DGRAM, 60123); + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM)); close(server_fd); - server_fd = start_server_with_port(AF_INET6, SOCK_DGRAM, 60124); + server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM)); diff --git a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c index c1168e4a9036..5a2a689dbb68 100644 --- a/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c +++ b/tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c @@ -23,7 +23,7 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server(AF_INET, SOCK_STREAM); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; @@ -49,7 +49,7 @@ void test_load_bytes_relative(void) if (CHECK_FAIL(err)) goto close_bpf_object; - client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd); + client_fd = connect_to_fd(server_fd, 0); if (CHECK_FAIL(client_fd < 0)) goto close_bpf_object; close(client_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c 
b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c index 9013a0c01eed..d207e968e6b1 100644 --- a/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c +++ b/tools/testing/selftests/bpf/prog_tests/tcp_rtt.c @@ -118,7 +118,7 @@ static int run_test(int cgroup_fd, int server_fd) goto close_bpf_object; } - client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd); + client_fd = connect_to_fd(server_fd, 0); if (client_fd < 0) { err = -1; goto close_bpf_object; @@ -161,7 +161,7 @@ void test_tcp_rtt(void) if (CHECK_FAIL(cgroup_fd < 0)) return; - server_fd = start_server(AF_INET, SOCK_STREAM); + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0); if (CHECK_FAIL(server_fd < 0)) goto close_cgroup_fd; From 811d7e375d08312dba23f3b6bf7e58ec14aa5dcb Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Wed, 1 Jul 2020 17:48:58 -0700 Subject: [PATCH 0607/2056] bpf: selftests: Restore netns after each test It is common for networking tests creating its netns and making its own setting under this new netns (e.g. changing tcp sysctl). If the test forgot to restore to the original netns, it would affect the result of other tests. This patch saves the original netns at the beginning and then restores it after every test. Since the restore "setns()" is not expensive, it does it on all tests without tracking if a test has created a new netns or not. The new restore_netns() could also be done in test__end_subtest() such that each subtest will get an automatic netns reset. However, the individual test would lose flexibility to have total control on netns for its own subtests. In some cases, forcing a test to do unnecessary netns re-configure for each subtest is time consuming. e.g. In my vm, forcing netns re-configure on each subtest in sk_assign.c increased the runtime from 1s to 8s. On top of that, test_progs.c is also doing per-test (instead of per-subtest) cleanup for cgroup. Thus, this patch also does per-test restore_netns(). The only existing per-subtest cleanup is reset_affinity() and no test is depending on this. Thus, it is removed from test__end_subtest() to give a consistent expectation to the individual tests. test_progs.c only ensures any affinity/netns/cgroup change made by an earlier test does not affect the following tests. Signed-off-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200702004858.2103728-1-kafai@fb.com --- tools/testing/selftests/bpf/test_progs.c | 23 +++++++++++++++++++++-- tools/testing/selftests/bpf/test_progs.h | 2 ++ 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index ef05d2f0e7bb..104e833d0087 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -121,6 +121,24 @@ static void reset_affinity() { } } +static void save_netns(void) +{ + env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY); + if (env.saved_netns_fd == -1) { + perror("open(/proc/self/ns/net)"); + exit(-1); + } +} + +static void restore_netns(void) +{ + if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { + stdio_restore(); + perror("setns(CLONE_NEWNS)"); + exit(-1); + } +} + void test__end_subtest() { struct prog_test_def *test = env.test; @@ -138,8 +156,6 @@ void test__end_subtest() test->test_num, test->subtest_num, test->subtest_name, sub_error_cnt ? 
"FAIL" : "OK"); - reset_affinity(); - free(test->subtest_name); test->subtest_name = NULL; } @@ -655,6 +671,7 @@ int main(int argc, char **argv) return -1; } + save_netns(); stdio_hijack(); for (i = 0; i < prog_test_cnt; i++) { struct prog_test_def *test = &prog_test_defs[i]; @@ -696,6 +713,7 @@ int main(int argc, char **argv) test->error_cnt ? "FAIL" : "OK"); reset_affinity(); + restore_netns(); if (test->need_cgroup_cleanup) cleanup_cgroup_environment(); } @@ -719,6 +737,7 @@ int main(int argc, char **argv) free_str_set(&env.subtest_selector.blacklist); free_str_set(&env.subtest_selector.whitelist); free(env.subtest_selector.num_set); + close(env.saved_netns_fd); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) return EXIT_FAILURE; diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index ec31f382e7fd..6e09bf738473 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -80,6 +80,8 @@ struct test_env { int sub_succ_cnt; /* successful sub-tests */ int fail_cnt; /* total failed tests + sub-tests */ int skip_cnt; /* skipped tests */ + + int saved_netns_fd; }; extern struct test_env env; From e4266b991fead8eb996688e82ff39f6cc59ef7dd Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 2 Jul 2020 10:13:05 +0200 Subject: [PATCH 0608/2056] bridge: uapi: mrp: Extend MRP attributes to get the status Add MRP attribute IFLA_BRIDGE_MRP_INFO to allow the userspace to get the current state of the MRP instances. This is a nested attribute that contains other attributes like, ring id, index of primary and secondary port, priority, ring state, ring role. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/uapi/linux/if_bridge.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index caa6914a3e53..c114c1c2bd53 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -166,6 +166,7 @@ enum { IFLA_BRIDGE_MRP_RING_STATE, IFLA_BRIDGE_MRP_RING_ROLE, IFLA_BRIDGE_MRP_START_TEST, + IFLA_BRIDGE_MRP_INFO, __IFLA_BRIDGE_MRP_MAX, }; @@ -228,6 +229,22 @@ enum { #define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1) +enum { + IFLA_BRIDGE_MRP_INFO_UNSPEC, + IFLA_BRIDGE_MRP_INFO_RING_ID, + IFLA_BRIDGE_MRP_INFO_P_IFINDEX, + IFLA_BRIDGE_MRP_INFO_S_IFINDEX, + IFLA_BRIDGE_MRP_INFO_PRIO, + IFLA_BRIDGE_MRP_INFO_RING_STATE, + IFLA_BRIDGE_MRP_INFO_RING_ROLE, + IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, + IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + __IFLA_BRIDGE_MRP_INFO_MAX, +}; + +#define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1) + struct br_mrp_instance { __u32 ring_id; __u32 p_ifindex; From df42ef227dc4fdb7acc8d755e029ed27a2e814f8 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 2 Jul 2020 10:13:06 +0200 Subject: [PATCH 0609/2056] bridge: mrp: Add br_mrp_fill_info Add the function br_mrp_fill_info which populates the MRP attributes regarding the status of each MRP instance. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_mrp_netlink.c | 64 +++++++++++++++++++++++++++++++++++++ net/bridge/br_private.h | 7 ++++ 2 files changed, 71 insertions(+) diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c index 34b3a8776991..c4f5c356811f 100644 --- a/net/bridge/br_mrp_netlink.c +++ b/net/bridge/br_mrp_netlink.c @@ -304,6 +304,70 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p, return 0; } +int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br) +{ + struct nlattr *tb, *mrp_tb; + struct br_mrp *mrp; + + mrp_tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP); + if (!mrp_tb) + return -EMSGSIZE; + + list_for_each_entry_rcu(mrp, &br->mrp_list, list) { + struct net_bridge_port *p; + + tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO); + if (!tb) + goto nla_info_failure; + + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ID, + mrp->ring_id)) + goto nla_put_failure; + + p = rcu_dereference(mrp->p_port); + if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_P_IFINDEX, + p->dev->ifindex)) + goto nla_put_failure; + + p = rcu_dereference(mrp->s_port); + if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_S_IFINDEX, + p->dev->ifindex)) + goto nla_put_failure; + + if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO, + mrp->prio)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_STATE, + mrp->ring_state)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ROLE, + mrp->ring_role)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, + mrp->test_interval)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, + mrp->test_max_miss)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + mrp->test_monitor)) + goto nla_put_failure; + + nla_nest_end(skb, tb); + } + nla_nest_end(skb, mrp_tb); + + return 0; + +nla_put_failure: + nla_nest_cancel(skb, tb); + +nla_info_failure: + nla_nest_cancel(skb, mrp_tb); + + return -EMSGSIZE; +} + int br_mrp_port_open(struct net_device *dev, u8 loc) { struct net_bridge_port *p; diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6a7d8e218ae7..65d2c163a24a 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -1317,6 +1317,7 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p, int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb); bool br_mrp_enabled(struct net_bridge *br); void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p); +int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br); #else static inline int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p, struct nlattr *attr, int cmd, @@ -1339,6 +1340,12 @@ static inline void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p) { } + +static inline int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br) +{ + return 0; +} + #endif /* br_netlink.c */ From 36a8e8e26542056bbd7eb5e047cadee30587d230 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Thu, 2 Jul 2020 10:13:07 +0200 Subject: [PATCH 0610/2056] bridge: Extend br_fill_ifinfo to return MPR status This patch extends the function br_fill_ifinfo to return also the MRP status for each instance on a bridge. It also adds a new filter RTEXT_FILTER_MRP to return the MRP status only when this is set, not to interfer with the vlans. The MRP status is return only on the bridge interfaces. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
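Not part of the patch: a minimal userspace sketch of how a dump request would opt in to the new MRP data, assuming the usual AF_BRIDGE RTM_GETLINK dump path that reaches br_getlink(); the function name request_mrp_status() is made up and error handling is kept minimal. The request carries IFLA_EXT_MASK set to the RTEXT_FILTER_MRP flag introduced below, and the reply nests the per-instance data under IFLA_AF_SPEC / IFLA_BRIDGE_MRP / IFLA_BRIDGE_MRP_INFO.

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Returns a netlink socket with the dump request queued, or -1 on error;
 * the caller recv()s the dump and walks the nested MRP attributes.
 */
int request_mrp_status(void)
{
	struct {
		struct nlmsghdr  nlh;
		struct ifinfomsg ifm;
		struct rtattr    ext;
		__u32            mask;
	} req;
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len   = sizeof(req);
	req.nlh.nlmsg_type  = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.ifm.ifi_family  = PF_BRIDGE;
	req.ext.rta_type    = IFLA_EXT_MASK;
	req.ext.rta_len     = RTA_LENGTH(sizeof(__u32));
	req.mask            = RTEXT_FILTER_MRP;	/* flag added by this series */

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}

	return fd;
}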
Miller --- include/uapi/linux/rtnetlink.h | 1 + net/bridge/br_netlink.c | 25 ++++++++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 879e64950a0a..9b814c92de12 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -778,6 +778,7 @@ enum { #define RTEXT_FILTER_BRVLAN (1 << 1) #define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2) #define RTEXT_FILTER_SKIP_STATS (1 << 3) +#define RTEXT_FILTER_MRP (1 << 4) /* End of information exported to user level */ diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 240e260e3461..c532fa65c983 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -453,6 +453,28 @@ static int br_fill_ifinfo(struct sk_buff *skb, rcu_read_unlock(); if (err) goto nla_put_failure; + + nla_nest_end(skb, af); + } + + if (filter_mask & RTEXT_FILTER_MRP) { + struct nlattr *af; + int err; + + if (!br_mrp_enabled(br) || port) + goto done; + + af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); + if (!af) + goto nla_put_failure; + + rcu_read_lock(); + err = br_mrp_fill_info(skb, br); + rcu_read_unlock(); + + if (err) + goto nla_put_failure; + nla_nest_end(skb, af); } @@ -516,7 +538,8 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_bridge_port *port = br_port_get_rtnl(dev); if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) && - !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) + !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) && + !(filter_mask & RTEXT_FILTER_MRP)) return 0; return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, From fec371f624632bfa6ae4374fdb6f885f9961c42d Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 2 Jul 2020 12:05:58 +0300 Subject: [PATCH 0611/2056] net: macb: do not set again bit 0 of queue_mask Bit 0 of queue_mask is set at the beginning of macb_probe_queues() function. Do not set it again after reading DGFG6 but instead use "|=" operator. Signed-off-by: Claudiu Beznea Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 52582e8ed90e..1bc2810f3dc4 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3497,9 +3497,7 @@ static void macb_probe_queues(void __iomem *mem, return; /* bit 0 is never set but queue 0 always exists */ - *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff; - - *queue_mask |= 0x1; + *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) if (*queue_mask & (1 << hw_q)) From b7ab39b35935982d8306e3a97c57d8a232968f86 Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 2 Jul 2020 12:05:59 +0300 Subject: [PATCH 0612/2056] net: macb: use hweight32() to count set bits in queue_mask Use hweight32() to count set bits in queue_mask. Signed-off-by: Claudiu Beznea Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 1bc2810f3dc4..7668b6ae8822 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3482,8 +3482,6 @@ static void macb_probe_queues(void __iomem *mem, unsigned int *queue_mask, unsigned int *num_queues) { - unsigned int hw_q; - *queue_mask = 0x1; *num_queues = 1; @@ -3498,10 +3496,7 @@ static void macb_probe_queues(void __iomem *mem, /* bit 0 is never set but queue 0 always exists */ *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; - - for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q) - if (*queue_mask & (1 << hw_q)) - (*num_queues)++; + *num_queues = hweight32(*queue_mask); } static int macb_clk_init(struct platform_device *pdev, struct clk **pclk, From 580d395cb9ab60261586bf8ddd5d54de2f29d4fa Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 2 Jul 2020 12:06:00 +0300 Subject: [PATCH 0613/2056] net: macb: do not initialize queue variable Do not initialize queue variable. It is already initialized in for loops. Signed-off-by: Claudiu Beznea Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 7668b6ae8822..c31c15e99e3c 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1467,7 +1467,7 @@ static void macb_hresp_error_task(unsigned long data) { struct macb *bp = (struct macb *)data; struct net_device *dev = bp->dev; - struct macb_queue *queue = bp->queues; + struct macb_queue *queue; unsigned int q; u32 ctrl; From 8932b5a533db72317181ab7f9e7290f9dd3f5d1b Mon Sep 17 00:00:00 2001 From: Claudiu Beznea Date: Thu, 2 Jul 2020 12:06:01 +0300 Subject: [PATCH 0614/2056] net: macb: remove is_udp variable Remove is_udp variable that is used in only one place and use ip_hdr(skb)->protocol == IPPROTO_UDP check instead. Signed-off-by: Claudiu Beznea Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index c31c15e99e3c..6c99a519ab06 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1933,7 +1933,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags; unsigned int desc_cnt, nr_frags, frag_size, f; unsigned int hdrlen; - bool is_lso, is_udp = 0; + bool is_lso; netdev_tx_t ret = NETDEV_TX_OK; if (macb_clear_csum(skb)) { @@ -1949,10 +1949,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev) is_lso = (skb_shinfo(skb)->gso_size != 0); if (is_lso) { - is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP); - /* length of headers */ - if (is_udp) + if (ip_hdr(skb)->protocol == IPPROTO_UDP) /* only queue eth + ip headers separately for UDP */ hdrlen = skb_transport_offset(skb); else From ffa76e38b71415aa23d6366ab7d715a5c122755f Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 2 Jul 2020 17:18:10 +0800 Subject: [PATCH 0615/2056] ksz884x: mark pcidev_suspend() as __maybe_unused In certain configurations without power management support, gcc report the following warning: drivers/net/ethernet/micrel/ksz884x.c:7182:12: warning: 'pcidev_suspend' defined but not used [-Wunused-function] 7182 | static int pcidev_suspend(struct device *dev_d) | ^~~~~~~~~~~~~~ Mark pcidev_suspend() as __maybe_unused to make it clear. Fixes: 64120615d140 ("ksz884x: use generic power management") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ksz884x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 24901342ecc0..2ce7304d3753 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -7179,7 +7179,7 @@ static int __maybe_unused pcidev_resume(struct device *dev_d) return 0; } -static int pcidev_suspend(struct device *dev_d) +static int __maybe_unused pcidev_suspend(struct device *dev_d) { int i; struct platform_info *info = dev_get_drvdata(dev_d); From 4e1a69116869b26d244c2522bd1631967b62ffce Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 2 Jul 2020 17:19:46 +0800 Subject: [PATCH 0616/2056] mlx4: Mark PM functions as __maybe_unused In certain configurations without power management support, the following warnings happen: drivers/net/ethernet/mellanox/mlx4/main.c:4388:12: warning: 'mlx4_resume' defined but not used [-Wunused-function] 4388 | static int mlx4_resume(struct device *dev_d) | ^~~~~~~~~~~ drivers/net/ethernet/mellanox/mlx4/main.c:4373:12: warning: 'mlx4_suspend' defined but not used [-Wunused-function] 4373 | static int mlx4_suspend(struct device *dev_d) | ^~~~~~~~~~~~ Mark these functions as __maybe_unused to make it clear to the compiler that this is going to happen based on the configuration, which is the standard for these types of functions. Fixes: 0e3e206a3e12 ("mlx4: use generic power management") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 4cae7db8d49c..954c22c79f6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4370,7 +4370,7 @@ static const struct pci_error_handlers mlx4_err_handler = { .resume = mlx4_pci_resume, }; -static int mlx4_suspend(struct device *dev_d) +static int __maybe_unused mlx4_suspend(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); @@ -4385,7 +4385,7 @@ static int mlx4_suspend(struct device *dev_d) return 0; } -static int mlx4_resume(struct device *dev_d) +static int __maybe_unused mlx4_resume(struct device *dev_d) { struct pci_dev *pdev = to_pci_dev(dev_d); struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); From 66eba76763fd5689a1e2102d13b7da24cb61974f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 2 Jul 2020 06:25:34 -0500 Subject: [PATCH 0617/2056] net: ipa: move version test inside ipa_endpoint_program_delay() IPA version 4.2 has a hardware quirk that affects endpoint delay mode, so it isn't used there. Isolate the test that avoids using delay mode for that version inside ipa_endpoint_program_delay(), rather than making that check in the caller. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index a7b5a6407e8f..7f4bea18bd02 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -318,7 +318,9 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) { /* assert(endpoint->toward_ipa); */ - (void)ipa_endpoint_init_ctrl(endpoint, enable); + /* Delay mode doesn't work properly for IPA v4.2 */ + if (endpoint->ipa->version != IPA_VERSION_4_2) + (void)ipa_endpoint_init_ctrl(endpoint, enable); } /* Returns previous suspend state (true means it was enabled) */ @@ -1294,8 +1296,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) static void ipa_endpoint_program(struct ipa_endpoint *endpoint) { if (endpoint->toward_ipa) { - if (endpoint->ipa->version != IPA_VERSION_4_2) - ipa_endpoint_program_delay(endpoint, false); + ipa_endpoint_program_delay(endpoint, false); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_aggr(endpoint); ipa_endpoint_init_deaggr(endpoint); From fff899716f88f84afef4c952a3d7b8b48a070c4c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 2 Jul 2020 06:25:35 -0500 Subject: [PATCH 0618/2056] net: ipa: always handle suspend workaround IPA version 3.5.1 has a hardware quirk that requires special handling if an RX endpoint is suspended while aggregation is active. This handling is implemented by ipa_endpoint_suspend_aggr(). Have ipa_endpoint_program_suspend() be responsible for calling ipa_endpoint_suspend_aggr() if suspend mode is being enabled on an endpoint. If the endpoint does not support aggregation, or if aggregation isn't active, this call will continue to have no effect. Move the definition of ipa_endpoint_suspend_aggr() up in the file so its definition precedes the new earlier reference to it. This requires ipa_endpoint_aggr_active() and ipa_endpoint_force_close() to be moved as well. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 125 +++++++++++++++++---------------- 1 file changed, 63 insertions(+), 62 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 7f4bea18bd02..d6ef5b8647bf 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -323,13 +323,73 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) (void)ipa_endpoint_init_ctrl(endpoint, enable); } -/* Returns previous suspend state (true means it was enabled) */ +static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) +{ + u32 mask = BIT(endpoint->endpoint_id); + struct ipa *ipa = endpoint->ipa; + u32 offset; + u32 val; + + /* assert(mask & ipa->available); */ + offset = ipa_reg_state_aggr_active_offset(ipa->version); + val = ioread32(ipa->reg_virt + offset); + + return !!(val & mask); +} + +static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) +{ + u32 mask = BIT(endpoint->endpoint_id); + struct ipa *ipa = endpoint->ipa; + + /* assert(mask & ipa->available); */ + iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); +} + +/** + * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt + * @endpoint_id: Endpoint on which to emulate a suspend + * + * Emulate suspend IPA interrupt to unsuspend an endpoint suspended + * with an open aggregation frame. This is to work around a hardware + * issue in IPA version 3.5.1 where the suspend interrupt will not be + * generated when it should be. + */ +static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) +{ + struct ipa *ipa = endpoint->ipa; + + if (!endpoint->data->aggregation) + return; + + /* Nothing to do if the endpoint doesn't have aggregation open */ + if (!ipa_endpoint_aggr_active(endpoint)) + return; + + /* Force close aggregation */ + ipa_endpoint_force_close(endpoint); + + ipa_interrupt_simulate_suspend(ipa->interrupt); +} + +/* Returns previous suspend state (true means suspend was enabled) */ static bool ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) { + bool suspended; + /* assert(!endpoint->toward_ipa); */ - return ipa_endpoint_init_ctrl(endpoint, enable); + suspended = ipa_endpoint_init_ctrl(endpoint, enable); + + /* A client suspended with an open aggregation frame will not + * generate a SUSPEND IPA interrupt. If enabling suspend, have + * ipa_endpoint_suspend_aggr() handle this. 
+ */ + if (enable && !suspended) + ipa_endpoint_suspend_aggr(endpoint); + + return suspended; } /* Enable or disable delay or suspend mode on all modem endpoints */ @@ -1144,29 +1204,6 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa) ipa_endpoint_default_route_set(ipa, 0); } -static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) -{ - u32 mask = BIT(endpoint->endpoint_id); - struct ipa *ipa = endpoint->ipa; - u32 offset; - u32 val; - - /* assert(mask & ipa->available); */ - offset = ipa_reg_state_aggr_active_offset(ipa->version); - val = ioread32(ipa->reg_virt + offset); - - return !!(val & mask); -} - -static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) -{ - u32 mask = BIT(endpoint->endpoint_id); - struct ipa *ipa = endpoint->ipa; - - /* assert(mask & ipa->available); */ - iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); -} - /** * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active * @endpoint: Endpoint to be reset @@ -1366,34 +1403,6 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint) endpoint->endpoint_id); } -/** - * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt - * @endpoint_id: Endpoint on which to emulate a suspend - * - * Emulate suspend IPA interrupt to unsuspend an endpoint suspended - * with an open aggregation frame. This is to work around a hardware - * issue in IPA version 3.5.1 where the suspend interrupt will not be - * generated when it should be. - */ -static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) -{ - struct ipa *ipa = endpoint->ipa; - - /* assert(ipa->version == IPA_VERSION_3_5_1); */ - - if (!endpoint->data->aggregation) - return; - - /* Nothing to do if the endpoint doesn't have aggregation open */ - if (!ipa_endpoint_aggr_active(endpoint)) - return; - - /* Force close aggregation */ - ipa_endpoint_force_close(endpoint); - - ipa_interrupt_simulate_suspend(ipa->interrupt); -} - void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) { struct device *dev = &endpoint->ipa->pdev->dev; @@ -1409,16 +1418,8 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) /* IPA v3.5.1 doesn't use channel stop for suspend */ stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; - if (!endpoint->toward_ipa && !stop_channel) { - /* Due to a hardware bug, a client suspended with an open - * aggregation frame will not generate a SUSPEND IPA - * interrupt. We work around this by force-closing the - * aggregation frame, then simulating the arrival of such - * an interrupt. - */ + if (!endpoint->toward_ipa && !stop_channel) (void)ipa_endpoint_program_suspend(endpoint, true); - ipa_endpoint_suspend_aggr(endpoint); - } ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); if (ret) From b07f283ef3d0c97f67e5f446bb027c4456d75f1f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 2 Jul 2020 06:25:36 -0500 Subject: [PATCH 0619/2056] net: ipa: move version test inside ipa_endpoint_program_suspend() IPA version 4.0+ does not support endpoint suspend. Put a test at the top of ipa_endpoint_program_suspend() that returns immediately if suspend is not supported rather than making that check in the caller. Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/ipa_endpoint.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index d6ef5b8647bf..df4202794e69 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -378,6 +378,9 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) { bool suspended; + if (endpoint->ipa->version != IPA_VERSION_3_5_1) + return enable; /* For IPA v4.0+, no change made */ + /* assert(!endpoint->toward_ipa); */ suspended = ipa_endpoint_init_ctrl(endpoint, enable); @@ -395,26 +398,22 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) /* Enable or disable delay or suspend mode on all modem endpoints */ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) { - bool support_suspend; u32 endpoint_id; /* DELAY mode doesn't work correctly on IPA v4.2 */ if (ipa->version == IPA_VERSION_4_2) return; - /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */ - support_suspend = ipa->version == IPA_VERSION_3_5_1; - for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; if (endpoint->ee_id != GSI_EE_MODEM) continue; - /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */ + /* Set TX delay mode or RX suspend mode */ if (endpoint->toward_ipa) ipa_endpoint_program_delay(endpoint, enable); - else if (support_suspend) + else (void)ipa_endpoint_program_suspend(endpoint, enable); } } @@ -1248,8 +1247,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) gsi_channel_reset(gsi, endpoint->channel_id, false); /* Make sure the channel isn't suspended */ - if (endpoint->ipa->version == IPA_VERSION_3_5_1) - suspended = ipa_endpoint_program_suspend(endpoint, false); + suspended = ipa_endpoint_program_suspend(endpoint, false); /* Start channel and do a 1 byte read */ ret = gsi_channel_start(gsi, endpoint->channel_id); @@ -1340,8 +1338,7 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_seq(endpoint); ipa_endpoint_init_mode(endpoint); } else { - if (endpoint->ipa->version == IPA_VERSION_3_5_1) - (void)ipa_endpoint_program_suspend(endpoint, false); + (void)ipa_endpoint_program_suspend(endpoint, false); ipa_endpoint_init_hdr_ext(endpoint); ipa_endpoint_init_aggr(endpoint); ipa_endpoint_init_hdr_metadata_mask(endpoint); @@ -1416,11 +1413,11 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint) if (!endpoint->toward_ipa) ipa_endpoint_replenish_disable(endpoint); - /* IPA v3.5.1 doesn't use channel stop for suspend */ - stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; - if (!endpoint->toward_ipa && !stop_channel) + if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, true); + /* IPA v3.5.1 doesn't use channel stop for suspend */ + stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel); if (ret) dev_err(dev, "error %d suspending channel %u\n", ret, @@ -1437,11 +1434,11 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint) if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id))) return; - /* IPA v3.5.1 doesn't use channel start for resume */ - start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; - if (!endpoint->toward_ipa && !start_channel) + if (!endpoint->toward_ipa) (void)ipa_endpoint_program_suspend(endpoint, false); + /* IPA v3.5.1 doesn't use channel start for 
resume */ + start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1; ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel); if (ret) dev_err(dev, "error %d resuming channel %u\n", ret, From fb57c3ea98519f811b37f299e0ac4988a021fe2a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 2 Jul 2020 06:25:37 -0500 Subject: [PATCH 0620/2056] net: ipa: simplify ipa_endpoint_program() Have functions that write endpoint configuration registers return immediately if they are not valid for the direction of transfer for the endpoint. This allows most of the calls in ipa_endpoint_program() to be made unconditionally. Reorder the register writes to match the order of their definition (based on offset). Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index df4202794e69..99115a2a29ae 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -588,6 +588,9 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) u32 val = 0; u32 offset; + if (endpoint->toward_ipa) + return; /* Register not valid for TX endpoints */ + offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); /* Note that HDR_ENDIANNESS indicates big endian header fields */ @@ -602,6 +605,9 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); u32 val; + if (!endpoint->toward_ipa) + return; /* Register not valid for RX endpoints */ + if (endpoint->data->dma_mode) { enum ipa_endpoint_name name = endpoint->data->dma_endpoint; u32 dma_endpoint_id; @@ -760,6 +766,9 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); u32 val = 0; + if (!endpoint->toward_ipa) + return; /* Register not valid for RX endpoints */ + /* DEAGGR_HDR_LEN is 0 */ /* PACKET_OFFSET_VALID is 0 */ /* PACKET_OFFSET_LOCATION is ignored (not valid) */ @@ -774,6 +783,9 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) u32 seq_type = endpoint->seq_type; u32 val = 0; + if (!endpoint->toward_ipa) + return; /* Register not valid for RX endpoints */ + /* Sequencer type is made up of four nibbles */ val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK); val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK); @@ -1330,21 +1342,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) static void ipa_endpoint_program(struct ipa_endpoint *endpoint) { - if (endpoint->toward_ipa) { + if (endpoint->toward_ipa) ipa_endpoint_program_delay(endpoint, false); - ipa_endpoint_init_hdr_ext(endpoint); - ipa_endpoint_init_aggr(endpoint); - ipa_endpoint_init_deaggr(endpoint); - ipa_endpoint_init_seq(endpoint); - ipa_endpoint_init_mode(endpoint); - } else { + else (void)ipa_endpoint_program_suspend(endpoint, false); - ipa_endpoint_init_hdr_ext(endpoint); - ipa_endpoint_init_aggr(endpoint); - ipa_endpoint_init_hdr_metadata_mask(endpoint); - } ipa_endpoint_init_cfg(endpoint); ipa_endpoint_init_hdr(endpoint); + ipa_endpoint_init_hdr_ext(endpoint); + ipa_endpoint_init_hdr_metadata_mask(endpoint); + ipa_endpoint_init_mode(endpoint); + ipa_endpoint_init_aggr(endpoint); + ipa_endpoint_init_deaggr(endpoint); + ipa_endpoint_init_seq(endpoint); ipa_endpoint_status(endpoint); } From e44f65fd666c73944d6f2462cea0559ce5508721 
Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 2 Jul 2020 15:22:23 +0100 Subject: [PATCH 0621/2056] xen-netfront: remove redundant assignment to variable 'act' The variable act is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/xen-netfront.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 468f3f6f1425..860a0cce346d 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -856,7 +856,7 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, { struct xdp_frame *xdpf; u32 len = rx->status; - u32 act = XDP_PASS; + u32 act; int err; xdp->data_hard_start = page_address(pdata); From af3c38d3fb7c3003842974b1de454ecebbd8bc83 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:27:35 +0100 Subject: [PATCH 0622/2056] sfc: support setting MTU even if not privileged to configure MAC fully Unprivileged functions (such as VFs) may set their MTU by use of the 'control' field of MC_CMD_SET_MAC_EXT, as used in efx_mcdi_set_mtu(). If calling efx_ef10_mac_reconfigure() from efx_change_mtu(), and the NIC supports the above (SET_MAC_ENHANCED capability), use it rather than efx_mcdi_set_mac(). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 15 ++++++--------- drivers/net/ethernet/sfc/efx.c | 4 ---- drivers/net/ethernet/sfc/efx_common.c | 12 ++++++------ drivers/net/ethernet/sfc/efx_common.h | 2 +- drivers/net/ethernet/sfc/ethtool_common.c | 2 +- drivers/net/ethernet/sfc/net_driver.h | 2 +- drivers/net/ethernet/sfc/siena.c | 2 +- 7 files changed, 16 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 5faf2f0e4d62..a51925b45fc7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3306,20 +3306,17 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) return rc; } -static int efx_ef10_mac_reconfigure(struct efx_nic *efx) +static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only) { + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + efx_mcdi_filter_sync_rx_mode(efx); + if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED)) + return efx_mcdi_set_mtu(efx); return efx_mcdi_set_mac(efx); } -static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx) -{ - efx_mcdi_filter_sync_rx_mode(efx); - - return 0; -} - static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type) { MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN); @@ -4038,7 +4035,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .stop_stats = efx_port_dummy_op_void, .set_id_led = efx_mcdi_set_id_led, .push_irq_moderation = efx_ef10_push_irq_moderation, - .reconfigure_mac = efx_ef10_mac_reconfigure_vf, + .reconfigure_mac = efx_ef10_mac_reconfigure, .check_mac_fault = efx_mcdi_mac_check_fault, .reconfigure_port = efx_mcdi_port_reconfigure, .get_wol = efx_ef10_get_wol_vf, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 028d826ab147..418676aba43a 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -169,10 +169,6 @@ static int efx_init_port(struct efx_nic *efx) efx->port_initialized = true; - /* Reconfigure the MAC before creating dma queues (required for - * Falcon/A1 where 
RX_INGR_EN/TX_DRAIN_EN isn't supported) */ - efx_mac_reconfigure(efx); - /* Ensure the PHY advertises the correct flow control settings */ rc = efx->phy_op->reconfigure(efx); if (rc && rc != -EPERM) diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index a2f744377aaa..26dca2f9a363 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -139,11 +139,11 @@ void efx_destroy_reset_workqueue(void) /* We assume that efx->type->reconfigure_mac will always try to sync RX * filters and therefore needs to read-lock the filter table against freeing */ -void efx_mac_reconfigure(struct efx_nic *efx) +void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only) { if (efx->type->reconfigure_mac) { down_read(&efx->filter_sem); - efx->type->reconfigure_mac(efx); + efx->type->reconfigure_mac(efx, mtu_only); up_read(&efx->filter_sem); } } @@ -158,7 +158,7 @@ static void efx_mac_work(struct work_struct *data) mutex_lock(&efx->mac_lock); if (efx->port_enabled) - efx_mac_reconfigure(efx); + efx_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); } @@ -190,7 +190,7 @@ int efx_set_mac_address(struct net_device *net_dev, void *data) /* Reconfigure the MAC */ mutex_lock(&efx->mac_lock); - efx_mac_reconfigure(efx); + efx_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); return 0; @@ -304,7 +304,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu) mutex_lock(&efx->mac_lock); net_dev->mtu = new_mtu; - efx_mac_reconfigure(efx); + efx_mac_reconfigure(efx, true); mutex_unlock(&efx->mac_lock); efx_start_all(efx); @@ -486,7 +486,7 @@ static void efx_start_port(struct efx_nic *efx) efx->port_enabled = true; /* Ensure MAC ingress/egress is enabled */ - efx_mac_reconfigure(efx); + efx_mac_reconfigure(efx, false); mutex_unlock(&efx->mac_lock); } diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index 73c355fc2590..4056f68f04e5 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -95,7 +95,7 @@ static inline void efx_init_mcdi_logging(struct efx_nic *efx) {} static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {} #endif -void efx_mac_reconfigure(struct efx_nic *efx); +void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only); int efx_set_mac_address(struct net_device *net_dev, void *data); void efx_set_rx_mode(struct net_device *net_dev); int efx_set_features(struct net_device *net_dev, netdev_features_t data); diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index c96595e50234..738d9be86899 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -241,7 +241,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev, /* Reconfigure the MAC. 
The PHY *may* generate a link state change event * if the user just changed the advertised capabilities, but there's no * harm doing this twice */ - efx_mac_reconfigure(efx); + efx_mac_reconfigure(efx, false); out: mutex_unlock(&efx->mac_lock); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e0b84b2e3bd2..65a106878186 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1356,7 +1356,7 @@ struct efx_nic_type { void (*push_irq_moderation)(struct efx_channel *channel); int (*reconfigure_port)(struct efx_nic *efx); void (*prepare_enable_fc_tx)(struct efx_nic *efx); - int (*reconfigure_mac)(struct efx_nic *efx); + int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only); bool (*check_mac_fault)(struct efx_nic *efx); void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); int (*set_wol)(struct efx_nic *efx, u32 type); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index ffe193f03352..a7dcd2d3c09b 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -633,7 +633,7 @@ static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats, return SIENA_STAT_COUNT; } -static int siena_mac_reconfigure(struct efx_nic *efx) +static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused) { MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN); int rc; From bc32442176d7289a71a2e940e5e8ea63cd0a23bb Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:27:51 +0100 Subject: [PATCH 0623/2056] sfc: remove max_interrupt_mode All NICs supported by this driver are capable of MSI-X interrupts (only Falcon A1 wasn't, and that's now hived off into its own driver), so no need for a nic-type parameter. Besides, the code that checked it was buggy anyway (the following assignment that checked min_interrupt_mode overrode it). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 -- drivers/net/ethernet/sfc/efx_channels.c | 6 ------ drivers/net/ethernet/sfc/net_driver.h | 3 --- drivers/net/ethernet/sfc/siena.c | 1 - 4 files changed, 12 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index a51925b45fc7..88522b683cc7 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -4108,7 +4108,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .can_rx_scatter = true, .always_rx_scatter = true, .min_interrupt_mode = EFX_INT_MODE_MSIX, - .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, @@ -4245,7 +4244,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .always_rx_scatter = true, .option_descriptors = true, .min_interrupt_mode = EFX_INT_MODE_LEGACY, - .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 2f9db219513a..2220b9507336 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -557,12 +557,6 @@ int efx_init_channels(struct efx_nic *efx) } /* Higher numbered interrupt modes are less capable! 
*/ - if (WARN_ON_ONCE(efx->type->max_interrupt_mode > - efx->type->min_interrupt_mode)) { - return -EIO; - } - efx->interrupt_mode = max(efx->type->max_interrupt_mode, - interrupt_mode); efx->interrupt_mode = min(efx->type->min_interrupt_mode, interrupt_mode); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 65a106878186..e536c1e12f86 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -1317,8 +1317,6 @@ struct efx_udp_tunnel { * @option_descriptors: NIC supports TX option descriptors * @min_interrupt_mode: Lowest capability interrupt mode supported * from &enum efx_int_mode. - * @max_interrupt_mode: Highest capability interrupt mode supported - * from &enum efx_int_mode. * @timer_period_max: Maximum period of interrupt timer (in ticks) * @offload_features: net_device feature flags for protocol offload * features implemented in hardware @@ -1492,7 +1490,6 @@ struct efx_nic_type { bool always_rx_scatter; bool option_descriptors; unsigned int min_interrupt_mode; - unsigned int max_interrupt_mode; unsigned int timer_period_max; netdev_features_t offload_features; int mcdi_max_ver; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index a7dcd2d3c09b..e438853f64a3 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1085,7 +1085,6 @@ const struct efx_nic_type siena_a0_nic_type = { .can_rx_scatter = true, .option_descriptors = false, .min_interrupt_mode = EFX_INT_MODE_LEGACY, - .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE), From e4ff3232102080e8491f2fecb83446e1cbf5b87c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:28:13 +0100 Subject: [PATCH 0624/2056] sfc: move modparam 'interrupt_mode' out of common channel code EF100 only supports MSI-X, so there's no need for the new driver to expose this old module parameter. Since it's now visible to the linker, we have to rename it internally to efx_interrupt_mode to avoid symbol collisions in non-modular builds. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 4 ++++ drivers/net/ethernet/sfc/efx_channels.c | 7 ++----- drivers/net/ethernet/sfc/efx_channels.h | 2 ++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 418676aba43a..2725d1d62d25 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -64,6 +64,10 @@ void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen) * *************************************************************************/ +module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); +MODULE_PARM_DESC(interrupt_mode, + "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); + /* * Use separate channels for TX and RX events * diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 2220b9507336..50356db39ae5 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -23,10 +23,7 @@ * 1 => MSI * 2 => legacy */ -static unsigned int interrupt_mode; -module_param(interrupt_mode, uint, 0444); -MODULE_PARM_DESC(interrupt_mode, - "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); +unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), * i.e. the number of CPUs among which we may distribute simultaneous @@ -558,7 +555,7 @@ int efx_init_channels(struct efx_nic *efx) /* Higher numbered interrupt modes are less capable! */ efx->interrupt_mode = min(efx->type->min_interrupt_mode, - interrupt_mode); + efx_interrupt_mode); efx->max_channels = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h index 8d7b8c4142d7..86dd058d40f3 100644 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@ -11,6 +11,8 @@ #ifndef EFX_CHANNELS_H #define EFX_CHANNELS_H +extern unsigned int efx_interrupt_mode; + int efx_probe_interrupts(struct efx_nic *efx); void efx_remove_interrupts(struct efx_nic *efx); int efx_soft_enable_interrupts(struct efx_nic *efx); From 67e6398e2e058d0ec126f47ac123cca590c7a2ba Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:29:09 +0100 Subject: [PATCH 0625/2056] sfc: move modparam 'rss_cpus' out of common channel code Instead of exposing this old module parameter on the new driver (thus having to keep it forever after for compatibility), let's confine it to the old one; if we find later that we need the feature, we ought to support it properly, with ethtool set-channels. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 3 +++ drivers/net/ethernet/sfc/efx_channels.c | 4 +--- drivers/net/ethernet/sfc/efx_channels.h | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 2725d1d62d25..6094f59d49a7 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -68,6 +68,9 @@ module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444); MODULE_PARM_DESC(interrupt_mode, "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); +module_param(rss_cpus, uint, 0444); +MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); + /* * Use separate channels for TX and RX events * diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 50356db39ae5..466257b9abbf 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -32,9 +32,7 @@ unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX; * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. * The default (0) means to assign an interrupt to each core. */ -static unsigned int rss_cpus; -module_param(rss_cpus, uint, 0444); -MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); +unsigned int rss_cpus; static unsigned int irq_adapt_low_thresh = 8000; module_param(irq_adapt_low_thresh, uint, 0644); diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h index 86dd058d40f3..2d71dc9a33dd 100644 --- a/drivers/net/ethernet/sfc/efx_channels.h +++ b/drivers/net/ethernet/sfc/efx_channels.h @@ -12,6 +12,7 @@ #define EFX_CHANNELS_H extern unsigned int efx_interrupt_mode; +extern unsigned int rss_cpus; int efx_probe_interrupts(struct efx_nic *efx); void efx_remove_interrupts(struct efx_nic *efx); From f9cac93e5b3eefc5c6340ba4efd3628f2347e1ef Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:29:24 +0100 Subject: [PATCH 0626/2056] sfc: make tx_queues_per_channel variable at runtime Siena needs four TX queues (csum * highpri), EF10 needs two (csum), and EF100 only needs one (as checksumming is controlled entirely by the transmit descriptor). Rather than having various bits of ad-hoc code to decide which queues to set up etc., put the knowledge of how many TXQs a channel has in one place. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 13 +++++----- drivers/net/ethernet/sfc/efx_channels.c | 4 +-- drivers/net/ethernet/sfc/efx_common.c | 1 + drivers/net/ethernet/sfc/net_driver.h | 34 ++++++++++--------------- drivers/net/ethernet/sfc/siena.c | 1 + drivers/net/ethernet/sfc/tx.c | 8 ++++-- 6 files changed, 30 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 88522b683cc7..be15640c160a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -600,6 +600,7 @@ static int efx_ef10_probe(struct efx_nic *efx) * However, until we use TX option descriptors we need two TX queues * per channel. 
*/ + efx->tx_queues_per_channel = 2; efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride; if (!efx->max_vis) { netif_err(efx, drv, efx->net_dev, "error determining max VIs\n"); @@ -607,7 +608,7 @@ static int efx_ef10_probe(struct efx_nic *efx) goto fail5; } efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS, - efx->max_vis / EFX_TXQ_TYPES); + efx->max_vis / efx->tx_queues_per_channel); efx->max_tx_channels = efx->max_channels; if (WARN_ON(efx->max_channels == 0)) { rc = -EIO; @@ -1120,17 +1121,17 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx, */ static int efx_ef10_dimension_resources(struct efx_nic *efx) { + unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel, + efx_separate_tx_channels ? 2 : 1); + unsigned int channel_vis, pio_write_vi_base, max_vis; struct efx_ef10_nic_data *nic_data = efx->nic_data; unsigned int uc_mem_map_size, wc_mem_map_size; - unsigned int min_vis = max(EFX_TXQ_TYPES, - efx_separate_tx_channels ? 2 : 1); - unsigned int channel_vis, pio_write_vi_base, max_vis; void __iomem *membase; int rc; channel_vis = max(efx->n_channels, ((efx->n_tx_channels + efx->n_extra_tx_channels) * - EFX_TXQ_TYPES) + + efx->tx_queues_per_channel) + efx->n_xdp_channels * efx->xdp_tx_per_channel); if (efx->max_vis && efx->max_vis < channel_vis) { netif_dbg(efx, drv, efx->net_dev, @@ -1219,7 +1220,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx) */ efx->max_channels = nic_data->n_allocated_vis; efx->max_tx_channels = - nic_data->n_allocated_vis / EFX_TXQ_TYPES; + nic_data->n_allocated_vis / efx->tx_queues_per_channel; efx_mcdi_free_vis(efx); return -EAGAIN; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 466257b9abbf..c3edebf523b6 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -726,7 +726,7 @@ void efx_remove_channel(struct efx_channel *channel) efx_for_each_channel_rx_queue(rx_queue, channel) efx_remove_rx_queue(rx_queue); - efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_for_each_channel_tx_queue(tx_queue, channel) efx_remove_tx_queue(tx_queue); efx_remove_eventq(channel); channel->type->post_remove(channel); @@ -1090,7 +1090,7 @@ void efx_stop_channels(struct efx_nic *efx) efx_for_each_channel(channel, efx) { efx_for_each_channel_rx_queue(rx_queue, channel) efx_fini_rx_queue(rx_queue); - efx_for_each_possible_channel_tx_queue(tx_queue, channel) + efx_for_each_channel_tx_queue(tx_queue, channel) efx_fini_tx_queue(tx_queue); } } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 26dca2f9a363..c84123456c01 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1036,6 +1036,7 @@ int efx_init_struct(struct efx_nic *efx, INIT_WORK(&efx->mac_work, efx_mac_work); init_waitqueue_head(&efx->flush_wq); + efx->tx_queues_per_channel = 1; efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE; efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index e536c1e12f86..4ded155b12e9 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -867,6 +867,7 @@ struct efx_async_filter_insertion { * @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_tx_channels: Number of channels used for TX * @n_extra_tx_channels: Number of extra channels with TX queues + * @tx_queues_per_channel: number of TX queues probed on each 
channel * @n_xdp_channels: Number of channels used for XDP TX * @xdp_channel_offset: Offset of zeroth channel used for XPD TX. * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel. @@ -1031,6 +1032,7 @@ struct efx_nic { unsigned tx_channel_offset; unsigned n_tx_channels; unsigned n_extra_tx_channels; + unsigned int tx_queues_per_channel; unsigned int n_xdp_channels; unsigned int xdp_channel_offset; unsigned int xdp_tx_per_channel; @@ -1529,7 +1531,7 @@ static inline struct efx_tx_queue * efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) { EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels || - type >= EFX_TXQ_TYPES); + type >= efx->tx_queues_per_channel); return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; } @@ -1551,38 +1553,28 @@ static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) return true; } +static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel) +{ + if (efx_channel_is_xdp_tx(channel)) + return channel->efx->xdp_tx_per_channel; + return channel->efx->tx_queues_per_channel; +} + static inline struct efx_tx_queue * efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) { - EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_tx_queues(channel) || - type >= EFX_TXQ_TYPES); + EFX_WARN_ON_ONCE_PARANOID(type >= efx_channel_num_tx_queues(channel)); return &channel->tx_queue[type]; } -static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) -{ - return !(tx_queue->efx->net_dev->num_tc < 2 && - tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); -} - /* Iterate over all TX queues belonging to a channel */ #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ if (!efx_channel_has_tx_queues(_channel)) \ ; \ else \ for (_tx_queue = (_channel)->tx_queue; \ - _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ - (efx_tx_queue_used(_tx_queue) || \ - efx_channel_is_xdp_tx(_channel)); \ - _tx_queue++) - -/* Iterate over all possible TX queues belonging to a channel */ -#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ - if (!efx_channel_has_tx_queues(_channel)) \ - ; \ - else \ - for (_tx_queue = (_channel)->tx_queue; \ - _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ + _tx_queue < (_channel)->tx_queue + \ + efx_channel_num_tx_queues(_channel); \ _tx_queue++) static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index e438853f64a3..4c5881a3bfe4 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -279,6 +279,7 @@ static int siena_probe_nic(struct efx_nic *efx) efx->max_channels = EFX_MAX_CHANNELS; efx->max_vis = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; + efx->tx_queues_per_channel = 2; efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index ed20f6aef435..76ff394f5b58 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -569,6 +569,10 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; + /* Only Siena supported highpri queues */ + if (efx_nic_rev(efx) > EFX_REV_SIENA_A0) + return -EOPNOTSUPP; + num_tc = mqprio->num_tc; if (num_tc > EFX_MAX_TX_TC) @@ -585,10 +589,10 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, } if (num_tc > net_dev->num_tc) { + 
efx->tx_queues_per_channel = 4; /* Initialise high-priority queues as necessary */ efx_for_each_channel(channel, efx) { - efx_for_each_possible_channel_tx_queue(tx_queue, - channel) { + efx_for_each_channel_tx_queue(tx_queue, channel) { if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) continue; if (!tx_queue->buffer) { From 69a704962e8cab43bd62301e104c222e7d23e361 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:29:40 +0100 Subject: [PATCH 0627/2056] sfc: commonise netif_set_real_num[tr]x_queues calls While we're at it, also check them for failure. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx.c | 3 --- drivers/net/ethernet/sfc/efx_channels.c | 7 ++++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 6094f59d49a7..befd253af918 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -336,9 +336,6 @@ static int efx_probe_nic(struct efx_nic *efx) sizeof(efx->rss_context.rx_hash_key)); efx_set_default_rx_indir_table(efx, &efx->rss_context); - netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); - netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); - /* Initialise the interrupt moderation settings */ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true, diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index c3edebf523b6..30358f3f48ca 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -856,6 +856,7 @@ int efx_set_channels(struct efx_nic *efx) struct efx_channel *channel; struct efx_tx_queue *tx_queue; int xdp_queue_number; + int rc; efx->tx_channel_offset = efx_separate_tx_channels ? @@ -894,7 +895,11 @@ int efx_set_channels(struct efx_nic *efx) } } } - return 0; + + rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); + if (rc) + return rc; + return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); } bool efx_default_channel_want_txqs(struct efx_channel *channel) From a81dcd85a7c1bc548ce8fb635623970fdee5887d Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:29:58 +0100 Subject: [PATCH 0628/2056] sfc: assign TXQs without gaps Since we only allocate VIs for the number of TXQs we actually need, we cannot naively use "channel * TXQ_TYPES + txq" for the TXQ number, as this has gaps (when efx->tx_queues_per_channel < EFX_TXQ_TYPES) and thus overruns the driver's VI allocations, causing the firmware to reject the MC_CMD_INIT_TXQ based on INSTANCE. Thus, we distinguish INSTANCE (stored in tx_queue->queue) from LABEL (tx_queue->label); the former is allocated starting from 0 in efx_set_channels(), while the latter is simply the txq type (index in channel->tx_queue array). To simplify things, rather than changing tx_queues_per_channel after setting up TXQs, make Siena always probe its HIGHPRI queues at start of day, rather than deferring it until tc mqprio enables them. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 2 +- drivers/net/ethernet/sfc/efx_channels.c | 38 ++++++++++++++----- drivers/net/ethernet/sfc/ethtool_common.c | 7 ++-- drivers/net/ethernet/sfc/farch.c | 6 +-- drivers/net/ethernet/sfc/mcdi_functions.c | 4 +- drivers/net/ethernet/sfc/net_driver.h | 5 ++- drivers/net/ethernet/sfc/nic_common.h | 2 +- drivers/net/ethernet/sfc/selftest.c | 18 ++++----- drivers/net/ethernet/sfc/siena.c | 2 +- drivers/net/ethernet/sfc/tx.c | 46 +++-------------------- 10 files changed, 58 insertions(+), 72 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index be15640c160a..311a2d2a906d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2244,7 +2244,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx) static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) { - bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD; struct efx_channel *channel = tx_queue->channel; struct efx_nic *efx = tx_queue->efx; struct efx_ef10_nic_data *nic_data; diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 30358f3f48ca..dd4f30ea48a8 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -524,7 +524,8 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) for (j = 0; j < EFX_TXQ_TYPES; j++) { tx_queue = &channel->tx_queue[j]; tx_queue->efx = efx; - tx_queue->queue = i * EFX_TXQ_TYPES + j; + tx_queue->queue = -1; + tx_queue->label = j; tx_queue->channel = channel; } @@ -853,8 +854,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) int efx_set_channels(struct efx_nic *efx) { - struct efx_channel *channel; struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + unsigned int next_queue = 0; int xdp_queue_number; int rc; @@ -884,14 +886,30 @@ int efx_set_channels(struct efx_nic *efx) else channel->rx_queue.core_index = -1; - efx_for_each_channel_tx_queue(tx_queue, channel) { - tx_queue->queue -= (efx->tx_channel_offset * - EFX_TXQ_TYPES); - - if (efx_channel_is_xdp_tx(channel) && - xdp_queue_number < efx->xdp_tx_queue_count) { - efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - xdp_queue_number++; + if (channel->channel >= efx->tx_channel_offset) { + if (efx_channel_is_xdp_tx(channel)) { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); + /* We may have a few left-over XDP TX + * queues owing to xdp_tx_queue_count + * not dividing evenly by EFX_TXQ_TYPES. + * We still allocate and probe those + * TXQs, but never use them. 
+ */ + if (xdp_queue_number < efx->xdp_tx_queue_count) + efx->xdp_tx_queues[xdp_queue_number] = tx_queue; + xdp_queue_number++; + } + } else { + efx_for_each_channel_tx_queue(tx_queue, channel) { + tx_queue->queue = next_queue++; + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n", + channel->channel, tx_queue->label, + tx_queue->queue); + } } } } diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 738d9be86899..37a4409e759e 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -287,8 +287,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, } #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel -#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue -#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue +#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label #define EFX_LOOPBACK_NAME(_mode, _counter) \ "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) @@ -316,11 +315,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx, efx_for_each_channel_tx_queue(tx_queue, channel) { efx_fill_test(test_index++, strings, data, - &lb_tests->tx_sent[tx_queue->queue], + &lb_tests->tx_sent[tx_queue->label], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_sent")); efx_fill_test(test_index++, strings, data, - &lb_tests->tx_done[tx_queue->queue], + &lb_tests->tx_done[tx_queue->label], EFX_TX_QUEUE_NAME(tx_queue), EFX_LOOPBACK_NAME(mode, "tx_done")); } diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index dbbb898adddb..d07eeaad9bdf 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -379,7 +379,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) void efx_farch_tx_init(struct efx_tx_queue *tx_queue) { - int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD; struct efx_nic *efx = tx_queue->efx; efx_oword_t reg; @@ -395,7 +395,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue) FRF_AZ_TX_DESCQ_EVQ_ID, tx_queue->channel->channel, FRF_AZ_TX_DESCQ_OWNER_ID, 0, - FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, + FRF_AZ_TX_DESCQ_LABEL, tx_queue->label, FRF_AZ_TX_DESCQ_SIZE, __ffs(tx_queue->txd.entries), FRF_AZ_TX_DESCQ_TYPE, 0, @@ -409,7 +409,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue) EFX_POPULATE_OWORD_1(reg, FRF_BZ_TX_PACE, - (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? + (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ? 
FFE_BZ_TX_PACE_OFF : FFE_BZ_TX_PACE_RESERVED); efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, tx_queue->queue); diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c index 962d8395d958..b3a8aa88db06 100644 --- a/drivers/net/ethernet/sfc/mcdi_functions.c +++ b/drivers/net/ethernet/sfc/mcdi_functions.c @@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2) { MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / EFX_BUF_SIZE)); - bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; + bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD; size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; struct efx_channel *channel = tx_queue->channel; struct efx_nic *efx = tx_queue->efx; @@ -176,7 +176,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2) MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); - MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); + MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id); diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 4ded155b12e9..0bf11ebb03cf 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -189,6 +189,8 @@ struct efx_tx_buffer { * * @efx: The associated Efx NIC * @queue: DMA queue number + * @label: Label for TX completion events. + * Is our index within @channel->tx_queue array. * @tso_version: Version of TSO in use for this queue. * @channel: The associated channel * @core_txq: The networking core TX queue structure @@ -250,7 +252,8 @@ struct efx_tx_buffer { struct efx_tx_queue { /* Members which don't change on the fast path */ struct efx_nic *efx ____cacheline_aligned_in_smp; - unsigned queue; + unsigned int queue; + unsigned int label; unsigned int tso_version; struct efx_channel *channel; struct netdev_queue *core_txq; diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index fd474d9e55e4..813f288ab3fe 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -90,7 +90,7 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) /* XXX is this a thing on EF100? 
*/ static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) { - if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD) return tx_queue - EFX_TXQ_TYPE_OFFLOAD; else return tx_queue + EFX_TXQ_TYPE_OFFLOAD; diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c index 1ae369022d7d..e71d6d37a317 100644 --- a/drivers/net/ethernet/sfc/selftest.c +++ b/drivers/net/ethernet/sfc/selftest.c @@ -445,7 +445,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) if (rc != NETDEV_TX_OK) { netif_err(efx, drv, efx->net_dev, "TX queue %d could not transmit packet %d of " - "%d in %s loopback test\n", tx_queue->queue, + "%d in %s loopback test\n", tx_queue->label, i + 1, state->packet_count, LOOPBACK_MODE(efx)); @@ -497,7 +497,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, netif_err(efx, drv, efx->net_dev, "TX queue %d saw only %d out of an expected %d " "TX completion events in %s loopback test\n", - tx_queue->queue, tx_done, state->packet_count, + tx_queue->label, tx_done, state->packet_count, LOOPBACK_MODE(efx)); rc = -ETIMEDOUT; /* Allow to fall through so we see the RX errors as well */ @@ -508,15 +508,15 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, netif_dbg(efx, drv, efx->net_dev, "TX queue %d saw only %d out of an expected %d " "received packets in %s loopback test\n", - tx_queue->queue, rx_good, state->packet_count, + tx_queue->label, rx_good, state->packet_count, LOOPBACK_MODE(efx)); rc = -ETIMEDOUT; /* Fall through */ } /* Update loopback test structure */ - lb_tests->tx_sent[tx_queue->queue] += state->packet_count; - lb_tests->tx_done[tx_queue->queue] += tx_done; + lb_tests->tx_sent[tx_queue->label] += state->packet_count; + lb_tests->tx_done[tx_queue->label] += tx_done; lb_tests->rx_good += rx_good; lb_tests->rx_bad += rx_bad; @@ -542,8 +542,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, state->flush = false; netif_dbg(efx, drv, efx->net_dev, - "TX queue %d testing %s loopback with %d packets\n", - tx_queue->queue, LOOPBACK_MODE(efx), + "TX queue %d (hw %d) testing %s loopback with %d packets\n", + tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx), state->packet_count); efx_iterate_state(efx); @@ -570,7 +570,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, netif_dbg(efx, drv, efx->net_dev, "TX queue %d passed %s loopback test with a burst length " - "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), + "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx), state->packet_count); return 0; @@ -660,7 +660,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, /* Test all enabled types of TX queue */ efx_for_each_channel_tx_queue(tx_queue, channel) { - state->offload_csum = (tx_queue->queue & + state->offload_csum = (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD); rc = efx_test_loopback(tx_queue, &tests->loopback[mode]); diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 4c5881a3bfe4..219fb3a0c9d0 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -279,7 +279,7 @@ static int siena_probe_nic(struct efx_nic *efx) efx->max_channels = EFX_MAX_CHANNELS; efx->max_vis = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS; - efx->tx_queues_per_channel = 2; + efx->tx_queues_per_channel = 4; efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; diff --git a/drivers/net/ethernet/sfc/tx.c 
b/drivers/net/ethernet/sfc/tx.c index 76ff394f5b58..1bcf50ab95d9 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -551,8 +551,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) /* Must be inverse of queue lookup in efx_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, - tx_queue->queue / EFX_TXQ_TYPES + - ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? + tx_queue->channel->channel + + ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ? efx->n_tx_channels : 0)); } @@ -561,10 +561,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, { struct efx_nic *efx = netdev_priv(net_dev); struct tc_mqprio_qopt *mqprio = type_data; - struct efx_channel *channel; - struct efx_tx_queue *tx_queue; unsigned tc, num_tc; - int rc; if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; @@ -588,40 +585,9 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, net_dev->tc_to_txq[tc].count = efx->n_tx_channels; } - if (num_tc > net_dev->num_tc) { - efx->tx_queues_per_channel = 4; - /* Initialise high-priority queues as necessary */ - efx_for_each_channel(channel, efx) { - efx_for_each_channel_tx_queue(tx_queue, channel) { - if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) - continue; - if (!tx_queue->buffer) { - rc = efx_probe_tx_queue(tx_queue); - if (rc) - return rc; - } - if (!tx_queue->initialised) - efx_init_tx_queue(tx_queue); - efx_init_tx_queue_core_txq(tx_queue); - } - } - } else { - /* Reduce number of classes before number of queues */ - net_dev->num_tc = num_tc; - } - - rc = netif_set_real_num_tx_queues(net_dev, - max_t(int, num_tc, 1) * - efx->n_tx_channels); - if (rc) - return rc; - - /* Do not destroy high-priority queues when they become - * unused. We would have to flush them first, and it is - * fairly difficult to flush a subset of TX queues. Leave - * it to efx_fini_channels(). - */ - net_dev->num_tc = num_tc; - return 0; + + return netif_set_real_num_tx_queues(net_dev, + max_t(int, num_tc, 1) * + efx->n_tx_channels); } From 79de6e7cb8ac9bd0c98be663d414f85ef90c1196 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:30:23 +0100 Subject: [PATCH 0629/2056] sfc: don't call tx_limit_len if NIC type doesn't have one EF100 doesn't need to split up large DMAs. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/tx_common.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 2a058b76d1f0..11b64c609550 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -298,7 +298,11 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, /* Map the fragment taking account of NIC-dependent DMA limits. */ do { buffer = efx_tx_queue_get_insert_buffer(tx_queue); - dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); + + if (nic_type->tx_limit_len) + dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); + else + dma_len = len; buffer->len = dma_len; buffer->dma_addr = dma_addr; From 965470ee76988af234e23d559f5915747fe1b073 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:30:39 +0100 Subject: [PATCH 0630/2056] sfc: factor out efx_mcdi_filter_table_down() from _remove() _down() merely removes all our filters and VLANs, it doesn't free efx->filter_state itself. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_filters.c | 37 ++++++++++++++++--------- drivers/net/ethernet/sfc/mcdi_filters.h | 1 + 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 74ee06fe0996..283f68264b66 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -1459,7 +1459,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) table->must_restore_filters = false; } -void efx_mcdi_filter_table_remove(struct efx_nic *efx) +void efx_mcdi_filter_table_down(struct efx_nic *efx) { struct efx_mcdi_filter_table *table = efx->filter_state; MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); @@ -1467,21 +1467,11 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx) unsigned int filter_idx; int rc; - efx_mcdi_filter_cleanup_vlans(efx); - efx->filter_state = NULL; - /* - * If we were called without locking, then it's not safe to free - * the table as others might be using it. So we just WARN, leak - * the memory, and potentially get an inconsistent filter table - * state. - * This should never actually happen. - */ - if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) - return; - if (!table) return; + efx_mcdi_filter_cleanup_vlans(efx); + for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { spec = efx_mcdi_filter_entry_spec(table, filter_idx); if (!spec) @@ -1501,6 +1491,27 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx) __func__, filter_idx); kfree(spec); } +} + +void efx_mcdi_filter_table_remove(struct efx_nic *efx) +{ + struct efx_mcdi_filter_table *table = efx->filter_state; + + efx_mcdi_filter_table_down(efx); + + efx->filter_state = NULL; + /* + * If we were called without locking, then it's not safe to free + * the table as others might be using it. So we just WARN, leak + * the memory, and potentially get an inconsistent filter table + * state. + * This should never actually happen. + */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + if (!table) + return; vfree(table->entry); kfree(table); diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h index 03a8bf74c733..23f9d08d071d 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.h +++ b/drivers/net/ethernet/sfc/mcdi_filters.h @@ -93,6 +93,7 @@ struct efx_mcdi_filter_table { }; int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining); +void efx_mcdi_filter_table_down(struct efx_nic *efx); void efx_mcdi_filter_table_remove(struct efx_nic *efx); void efx_mcdi_filter_table_restore(struct efx_nic *efx); From d700fe014ec13cf96df8df4de6ee46034008ff85 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:30:56 +0100 Subject: [PATCH 0631/2056] sfc: commonise efx_fini_dmaq Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 42 ++--------------------- drivers/net/ethernet/sfc/mcdi_functions.c | 38 ++++++++++++++++++++ drivers/net/ethernet/sfc/mcdi_functions.h | 1 + 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 311a2d2a906d..d2101d2a55be 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3117,44 +3117,6 @@ static void efx_ef10_ev_test_generate(struct efx_channel *channel) netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); } -static int efx_ef10_fini_dmaq(struct efx_nic *efx) -{ - struct efx_tx_queue *tx_queue; - struct efx_rx_queue *rx_queue; - struct efx_channel *channel; - int pending; - - /* If the MC has just rebooted, the TX/RX queues will have already been - * torn down, but efx->active_queues needs to be set to zero. - */ - if (efx->must_realloc_vis) { - atomic_set(&efx->active_queues, 0); - return 0; - } - - /* Do not attempt to write to the NIC during EEH recovery */ - if (efx->state != STATE_RECOVERY) { - efx_for_each_channel(channel, efx) { - efx_for_each_channel_rx_queue(rx_queue, channel) - efx_mcdi_rx_fini(rx_queue); - efx_for_each_channel_tx_queue(tx_queue, channel) - efx_mcdi_tx_fini(tx_queue); - } - - wait_event_timeout(efx->flush_wq, - atomic_read(&efx->active_queues) == 0, - msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); - pending = atomic_read(&efx->active_queues); - if (pending) { - netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", - pending); - return -ETIMEDOUT; - } - } - - return 0; -} - static void efx_ef10_prepare_flr(struct efx_nic *efx) { atomic_set(&efx->active_queues, 0); @@ -4026,7 +3988,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .reset = efx_ef10_reset, .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, - .fini_dmaq = efx_ef10_fini_dmaq, + .fini_dmaq = efx_fini_dmaq, .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, @@ -4134,7 +4096,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .reset = efx_ef10_reset, .probe_port = efx_mcdi_port_probe, .remove_port = efx_mcdi_port_remove, - .fini_dmaq = efx_ef10_fini_dmaq, + .fini_dmaq = efx_fini_dmaq, .prepare_flr = efx_ef10_prepare_flr, .finish_flr = efx_port_dummy_op_void, .describe_stats = efx_ef10_describe_stats, diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c index b3a8aa88db06..92b9a741c286 100644 --- a/drivers/net/ethernet/sfc/mcdi_functions.c +++ b/drivers/net/ethernet/sfc/mcdi_functions.c @@ -341,6 +341,44 @@ void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue) outbuf, outlen, rc); } +int efx_fini_dmaq(struct efx_nic *efx) +{ + struct efx_tx_queue *tx_queue; + struct efx_rx_queue *rx_queue; + struct efx_channel *channel; + int pending; + + /* If the MC has just rebooted, the TX/RX queues will have already been + * torn down, but efx->active_queues needs to be set to zero. 
+ */ + if (efx->must_realloc_vis) { + atomic_set(&efx->active_queues, 0); + return 0; + } + + /* Do not attempt to write to the NIC during EEH recovery */ + if (efx->state != STATE_RECOVERY) { + efx_for_each_channel(channel, efx) { + efx_for_each_channel_rx_queue(rx_queue, channel) + efx_mcdi_rx_fini(rx_queue); + efx_for_each_channel_tx_queue(tx_queue, channel) + efx_mcdi_tx_fini(tx_queue); + } + + wait_event_timeout(efx->flush_wq, + atomic_read(&efx->active_queues) == 0, + msecs_to_jiffies(EFX_MAX_FLUSH_TIME)); + pending = atomic_read(&efx->active_queues); + if (pending) { + netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n", + pending); + return -ETIMEDOUT; + } + } + + return 0; +} + int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode) { switch (vi_window_mode) { diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h index ca4a5ac1a66b..687be8b00cd8 100644 --- a/drivers/net/ethernet/sfc/mcdi_functions.h +++ b/drivers/net/ethernet/sfc/mcdi_functions.h @@ -26,6 +26,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue); void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue); void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue); void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue); +int efx_fini_dmaq(struct efx_nic *efx); int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode); int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index); From 31f4cbd4014360ec45ddb19fa1bf5ae97e42ec5b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:31:19 +0100 Subject: [PATCH 0632/2056] sfc: initialise RSS context ID to 'no RSS context' in efx_init_struct() Previously this was only happening in ef10-specific code. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 -- drivers/net/ethernet/sfc/efx_common.c | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d2101d2a55be..cb7b634a1150 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -552,8 +552,6 @@ static int efx_ef10_probe(struct efx_nic *efx) } nic_data->warm_boot_count = rc; - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - /* In case we're recovering from a crash (kexec), we want to * cancel any outstanding request by the previous user of this * function. We send a special message using the least diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index c84123456c01..5667694c6514 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1017,6 +1017,7 @@ int efx_init_struct(struct efx_nic *efx, efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; INIT_LIST_HEAD(&efx->rss_context.list); + efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; mutex_init(&efx->rss_lock); efx->vport_id = EVB_PORT_ID_ASSIGNED; spin_lock_init(&efx->stats_lock); From bcacac7a8cd9939a991fd20acb1b9f57251363b9 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:31:43 +0100 Subject: [PATCH 0633/2056] sfc: get drvinfo driver name from outside the common code Since ethtool_common.o will be built into both sfc and sfc_ef100 drivers, it can't use KBUILD_MODNAME directly. Instead, make it reference a string provided by the individual driver code. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ethtool.c | 2 ++ drivers/net/ethernet/sfc/ethtool_common.c | 2 +- drivers/net/ethernet/sfc/ethtool_common.h | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 48a96ed6b7d0..9828516bd82d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -221,6 +221,8 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, return 0; } +const char *efx_driver_name = KBUILD_MODNAME; + const struct ethtool_ops efx_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 37a4409e759e..e9a5a66529bf 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -104,7 +104,7 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); - strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->driver, efx_driver_name, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); efx_mcdi_print_fwver(efx, info->fw_version, sizeof(info->fw_version)); diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index 7bfbbd08a1ef..3f3aaa92fbb5 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -11,6 +11,8 @@ #ifndef EFX_ETHTOOL_COMMON_H #define EFX_ETHTOOL_COMMON_H +extern const char *efx_driver_name; + void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info); u32 efx_ethtool_get_msglevel(struct net_device *net_dev); From 805d22bf92f1541cbb5b5544f628864610e2c9a2 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:32:04 +0100 Subject: [PATCH 0634/2056] sfc_ef100: add EF100 to NIC-revision enumeration Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/nic_common.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index 813f288ab3fe..e04b6817cde3 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -21,6 +21,7 @@ enum { */ EFX_REV_SIENA_A0 = 3, EFX_REV_HUNT_A0 = 4, + EFX_REV_EF100 = 5, }; static inline int efx_nic_rev(struct efx_nic *efx) From 39c965f4e663fa53955f77435068748448514da1 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:32:24 +0100 Subject: [PATCH 0635/2056] sfc_ef100: populate BUFFER_SIZE_BYTES in INIT_RXQ The QDMA subsystem on EF100 needs this information. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_functions.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c index 92b9a741c286..d8a3af86ef78 100644 --- a/drivers/net/ethernet/sfc/mcdi_functions.c +++ b/drivers/net/ethernet/sfc/mcdi_functions.c @@ -267,20 +267,22 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue) void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) { - MCDI_DECLARE_BUF(inbuf, - MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / - EFX_BUF_SIZE)); struct efx_channel *channel = efx_rx_queue_channel(rx_queue); size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE; + MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN); struct efx_nic *efx = rx_queue->efx; + unsigned int buffer_size; dma_addr_t dma_addr; - size_t inlen; int rc; int i; BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0); rx_queue->scatter_n = 0; rx_queue->scatter_len = 0; + if (efx->type->revision == EFX_REV_EF100) + buffer_size = efx->rx_page_buf_step; + else + buffer_size = 0; MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel); @@ -292,6 +294,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) INIT_RXQ_IN_FLAG_TIMESTAMP, 1); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0); MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id); + MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size); dma_addr = rx_queue->rxd.buf.dma_addr; @@ -303,9 +306,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue) dma_addr += EFX_BUF_SIZE; } - inlen = MC_CMD_INIT_RXQ_IN_LEN(entries); - - rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen, + rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf), NULL, 0, NULL); if (rc) netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n", From b3007dfd5b059cc873a8b4c24a1e337cdcec9011 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:34:27 +0100 Subject: [PATCH 0636/2056] sfc_ef100: NVRAM selftest support code We have yet another new scheme for NVRAM, and a corresponding new MCDI. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi.c | 62 +++++++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/mcdi.h | 1 + 2 files changed, 63 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 244fb621d17b..6c49740a178e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1621,6 +1621,35 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) return rc; } +/* This function finds types using the new NVRAM_PARTITIONS mcdi. 
*/ +static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number, + u32 *nvram_types) +{ + efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + size_t outlen; + int rc; + + if (!outbuf) + return -ENOMEM; + + BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0, + outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen); + if (rc) + goto fail; + + *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS); + + memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID), + *number * sizeof(u32)); + +fail: + kfree(outbuf); + return rc; +} + int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, bool *protected_out) @@ -1674,6 +1703,39 @@ static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) } } +/* This function tests nvram partitions using the new mcdi partition lookup scheme */ +int efx_new_mcdi_nvram_test_all(struct efx_nic *efx) +{ + u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, + GFP_KERNEL); + unsigned int number; + int rc, i; + + if (!nvram_types) + return -ENOMEM; + + rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types); + if (rc) + goto fail; + + /* Require at least one check */ + rc = -EAGAIN; + + for (i = 0; i < number; i++) { + if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP || + nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG) + continue; + + rc = efx_mcdi_nvram_test(efx, nvram_types[i]); + if (rc) + goto fail; + } + +fail: + kfree(nvram_types); + return rc; +} + int efx_mcdi_nvram_test_all(struct efx_nic *efx) { u32 nvram_types; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 10f064f761a5..e053adfe82b0 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -345,6 +345,7 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, size_t *size_out, size_t *erase_size_out, bool *protected_out); +int efx_new_mcdi_nvram_test_all(struct efx_nic *efx); int efx_mcdi_nvram_test_all(struct efx_nic *efx); int efx_mcdi_handle_assertion(struct efx_nic *efx); void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); From b6d02dd2ffd4ac9b08a963762eb3d1ee0502ffc6 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 2 Jul 2020 17:34:43 +0100 Subject: [PATCH 0637/2056] sfc_ef100: helper function to set default RSS table of given size Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/mcdi_filters.c | 21 +++++++++++++++++++++ drivers/net/ethernet/sfc/mcdi_filters.h | 2 ++ 2 files changed, 23 insertions(+) diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 283f68264b66..5a74d880b733 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -2276,3 +2276,24 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, return 0; return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL); } + +int efx_mcdi_push_default_indir_table(struct efx_nic *efx, + unsigned int rss_spread) +{ + int rc = 0; + + if (efx->rss_spread == rss_spread) + return 0; + + efx->rss_spread = rss_spread; + if (!efx->filter_state) + return 0; + + efx_mcdi_rx_free_indir_table(efx); + if (rss_spread > 1) { + efx_set_default_rx_indir_table(efx, &efx->rss_context); + rc = efx->type->rx_push_rss_config(efx, false, + efx->rss_context.rx_indir_table, NULL); + } + return rc; +} diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h index 23f9d08d071d..06426aa9f2f3 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.h +++ b/drivers/net/ethernet/sfc/mcdi_filters.h @@ -155,6 +155,8 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, __attribute__ ((unused)), const u8 *key __attribute__ ((unused))); +int efx_mcdi_push_default_indir_table(struct efx_nic *efx, + unsigned int rss_spread); int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx); int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, struct efx_rss_context *ctx); From 046cc3dd9a2507b8e111714807ab8bf15ea5bb70 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 2 Jul 2020 19:45:37 -0700 Subject: [PATCH 0638/2056] bpf: Fix build without CONFIG_STACKTRACE Without CONFIG_STACKTRACE stack_trace_save_tsk() is not defined. Let get_callchain_entry_for_task() to always return NULL in such cases. Fixes: fa28dcb82a38 ("bpf: Introduce helper bpf_get_task_stack()") Reported-by: kernel test robot Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200703024537.79971-1-songliubraving@fb.com --- kernel/bpf/stackmap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 5ad72ab2276b..a6c361ed7937 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -351,6 +351,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, static struct perf_callchain_entry * get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) { +#ifdef CONFIG_STACKTRACE struct perf_callchain_entry *entry; int rctx; @@ -380,6 +381,9 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) put_callchain_entry(rctx); return entry; +#else /* CONFIG_STACKTRACE */ + return NULL; +#endif } BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, From b3ea4c4fdc673acbb4d8333b7f4c9bd3a9287730 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 6 Apr 2020 16:00:35 +0300 Subject: [PATCH 0639/2056] net/mlx5e: Change reporters create functions to return void Creation of devlink health reporters is not fatal for mlx5e instance load. In case of error in reporter's creation, the return value is ignored. Change all reporters creation functions to return void. In addition, with this change, a failure in creating a reporter, will not prevent the driver from trying to create the next reporter in the list. 
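For illustration, the pattern this patch adopts, reduced to its essentials (the create call's arguments and some error paths are trimmed here; the full code is in the diff below):

  /* Reporter creation becomes best effort: a failure is logged and the
   * reporter pointer is simply left NULL, so the mlx5e instance keeps
   * loading and the next reporter is still attempted.
   */
  void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
  {
          struct devlink_health_reporter *reporter;

          reporter = devlink_health_reporter_create(/* ... unchanged ... */);
          if (IS_ERR(reporter)) {
                  netdev_warn(priv->netdev,
                              "Failed to create rx reporter, err = %ld\n",
                              PTR_ERR(reporter));
                  return;         /* non-fatal */
          }
          priv->rx_reporter = reporter;
  }

  void mlx5e_health_create_reporters(struct mlx5e_priv *priv)
  {
          mlx5e_reporter_tx_create(priv); /* a TX failure no longer ...      */
          mlx5e_reporter_rx_create(priv); /* ... skips creating the RX one   */
  }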
Signed-off-by: Eran Ben Elisha Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/health.c | 15 +++------------ .../net/ethernet/mellanox/mlx5/core/en/health.h | 6 +++--- .../ethernet/mellanox/mlx5/core/en/reporter_rx.c | 5 ++--- .../ethernet/mellanox/mlx5/core/en/reporter_tx.c | 5 ++--- 4 files changed, 10 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 7283443868f3..d0625ee923d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -97,19 +97,10 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * return 0; } -int mlx5e_health_create_reporters(struct mlx5e_priv *priv) +void mlx5e_health_create_reporters(struct mlx5e_priv *priv) { - int err; - - err = mlx5e_reporter_tx_create(priv); - if (err) - return err; - - err = mlx5e_reporter_rx_create(priv); - if (err) - return err; - - return 0; + mlx5e_reporter_tx_create(priv); + mlx5e_reporter_rx_create(priv); } void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 38f97f79ef16..895d03d56c9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -16,7 +16,7 @@ static inline bool cqe_syndrome_needs_recover(u8 syndrome) syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR; } -int mlx5e_reporter_tx_create(struct mlx5e_priv *priv); +void mlx5e_reporter_tx_create(struct mlx5e_priv *priv); void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); @@ -26,7 +26,7 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); -int mlx5e_reporter_rx_create(struct mlx5e_priv *priv); +void mlx5e_reporter_rx_create(struct mlx5e_priv *priv); void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq); void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); @@ -46,7 +46,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv); int mlx5e_health_report(struct mlx5e_priv *priv, struct devlink_health_reporter *reporter, char *err_str, struct mlx5e_err_ctx *err_ctx); -int mlx5e_health_create_reporters(struct mlx5e_priv *priv); +void mlx5e_health_create_reporters(struct mlx5e_priv *priv); void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv); void mlx5e_health_channels_update(struct mlx5e_priv *priv); int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index c209579fc213..5161a1954577 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -563,7 +563,7 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { #define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500 -int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) +void mlx5e_reporter_rx_create(struct mlx5e_priv *priv) { struct devlink *devlink = priv_to_devlink(priv->mdev); struct 
devlink_health_reporter *reporter; @@ -575,10 +575,9 @@ int mlx5e_reporter_rx_create(struct mlx5e_priv *priv) if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", PTR_ERR(reporter)); - return PTR_ERR(reporter); + return; } priv->rx_reporter = reporter; - return 0; } void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9805fc085512..b95dc15f23b9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -406,7 +406,7 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500 -int mlx5e_reporter_tx_create(struct mlx5e_priv *priv) +void mlx5e_reporter_tx_create(struct mlx5e_priv *priv) { struct devlink_health_reporter *reporter; struct mlx5_core_dev *mdev = priv->mdev; @@ -421,10 +421,9 @@ int mlx5e_reporter_tx_create(struct mlx5e_priv *priv) netdev_warn(priv->netdev, "Failed to create tx reporter, err = %ld\n", PTR_ERR(reporter)); - return PTR_ERR(reporter); + return; } priv->tx_reporter = reporter; - return 0; } void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) From e74e28aee1a2382814eae8249651cc4747460d9f Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 18:50:49 +0300 Subject: [PATCH 0640/2056] net/mlx5e: Add a flush timeout define During queue's recovery, driver waits for flush. The flush timeout is set to 2 seconds. Add a define for this value for the benefit of RX and TX reporters. Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/health.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 3 ++- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 3 ++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 895d03d56c9d..2938553a7606 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -33,6 +33,7 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq); void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq); #define MLX5E_REPORTER_PER_Q_MAX_LEN 256 +#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000 struct mlx5e_err_ctx { int (*recover)(void *ctx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 5161a1954577..495a3e6bf82b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -29,7 +29,8 @@ static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq) { - unsigned long exp_time = jiffies + msecs_to_jiffies(2000); + unsigned long exp_time = jiffies + + msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC); while (time_before(jiffies, exp_time)) { if (icosq->cc == icosq->pc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index b95dc15f23b9..6eb2971231d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -5,7 +5,8 @@ static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq) { - unsigned long 
exp_time = jiffies + msecs_to_jiffies(2000); + unsigned long exp_time = jiffies + + msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC); while (time_before(jiffies, exp_time)) { if (sq->cc == sq->pc) From b9961af7b8acf6420845cd1660228fe374710df9 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 11:24:41 +0300 Subject: [PATCH 0641/2056] net/mlx5e: Remove redundant RQ state query When received a CQE error, the driver inspect the syndrome given by the firmware. RQ recovery is initiated only as a result of a fatal syndrome; syndrome which set the RQ into an error state. Hence no need to query the RQ state at the beginning of the recovery process. Add additional debug prints before recovering. Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/reporter_rx.c | 18 +----------------- .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 6 +++--- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 4 +++- 3 files changed, 7 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 495a3e6bf82b..b8b32aef1363 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -124,25 +124,9 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state) static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx) { - struct mlx5_core_dev *mdev; - struct net_device *dev; - struct mlx5e_rq *rq; - u8 state; + struct mlx5e_rq *rq = ctx; int err; - rq = ctx; - mdev = rq->mdev; - dev = rq->netdev; - err = mlx5e_query_rq_state(mdev, rq->rqn, &state); - if (err) { - netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n", - rq->rqn, err); - goto out; - } - - if (state != MLX5_RQC_STATE_ERR) - goto out; - mlx5e_deactivate_rq(rq); mlx5e_free_rx_descs(rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index e9d4a61b6bbb..be7692897fc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -268,7 +268,7 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) } } -static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn, +static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, struct mlx5_err_cqe *err_cqe) { struct mlx5_cqwq *wq = &cq->wq; @@ -277,8 +277,8 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn, ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); netdev_err(cq->channel->netdev, - "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", - cq->mcq.cqn, ci, sqn, + "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", + cq->mcq.cqn, ci, qn, get_cqe_opcode((struct mlx5_cqe64 *)err_cqe), err_cqe->syndrome, err_cqe->vendor_err_synd); mlx5_dump_err_cqe(cq->mdev, err_cqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8b42f729a4f7..350f9c54e508 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1187,8 +1187,10 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; if (cqe_syndrome_needs_recover(err_cqe->syndrome) && - !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) + !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { 
+ mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); queue_work(rq->channel->priv->wq, &rq->recover_work); + } } void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) From 4537f524b4401ae9a741b849c5e0d49c9b2b6803 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 12:18:22 +0300 Subject: [PATCH 0642/2056] net/mlx5e: Align RX/TX reporters diagnose output format Change the hierarchy of the RX reporter 'Common config' in the diagnose output to match the 'Common config' of the TX reporter which reflects that CQ is a helper to the traffic queues. Before: $ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: ... After: $ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: ... Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index b8b32aef1363..f1edde1ab8bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -284,11 +284,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); if (err) goto unlock; - err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); + err = mlx5e_reporter_named_obj_nest_end(fmsg); if (err) goto unlock; From 5d95c816608cd6e7ccfe3d4c8083465973b9f3d1 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 13:30:51 +0300 Subject: [PATCH 0643/2056] net/mlx5e: Move RQ helpers to txrx.h Use txrx.h to contain helper function regarding TX/RX. In the coming patches, I will add more RQ helpers. 
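For reference, the moved helpers all share the same shape: they hide which WQ flavor backs the RQ, so callers such as the reporters never branch on rq->wq_type themselves. One of them, reproduced from the code being moved, with explanatory comments added:

  static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
  {
          switch (rq->wq_type) {
          case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                  /* striding RQ: backed by a linked-list WQ */
                  return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
          default:
                  /* legacy RQ: backed by a cyclic WQ */
                  return mlx5_wq_cyc_get_size(&rq->wqe.wq);
          }
  }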
Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 20 ------------------- .../mellanox/mlx5/core/en/reporter_rx.c | 1 + .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 20 +++++++++++++++++++ 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2957edb7e0b7..c44669102626 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -852,26 +852,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); -static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) -{ - switch (rq->wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return mlx5_wq_ll_get_size(&rq->mpwqe.wq); - default: - return mlx5_wq_cyc_get_size(&rq->wqe.wq); - } -} - -static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) -{ - switch (rq->wq_type) { - case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: - return rq->mpwqe.wq.cur_sz; - default: - return rq->wqe.wq.cur_sz; - } -} - bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index f1edde1ab8bc..bfdf9c185f02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -3,6 +3,7 @@ #include "health.h" #include "params.h" +#include "txrx.h" static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index be7692897fc1..ed9e0b8a6f9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -284,6 +284,26 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn, mlx5_dump_err_cqe(cq->mdev, err_cqe); } +static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_size(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_size(&rq->wqe.wq); + } +} + +static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return rq->mpwqe.wq.cur_sz; + default: + return rq->wqe.wq.cur_sz; + } +} + /* SW parser related functions */ struct mlx5e_swp_spec { From fc42d0de16debb2c8ba83e1345a55a18ff87e2d9 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 13:36:26 +0300 Subject: [PATCH 0644/2056] net/mlx5e: Add helper to get RQ WQE's head Add helper which retrieves the RQ WQE's head. Use this helper in RX reporter diagnose callback. 
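The effect at the call site, taken from the diff below: the reporter no longer needs to reach into priv->channels.params to pick the right WQ, which is why the params local disappears as well.

  /* before */
  wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
            rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);

  /* after */
  wq_head = mlx5e_rqwq_get_head(rq);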
Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 5 +---- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 10 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/wq.h | 4 ++++ 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index bfdf9c185f02..f0e639ef4ec5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -181,7 +181,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = rq->channel->priv; - struct mlx5e_params *params; struct mlx5e_icosq *icosq; u8 icosq_hw_state; int wqes_sz; @@ -189,7 +188,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, u16 wq_head; int err; - params = &priv->channels.params; icosq = &rq->channel->icosq; err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state); if (err) @@ -200,8 +198,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, return err; wqes_sz = mlx5e_rqwq_get_cur_sz(rq); - wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? - rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq); + wq_head = mlx5e_rqwq_get_head(rq); err = devlink_fmsg_obj_nest_start(fmsg); if (err) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index ed9e0b8a6f9e..d787e8fc2e99 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -304,6 +304,16 @@ static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq) } } +static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_head(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_head(&rq->wqe.wq); + } +} + /* SW parser related functions */ struct mlx5e_swp_spec { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 4cadc336593f..27dece35df7e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -290,4 +290,8 @@ static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) *wq->db = cpu_to_be32(wq->wqe_ctr); } +static inline u16 mlx5_wq_ll_get_head(struct mlx5_wq_ll *wq) +{ + return wq->head; +} #endif /* __MLX5_WQ_H__ */ From de6c6ab7e8c6431d67d1a87477fee2cfc0fa0845 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 14:41:13 +0300 Subject: [PATCH 0645/2056] net/mlx5e: Add helper to get the RQ WQE counter Add a helper which retrieves the RQ's WQE counter. Use this helper in the RX reporter diagnose callback. 
$ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: channel ix: 0 rqn: 2113 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1032 HW status: 0 channel ix: 1 rqn: 2118 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1036 HW status: 0 Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 6 ++++++ drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 10 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/wq.h | 11 +++++++++++ 3 files changed, 27 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index f0e639ef4ec5..bec804295c52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -183,6 +183,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, struct mlx5e_priv *priv = rq->channel->priv; struct mlx5e_icosq *icosq; u8 icosq_hw_state; + u16 wqe_counter; int wqes_sz; u8 hw_state; u16 wq_head; @@ -199,6 +200,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, wqes_sz = mlx5e_rqwq_get_cur_sz(rq); wq_head = mlx5e_rqwq_get_head(rq); + wqe_counter = mlx5e_rqwq_get_wqe_counter(rq); err = devlink_fmsg_obj_nest_start(fmsg); if (err) @@ -220,6 +222,10 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; + err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index d787e8fc2e99..cf425a60cddc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -314,6 +314,16 @@ static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq) } } +static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq) +{ + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return mlx5_wq_ll_get_counter(&rq->mpwqe.wq); + default: + return mlx5_wq_cyc_get_counter(&rq->wqe.wq); + } +} + /* SW parser related functions */ struct mlx5e_swp_spec { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 27dece35df7e..e5c4dcd1425e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -172,6 +172,11 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) return !equal && !smaller; } +static inline u16 mlx5_wq_cyc_get_counter(struct mlx5_wq_cyc *wq) +{ + return wq->wqe_ctr; +} + static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) { return wq->fbc.sz_m1 + 1; @@ -294,4 +299,10 @@ static inline u16 mlx5_wq_ll_get_head(struct mlx5_wq_ll *wq) { return wq->head; } + +static inline u16 mlx5_wq_ll_get_counter(struct mlx5_wq_ll *wq) +{ + return wq->wqe_ctr; +} + #endif /* __MLX5_WQ_H__ */ From d5cbedd7fcb3f3a102d2b485acb5c09ac8085e49 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Mon, 18 May 2020 09:21:23 +0300 Subject: [PATCH 0646/2056] net/mlx5e: Rename reporter's helpers Change prefix to match resident file: %s/mlx5e_reporter_cq_diagnose/mlx5e_health_cq_diag_fmsg 
%s/mlx5e_reporter_cq_common_diagnose/mlx5e_health_cq_common_diag_fmsg %s/mlx5e_reporter_named_obj_nest_start/mlx5e_health_fmsg_named_obj_nest_start %s/mlx5e_reporter_named_obj_nest_end/mlx5e_health_fmsg_named_obj_nest_end Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/health.c | 20 ++++---- .../ethernet/mellanox/mlx5/core/en/health.h | 8 ++-- .../mellanox/mlx5/core/en/reporter_rx.c | 48 +++++++++---------- .../mellanox/mlx5/core/en/reporter_tx.c | 32 ++++++------- 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index d0625ee923d6..1b735b54b3ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -5,7 +5,7 @@ #include "lib/eq.h" #include "lib/mlx5.h" -int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) +int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) { int err; @@ -20,7 +20,7 @@ int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name) return 0; } -int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg) +int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg) { int err; @@ -35,7 +35,7 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg) return 0; } -int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { struct mlx5e_priv *priv = cq->channel->priv; u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {}; @@ -50,7 +50,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context); hw_status = MLX5_GET(cqc, cqc, status); - err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); if (err) return err; @@ -62,14 +62,14 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; return 0; } -int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) +int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) { u8 cq_log_stride; u32 cq_sz; @@ -78,7 +78,7 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * cq_sz = mlx5_cqwq_get_size(&cq->wq); cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); - err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); if (err) return err; @@ -90,7 +90,7 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg * if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; @@ -282,7 +282,7 @@ int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, lbl); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl); if (err) return err; @@ -294,7 +294,7 @@ int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 2938553a7606..6e48518d3d5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -21,10 +21,10 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv); void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq); int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); -int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); -int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); -int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg); +int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); +int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); void mlx5e_reporter_rx_create(struct mlx5e_priv *priv); void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index bec804295c52..4e1a01d871b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -238,7 +238,7 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg); + err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); if (err) return err; @@ -268,11 +268,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, rq_sz = mlx5e_rqwq_get_size(generic_rq); rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL)); - err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config"); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); if (err) goto unlock; @@ -288,15 +288,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter, if (err) goto unlock; - err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg); + err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) goto unlock; @@ -329,7 +329,7 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); if (err) return err; @@ -339,15 +339,15 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "ICOSQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC"); + err = 
mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); if (err) return err; @@ -359,11 +359,11 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); if (err) return err; @@ -374,11 +374,11 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_ if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - return mlx5e_reporter_named_obj_nest_end(fmsg); + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg, @@ -391,7 +391,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); if (err) return err; @@ -401,15 +401,15 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ"); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); if (err) return err; @@ -421,11 +421,11 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "receive_buff"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff"); if (err) return err; @@ -435,11 +435,11 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - return mlx5e_reporter_named_obj_nest_end(fmsg); + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, @@ -451,7 +451,7 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice"); if (err) return err; @@ -461,7 +461,7 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv, if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 6eb2971231d8..9f712ff2faf9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -166,7 +166,7 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; - err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg); + err 
= mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg); if (err) return err; @@ -195,11 +195,11 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); sq_stride = MLX5_SEND_WQE_BB; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config"); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) goto unlock; @@ -211,15 +211,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, if (err) goto unlock; - err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg); + err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) goto unlock; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) goto unlock; @@ -257,7 +257,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); if (err) return err; @@ -267,15 +267,15 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ"); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC"); if (err) return err; @@ -287,11 +287,11 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff"); if (err) return err; @@ -301,11 +301,11 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; - return mlx5e_reporter_named_obj_nest_end(fmsg); + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); } static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, @@ -317,7 +317,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return 0; - err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice"); + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice"); if (err) return err; @@ -327,7 +327,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, if (err) return err; - err = mlx5e_reporter_named_obj_nest_end(fmsg); + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; From 3c9d1699b8575bbb622ce3e8880b9ae41a905969 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Sun, 3 May 2020 15:04:15 +0300 Subject: [PATCH 0647/2056] net/mlx5e: Enhance CQ data on diagnose output Add CQ's consumer index and size to the CQ's diagnose output retruved on RX/TX reporter diadgnose. 
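To connect the code to the output below: the diagnose callbacks emit nested named objects into the devlink fmsg, and each *_pair_put() becomes one "name: value" entry under the enclosing object. A condensed sketch of the CQ part (error checks dropped for brevity; the helper names are the ones used throughout this series):

  /* inside mlx5e_health_cq_diag_fmsg(), conceptually */
  mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");                   /* CQ:       */
  devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);                  /* cqn: ...  */
  devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));     /* new */
  devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); /* new */
  mlx5e_health_fmsg_named_obj_nest_end(fmsg);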
$ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: channel ix: 0 rqn: 2413 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1032 HW status: 0 ci: 0 size: 1024 channel ix: 1 rqn: 2418 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1036 HW status: 0 ci: 0 size: 1024 $ devlink health diagnose pci/0000:00:0b.0 reporter tx Common Config: SQ: stride size: 64 size: 1024 CQ: stride size: 64 size: 1024 SQs: channel ix: 0 tc: 0 txq ix: 0 sqn: 2412 HW state: 1 stopped: false cc: 0 pc: 0 CQ: cqn: 1030 HW status: 0 ci: 0 size: 1024 channel ix: 1 tc: 0 txq ix: 1 sqn: 2417 HW state: 1 stopped: false cc: 5 pc: 5 CQ: cqn: 1034 HW status: 0 ci: 5 size: 1024 Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 1b735b54b3ab..4bd46e109dbe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -62,6 +62,14 @@ int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg) if (err) return err; + err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq)); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq)); + if (err) + return err; + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); if (err) return err; From 56837c2ae1e716fd29972a6314cb064947718454 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 30 Apr 2020 18:47:39 +0300 Subject: [PATCH 0648/2056] net/mlx5e: Add EQ info to TX/RX reporter's diagnose Enhance TX/RX reporter's diagnose to include info about the corresponding EQ. 
$ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: channel ix: 0 rqn: 1713 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1032 HW status: 0 ci: 0 size: 1024 EQ: eqn: 7 irqn: 42 vecidx: 1 ci: 93 size: 2048 channel ix: 1 rqn: 1718 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 ICOSQ HW state: 1 CQ: cqn: 1036 HW status: 0 ci: 0 size: 1024 EQ: eqn: 8 irqn: 43 vecidx: 2 ci: 2 size: 2048 $ devlink health diagnose pci/0000:00:0b.0 reporter tx Common Config: SQ: stride size: 64 size: 1024 CQ: stride size: 64 size: 1024 SQs: channel ix: 0 tc: 0 txq ix: 0 sqn: 1712 HW state: 1 stopped: false cc: 91 pc: 91 CQ: cqn: 1030 HW status: 0 ci: 91 size: 1024 EQ: eqn: 7 irqn: 42 vecidx: 1 ci: 93 size: 2048 channel ix: 1 tc: 0 txq ix: 1 sqn: 1717 HW state: 1 stopped: false cc: 0 pc: 0 CQ: cqn: 1034 HW status: 0 ci: 0 size: 1024 EQ: eqn: 8 irqn: 43 vecidx: 2 ci: 2 size: 2048 Signed-off-by: Aya Levin Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/health.c | 31 +++++++++++++++++++ .../ethernet/mellanox/mlx5/core/en/health.h | 1 + .../mellanox/mlx5/core/en/reporter_rx.c | 4 +++ .../mellanox/mlx5/core/en/reporter_tx.c | 4 +++ 4 files changed, 40 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 4bd46e109dbe..3dc200bcfabd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -105,6 +105,37 @@ int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *f return 0; } +int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg) +{ + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ"); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + void mlx5e_health_create_reporters(struct mlx5e_priv *priv) { mlx5e_reporter_tx_create(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h index 6e48518d3d5b..b9aadddfd000 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h @@ -23,6 +23,7 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq); int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg); +int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg); int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name); int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 4e1a01d871b7..5f7fba74cfd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -242,6 +242,10 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; + err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg); + if (err) + return err; + err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9f712ff2faf9..465c7cc8d909 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -170,6 +170,10 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg, if (err) return err; + err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg); + if (err) + return err; + err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; From b84921129bc85bea22551e98def8e70e2a44c043 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Sun, 3 May 2020 17:02:41 +0300 Subject: [PATCH 0649/2056] net/mlx5e: Enhance ICOSQ data on RX reporter's diagnose When the RQ is in striding RQ mode, it uses the ICOSQ as a helper queue. In this mode, RX reporter dumps more info about the ICOSQ and its related CQ. $ devlink health diagnose pci/0000:00:0b.0 reporter rx Common config: RQ: type: 2 stride size: 2048 size: 8 CQ: stride size: 64 size: 1024 RQs: channel ix: 0 rqn: 2413 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 CQ: cqn: 1032 HW status: 0 ci: 0 size: 1024 EQ: eqn: 7 irqn: 42 vecidx: 1 ci: 93 size: 2048 ICOSQ: sqn: 2411 HW state: 1 cc: 74 pc: 74 WQE size: 128 CQ: cqn: 1029 cc: 8 size: 128 channel ix: 1 rqn: 2418 HW state: 1 SW state: 5 WQE counter: 7 posted WQEs: 7 cc: 7 CQ: cqn: 1036 HW status: 0 ci: 0 size: 1024 EQ: eqn: 8 irqn: 43 vecidx: 2 ci: 2 size: 2048 ICOSQ: sqn: 2416 HW state: 1 cc: 74 pc: 74 WQE size: 128 CQ: cqn: 1033 cc: 8 size: 128 Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/reporter_rx.c | 61 +++++++++++++++++-- 1 file changed, 57 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 5f7fba74cfd4..32ed1067e6dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -177,6 +177,59 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter, mlx5e_health_recover_channels(priv); } +static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state, + struct devlink_fmsg *fmsg) +{ + int err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ"); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn); + if (err) + return err; + + err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "WQE size", + mlx5_wq_cyc_get_size(&icosq->wq)); + if (err) + return err; + + err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ"); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq)); + if (err) + return 
err; + + err = mlx5e_health_fmsg_named_obj_nest_end(fmsg); + if (err) + return err; + + return mlx5e_health_fmsg_named_obj_nest_end(fmsg); +} + static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, struct devlink_fmsg *fmsg) { @@ -234,10 +287,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; - err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state); - if (err) - return err; - err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg); if (err) return err; @@ -246,6 +295,10 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq, if (err) return err; + err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg); + if (err) + return err; + err = devlink_fmsg_obj_nest_end(fmsg); if (err) return err; From e62055642797a6de80f3576c18e212cbbf5b4361 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Mon, 18 May 2020 12:31:38 +0300 Subject: [PATCH 0650/2056] net/mlx5e: Enhance TX timeout recovery Upon a TX timeout handle, if the TX reporter was not able to recover from the error, reopen the channels. If tried to reopen channels, do not loop over TX queues for timeout. With that, the reporters state and separation will better expose the driver's state. Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en/reporter_tx.c | 36 ++++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/en_main.c | 14 ++------ 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 465c7cc8d909..826584380216 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -83,17 +83,40 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx) return err; } +struct mlx5e_tx_timeout_ctx { + struct mlx5e_txqsq *sq; + signed int status; +}; + static int mlx5e_tx_reporter_timeout_recover(void *ctx) { + struct mlx5e_tx_timeout_ctx *to_ctx; + struct mlx5e_priv *priv; struct mlx5_eq_comp *eq; struct mlx5e_txqsq *sq; int err; - sq = ctx; + to_ctx = ctx; + sq = to_ctx->sq; eq = sq->cq.mcq.eq; + priv = sq->channel->priv; err = mlx5e_health_channel_eq_recover(eq, sq->channel); - if (err) - clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + if (!err) { + to_ctx->status = 0; /* this sq recovered */ + return err; + } + + err = mlx5e_safe_reopen_channels(priv); + if (!err) { + to_ctx->status = 1; /* all channels recovered */ + return err; + } + + to_ctx->status = err; + clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); + netdev_err(priv->netdev, + "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", + err); return err; } @@ -389,9 +412,11 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) { struct mlx5e_priv *priv = sq->channel->priv; char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN]; + struct mlx5e_tx_timeout_ctx to_ctx = {}; struct mlx5e_err_ctx err_ctx = {}; - err_ctx.ctx = sq; + to_ctx.sq = sq; + err_ctx.ctx = &to_ctx; err_ctx.recover = mlx5e_tx_reporter_timeout_recover; err_ctx.dump = mlx5e_tx_reporter_dump_sq; snprintf(err_str, sizeof(err_str), @@ -399,7 +424,8 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq) sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc, jiffies_to_usecs(jiffies - sq->txq->trans_start)); - return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); + mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx); + return to_ctx.status; } static const 
struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 046cfb0ea180..b04c8572adea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4367,8 +4367,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, tx_timeout_work); - bool report_failed = false; - int err; int i; rtnl_lock(); @@ -4386,18 +4384,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work) continue; if (mlx5e_reporter_tx_timeout(sq)) - report_failed = true; + /* break if tried to reopened channels */ + break; } - if (!report_failed) - goto unlock; - - err = mlx5e_safe_reopen_channels(priv); - if (err) - netdev_err(priv->netdev, - "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n", - err); - unlock: mutex_unlock(&priv->state_lock); rtnl_unlock(); From 47ff6154fd234eb9efc353f8b71bcc669ddbde93 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 2 Jul 2020 21:57:00 -0700 Subject: [PATCH 0651/2056] net: bcmgenet: Allow changing carrier from user-space The GENET driver interfaces with internal MoCA interface as well as external MoCA chips like the BCM6802/6803 through a fixed link interface. It is desirable for the mocad user-space daemon to be able to control the carrier state based upon out of band messages that it receives from the MoCA chip. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index af924a8b885f..ee84a26bd8f3 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3647,6 +3647,22 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev) return &dev->stats; } +static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct bcmgenet_priv *priv = netdev_priv(dev); + + if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) || + priv->phy_interface != PHY_INTERFACE_MODE_MOCA) + return -EOPNOTSUPP; + + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + return 0; +} + static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_open = bcmgenet_open, .ndo_stop = bcmgenet_close, @@ -3660,6 +3676,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_poll_controller = bcmgenet_poll_controller, #endif .ndo_get_stats = bcmgenet_get_stats, + .ndo_change_carrier = bcmgenet_change_carrier, }; /* Array of GENET hardware parameters/characteristics */ From 91ffb9d3823098e8efea7520ad8082a4bfaee739 Mon Sep 17 00:00:00 2001 From: Daniel Drown Date: Fri, 3 Jul 2020 01:22:34 -0500 Subject: [PATCH 0652/2056] net/xen-netfront: add kernel TX timestamps This adds kernel TX timestamps to the xen-netfront driver. Tested with chrony on an AWS EC2 instance. Signed-off-by: Daniel Drown Signed-off-by: David S. 
Miller --- drivers/net/xen-netfront.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 860a0cce346d..ed995df19247 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -754,6 +754,9 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev /* First request has the packet length. */ first_tx->size = skb->len; + /* timestamp packet in software */ + skb_tx_timestamp(skb); + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); if (notify) notify_remote_via_irq(queue->tx_irq); @@ -2411,6 +2414,7 @@ static const struct ethtool_ops xennet_ethtool_ops = .get_sset_count = xennet_get_sset_count, .get_ethtool_stats = xennet_get_ethtool_stats, .get_strings = xennet_get_strings, + .get_ts_info = ethtool_op_get_ts_info, }; #ifdef CONFIG_SYSFS From 8ae4121bd89e3dce27b519ed469efbc15423af18 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 2 Jul 2020 21:31:59 -0700 Subject: [PATCH 0653/2056] bpf: Fix bpftool without skeleton code enabled Fix segfault from bpftool by adding emit_obj_refs_plain when skeleton code is disabled. Tested by deleting BUILD_BPF_SKELS in Makefile. We found this doing backports for Cilium when a testing image pulled in latest bpf-next bpftool, but kept using an older clang-7. # ./bpftool prog show Error: bpftool built without PID iterator support 3: cgroup_skb tag 7be49e3934a125ba gpl loaded_at 2020-07-01T08:01:29-0700 uid 0 Segmentation fault Fixes: d53dee3fe013 ("tools/bpftool: Show info for processes holding BPF map/prog/link/btf FDs") Reported-by: Joe Stringer Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/159375071997.14984.17404504293832961401.stgit@john-XPS-13-9370 --- tools/bpf/bpftool/pids.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 2709be4de2b1..7d5416667c85 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -19,6 +19,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) return -ENOTSUP; } void delete_obj_refs_table(struct obj_refs_table *table) {} +void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} #else /* BPFTOOL_WITHOUT_SKELETONS */ From 9ff79af3331277c69ac61cc75b2392eb3284e305 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 3 Jul 2020 11:17:19 -0700 Subject: [PATCH 0654/2056] selftests/bpf: Fix compilation error of bpf_iter_task_stack.c BPF selftests show a compilation error as follows: libbpf: invalid relo for 'entries' in special section 0xfff2; forgot to initialize global var?.. Fix it by initializing 'entries' to zeros. 
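For background, section index 0xfff2 in that error is SHN_COMMON: without an initializer, older clang defaults (-fcommon) emit the global as a COMMON symbol, which libbpf cannot relocate against, while a zero initializer moves the symbol into .bss, which libbpf maps as ordinary global data. A minimal sketch of the pattern (the size macro simply mirrors the test program):

#define MAX_STACK_TRACE_DEPTH 64

/* Without an initializer, clang built with the historical -fcommon
 * default emits 'entries' as a COMMON symbol (ELF section index
 * 0xfff2 == SHN_COMMON), which libbpf rejects with "invalid relo":
 *
 *     unsigned long entries[MAX_STACK_TRACE_DEPTH];
 *
 * Zero-initializing places the symbol in .bss instead, which libbpf
 * turns into an internal global-data map it can relocate against:
 */
unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
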
Fixes: c7568114bc56 ("selftests/bpf: Add bpf_iter test with bpf_get_task_stack()") Reported-by: Jesper Dangaard Brouer Signed-off-by: Song Liu Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200703181719.3747072-1-songliubraving@fb.com --- tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c index e40d32a2ed93..50e59a2e142e 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c @@ -7,7 +7,7 @@ char _license[] SEC("license") = "GPL"; #define MAX_STACK_TRACE_DEPTH 64 -unsigned long entries[MAX_STACK_TRACE_DEPTH]; +unsigned long entries[MAX_STACK_TRACE_DEPTH] = {}; #define SIZE_OF_ULONG (sizeof(unsigned long)) SEC("iter/task") From cd8700e45e73c5e5c32260ebc3092e50c1853e5d Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 3 Jul 2020 16:43:08 -0400 Subject: [PATCH 0655/2056] ipv6/ping: set skb->mark on icmpv6 sockets IPv6 ping sockets route based on fwmark, but do not yet set skb->mark. Add this. IPv4 ping sockets also do both. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv6/ping.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 98ac32b49d8c..6caa062f68e7 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -114,6 +114,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); ipcm6_init_sk(&ipc6, np); + ipc6.sockc.mark = sk->sk_mark; fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); From 78b348f3f15bc0c84bd3559be59266a54f313f64 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 3 Jul 2020 16:23:34 -0500 Subject: [PATCH 0656/2056] net: ipa: introduce ipa_clock_rate() Create a new function that returns the current rate of the IPA core clock. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_clock.c | 6 ++++++ drivers/net/ipa/ipa_clock.h | 8 ++++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c index c5204fd58ac4..0fbc8b1bdf41 100644 --- a/drivers/net/ipa/ipa_clock.c +++ b/drivers/net/ipa/ipa_clock.c @@ -256,6 +256,12 @@ void ipa_clock_put(struct ipa *ipa) mutex_unlock(&clock->mutex); } +/* Return the current IPA core clock rate */ +u32 ipa_clock_rate(struct ipa *ipa) +{ + return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0; +} + /* Initialize IPA clocking */ struct ipa_clock *ipa_clock_init(struct device *dev) { diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h index bc52b35e6bb2..0f4e2877a6df 100644 --- a/drivers/net/ipa/ipa_clock.h +++ b/drivers/net/ipa/ipa_clock.h @@ -10,6 +10,14 @@ struct device; struct ipa; +/** + * ipa_clock_rate() - Return the current IPA core clock rate + * @ipa: IPA structure + * + * Return: The current clock rate (in Hz), or 0. + */ +u32 ipa_clock_rate(struct ipa *ipa); + /** * ipa_clock_init() - Initialize IPA clocking * @dev: IPA device From f13a8c3189d7e31b6cb70333ee61365d66974399 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 3 Jul 2020 16:23:35 -0500 Subject: [PATCH 0657/2056] net: ipa: fix HOLB timer calculation For IPA v4.2, the exact interpretation of the register that defines the timeout for avoiding head-of-line blocking was a little unclear. 
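(Worked example of the encoding described in the rest of this message; the 100 MHz core clock and the 5-bit base field used here are illustrative assumptions, not values taken from hardware documentation. The register counts ticks of 128 core-clock cycles, and IPA v4.2 stores that count as base << scale. For a 1000 microsecond timeout at 100 MHz: ticks = 1000 * 100,000,000 / (128 * 1,000,000), which rounds to 781. fls(781) = 10, so with a 5-bit base field scale = 10 - 5 = 5; after rounding, base = (781 + 16) >> 5 = 24, and the register encodes 24 << 5 = 768 ticks, roughly 983 microseconds, the closest representable period.)
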
We're only assigning a 0 timeout to it right now, so that wasn't very important. But now that I know how it's supposed to work, I'm fixing it. The register represents a tick counter, where each tick is equal to 128 IPA core clock cycles. For IPA v3.5.1, the register contains a simple counter value. But for IPA v4.2, the register contains two fields, base and scale, which approximate the tick counter as: ticks = base << scale The base and scale values to use for a given tick count are computed using clever bit operations, and measures are taken to make the resulting time period as close as possible to that requested. There's no need for ipa_endpoint_init_hol_block_timer() to return an error, so change its return type to void. Signed-off-by: Alex Elder Signed-off-by: David S. Miller --- drivers/net/ipa/ipa_endpoint.c | 78 +++++++++++++++++++--------------- 1 file changed, 43 insertions(+), 35 deletions(-) diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 99115a2a29ae..d4be12385bcc 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -21,6 +21,7 @@ #include "ipa_modem.h" #include "ipa_table.h" #include "ipa_gsi.h" +#include "ipa_clock.h" #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) @@ -675,63 +676,70 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) iowrite32(val, endpoint->ipa->reg_virt + offset); } -/* A return value of 0 indicates an error */ +/* The head-of-line blocking timer is defined as a tick count, where each + * tick represents 128 cycles of the IPA core clock. Return the value + * that should be written to that register that represents the timeout + * period provided. + */ static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds) { + u32 width; u32 scale; - u32 base; + u64 ticks; + u64 rate; + u32 high; u32 val; if (!microseconds) - return 0; /* invalid delay */ + return 0; /* Nothing to compute if timer period is 0 */ - /* Timer is represented in units of clock ticks. */ - if (ipa->version < IPA_VERSION_4_2) - return microseconds; /* XXX Needs to be computed */ + /* Use 64 bit arithmetic to avoid overflow... */ + rate = ipa_clock_rate(ipa); + ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); + /* ...but we still need to fit into a 32-bit register */ + WARN_ON(ticks > U32_MAX); - /* IPA v4.2 represents the tick count as base * scale */ - scale = 1; /* XXX Needs to be computed */ - if (scale > field_max(SCALE_FMASK)) - return 0; /* scale too big */ + /* IPA v3.5.1 just records the tick count */ + if (ipa->version == IPA_VERSION_3_5_1) + return (u32)ticks; - base = DIV_ROUND_CLOSEST(microseconds, scale); - if (base > field_max(BASE_VALUE_FMASK)) - return 0; /* microseconds too big */ + /* For IPA v4.2, the tick count is represented by base and + * scale fields within the 32-bit timer register, where: + * ticks = base << scale; + * The best precision is achieved when the base value is as + * large as possible. Find the highest set bit in the tick + * count, and extract the number of bits in the base field + * such that that high bit is included. + */ + high = fls(ticks); /* 1..32 */ + width = HWEIGHT32(BASE_VALUE_FMASK); + scale = high > width ? 
high - width : 0; + if (scale) { + /* If we're scaling, round up to get a closer result */ + ticks += 1 << (scale - 1); + /* High bit was set, so rounding might have affected it */ + if (fls(ticks) != high) + scale++; + } val = u32_encode_bits(scale, SCALE_FMASK); - val |= u32_encode_bits(base, BASE_VALUE_FMASK); + val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK); return val; } -static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, - u32 microseconds) +/* If microseconds is 0, timeout is immediate */ +static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, + u32 microseconds) { u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; u32 offset; u32 val; - /* XXX We'll fix this when the register definition is clear */ - if (microseconds) { - struct device *dev = &ipa->pdev->dev; - - dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n", - endpoint_id); - microseconds = 0; - } - - if (microseconds) { - val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); - if (!val) - return -EINVAL; - } else { - val = 0; /* timeout is immediate */ - } offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); + val = ipa_reg_init_hol_block_timer_val(ipa, microseconds); iowrite32(val, ipa->reg_virt + offset); - - return 0; } static void @@ -756,7 +764,7 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; - (void)ipa_endpoint_init_hol_block_timer(endpoint, 0); + ipa_endpoint_init_hol_block_timer(endpoint, 0); ipa_endpoint_init_hol_block_enable(endpoint, true); } } From b0d754ef35041f921a1a03319b570fdfac9a027e Mon Sep 17 00:00:00 2001 From: tannerlove Date: Fri, 3 Jul 2020 14:53:06 -0400 Subject: [PATCH 0658/2056] selftests/net: add ipv6 test coverage in rxtimestamp test Add the options --ipv4, --ipv6 to specify running over ipv4 and/or ipv6. If neither is specified, then run both. Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- tools/testing/selftests/net/rxtimestamp.c | 85 ++++++++++++++++------- 1 file changed, 59 insertions(+), 26 deletions(-) diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index d4ea86a13e52..c599d371cbbe 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -117,6 +117,8 @@ static struct option long_options[] = { { "udp", no_argument, 0, 'u' }, { "ip", no_argument, 0, 'i' }, { "strict", no_argument, 0, 'S' }, + { "ipv4", no_argument, 0, '4' }, + { "ipv6", no_argument, 0, '6' }, { NULL, 0, NULL, 0 }, }; @@ -272,37 +274,55 @@ void config_so_flags(int rcv, struct options o) error(1, errno, "Failed to set SO_TIMESTAMPING"); } -bool run_test_case(struct socket_type s, struct test_case t) +bool run_test_case(struct socket_type *s, int test_num, char ip_version, + bool strict) { - int port = (s.type == SOCK_RAW) ? 0 : next_port++; + union { + struct sockaddr_in6 addr6; + struct sockaddr_in addr4; + struct sockaddr addr_un; + } addr; int read_size = op_size; - struct sockaddr_in addr; + int src, dst, rcv, port; + socklen_t addr_size; bool failed = false; - int src, dst, rcv; - src = socket(AF_INET, s.type, s.protocol); + port = (s->type == SOCK_RAW) ? 
0 : next_port++; + memset(&addr, 0, sizeof(addr)); + if (ip_version == '4') { + addr.addr4.sin_family = AF_INET; + addr.addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + addr.addr4.sin_port = htons(port); + addr_size = sizeof(addr.addr4); + if (s->type == SOCK_RAW) + read_size += 20; /* for IPv4 header */ + } else { + addr.addr6.sin6_family = AF_INET6; + addr.addr6.sin6_addr = in6addr_loopback; + addr.addr6.sin6_port = htons(port); + addr_size = sizeof(addr.addr6); + } + printf("Starting testcase %d over ipv%c...\n", test_num, ip_version); + src = socket(addr.addr_un.sa_family, s->type, + s->protocol); if (src < 0) error(1, errno, "Failed to open src socket"); - dst = socket(AF_INET, s.type, s.protocol); + dst = socket(addr.addr_un.sa_family, s->type, + s->protocol); if (dst < 0) error(1, errno, "Failed to open dst socket"); - memset(&addr, 0, sizeof(addr)); - addr.sin_family = AF_INET; - addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); - addr.sin_port = htons(port); - - if (bind(dst, (struct sockaddr *)&addr, sizeof(addr)) < 0) + if (bind(dst, &addr.addr_un, addr_size) < 0) error(1, errno, "Failed to bind to port %d", port); - if (s.type == SOCK_STREAM && (listen(dst, 1) < 0)) + if (s->type == SOCK_STREAM && (listen(dst, 1) < 0)) error(1, errno, "Failed to listen"); - if (connect(src, (struct sockaddr *)&addr, sizeof(addr)) < 0) + if (connect(src, &addr.addr_un, addr_size) < 0) error(1, errno, "Failed to connect"); - if (s.type == SOCK_STREAM) { + if (s->type == SOCK_STREAM) { rcv = accept(dst, NULL, NULL); if (rcv < 0) error(1, errno, "Failed to accept"); @@ -311,17 +331,22 @@ bool run_test_case(struct socket_type s, struct test_case t) rcv = dst; } - config_so_flags(rcv, t.sockopt); + config_so_flags(rcv, test_cases[test_num].sockopt); usleep(20000); /* setsockopt for SO_TIMESTAMPING is asynchronous */ do_send(src); - if (s.type == SOCK_RAW) - read_size += 20; /* for IP header */ - failed = do_recv(rcv, read_size, t.expected); + failed = do_recv(rcv, read_size, test_cases[test_num].expected); close(rcv); close(src); + if (failed) { + printf("FAILURE in testcase %d over ipv%c ", test_num, + ip_version); + print_test_case(&test_cases[test_num]); + if (!strict && test_cases[test_num].warn_on_fail) + failed = false; + } return failed; } @@ -329,6 +354,8 @@ int main(int argc, char **argv) { bool all_protocols = true; bool all_tests = true; + bool cfg_ipv4 = false; + bool cfg_ipv6 = false; bool strict = false; int arg_index = 0; int failures = 0; @@ -369,6 +396,12 @@ int main(int argc, char **argv) case 'S': strict = true; break; + case '4': + cfg_ipv4 = true; + break; + case '6': + cfg_ipv6 = true; + break; default: error(1, 0, "Failed to parse parameters."); } @@ -382,14 +415,14 @@ int main(int argc, char **argv) for (t = 0; t < ARRAY_SIZE(test_cases); t++) { if (!all_tests && !test_cases[t].enabled) continue; - - printf("Starting testcase %d...\n", t); - if (run_test_case(socket_types[s], test_cases[t])) { - if (strict || !test_cases[t].warn_on_fail) + if (cfg_ipv4 || !cfg_ipv6) + if (run_test_case(&socket_types[s], t, '4', + strict)) + failures++; + if (cfg_ipv6 || !cfg_ipv4) + if (run_test_case(&socket_types[s], t, '6', + strict)) failures++; - printf("FAILURE in test case "); - print_test_case(&test_cases[t]); - } } } if (!failures) From 136bcd8425b84ce81bbcf0f4cbefa6735b122d03 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 2 Jul 2020 16:12:40 +0200 Subject: [PATCH 0659/2056] mvpp2: refactor BM pool init percpu code In mvpp2_swf_bm_pool_init_percpu(), a reference to a struct 
mvpp2_bm_pool is obtained traversing multiple structs, when a local variable already points to the same object. Fix it and, while at it, give the variable a meaningful name. Signed-off-by: Matteo Croce Signed-off-by: David S. Miller --- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 212fc3b54310..027de7291f92 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -907,28 +907,27 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port) /* Initialize pools for swf, percpu buffers variant */ static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port) { - struct mvpp2_bm_pool *p; + struct mvpp2_bm_pool *bm_pool; int i; for (i = 0; i < port->nrxqs; i++) { - p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, - mvpp2_pools[MVPP2_BM_SHORT].pkt_size); - if (!p) + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i, + mvpp2_pools[MVPP2_BM_SHORT].pkt_size); + if (!bm_pool) return -ENOMEM; - port->priv->bm_pools[i].port_map |= BIT(port->id); - mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id); + bm_pool->port_map |= BIT(port->id); + mvpp2_rxq_short_pool_set(port, i, bm_pool->id); } for (i = 0; i < port->nrxqs; i++) { - p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, - mvpp2_pools[MVPP2_BM_LONG].pkt_size); - if (!p) + bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs, + mvpp2_pools[MVPP2_BM_LONG].pkt_size); + if (!bm_pool) return -ENOMEM; - port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id); - mvpp2_rxq_long_pool_set(port, i, - port->priv->bm_pools[i + port->nrxqs].id); + bm_pool->port_map |= BIT(port->id); + mvpp2_rxq_long_pool_set(port, i, bm_pool->id); } port->pool_long = NULL; From b27db2274ba8a62512603ba874c1e992fb7de1f4 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 2 Jul 2020 16:12:41 +0200 Subject: [PATCH 0660/2056] mvpp2: use page_pool allocator Use the page_pool API for memory management. This is a prerequisite for native XDP support. Tested-by: Sven Auhagen Signed-off-by: Matteo Croce Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 8 + .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 156 +++++++++++++++--- 3 files changed, 140 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index cd8ddd1ef6f2..ef4f35ba077d 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -87,6 +87,7 @@ config MVPP2 depends on ARCH_MVEBU || COMPILE_TEST select MVMDIO select PHYLINK + select PAGE_POOL help This driver supports the network interface units in the Marvell ARMADA 375, 7K and 8K SoCs. 
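For orientation, the basic page_pool life cycle the driver adopts below can be sketched as follows; the pool size and the example_* helper names are illustrative only, while the page_pool_* calls themselves are the API this patch uses.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <net/page_pool.h>

/* Create a pool whose pages come back already DMA-mapped for receive */
static struct page_pool *example_pool_create(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP,	/* pool performs the DMA mapping */
		.pool_size = 256,		/* illustrative ring size */
		.nid       = NUMA_NO_NODE,
		.dev       = dev,
		.dma_dir   = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
}

/* RX refill: take a page and hand its DMA address to the descriptor */
static struct page *example_rx_alloc(struct page_pool *pool, dma_addr_t *dma)
{
	struct page *page = page_pool_dev_alloc_pages(pool);

	if (page)
		*dma = page_pool_get_dma_addr(page);
	return page;
}

/* Drop/teardown path: give the page back so the pool can recycle it */
static void example_rx_recycle(struct page_pool *pool, struct page *page)
{
	page_pool_put_full_page(pool, page, false);
}
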
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 543a310ec102..4c16c9e9c1e5 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -15,6 +15,7 @@ #include #include #include +#include /* Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) @@ -820,6 +821,9 @@ struct mvpp2 { /* RSS Indirection tables */ struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES]; + + /* page_pool allocator */ + struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ]; }; struct mvpp2_pcpu_stats { @@ -1161,6 +1165,10 @@ struct mvpp2_rx_queue { /* Port's logic RXQ number to which physical RXQ is mapped */ int logic_rxq; + + /* XDP memory accounting */ + struct xdp_rxq_info xdp_rxq_short; + struct xdp_rxq_info xdp_rxq_long; }; struct mvpp2_bm_pool { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 027de7291f92..5d0e02c161a6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -95,6 +95,22 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) return cpu % priv->nthreads; } +static struct page_pool * +mvpp2_create_page_pool(struct device *dev, int num, int len) +{ + struct page_pool_params pp_params = { + /* internal DMA mapping in page_pool */ + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = num, + .nid = NUMA_NO_NODE, + .dev = dev, + .dma_dir = DMA_FROM_DEVICE, + .max_len = len, + }; + + return page_pool_create(&pp_params); +} + /* These accessors should be used to access: * * - per-thread registers, where each thread has its own copy of the @@ -327,17 +343,25 @@ static inline int mvpp2_txq_phys(int port, int txq) return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; } -static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool) +/* Returns a struct page if page_pool is set, otherwise a buffer */ +static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool, + struct page_pool *page_pool) { + if (page_pool) + return page_pool_dev_alloc_pages(page_pool); + if (likely(pool->frag_size <= PAGE_SIZE)) return netdev_alloc_frag(pool->frag_size); - else - return kmalloc(pool->frag_size, GFP_ATOMIC); + + return kmalloc(pool->frag_size, GFP_ATOMIC); } -static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data) +static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, + struct page_pool *page_pool, void *data) { - if (likely(pool->frag_size <= PAGE_SIZE)) + if (page_pool) + page_pool_put_full_page(page_pool, virt_to_head_page(data), false); + else if (likely(pool->frag_size <= PAGE_SIZE)) skb_free_frag(data); else kfree(data); @@ -442,6 +466,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool, int buf_num) { + struct page_pool *pp = NULL; int i; if (buf_num > bm_pool->buf_num) { @@ -450,6 +475,9 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, buf_num = bm_pool->buf_num; } + if (priv->percpu_pools) + pp = priv->page_pool[bm_pool->id]; + for (i = 0; i < buf_num; i++) { dma_addr_t buf_dma_addr; phys_addr_t buf_phys_addr; @@ -458,14 +486,15 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool, &buf_dma_addr, &buf_phys_addr); - dma_unmap_single(dev, buf_dma_addr, - bm_pool->buf_size, 
DMA_FROM_DEVICE); + if (!pp) + dma_unmap_single(dev, buf_dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); data = (void *)phys_to_virt(buf_phys_addr); if (!data) break; - mvpp2_frag_free(bm_pool, data); + mvpp2_frag_free(bm_pool, pp, data); } /* Update BM driver with number of buffers removed from pool */ @@ -496,6 +525,9 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, int buf_num; u32 val; + if (priv->percpu_pools) + page_pool_destroy(priv->page_pool[bm_pool->id]); + buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); @@ -548,8 +580,20 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) { int i, err, poolnum = MVPP2_BM_POOLS_NUM; - if (priv->percpu_pools) + if (priv->percpu_pools) { poolnum = mvpp2_get_nrxqs(priv) * 2; + for (i = 0; i < poolnum; i++) { + /* the pool in use */ + int pn = i / (poolnum / 2); + + priv->page_pool[i] = + mvpp2_create_page_pool(dev, + mvpp2_pools[pn].buf_num, + mvpp2_pools[pn].pkt_size); + if (IS_ERR(priv->page_pool[i])) + return PTR_ERR(priv->page_pool[i]); + } + } dev_info(dev, "using %d %s buffers\n", poolnum, priv->percpu_pools ? "per-cpu" : "shared"); @@ -632,23 +676,31 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port, static void *mvpp2_buf_alloc(struct mvpp2_port *port, struct mvpp2_bm_pool *bm_pool, + struct page_pool *page_pool, dma_addr_t *buf_dma_addr, phys_addr_t *buf_phys_addr, gfp_t gfp_mask) { dma_addr_t dma_addr; + struct page *page; void *data; - data = mvpp2_frag_alloc(bm_pool); + data = mvpp2_frag_alloc(bm_pool, page_pool); if (!data) return NULL; - dma_addr = dma_map_single(port->dev->dev.parent, data, - MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { - mvpp2_frag_free(bm_pool, data); - return NULL; + if (page_pool) { + page = (struct page *)data; + dma_addr = page_pool_get_dma_addr(page); + data = page_to_virt(page); + } else { + dma_addr = dma_map_single(port->dev->dev.parent, data, + MVPP2_RX_BUF_SIZE(bm_pool->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_frag_free(bm_pool, NULL, data); + return NULL; + } } *buf_dma_addr = dma_addr; *buf_phys_addr = virt_to_phys(data); @@ -706,6 +758,7 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, int i, buf_size, total_size; dma_addr_t dma_addr; phys_addr_t phys_addr; + struct page_pool *pp = NULL; void *buf; if (port->priv->percpu_pools && @@ -726,8 +779,10 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port, return 0; } + if (port->priv->percpu_pools) + pp = port->priv->page_pool[bm_pool->id]; for (i = 0; i < buf_num; i++) { - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, + buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr, &phys_addr, GFP_KERNEL); if (!buf) break; @@ -2374,10 +2429,11 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, /* Create a specified Rx queue */ static int mvpp2_rxq_init(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) - { + struct mvpp2 *priv = port->priv; unsigned int thread; u32 rxq_dma; + int err; rxq->size = port->rx_ring_size; @@ -2415,7 +2471,43 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, /* Add number of descriptors ready for receiving packets */ mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); + if (priv->percpu_pools) { + err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id); + if (err < 0) + goto err_free_dma; + + err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, 
rxq->id); + if (err < 0) + goto err_unregister_rxq_short; + + /* Every RXQ has a pool for short and another for long packets */ + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short, + MEM_TYPE_PAGE_POOL, + priv->page_pool[rxq->logic_rxq]); + if (err < 0) + goto err_unregister_rxq_long; + + err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long, + MEM_TYPE_PAGE_POOL, + priv->page_pool[rxq->logic_rxq + + port->nrxqs]); + if (err < 0) + goto err_unregister_mem_rxq_short; + } + return 0; + +err_unregister_mem_rxq_short: + xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short); +err_unregister_rxq_long: + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); +err_unregister_rxq_short: + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); +err_free_dma: + dma_free_coherent(port->dev->dev.parent, + rxq->size * MVPP2_DESC_ALIGNED_SIZE, + rxq->descs, rxq->descs_dma); + return err; } /* Push packets received by the RXQ to BM pool */ @@ -2449,6 +2541,12 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, { unsigned int thread; + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short)) + xdp_rxq_info_unreg(&rxq->xdp_rxq_short); + + if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long)) + xdp_rxq_info_unreg(&rxq->xdp_rxq_long); + mvpp2_rxq_drop_pkts(port, rxq); if (rxq->descs) @@ -2890,14 +2988,15 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, /* Allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, int pool) + struct mvpp2_bm_pool *bm_pool, + struct page_pool *page_pool, int pool) { dma_addr_t dma_addr; phys_addr_t phys_addr; void *buf; - buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr, - GFP_ATOMIC); + buf = mvpp2_buf_alloc(port, bm_pool, page_pool, + &dma_addr, &phys_addr, GFP_ATOMIC); if (!buf) return -ENOMEM; @@ -2956,6 +3055,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, while (rx_done < rx_todo) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); struct mvpp2_bm_pool *bm_pool; + struct page_pool *pp = NULL; struct sk_buff *skb; unsigned int frag_size; dma_addr_t dma_addr; @@ -2989,6 +3089,9 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, DMA_FROM_DEVICE); prefetch(data); + if (port->priv->percpu_pools) + pp = port->priv->page_pool[pool]; + if (bm_pool->frag_size > PAGE_SIZE) frag_size = 0; else @@ -3000,15 +3103,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, goto err_drop_frame; } - err = mvpp2_rx_refill(port, bm_pool, pool); + err = mvpp2_rx_refill(port, bm_pool, pp, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; } - dma_unmap_single_attrs(dev->dev.parent, dma_addr, - bm_pool->buf_size, DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); + if (pp) + page_pool_release_page(pp, virt_to_page(data)); + else + dma_unmap_single_attrs(dev->dev.parent, dma_addr, + bm_pool->buf_size, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); rcvd_pkts++; rcvd_bytes += rx_bytes; From 07dd0a7aae7f72af7cec18909581c2bb570edddc Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 2 Jul 2020 16:12:42 +0200 Subject: [PATCH 0661/2056] mvpp2: add basic XDP support Add XDP native support. By now only XDP_DROP, XDP_PASS and XDP_REDIRECT verdicts are supported. Co-developed-by: Sven Auhagen Signed-off-by: Sven Auhagen Signed-off-by: Matteo Croce Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 28 ++- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 165 +++++++++++++++++- 2 files changed, 185 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 4c16c9e9c1e5..f351e41c9da6 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -16,6 +16,18 @@ #include #include #include +#include +#include + +/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide, + * so the maximum offset is 7 * 32 = 224 + */ +#define MVPP2_SKB_HEADROOM min(max(XDP_PACKET_HEADROOM, NET_SKB_PAD), 224) + +#define MVPP2_XDP_PASS 0 +#define MVPP2_XDP_DROPPED BIT(0) +#define MVPP2_XDP_TX BIT(1) +#define MVPP2_XDP_REDIR BIT(2) /* Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) @@ -629,10 +641,12 @@ ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ ETH_HLEN + ETH_FCS_LEN, cache_line_size()) -#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + MVPP2_SKB_HEADROOM) #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ - ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) + ((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE) + +#define MVPP2_MAX_RX_BUF_SIZE (PAGE_SIZE - MVPP2_SKB_SHINFO_SIZE - MVPP2_SKB_HEADROOM) #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) #define MVPP2_BIT_TO_WORD(bit) ((bit) / 32) @@ -690,9 +704,9 @@ enum mvpp2_prs_l3_cast { #define MVPP2_BM_COOKIE_POOL_OFFS 8 #define MVPP2_BM_COOKIE_CPU_OFFS 24 -#define MVPP2_BM_SHORT_FRAME_SIZE 512 -#define MVPP2_BM_LONG_FRAME_SIZE 2048 -#define MVPP2_BM_JUMBO_FRAME_SIZE 10240 +#define MVPP2_BM_SHORT_FRAME_SIZE 704 /* frame size 128 */ +#define MVPP2_BM_LONG_FRAME_SIZE 2240 /* frame size 1664 */ +#define MVPP2_BM_JUMBO_FRAME_SIZE 10432 /* frame size 9856 */ /* BM short pool packet size * These value assure that for SWF the total number * of bytes allocated for each buffer will be 512 @@ -913,6 +927,8 @@ struct mvpp2_port { unsigned int ntxqs; struct net_device *dev; + struct bpf_prog *xdp_prog; + int pkt_size; /* Per-CPU port control */ @@ -932,6 +948,8 @@ struct mvpp2_port { struct mvpp2_pcpu_stats __percpu *stats; u64 *ethtool_stats; + unsigned long state; + /* Per-port work and its lock to gather hardware statistics */ struct mutex gather_stats_lock; struct delayed_work stats_work; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 5d0e02c161a6..5f740c96a02c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "mvpp2.h" #include "mvpp2_prs.h" @@ -105,6 +106,7 @@ mvpp2_create_page_pool(struct device *dev, int num, int len) .nid = NUMA_NO_NODE, .dev = dev, .dma_dir = DMA_FROM_DEVICE, + .offset = MVPP2_SKB_HEADROOM, .max_len = len, }; @@ -2462,7 +2464,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, put_cpu(); /* Set Offset */ - mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); + mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM); /* Set coalescing pkts and time */ mvpp2_rx_pkts_coal_set(port, rxq); @@ -3037,16 +3039,69 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; } +static int +mvpp2_run_xdp(struct 
mvpp2_port *port, struct mvpp2_rx_queue *rxq, + struct bpf_prog *prog, struct xdp_buff *xdp, + struct page_pool *pp) +{ + unsigned int len, sync, err; + struct page *page; + u32 ret, act; + + len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; + act = bpf_prog_run_xdp(prog, xdp); + + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ + sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM; + sync = max(sync, len); + + switch (act) { + case XDP_PASS: + ret = MVPP2_XDP_PASS; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(port->dev, xdp, prog); + if (unlikely(err)) { + ret = MVPP2_XDP_DROPPED; + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + } else { + ret = MVPP2_XDP_REDIR; + } + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(port->dev, prog, act); + fallthrough; + case XDP_DROP: + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + ret = MVPP2_XDP_DROPPED; + break; + } + + return ret; +} + /* Main rx processing */ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; int rx_received; int rx_done = 0; + u32 xdp_ret = 0; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; + rcu_read_lock(); + + xdp_prog = READ_ONCE(port->xdp_prog); + /* Get number of received packets and clamp the to-do */ rx_received = mvpp2_rxq_received(port, rxq->id); if (rx_todo > rx_received) @@ -3061,7 +3116,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, dma_addr_t dma_addr; phys_addr_t phys_addr; u32 rx_status; - int pool, rx_bytes, err; + int pool, rx_bytes, err, ret; void *data; rx_done++; @@ -3097,6 +3152,33 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, else frag_size = bm_pool->frag_size; + if (xdp_prog) { + xdp.data_hard_start = data; + xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM; + xdp.data_end = xdp.data + rx_bytes; + xdp.frame_sz = PAGE_SIZE; + + if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE) + xdp.rxq = &rxq->xdp_rxq_short; + else + xdp.rxq = &rxq->xdp_rxq_long; + + xdp_set_data_meta_invalid(&xdp); + + ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp); + + if (ret) { + xdp_ret |= ret; + err = mvpp2_rx_refill(port, bm_pool, pp, pool); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + continue; + } + } + skb = build_skb(data, frag_size); if (!skb) { netdev_warn(port->dev, "skb build failed\n"); @@ -3119,7 +3201,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, rcvd_pkts++; rcvd_bytes += rx_bytes; - skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD); + skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); @@ -3134,6 +3216,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } + rcu_read_unlock(); + if (rcvd_pkts) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); @@ -3609,6 +3693,8 @@ static void mvpp2_start_dev(struct mvpp2_port *port) } netif_tx_start_all_queues(port->dev); + + clear_bit(0, &port->state); } /* Set hw internals when stopping port */ @@ -3616,6 +3702,8 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) { int i; + set_bit(0, &port->state); + /* Disable 
interrupts on all threads */ mvpp2_interrupts_disable(port); @@ -4022,6 +4110,10 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) } if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) { + if (port->xdp_prog) { + netdev_err(dev, "Jumbo frames are not supported with XDP\n"); + return -EINVAL; + } if (priv->percpu_pools) { netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu); mvpp2_bm_switch_buffers(priv, false); @@ -4160,6 +4252,72 @@ static int mvpp2_set_features(struct net_device *dev, return 0; } +static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) +{ + struct bpf_prog *prog = bpf->prog, *old_prog; + bool running = netif_running(port->dev); + bool reset = !prog != !port->xdp_prog; + + if (port->dev->mtu > ETH_DATA_LEN) { + NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled"); + return -EOPNOTSUPP; + } + + if (!port->priv->percpu_pools) { + NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP"); + return -EOPNOTSUPP; + } + + /* device is up and bpf is added/removed, must setup the RX queues */ + if (running && reset) { + mvpp2_stop_dev(port); + mvpp2_cleanup_rxqs(port); + mvpp2_cleanup_txqs(port); + } + + old_prog = xchg(&port->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + /* bpf is just replaced, RXQ and MTU are already setup */ + if (!reset) + return 0; + + /* device was up, restore the link */ + if (running) { + int ret = mvpp2_setup_rxqs(port); + + if (ret) { + netdev_err(port->dev, "mvpp2_setup_rxqs failed\n"); + return ret; + } + ret = mvpp2_setup_txqs(port); + if (ret) { + netdev_err(port->dev, "mvpp2_setup_txqs failed\n"); + return ret; + } + + mvpp2_start_dev(port); + } + + return 0; +} + +static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct mvpp2_port *port = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return mvpp2_xdp_setup(port, xdp); + case XDP_QUERY_PROG: + xdp->prog_id = port->xdp_prog ? port->xdp_prog->aux->id : 0; + return 0; + default: + return -EINVAL; + } +} + /* Ethtool methods */ static int mvpp2_ethtool_nway_reset(struct net_device *dev) @@ -4510,6 +4668,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, .ndo_set_features = mvpp2_set_features, + .ndo_bpf = mvpp2_xdp, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { From c2d6fe6163de80d7f7cf400ee351f56d6cdb7a5a Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Thu, 2 Jul 2020 16:12:43 +0200 Subject: [PATCH 0662/2056] mvpp2: XDP TX support Add the transmit part of XDP support, which includes: - support for XDP_TX in mvpp2_xdp() - .ndo_xdp_xmit hook for AF_XDP and XDP_REDIRECT with mvpp2 as destination mvpp2_xdp_submit_frame() is a generic function which is called by mvpp2_xdp_xmit_back() when doing XDP_TX, and by mvpp2_xdp_xmit when doing AF_XDP or XDP_REDIRECT target. The buffer allocation has been reworked to be able to map the buffers as DMA_FROM_DEVICE or DMA_BIDIRECTIONAL depending if native XDP is in use or not. Co-developed-by: Sven Auhagen Signed-off-by: Sven Auhagen Signed-off-by: Matteo Croce Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 13 +- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 304 +++++++++++++++--- 2 files changed, 272 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index f351e41c9da6..c52955b33fab 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1082,9 +1082,20 @@ struct mvpp2_rx_desc { }; }; +enum mvpp2_tx_buf_type { + MVPP2_TYPE_SKB, + MVPP2_TYPE_XDP_TX, + MVPP2_TYPE_XDP_NDO, +}; + struct mvpp2_txq_pcpu_buf { + enum mvpp2_tx_buf_type type; + /* Transmitted SKB */ - struct sk_buff *skb; + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; /* Physical address of transmitted buffer */ dma_addr_t dma; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 5f740c96a02c..d49a814311be 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -97,7 +97,8 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu) } static struct page_pool * -mvpp2_create_page_pool(struct device *dev, int num, int len) +mvpp2_create_page_pool(struct device *dev, int num, int len, + enum dma_data_direction dma_dir) { struct page_pool_params pp_params = { /* internal DMA mapping in page_pool */ @@ -105,7 +106,7 @@ mvpp2_create_page_pool(struct device *dev, int num, int len) .pool_size = num, .nid = NUMA_NO_NODE, .dev = dev, - .dma_dir = DMA_FROM_DEVICE, + .dma_dir = dma_dir, .offset = MVPP2_SKB_HEADROOM, .max_len = len, }; @@ -299,12 +300,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) static void mvpp2_txq_inc_put(struct mvpp2_port *port, struct mvpp2_txq_pcpu *txq_pcpu, - struct sk_buff *skb, - struct mvpp2_tx_desc *tx_desc) + void *data, + struct mvpp2_tx_desc *tx_desc, + enum mvpp2_tx_buf_type buf_type) { struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_put_index; - tx_buf->skb = skb; + tx_buf->type = buf_type; + if (buf_type == MVPP2_TYPE_SKB) + tx_buf->skb = data; + else + tx_buf->xdpf = data; tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc); tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) + mvpp2_txdesc_offset_get(port, tx_desc); @@ -527,9 +533,6 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, int buf_num; u32 val; - if (priv->percpu_pools) - page_pool_destroy(priv->page_pool[bm_pool->id]); - buf_num = mvpp2_check_hw_buf_num(priv, bm_pool); mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num); @@ -545,6 +548,9 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, val |= MVPP2_BM_STOP_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + if (priv->percpu_pools) + page_pool_destroy(priv->page_pool[bm_pool->id]); + dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, bm_pool->dma_addr); @@ -580,9 +586,19 @@ static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv) static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) { + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; int i, err, poolnum = MVPP2_BM_POOLS_NUM; + struct mvpp2_port *port; if (priv->percpu_pools) { + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + if (port->xdp_prog) { + dma_dir = DMA_BIDIRECTIONAL; + break; + } + } + poolnum = mvpp2_get_nrxqs(priv) * 2; for (i = 0; i < poolnum; i++) { /* the pool in use */ @@ -591,7 +607,8 @@ static int mvpp2_bm_init(struct 
device *dev, struct mvpp2 *priv) priv->page_pool[i] = mvpp2_create_page_pool(dev, mvpp2_pools[pn].buf_num, - mvpp2_pools[pn].pkt_size); + mvpp2_pools[pn].pkt_size, + dma_dir); if (IS_ERR(priv->page_pool[i])) return PTR_ERR(priv->page_pool[i]); } @@ -2318,11 +2335,15 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_txq_pcpu_buf *tx_buf = txq_pcpu->buffs + txq_pcpu->txq_get_index; - if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) + if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) && + tx_buf->type != MVPP2_TYPE_XDP_TX) dma_unmap_single(port->dev->dev.parent, tx_buf->dma, tx_buf->size, DMA_TO_DEVICE); - if (tx_buf->skb) + if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb) dev_kfree_skb_any(tx_buf->skb); + else if (tx_buf->type == MVPP2_TYPE_XDP_TX || + tx_buf->type == MVPP2_TYPE_XDP_NDO) + xdp_return_frame(tx_buf->xdpf); mvpp2_txq_inc_get(txq_pcpu); } @@ -2810,7 +2831,7 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port) static int mvpp2_setup_txqs(struct mvpp2_port *port) { struct mvpp2_tx_queue *txq; - int queue, err, cpu; + int queue, err; for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; @@ -2819,8 +2840,8 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; /* Assign this queue to a CPU */ - cpu = queue % num_present_cpus(); - netif_set_xps_queue(port->dev, cpumask_of(cpu), queue); + if (queue < num_possible_cpus()) + netif_set_xps_queue(port->dev, cpumask_of(queue), queue); } if (port->has_tx_irqs) { @@ -3039,6 +3060,164 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE; } +static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); + struct mvpp2_tx_queue *aggr_txq; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_queue *txq; + struct netdev_queue *nq; + + txq = port->txqs[txq_id]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + nq = netdev_get_tx_queue(port->dev, txq_id); + aggr_txq = &port->priv->aggr_txqs[thread]; + + txq_pcpu->reserved_num -= nxmit; + txq_pcpu->count += nxmit; + aggr_txq->count += nxmit; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, nxmit); + + if (txq_pcpu->count >= txq_pcpu->stop_threshold) + netif_tx_stop_queue(nq); + + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += nxmit_byte; + stats->tx_packets += nxmit; + u64_stats_update_end(&stats->syncp); + + /* Finalize TX processing */ + if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) + mvpp2_txq_done(port, txq, txq_pcpu); +} + +static int +mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, + struct xdp_frame *xdpf, bool dma_map) +{ + unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); + u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE | + MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; + enum mvpp2_tx_buf_type buf_type; + struct mvpp2_txq_pcpu *txq_pcpu; + struct mvpp2_tx_queue *aggr_txq; + struct mvpp2_tx_desc *tx_desc; + struct mvpp2_tx_queue *txq; + int ret = MVPP2_XDP_TX; + dma_addr_t dma_addr; + + txq = port->txqs[txq_id]; + txq_pcpu = per_cpu_ptr(txq->pcpu, thread); + aggr_txq = &port->priv->aggr_txqs[thread]; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) || + mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) { + ret = 
MVPP2_XDP_DROPPED; + goto out; + } + + /* Get a descriptor for the first part of the packet */ + tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, xdpf->len); + + if (dma_map) { + /* XDP_REDIRECT or AF_XDP */ + dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data, + xdpf->len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) { + mvpp2_txq_desc_put(txq); + ret = MVPP2_XDP_DROPPED; + goto out; + } + + buf_type = MVPP2_TYPE_XDP_NDO; + } else { + /* XDP_TX */ + struct page *page = virt_to_page(xdpf->data); + + dma_addr = page_pool_get_dma_addr(page) + + sizeof(*xdpf) + xdpf->headroom; + dma_sync_single_for_device(port->dev->dev.parent, dma_addr, + xdpf->len, DMA_BIDIRECTIONAL); + + buf_type = MVPP2_TYPE_XDP_TX; + } + + mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr); + + mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); + mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type); + +out: + return ret; +} + +static int +mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) +{ + struct xdp_frame *xdpf; + u16 txq_id; + int ret; + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return MVPP2_XDP_DROPPED; + + /* The first of the TX queues are used for XPS, + * the second half for XDP_TX + */ + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); + + ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); + if (ret == MVPP2_XDP_TX) + mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); + + return ret; +} + +static int +mvpp2_xdp_xmit(struct net_device *dev, int num_frame, + struct xdp_frame **frames, u32 flags) +{ + struct mvpp2_port *port = netdev_priv(dev); + int i, nxmit_byte = 0, nxmit = num_frame; + u16 txq_id; + u32 ret; + + if (unlikely(test_bit(0, &port->state))) + return -ENETDOWN; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + /* The first of the TX queues are used for XPS, + * the second half for XDP_TX + */ + txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); + + for (i = 0; i < num_frame; i++) { + ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true); + if (ret == MVPP2_XDP_TX) { + nxmit_byte += frames[i]->len; + } else { + xdp_return_frame_rx_napi(frames[i]); + nxmit--; + } + } + + if (nxmit > 0) + mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); + + return nxmit; +} + static int mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, @@ -3069,6 +3248,13 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, ret = MVPP2_XDP_REDIR; } break; + case XDP_TX: + ret = mvpp2_xdp_xmit_back(port, xdp); + if (ret != MVPP2_XDP_TX) { + page = virt_to_head_page(xdp->data); + page_pool_put_page(pp, page, sync, true); + } + break; default: bpf_warn_invalid_xdp_action(act); fallthrough; @@ -3090,6 +3276,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; + enum dma_data_direction dma_dir; struct bpf_prog *xdp_prog; struct xdp_buff xdp; int rx_received; @@ -3139,13 +3326,19 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, if (rx_status & MVPP2_RXD_ERR_SUMMARY) goto err_drop_frame; + if (port->priv->percpu_pools) { + pp = port->priv->page_pool[pool]; + dma_dir = page_pool_get_dma_dir(pp); + } else { + dma_dir = DMA_FROM_DEVICE; + } + 
dma_sync_single_for_cpu(dev->dev.parent, dma_addr, rx_bytes + MVPP2_MH_SIZE, - DMA_FROM_DEVICE); - prefetch(data); + dma_dir); - if (port->priv->percpu_pools) - pp = port->priv->page_pool[pool]; + /* Prefetch header */ + prefetch(data); if (bm_pool->frag_size > PAGE_SIZE) frag_size = 0; @@ -3218,6 +3411,9 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, rcu_read_unlock(); + if (xdp_ret & MVPP2_XDP_REDIR) + xdp_do_flush_map(); + if (rcvd_pkts) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); @@ -3284,11 +3480,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, /* Last descriptor */ mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); } else { /* Descriptor in the middle: Not First, Not Last */ mvpp2_txdesc_cmd_set(port, tx_desc, 0); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); } } @@ -3326,7 +3522,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); } static inline int mvpp2_tso_put_data(struct sk_buff *skb, @@ -3355,14 +3551,14 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb, if (!left) { mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); if (last) { - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); return 0; } } else { mvpp2_txdesc_cmd_set(port, tx_desc, 0); } - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); return 0; } @@ -3475,12 +3671,12 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev) /* First and Last descriptor */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB); } else { /* First but not Last */ tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd); - mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB); /* Continue with other skb fragments */ if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { @@ -4159,6 +4355,33 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu) return err; } +static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) +{ + enum dma_data_direction dma_dir = DMA_FROM_DEVICE; + struct mvpp2 *priv = port->priv; + int err = -1, i; + + if (!priv->percpu_pools) + return err; + + if (!priv->page_pool) + return -ENOMEM; + + for (i = 0; i < priv->port_count; i++) { + port = priv->port_list[i]; + if (port->xdp_prog) { + dma_dir = DMA_BIDIRECTIONAL; + break; + } + } + + /* All pools are equal in terms of DMA direction */ + if (priv->page_pool[0]->p.dma_dir != dma_dir) + err = mvpp2_bm_switch_buffers(priv, true); + + return err; +} + static void mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -4268,13 +4491,15 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) return -EOPNOTSUPP; } - /* device is up and bpf is added/removed, must setup the RX queues */ - 
if (running && reset) { - mvpp2_stop_dev(port); - mvpp2_cleanup_rxqs(port); - mvpp2_cleanup_txqs(port); + if (port->ntxqs < num_possible_cpus() * 2) { + NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU"); + return -EOPNOTSUPP; } + /* device is up and bpf is added/removed, must setup the RX queues */ + if (running && reset) + mvpp2_stop(port->dev); + old_prog = xchg(&port->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); @@ -4284,21 +4509,11 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf) return 0; /* device was up, restore the link */ - if (running) { - int ret = mvpp2_setup_rxqs(port); + if (running) + mvpp2_open(port->dev); - if (ret) { - netdev_err(port->dev, "mvpp2_setup_rxqs failed\n"); - return ret; - } - ret = mvpp2_setup_txqs(port); - if (ret) { - netdev_err(port->dev, "mvpp2_setup_txqs failed\n"); - return ret; - } - - mvpp2_start_dev(port); - } + /* Check Page Pool DMA Direction */ + mvpp2_check_pagepool_dma(port); return 0; } @@ -4669,6 +4884,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, .ndo_set_features = mvpp2_set_features, .ndo_bpf = mvpp2_xdp, + .ndo_xdp_xmit = mvpp2_xdp_xmit, }; static const struct ethtool_ops mvpp2_eth_tool_ops = { From 39b963152469fa154b3e1c4ef4ceac3651f56731 Mon Sep 17 00:00:00 2001 From: Sven Auhagen Date: Thu, 2 Jul 2020 16:12:44 +0200 Subject: [PATCH 0663/2056] mvpp2: xdp ethtool stats Add ethtool statistics for XDP. Signed-off-by: Sven Auhagen Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 8 + .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 158 ++++++++++++++++-- 2 files changed, 148 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index c52955b33fab..32753cc771bf 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -846,6 +846,14 @@ struct mvpp2_pcpu_stats { u64 rx_bytes; u64 tx_packets; u64 tx_bytes; + /* XDP */ + u64 xdp_redirect; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_xmit_err; + u64 xdp_tx; + u64 xdp_tx_err; }; /* Per-CPU port control */ diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d49a814311be..d8e238ed533e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -1485,6 +1485,16 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port, writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); } +enum { + ETHTOOL_XDP_REDIRECT, + ETHTOOL_XDP_PASS, + ETHTOOL_XDP_DROP, + ETHTOOL_XDP_TX, + ETHTOOL_XDP_TX_ERR, + ETHTOOL_XDP_XMIT, + ETHTOOL_XDP_XMIT_ERR, +}; + struct mvpp2_ethtool_counter { unsigned int offset; const char string[ETH_GSTRING_LEN]; @@ -1577,10 +1587,21 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = { { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" }, }; +static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = { + { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", }, + { ETHTOOL_XDP_PASS, "rx_xdp_pass", }, + { ETHTOOL_XDP_DROP, "rx_xdp_drop", }, + { ETHTOOL_XDP_TX, "rx_xdp_tx", }, + { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", }, + { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", }, + { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", }, +}; + #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs) (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \ ARRAY_SIZE(mvpp2_ethtool_port_regs) + \ 
(ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \ - (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs))) + (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \ + ARRAY_SIZE(mvpp2_ethtool_xdp)) static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, u8 *data) @@ -1619,10 +1640,57 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, data += ETH_GSTRING_LEN; } } + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) { + strscpy(data, mvpp2_ethtool_xdp[i].string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } +} + +static void +mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats) +{ + unsigned int start; + unsigned int cpu; + + /* Gather XDP Statistics */ + for_each_possible_cpu(cpu) { + struct mvpp2_pcpu_stats *cpu_stats; + u64 xdp_redirect; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_xmit_err; + u64 xdp_tx; + u64 xdp_tx_err; + + cpu_stats = per_cpu_ptr(port->stats, cpu); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + xdp_redirect = cpu_stats->xdp_redirect; + xdp_pass = cpu_stats->xdp_pass; + xdp_drop = cpu_stats->xdp_drop; + xdp_xmit = cpu_stats->xdp_xmit; + xdp_xmit_err = cpu_stats->xdp_xmit_err; + xdp_tx = cpu_stats->xdp_tx; + xdp_tx_err = cpu_stats->xdp_tx_err; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + xdp_stats->xdp_redirect += xdp_redirect; + xdp_stats->xdp_pass += xdp_pass; + xdp_stats->xdp_drop += xdp_drop; + xdp_stats->xdp_xmit += xdp_xmit; + xdp_stats->xdp_xmit_err += xdp_xmit_err; + xdp_stats->xdp_tx += xdp_tx; + xdp_stats->xdp_tx_err += xdp_tx_err; + } } static void mvpp2_read_stats(struct mvpp2_port *port) { + struct mvpp2_pcpu_stats xdp_stats = {}; + const struct mvpp2_ethtool_counter *s; u64 *pstats; int i, q; @@ -1650,6 +1718,37 @@ static void mvpp2_read_stats(struct mvpp2_port *port) *pstats++ += mvpp2_read_index(port->priv, port->first_rxq + q, mvpp2_ethtool_rxq_regs[i].offset); + + /* Gather XDP Statistics */ + mvpp2_get_xdp_stats(port, &xdp_stats); + + for (i = 0, s = mvpp2_ethtool_xdp; + s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp); + s++, i++) { + switch (s->offset) { + case ETHTOOL_XDP_REDIRECT: + *pstats++ = xdp_stats.xdp_redirect; + break; + case ETHTOOL_XDP_PASS: + *pstats++ = xdp_stats.xdp_pass; + break; + case ETHTOOL_XDP_DROP: + *pstats++ = xdp_stats.xdp_drop; + break; + case ETHTOOL_XDP_TX: + *pstats++ = xdp_stats.xdp_tx; + break; + case ETHTOOL_XDP_TX_ERR: + *pstats++ = xdp_stats.xdp_tx_err; + break; + case ETHTOOL_XDP_XMIT: + *pstats++ = xdp_stats.xdp_xmit; + break; + case ETHTOOL_XDP_XMIT_ERR: + *pstats++ = xdp_stats.xdp_xmit_err; + break; + } + } } static void mvpp2_gather_hw_statistics(struct work_struct *work) @@ -3063,7 +3162,6 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte) { unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id()); - struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread); struct mvpp2_tx_queue *aggr_txq; struct mvpp2_txq_pcpu *txq_pcpu; struct mvpp2_tx_queue *txq; @@ -3085,11 +3183,6 @@ static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, if (txq_pcpu->count >= txq_pcpu->stop_threshold) netif_tx_stop_queue(nq); - u64_stats_update_begin(&stats->syncp); - stats->tx_bytes += nxmit_byte; - stats->tx_packets += nxmit; - u64_stats_update_end(&stats->syncp); - /* Finalize TX processing */ if (!port->has_tx_irqs && txq_pcpu->count 
>= txq->done_pkts_coal) mvpp2_txq_done(port, txq, txq_pcpu); @@ -3162,6 +3255,7 @@ mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id, static int mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) { + struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); struct xdp_frame *xdpf; u16 txq_id; int ret; @@ -3176,8 +3270,19 @@ mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp) txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2); ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false); - if (ret == MVPP2_XDP_TX) + if (ret == MVPP2_XDP_TX) { + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += xdpf->len; + stats->tx_packets++; + stats->xdp_tx++; + u64_stats_update_end(&stats->syncp); + mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len); + } else { + u64_stats_update_begin(&stats->syncp); + stats->xdp_tx_err++; + u64_stats_update_end(&stats->syncp); + } return ret; } @@ -3188,6 +3293,7 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame, { struct mvpp2_port *port = netdev_priv(dev); int i, nxmit_byte = 0, nxmit = num_frame; + struct mvpp2_pcpu_stats *stats; u16 txq_id; u32 ret; @@ -3212,16 +3318,24 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame, } } - if (nxmit > 0) + if (likely(nxmit > 0)) mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte); + stats = this_cpu_ptr(port->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_bytes += nxmit_byte; + stats->tx_packets += nxmit; + stats->xdp_xmit += nxmit; + stats->xdp_xmit_err += num_frame - nxmit; + u64_stats_update_end(&stats->syncp); + return nxmit; } static int mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, - struct page_pool *pp) + struct page_pool *pp, struct mvpp2_pcpu_stats *stats) { unsigned int len, sync, err; struct page *page; @@ -3236,6 +3350,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, switch (act) { case XDP_PASS: + stats->xdp_pass++; ret = MVPP2_XDP_PASS; break; case XDP_REDIRECT: @@ -3246,6 +3361,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, page_pool_put_page(pp, page, sync, true); } else { ret = MVPP2_XDP_REDIR; + stats->xdp_redirect++; } break; case XDP_TX: @@ -3265,6 +3381,7 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq, page = virt_to_head_page(xdp->data); page_pool_put_page(pp, page, sync, true); ret = MVPP2_XDP_DROPPED; + stats->xdp_drop++; break; } @@ -3276,14 +3393,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; + struct mvpp2_pcpu_stats ps = {}; enum dma_data_direction dma_dir; struct bpf_prog *xdp_prog; struct xdp_buff xdp; int rx_received; int rx_done = 0; u32 xdp_ret = 0; - u32 rcvd_pkts = 0; - u32 rcvd_bytes = 0; rcu_read_lock(); @@ -3358,7 +3474,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, xdp_set_data_meta_invalid(&xdp); - ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp); + ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps); if (ret) { xdp_ret |= ret; @@ -3368,6 +3484,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, goto err_drop_frame; } + ps.rx_packets++; + ps.rx_bytes += rx_bytes; continue; } } @@ -3391,8 +3509,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, bm_pool->buf_size, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); - rcvd_pkts++; - rcvd_bytes += rx_bytes; + ps.rx_packets++; + 
ps.rx_bytes += rx_bytes; skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM); skb_put(skb, rx_bytes); @@ -3414,12 +3532,16 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, if (xdp_ret & MVPP2_XDP_REDIR) xdp_do_flush_map(); - if (rcvd_pkts) { + if (ps.rx_packets) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); u64_stats_update_begin(&stats->syncp); - stats->rx_packets += rcvd_pkts; - stats->rx_bytes += rcvd_bytes; + stats->rx_packets += ps.rx_packets; + stats->rx_bytes += ps.rx_bytes; + /* xdp */ + stats->xdp_redirect += ps.xdp_redirect; + stats->xdp_pass += ps.xdp_pass; + stats->xdp_drop += ps.xdp_drop; u64_stats_update_end(&stats->syncp); } From f0a5e4d7a594e0fe237d3dfafb069bb82f80f42f Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Wed, 1 Jul 2020 18:17:19 +0300 Subject: [PATCH 0664/2056] ipvs: allow connection reuse for unconfirmed conntrack MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit YangYuxi is reporting that connection reuse is causing one-second delay when SYN hits existing connection in TIME_WAIT state. Such delay was added to give time to expire both the IPVS connection and the corresponding conntrack. This was considered a rare case at that time but it is causing problem for some environments such as Kubernetes. As nf_conntrack_tcp_packet() can decide to release the conntrack in TIME_WAIT state and to replace it with a fresh NEW conntrack, we can use this to allow rescheduling just by tuning our check: if the conntrack is confirmed we can not schedule it to different real server and the one-second delay still applies but if new conntrack was created, we are free to select new real server without any delays. YangYuxi lists some of the problem reports: - One second connection delay in masquerading mode: https://marc.info/?t=151683118100004&r=1&w=2 - IPVS low throughput #70747 https://github.com/kubernetes/kubernetes/issues/70747 - Apache Bench can fill up ipvs service proxy in seconds #544 https://github.com/cloudnativelabs/kube-router/issues/544 - Additional 1s latency in `host -> service IP -> pod` https://github.com/kubernetes/kubernetes/issues/90854 Fixes: f719e3754ee2 ("ipvs: drop first packet to redirect conntrack") Co-developed-by: YangYuxi Signed-off-by: YangYuxi Signed-off-by: Julian Anastasov Reviewed-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 10 ++++------ net/netfilter/ipvs/ip_vs_core.c | 12 +++++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 0c9881241323..011f407b76fe 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1626,18 +1626,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) } #endif /* CONFIG_IP_VS_NFCT */ -/* Really using conntrack? */ -static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, - struct sk_buff *skb) +/* Using old conntrack that can not be redirected to another real server? 
*/ +static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp, + struct sk_buff *skb) { #ifdef CONFIG_IP_VS_NFCT enum ip_conntrack_info ctinfo; struct nf_conn *ct; - if (!(cp->flags & IP_VS_CONN_F_NFCT)) - return false; ct = nf_ct_get(skb, &ctinfo); - if (ct) + if (ct && nf_ct_is_confirmed(ct)) return true; #endif return false; diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index ca3670152565..b4a6b7662f3f 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2066,14 +2066,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs); if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) { - bool uses_ct = false, resched = false; + bool old_ct = false, resched = false; if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest && unlikely(!atomic_read(&cp->dest->weight))) { resched = true; - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); } else if (is_new_conn_expected(cp, conn_reuse_mode)) { - uses_ct = ip_vs_conn_uses_conntrack(cp, skb); + old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); if (!atomic_read(&cp->n_control)) { resched = true; } else { @@ -2081,15 +2081,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int * that uses conntrack while it is still * referenced by controlled connection(s). */ - resched = !uses_ct; + resched = !old_ct; } } if (resched) { + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; if (!atomic_read(&cp->n_control)) ip_vs_conn_expire_now(cp); __ip_vs_conn_put(cp); - if (uses_ct) + if (old_ct) return NF_DROP; cp = NULL; } From 74cccc3d38438b346e40a4f8133cff3f0839ff84 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:11 +0200 Subject: [PATCH 0665/2056] netfilter: nf_tables: add NFTA_CHAIN_ID attribute This netlink attribute allows you to refer to chains inside a transaction as an alternative to the name and the handle. The chain binding support requires this new chain ID approach. 
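For illustration only, and not part of the patch itself: a minimal userspace sketch of a NFT_MSG_NEWCHAIN request that also carries NFTA_CHAIN_ID, assuming libmnl and the uapi headers touched above. Socket handling and the surrounding NFNL_MSG_BATCH_BEGIN/END messages are omitted, and the "filter" table and "c1" chain names are placeholders.

#include <stdint.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nf_tables.h>

/* Build a NEWCHAIN request that also announces a transaction-local ID. */
static struct nlmsghdr *build_newchain(char *buf, uint32_t seq, uint32_t chain_id)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWCHAIN;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE;
	nlh->nlmsg_seq = seq;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = NFPROTO_INET;	/* placeholder family */
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_strz(nlh, NFTA_CHAIN_TABLE, "filter");	/* placeholder */
	mnl_attr_put_strz(nlh, NFTA_CHAIN_NAME, "c1");		/* placeholder */
	/* parsed by the kernel with ntohl(nla_get_be32()), so send big endian */
	mnl_attr_put_u32(nlh, NFTA_CHAIN_ID, htonl(chain_id));

	return nlh;
}

Later messages in the same batch can then refer to this chain through the ID rather than its name or handle.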
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 3 +++ include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 15 ++++++++++++--- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6f0f6fca9ac3..3e5226684017 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1433,6 +1433,7 @@ struct nft_trans_chain { char *name; struct nft_stats __percpu *stats; u8 policy; + u32 chain_id; }; #define nft_trans_chain_update(trans) \ @@ -1443,6 +1444,8 @@ struct nft_trans_chain { (((struct nft_trans_chain *)trans->data)->stats) #define nft_trans_chain_policy(trans) \ (((struct nft_trans_chain *)trans->data)->policy) +#define nft_trans_chain_id(trans) \ + (((struct nft_trans_chain *)trans->data)->chain_id) struct nft_trans_table { bool update; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 4565456c0ef4..477779595b78 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -196,6 +196,7 @@ enum nft_table_attributes { * @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING) * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes) * @NFTA_CHAIN_FLAGS: chain flags + * @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32) */ enum nft_chain_attributes { NFTA_CHAIN_UNSPEC, @@ -209,6 +210,7 @@ enum nft_chain_attributes { NFTA_CHAIN_COUNTERS, NFTA_CHAIN_PAD, NFTA_CHAIN_FLAGS, + NFTA_CHAIN_ID, __NFTA_CHAIN_MAX }; #define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7647ecfa0d40..650ef0dd0773 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -280,9 +280,15 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) if (trans == NULL) return ERR_PTR(-ENOMEM); - if (msg_type == NFT_MSG_NEWCHAIN) + if (msg_type == NFT_MSG_NEWCHAIN) { nft_activate_next(ctx->net, ctx->chain); + if (ctx->nla[NFTA_CHAIN_ID]) { + nft_trans_chain_id(trans) = + ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID])); + } + } + list_add_tail(&trans->list, &ctx->net->nft.commit_list); return trans; } @@ -1274,6 +1280,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = { .len = NFT_MODULE_AUTOLOAD_LIMIT }, [NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED }, [NFTA_CHAIN_FLAGS] = { .type = NLA_U32 }, + [NFTA_CHAIN_ID] = { .type = NLA_U32 }, }; static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = { @@ -2154,9 +2161,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, const struct nfgenmsg *nfmsg = nlmsg_data(nlh); u8 genmask = nft_genmask_next(net); int family = nfmsg->nfgen_family; + struct nft_chain *chain = NULL; const struct nlattr *attr; struct nft_table *table; - struct nft_chain *chain; u8 policy = NF_ACCEPT; struct nft_ctx ctx; u64 handle = 0; @@ -2181,7 +2188,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, return PTR_ERR(chain); } attr = nla[NFTA_CHAIN_HANDLE]; - } else { + } else if (nla[NFTA_CHAIN_NAME]) { chain = nft_chain_lookup(net, table, attr, genmask); if (IS_ERR(chain)) { if (PTR_ERR(chain) != -ENOENT) { @@ -2190,6 +2197,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, } chain = NULL; } + } else if (!nla[NFTA_CHAIN_ID]) { + return -EINVAL; } if (nla[NFTA_CHAIN_POLICY]) { From 
837830a4b439bfeb86c70b0115c280377c84714b Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:16 +0200 Subject: [PATCH 0666/2056] netfilter: nf_tables: add NFTA_RULE_CHAIN_ID attribute This new netlink attribute allows you to add rules to chains by the chain ID. Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 36 +++++++++++++++++++++--- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 477779595b78..2304d1b7ba5e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -240,6 +240,7 @@ enum nft_rule_attributes { NFTA_RULE_PAD, NFTA_RULE_ID, NFTA_RULE_POSITION_ID, + NFTA_RULE_CHAIN_ID, __NFTA_RULE_MAX }; #define NFTA_RULE_MAX (__NFTA_RULE_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 650ef0dd0773..fbe8f9209813 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2153,6 +2153,22 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return err; } +static struct nft_chain *nft_chain_lookup_byid(const struct net *net, + const struct nlattr *nla) +{ + u32 id = ntohl(nla_get_be32(nla)); + struct nft_trans *trans; + + list_for_each_entry(trans, &net->nft.commit_list, list) { + struct nft_chain *chain = trans->ctx.chain; + + if (trans->msg_type == NFT_MSG_NEWCHAIN && + id == nft_trans_chain_id(trans)) + return chain; + } + return ERR_PTR(-ENOENT); +} + static int nf_tables_newchain(struct net *net, struct sock *nlsk, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const nla[], @@ -2633,6 +2649,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = { .len = NFT_USERDATA_MAXLEN }, [NFTA_RULE_ID] = { .type = NLA_U32 }, [NFTA_RULE_POSITION_ID] = { .type = NLA_U32 }, + [NFTA_RULE_CHAIN_ID] = { .type = NLA_U32 }, }; static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net, @@ -3039,10 +3056,21 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, return PTR_ERR(table); } - chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask); - if (IS_ERR(chain)) { - NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); - return PTR_ERR(chain); + if (nla[NFTA_RULE_CHAIN]) { + chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], + genmask); + if (IS_ERR(chain)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); + return PTR_ERR(chain); + } + } else if (nla[NFTA_RULE_CHAIN_ID]) { + chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]); + if (IS_ERR(chain)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]); + return PTR_ERR(chain); + } + } else { + return -EINVAL; } if (nla[NFTA_RULE_HANDLE]) { From 51d70f181ff4e2c996ddf256af1efecd7d5864e5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:21 +0200 Subject: [PATCH 0667/2056] netfilter: nf_tables: add NFTA_VERDICT_CHAIN_ID attribute This netlink attribute allows you to identify the chain to jump/goto by means of the chain ID. 
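For illustration only, and not part of the patch itself: a sketch of the verdict nest of an immediate expression using NFTA_VERDICT_CHAIN_ID instead of NFTA_VERDICT_CHAIN. It assumes libmnl and the same headers as the NEWCHAIN sketch shown for NFTA_CHAIN_ID, and that the caller has already opened the NFTA_RULE_EXPRESSIONS list on a NFT_MSG_NEWRULE message.

/* Append an immediate expression whose verdict jumps to a chain referenced
 * by its transaction-local ID rather than by name.
 */
static void put_goto_chain_id(struct nlmsghdr *nlh, uint32_t chain_id)
{
	struct nlattr *elem, *data, *imm, *verdict;

	elem = mnl_attr_nest_start(nlh, NFTA_LIST_ELEM);
	mnl_attr_put_strz(nlh, NFTA_EXPR_NAME, "immediate");
	data = mnl_attr_nest_start(nlh, NFTA_EXPR_DATA);
	mnl_attr_put_u32(nlh, NFTA_IMMEDIATE_DREG, htonl(NFT_REG_VERDICT));
	imm = mnl_attr_nest_start(nlh, NFTA_IMMEDIATE_DATA);
	verdict = mnl_attr_nest_start(nlh, NFTA_DATA_VERDICT);
	mnl_attr_put_u32(nlh, NFTA_VERDICT_CODE, htonl(NFT_GOTO));
	mnl_attr_put_u32(nlh, NFTA_VERDICT_CHAIN_ID, htonl(chain_id));
	mnl_attr_nest_end(nlh, verdict);
	mnl_attr_nest_end(nlh, imm);
	mnl_attr_nest_end(nlh, data);
	mnl_attr_nest_end(nlh, elem);
}

The kernel resolves the ID through nft_chain_lookup_byid(), i.e. against NEWCHAIN transactions queued earlier in the same batch.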
Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 16 +++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 2304d1b7ba5e..683e75126d68 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -471,11 +471,13 @@ enum nft_data_attributes { * * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts) * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING) + * @NFTA_VERDICT_CHAIN_ID: jump target chain ID (NLA_U32) */ enum nft_verdict_attributes { NFTA_VERDICT_UNSPEC, NFTA_VERDICT_CODE, NFTA_VERDICT_CHAIN, + NFTA_VERDICT_CHAIN_ID, __NFTA_VERDICT_MAX }; #define NFTA_VERDICT_MAX (__NFTA_VERDICT_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fbe8f9209813..d86602797a69 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -8242,6 +8242,7 @@ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = { [NFTA_VERDICT_CODE] = { .type = NLA_U32 }, [NFTA_VERDICT_CHAIN] = { .type = NLA_STRING, .len = NFT_CHAIN_MAXNAMELEN - 1 }, + [NFTA_VERDICT_CHAIN_ID] = { .type = NLA_U32 }, }; static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, @@ -8278,10 +8279,19 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, break; case NFT_JUMP: case NFT_GOTO: - if (!tb[NFTA_VERDICT_CHAIN]) + if (tb[NFTA_VERDICT_CHAIN]) { + chain = nft_chain_lookup(ctx->net, ctx->table, + tb[NFTA_VERDICT_CHAIN], + genmask); + } else if (tb[NFTA_VERDICT_CHAIN_ID]) { + chain = nft_chain_lookup_byid(ctx->net, + tb[NFTA_VERDICT_CHAIN_ID]); + if (IS_ERR(chain)) + return PTR_ERR(chain); + } else { return -EINVAL; - chain = nft_chain_lookup(ctx->net, ctx->table, - tb[NFTA_VERDICT_CHAIN], genmask); + } + if (IS_ERR(chain)) return PTR_ERR(chain); if (nft_is_base_chain(chain)) From 67c49de4ad862c567088c5119cf125e566f56e7f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:25 +0200 Subject: [PATCH 0668/2056] netfilter: nf_tables: expose enum nft_chain_flags through UAPI This enum definition was never exposed through UAPI. Rename NFT_BASE_CHAIN to NFT_CHAIN_BASE for consistency. 
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 7 +------ include/uapi/linux/netfilter/nf_tables.h | 5 +++++ net/netfilter/nf_tables_api.c | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 3e5226684017..6d1e7da6e00a 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -921,11 +921,6 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, (expr) != (last); \ (expr) = nft_expr_next(expr)) -enum nft_chain_flags { - NFT_BASE_CHAIN = 0x1, - NFT_CHAIN_HW_OFFLOAD = 0x2, -}; - #define NFT_CHAIN_POLICY_UNSET U8_MAX /** @@ -1036,7 +1031,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai static inline bool nft_is_base_chain(const struct nft_chain *chain) { - return chain->flags & NFT_BASE_CHAIN; + return chain->flags & NFT_CHAIN_BASE; } int __nft_release_basechain(struct nft_ctx *ctx); diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 683e75126d68..2cf7cc3b50c1 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -184,6 +184,11 @@ enum nft_table_attributes { }; #define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1) +enum nft_chain_flags { + NFT_CHAIN_BASE = (1 << 0), + NFT_CHAIN_HW_OFFLOAD = (1 << 1), +}; + /** * enum nft_chain_attributes - nf_tables chain netlink attributes * diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d86602797a69..b7582a1c8dce 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1903,7 +1903,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, nft_basechain_hook_init(&basechain->ops, family, hook, chain); } - chain->flags |= NFT_BASE_CHAIN | flags; + chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && nft_chain_offload_priority(basechain) < 0) @@ -2255,7 +2255,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - flags |= chain->flags & NFT_BASE_CHAIN; + flags |= chain->flags & NFT_CHAIN_BASE; return nf_tables_updchain(&ctx, genmask, policy, flags); } From 04b7db414490ea9254d0c1d8930ea9571f8ce9f0 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:28 +0200 Subject: [PATCH 0669/2056] netfilter: nf_tables: add nft_chain_add() This patch adds a helper function to add the chain to the hashtable and the chain list. 
Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b7582a1c8dce..a7cb9c07802b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1914,6 +1914,20 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, return 0; } +static int nft_chain_add(struct nft_table *table, struct nft_chain *chain) +{ + int err; + + err = rhltable_insert_key(&table->chains_ht, chain->name, + &chain->rhlhead, nft_chain_ht_params); + if (err) + return err; + + list_add_tail_rcu(&chain->list, &table->chains); + + return 0; +} + static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags) { @@ -1991,16 +2005,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) goto err1; - err = rhltable_insert_key(&table->chains_ht, chain->name, - &chain->rhlhead, nft_chain_ht_params); - if (err) - goto err2; - trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (IS_ERR(trans)) { err = PTR_ERR(trans); - rhltable_remove(&table->chains_ht, &chain->rhlhead, - nft_chain_ht_params); goto err2; } @@ -2008,8 +2015,13 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nft_is_base_chain(chain)) nft_trans_chain_policy(trans) = policy; + err = nft_chain_add(table, chain); + if (err < 0) { + nft_trans_destroy(trans); + goto err2; + } + table->use++; - list_add_tail_rcu(&chain->list, &table->chains); return 0; err2: From d0e2c7de92c7f2b3d355ad76b0bb9fc43d1beb87 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 30 Jun 2020 19:21:36 +0200 Subject: [PATCH 0670/2056] netfilter: nf_tables: add NFT_CHAIN_BINDING This new chain flag specifies that: * the kernel dynamically allocates the chain name, if no chain name is specified. * If the immediate expression that refers to this chain is removed, then this bound chain (and its content) is destroyed. 
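For illustration only, and not part of the patch itself: a minimal fragment, reusing the NEWCHAIN message sketch from the NFTA_CHAIN_ID patch, that asks for an anonymous bound chain; the table name is a placeholder.

/* No NFTA_CHAIN_NAME: the kernel generates a "__chain%llu" name and ties
 * the chain's lifetime to the immediate expression that binds to it.
 */
static void put_anon_bound_chain(struct nlmsghdr *nlh, uint32_t chain_id)
{
	mnl_attr_put_strz(nlh, NFTA_CHAIN_TABLE, "filter");	/* placeholder */
	mnl_attr_put_u32(nlh, NFTA_CHAIN_ID, htonl(chain_id));
	mnl_attr_put_u32(nlh, NFTA_CHAIN_FLAGS, htonl(NFT_CHAIN_BINDING));
}

A rule added later in the same batch binds to this chain via NFTA_VERDICT_CHAIN_ID; when that immediate expression is destroyed, nft_immediate_destroy() releases the bound chain together with its rules.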
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 13 +++- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 86 ++++++++++++++++++++---- net/netfilter/nft_immediate.c | 51 ++++++++++++++ 4 files changed, 138 insertions(+), 13 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6d1e7da6e00a..822c26766330 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -899,6 +899,8 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) return (void *)&rule->data[rule->dlen]; } +void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule); + static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -944,7 +946,8 @@ struct nft_chain { struct nft_table *table; u64 handle; u32 use; - u8 flags:6, + u8 flags:5, + bound:1, genmask:2; char *name; @@ -989,6 +992,14 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, int nft_chain_validate_hooks(const struct nft_chain *chain, unsigned int hook_flags); +static inline bool nft_chain_is_bound(struct nft_chain *chain) +{ + return (chain->flags & NFT_CHAIN_BINDING) && chain->bound; +} + +void nft_chain_del(struct nft_chain *chain); +void nf_tables_chain_destroy(struct nft_ctx *ctx); + struct nft_stats { u64 bytes; u64 pkts; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 2cf7cc3b50c1..e00b4ae6174e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -187,6 +187,7 @@ enum nft_table_attributes { enum nft_chain_flags { NFT_CHAIN_BASE = (1 << 0), NFT_CHAIN_HW_OFFLOAD = (1 << 1), + NFT_CHAIN_BINDING = (1 << 2), }; /** diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index a7cb9c07802b..b8a970dad213 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1056,6 +1056,9 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; + ctx->chain = chain; err = nft_delrule_by_chain(ctx); @@ -1098,6 +1101,9 @@ static int nft_flush_table(struct nft_ctx *ctx) if (!nft_is_active_next(ctx->net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; + ctx->chain = chain; err = nft_delchain(ctx); @@ -1413,13 +1419,12 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, lockdep_commit_lock_is_held(net)); if (nft_dump_stats(skb, stats)) goto nla_put_failure; - - if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) && - nla_put_be32(skb, NFTA_CHAIN_FLAGS, - htonl(NFT_CHAIN_HW_OFFLOAD))) - goto nla_put_failure; } + if (chain->flags && + nla_put_be32(skb, NFTA_CHAIN_FLAGS, htonl(chain->flags))) + goto nla_put_failure; + if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use))) goto nla_put_failure; @@ -1621,7 +1626,7 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) kvfree(chain->rules_next); } -static void nf_tables_chain_destroy(struct nft_ctx *ctx) +void nf_tables_chain_destroy(struct nft_ctx *ctx) { struct nft_chain *chain = ctx->chain; struct nft_hook *hook, *next; @@ -1928,6 +1933,8 @@ static int nft_chain_add(struct nft_table *table, struct nft_chain *chain) return 0; } +static u64 chain_id; + static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, u8 policy, u32 flags) { @@ -1936,6 +1943,7 @@ static int 
nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_base_chain *basechain; struct nft_stats __percpu *stats; struct net *net = ctx->net; + char name[NFT_NAME_MAXLEN]; struct nft_trans *trans; struct nft_chain *chain; struct nft_rule **rules; @@ -1947,6 +1955,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nla[NFTA_CHAIN_HOOK]) { struct nft_chain_hook hook; + if (flags & NFT_CHAIN_BINDING) + return -EOPNOTSUPP; + err = nft_chain_parse_hook(net, nla, &hook, family, true); if (err < 0) return err; @@ -1976,16 +1987,33 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, return err; } } else { + if (flags & NFT_CHAIN_BASE) + return -EINVAL; + if (flags & NFT_CHAIN_HW_OFFLOAD) + return -EOPNOTSUPP; + chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (chain == NULL) return -ENOMEM; + + chain->flags = flags; } ctx->chain = chain; INIT_LIST_HEAD(&chain->rules); chain->handle = nf_tables_alloc_handle(table); chain->table = table; - chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + + if (nla[NFTA_CHAIN_NAME]) { + chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL); + } else { + if (!(flags & NFT_CHAIN_BINDING)) + return -EINVAL; + + snprintf(name, sizeof(name), "__chain%llu", ++chain_id); + chain->name = kstrdup(name, GFP_KERNEL); + } + if (!chain->name) { err = -ENOMEM; goto err1; @@ -2976,8 +3004,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, kfree(rule); } -static void nf_tables_rule_release(const struct nft_ctx *ctx, - struct nft_rule *rule) +void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) { nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); nf_tables_rule_destroy(ctx, rule); @@ -3075,6 +3102,9 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; + } else if (nla[NFTA_RULE_CHAIN_ID]) { chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]); if (IS_ERR(chain)) { @@ -3294,6 +3324,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; } nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); @@ -5330,11 +5362,24 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, */ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) { + struct nft_chain *chain; + struct nft_rule *rule; + if (type == NFT_DATA_VERDICT) { switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - data->verdict.chain->use++; + chain = data->verdict.chain; + chain->use++; + + if (!nft_chain_is_bound(chain)) + break; + + chain->table->use++; + list_for_each_entry(rule, &chain->rules, list) + chain->use++; + + nft_chain_add(chain->table, chain); break; } } @@ -7474,7 +7519,7 @@ static void nft_obj_del(struct nft_object *obj) list_del_rcu(&obj->list); } -static void nft_chain_del(struct nft_chain *chain) +void nft_chain_del(struct nft_chain *chain) { struct nft_table *table = chain->table; @@ -7825,6 +7870,10 @@ static int __nf_tables_abort(struct net *net, bool autoload) kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { + if (nft_chain_is_bound(trans->ctx.chain)) { + nft_trans_destroy(trans); + break; + } trans->ctx.table->use--; nft_chain_del(trans->ctx.chain); nf_tables_unregister_hook(trans->ctx.net, @@ -8321,10 +8370,23 @@ static 
int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, static void nft_verdict_uninit(const struct nft_data *data) { + struct nft_chain *chain; + struct nft_rule *rule; + switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: - data->verdict.chain->use--; + chain = data->verdict.chain; + chain->use--; + + if (!nft_chain_is_bound(chain)) + break; + + chain->table->use--; + list_for_each_entry(rule, &chain->rules, list) + chain->use--; + + nft_chain_del(chain); break; } } diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index c7f0ef73d939..9e556638bb32 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -54,6 +54,23 @@ static int nft_immediate_init(const struct nft_ctx *ctx, if (err < 0) goto err1; + if (priv->dreg == NFT_REG_VERDICT) { + struct nft_chain *chain = priv->data.verdict.chain; + + switch (priv->data.verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + if (nft_chain_is_bound(chain)) { + err = -EBUSY; + goto err1; + } + chain->bound = true; + break; + default: + break; + } + } + return 0; err1: @@ -81,6 +98,39 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); } +static void nft_immediate_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + const struct nft_immediate_expr *priv = nft_expr_priv(expr); + const struct nft_data *data = &priv->data; + struct nft_ctx chain_ctx; + struct nft_chain *chain; + struct nft_rule *rule; + + if (priv->dreg != NFT_REG_VERDICT) + return; + + switch (data->verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + chain = data->verdict.chain; + + if (!nft_chain_is_bound(chain)) + break; + + chain_ctx = *ctx; + chain_ctx.chain = chain; + + list_for_each_entry(rule, &chain->rules, list) + nf_tables_rule_release(&chain_ctx, rule); + + nf_tables_chain_destroy(&chain_ctx); + break; + default: + break; + } +} + static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); @@ -170,6 +220,7 @@ static const struct nft_expr_ops nft_imm_ops = { .init = nft_immediate_init, .activate = nft_immediate_activate, .deactivate = nft_immediate_deactivate, + .destroy = nft_immediate_destroy, .dump = nft_immediate_dump, .validate = nft_immediate_validate, .offload = nft_immediate_offload, From c1f79a2eefdcc0aef5d7a911c27a3f75f1936ecd Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 4 Jul 2020 02:51:28 +0200 Subject: [PATCH 0671/2056] netfilter: nf_tables: reject unsupported chain flags Bail out if userspace sends unsupported chain flags. 
Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 3 +++ net/netfilter/nf_tables_api.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index e00b4ae6174e..42f351c1f5c5 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -189,6 +189,9 @@ enum nft_chain_flags { NFT_CHAIN_HW_OFFLOAD = (1 << 1), NFT_CHAIN_BINDING = (1 << 2), }; +#define NFT_CHAIN_FLAGS (NFT_CHAIN_BASE | \ + NFT_CHAIN_HW_OFFLOAD | \ + NFT_CHAIN_BINDING) /** * enum nft_chain_attributes - nf_tables chain netlink attributes diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b8a970dad213..f96785586f64 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2285,6 +2285,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, else if (chain) flags = chain->flags; + if (flags & ~NFT_CHAIN_FLAGS) + return -EOPNOTSUPP; + nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla); if (chain != NULL) { From 4365f35b12447118a1193b9a20e512da65034110 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 4 Jul 2020 10:13:42 +0530 Subject: [PATCH 0672/2056] bnx2x: Add Idlechk related register definitions. The patch adds register definitions required for Idlechk implementation. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 78 ++++++++++++++++++- 1 file changed, 76 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index a43dea259b12..bfc0e45d4a2b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7639,6 +7639,82 @@ Theotherbitsarereservedandshouldbezero*/ (0x80 | ((_type)&0xf << 3) | ((CDU_CRC8(_cid, _region, _type)) & 0x7)) #define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80) +/* IdleChk registers */ +#define PXP_REG_HST_VF_DISABLED_ERROR_VALID 0x1030bc +#define PXP_REG_HST_VF_DISABLED_ERROR_DATA 0x1030b8 +#define PXP_REG_HST_PER_VIOLATION_VALID 0x1030e0 +#define PXP_REG_HST_INCORRECT_ACCESS_VALID 0x1030cc +#define PXP2_REG_RD_CPL_ERR_DETAILS 0x120778 +#define PXP2_REG_RD_CPL_ERR_DETAILS2 0x12077c +#define PXP2_REG_RQ_GARB 0x120748 +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q0 0x15c1bc +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q1 0x15c1c0 +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q2 0x15c1c4 +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q3 0x15c1c8 +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q4 0x15c1cc +#define PBF_REG_DISABLE_NEW_TASK_PROC_Q5 0x15c1d0 +#define PBF_REG_CREDIT_Q2 0x140344 +#define PBF_REG_CREDIT_Q3 0x140348 +#define PBF_REG_CREDIT_Q4 0x14034c +#define PBF_REG_CREDIT_Q5 0x140350 +#define PBF_REG_INIT_CRD_Q2 0x15c238 +#define PBF_REG_INIT_CRD_Q3 0x15c23c +#define PBF_REG_INIT_CRD_Q4 0x15c240 +#define PBF_REG_INIT_CRD_Q5 0x15c244 +#define PBF_REG_TASK_CNT_Q0 0x140374 +#define PBF_REG_TASK_CNT_Q1 0x140378 +#define PBF_REG_TASK_CNT_Q2 0x14037c +#define PBF_REG_TASK_CNT_Q3 0x140380 +#define PBF_REG_TASK_CNT_Q4 0x140384 +#define PBF_REG_TASK_CNT_Q5 0x140388 +#define PBF_REG_TASK_CNT_LB_Q 0x140370 +#define QM_REG_BYTECRD0 0x16e6fc +#define QM_REG_BYTECRD1 0x16e700 +#define QM_REG_BYTECRD2 0x16e704 +#define QM_REG_BYTECRD3 0x16e7ac +#define QM_REG_BYTECRD4 0x16e7b0 +#define QM_REG_BYTECRD5 0x16e7b4 +#define QM_REG_BYTECRD6 0x16e7b8 
+#define QM_REG_BYTECRDCMDQ_0 0x16e6e8 +#define QM_REG_BYTECRDERRREG 0x16e708 +#define MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID 0xa714 +#define QM_REG_VOQCREDIT_2 0x1682d8 +#define QM_REG_VOQCREDIT_3 0x1682dc +#define QM_REG_VOQCREDIT_5 0x1682e4 +#define QM_REG_VOQCREDIT_6 0x1682e8 +#define QM_REG_VOQINITCREDIT_3 0x16806c +#define QM_REG_VOQINITCREDIT_6 0x168078 +#define QM_REG_FWVOQ0TOHWVOQ 0x16e7bc +#define QM_REG_FWVOQ1TOHWVOQ 0x16e7c0 +#define QM_REG_FWVOQ2TOHWVOQ 0x16e7c4 +#define QM_REG_FWVOQ3TOHWVOQ 0x16e7c8 +#define QM_REG_FWVOQ4TOHWVOQ 0x16e7cc +#define QM_REG_FWVOQ5TOHWVOQ 0x16e7d0 +#define QM_REG_FWVOQ6TOHWVOQ 0x16e7d4 +#define QM_REG_FWVOQ7TOHWVOQ 0x16e7d8 +#define NIG_REG_INGRESS_EOP_PORT0_EMPTY 0x104ec +#define NIG_REG_INGRESS_EOP_PORT1_EMPTY 0x104f8 +#define NIG_REG_INGRESS_RMP0_DSCR_EMPTY 0x10530 +#define NIG_REG_INGRESS_RMP1_DSCR_EMPTY 0x10538 +#define NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY 0x10508 +#define NIG_REG_EGRESS_MNG0_FIFO_EMPTY 0x10460 +#define NIG_REG_EGRESS_MNG1_FIFO_EMPTY 0x10474 +#define NIG_REG_EGRESS_DEBUG_FIFO_EMPTY 0x10418 +#define NIG_REG_EGRESS_DELAY0_EMPTY 0x10420 +#define NIG_REG_EGRESS_DELAY1_EMPTY 0x10428 +#define NIG_REG_LLH0_FIFO_EMPTY 0x10548 +#define NIG_REG_LLH1_FIFO_EMPTY 0x10558 +#define NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY 0x182a8 +#define NIG_REG_P0_TLLH_FIFO_EMPTY 0x18308 +#define NIG_REG_P0_HBUF_DSCR_EMPTY 0x18318 +#define NIG_REG_P1_HBUF_DSCR_EMPTY 0x18348 +#define NIG_REG_P0_RX_MACFIFO_EMPTY 0x18570 +#define NIG_REG_P0_TX_MACFIFO_EMPTY 0x18578 +#define NIG_REG_EGRESS_DELAY2_EMPTY 0x1862c +#define NIG_REG_EGRESS_DELAY3_EMPTY 0x18630 +#define NIG_REG_EGRESS_DELAY4_EMPTY 0x18634 +#define NIG_REG_EGRESS_DELAY5_EMPTY 0x18638 + /****************************************************************************** * Description: * Calculates crc 8 on a word value: polynomial 0-1-2-8 @@ -7697,6 +7773,4 @@ static inline u8 calc_crc8(u32 data, u8 crc) return crc_res; } - - #endif /* BNX2X_REG_H */ From cdf711f20b23087a96811e92f9600a9cf5ea23d6 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 4 Jul 2020 10:13:43 +0530 Subject: [PATCH 0673/2056] bnx2x: Add support for idlechk tests. This patch populates a database of idlechk tests (registers and predicates) and performs the idlechk using this data. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/Makefile | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 10 +- .../net/ethernet/broadcom/bnx2x/bnx2x_main.c | 7 + .../ethernet/broadcom/bnx2x/bnx2x_self_test.c | 3183 +++++++++++++++++ .../net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2 + 5 files changed, 3196 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile index 9fdfaa269af9..2523cfc7527d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/Makefile +++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile @@ -5,5 +5,5 @@ obj-$(CONFIG_BNX2X) += bnx2x.o -bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o +bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o bnx2x_self_test.o bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 4f5b2b81be3d..dee61d96680e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1979,6 +1979,9 @@ struct bnx2x_func_init_params { #define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx)) +/*self test*/ +int bnx2x_idle_chk(struct bnx2x *bp); + /** * bnx2x_set_mac_one - configure a single MAC address * @@ -2430,13 +2433,6 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err); #define HC_SEG_ACCESS_ATTN 4 #define HC_SEG_ACCESS_NORM 0 /*Driver decision 0-1*/ -static const u32 dmae_reg_go_c[] = { - DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, - DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, - DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, - DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 -}; - void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev); void bnx2x_notify_link_changed(struct bnx2x *bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index ea60cd436f5e..41aac4f7634a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -276,6 +276,13 @@ static const struct pci_device_id bnx2x_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl); +const u32 dmae_reg_go_c[] = { + DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3, + DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7, + DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11, + DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15 +}; + /* Global resources for unloading a previously loaded device */ #define BNX2X_PREV_WAIT_NEEDED 1 static DEFINE_SEMAPHORE(bnx2x_prev_sem); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c new file mode 100644 index 000000000000..48f63ef2e6ea --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c @@ -0,0 +1,3183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include "bnx2x.h" + +#define NA 0xCD + +#define IDLE_CHK_E1 0x01 +#define IDLE_CHK_E1H 0x02 +#define IDLE_CHK_E2 0x04 +#define IDLE_CHK_E3A0 0x08 +#define IDLE_CHK_E3B0 0x10 + +#define IDLE_CHK_ERROR 1 +#define IDLE_CHK_ERROR_NO_TRAFFIC 2 +#define IDLE_CHK_WARNING 3 + +#define MAX_FAIL_MSG 256 + +/* statistics and error 
reporting */ +static int idle_chk_errors, idle_chk_warnings; + +/* masks for all chip types */ +static int is_e1, is_e1h, is_e2, is_e3a0, is_e3b0; + +/* struct for the argument list for a predicate in the self test databasei */ +struct st_pred_args { + u32 val1; /* value read from first register */ + u32 val2; /* value read from second register, if applicable */ + u32 imm1; /* 1st value in predicate condition, left-to-right */ + u32 imm2; /* 2nd value in predicate condition, left-to-right */ + u32 imm3; /* 3rd value in predicate condition, left-to-right */ + u32 imm4; /* 4th value in predicate condition, left-to-right */ +}; + +/* struct representing self test record - a single test */ +struct st_record { + u8 chip_mask; + u8 macro; + u32 reg1; + u32 reg2; + u16 loop; + u16 incr; + int (*bnx2x_predicate)(struct st_pred_args *pred_args); + u32 reg3; + u8 severity; + char *fail_msg; + struct st_pred_args pred_args; +}; + +/* predicates for self test */ +static int peq(struct st_pred_args *args) +{ + return (args->val1 == args->imm1); +} + +static int pneq(struct st_pred_args *args) +{ + return (args->val1 != args->imm1); +} + +static int pand_neq(struct st_pred_args *args) +{ + return ((args->val1 & args->imm1) != args->imm2); +} + +static int pand_neq_x2(struct st_pred_args *args) +{ + return (((args->val1 & args->imm1) != args->imm2) && + ((args->val1 & args->imm3) != args->imm4)); +} + +static int pneq_err(struct st_pred_args *args) +{ + return ((args->val1 != args->imm1) && (idle_chk_errors > args->imm2)); +} + +static int pgt(struct st_pred_args *args) +{ + return (args->val1 > args->imm1); +} + +static int pneq_r2(struct st_pred_args *args) +{ + return (args->val1 != args->val2); +} + +static int plt_sub_r2(struct st_pred_args *args) +{ + return (args->val1 < (args->val2 - args->imm1)); +} + +static int pne_sub_r2(struct st_pred_args *args) +{ + return (args->val1 != (args->val2 - args->imm1)); +} + +static int prsh_and_neq(struct st_pred_args *args) +{ + return (((args->val1 >> args->imm1) & args->imm2) != args->imm3); +} + +static int peq_neq_r2(struct st_pred_args *args) +{ + return ((args->val1 == args->imm1) && (args->val2 != args->imm2)); +} + +static int peq_neq_neq_r2(struct st_pred_args *args) +{ + return ((args->val1 == args->imm1) && (args->val2 != args->imm2) && + (args->val2 != args->imm3)); +} + +/* struct holding the database of self test checks (registers and predicates) */ +/* lines start from 2 since line 1 is heading in csv */ +#define ST_DB_LINES 468 +static struct st_record st_database[ST_DB_LINES] = { +/*line 2*/{(0x3), 1, 0x2114, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: ucorr_err_status is not 0", + {NA, NA, 0x0FF010, 0, NA, NA} }, + +/*line 3*/{(0x3), 1, 0x2114, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PCIE: ucorr_err_status - Unsupported request error", + {NA, NA, 0x100000, 0, NA, NA} }, + +/*line 4*/{(0x3), 1, 0x2120, + NA, 1, 0, pand_neq_x2, + NA, IDLE_CHK_WARNING, + "PCIE: corr_err_status is not 0x2000", + {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} }, + +/*line 5*/{(0x3), 1, 0x2814, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: attentions register is not 0x40100", + {NA, NA, ~0x40100, 0, NA, NA} }, + +/*line 6*/{(0x2), 1, 0x281c, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: attentions register is not 0x40040100", + {NA, NA, ~0x40040100, 0, NA, NA} }, + +/*line 7*/{(0x2), 1, 0x2820, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: attentions register is not 0x40040100", + {NA, NA, ~0x40040100, 0, NA, NA} }, + +/*line 8*/{(0x3), 
1, PXP2_REG_PGL_EXP_ROM2, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: There are outstanding read requests. Not all completios have arrived for read requests on tags that are marked with 0", + {NA, NA, 0xffffffff, NA, NA, NA} }, + +/*line 9*/{(0x3), 2, 0x212c, + NA, 4, 4, pneq_err, + NA, IDLE_CHK_WARNING, + "PCIE: error packet header is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 10*/{(0x1C), 1, 0x2104, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: ucorr_err_status is not 0", + {NA, NA, 0x0FD010, 0, NA, NA} }, + +/*line 11*/{(0x1C), 1, 0x2104, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PCIE: ucorr_err_status - Unsupported request error", + {NA, NA, 0x100000, 0, NA, NA} }, + +/*line 12*/{(0x1C), 1, 0x2104, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PCIE: ucorr_err_status - Flow Control Protocol Error", + {NA, NA, 0x2000, 0, NA, NA} }, + +/*line 13*/{(0x1C), 1, 0x2110, + NA, 1, 0, pand_neq_x2, + NA, IDLE_CHK_WARNING, + "PCIE: corr_err_status is not 0x2000", + {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} }, + +/*line 14*/{(0x1C), 1, 0x2814, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PCIE: TTX_BRIDGE_FORWARD_ERR - Received master request while BME was 0", + {NA, NA, 0x2000000, 0, NA, NA} }, + +/*line 15*/{(0x1C), 1, 0x2814, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: Func 0 1: attentions register is not 0x2040902", + {NA, NA, ~0x2040902, 0, NA, NA} }, + +/*line 16*/{(0x1C), 1, 0x2854, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: Func 2 3 4: attentions register is not 0x10240902", + {NA, NA, ~0x10240902, 0, NA, NA} }, + +/*line 17*/{(0x1C), 1, 0x285c, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: Func 5 6 7: attentions register is not 0x10240902", + {NA, NA, ~0x10240902, 0, NA, NA} }, + +/*line 18*/{(0x18), 1, 0x3040, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "PCIE: Overflow in DLP2TLP buffer", + {NA, NA, 0x2, 0, NA, NA} }, + +/*line 19*/{(0x1C), 1, PXP2_REG_PGL_EXP_ROM2, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: There are outstanding read requests for tags 0-31. Not all completios have arrived for read requests on tags that are marked with 0", + {NA, NA, 0xffffffff, NA, NA, NA} }, + +/*line 20*/{(0x1C), 2, 0x211c, + NA, 4, 4, pneq_err, + NA, IDLE_CHK_WARNING, + "PCIE: error packet header is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 21*/{(0x1C), 1, PGLUE_B_REG_INCORRECT_RCV_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PGLUE_B: Packet received from PCIe not according to the rules", + {NA, NA, 0, NA, NA, NA} }, + +/*line 22*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_31_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: was_error for VFs 0-31 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 23*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_63_32, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: was_error for VFs 32-63 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 24*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_95_64, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: was_error for VFs 64-95 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 25*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_127_96, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: was_error for VFs 96-127 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 26*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_PF_7_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: was_error for PFs 0-7 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 27*/{(0x1C), 1, PGLUE_B_REG_RX_ERR_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Completion received with error. (2:0) - PFID. 
(3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout; 1 - Unsupported Request; 2 - Completer Abort. (12) - valid bit", + {NA, NA, 0, NA, NA, NA} }, + +/*line 28*/{(0x1C), 1, PGLUE_B_REG_RX_TCPL_ERR_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: ATS TCPL received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout ; 1 - Unsupported Request; 2 - Completer Abort. (16:12) - OTB Entry ID. (17) - valid bit", + {NA, NA, 0, NA, NA, NA} }, + +/*line 29*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_31_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master write. Address(31:0) is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 30*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_63_32, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master write. Address(63:32) is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 31*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master write. Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID", + {NA, NA, 0, NA, NA, NA} }, + +/*line 32*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS2, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master write. Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request", + {NA, NA, 0, NA, NA, NA} }, + +/*line 33*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_31_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE: Error in master read address(31:0) is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 34*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_63_32, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master read address(63:32) is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 35*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master read Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID", + {NA, NA, 0, NA, NA, NA} }, + +/*line 36*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS2, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Error in master read Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request", + {NA, NA, 0, NA, NA, NA} }, + +/*line 37*/{(0x1C), 1, PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Target VF length violation access", + {NA, NA, 0, NA, NA, NA} }, + +/*line 38*/{(0x1C), 1, PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Target VF GRC space access failed permission check", + {NA, NA, 0, NA, NA, NA} }, + +/*line 39*/{(0x1C), 1, PGLUE_B_REG_TAGS_63_32, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: There are outstanding read requests for tags 32-63. 
Not all completions have arrived for read requests on tags that are marked with 0", + {NA, NA, 0xffffffff, NA, NA, NA} }, + +/*line 40*/{(0x1C), 3, PXP_REG_HST_VF_DISABLED_ERROR_VALID, + PXP_REG_HST_VF_DISABLED_ERROR_DATA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: Access to disabled VF took place", + {NA, NA, 0, NA, NA, NA} }, + +/*line 41*/{(0x1C), 1, PXP_REG_HST_PER_VIOLATION_VALID, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: Zone A permission violation occurred", + {NA, NA, 0, NA, NA, NA} }, + +/*line 42*/{(0x1C), 1, PXP_REG_HST_INCORRECT_ACCESS_VALID, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: Incorrect transaction took place", + {NA, NA, 0, NA, NA, NA} }, + +/*line 43*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: Completion received with error. Error details register is not 0. (15:0) - ECHO. (28:16) - Sub Request length plus start_offset_2_0 minus 1", + {NA, NA, 0, NA, NA, NA} }, + +/*line 44*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS2, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: Completion received with error. Error details 2nd register is not 0. (4:0) - VQ ID. (8:5) - client ID. (9) - valid bit", + {NA, NA, 0, NA, NA, NA} }, + +/*line 45*/{(0x1F), 1, PXP2_REG_RQ_VQ0_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ0 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 46*/{(0x1F), 1, PXP2_REG_RQ_VQ1_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ1 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 47*/{(0x1F), 1, PXP2_REG_RQ_VQ2_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ2 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 48*/{(0x1F), 1, PXP2_REG_RQ_VQ3_ENTRY_CNT, + NA, 1, 0, pgt, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ3 is not empty", + {NA, NA, 2, NA, NA, NA} }, + +/*line 49*/{(0x1F), 1, PXP2_REG_RQ_VQ4_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ4 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 50*/{(0x1F), 1, PXP2_REG_RQ_VQ5_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ5 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 51*/{(0x1F), 1, PXP2_REG_RQ_VQ6_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ6 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 52*/{(0x1F), 1, PXP2_REG_RQ_VQ7_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ7 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 53*/{(0x1F), 1, PXP2_REG_RQ_VQ8_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ8 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 54*/{(0x1F), 1, PXP2_REG_RQ_VQ9_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ9 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 55*/{(0x1F), 1, PXP2_REG_RQ_VQ10_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ10 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 56*/{(0x1F), 1, PXP2_REG_RQ_VQ11_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ11 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 57*/{(0x1F), 1, PXP2_REG_RQ_VQ12_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ12 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 58*/{(0x1F), 1, PXP2_REG_RQ_VQ13_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ13 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 59*/{(0x1F), 1, PXP2_REG_RQ_VQ14_ENTRY_CNT, + NA, 1, 0, pneq, + NA,
IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ14 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 60*/{(0x1F), 1, PXP2_REG_RQ_VQ15_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ15 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 61*/{(0x1F), 1, PXP2_REG_RQ_VQ16_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ16 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 62*/{(0x1F), 1, PXP2_REG_RQ_VQ17_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ17 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 63*/{(0x1F), 1, PXP2_REG_RQ_VQ18_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ18 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 64*/{(0x1F), 1, PXP2_REG_RQ_VQ19_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ19 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 65*/{(0x1F), 1, PXP2_REG_RQ_VQ20_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ20 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 66*/{(0x1F), 1, PXP2_REG_RQ_VQ21_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ21 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 67*/{(0x1F), 1, PXP2_REG_RQ_VQ22_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ22 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 68*/{(0x1F), 1, PXP2_REG_RQ_VQ23_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ23 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 69*/{(0x1F), 1, PXP2_REG_RQ_VQ24_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ24 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 70*/{(0x1F), 1, PXP2_REG_RQ_VQ25_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ25 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 71*/{(0x1F), 1, PXP2_REG_RQ_VQ26_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ26 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 72*/{(0x1F), 1, PXP2_REG_RQ_VQ27_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ27 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 73*/{(0x1F), 1, PXP2_REG_RQ_VQ28_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ28 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 74*/{(0x1F), 1, PXP2_REG_RQ_VQ29_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ29 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 75*/{(0x1F), 1, PXP2_REG_RQ_VQ30_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ30 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 76*/{(0x1F), 1, PXP2_REG_RQ_VQ31_ENTRY_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: VQ31 is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 77*/{(0x1F), 1, PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: rq_ufifo_num_of_entry is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 78*/{(0x1F), 1, PXP2_REG_RQ_RBC_DONE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: rq_rbc_done is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 79*/{(0x1F), 1, PXP2_REG_RQ_CFG_DONE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: rq_cfg_done is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 80*/{(0x3), 1, PXP2_REG_PSWRQ_BW_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: rq_read_credit and rq_write_credit are not 3", + {NA, NA, 0x1B, NA, NA, NA} }, + +/*line 81*/{(0x1F), 1, 
PXP2_REG_RD_START_INIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: rd_start_init is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 82*/{(0x1F), 1, PXP2_REG_RD_INIT_DONE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: rd_init_done is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 83*/{(0x1F), 3, PXP2_REG_RD_SR_CNT, + PXP2_REG_RD_SR_NUM_CFG, 1, 0, pne_sub_r2, + NA, IDLE_CHK_WARNING, + "PXP2: rd_sr_cnt is not equal to rd_sr_num_cfg", + {NA, NA, 1, NA, NA, NA} }, + +/*line 84*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT, + PXP2_REG_RD_BLK_NUM_CFG, 1, 0, pneq_r2, + NA, IDLE_CHK_WARNING, + "PXP2: rd_blk_cnt is not equal to rd_blk_num_cfg", + {NA, NA, NA, NA, NA, NA} }, + +/*line 85*/{(0x1F), 3, PXP2_REG_RD_SR_CNT, + PXP2_REG_RD_SR_NUM_CFG, 1, 0, plt_sub_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: There are more than two unused SRs", + {NA, NA, 3, NA, NA, NA} }, + +/*line 86*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT, + PXP2_REG_RD_BLK_NUM_CFG, 1, 0, plt_sub_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: There are more than two unused blocks", + {NA, NA, 2, NA, NA, NA} }, + +/*line 87*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: P0 All delivery ports are not idle", + {NA, NA, 1, NA, NA, NA} }, + +/*line 88*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: P1 All delivery ports are not idle", + {NA, NA, 1, NA, NA, NA} }, + +/*line 89*/{(0x1F), 2, PXP2_REG_RD_ALMOST_FULL_0, + NA, 11, 4, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: rd_almost_full is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 90*/{(0x1F), 1, PXP2_REG_RD_DISABLE_INPUTS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: PSWRD inputs are disabled", + {NA, NA, 0, NA, NA, NA} }, + +/*line 91*/{(0x1F), 1, PXP2_REG_HST_HEADER_FIFO_STATUS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: HST header FIFO status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 92*/{(0x1F), 1, PXP2_REG_HST_DATA_FIFO_STATUS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: HST data FIFO status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 93*/{(0x3), 1, PXP2_REG_PGL_WRITE_BLOCKED, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: pgl_write_blocked is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 94*/{(0x3), 1, PXP2_REG_PGL_READ_BLOCKED, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: pgl_read_blocked is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 95*/{(0x1C), 1, PXP2_REG_PGL_WRITE_BLOCKED, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: pgl_write_blocked is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 96*/{(0x1C), 1, PXP2_REG_PGL_READ_BLOCKED, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: pgl_read_blocked is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 97*/{(0x1F), 1, PXP2_REG_PGL_TXW_CDTS, + NA, 1, 0, prsh_and_neq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PXP2: There is data which is ready", + {NA, NA, 17, 1, 0, NA} }, + +/*line 98*/{(0x1F), 1, PXP_REG_HST_ARB_IS_IDLE, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: HST arbiter is not idle", + {NA, NA, 1, NA, NA, NA} }, + +/*line 99*/{(0x1F), 1, PXP_REG_HST_CLIENTS_WAITING_TO_ARB, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: HST one of the clients is waiting for delivery", + {NA, NA, 0, NA, NA, NA} }, + +/*line 100*/{(0x1E), 1, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: HST Close the gates: Discarding internal writes", + {NA, NA, 0, NA, NA, NA} }, + +/*line 101*/{(0x1E), 1, PXP_REG_HST_DISCARD_DOORBELLS_STATUS, + NA, 
1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: HST Close the gates: Discarding doorbells", + {NA, NA, 0, NA, NA, NA} }, + +/*line 102*/{(0x1C), 1, PXP2_REG_RQ_GARB, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PXP2: PSWRQ Close the gates is asserted. Check AEU AFTER_INVERT registers for parity errors", + {NA, NA, 0x1000, 0, NA, NA} }, + +/*line 103*/{(0x1F), 1, DMAE_REG_GO_C0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 0 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 104*/{(0x1F), 1, DMAE_REG_GO_C1, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 1 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 105*/{(0x1F), 1, DMAE_REG_GO_C2, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 2 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 106*/{(0x1F), 1, DMAE_REG_GO_C3, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 3 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 107*/{(0x1F), 1, DMAE_REG_GO_C4, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 4 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 108*/{(0x1F), 1, DMAE_REG_GO_C5, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 5 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 109*/{(0x1F), 1, DMAE_REG_GO_C6, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 6 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 110*/{(0x1F), 1, DMAE_REG_GO_C7, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 7 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 111*/{(0x1F), 1, DMAE_REG_GO_C8, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 8 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 112*/{(0x1F), 1, DMAE_REG_GO_C9, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 9 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 113*/{(0x1F), 1, DMAE_REG_GO_C10, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 10 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 114*/{(0x1F), 1, DMAE_REG_GO_C11, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 11 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 115*/{(0x1F), 1, DMAE_REG_GO_C12, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 12 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 116*/{(0x1F), 1, DMAE_REG_GO_C13, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 13 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 117*/{(0x1F), 1, DMAE_REG_GO_C14, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 14 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 118*/{(0x1F), 1, DMAE_REG_GO_C15, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DMAE: command 15 go is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 119*/{(0x1F), 1, CFC_REG_ERROR_VECTOR, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CFC: error vector is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 120*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ARRIVING, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CFC: number of arriving LCIDs is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 121*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ALLOC, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CFC: number of alloc LCIDs is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 122*/{(0x1F), 1, CFC_REG_NUM_LCIDS_LEAVING, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CFC: number of leaving LCIDs is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 123*/{(0x1F), 7, CFC_REG_INFO_RAM, + 
CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_neq_r2, + CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC, + "CFC: AC is neither 0 nor 2 on connType 0 (ETH)", + {NA, NA, 0, 0, 2, NA} }, + +/*line 124*/{(0x1F), 7, CFC_REG_INFO_RAM, + CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2, + CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC, + "CFC: AC is not 0 on connType 1 (TOE)", + {NA, NA, 1, 0, NA, NA} }, + +/*line 125*/{(0x1F), 7, CFC_REG_INFO_RAM, + CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2, + CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC, + "CFC: AC is not 0 on connType 3 (iSCSI)", + {NA, NA, 3, 0, NA, NA} }, + +/*line 126*/{(0x1F), 7, CFC_REG_INFO_RAM, + CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2, + CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC, + "CFC: AC is not 0 on connType 4 (FCoE)", + {NA, NA, 4, 0, NA, NA} }, + +/*line 127*/{(0x1F), 2, QM_REG_QTASKCTR_0, + NA, 64, 4, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Queue is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 128*/{(0xF), 3, QM_REG_VOQCREDIT_0, + QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_0, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 129*/{(0xF), 3, QM_REG_VOQCREDIT_1, + QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_1, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 130*/{(0xF), 3, QM_REG_VOQCREDIT_4, + QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_4, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 131*/{(0x3), 3, QM_REG_PORT0BYTECRD, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: P0 Byte credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 132*/{(0x3), 3, QM_REG_PORT1BYTECRD, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: P1 Byte credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 133*/{(0x1F), 1, CCM_REG_CAM_OCCUP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: XX protection CAM is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 134*/{(0x1F), 1, TCM_REG_CAM_OCCUP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: XX protection CAM is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 135*/{(0x1F), 1, UCM_REG_CAM_OCCUP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: XX protection CAM is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 136*/{(0x1F), 1, XCM_REG_CAM_OCCUP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: XX protection CAM is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 137*/{(0x1F), 1, BRB1_REG_NUM_OF_FULL_BLOCKS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "BRB1: BRB is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 138*/{(0x1F), 1, CSEM_REG_SLEEP_THREADS_VALID, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: There are sleeping threads", + {NA, NA, 0, NA, NA, NA} }, + +/*line 139*/{(0x1F), 1, TSEM_REG_SLEEP_THREADS_VALID, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: There are sleeping threads", + {NA, NA, 0, NA, NA, NA} }, + +/*line 140*/{(0x1F), 1, USEM_REG_SLEEP_THREADS_VALID, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: There are sleeping threads", + {NA, NA, 0, NA, NA, NA} }, + +/*line 141*/{(0x1F), 1, XSEM_REG_SLEEP_THREADS_VALID, + NA, 1, 0, pneq, + NA, 
IDLE_CHK_ERROR_NO_TRAFFIC, + "XSEM: There are sleeping threads", + {NA, NA, 0, NA, NA, NA} }, + +/*line 142*/{(0x1F), 1, CSEM_REG_SLOW_EXT_STORE_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: External store FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 143*/{(0x1F), 1, TSEM_REG_SLOW_EXT_STORE_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: External store FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 144*/{(0x1F), 1, USEM_REG_SLOW_EXT_STORE_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: External store FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 145*/{(0x1F), 1, XSEM_REG_SLOW_EXT_STORE_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSEM: External store FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 146*/{(0x1F), 1, CSDM_REG_SYNC_PARSER_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSDM: Parser serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 147*/{(0x1F), 1, TSDM_REG_SYNC_PARSER_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSDM: Parser serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 148*/{(0x1F), 1, USDM_REG_SYNC_PARSER_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USDM: Parser serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 149*/{(0x1F), 1, XSDM_REG_SYNC_PARSER_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSDM: Parser serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 150*/{(0x1F), 1, CSDM_REG_SYNC_SYNC_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSDM: Parser SYNC serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 151*/{(0x1F), 1, TSDM_REG_SYNC_SYNC_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSDM: Parser SYNC serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 152*/{(0x1F), 1, USDM_REG_SYNC_SYNC_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USDM: Parser SYNC serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 153*/{(0x1F), 1, XSDM_REG_SYNC_SYNC_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSDM: Parser SYNC serial FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 154*/{(0x1F), 1, CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block", + {NA, NA, 1, NA, NA, NA} }, + +/*line 155*/{(0x1F), 1, TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block", + {NA, NA, 1, NA, NA, NA} }, + +/*line 156*/{(0x1F), 1, USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block", + {NA, NA, 1, NA, NA, NA} }, + +/*line 157*/{(0x1F), 1, XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block", + {NA, NA, 1, NA, NA, NA} }, + +/*line 158*/{(0x1F), 1, DORQ_REG_DQ_FILL_LVLF, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DORQ: DORQ queue is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 159*/{(0x1F), 1, CFC_REG_CFC_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CFC: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 160*/{(0x1F), 1, CDU_REG_CDU_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CDU: Interrupt status is not 0", + {NA, NA, 0, NA, 
NA, NA} }, + +/*line 161*/{(0x1F), 1, CCM_REG_CCM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 162*/{(0x1F), 1, TCM_REG_TCM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 163*/{(0x1F), 1, UCM_REG_UCM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 164*/{(0x1F), 1, XCM_REG_XCM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 165*/{(0xF), 1, PBF_REG_PBF_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PBF: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 166*/{(0x1F), 1, TM_REG_TM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TIMERS: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 167*/{(0x1F), 1, DORQ_REG_DORQ_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "DORQ: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 168*/{(0x1F), 1, SRC_REG_SRC_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "SRCH: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 169*/{(0x1F), 1, PRS_REG_PRS_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PRS: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 170*/{(0x1F), 1, BRB1_REG_BRB1_INT_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "BRB1: Interrupt status is not 0", + {NA, NA, ~0xFC00, 0, NA, NA} }, + +/*line 171*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XPB: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 172*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UPB: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 173*/{(0x1), 1, PXP2_REG_PXP2_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: Interrupt status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 174*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: Interrupt status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 175*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: Interrupt status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 176*/{(0x1F), 1, QM_REG_QM_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 177*/{(0x1F), 1, PXP_REG_PXP_INT_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: P0 Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 178*/{(0x1F), 1, PXP_REG_PXP_INT_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: P1 Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 179*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_INT_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: Interrupt status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 180*/{(0x1F), 1, DORQ_REG_RSPA_CRD_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DORQ: Credit to XCM is not full", + {NA, NA, 2, NA, NA, NA} }, + +/*line 181*/{(0x1F), 1, DORQ_REG_RSPB_CRD_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "DORQ: Credit to UCM is not full", + {NA, NA, 2, NA, NA, NA} }, + +/*line 182*/{(0x3), 1, QM_REG_VOQCRDERRREG, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: Credit error register is not 0 (byte or credit overflow/underflow)", + {NA, NA, 0, NA, NA, NA} }, + +/*line 
183*/{(0x1F), 1, DORQ_REG_DQ_FULL_ST, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "DORQ: DORQ queue is full", + {NA, NA, 0, NA, NA, NA} }, + +/*line 184*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "AEU: P0 AFTER_INVERT_1 is not 0", + {NA, NA, ~0xCFFC, 0, NA, NA} }, + +/*line 185*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "AEU: P0 AFTER_INVERT_2 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 186*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: P0 AFTER_INVERT_3 is not 0", + {NA, NA, ~0xFFFF0000, 0, NA, NA} }, + +/*line 187*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: P0 AFTER_INVERT_4 is not 0", + {NA, NA, ~0x801FFFFF, 0, NA, NA} }, + +/*line 188*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_1, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "AEU: P1 AFTER_INVERT_1 is not 0", + {NA, NA, ~0xCFFC, 0, NA, NA} }, + +/*line 189*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "AEU: P1 AFTER_INVERT_2 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 190*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_1, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: P1 AFTER_INVERT_3 is not 0", + {NA, NA, ~0xFFFF0000, 0, NA, NA} }, + +/*line 191*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: P1 AFTER_INVERT_4 is not 0", + {NA, NA, ~0x801FFFFF, 0, NA, NA} }, + +/*line 192*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_MCP, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "AEU: MCP AFTER_INVERT_1 is not 0", + {NA, NA, ~0xCFFC, 0, NA, NA} }, + +/*line 193*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_MCP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "AEU: MCP AFTER_INVERT_2 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 194*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_MCP, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: MCP AFTER_INVERT_3 is not 0", + {NA, NA, ~0xFFFF0000, 0, NA, NA} }, + +/*line 195*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_MCP, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "AEU: MCP AFTER_INVERT_4 is not 0", + {NA, NA, ~0x801FFFFF, 0, NA, NA} }, + +/*line 196*/{(0xF), 5, PBF_REG_P0_CREDIT, + PBF_REG_P0_INIT_CRD, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_P0, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P0 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 197*/{(0xF), 5, PBF_REG_P1_CREDIT, + PBF_REG_P1_INIT_CRD, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_P1, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P1 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 198*/{(0xF), 3, PBF_REG_P4_CREDIT, + PBF_REG_P4_INIT_CRD, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P4 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 199*/{(0x10), 5, PBF_REG_CREDIT_Q0, + PBF_REG_INIT_CRD_Q0, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q0, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q0 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 200*/{(0x10), 5, PBF_REG_CREDIT_Q1, + PBF_REG_INIT_CRD_Q1, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q1, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q1 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 201*/{(0x10), 5, PBF_REG_CREDIT_Q2, + PBF_REG_INIT_CRD_Q2, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q2, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q2 credit is not equal to init_crd", + {NA, NA, NA, NA, 
NA, NA} }, + +/*line 202*/{(0x10), 5, PBF_REG_CREDIT_Q3, + PBF_REG_INIT_CRD_Q3, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q3, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q3 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 203*/{(0x10), 5, PBF_REG_CREDIT_Q4, + PBF_REG_INIT_CRD_Q4, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q4, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q4 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 204*/{(0x10), 5, PBF_REG_CREDIT_Q5, + PBF_REG_INIT_CRD_Q5, 1, 0, pneq_r2, + PBF_REG_DISABLE_NEW_TASK_PROC_Q5, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q5 credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 205*/{(0x10), 3, PBF_REG_CREDIT_LB_Q, + PBF_REG_INIT_CRD_LB_Q, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: LB Q credit is not equal to init_crd", + {NA, NA, NA, NA, NA, NA} }, + +/*line 206*/{(0xF), 1, PBF_REG_P0_TASK_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P0 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 207*/{(0xF), 1, PBF_REG_P1_TASK_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P1 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 208*/{(0xF), 1, PBF_REG_P4_TASK_CNT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: P4 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 209*/{(0x10), 1, PBF_REG_TASK_CNT_Q0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q0 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 210*/{(0x10), 1, PBF_REG_TASK_CNT_Q1, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q1 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 211*/{(0x10), 1, PBF_REG_TASK_CNT_Q2, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q2 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 212*/{(0x10), 1, PBF_REG_TASK_CNT_Q3, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q3 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 213*/{(0x10), 1, PBF_REG_TASK_CNT_Q4, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q4 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 214*/{(0x10), 1, PBF_REG_TASK_CNT_Q5, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: Q5 task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 215*/{(0x10), 1, PBF_REG_TASK_CNT_LB_Q, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PBF: LB Q task_cnt is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 216*/{(0x1F), 1, XCM_REG_CFC_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: CFC_INIT_CRD is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 217*/{(0x1F), 1, UCM_REG_CFC_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: CFC_INIT_CRD is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 218*/{(0x1F), 1, TCM_REG_CFC_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: CFC_INIT_CRD is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 219*/{(0x1F), 1, CCM_REG_CFC_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: CFC_INIT_CRD is not 1", + {NA, NA, 1, NA, NA, NA} }, + +/*line 220*/{(0x1F), 1, XCM_REG_XQM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: XQM_INIT_CRD is not 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 221*/{(0x1F), 1, UCM_REG_UQM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: UQM_INIT_CRD is not 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 222*/{(0x1F), 1, TCM_REG_TQM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: 
TQM_INIT_CRD is not 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 223*/{(0x1F), 1, CCM_REG_CQM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: CQM_INIT_CRD is not 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 224*/{(0x1F), 1, XCM_REG_TM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: TM_INIT_CRD is not 4", + {NA, NA, 4, NA, NA, NA} }, + +/*line 225*/{(0x1F), 1, UCM_REG_TM_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: TM_INIT_CRD is not 4", + {NA, NA, 4, NA, NA, NA} }, + +/*line 226*/{(0x1F), 1, XCM_REG_FIC0_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XCM: FIC0_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 227*/{(0x1F), 1, UCM_REG_FIC0_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: FIC0_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 228*/{(0x1F), 1, TCM_REG_FIC0_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: FIC0_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 229*/{(0x1F), 1, CCM_REG_FIC0_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: FIC0_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 230*/{(0x1F), 1, XCM_REG_FIC1_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: FIC1_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 231*/{(0x1F), 1, UCM_REG_FIC1_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: FIC1_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 232*/{(0x1F), 1, TCM_REG_FIC1_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: FIC1_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 233*/{(0x1F), 1, CCM_REG_FIC1_INIT_CRD, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: FIC1_INIT_CRD is not 64", + {NA, NA, 64, NA, NA, NA} }, + +/*line 234*/{(0x1), 1, XCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: XX_FREE differs from expected 31", + {NA, NA, 31, NA, NA, NA} }, + +/*line 235*/{(0x1E), 1, XCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XCM: XX_FREE differs from expected 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 236*/{(0x1F), 1, UCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "UCM: XX_FREE differs from expected 27", + {NA, NA, 27, NA, NA, NA} }, + +/*line 237*/{(0x7), 1, TCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: XX_FREE differs from expected 32", + {NA, NA, 32, NA, NA, NA} }, + +/*line 238*/{(0x18), 1, TCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TCM: XX_FREE differs from expected 29", + {NA, NA, 29, NA, NA, NA} }, + +/*line 239*/{(0x1F), 1, CCM_REG_XX_FREE, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CCM: XX_FREE differs from expected 24", + {NA, NA, 24, NA, NA, NA} }, + +/*line 240*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18000, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSEM: FOC0 credit less than initial credit", + {NA, NA, 0, NA, NA, NA} }, + +/*line 241*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18040, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSEM: FOC1 credit less than initial credit", + {NA, NA, 24, NA, NA, NA} }, + +/*line 242*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18080, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "XSEM: FOC2 credit less than initial credit", + {NA, NA, 12, NA, NA, NA} }, + +/*line 243*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18000, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: FOC0 credit less 
than initial credit", + {NA, NA, 26, NA, NA, NA} }, + +/*line 244*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18040, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: FOC1 credit less than initial credit", + {NA, NA, 78, NA, NA, NA} }, + +/*line 245*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18080, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: FOC2 credit less than initial credit", + {NA, NA, 16, NA, NA, NA} }, + +/*line 246*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x180C0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "USEM: FOC3 credit less than initial credit", + {NA, NA, 32, NA, NA, NA} }, + +/*line 247*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18000, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: FOC0 credit less than initial credit", + {NA, NA, 52, NA, NA, NA} }, + +/*line 248*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18040, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: FOC1 credit less than initial credit", + {NA, NA, 24, NA, NA, NA} }, + +/*line 249*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18080, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: FOC2 credit less than initial credit", + {NA, NA, 12, NA, NA, NA} }, + +/*line 250*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x180C0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "TSEM: FOC3 credit less than initial credit", + {NA, NA, 32, NA, NA, NA} }, + +/*line 251*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18000, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: FOC0 credit less than initial credit", + {NA, NA, 16, NA, NA, NA} }, + +/*line 252*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18040, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: FOC1 credit less than initial credit", + {NA, NA, 18, NA, NA, NA} }, + +/*line 253*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18080, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: FOC2 credit less than initial credit", + {NA, NA, 48, NA, NA, NA} }, + +/*line 254*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x180C0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "CSEM: FOC3 credit less than initial credit", + {NA, NA, 14, NA, NA, NA} }, + +/*line 255*/{(0x1F), 1, PRS_REG_TSDM_CURRENT_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: TSDM current credit is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 256*/{(0x1F), 1, PRS_REG_TCM_CURRENT_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: TCM current credit is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 257*/{(0x1F), 1, PRS_REG_CFC_LD_CURRENT_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: CFC_LD current credit is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 258*/{(0x1F), 1, PRS_REG_CFC_SEARCH_CURRENT_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: CFC_SEARCH current credit is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 259*/{(0x1F), 1, PRS_REG_SRC_CURRENT_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: SRCH current credit is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 260*/{(0x1F), 1, PRS_REG_PENDING_BRB_PRS_RQ, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: PENDING_BRB_PRS_RQ is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 261*/{(0x1F), 2, PRS_REG_PENDING_BRB_CAC0_RQ, + NA, 5, 4, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: PENDING_BRB_CAC_RQ is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 262*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_LSB, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: SERIAL_NUM_STATUS_LSB is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 
263*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_MSB, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "PRS: SERIAL_NUM_STATUS_MSB is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 264*/{(0x1F), 1, CDU_REG_ERROR_DATA, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CDU: ERROR_DATA is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 265*/{(0x1F), 1, CCM_REG_STORM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: STORM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 266*/{(0x1F), 1, CCM_REG_CSDM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: CSDM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 267*/{(0x1F), 1, CCM_REG_TSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: TSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 268*/{(0x1F), 1, CCM_REG_XSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: XSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 269*/{(0x1F), 1, CCM_REG_USEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: USEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 270*/{(0x1F), 1, CCM_REG_PBF_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "CCM: PBF declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 271*/{(0x1F), 1, TCM_REG_STORM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: STORM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 272*/{(0x1F), 1, TCM_REG_TSDM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: TSDM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 273*/{(0x1F), 1, TCM_REG_PRS_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: PRS declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 274*/{(0x1F), 1, TCM_REG_PBF_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: PBF declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 275*/{(0x1F), 1, TCM_REG_USEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: USEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 276*/{(0x1F), 1, TCM_REG_CSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "TCM: CSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 277*/{(0x1F), 1, UCM_REG_STORM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: STORM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 278*/{(0x1F), 1, UCM_REG_USDM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: USDM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 279*/{(0x1F), 1, UCM_REG_TSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: TSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 280*/{(0x1F), 1, UCM_REG_CSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: CSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 281*/{(0x1F), 1, UCM_REG_XSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: XSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 282*/{(0x1F), 1, UCM_REG_DORQ_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "UCM: DORQ declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 283*/{(0x1F), 1, 
XCM_REG_STORM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: STORM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 284*/{(0x1F), 1, XCM_REG_XSDM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: XSDM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 285*/{(0x1F), 1, XCM_REG_TSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: TSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 286*/{(0x1F), 1, XCM_REG_CSEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: CSEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 287*/{(0x1F), 1, XCM_REG_USEM_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: USEM declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 288*/{(0x1F), 1, XCM_REG_DORQ_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: DORQ declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 289*/{(0x1F), 1, XCM_REG_PBF_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: PBF declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 290*/{(0x1F), 1, XCM_REG_NIG0_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: NIG0 declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 291*/{(0x1F), 1, XCM_REG_NIG1_LENGTH_MIS, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "XCM: NIG1 declared message length unequal to actual", + {NA, NA, 0, NA, NA, NA} }, + +/*line 292*/{(0x1F), 1, QM_REG_XQM_WRC_FIFOLVL, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: XQM wrc_fifolvl is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 293*/{(0x1F), 1, QM_REG_UQM_WRC_FIFOLVL, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: UQM wrc_fifolvl is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 294*/{(0x1F), 1, QM_REG_TQM_WRC_FIFOLVL, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: TQM wrc_fifolvl is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 295*/{(0x1F), 1, QM_REG_CQM_WRC_FIFOLVL, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: CQM wrc_fifolvl is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 296*/{(0x1F), 1, QM_REG_QSTATUS_LOW, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: QSTATUS_LOW is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 297*/{(0x1F), 1, QM_REG_QSTATUS_HIGH, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: QSTATUS_HIGH is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 298*/{(0x1F), 1, QM_REG_PAUSESTATE0, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: PAUSESTATE0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 299*/{(0x1F), 1, QM_REG_PAUSESTATE1, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: PAUSESTATE1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 300*/{(0x1F), 1, QM_REG_OVFQNUM, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: OVFQNUM is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 301*/{(0x1F), 1, QM_REG_OVFERROR, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: OVFERROR is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 302*/{(0x1F), 6, QM_REG_PTRTBL, + NA, 64, 8, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: read and write variables not equal", + {NA, NA, NA, NA, NA, NA} }, + +/*line 303*/{(0x1F), 1, BRB1_REG_BRB1_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "BRB1: parity status is not 0", + {NA, NA, ~0x8, 0, NA, NA} }, + +/*line 304*/{(0x1F), 1, CDU_REG_CDU_PRTY_STS, + NA, 1, 0, pneq, + NA, 
IDLE_CHK_WARNING, + "CDU: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 305*/{(0x1F), 1, CFC_REG_CFC_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "CFC: parity status is not 0", + {NA, NA, ~0x2, 0, NA, NA} }, + +/*line 306*/{(0x1F), 1, CSDM_REG_CSDM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "CSDM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 307*/{(0x3), 1, DBG_REG_DBG_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "DBG: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 308*/{(0x1F), 1, DMAE_REG_DMAE_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "DMAE: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 309*/{(0x1F), 1, DORQ_REG_DORQ_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "DORQ: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 310*/{(0x1), 1, TCM_REG_TCM_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "TCM: parity status is not 0", + {NA, NA, ~0x3ffc0, 0, NA, NA} }, + +/*line 311*/{(0x1E), 1, TCM_REG_TCM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "TCM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 312*/{(0x1), 1, CCM_REG_CCM_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "CCM: parity status is not 0", + {NA, NA, ~0x3ffc0, 0, NA, NA} }, + +/*line 313*/{(0x1E), 1, CCM_REG_CCM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "CCM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 314*/{(0x1), 1, UCM_REG_UCM_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "UCM: parity status is not 0", + {NA, NA, ~0x3ffc0, 0, NA, NA} }, + +/*line 315*/{(0x1E), 1, UCM_REG_UCM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "UCM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 316*/{(0x1), 1, XCM_REG_XCM_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "XCM: parity status is not 0", + {NA, NA, ~0x3ffc0, 0, NA, NA} }, + +/*line 317*/{(0x1E), 1, XCM_REG_XCM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XCM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 318*/{(0x1), 1, HC_REG_HC_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "HC: parity status is not 0", + {NA, NA, ~0x1, 0, NA, NA} }, + +/*line 319*/{(0x1), 1, MISC_REG_MISC_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "MISC: parity status is not 0", + {NA, NA, ~0x1, 0, NA, NA} }, + +/*line 320*/{(0x1F), 1, PRS_REG_PRS_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PRS: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 321*/{(0x1F), 1, PXP_REG_PXP_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 322*/{(0x1F), 1, QM_REG_QM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "QM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 323*/{(0x1), 1, SRC_REG_SRC_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "SRCH: parity status is not 0", + {NA, NA, ~0x4, 0, NA, NA} }, + +/*line 324*/{(0x1F), 1, TSDM_REG_TSDM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "TSDM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 325*/{(0x1F), 1, USDM_REG_USDM_PRTY_STS, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "USDM: parity status is not 0", + {NA, NA, ~0x20, 0, NA, NA} }, + +/*line 326*/{(0x1F), 1, XSDM_REG_XSDM_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XSDM: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 327*/{(0x1F), 1, 
GRCBASE_XPB + PB_REG_PB_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XPB: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 328*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "UPB: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 329*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "CSEM: parity status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 330*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_0, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PXP2: parity status 0 is not 0", + {NA, NA, ~0xfff40020, 0, NA, NA} }, + +/*line 331*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_0, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PXP2: parity status 0 is not 0", + {NA, NA, ~0x20, 0, NA, NA} }, + +/*line 332*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "TSEM: parity status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 333*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "USEM: parity status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 334*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XSEM: parity status 0 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 335*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "CSEM: parity status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 336*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_1, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "PXP2: parity status 1 is not 0", + {NA, NA, ~0x20, 0, NA, NA} }, + +/*line 337*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PXP2: parity status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 338*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "TSEM: parity status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 339*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "USEM: parity status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 340*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_1, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "XSEM: parity status 1 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 341*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGLUE_B: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 342*/{(0x2), 2, QM_REG_QTASKCTR_EXT_A_0, + NA, 64, 4, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Q_EXT_A (upper 64 queues), Queue is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 343*/{(0x2), 1, QM_REG_QSTATUS_LOW_EXT_A, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: QSTATUS_LOW_EXT_A is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 344*/{(0x2), 1, QM_REG_QSTATUS_HIGH_EXT_A, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: QSTATUS_HIGH_EXT_A is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 345*/{(0x1E), 1, QM_REG_PAUSESTATE2, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: PAUSESTATE2 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 346*/{(0x1E), 1, QM_REG_PAUSESTATE3, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: PAUSESTATE3 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 347*/{(0x2), 1, QM_REG_PAUSESTATE4, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: PAUSESTATE4 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 348*/{(0x2), 1, QM_REG_PAUSESTATE5, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: PAUSESTATE5 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 349*/{(0x2), 1, 
QM_REG_PAUSESTATE6, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: PAUSESTATE6 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 350*/{(0x2), 1, QM_REG_PAUSESTATE7, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "QM: PAUSESTATE7 is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 351*/{(0x2), 6, QM_REG_PTRTBL_EXT_A, + NA, 64, 8, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: read and write variables not equal in ext table", + {NA, NA, NA, NA, NA, NA} }, + +/*line 352*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_OCCURRED, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "MISC: system kill occurred;", + {NA, NA, 0, NA, NA, NA} }, + +/*line 353*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_0, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "MISC: system kill occurred; status_0 register", + {NA, NA, 0, NA, NA, NA} }, + +/*line 354*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "MISC: system kill occurred; status_1 register", + {NA, NA, 0, NA, NA, NA} }, + +/*line 355*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_2, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "MISC: system kill occurred; status_2 register", + {NA, NA, 0, NA, NA, NA} }, + +/*line 356*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_3, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "MISC: system kill occurred; status_3 register", + {NA, NA, 0, NA, NA, NA} }, + +/*line 357*/{(0x1E), 1, MISC_REG_PCIE_HOT_RESET, + NA, NA, NA, pneq, + NA, IDLE_CHK_WARNING, + "MISC: pcie_rst_b was asserted without perst assertion", + {NA, NA, 0, NA, NA, NA} }, + +/*line 358*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_ERROR, + "NIG: interrupt 0 is active", + {NA, NA, ~0x300, 0, NA, NA} }, + +/*line 359*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0, + NA, NA, NA, peq, + NA, IDLE_CHK_WARNING, + "NIG: Access to BMAC while not active.
If tested on FPGA, ignore this warning", + {NA, NA, 0x300, NA, NA, NA} }, + +/*line 360*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_ERROR, + "NIG: interrupt 1 is active", + {NA, NA, 0x783FF03, 0, NA, NA} }, + +/*line 361*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "NIG: port cos was paused too long", + {NA, NA, ~0x783FF0F, 0, NA, NA} }, + +/*line 362*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "NIG: Got packets w/o Outer-VLAN in MF mode", + {NA, NA, 0xC, 0, NA, NA} }, + +/*line 363*/{(0x2), 1, NIG_REG_NIG_PRTY_STS, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_ERROR, + "NIG: parity interrupt is active", + {NA, NA, ~0xFFC00000, 0, NA, NA} }, + +/*line 364*/{(0x1C), 1, NIG_REG_NIG_PRTY_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_ERROR, + "NIG: parity 0 interrupt is active", + {NA, NA, ~0xFFC00000, 0, NA, NA} }, + +/*line 365*/{(0x4), 1, NIG_REG_NIG_PRTY_STS_1, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_ERROR, + "NIG: parity 1 interrupt is active", + {NA, NA, 0xff, 0, NA, NA} }, + +/*line 366*/{(0x18), 1, NIG_REG_NIG_PRTY_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "NIG: parity 1 interrupt is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 367*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "TSEM: interrupt 0 is active", + {NA, NA, ~0x10000000, 0, NA, NA} }, + +/*line 368*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0, + NA, NA, NA, peq, + NA, IDLE_CHK_WARNING, + "TSEM: interrupt 0 is active", + {NA, NA, 0x10000000, NA, NA, NA} }, + +/*line 369*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "TSEM: interrupt 1 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 370*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "CSEM: interrupt 0 is active", + {NA, NA, ~0x10000000, 0, NA, NA} }, + +/*line 371*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0, + NA, NA, NA, peq, + NA, IDLE_CHK_WARNING, + "CSEM: interrupt 0 is active", + {NA, NA, 0x10000000, NA, NA, NA} }, + +/*line 372*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "CSEM: interrupt 1 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 373*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "USEM: interrupt 0 is active", + {NA, NA, ~0x10000000, 0, NA, NA} }, + +/*line 374*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0, + NA, NA, NA, peq, + NA, IDLE_CHK_WARNING, + "USEM: interrupt 0 is active", + {NA, NA, 0x10000000, NA, NA, NA} }, + +/*line 375*/{(0x1F), 1, USEM_REG_USEM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "USEM: interrupt 1 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 376*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0, + NA, NA, NA, pand_neq, + NA, IDLE_CHK_WARNING, + "XSEM: interrupt 0 is active", + {NA, NA, ~0x10000000, 0, NA, NA} }, + +/*line 377*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0, + NA, NA, NA, peq, + NA, IDLE_CHK_WARNING, + "XSEM: interrupt 0 is active", + {NA, NA, 0x10000000, NA, NA, NA} }, + +/*line 378*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "XSEM: interrupt 1 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 379*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_0, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "TSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 380*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "TSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 
381*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_0, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "CSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 382*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "CSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 383*/{(0x1F), 1, USDM_REG_USDM_INT_STS_0, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "USDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 384*/{(0x1F), 1, USDM_REG_USDM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "USDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 385*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_0, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "XSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 386*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_1, + NA, NA, NA, pneq, + NA, IDLE_CHK_ERROR, + "XSDM: interrupt 0 is active", + {NA, NA, 0, NA, NA, NA} }, + +/*line 387*/{(0x2), 1, HC_REG_HC_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "HC: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 388*/{(0x1E), 1, MISC_REG_MISC_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "MISC: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 389*/{(0x1E), 1, SRC_REG_SRC_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "SRCH: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 390*/{(0xC), 3, QM_REG_BYTECRD0, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 0 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 391*/{(0xC), 3, QM_REG_BYTECRD1, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 1 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 392*/{(0xC), 3, QM_REG_BYTECRD2, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 2 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 393*/{(0x1C), 1, QM_REG_VOQCRDERRREG, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "QM: VOQ credit error register is not 0 (VOQ credit overflow/underflow)", + {NA, NA, 0xFFFF, 0, NA, NA} }, + +/*line 394*/{(0x1C), 1, QM_REG_BYTECRDERRREG, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "QM: Byte credit error register is not 0 (Byte credit overflow/underflow)", + {NA, NA, 0xFFF, 0, NA, NA} }, + +/*line 395*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_31_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: FLR request is set for VF addresses 31-0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 396*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_63_32, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: FLR request is set for VF addresses 63-32", + {NA, NA, 0, NA, NA, NA} }, + +/*line 397*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_95_64, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: FLR request is set for VF addresses 95-64", + {NA, NA, 0, NA, NA, NA} }, + +/*line 398*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_127_96, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: FLR request is set for VF addresses 127-96", + {NA, NA, 0, NA, NA, NA} }, + +/*line 399*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_PF_7_0, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: FLR request is set for PF addresses 7-0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 400*/{(0x1C), 1, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: SR-IOV disable request is set", + {NA, NA, 0, NA, NA, NA} }, + +/*line 401*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_A_REQUEST, + NA, 1, 0, 
pneq, + NA, IDLE_CHK_WARNING, + "PGL: Cfg-Space A request is set", + {NA, NA, 0, NA, NA, NA} }, + +/*line 402*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_B_REQUEST, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "PGL: Cfg-Space B request is set", + {NA, NA, 0, NA, NA, NA} }, + +/*line 403*/{(0x1C), 1, IGU_REG_ERROR_HANDLING_DATA_VALID, + NA, NA, 0, pneq, + NA, IDLE_CHK_WARNING, + "IGU: some unauthorized commands arrived to the IGU. Use igu_dump_fifo utility for more details", + {NA, NA, 0, NA, NA, NA} }, + +/*line 404*/{(0x1C), 1, IGU_REG_ATTN_WRITE_DONE_PENDING, + NA, NA, NA, pneq, + NA, IDLE_CHK_WARNING, + "IGU attention message write done pending is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 405*/{(0x1C), 1, IGU_REG_WRITE_DONE_PENDING, + NA, 5, 4, pneq, + NA, IDLE_CHK_WARNING, + "IGU MSI/MSIX message write done pending is not empty", + {NA, NA, 0, NA, NA, NA} }, + +/*line 406*/{(0x1C), 1, IGU_REG_IGU_PRTY_STS, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "IGU: parity status is not 0", + {NA, NA, 0, NA, NA, NA} }, + +/*line 407*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_0)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 408*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_0)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 409*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_1)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 410*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID, + MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_1)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 411*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN, + MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (MCP)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 412*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID, + MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq, + NA, IDLE_CHK_ERROR, + "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (MCP)", + {NA, NA, 0x4000000, 0, NA, NA} }, + +/*line 413*/{(0x1C), 1, IGU_REG_SILENT_DROP, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "Some messages were not executed in the IGU", + {NA, NA, 0, NA, NA, NA} }, + +/*line 414*/{(0x1C), 1, PXP2_REG_PSWRQ_BW_CREDIT, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR, + "PXP2: rq_read_credit and rq_write_credit are not 5", + {NA, NA, 0x2D, NA, NA, NA} }, + +/*line 415*/{(0x1C), 1, IGU_REG_SB_CTRL_FSM, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state", + {NA, NA, 0, NA, NA, NA} }, + +/*line 416*/{(0x1C), 1, IGU_REG_INT_HANDLE_FSM, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. INT_HANDLE_FSM should be zero in idle state", + {NA, NA, 0, NA, NA, NA} }, + +/*line 417*/{(0x1C), 1, IGU_REG_ATTN_FSM, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. SB_ATTN_FSM should be zero or two in idle state", + {NA, NA, ~0x2, 0, NA, NA} }, + +/*line 418*/{(0x1C), 1, IGU_REG_CTRL_FSM, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. 
SB_CTRL_FSM should be zero in idle state", + {NA, NA, ~0x1, 0, NA, NA} }, + +/*line 419*/{(0x1C), 1, IGU_REG_PXP_ARB_FSM, + NA, 1, 0, pand_neq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. SB_ARB_FSM should be zero in idle state", + {NA, NA, ~0x1, 0, NA, NA} }, + +/*line 420*/{(0x1C), 1, IGU_REG_PENDING_BITS_STATUS, + NA, 5, 4, pneq, + NA, IDLE_CHK_WARNING, + "IGU: block is not in idle. There are pending write done", + {NA, NA, 0, NA, NA, NA} }, + +/*line 421*/{(0x10), 3, QM_REG_VOQCREDIT_0, + QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_0, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 422*/{(0x10), 3, QM_REG_VOQCREDIT_1, + QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_1, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 423*/{(0x10), 3, QM_REG_VOQCREDIT_2, + QM_REG_VOQINITCREDIT_2, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_2, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 424*/{(0x10), 3, QM_REG_VOQCREDIT_3, + QM_REG_VOQINITCREDIT_3, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_3, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 425*/{(0x10), 3, QM_REG_VOQCREDIT_4, + QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_4, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 426*/{(0x10), 3, QM_REG_VOQCREDIT_5, + QM_REG_VOQINITCREDIT_5, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_5, VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 427*/{(0x10), 3, QM_REG_VOQCREDIT_6, + QM_REG_VOQINITCREDIT_6, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: VOQ_6 (LB VOQ), VOQ credit is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 428*/{(0x10), 3, QM_REG_BYTECRD0, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 0 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 429*/{(0x10), 3, QM_REG_BYTECRD1, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 1 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 430*/{(0x10), 3, QM_REG_BYTECRD2, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 2 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 431*/{(0x10), 3, QM_REG_BYTECRD3, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 3 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 432*/{(0x10), 3, QM_REG_BYTECRD4, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 4 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 433*/{(0x10), 3, QM_REG_BYTECRD5, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 5 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 434*/{(0x10), 3, QM_REG_BYTECRD6, + QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "QM: Byte credit 6 is not equal to initial credit", + {NA, NA, NA, NA, NA, NA} }, + +/*line 435*/{(0x10), 1, QM_REG_FWVOQ0TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq0 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 436*/{(0x10), 1, 
QM_REG_FWVOQ1TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq1 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 437*/{(0x10), 1, QM_REG_FWVOQ2TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq2 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 438*/{(0x10), 1, QM_REG_FWVOQ3TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq3 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 439*/{(0x10), 1, QM_REG_FWVOQ4TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq4 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 440*/{(0x10), 1, QM_REG_FWVOQ5TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq5 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 441*/{(0x10), 1, QM_REG_FWVOQ6TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq6 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 442*/{(0x10), 1, QM_REG_FWVOQ7TOHWVOQ, + NA, 1, 0, peq, + NA, IDLE_CHK_ERROR, + "QM: FwVoq7 is mapped to HwVoq7 (non-TX HwVoq)", + {NA, NA, 0x7, NA, NA, NA} }, + +/*line 443*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT0_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 0 EOP FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 444*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT1_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 1 EOP FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 445*/{(0x1F), 1, NIG_REG_INGRESS_EOP_LB_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: LB EOP FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 446*/{(0x1F), 1, NIG_REG_INGRESS_RMP0_DSCR_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 0 RX MCP descriptor FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 447*/{(0x1F), 1, NIG_REG_INGRESS_RMP1_DSCR_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 1 RX MCP descriptor FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 448*/{(0x1F), 1, NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF LB FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 449*/{(0x1F), 1, NIG_REG_EGRESS_MNG0_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 0 TX MCP FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 450*/{(0x1F), 1, NIG_REG_EGRESS_MNG1_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 1 TX MCP FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 451*/{(0x1F), 1, NIG_REG_EGRESS_DEBUG_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Debug FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 452*/{(0x1F), 1, NIG_REG_EGRESS_DELAY0_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF0 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 453*/{(0x1F), 1, NIG_REG_EGRESS_DELAY1_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF1 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 454*/{(0x1F), 1, NIG_REG_LLH0_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 0 RX LLH FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 455*/{(0x1F), 1, NIG_REG_LLH1_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 1 RX LLH FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 456*/{(0x1C), 1, NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + 
"NIG: Port 0 TX MCP FIFO for traffic going to the host is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 457*/{(0x1C), 1, NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 1 TX MCP FIFO for traffic going to the host is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 458*/{(0x1C), 1, NIG_REG_P0_TLLH_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 0 TX LLH FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 459*/{(0x1C), 1, NIG_REG_P1_TLLH_FIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 1 TX LLH FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 460*/{(0x1C), 1, NIG_REG_P0_HBUF_DSCR_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 0 RX MCP descriptor FIFO for traffic from the host is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 461*/{(0x1C), 1, NIG_REG_P1_HBUF_DSCR_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_WARNING, + "NIG: Port 1 RX MCP descriptor FIFO for traffic from the host is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 462*/{(0x18), 1, NIG_REG_P0_RX_MACFIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 0 RX MAC interface FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 463*/{(0x18), 1, NIG_REG_P1_RX_MACFIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 1 RX MAC interface FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 464*/{(0x18), 1, NIG_REG_P0_TX_MACFIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 0 TX MAC interface FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 465*/{(0x18), 1, NIG_REG_P1_TX_MACFIFO_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: Port 1 TX MAC interface FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 466*/{(0x10), 1, NIG_REG_EGRESS_DELAY2_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF2 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 467*/{(0x10), 1, NIG_REG_EGRESS_DELAY3_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF3 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 468*/{(0x10), 1, NIG_REG_EGRESS_DELAY4_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF4 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, + +/*line 469*/{(0x10), 1, NIG_REG_EGRESS_DELAY5_EMPTY, + NA, 1, 0, pneq, + NA, IDLE_CHK_ERROR_NO_TRAFFIC, + "NIG: PBF IF5 FIFO is not empty", + {NA, NA, 1, NA, NA, NA} }, +}; + +/* handle self test fails according to severity and type */ +static void bnx2x_self_test_log(struct bnx2x *bp, u8 severity, char *message) +{ + switch (severity) { + case IDLE_CHK_ERROR: + BNX2X_ERR("ERROR %s", message); + idle_chk_errors++; + break; + case IDLE_CHK_ERROR_NO_TRAFFIC: + DP(NETIF_MSG_HW, "INFO %s", message); + break; + case IDLE_CHK_WARNING: + DP(NETIF_MSG_HW, "WARNING %s", message); + idle_chk_warnings++; + break; + } +} + +/* specific test for QM rd/wr pointers and rd/wr banks */ +static void bnx2x_idle_chk6(struct bnx2x *bp, + struct st_record *rec, char *message) +{ + u32 rd_ptr, wr_ptr, rd_bank, wr_bank; + int i; + + for (i = 0; i < rec->loop; i++) { + /* read regs */ + rec->pred_args.val1 = + REG_RD(bp, rec->reg1 + i * rec->incr); + rec->pred_args.val2 = + REG_RD(bp, rec->reg1 + i * rec->incr + 4); + + /* calc read and write pointers */ + rd_ptr = ((rec->pred_args.val1 & 0x3FFFFFC0) >> 6); + wr_ptr = ((((rec->pred_args.val1 & 0xC0000000) >> 30) & 0x3) | + ((rec->pred_args.val2 & 0x3FFFFF) << 2)); + + /* 
perform pointer test */ + if (rd_ptr != wr_ptr) { + snprintf(message, MAX_FAIL_MSG, + "QM: PTRTBL entry %d - rd_ptr is not equal to wr_ptr. Values are 0x%x and 0x%x\n", + i, rd_ptr, wr_ptr); + bnx2x_self_test_log(bp, rec->severity, message); + } + + /* calculate read and write banks */ + rd_bank = ((rec->pred_args.val1 & 0x30) >> 4); + wr_bank = (rec->pred_args.val1 & 0x03); + + /* perform bank test */ + if (rd_bank != wr_bank) { + snprintf(message, MAX_FAIL_MSG, + "QM: PTRTBL entry %d - rd_bank is not equal to wr_bank. Values are 0x%x 0x%x\n", + i, rd_bank, wr_bank); + bnx2x_self_test_log(bp, rec->severity, message); + } + } +} + +/* specific test for cfc info ram and cid cam */ +static void bnx2x_idle_chk7(struct bnx2x *bp, + struct st_record *rec, char *message) +{ + int i; + + /* iterate through lcids */ + for (i = 0; i < rec->loop; i++) { + /* make sure cam entry is valid (bit 0) */ + if ((REG_RD(bp, (rec->reg2 + i * 4)) & 0x1) != 0x1) + continue; + + /* get connection type (multiple reads due to widebus) */ + REG_RD(bp, (rec->reg1 + i * rec->incr)); + REG_RD(bp, (rec->reg1 + i * rec->incr + 4)); + rec->pred_args.val1 = + REG_RD(bp, (rec->reg1 + i * rec->incr + 8)); + REG_RD(bp, (rec->reg1 + i * rec->incr + 12)); + + /* obtain connection type */ + if (is_e1 || is_e1h) { + /* E1 E1H (bits 4..7) */ + rec->pred_args.val1 &= 0x78; + rec->pred_args.val1 >>= 3; + } else { + /* E2 E3A0 E3B0 (bits 26..29) */ + rec->pred_args.val1 &= 0x1E000000; + rec->pred_args.val1 >>= 25; + } + + /* get activity counter value */ + rec->pred_args.val2 = REG_RD(bp, rec->reg3 + i * 4); + + /* validate ac value is legal for con_type at idle state */ + if (rec->bnx2x_predicate(&rec->pred_args)) { + snprintf(message, MAX_FAIL_MSG, + "%s. Values are 0x%x 0x%x\n", rec->fail_msg, + rec->pred_args.val1, rec->pred_args.val2); + bnx2x_self_test_log(bp, rec->severity, message); + } + } +} + +/* self test procedure + * scan auto-generated database + * for each line: + * 1. compare chip mask + * 2. determine type (according to macro number) + * 3. read registers + * 4. call predicate + * 5. 
collate results and statistics + */ +int bnx2x_idle_chk(struct bnx2x *bp) +{ + u16 i; /* loop counter */ + u16 st_ind; /* self test database access index */ + struct st_record rec; /* current record variable */ + char message[MAX_FAIL_MSG]; /* message to log */ + + /*init stats*/ + idle_chk_errors = 0; + idle_chk_warnings = 0; + + /*create masks for all chip types*/ + is_e1 = CHIP_IS_E1(bp); + is_e1h = CHIP_IS_E1H(bp); + is_e2 = CHIP_IS_E2(bp); + is_e3a0 = CHIP_IS_E3A0(bp); + is_e3b0 = CHIP_IS_E3B0(bp); + + /*database main loop*/ + for (st_ind = 0; st_ind < ST_DB_LINES; st_ind++) { + rec = st_database[st_ind]; + + /*check if test applies to chip*/ + if (!((rec.chip_mask & IDLE_CHK_E1) && is_e1) && + !((rec.chip_mask & IDLE_CHK_E1H) && is_e1h) && + !((rec.chip_mask & IDLE_CHK_E2) && is_e2) && + !((rec.chip_mask & IDLE_CHK_E3A0) && is_e3a0) && + !((rec.chip_mask & IDLE_CHK_E3B0) && is_e3b0)) + continue; + + /* identify macro */ + switch (rec.macro) { + case 1: + /* read single reg and call predicate */ + rec.pred_args.val1 = REG_RD(bp, rec.reg1); + DP(BNX2X_MSG_IDLE, "mac1 add %x\n", rec.reg1); + if (rec.bnx2x_predicate(&rec.pred_args)) { + snprintf(message, sizeof(message), + "%s.Value is 0x%x\n", rec.fail_msg, + rec.pred_args.val1); + bnx2x_self_test_log(bp, rec.severity, message); + } + break; + case 2: + /* read repeatedly starting from reg1 and call + * predicate after each read + */ + for (i = 0; i < rec.loop; i++) { + rec.pred_args.val1 = + REG_RD(bp, rec.reg1 + i * rec.incr); + DP(BNX2X_MSG_IDLE, "mac2 add %x\n", rec.reg1); + if (rec.bnx2x_predicate(&rec.pred_args)) { + snprintf(message, sizeof(message), + "%s. Value is 0x%x in loop %d\n", + rec.fail_msg, + rec.pred_args.val1, i); + bnx2x_self_test_log(bp, rec.severity, + message); + } + } + break; + case 3: + /* read two regs and call predicate */ + rec.pred_args.val1 = REG_RD(bp, rec.reg1); + rec.pred_args.val2 = REG_RD(bp, rec.reg2); + DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x\n", + rec.reg1, rec.reg2); + if (rec.bnx2x_predicate(&rec.pred_args)) { + snprintf(message, sizeof(message), + "%s. Values are 0x%x 0x%x\n", + rec.fail_msg, rec.pred_args.val1, + rec.pred_args.val2); + bnx2x_self_test_log(bp, rec.severity, message); + } + break; + case 4: + /*unused to-date*/ + for (i = 0; i < rec.loop; i++) { + rec.pred_args.val1 = + REG_RD(bp, rec.reg1 + i * rec.incr); + rec.pred_args.val2 = + (REG_RD(bp, + rec.reg2 + i * rec.incr)) >> 1; + if (rec.bnx2x_predicate(&rec.pred_args)) { + snprintf(message, sizeof(message), + "%s. Values are 0x%x 0x%x in loop %d\n", + rec.fail_msg, + rec.pred_args.val1, + rec.pred_args.val2, i); + bnx2x_self_test_log(bp, rec.severity, + message); + } + } + break; + case 5: + /* compare two regs, pending + * the value of a condition reg + */ + rec.pred_args.val1 = REG_RD(bp, rec.reg1); + rec.pred_args.val2 = REG_RD(bp, rec.reg2); + DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x add3 %x\n", + rec.reg1, rec.reg2, rec.reg3); + if (REG_RD(bp, rec.reg3) != 0) { + if (rec.bnx2x_predicate(&rec.pred_args)) { + snprintf(message, sizeof(message), + "%s. Values are 0x%x 0x%x\n", + rec.fail_msg, + rec.pred_args.val1, + rec.pred_args.val2); + bnx2x_self_test_log(bp, rec.severity, + message); + } + } + break; + case 6: + /* compare read and write pointers + * and read and write banks in QM + */ + bnx2x_idle_chk6(bp, &rec, message); + break; + case 7: + /* compare cfc info cam with cid cam */ + bnx2x_idle_chk7(bp, &rec, message); + break; + default: + DP(BNX2X_MSG_IDLE, + "unknown macro in self test data base. 
macro %d line %d", + rec.macro, st_ind); + } + } + + /* abort if interface is not running */ + if (!netif_running(bp->dev)) + return idle_chk_errors; + + /* return value according to statistics */ + if (idle_chk_errors == 0) { + DP(BNX2X_MSG_IDLE, + "completed successfully (logged %d warnings)\n", + idle_chk_warnings); + } else { + BNX2X_ERR("failed (with %d errors, %d warnings)\n", + idle_chk_errors, idle_chk_warnings); + } + return idle_chk_errors; +} diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 7e0919aa450e..0b193edb73b8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -23,6 +23,8 @@ #include "bnx2x_cmn.h" #include "bnx2x_sriov.h" +extern const u32 dmae_reg_go_c[]; + /* Statistics */ /* From a46665707144d768ed4c9d4a6ed51edfd8cd7edf Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Sat, 4 Jul 2020 10:13:44 +0530 Subject: [PATCH 0674/2056] bnx2x: Perform Idlechk dump during the debug collection. The patch adds driver changes to perform Idlechk dump during the debug data collection. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 41aac4f7634a..fd957212bc1b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -1176,9 +1176,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int) } #endif if (IS_PF(bp)) { + int tmp_msg_en = bp->msg_enable; + bnx2x_fw_dump(bp); + bp->msg_enable |= NETIF_MSG_HW; + BNX2X_ERR("Idle check (1st round) ----------\n"); + bnx2x_idle_chk(bp); + BNX2X_ERR("Idle check (2nd round) ----------\n"); + bnx2x_idle_chk(bp); + bp->msg_enable = tmp_msg_en; bnx2x_mc_assert(bp); } + BNX2X_ERR("end crash dump -----------------\n"); } From f551e2fdaf81b7b561bb5dc590da13f21c4c1295 Mon Sep 17 00:00:00 2001 From: Tanner Love Date: Sat, 4 Jul 2020 16:45:14 -0400 Subject: [PATCH 0675/2056] selftests/net: update initializer syntax to use c99 designators Before, clang version 9 threw errors such as: error: use of GNU old-style field designator extension [-Werror,-Wgnu-designator] { tstamp: true, swtstamp: true } ^~~~~~~ .tstamp = Fix these warnings in tools/testing/selftests/net in the same manner as commit 121e357ac728 ("selftests/harness: Update named initializer syntax"). N.B. rxtimestamp.c is the only affected file in the directory. Signed-off-by: Tanner Love Acked-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/rxtimestamp.c | 30 +++++++++++------------ 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index c599d371cbbe..221fdece47d4 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -68,44 +68,44 @@ static struct socket_type socket_types[] = { static struct test_case test_cases[] = { { {}, {} }, { - { so_timestamp: 1 }, - { tstamp: true } + { .so_timestamp = 1 }, + { .tstamp = true } }, { - { so_timestampns: 1 }, - { tstampns: true } + { .so_timestampns = 1 }, + { .tstampns = true } }, { - { so_timestamp: 1, so_timestampns: 1 }, - { tstampns: true } + { .so_timestamp = 1, .so_timestampns = 1 }, + { .tstampns = true } }, { - { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE }, + { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE }, {} }, { /* Loopback device does not support hw timestamps. */ - { so_timestamping: SOF_TIMESTAMPING_RX_HARDWARE }, + { .so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE }, {} }, { - { so_timestamping: SOF_TIMESTAMPING_SOFTWARE }, - warn_on_fail : true + { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE }, + .warn_on_fail = true }, { - { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE + { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE }, {} }, { - { so_timestamping: SOF_TIMESTAMPING_SOFTWARE + { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE }, - { swtstamp: true } + { .swtstamp = true } }, { - { so_timestamp: 1, so_timestamping: SOF_TIMESTAMPING_SOFTWARE + { .so_timestamp = 1, .so_timestamping = SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE }, - { tstamp: true, swtstamp: true } + { .tstamp = true, .swtstamp = true } }, }; From 83f0c10bc36f956102ce4a33c5fe596ae9891297 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 5 Jul 2020 01:30:15 +0200 Subject: [PATCH 0676/2056] net: use mptcp setsockopt function for SOL_SOCKET on mptcp sockets setsockopt(mptcp_fd, SOL_SOCKET, ...)... appears to work (returns 0), but it has no effect -- this is because the MPTCP layer never has a chance to copy the settings to the subflow socket. Skip the generic handling for the mptcp case and instead call the mptcp specific handler instead for SOL_SOCKET too. Next patch adds more specific handling for SOL_SOCKET to mptcp. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 3 +++ net/socket.c | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index fa137a9c42d1..320f306ea85c 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1609,6 +1609,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname, pr_debug("msk=%p", msk); + if (level == SOL_SOCKET) + return sock_setsockopt(sk->sk_socket, level, optname, optval, optlen); + /* @@ the meaning of setsockopt() when the socket is connected and * there are multiple subflows is not yet defined. 
It is up to the * MPTCP-level socket to configure the subflows until the subflow diff --git a/net/socket.c b/net/socket.c index 976426d03f09..d87812a9ed4b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2080,6 +2080,17 @@ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size, return __sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); } +static bool sock_use_custom_sol_socket(const struct socket *sock) +{ + const struct sock *sk = sock->sk; + + /* Use sock->ops->setsockopt() for MPTCP */ + return IS_ENABLED(CONFIG_MPTCP) && + sk->sk_protocol == IPPROTO_MPTCP && + sk->sk_type == SOCK_STREAM && + (sk->sk_family == AF_INET || sk->sk_family == AF_INET6); +} + /* * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. @@ -2118,7 +2129,7 @@ static int __sys_setsockopt(int fd, int level, int optname, optval = (char __user __force *)kernel_optval; } - if (level == SOL_SOCKET) + if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) err = sock_setsockopt(sock, level, optname, optval, optlen); From fd1452d8ef988d228f5265147fde1017084404e4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 5 Jul 2020 01:30:16 +0200 Subject: [PATCH 0677/2056] mptcp: add REUSEADDR/REUSEPORT support This will e.g. make 'sshd restart' work when MPTCP is used, as we will now set this option on the listener socket instead of only the mptcp socket (where it has no effect). We still need to copy the setting to the master socket so that a subsequent getsockopt() returns the expected value. Reported-by: Christoph Paasch Suggested-by: Paolo Abeni Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 320f306ea85c..612f6d49f1bb 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1601,6 +1601,37 @@ static void mptcp_destroy(struct sock *sk) sk_sockets_allocated_dec(sk); } +static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = (struct sock *)msk; + struct socket *ssock; + int ret; + + switch (optname) { + case SO_REUSEPORT: + case SO_REUSEADDR: + lock_sock(sk); + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + release_sock(sk); + return -EINVAL; + } + + ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); + if (ret == 0) { + if (optname == SO_REUSEPORT) + sk->sk_reuseport = ssock->sk->sk_reuseport; + else if (optname == SO_REUSEADDR) + sk->sk_reuse = ssock->sk->sk_reuse; + } + release_sock(sk); + return ret; + } + + return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); +} + static int mptcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -1610,7 +1641,7 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname, pr_debug("msk=%p", msk); if (level == SOL_SOCKET) - return sock_setsockopt(sk->sk_socket, level, optname, optval, optlen); + return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); /* @@ the meaning of setsockopt() when the socket is connected and * there are multiple subflows is not yet defined. 
It is up to the From c9b95a13598750e2840d99322f844ec0ff9e6246 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 5 Jul 2020 01:30:17 +0200 Subject: [PATCH 0678/2056] mptcp: support IPV6_V6ONLY setsockopt Without this, Opensshd fails to open an ipv6 listening socket: error: setsockopt IPV6_V6ONLY: Operation not supported error: Bind to port 22 on :: failed: Address already in use. Opensshd opens an ipv4 and an ipv6 listening socket, but because IPV6_V6ONLY setsockopt fails, the port number is already in use. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 612f6d49f1bb..3ab060e30038 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1632,6 +1632,33 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); } +static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = (struct sock *)msk; + int ret = -EOPNOTSUPP; + struct socket *ssock; + + switch (optname) { + case IPV6_V6ONLY: + lock_sock(sk); + ssock = __mptcp_nmpc_socket(msk); + if (!ssock) { + release_sock(sk); + return -EINVAL; + } + + ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); + if (ret == 0) + sk->sk_ipv6only = ssock->sk->sk_ipv6only; + + release_sock(sk); + break; + } + + return ret; +} + static int mptcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -1655,6 +1682,9 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname, if (ssk) return tcp_setsockopt(ssk, level, optname, optval, optlen); + if (level == SOL_IPV6) + return mptcp_setsockopt_v6(msk, optname, optval, optlen); + return -EOPNOTSUPP; } From 143a102e3090532a624d25429b79284caa59ce9f Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Thu, 2 Jul 2020 18:17:23 +0300 Subject: [PATCH 0679/2056] net: dsa: microchip: split adjust_link() in phylink_mac_link_{up|down}() The DSA subsystem moved to phylink and adjust_link() became deprecated in the process. This patch removes adjust_link from the KSZ DSA switches and adds phylink_mac_link_up() and phylink_mac_link_down(). Signed-off-by: Codrin Ciubotariu Reviewed-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz8795.c | 3 ++- drivers/net/dsa/microchip/ksz9477.c | 3 ++- drivers/net/dsa/microchip/ksz_common.c | 32 ++++++++++++++++---------- drivers/net/dsa/microchip/ksz_common.h | 7 ++++-- 4 files changed, 29 insertions(+), 16 deletions(-) diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 47d65b77caf7..862306a9db2c 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1111,7 +1111,8 @@ static const struct dsa_switch_ops ksz8795_switch_ops = { .setup = ksz8795_setup, .phy_read = ksz_phy_read16, .phy_write = ksz_phy_write16, - .adjust_link = ksz_adjust_link, + .phylink_mac_link_down = ksz_mac_link_down, + .phylink_mac_link_up = ksz_mac_link_up, .port_enable = ksz_enable_port, .port_disable = ksz_disable_port, .get_strings = ksz8795_get_strings, diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 9a51b8a4de5d..9e4bdd950194 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1399,7 +1399,8 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .setup = ksz9477_setup, .phy_read = ksz9477_phy_read16, .phy_write = ksz9477_phy_write16, - .adjust_link = ksz_adjust_link, + .phylink_mac_link_down = ksz_mac_link_down, + .phylink_mac_link_up = ksz_mac_link_up, .port_enable = ksz_enable_port, .port_disable = ksz_disable_port, .get_strings = ksz9477_get_strings, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index fd1d6676ae4f..55ceaf00ece1 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -135,26 +135,34 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) } EXPORT_SYMBOL_GPL(ksz_phy_write16); -void ksz_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface) { struct ksz_device *dev = ds->priv; struct ksz_port *p = &dev->ports[port]; /* Read all MIB counters when the link is going down. */ - if (!phydev->link) { - p->read = true; - schedule_delayed_work(&dev->mib_read, 0); - } + p->read = true; + schedule_delayed_work(&dev->mib_read, 0); + mutex_lock(&dev->dev_mutex); - if (!phydev->link) - dev->live_ports &= ~(1 << port); - else - /* Remember which port is connected and active. */ - dev->live_ports |= (1 << port) & dev->on_ports; + dev->live_ports &= ~(1 << port); mutex_unlock(&dev->dev_mutex); } -EXPORT_SYMBOL_GPL(ksz_adjust_link); +EXPORT_SYMBOL_GPL(ksz_mac_link_down); + +void ksz_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface, struct phy_device *phydev, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct ksz_device *dev = ds->priv; + + /* Remember which port is connected and active. 
*/ + mutex_lock(&dev->dev_mutex); + dev->live_ports |= (1 << port) & dev->on_ports; + mutex_unlock(&dev->dev_mutex); +} +EXPORT_SYMBOL_GPL(ksz_mac_link_up); int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index f2c9bb68fd33..c0224dd0cf8a 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -159,8 +159,11 @@ void ksz_init_mib_timer(struct ksz_device *dev); int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg); int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val); -void ksz_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev); +void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface); +void ksz_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, + phy_interface_t interface, struct phy_device *phydev, + int speed, int duplex, bool tx_pause, bool rx_pause); int ksz_sset_count(struct dsa_switch *ds, int port, int sset); void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, From b20a6b29a811bee0e44b64958d415eb50436154c Mon Sep 17 00:00:00 2001 From: Codrin Ciubotariu Date: Thu, 2 Jul 2020 18:17:24 +0300 Subject: [PATCH 0680/2056] net: dsa: microchip: remove unused private members Private structure members live_ports, on_ports, rx_ports, tx_ports are initialized but not used anywhere. Let's remove them. Suggested-by: Russell King Signed-off-by: Codrin Ciubotariu Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz8795.c | 18 -------------- drivers/net/dsa/microchip/ksz9477.c | 23 ++---------------- drivers/net/dsa/microchip/ksz_common.c | 33 -------------------------- drivers/net/dsa/microchip/ksz_common.h | 8 ------- 4 files changed, 2 insertions(+), 80 deletions(-) diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 862306a9db2c..4202411627f1 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -731,15 +731,6 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; - if (data & PORT_RX_ENABLE) - dev->rx_ports |= BIT(port); - else - dev->rx_ports &= ~BIT(port); - if (data & PORT_TX_ENABLE) - dev->tx_ports |= BIT(port); - else - dev->tx_ports &= ~BIT(port); - /* Port membership may share register with STP state. */ if (member >= 0 && member != p->member) ksz8795_cfg_port_member(dev, port, (u8)member); @@ -976,15 +967,8 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) p->phydev.duplex = 1; member = dev->port_mask; - dev->on_ports = dev->host_mask; - dev->live_ports = dev->host_mask; } else { member = dev->host_mask | p->vid_member; - dev->on_ports |= BIT(port); - - /* Link was detected before port is enabled. 
*/ - if (p->phydev.link) - dev->live_ports |= BIT(port); } ksz8795_cfg_port_member(dev, port, member); } @@ -1112,9 +1096,7 @@ static const struct dsa_switch_ops ksz8795_switch_ops = { .phy_read = ksz_phy_read16, .phy_write = ksz_phy_write16, .phylink_mac_link_down = ksz_mac_link_down, - .phylink_mac_link_up = ksz_mac_link_up, .port_enable = ksz_enable_port, - .port_disable = ksz_disable_port, .get_strings = ksz8795_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 9e4bdd950194..b939e0b82aa0 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -452,15 +452,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, ksz_pwrite8(dev, port, P_STP_CTRL, data); p->stp_state = state; mutex_lock(&dev->dev_mutex); - if (data & PORT_RX_ENABLE) - dev->rx_ports |= (1 << port); - else - dev->rx_ports &= ~(1 << port); - if (data & PORT_TX_ENABLE) - dev->tx_ports |= (1 << port); - else - dev->tx_ports &= ~(1 << port); - /* Port membership may share register with STP state. */ if (member >= 0 && member != p->member) ksz9477_cfg_port_member(dev, port, (u8)member); @@ -1268,18 +1259,10 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) p->phydev.duplex = 1; } mutex_lock(&dev->dev_mutex); - if (cpu_port) { + if (cpu_port) member = dev->port_mask; - dev->on_ports = dev->host_mask; - dev->live_ports = dev->host_mask; - } else { + else member = dev->host_mask | p->vid_member; - dev->on_ports |= (1 << port); - - /* Link was detected before port is enabled. */ - if (p->phydev.link) - dev->live_ports |= (1 << port); - } mutex_unlock(&dev->dev_mutex); ksz9477_cfg_port_member(dev, port, member); @@ -1400,9 +1383,7 @@ static const struct dsa_switch_ops ksz9477_switch_ops = { .phy_read = ksz9477_phy_read16, .phy_write = ksz9477_phy_write16, .phylink_mac_link_down = ksz_mac_link_down, - .phylink_mac_link_up = ksz_mac_link_up, .port_enable = ksz_enable_port, - .port_disable = ksz_disable_port, .get_strings = ksz9477_get_strings, .get_ethtool_stats = ksz_get_ethtool_stats, .get_sset_count = ksz_sset_count, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 55ceaf00ece1..74f2216989ee 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -144,26 +144,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, /* Read all MIB counters when the link is going down. */ p->read = true; schedule_delayed_work(&dev->mib_read, 0); - - mutex_lock(&dev->dev_mutex); - dev->live_ports &= ~(1 << port); - mutex_unlock(&dev->dev_mutex); } EXPORT_SYMBOL_GPL(ksz_mac_link_down); -void ksz_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, - phy_interface_t interface, struct phy_device *phydev, - int speed, int duplex, bool tx_pause, bool rx_pause) -{ - struct ksz_device *dev = ds->priv; - - /* Remember which port is connected and active. 
*/ - mutex_lock(&dev->dev_mutex); - dev->live_ports |= (1 << port) & dev->on_ports; - mutex_unlock(&dev->dev_mutex); -} -EXPORT_SYMBOL_GPL(ksz_mac_link_up); - int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { struct ksz_device *dev = ds->priv; @@ -377,22 +360,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) } EXPORT_SYMBOL_GPL(ksz_enable_port); -void ksz_disable_port(struct dsa_switch *ds, int port) -{ - struct ksz_device *dev = ds->priv; - - if (!dsa_is_user_port(ds, port)) - return; - - dev->on_ports &= ~(1 << port); - dev->live_ports &= ~(1 << port); - - /* port_stp_state_set() will be called after to disable the port so - * there is no need to do anything. - */ -} -EXPORT_SYMBOL_GPL(ksz_disable_port); - struct ksz_device *ksz_switch_alloc(struct device *base, void *priv) { struct dsa_switch *ds; diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index c0224dd0cf8a..f7d92c1656b8 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -84,10 +84,6 @@ struct ksz_device { unsigned long mib_read_interval; u16 br_member; u16 member; - u16 live_ports; - u16 on_ports; /* ports enabled by DSA */ - u16 rx_ports; - u16 tx_ports; u16 mirror_rx; u16 mirror_tx; u32 features; /* chip specific features */ @@ -161,9 +157,6 @@ int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg); int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val); void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); -void ksz_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, - phy_interface_t interface, struct phy_device *phydev, - int speed, int duplex, bool tx_pause, bool rx_pause); int ksz_sset_count(struct dsa_switch *ds, int port, int sset); void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf); int ksz_port_bridge_join(struct dsa_switch *ds, int port, @@ -182,7 +175,6 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port, int ksz_port_mdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); -void ksz_disable_port(struct dsa_switch *ds, int port); /* Common register access functions */ From 063ad9bcc297c1f4cfd3a35a6a709b3fd52031f3 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 2 Jul 2020 22:31:42 +0530 Subject: [PATCH 0681/2056] netxen_nic: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. And they use PCI helper functions to do it. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. In this driver: netxen_nic_resume() calls netxen_nic_attach_func() which then invokes PCI helper functions like pci_enable_device(), pci_set_power_state() and pci_restore_state(). Other function: - netxen_io_slot_reset() also calls netxen_nic_attach_func(). Also, netxen_io_slot_reset() returns specific value based on the return value of netxen_nic_attach_func() as whole. Thus, cannot simply move some piece of code from netxen_nic_attach_func() to it. Hence, define a new function netxen_nic_attach_late_func() to do the tasks which has to be done after PCI helper functions have done their job. Now, netxen_nic_attach_func() invokes netxen_nic_attach_late_func(), thus netxen_io_slot_reset() behaves normally. 
And, netxen_nic_resume() calls netxen_nic_attach_late_func() to avoid PCI helper function calls. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- .../ethernet/qlogic/netxen/netxen_nic_main.c | 59 +++++++++---------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 8067ea04d455..f21847739ef1 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1695,19 +1695,13 @@ static void netxen_nic_detach_func(struct netxen_adapter *adapter) clear_bit(__NX_RESETTING, &adapter->state); } -static int netxen_nic_attach_func(struct pci_dev *pdev) +static int netxen_nic_attach_late_func(struct pci_dev *pdev) { struct netxen_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; int err; - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); pci_set_master(pdev); - pci_restore_state(pdev); adapter->ahw.crb_win = -1; adapter->ahw.ocm_win = -1; @@ -1741,6 +1735,20 @@ static int netxen_nic_attach_func(struct pci_dev *pdev) return err; } +static int netxen_nic_attach_func(struct pci_dev *pdev) +{ + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + return netxen_nic_attach_late_func(pdev); +} + static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -1785,36 +1793,24 @@ static void netxen_nic_shutdown(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int -netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused +netxen_nic_suspend(struct device *dev_d) { - struct netxen_adapter *adapter = pci_get_drvdata(pdev); - int retval; + struct netxen_adapter *adapter = dev_get_drvdata(dev_d); netxen_nic_detach_func(adapter); - retval = pci_save_state(pdev); - if (retval) - return retval; - - if (netxen_nic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } - - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (netxen_nic_wol_supported(adapter)) + device_wakeup_enable(dev_d); return 0; } -static int -netxen_nic_resume(struct pci_dev *pdev) +static int __maybe_unused +netxen_nic_resume(struct device *dev_d) { - return netxen_nic_attach_func(pdev); + return netxen_nic_attach_late_func(to_pci_dev(dev_d)); } -#endif static int netxen_nic_open(struct net_device *netdev) { @@ -3448,15 +3444,16 @@ static const struct pci_error_handlers netxen_err_handler = { .slot_reset = netxen_io_slot_reset, }; +static SIMPLE_DEV_PM_OPS(netxen_nic_pm_ops, + netxen_nic_suspend, + netxen_nic_resume); + static struct pci_driver netxen_driver = { .name = netxen_nic_driver_name, .id_table = netxen_pci_tbl, .probe = netxen_nic_probe, .remove = netxen_nic_remove, -#ifdef CONFIG_PM - .suspend = netxen_nic_suspend, - .resume = netxen_nic_resume, -#endif + .driver.pm = &netxen_nic_pm_ops, .shutdown = netxen_nic_shutdown, .err_handler = &netxen_err_handler }; From 7ada9a5e48e57c3c7e5217c19a6719a85b07b0ba Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 2 Jul 2020 22:31:43 +0530 Subject: [PATCH 0682/2056] qlcnic: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and taking care of
register states. And they use PCI helper functions to do it. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. .suspend() calls __qlcnic_shutdown, which then calls qlcnic_82xx_shutdown; .resume() calls __qlcnic_resume, which then calls qlcnic_82xx_resume; Both ...82xx..() are defined in drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c and are used only in drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c. Hence upgrade them and remove PCI function calls, like pci_save_state() and pci_enable_wake(), inside them. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 11 ++----- .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33 ++++--------------- 2 files changed, 9 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 822aa393c370..35d891f4655a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -1649,7 +1649,6 @@ int qlcnic_82xx_shutdown(struct pci_dev *pdev) { struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; - int retval; netif_device_detach(netdev); @@ -1662,14 +1661,8 @@ int qlcnic_82xx_shutdown(struct pci_dev *pdev) clear_bit(__QLCNIC_RESETTING, &adapter->state); - retval = pci_save_state(pdev); - if (retval) - return retval; - - if (qlcnic_wol_supported(adapter)) { - pci_enable_wake(pdev, PCI_D3cold, 1); - pci_enable_wake(pdev, PCI_D3hot, 1); - } + if (qlcnic_wol_supported(adapter)) + device_wakeup_enable(&pdev->dev); return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 9dd6cb36f366..e52af092a793 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2811,35 +2811,17 @@ static void qlcnic_shutdown(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused qlcnic_suspend(struct device *dev_d) { - int retval; - - retval = __qlcnic_shutdown(pdev); - if (retval) - return retval; - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; + return __qlcnic_shutdown(to_pci_dev(dev_d)); } -static int qlcnic_resume(struct pci_dev *pdev) +static int __maybe_unused qlcnic_resume(struct device *dev_d) { - struct qlcnic_adapter *adapter = pci_get_drvdata(pdev); - int err; - - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_power_state(pdev, PCI_D0); - pci_set_master(pdev); - pci_restore_state(pdev); + struct qlcnic_adapter *adapter = dev_get_drvdata(dev_d); return __qlcnic_resume(adapter); } -#endif static int qlcnic_open(struct net_device *netdev) { @@ -4258,15 +4240,14 @@ static const struct pci_error_handlers qlcnic_err_handler = { .resume = qlcnic_io_resume, }; +static SIMPLE_DEV_PM_OPS(qlcnic_pm_ops, qlcnic_suspend, qlcnic_resume); + static struct pci_driver qlcnic_driver = { .name = qlcnic_driver_name, .id_table = qlcnic_pci_tbl, .probe = qlcnic_probe, .remove = qlcnic_remove, -#ifdef CONFIG_PM - .suspend = qlcnic_suspend, - .resume = qlcnic_resume, -#endif + .driver.pm = &qlcnic_pm_ops, .shutdown = qlcnic_shutdown, .err_handler = &qlcnic_err_handler, #ifdef CONFIG_QLCNIC_SRIOV From 3f2628d62dec9505d5f19044e5f0401989a6b796 Mon Sep 17 00:00:00 2001 From: 
Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:21 +0300 Subject: [PATCH 0683/2056] net: dsa: felix: clarify the intention of writes to MII_BMCR The driver appears to write to BMCR_SPEED and BMCR_DUPLEX, fields which are read-only, since they are actually configured through the vendor-specific IF_MODE (0x14) register. But the reason we're writing back the read-only values of MII_BMCR is to alter these writable fields: BMCR_RESET BMCR_LOOPBACK BMCR_ANENABLE BMCR_PDOWN BMCR_ISOLATE BMCR_ANRESTART In particular, the only field which is really relevant to this driver is BMCR_ANENABLE. Clarify that intention by spelling it out, using phy_set_bits and phy_clear_bits. The driver also made a few writes to BMCR_RESET and BMCR_ANRESTART which are unnecessary and may temporarily disrupt the link to the PHY. Remove them. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 2067776773f7..9f4c8343652f 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -815,7 +815,7 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, phy_write(pcs, ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL); - phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE); + phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE); } else { int speed; @@ -845,10 +845,7 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, ENETC_PCS_IF_MODE_SGMII_EN | ENETC_PCS_IF_MODE_SGMII_SPEED(speed)); - /* Yes, not a mistake: speed is given by IF_MODE. */ - phy_write(pcs, MII_BMCR, BMCR_RESET | - BMCR_SPEED1000 | - BMCR_FULLDPLX); + phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } } @@ -882,9 +879,7 @@ static void vsc9959_pcs_init_2500basex(struct phy_device *pcs, ENETC_PCS_IF_MODE_SGMII_EN | ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500)); - phy_write(pcs, MII_BMCR, BMCR_SPEED1000 | - BMCR_FULLDPLX | - BMCR_RESET); + phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs, From b1c7b87443c2c12c4fe095114736b8ad6f963f67 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:22 +0300 Subject: [PATCH 0684/2056] net: dsa: felix: support half-duplex link modes Ping tested: [ 11.808455] mscc_felix 0000:00:00.5 swp0: Link is Up - 1Gbps/Full - flow control rx/tx [ 11.816497] IPv6: ADDRCONF(NETDEV_CHANGE): swp0: link becomes ready [root@LS1028ARDB ~] # ethtool -s swp0 advertise 0x4 [ 18.844591] mscc_felix 0000:00:00.5 swp0: Link is Down [ 22.048337] mscc_felix 0000:00:00.5 swp0: Link is Up - 100Mbps/Half - flow control off [root@LS1028ARDB ~] # ip addr add 192.168.1.1/24 dev swp0 [root@LS1028ARDB ~] # ping 192.168.1.2 PING 192.168.1.2 (192.168.1.2): 56 data bytes (...) ^C--- 192.168.1.2 ping statistics --- 3 packets transmitted, 3 packets received, 0% packet loss round-trip min/avg/max = 0.383/0.611/1.051 ms [root@LS1028ARDB ~] # ethtool -s swp0 advertise 0x10 [ 355.637747] mscc_felix 0000:00:00.5 swp0: Link is Down [ 358.788034] mscc_felix 0000:00:00.5 swp0: Link is Up - 1Gbps/Half - flow control off [root@LS1028ARDB ~] # ping 192.168.1.2 PING 192.168.1.2 (192.168.1.2): 56 data bytes (...) 
^C --- 192.168.1.2 ping statistics --- 16 packets transmitted, 16 packets received, 0% packet loss round-trip min/avg/max = 0.301/0.384/1.138 ms Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Reviewed-by: Russell King Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 4 +++- drivers/net/dsa/ocelot/felix_vsc9959.c | 23 +++++++++++++---------- include/linux/fsl/enetc_mdio.h | 1 + 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 75020af7f7a4..f54648dff0ec 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -194,13 +194,15 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, return; } - /* No half-duplex. */ phylink_set_port_modes(mask); phylink_set(mask, Autoneg); phylink_set(mask, Pause); phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); phylink_set(mask, 1000baseT_Full); if (state->interface == PHY_INTERFACE_MODE_INTERNAL || diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 9f4c8343652f..94e946b26f90 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -817,12 +817,9 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE); } else { + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN; int speed; - if (state->duplex == DUPLEX_HALF) { - phydev_err(pcs, "Half duplex not supported\n"); - return; - } switch (state->speed) { case SPEED_1000: speed = ENETC_PCS_SPEED_1000; @@ -841,9 +838,9 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, return; } - phy_write(pcs, ENETC_PCS_IF_MODE, - ENETC_PCS_IF_MODE_SGMII_EN | - ENETC_PCS_IF_MODE_SGMII_SPEED(speed)); + if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(speed); + if (state->duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } @@ -870,15 +867,18 @@ static void vsc9959_pcs_init_2500basex(struct phy_device *pcs, unsigned int link_an_mode, const struct phylink_link_state *state) { + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) | + ENETC_PCS_IF_MODE_SGMII_EN; + if (link_an_mode == MLO_AN_INBAND) { phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n"); return; } - phy_write(pcs, ENETC_PCS_IF_MODE, - ENETC_PCS_IF_MODE_SGMII_EN | - ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500)); + if (state->duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; + phy_write(pcs, ENETC_PCS_IF_MODE, if_mode); phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } @@ -919,8 +919,11 @@ static void vsc9959_pcs_init(struct ocelot *ocelot, int port, linkmode_set_bit_array(phy_basic_ports_array, ARRAY_SIZE(phy_basic_ports_array), pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported); linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported); if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX || pcs->interface == PHY_INTERFACE_MODE_USXGMII) diff --git a/include/linux/fsl/enetc_mdio.h 
b/include/linux/fsl/enetc_mdio.h index 4875dd38af7e..2d9203314865 100644 --- a/include/linux/fsl/enetc_mdio.h +++ b/include/linux/fsl/enetc_mdio.h @@ -15,6 +15,7 @@ #define ENETC_PCS_IF_MODE_SGMII_EN BIT(0) #define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1) #define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2)) +#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3) /* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS From da1c4ba1f741ee8f7a7f88d52bc2482fae26200d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:23 +0300 Subject: [PATCH 0685/2056] net: dsa: felix: unconditionally configure MAC speed to 1000Mbps In VSC9959, the PCS is the one who performs rate adaptation (symbol duplication) to the speed negotiated by the PHY. The MAC is unaware of that and must remain configured for gigabit. If it is configured at OCELOT_SPEED_10 or OCELOT_SPEED_100, it'll start transmitting PAUSE frames out of control and never recover, _even if_ we then reconfigure it at OCELOT_SPEED_1000 afterwards. This patch fixes a bug that luckily did not have any functional impact. We were writing 10, 100, 1000 etc into this 2-bit field in DEV_CLOCK_CFG, but the hardware expects values in the range 0, 1, 2, 3. So all speed values were getting truncated to 0, which is OCELOT_SPEED_2500, and which also appears to be fine. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index f54648dff0ec..4d819cc45bed 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -240,9 +240,14 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, u32 mac_fc_cfg; /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and - * PORT_RST bits in CLOCK_CFG + * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is + * integrated is that the MAC speed is fixed and it's the PCS who is + * performing the rate adaptation, so we have to write "1000Mbps" into + * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default + * value). */ - ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(state->speed), + ocelot_port_writel(ocelot_port, + DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), DEV_CLOCK_CFG); /* Flow control. Link speed is only used here to evaluate the time From 151a7cee728a89d909096f552108692905b66aaa Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:24 +0300 Subject: [PATCH 0686/2056] net: dsa: felix: set proper pause frame timers based on link speed state->speed holds a value of 10, 100, 1000 or 2500, but SYS_MAC_FC_CFG_FC_LINK_SPEED expects a value in the range 0, 1, 2 or 3. So set the correct speed encoding into this register. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 4d819cc45bed..4684339012c5 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -250,10 +250,25 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), DEV_CLOCK_CFG); - /* Flow control. 
Link speed is only used here to evaluate the time - * specification in incoming pause frames. - */ - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed); + switch (state->speed) { + case SPEED_10: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3); + break; + case SPEED_100: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2); + break; + case SPEED_1000: + case SPEED_2500: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1); + break; + case SPEED_UNKNOWN: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(0); + break; + default: + dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", + port, state->speed); + return; + } /* handle Rx pause in all cases, with 2500base-X this is used for rate * adaptation. @@ -265,6 +280,10 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; + + /* Flow control. Link speed is only used here to evaluate the time + * specification in incoming pause frames. + */ ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); From b4c2354537b41a13805d28f6f5e8a811d8097e8a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:25 +0300 Subject: [PATCH 0687/2056] net: dsa: felix: delete .phylink_mac_an_restart code Phylink uses the .mac_an_restart method to offer the user an implementation of the "ethtool -r" behavior, when the media-side auto negotiation can be restarted by the local MAC PCS. This is the case for fiber modes 1000Base-X and 2500Base-X (IEEE clause 37) that don't have an Ethernet PHY connected locally, and the media is connected to the MAC PCS directly. On the other hand, the Cisco SGMII and USXGMII standards also have an auto negotiation mechanism based on IEEE 802.3 clause 37 (their respective specs require a MAC PCS and a PHY PCS to implement the same state machine, which is described in IEEE 802.3 "Auto-Negotiation Figure 37-6"), so the ability to restart auto-negotiation is intrinsically symmetrical (the MAC PCS can do it too). However, it appears that not all SGMII/USXGMII PHYs have logic to restart the MDI-side auto-negotiation process when they detect a transition of the SGMII link from data mode to configuration mode. Some do (VSC8234) and some don't (AR8033, MV88E1111). IEEE and/or Cisco specification wordings to not help to prove whether propagating the "AN restart" event from MII side ("mr_restart_an") to MDI side ("mr_restart_negotiation") is required behavior - neither of them specifies any mandatory interaction between the clause 37 AN state machine from Figure 37-6 and the clause 28 AN state machine from Figure 28-18. Therefore, even if a certain behavior could be proven as being required, real-life SGMII/USXGMII PHYs are inconsistent enough that a clause 37 AN restart cannot be used by phylink to reliably trigger a media-side renegotiation, when the user requests it via ethtool. The only remaining use that the .mac_an_restart callback might possibly have, given what we know now, is to implement some silicon quirks, but so far that has proven to not be necessary. So remove this code for now, since it never gets called and we don't foresee any circumstance in which it might be, either. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 10 ------- drivers/net/dsa/ocelot/felix.h | 1 - drivers/net/dsa/ocelot/felix_vsc9959.c | 37 -------------------------- 3 files changed, 48 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 4684339012c5..57c400a67f16 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -296,15 +296,6 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, state->speed); } -static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port) -{ - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); - - if (felix->info->pcs_an_restart) - felix->info->pcs_an_restart(ocelot, port); -} - static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int link_an_mode, phy_interface_t interface) @@ -810,7 +801,6 @@ static const struct dsa_switch_ops felix_switch_ops = { .phylink_validate = felix_phylink_validate, .phylink_mac_link_state = felix_phylink_mac_pcs_get_state, .phylink_mac_config = felix_phylink_mac_config, - .phylink_mac_an_restart = felix_phylink_mac_an_restart, .phylink_mac_link_down = felix_phylink_mac_link_down, .phylink_mac_link_up = felix_phylink_mac_link_up, .port_enable = felix_port_enable, diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index a891736ca006..4a4cebcf04a7 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -31,7 +31,6 @@ struct felix_info { void (*pcs_init)(struct ocelot *ocelot, int port, unsigned int link_an_mode, const struct phylink_link_state *state); - void (*pcs_an_restart)(struct ocelot *ocelot, int port); void (*pcs_link_state)(struct ocelot *ocelot, int port, struct phylink_link_state *state); int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 94e946b26f90..65f83386bad1 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -728,42 +728,6 @@ static int vsc9959_reset(struct ocelot *ocelot) return 0; } -static void vsc9959_pcs_an_restart_sgmii(struct phy_device *pcs) -{ - phy_set_bits(pcs, MII_BMCR, BMCR_ANRESTART); -} - -static void vsc9959_pcs_an_restart_usxgmii(struct phy_device *pcs) -{ - phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR, - USXGMII_BMCR_RESET | - USXGMII_BMCR_AN_EN | - USXGMII_BMCR_RST_AN); -} - -static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port) -{ - struct felix *felix = ocelot_to_felix(ocelot); - struct phy_device *pcs = felix->pcs[port]; - - if (!pcs) - return; - - switch (pcs->interface) { - case PHY_INTERFACE_MODE_SGMII: - case PHY_INTERFACE_MODE_QSGMII: - vsc9959_pcs_an_restart_sgmii(pcs); - break; - case PHY_INTERFACE_MODE_USXGMII: - vsc9959_pcs_an_restart_usxgmii(pcs); - break; - default: - dev_err(ocelot->dev, "Invalid PCS interface type %s\n", - phy_modes(pcs->interface)); - break; - } -} - /* We enable SGMII AN only when the PHY has managed = "in-band-status" in the * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed * into the PCS, which is retrieved out-of-band over MDIO. 
This also has the @@ -1411,7 +1375,6 @@ struct felix_info felix_info_vsc9959 = { .mdio_bus_alloc = vsc9959_mdio_bus_alloc, .mdio_bus_free = vsc9959_mdio_bus_free, .pcs_init = vsc9959_pcs_init, - .pcs_an_restart = vsc9959_pcs_an_restart, .pcs_link_state = vsc9959_pcs_link_state, .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, From 7e14a2dc8c656acea48729daf3f617aa76d62937 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 5 Jul 2020 19:16:26 +0300 Subject: [PATCH 0688/2056] net: dsa: felix: use resolved link config in mac_link_up() Phylink now requires that parameters established through auto-negotiation be written into the MAC at the time of the mac_link_up() callback. In the case of felix, that means taking the port out of reset, setting the correct timers for PAUSE frames, and enabling/disabling TX flow control. This patch also splits the inband and noinband configuration of the vsc9959 PCS (currently found in a function called "init") into 2 different functions, which have a nomenclature closer to phylink: "config", for inband setup, and "link_up", for noinband (forced) setup. This is necessary as a preparation step for giving up control of the PCS to phylink, which will be done in further patch series. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 118 +++++------ drivers/net/dsa/ocelot/felix.h | 10 +- drivers/net/dsa/ocelot/felix_vsc9959.c | 267 ++++++++++++++----------- 3 files changed, 213 insertions(+), 182 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 57c400a67f16..75652ed99b24 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -235,65 +235,10 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port, const struct phylink_link_state *state) { struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; struct felix *felix = ocelot_to_felix(ocelot); - u32 mac_fc_cfg; - /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and - * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is - * integrated is that the MAC speed is fixed and it's the PCS who is - * performing the rate adaptation, so we have to write "1000Mbps" into - * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default - * value). - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - - switch (state->speed) { - case SPEED_10: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3); - break; - case SPEED_100: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2); - break; - case SPEED_1000: - case SPEED_2500: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1); - break; - case SPEED_UNKNOWN: - mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(0); - break; - default: - dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", - port, state->speed); - return; - } - - /* handle Rx pause in all cases, with 2500base-X this is used for rate - * adaptation. - */ - mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; - - if (state->pause & MLO_PAUSE_TX) - mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | - SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | - SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | - SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; - - /* Flow control. Link speed is only used here to evaluate the time - * specification in incoming pause frames. 
- */ - ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); - - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - - if (felix->info->pcs_init) - felix->info->pcs_init(ocelot, port, link_an_mode, state); - - if (felix->info->port_sched_speed_set) - felix->info->port_sched_speed_set(ocelot, port, - state->speed); + if (felix->info->pcs_config) + felix->info->pcs_config(ocelot, port, link_an_mode, state); } static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, @@ -317,8 +262,58 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct felix *felix = ocelot_to_felix(ocelot); + u32 mac_fc_cfg; - /* Enable MAC module */ + /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and + * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is + * integrated is that the MAC speed is fixed and it's the PCS who is + * performing the rate adaptation, so we have to write "1000Mbps" into + * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default + * value). + */ + ocelot_port_writel(ocelot_port, + DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), + DEV_CLOCK_CFG); + + switch (speed) { + case SPEED_10: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3); + break; + case SPEED_100: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2); + break; + case SPEED_1000: + case SPEED_2500: + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1); + break; + default: + dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", + port, speed); + return; + } + + /* handle Rx pause in all cases, with 2500base-X this is used for rate + * adaptation. + */ + mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; + + if (tx_pause) + mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; + + /* Flow control. Link speed is only used here to evaluate the time + * specification in incoming pause frames. 
+ */ + ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); + + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); + + /* Undo the effects of felix_phylink_mac_link_down: + * enable MAC module + */ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); @@ -335,6 +330,13 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | QSYS_SWITCH_PORT_MODE_PORT_ENA, QSYS_SWITCH_PORT_MODE, port); + + if (felix->info->pcs_link_up) + felix->info->pcs_link_up(ocelot, port, link_an_mode, interface, + speed, duplex); + + if (felix->info->port_sched_speed_set) + felix->info->port_sched_speed_set(ocelot, port, speed); } static void felix_port_qos_map_init(struct ocelot *ocelot, int port) diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 4a4cebcf04a7..00137b64132b 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -28,9 +28,13 @@ struct felix_info { int imdio_pci_bar; int (*mdio_bus_alloc)(struct ocelot *ocelot); void (*mdio_bus_free)(struct ocelot *ocelot); - void (*pcs_init)(struct ocelot *ocelot, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state); + void (*pcs_config)(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + const struct phylink_link_state *state); + void (*pcs_link_up)(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex); void (*pcs_link_state)(struct ocelot *ocelot, int port, struct phylink_link_state *state); int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 65f83386bad1..19614537b1ba 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -737,124 +737,54 @@ static int vsc9959_reset(struct ocelot *ocelot) * traffic if SGMII AN is enabled but not completed (acknowledged by us), so * setting MLO_AN_INBAND is actually required for those. */ -static void vsc9959_pcs_init_sgmii(struct phy_device *pcs, - unsigned int link_an_mode, - const struct phylink_link_state *state) -{ - if (link_an_mode == MLO_AN_INBAND) { - int bmsr, bmcr; - - /* Some PHYs like VSC8234 don't like it when AN restarts on - * their system side and they restart line side AN too, going - * into an endless link up/down loop. Don't restart PCS AN if - * link is up already. - * We do check that AN is enabled just in case this is the 1st - * call, PCS detects a carrier but AN is disabled from power on - * or by boot loader. - */ - bmcr = phy_read(pcs, MII_BMCR); - if (bmcr < 0) - return; - - bmsr = phy_read(pcs, MII_BMSR); - if (bmsr < 0) - return; - - if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS)) - return; - - /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001 - * for the MAC PCS in order to acknowledge the AN. 
- */ - phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | - ADVERTISE_LPACK); - - phy_write(pcs, ENETC_PCS_IF_MODE, - ENETC_PCS_IF_MODE_SGMII_EN | - ENETC_PCS_IF_MODE_USE_SGMII_AN); - - /* Adjust link timer for SGMII */ - phy_write(pcs, ENETC_PCS_LINK_TIMER1, - ENETC_PCS_LINK_TIMER1_VAL); - phy_write(pcs, ENETC_PCS_LINK_TIMER2, - ENETC_PCS_LINK_TIMER2_VAL); - - phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE); - } else { - u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN; - int speed; - - switch (state->speed) { - case SPEED_1000: - speed = ENETC_PCS_SPEED_1000; - break; - case SPEED_100: - speed = ENETC_PCS_SPEED_100; - break; - case SPEED_10: - speed = ENETC_PCS_SPEED_10; - break; - case SPEED_UNKNOWN: - /* Silently don't do anything */ - return; - default: - phydev_err(pcs, "Invalid PCS speed %d\n", state->speed); - return; - } - - if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(speed); - if (state->duplex == DUPLEX_HALF) - if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; - - phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); - } -} - -/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane - * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have - * auto-negotiation of any link parameters. Electrically it is compatible with - * a single lane of XAUI. - * The hardware reference manual wants to call this mode SGMII, but it isn't - * really, since the fundamental features of SGMII: - * - Downgrading the link speed by duplicating symbols - * - Auto-negotiation - * are not there. - * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers - * because the clock frequency is actually given by a PLL configured in the - * Reset Configuration Word (RCW). - * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o - * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a - * lower link speed on line side, the system-side interface remains fixed at - * 2500 Mbps and we do rate adaptation through pause frames. - */ -static void vsc9959_pcs_init_2500basex(struct phy_device *pcs, - unsigned int link_an_mode, - const struct phylink_link_state *state) -{ - u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) | - ENETC_PCS_IF_MODE_SGMII_EN; - - if (link_an_mode == MLO_AN_INBAND) { - phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n"); - return; - } - - if (state->duplex == DUPLEX_HALF) - if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; - - phy_write(pcs, ENETC_PCS_IF_MODE, if_mode); - phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); -} - -static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs, +static void vsc9959_pcs_config_sgmii(struct phy_device *pcs, unsigned int link_an_mode, const struct phylink_link_state *state) { - if (link_an_mode != MLO_AN_INBAND) { - phydev_err(pcs, "USXGMII only supports in-band AN for now\n"); - return; - } + int bmsr, bmcr; + /* Some PHYs like VSC8234 don't like it when AN restarts on + * their system side and they restart line side AN too, going + * into an endless link up/down loop. Don't restart PCS AN if + * link is up already. + * We do check that AN is enabled just in case this is the 1st + * call, PCS detects a carrier but AN is disabled from power on + * or by boot loader. + */ + bmcr = phy_read(pcs, MII_BMCR); + if (bmcr < 0) + return; + + bmsr = phy_read(pcs, MII_BMSR); + if (bmsr < 0) + return; + + if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS)) + return; + + /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001 + * for the MAC PCS in order to acknowledge the AN. 
+ */ + phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | + ADVERTISE_LPACK); + + phy_write(pcs, ENETC_PCS_IF_MODE, + ENETC_PCS_IF_MODE_SGMII_EN | + ENETC_PCS_IF_MODE_USE_SGMII_AN); + + /* Adjust link timer for SGMII */ + phy_write(pcs, ENETC_PCS_LINK_TIMER1, + ENETC_PCS_LINK_TIMER1_VAL); + phy_write(pcs, ENETC_PCS_LINK_TIMER2, + ENETC_PCS_LINK_TIMER2_VAL); + + phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE); +} + +static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ /* Configure device ability for the USXGMII Replicator */ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE, USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) | @@ -864,9 +794,9 @@ static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs, USXGMII_ADVERTISE_FDX); } -static void vsc9959_pcs_init(struct ocelot *ocelot, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state) +static void vsc9959_pcs_config(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + const struct phylink_link_state *state) { struct felix *felix = ocelot_to_felix(ocelot); struct phy_device *pcs = felix->pcs[port]; @@ -898,16 +828,110 @@ static void vsc9959_pcs_init(struct ocelot *ocelot, int port, pcs->supported); phy_advertise_supported(pcs); + if (!phylink_autoneg_inband(link_an_mode)) + return; + switch (pcs->interface) { case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: - vsc9959_pcs_init_sgmii(pcs, link_an_mode, state); + vsc9959_pcs_config_sgmii(pcs, link_an_mode, state); break; case PHY_INTERFACE_MODE_2500BASEX: - vsc9959_pcs_init_2500basex(pcs, link_an_mode, state); + phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n"); break; case PHY_INTERFACE_MODE_USXGMII: - vsc9959_pcs_init_usxgmii(pcs, link_an_mode, state); + vsc9959_pcs_config_usxgmii(pcs, link_an_mode, state); + break; + default: + dev_err(ocelot->dev, "Unsupported link mode %s\n", + phy_modes(pcs->interface)); + } +} + +static void vsc9959_pcs_link_up_sgmii(struct phy_device *pcs, + unsigned int link_an_mode, + int speed, int duplex) +{ + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN; + + switch (speed) { + case SPEED_1000: + if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_1000); + break; + case SPEED_100: + if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_100); + break; + case SPEED_10: + if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_10); + break; + default: + phydev_err(pcs, "Invalid PCS speed %d\n", speed); + return; + } + + if (duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; + + phy_write(pcs, ENETC_PCS_IF_MODE, if_mode); + phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); +} + +/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane + * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have + * auto-negotiation of any link parameters. Electrically it is compatible with + * a single lane of XAUI. + * The hardware reference manual wants to call this mode SGMII, but it isn't + * really, since the fundamental features of SGMII: + * - Downgrading the link speed by duplicating symbols + * - Auto-negotiation + * are not there. + * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers + * because the clock frequency is actually given by a PLL configured in the + * Reset Configuration Word (RCW). + * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o + * AN, we call this PHY interface type 2500Base-X. 
In case a PHY negotiates a + * lower link speed on line side, the system-side interface remains fixed at + * 2500 Mbps and we do rate adaptation through pause frames. + */ +static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs, + unsigned int link_an_mode, + int speed, int duplex) +{ + u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) | + ENETC_PCS_IF_MODE_SGMII_EN; + + if (duplex == DUPLEX_HALF) + if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF; + + phy_write(pcs, ENETC_PCS_IF_MODE, if_mode); + phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); +} + +static void vsc9959_pcs_link_up(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct phy_device *pcs = felix->pcs[port]; + + if (!pcs) + return; + + if (phylink_autoneg_inband(link_an_mode)) + return; + + switch (interface) { + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + vsc9959_pcs_link_up_sgmii(pcs, link_an_mode, speed, duplex); + break; + case PHY_INTERFACE_MODE_2500BASEX: + vsc9959_pcs_link_up_2500basex(pcs, link_an_mode, speed, + duplex); + break; + case PHY_INTERFACE_MODE_USXGMII: + phydev_err(pcs, "USXGMII only supports in-band AN for now\n"); break; default: dev_err(ocelot->dev, "Unsupported link mode %s\n", @@ -1374,7 +1398,8 @@ struct felix_info felix_info_vsc9959 = { .imdio_pci_bar = 0, .mdio_bus_alloc = vsc9959_mdio_bus_alloc, .mdio_bus_free = vsc9959_mdio_bus_free, - .pcs_init = vsc9959_pcs_init, + .pcs_config = vsc9959_pcs_config, + .pcs_link_up = vsc9959_pcs_link_up, .pcs_link_state = vsc9959_pcs_link_state, .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, From a61bf20831d7fc77395d97777a9b511790fff621 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:30:04 +0200 Subject: [PATCH 0689/2056] net: dsa: Add __percpu property to prevent warnings net/dsa/slave.c:505:13: warning: incorrect type in initializer (different address spaces) net/dsa/slave.c:505:13: expected void const [noderef] *__vpp_verify net/dsa/slave.c:505:13: got struct pcpu_sw_netstats * Add the needed _percpu property to prevent this warning. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/dsa_priv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index adecf73bd608..1653e3377cb3 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -77,7 +77,7 @@ struct dsa_slave_priv { struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); - struct pcpu_sw_netstats *stats64; + struct pcpu_sw_netstats __percpu *stats64; struct gro_cells gcells; From ed6444ea03841d6a24093eb25dca2eecc1860ca9 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:30:05 +0200 Subject: [PATCH 0690/2056] net: dsa: tag_ksz: Fix __be16 warnings cpu_to_be16 returns a __be16 value. So what it is assigned to needs to have the same type to avoid warnings. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/tag_ksz.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 90d055c4df9e..bd1a3158d79a 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -156,8 +156,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, { struct dsa_port *dp = dsa_slave_to_port(dev); struct sk_buff *nskb; - u16 *tag; + __be16 *tag; u8 *addr; + u16 val; nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN); if (!nskb) @@ -167,12 +168,12 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, tag = skb_put(nskb, KSZ9477_INGRESS_TAG_LEN); addr = skb_mac_header(nskb); - *tag = BIT(dp->index); + val = BIT(dp->index); if (is_link_local_ether_addr(addr)) - *tag |= KSZ9477_TAIL_TAG_OVERRIDE; + val |= KSZ9477_TAIL_TAG_OVERRIDE; - *tag = cpu_to_be16(*tag); + *tag = cpu_to_be16(val); return nskb; } From 802734ad7753294dae69c431c45e0503dc33e97a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:30:06 +0200 Subject: [PATCH 0691/2056] net: dsa: tag_lan9303: Fix __be16 warnings net/dsa/tag_lan9303.c:76:24: warning: incorrect type in assignment (different base types) net/dsa/tag_lan9303.c:76:24: expected unsigned short [usertype] net/dsa/tag_lan9303.c:76:24: got restricted __be16 [usertype] net/dsa/tag_lan9303.c:80:24: warning: incorrect type in assignment (different base types) net/dsa/tag_lan9303.c:80:24: expected unsigned short [usertype] net/dsa/tag_lan9303.c:80:24: got restricted __be16 [usertype] net/dsa/tag_lan9303.c:106:31: warning: restricted __be16 degrades to integer net/dsa/tag_lan9303.c:111:24: warning: cast to restricted __be16 net/dsa/tag_lan9303.c:111:24: warning: cast to restricted __be16 net/dsa/tag_lan9303.c:111:24: warning: cast to restricted __be16 net/dsa/tag_lan9303.c:111:24: warning: cast to restricted __be16 Make use of __be16 where appropriate to fix these warnings. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/tag_lan9303.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c index eb0e7a32e53d..ccfb6f641bbf 100644 --- a/net/dsa/tag_lan9303.c +++ b/net/dsa/tag_lan9303.c @@ -55,7 +55,8 @@ static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr) static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); - u16 *lan9303_tag; + __be16 *lan9303_tag; + u16 tag; /* insert a special VLAN tag between the MAC addresses * and the current ethertype field. @@ -72,12 +73,12 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) /* make room between MACs and Ether-Type */ memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN); - lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN); + lan9303_tag = (__be16 *)(skb->data + 2 * ETH_ALEN); + tag = lan9303_xmit_use_arl(dp, skb->data) ? + LAN9303_TAG_TX_USE_ALR : + dp->index | LAN9303_TAG_TX_STP_OVERRIDE; lan9303_tag[0] = htons(ETH_P_8021Q); - lan9303_tag[1] = lan9303_xmit_use_arl(dp, skb->data) ? 
- LAN9303_TAG_TX_USE_ALR : - dp->index | LAN9303_TAG_TX_STP_OVERRIDE; - lan9303_tag[1] = htons(lan9303_tag[1]); + lan9303_tag[1] = htons(tag); return skb; } @@ -85,7 +86,7 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { - u16 *lan9303_tag; + __be16 *lan9303_tag; u16 lan9303_tag1; unsigned int source_port; @@ -101,7 +102,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev, * ^ * ->data */ - lan9303_tag = (u16 *)(skb->data - 2); + lan9303_tag = (__be16 *)(skb->data - 2); if (lan9303_tag[0] != htons(ETH_P_8021Q)) { dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n"); From 04d63f9d91d018d4ec31832677f4045706defc18 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:30:07 +0200 Subject: [PATCH 0692/2056] net: dsa: tag_mtk: Fix warnings for __be16 net/dsa/tag_mtk.c:84:13: warning: incorrect type in assignment (different base types) net/dsa/tag_mtk.c:84:13: expected restricted __be16 [usertype] hdr net/dsa/tag_mtk.c:84:13: got int net/dsa/tag_mtk.c:94:17: warning: restricted __be16 degrades to integer The result of a ntohs() is not __be16, but u16. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/tag_mtk.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c index d6619edd53e5..f602fc758d68 100644 --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c @@ -67,8 +67,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { + u16 hdr; int port; - __be16 *phdr, hdr; + __be16 *phdr; unsigned char *dest = eth_hdr(skb)->h_dest; bool is_multicast_skb = is_multicast_ether_addr(dest) && !is_broadcast_ether_addr(dest); From 99bac53d069a05dc680b3aa82614f964bc12f5f9 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:30:08 +0200 Subject: [PATCH 0693/2056] net: dsa: tag_qca.c: Fix warning for __be16 vs u16 net/dsa/tag_qca.c:48:15: warning: incorrect type in assignment (different base types) net/dsa/tag_qca.c:48:15: expected unsigned short [usertype] net/dsa/tag_qca.c:48:15: got restricted __be16 [usertype] net/dsa/tag_qca.c:68:13: warning: incorrect type in assignment (different base types) net/dsa/tag_qca.c:68:13: expected restricted __be16 [usertype] hdr net/dsa/tag_qca.c:68:13: got int net/dsa/tag_qca.c:71:16: warning: restricted __be16 degrades to integer net/dsa/tag_qca.c:81:17: warning: restricted __be16 degrades to integer Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/tag_qca.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 70db7c909f74..7066f5e697d7 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -31,7 +31,8 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); - u16 *phdr, hdr; + __be16 *phdr; + u16 hdr; if (skb_cow_head(skb, QCA_HDR_LEN) < 0) return NULL; @@ -39,7 +40,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) skb_push(skb, QCA_HDR_LEN); memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN); - phdr = (u16 *)(skb->data + 2 * ETH_ALEN); + phdr = (__be16 *)(skb->data + 2 * ETH_ALEN); /* Set the version field, and set destination port information */ hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S | @@ -54,8 +55,9 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt) { u8 ver; + u16 hdr; int port; - __be16 *phdr, hdr; + __be16 *phdr; if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN))) return NULL; From f1931164f06fc645382ee6a79cdb456b66747a5d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:38:07 +0200 Subject: [PATCH 0694/2056] net: dsa: mv88e6xxx: Fix sparse warnings from GENMASK Oddly, GENMASK() requires signed bit numbers, so that it can compare them for < 0. If passed an unsigned type, we get warnings about the test never being true. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index e5430cf2ad71..1c541b074256 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -167,7 +167,7 @@ struct mv88e6xxx_irq { u16 masked; struct irq_chip chip; struct irq_domain *domain; - unsigned int nirqs; + int nirqs; }; /* state flags for mv88e6xxx_port_hwtstamp::state */ @@ -654,7 +654,7 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip) static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip) { - return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0); + return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0); } static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip) From 048442807aba49898a502fa7a87f4b48776359ff Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:38:08 +0200 Subject: [PATCH 0695/2056] net: dsa: mv88e6xxx: vlan_tci is __be16 The flow spec member vlan_tci is in network order. Hence comparisons should be made again network order values. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7627ea61e0ea..d995f5bf0d40 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1751,7 +1751,7 @@ static int mv88e6xxx_policy_insert(struct mv88e6xxx_chip *chip, int port, } if ((fs->flow_type & FLOW_EXT) && fs->m_ext.vlan_tci) { - if (fs->m_ext.vlan_tci != 0xffff) + if (fs->m_ext.vlan_tci != htons(0xffff)) return -EOPNOTSUPP; vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK; } From b672b35143777658cb8801c16427d797430f53e3 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:38:09 +0200 Subject: [PATCH 0696/2056] net: dsa: mv88e6xxx: Remove set but unused variable We don't act on any errors reading registers while handling watchdog interrupt. Since this is an interrupt handler, we cannot return such errors. So just remove the variable. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/global2.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 8fd483020c5b..75b227d0f73b 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -876,19 +876,18 @@ static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip) static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq) { - int err; u16 reg; mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL, MV88E6390_G2_WDOG_CTL_PTR_EVENT); - err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, ®); + mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, ®); dev_info(chip->dev, "Watchdog event: 0x%04x", reg & MV88E6390_G2_WDOG_CTL_DATA_MASK); mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL, MV88E6390_G2_WDOG_CTL_PTR_HISTORY); - err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, ®); + mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, ®); dev_info(chip->dev, "Watchdog history: 0x%04x", reg & MV88E6390_G2_WDOG_CTL_DATA_MASK); From 0b5294483c356e87f520d4894d85a961fc1c4a39 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 21:38:10 +0200 Subject: [PATCH 0697/2056] net: dsa: mv88e6xxx: scratch: Fixup kerneldoc Correct parameters and add the missing ones. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/global2_scratch.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c index 33b7b9570d29..7c2c67405322 100644 --- a/drivers/net/dsa/mv88e6xxx/global2_scratch.c +++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c @@ -44,7 +44,8 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg, /** * mv88e6xxx_g2_scratch_gpio_get_bit - get a bit * @chip: chip private data - * @nr: bit index + * @base_reg: base of scratch bits + * @offset: index of bit within the register * @set: is bit set? */ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip, @@ -68,8 +69,9 @@ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip, /** * mv88e6xxx_g2_scratch_gpio_set_bit - set (or clear) a bit * @chip: chip private data - * @nr: bit index - * @set: set if true, clear if false + * @base_reg: base of scratch bits + * @offset: index of bit within the register + * @set: should this bit be set? * * Helper function for dealing with the direction and data registers. 
*/ @@ -165,6 +167,7 @@ static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip, * mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin * @chip: chip private data * @pin: gpio index + * @input: should the gpio be an input, or an output? */ static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip, unsigned int pin, bool input) From 8caefe7e41b9ea2e1fdc806b45d224fe2ec0d618 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 22:36:23 +0200 Subject: [PATCH 0698/2056] net: dsa: b53: Fixup endianness warnings leX_to_cpu() expects to be passed an __leX type. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_spi.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c index f89f5308a99b..7abec8dab8ba 100644 --- a/drivers/net/dsa/b53/b53_spi.c +++ b/drivers/net/dsa/b53/b53_spi.c @@ -145,42 +145,52 @@ static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) { - int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2); + __le16 value; + int ret; + + ret = b53_spi_read(dev, page, reg, (u8 *)&value, 2); if (!ret) - *val = le16_to_cpu(*val); + *val = le16_to_cpu(value); return ret; } static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) { - int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4); + __le32 value; + int ret; + + ret = b53_spi_read(dev, page, reg, (u8 *)&value, 4); if (!ret) - *val = le32_to_cpu(*val); + *val = le32_to_cpu(value); return ret; } static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) { + __le64 value; int ret; *val = 0; - ret = b53_spi_read(dev, page, reg, (u8 *)val, 6); + ret = b53_spi_read(dev, page, reg, (u8 *)&value, 6); if (!ret) - *val = le64_to_cpu(*val); + *val = le64_to_cpu(value); return ret; } static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) { - int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8); + __le64 value; + int ret; + + ret = b53_spi_read(dev, page, reg, (u8 *)&value, 8); if (!ret) - *val = le64_to_cpu(*val); + *val = le64_to_cpu(value); return ret; } From f76b6ef11c5723e840c1fc49587d267297352e9e Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 22:36:24 +0200 Subject: [PATCH 0699/2056] net: dsa: bcm_sf2: Initialize __be16 with a __be16 value A __be16 variable should be initialised with a __be16 value. So add a htons(). In this case it is pointless, given the value being assigned is 0xffff, but it stops sparse from warnings. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2_cfp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index f707edc641cf..b54339477853 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -348,8 +348,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { + __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff); struct ethtool_rx_flow_spec_input input = {}; - __be16 vlan_tci = 0 , vlan_m_tci = 0xffff; const struct cfp_udf_layout *layout; unsigned int slice_num, rule_index; struct ethtool_rx_flow_rule *flow; @@ -629,8 +629,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int queue_num, struct ethtool_rx_flow_spec *fs) { + __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff); struct ethtool_rx_flow_spec_input input = {}; - __be16 vlan_tci = 0, vlan_m_tci = 0xffff; unsigned int slice_num, rule_index[2]; const struct cfp_udf_layout *layout; struct ethtool_rx_flow_rule *flow; From c226e2716ee25ab8e6b868559be00a1c30282486 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 22:36:25 +0200 Subject: [PATCH 0700/2056] net: dsa: bcm_sf2: Pass GENMASK() signed bits Oddly, GENMASK() requires signed bit numbers, so that it can compare them for < 0. If passed an unsigned type, we get warnings about the test never being true. There is no danger of overflow here, udf is always a u8, so there is plenty of space when expanding to an int. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2_cfp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index b54339477853..d82cee5d9202 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -128,12 +128,12 @@ static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout) return count; } -static inline u32 udf_upper_bits(unsigned int num_udf) +static inline u32 udf_upper_bits(int num_udf) { return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1); } -static inline u32 udf_lower_bits(unsigned int num_udf) +static inline u32 udf_lower_bits(int num_udf) { return (u8)GENMASK(num_udf - 1, 0); } From da31735cb9db61dccea0bc9eeb581b41106a247a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 22:42:27 +0200 Subject: [PATCH 0701/2056] net: dsa: rtl8366: Pass GENMASK() signed bits Oddly, GENMASK() requires signed bit numbers, so that it can compare them for < 0. If passed an unsigned type, we get warnings about the test never being true. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/rtl8366.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index ac88caca5ad4..993cf3ac59d9 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -272,7 +272,7 @@ int rtl8366_init_vlan(struct realtek_smi *smi) /* For the CPU port, make all ports members of this * VLAN. */ - mask = GENMASK(smi->num_ports - 1, 0); + mask = GENMASK((int)smi->num_ports - 1, 0); else /* For all other ports, enable itself plus the * CPU port. 
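The GENMASK() fixes above (the mv88e6xxx port mask, the bcm_sf2 UDF bits and the rtl8366 VLAN member mask) all silence the same complaint. The snippet below is a minimal user-space sketch of the underlying issue, not the kernel's GENMASK() implementation; MASK_CHECKED() and num_ports are invented stand-ins. A mask macro that sanity-checks its bit indices against negative values triggers a "comparison is always false" warning from sparse or GCC (-Wtype-limits) as soon as the index expression has an unsigned type; evaluating the index as a signed int first, as these patches do, keeps the check meaningful without changing the resulting mask.

#include <stdio.h>

/*
 * Stand-in for GENMASK(): like the real macro it sanity-checks its bit
 * indices, and that check is where the "comparison is always false"
 * warning comes from once the index expression is unsigned.
 */
#define MASK_CHECKED(h, l) \
	(((h) < 0 || (l) < 0 || (l) > (h)) ? 0UL : \
	 ((~0UL >> (8 * sizeof(0UL) - 1 - (h))) & (~0UL << (l))))

static unsigned int num_ports = 10;	/* unsigned, like the accessors in the drivers above */

int main(void)
{
	/* "(h) < 0" can never be true here, so sparse/GCC warn */
	unsigned long noisy = MASK_CHECKED(num_ports - 1, 0);

	/* the shape of the fix in these patches: evaluate the index as signed */
	unsigned long quiet = MASK_CHECKED((int)num_ports - 1, 0);

	printf("%#lx %#lx\n", noisy, quiet);	/* both 0x3ff */
	return 0;
}

An int is plenty wide for a port or UDF count, so the cast cannot truncate anything; it only changes the type the warning logic sees.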
From 9534f1e9084452afb3f8bc0fcdb81a95a6b8392a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 22:55:55 +0200 Subject: [PATCH 0702/2056] net: dsa: lan9303: fix variable 'res' set but not used Since lan9303_adjust_link() is a void function, there is no option to return an error. So just remove the variable and lets any errors be discarded. Cc: Egil Hjelmeland Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cc17a44dd3a8..aa1142d6a9f5 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1042,7 +1042,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct lan9303 *chip = ds->priv; - int ctl, res; + int ctl; if (!phy_is_pseudo_fixed_link(phydev)) return; @@ -1063,15 +1063,14 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port, else ctl &= ~BMCR_FULLDPLX; - res = lan9303_phy_write(ds, port, MII_BMCR, ctl); + lan9303_phy_write(ds, port, MII_BMCR, ctl); if (port == chip->phy_addr_base) { /* Virtual Phy: Remove Turbo 200Mbit mode */ lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl); ctl &= ~LAN9303_VIRT_SPECIAL_TURBO; - res = regmap_write(chip->regmap, - LAN9303_VIRT_SPECIAL_CTRL, ctl); + regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl); } } From 5bd6ff0c6fe6c138aebe8d3bf7163420386c7ca7 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 5 Jul 2020 23:05:08 +0200 Subject: [PATCH 0703/2056] net: dsa: vitesse-vsc73xx: Convert to plain comments to avoid kerneldoc warnings The comments before struct vsc73xx_platform and struct vsc73xx_spi use kerneldoc format, but then fail to document the members of these structures. All the structure members are self evident, and the driver has not other kerneldoc comments, so change these to plain comments to avoid warnings. Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/vitesse-vsc73xx-platform.c | 2 +- drivers/net/dsa/vitesse-vsc73xx-spi.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c index 5e54a5726aa4..2a57f337b2a2 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-platform.c +++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c @@ -28,7 +28,7 @@ #define VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK 0xf #define VSC73XX_CMD_PLATFORM_REGISTER_SHIFT 2 -/** +/* * struct vsc73xx_platform - VSC73xx Platform state container */ struct vsc73xx_platform { diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c index e73c8fcddc9f..81eca4a5781d 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-spi.c +++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c @@ -26,7 +26,7 @@ #define VSC73XX_CMD_SPI_BLOCK_MASK 0x7 #define VSC73XX_CMD_SPI_SUBBLOCK_MASK 0xf -/** +/* * struct vsc73xx_spi - VSC73xx SPI state container */ struct vsc73xx_spi { From d4ce70b3b6b777e6a76e18b5c4ca9cb50085efb8 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 6 Jul 2020 14:27:44 +0530 Subject: [PATCH 0704/2056] sun/sungem: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. And they use PCI helper functions to do it. 
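The same conversion shape recurs across the power-management patches in this series (qlcnic earlier, sungem here, niu next). The skeleton below is only an illustrative sketch of that shape; the foo_* names and foo_priv structure are invented, not taken from any driver touched here. The callbacks take a struct device *, drop the pci_save_state()/pci_set_power_state()/pci_enable_device() boilerplate that the PCI core now performs, and are wired up through SIMPLE_DEV_PM_OPS() and .driver.pm instead of CONFIG_PM-guarded .suspend/.resume hooks.

/* Hypothetical skeleton (foo_* names invented) of the conversion pattern. */
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm.h>

struct foo_priv {
	struct net_device *netdev;
};

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* Device-specific quiescing only: no pci_save_state(),
	 * no pci_set_power_state() - the PCI core handles those now.
	 */
	netif_device_detach(priv->netdev);
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	/* Likewise no pci_enable_device()/pci_restore_state() here. */
	netif_device_attach(priv->netdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name		= "foo",
	/* .id_table, .probe, .remove unchanged from the legacy driver */
	.driver.pm	= &foo_pm_ops,
};

module_pci_driver(foo_driver);

Because SIMPLE_DEV_PM_OPS() leaves the callbacks out of the ops table when CONFIG_PM_SLEEP is disabled, and __maybe_unused silences the resulting unused-function warnings, the #ifdef CONFIG_PM guards can be deleted along with the helper calls.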
After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. In this driver: gem_suspend() calls gem_do_stop() which in turn invokes pci_disable_device(). As the PCI helper function is not called at the end/start of the function body, breaking the function in two parts may change its behavior. The only other function invoking gem_do_stop() is gem_close(). Hence, gem_close() and gem_suspend() can do the required end steps on their own. The same case is with gem_resume(). Both gem_resume() and gem_open() invoke gem_do_start(). Again, make the caller functions do the required steps on their own. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sungem.c | 76 +++++++++++++++++-------------- 1 file changed, 43 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 2d392a7b179a..0e9d2cf7979b 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2139,20 +2139,6 @@ static int gem_do_start(struct net_device *dev) struct gem *gp = netdev_priv(dev); int rc; - /* Enable the cell */ - gem_get_cell(gp); - - /* Make sure PCI access and bus master are enabled */ - rc = pci_enable_device(gp->pdev); - if (rc) { - netdev_err(dev, "Failed to enable chip on PCI bus !\n"); - - /* Put cell and forget it for now, it will be considered as - * still asleep, a new sleep cycle may bring it back - */ - gem_put_cell(gp); - return -ENXIO; - } pci_set_master(gp->pdev); /* Init & setup chip hardware */ @@ -2230,13 +2216,6 @@ static void gem_do_stop(struct net_device *dev, int wol) /* Shut the PHY down eventually and setup WOL */ gem_stop_phy(gp, wol); - - /* Make sure bus master is disabled */ - pci_disable_device(gp->pdev); - - /* Cell not needed neither if no WOL */ - if (!wol) - gem_put_cell(gp); } static void gem_reset_task(struct work_struct *work) @@ -2288,26 +2267,53 @@ static void gem_reset_task(struct work_struct *work) static int gem_open(struct net_device *dev) { + struct gem *gp = netdev_priv(dev); + int rc; + /* We allow open while suspended, we just do nothing, * the chip will be initialized in resume() */ - if (netif_device_present(dev)) + if (netif_device_present(dev)) { + /* Enable the cell */ + gem_get_cell(gp); + + /* Make sure PCI access and bus master are enabled */ + rc = pci_enable_device(gp->pdev); + if (rc) { + netdev_err(dev, "Failed to enable chip on PCI bus !\n"); + + /* Put cell and forget it for now, it will be considered + *as still asleep, a new sleep cycle may bring it back + */ + gem_put_cell(gp); + return -ENXIO; + } return gem_do_start(dev); + } + return 0; } static int gem_close(struct net_device *dev) { - if (netif_device_present(dev)) + struct gem *gp = netdev_priv(dev); + + if (netif_device_present(dev)) { gem_do_stop(dev, 0); + /* Make sure bus master is disabled */ + pci_disable_device(gp->pdev); + + /* Cell not needed neither if no WOL */ + if (!gp->asleep_wol) + gem_put_cell(gp); + } return 0; } -#ifdef CONFIG_PM -static int gem_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused gem_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* Lock the network stack first to avoid racing with open/close, @@ -2336,15 +2342,19 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) gp->asleep_wol = 
!!gp->wake_on_lan; gem_do_stop(dev, gp->asleep_wol); + /* Cell not needed neither if no WOL */ + if (!gp->asleep_wol) + gem_put_cell(gp); + /* Unlock the network stack */ rtnl_unlock(); return 0; } -static int gem_resume(struct pci_dev *pdev) +static int __maybe_unused gem_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* See locking comment in gem_suspend */ @@ -2359,6 +2369,9 @@ static int gem_resume(struct pci_dev *pdev) return 0; } + /* Enable the cell */ + gem_get_cell(gp); + /* Restart chip. If that fails there isn't much we can do, we * leave things stopped. */ @@ -2375,7 +2388,6 @@ static int gem_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ static struct net_device_stats *gem_get_stats(struct net_device *dev) { @@ -3019,16 +3031,14 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } +static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume); static struct pci_driver gem_driver = { .name = GEM_MODULE_NAME, .id_table = gem_pci_tbl, .probe = gem_init_one, .remove = gem_remove_one, -#ifdef CONFIG_PM - .suspend = gem_suspend, - .resume = gem_resume, -#endif /* CONFIG_PM */ + .driver.pm = &gem_pm_ops, }; module_pci_driver(gem_driver); From b0db0cc2f695b70e36a776e708d5ccfff8add1d6 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 6 Jul 2020 14:27:45 +0530 Subject: [PATCH 0705/2056] sun/niu: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and taking care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. The driver was calling pci_save/restore_state() which is no longer needed. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S.
Miller --- drivers/net/ethernet/sun/niu.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9a5004f674c7..68541c823245 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9873,9 +9873,9 @@ static void niu_pci_remove_one(struct pci_dev *pdev) } } -static int niu_suspend(struct pci_dev *pdev, pm_message_t state) +static int niu_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); unsigned long flags; @@ -9897,14 +9897,12 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state) niu_stop_hw(np); spin_unlock_irqrestore(&np->lock, flags); - pci_save_state(pdev); - return 0; } -static int niu_resume(struct pci_dev *pdev) +static int niu_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); unsigned long flags; int err; @@ -9912,8 +9910,6 @@ static int niu_resume(struct pci_dev *pdev) if (!netif_running(dev)) return 0; - pci_restore_state(pdev); - netif_device_attach(dev); spin_lock_irqsave(&np->lock, flags); @@ -9930,13 +9926,14 @@ static int niu_resume(struct pci_dev *pdev) return err; } +static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume); + static struct pci_driver niu_pci_driver = { .name = DRV_MODULE_NAME, .id_table = niu_pci_tbl, .probe = niu_pci_init_one, .remove = niu_pci_remove_one, - .suspend = niu_suspend, - .resume = niu_resume, + .driver.pm = &niu_pm_ops, }; #ifdef CONFIG_SPARC64 From f193f4ebde3d61737c93e8091964ef6cd199076b Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 6 Jul 2020 14:27:46 +0530 Subject: [PATCH 0706/2056] sun/cassini: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and taking care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S.
Miller --- drivers/net/ethernet/sun/cassini.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index debd3c3fa6fb..5b11124450d6 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5172,10 +5172,9 @@ static void cas_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -#ifdef CONFIG_PM -static int cas_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused cas_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct cas *cp = netdev_priv(dev); unsigned long flags; @@ -5204,9 +5203,9 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int cas_resume(struct pci_dev *pdev) +static int cas_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct cas *cp = netdev_priv(dev); netdev_info(dev, "resuming\n"); @@ -5227,17 +5226,15 @@ static int cas_resume(struct pci_dev *pdev) mutex_unlock(&cp->pm_mutex); return 0; } -#endif /* CONFIG_PM */ + +static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume); static struct pci_driver cas_driver = { .name = DRV_MODULE_NAME, .id_table = cas_pci_tbl, .probe = cas_init_one, .remove = cas_remove_one, -#ifdef CONFIG_PM - .suspend = cas_suspend, - .resume = cas_resume -#endif + .driver.pm = &cas_pm_ops, }; static int __init cas_init(void) From 0b8241fe3c4e172332f9fa690db6689e78e8e27f Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Mon, 6 Jul 2020 14:44:08 +0200 Subject: [PATCH 0707/2056] selftests: mptcp: capture pcap on both sides When investigating performance issues that involve latency / loss / reordering it is useful to have the pcap from the sender-side as it allows to easier infer the state of the sender's congestion-control, loss-recovery, etc. Allow the selftests to capture a pcap on both sender and receiver so that this information is not lost when reproducing. This patch also improves the file names. Instead of: ns4-5ee79a56-X4O6gS-ns3-5ee79a56-X4O6gS-MPTCP-MPTCP-10.0.3.1.pcap We now have something like for the same test: 5ee79a56-X4O6gS-ns3-ns4-MPTCP-MPTCP-10.0.3.1-10030-connector.pcap 5ee79a56-X4O6gS-ns3-ns4-MPTCP-MPTCP-10.0.3.1-10030-listener.pcap It was a connection from ns3 to ns4, better to start with ns3 then. The port is also added, easier to find the trace we want. Co-developed-by: Christoph Paasch Signed-off-by: Christoph Paasch Signed-off-by: Matthieu Baerts Signed-off-by: David S. Miller --- tools/testing/selftests/net/mptcp/mptcp_connect.sh | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index 8f7145c413b9..c0589e071f20 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -395,10 +395,14 @@ do_transfer() capuser="-Z $SUDO_USER" fi - local capfile="${listener_ns}-${connector_ns}-${cl_proto}-${srv_proto}-${connect_addr}.pcap" + local capfile="${rndh}-${connector_ns:0:3}-${listener_ns:0:3}-${cl_proto}-${srv_proto}-${connect_addr}-${port}" + local capopt="-i any -s 65535 -B 32768 ${capuser}" - ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 & - local cappid=$! 
+ ip netns exec ${listener_ns} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 & + local cappid_listener=$! + + ip netns exec ${connector_ns} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 & + local cappid_connector=$! sleep 1 fi @@ -423,7 +427,8 @@ do_transfer() if $capture; then sleep 1 - kill $cappid + kill ${cappid_listener} + kill ${cappid_connector} fi local duration From bc0c3ae40a03d47709b5d9d86cc9ed0681691e03 Mon Sep 17 00:00:00 2001 From: Tang Bin Date: Mon, 6 Jul 2020 22:47:01 +0800 Subject: [PATCH 0708/2056] net/amd: Remove needless assignment and the extra blank lines The assignment 'err = -ENODEV;' in au1000_probe() is duplicated, so remove the redundant one. Also remove the extra blank lines in the file au1000_eth.c. Signed-off-by: Zhang Shengju Signed-off-by: Tang Bin Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/au1000_eth.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 9f6e3cc2ce80..75dbd221dc59 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -241,7 +241,6 @@ MODULE_LICENSE("GPL"); * ps: make sure the used irqs are configured properly in the board * specific irq-map */ - static void au1000_enable_mac(struct net_device *dev, int force_reset) { unsigned long flags; @@ -556,7 +555,6 @@ static int au1000_mii_probe(struct net_device *dev) return 0; } - /* * Buffer allocation/deallocation routines. The buffer descriptor returned * has the virtual and dma address of a buffer suitable for @@ -647,7 +645,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) /* * ethtool operations */ - static void au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -679,7 +676,6 @@ static const struct ethtool_ops au1000_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; - /* * Initialize the interface. * @@ -1255,7 +1251,6 @@ static int au1000_probe(struct platform_device *pdev) aup->rx_db_inuse[i] = pDB; } - err = -ENODEV; for (i = 0; i < NUM_TX_DMA; i++) { pDB = au1000_GetFreeDB(aup); if (!pDB) From 0fe665d42fd0844dbfb60f15b09d71fd0c409b00 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Mon, 6 Jul 2020 17:55:54 +0300 Subject: [PATCH 0709/2056] dpaa2-eth: fix draining of S/G cache On link down, the draining of the S/G cache should be done on all _possible_ CPUs, not just the ones that are online in that moment. Fix this by changing the iterator. Fixes: d70446ee1f40 ("dpaa2-eth: send a scatter-gather FD instead of realloc-ing") Reported-by: Jakub Kicinski Signed-off-by: Ioana Ciornei Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index bc1f1e0117b6..d0cc1dc49aaa 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1261,7 +1261,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv) u16 count; int k, i; - for_each_online_cpu(k) { + for_each_possible_cpu(k) { sgt_cache = per_cpu_ptr(priv->sgt_cache, k); count = sgt_cache->count; From 9e06e8596bc87cad9dd18869a1b1b42132170dd9 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Mon, 6 Jul 2020 17:18:08 +0200 Subject: [PATCH 0710/2056] geneve: move all configuration under struct geneve_config This patch adds a new structure geneve_config and moves the per-device configuration attributes to it, like we already have in VXLAN with struct vxlan_config. This ends up being pretty invasive since those attributes are used everywhere. This allows us to clean up the argument lists for geneve_configure (4 arguments instead of 8) and geneve_nl2info (5 instead of 9). This also reduces the copy-paste of code setting those attributes between geneve_configure and geneve_changelink to a single memcpy, which would have avoided the bug fixed in commit 56c09de347e4 ("geneve: allow changing DF behavior after creation"). Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- drivers/net/geneve.c | 185 ++++++++++++++++++++----------------------- 1 file changed, 87 insertions(+), 98 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4661ef865807..e3d074008da2 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -48,6 +48,14 @@ struct geneve_dev_node { struct geneve_dev *geneve; }; +struct geneve_config { + struct ip_tunnel_info info; + bool collect_md; + bool use_udp6_rx_checksums; + bool ttl_inherit; + enum ifla_geneve_df df; +}; + /* Pseudo network device */ struct geneve_dev { struct geneve_dev_node hlist4; /* vni hash table for IPv4 socket */ @@ -56,17 +64,13 @@ struct geneve_dev { #endif struct net *net; /* netns for packet i/o */ struct net_device *dev; /* netdev for geneve tunnel */ - struct ip_tunnel_info info; struct geneve_sock __rcu *sock4; /* IPv4 socket used for geneve tunnel */ #if IS_ENABLED(CONFIG_IPV6) struct geneve_sock __rcu *sock6; /* IPv6 socket used for geneve tunnel */ #endif struct list_head next; /* geneve's per namespace list */ struct gro_cells gro_cells; - bool collect_md; - bool use_udp6_rx_checksums; - bool ttl_inherit; - enum ifla_geneve_df df; + struct geneve_config cfg; }; struct geneve_sock { @@ -132,8 +136,8 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { - if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) && - addr == node->geneve->info.key.u.ipv4.dst) + if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && + addr == node->geneve->cfg.info.key.u.ipv4.dst) return node->geneve; } return NULL; @@ -151,8 +155,8 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs, hash = geneve_net_vni_hash(vni); vni_list_head = &gs->vni_list[hash]; hlist_for_each_entry_rcu(node, vni_list_head, hlist) { - if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) && - ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst)) + if 
(eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) && + ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst)) return node->geneve; } return NULL; @@ -321,7 +325,7 @@ static int geneve_init(struct net_device *dev) return err; } - err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL); + err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL); if (err) { free_percpu(dev->tstats); gro_cells_destroy(&geneve->gro_cells); @@ -334,7 +338,7 @@ static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - dst_cache_destroy(&geneve->info.dst_cache); + dst_cache_destroy(&geneve->cfg.info.dst_cache); gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } @@ -654,19 +658,19 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) __u8 vni[3]; __u32 hash; - gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->info.key.tp_dst); + gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->cfg.info.key.tp_dst); if (gs) { gs->refcnt++; goto out; } - gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6, - geneve->use_udp6_rx_checksums); + gs = geneve_socket_create(net, geneve->cfg.info.key.tp_dst, ipv6, + geneve->cfg.use_udp6_rx_checksums); if (IS_ERR(gs)) return PTR_ERR(gs); out: - gs->collect_md = geneve->collect_md; + gs->collect_md = geneve->cfg.collect_md; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { rcu_assign_pointer(geneve->sock6, gs); @@ -679,7 +683,7 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) } node->geneve = geneve; - tunnel_id_to_vni(geneve->info.key.tun_id, vni); + tunnel_id_to_vni(geneve->cfg.info.key.tun_id, vni); hash = geneve_net_vni_hash(vni); hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]); return 0; @@ -688,11 +692,11 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6) static int geneve_open(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - bool metadata = geneve->collect_md; + bool metadata = geneve->cfg.collect_md; bool ipv4, ipv6; int ret = 0; - ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata; + ipv6 = geneve->cfg.info.mode & IP_TUNNEL_INFO_IPV6 || metadata; ipv4 = !ipv6 || metadata; #if IS_ENABLED(CONFIG_IPV6) if (ipv6) { @@ -791,7 +795,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->saddr = info->key.u.ipv4.src; tos = info->key.tos; - if ((tos == 1) && !geneve->collect_md) { + if ((tos == 1) && !geneve->cfg.collect_md) { tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb); use_cache = false; } @@ -840,7 +844,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; prio = info->key.tos; - if ((prio == 1) && !geneve->collect_md) { + if ((prio == 1) && !geneve->cfg.collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); use_cache = false; } @@ -893,22 +897,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, GENEVE_IPV4_HLEN + info->options_len); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - if (geneve->collect_md) { + if (geneve->cfg.collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); - if (geneve->ttl_inherit) + if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); - if (geneve->df == GENEVE_DF_SET) { + if (geneve->cfg.df == GENEVE_DF_SET) { df = htons(IP_DF); - } else if (geneve->df == GENEVE_DF_INHERIT) { + } else if (geneve->cfg.df == GENEVE_DF_INHERIT) { struct ethhdr *eth = eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_IPV6) { @@ -927,7 +931,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return err; udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, - tos, ttl, df, sport, geneve->info.key.tp_dst, + tos, ttl, df, sport, geneve->cfg.info.key.tp_dst, !net_eq(geneve->net, dev_net(geneve->dev)), !(info->key.tun_flags & TUNNEL_CSUM)); return 0; @@ -954,13 +958,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - if (geneve->collect_md) { + if (geneve->cfg.collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; } else { prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), ip_hdr(skb), skb); - if (geneve->ttl_inherit) + if (geneve->cfg.ttl_inherit) ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); else ttl = key->ttl; @@ -972,7 +976,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &fl6.saddr, &fl6.daddr, prio, ttl, - info->key.label, sport, geneve->info.key.tp_dst, + info->key.label, sport, geneve->cfg.info.key.tp_dst, !(info->key.tun_flags & TUNNEL_CSUM)); return 0; } @@ -984,7 +988,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) struct ip_tunnel_info *info = NULL; int err; - if (geneve->collect_md) { + if (geneve->cfg.collect_md) { info = skb_tunnel_info(skb); if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) { netdev_dbg(dev, "no tunnel metadata\n"); @@ -993,7 +997,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } } else { - info = &geneve->info; + info = &geneve->cfg.info; } rcu_read_lock(); @@ -1065,7 +1069,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) info->key.tp_src = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); - info->key.tp_dst = geneve->info.key.tp_dst; + info->key.tp_dst = geneve->cfg.info.key.tp_dst; return 0; } @@ -1227,13 +1231,13 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, *tun_on_same_port = false; *tun_collect_md = false; list_for_each_entry(geneve, &gn->geneve_list, next) { - if (info->key.tp_dst == geneve->info.key.tp_dst) { - *tun_collect_md = geneve->collect_md; + if (info->key.tp_dst == geneve->cfg.info.key.tp_dst) { + *tun_collect_md = geneve->cfg.collect_md; *tun_on_same_port = true; } - if (info->key.tun_id == geneve->info.key.tun_id && - info->key.tp_dst == geneve->info.key.tp_dst && - !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u))) + if (info->key.tun_id == geneve->cfg.info.key.tun_id && + info->key.tp_dst == geneve->cfg.info.key.tp_dst && + !memcmp(&info->key.u, &geneve->cfg.info.key.u, sizeof(info->key.u))) t = geneve; } return t; @@ -1257,16 +1261,15 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, - const struct ip_tunnel_info *info, - bool metadata, bool ipv6_rx_csum, - bool ttl_inherit, enum ifla_geneve_df df) + const struct geneve_config *cfg) { struct geneve_net *gn = 
net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); + const struct ip_tunnel_info *info = &cfg->info; bool tun_collect_md, tun_on_same_port; int err, encap_len; - if (metadata && !is_tnl_info_zero(info)) { + if (cfg->collect_md && !is_tnl_info_zero(info)) { NL_SET_ERR_MSG(extack, "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified"); return -EINVAL; @@ -1281,7 +1284,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, /* make enough headroom for basic scenario */ encap_len = GENEVE_BASE_HLEN + ETH_HLEN; - if (!metadata && ip_tunnel_info_af(info) == AF_INET) { + if (!cfg->collect_md && ip_tunnel_info_af(info) == AF_INET) { encap_len += sizeof(struct iphdr); dev->max_mtu -= sizeof(struct iphdr); } else { @@ -1290,7 +1293,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, } dev->needed_headroom = encap_len + ETH_HLEN; - if (metadata) { + if (cfg->collect_md) { if (tun_on_same_port) { NL_SET_ERR_MSG(extack, "There can be only one externally controlled device on a destination port"); @@ -1304,12 +1307,8 @@ static int geneve_configure(struct net *net, struct net_device *dev, } } - dst_cache_reset(&geneve->info.dst_cache); - geneve->info = *info; - geneve->collect_md = metadata; - geneve->use_udp6_rx_checksums = ipv6_rx_csum; - geneve->ttl_inherit = ttl_inherit; - geneve->df = df; + dst_cache_reset(&geneve->cfg.info.dst_cache); + memcpy(&geneve->cfg, cfg, sizeof(*cfg)); err = register_netdevice(dev); if (err) @@ -1327,11 +1326,10 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, - struct ip_tunnel_info *info, bool *metadata, - bool *use_udp6_rx_checksums, bool *ttl_inherit, - enum ifla_geneve_df *df, bool changelink) + struct geneve_config *cfg, bool changelink) { int attrtype; + struct ip_tunnel_info *info = &cfg->info; if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) { NL_SET_ERR_MSG(extack, @@ -1378,7 +1376,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], return -EINVAL; } info->key.tun_flags |= TUNNEL_CSUM; - *use_udp6_rx_checksums = true; + cfg->use_udp6_rx_checksums = true; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], "IPv6 support not enabled in the kernel"); @@ -1406,19 +1404,19 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_GENEVE_TTL_INHERIT]) { if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT])) - *ttl_inherit = true; + cfg->ttl_inherit = true; else - *ttl_inherit = false; + cfg->ttl_inherit = false; } else if (data[IFLA_GENEVE_TTL]) { info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); - *ttl_inherit = false; + cfg->ttl_inherit = false; } if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_DF]) - *df = nla_get_u8(data[IFLA_GENEVE_DF]); + cfg->df = nla_get_u8(data[IFLA_GENEVE_DF]); if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & @@ -1443,7 +1441,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], attrtype = IFLA_GENEVE_COLLECT_METADATA; goto change_notsup; } - *metadata = true; + cfg->collect_md = true; } if (data[IFLA_GENEVE_UDP_CSUM]) { @@ -1477,7 +1475,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) - *use_udp6_rx_checksums = false; + 
cfg->use_udp6_rx_checksums = false; #else NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX], "IPv6 support not enabled in the kernel"); @@ -1542,25 +1540,24 @@ static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - enum ifla_geneve_df df = GENEVE_DF_UNSET; - bool use_udp6_rx_checksums = false; - struct ip_tunnel_info info; - bool ttl_inherit = false; - bool metadata = false; + struct geneve_config cfg = { + .df = GENEVE_DF_UNSET, + .use_udp6_rx_checksums = false, + .ttl_inherit = false, + .collect_md = false, + }; int err; - init_tnl_info(&info, GENEVE_UDP_PORT); - err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, &df, false); + init_tnl_info(&cfg.info, GENEVE_UDP_PORT); + err = geneve_nl2info(tb, data, extack, &cfg, false); if (err) return err; - err = geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums, ttl_inherit, df); + err = geneve_configure(net, dev, extack, &cfg); if (err) return err; - geneve_link_config(dev, &info, tb); + geneve_link_config(dev, &cfg.info, tb); return 0; } @@ -1616,40 +1613,28 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], { struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs4, *gs6; - struct ip_tunnel_info info; - bool metadata; - bool use_udp6_rx_checksums; - enum ifla_geneve_df df; - bool ttl_inherit; + struct geneve_config cfg; int err; /* If the geneve device is configured for metadata (or externally * controlled, for example, OVS), then nothing can be changed. */ - if (geneve->collect_md) + if (geneve->cfg.collect_md) return -EOPNOTSUPP; /* Start with the existing info. */ - memcpy(&info, &geneve->info, sizeof(info)); - metadata = geneve->collect_md; - use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; - ttl_inherit = geneve->ttl_inherit; - err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, &df, true); + memcpy(&cfg, &geneve->cfg, sizeof(cfg)); + err = geneve_nl2info(tb, data, extack, &cfg, true); if (err) return err; - if (!geneve_dst_addr_equal(&geneve->info, &info)) { - dst_cache_reset(&info.dst_cache); - geneve_link_config(dev, &info, tb); + if (!geneve_dst_addr_equal(&geneve->cfg.info, &cfg.info)) { + dst_cache_reset(&cfg.info.dst_cache); + geneve_link_config(dev, &cfg.info, tb); } geneve_quiesce(geneve, &gs4, &gs6); - geneve->info = info; - geneve->collect_md = metadata; - geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; - geneve->ttl_inherit = ttl_inherit; - geneve->df = df; + memcpy(&geneve->cfg, &cfg, sizeof(cfg)); geneve_unquiesce(geneve, gs4, gs6); return 0; @@ -1683,9 +1668,9 @@ static size_t geneve_get_size(const struct net_device *dev) static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); - struct ip_tunnel_info *info = &geneve->info; - bool ttl_inherit = geneve->ttl_inherit; - bool metadata = geneve->collect_md; + struct ip_tunnel_info *info = &geneve->cfg.info; + bool ttl_inherit = geneve->cfg.ttl_inherit; + bool metadata = geneve->cfg.collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1718,7 +1703,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) goto nla_put_failure; - if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df)) + if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->cfg.df)) goto nla_put_failure; if 
(nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) @@ -1729,7 +1714,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) #if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, - !geneve->use_udp6_rx_checksums)) + !geneve->cfg.use_udp6_rx_checksums)) goto nla_put_failure; #endif @@ -1760,10 +1745,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, u8 name_assign_type, u16 dst_port) { struct nlattr *tb[IFLA_MAX + 1]; - struct ip_tunnel_info info; struct net_device *dev; LIST_HEAD(list_kill); int err; + struct geneve_config cfg = { + .df = GENEVE_DF_UNSET, + .use_udp6_rx_checksums = true, + .ttl_inherit = false, + .collect_md = true, + }; memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, @@ -1771,9 +1761,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, NULL, &info, - true, true, false, GENEVE_DF_UNSET); + init_tnl_info(&cfg.info, dst_port); + err = geneve_configure(net, dev, NULL, &cfg); if (err) { free_netdev(dev); return ERR_PTR(err); From 0dfda108bf3794acc4aeae7b32377dd98fa22a7d Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:13 +0300 Subject: [PATCH 0711/2056] net: qed: move static iro_arr[] out of header file Static variables (and functions, unless they're inline) should not be declared in header files. Move the static array iro_arr[] from "qed_hsi.h" to the sole place where it's used, "qed_init_ops.c". This eliminates lots of warnings (42 of them actually) against W=1+: In file included from drivers/net/ethernet/qlogic/qed/qed.h:51:0, from drivers/net/ethernet/qlogic/qed/qed_ooo.c:40: drivers/net/ethernet/qlogic/qed/qed_hsi.h:4421:18: warning: 'iro_arr' defined but not used [-Wunused-const-variable=] static const u32 iro_arr[] = { ^~~~~~~ Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 73 ------------------- .../net/ethernet/qlogic/qed/qed_init_ops.c | 73 +++++++++++++++++++ 2 files changed, 73 insertions(+), 73 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index d7e70625bdc4..1b5506c1364b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -4391,79 +4391,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, (IRO[66].base + ((roce_pf_id) * IRO[66].m1)) #define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size) -/* IRO Array */ -static const u32 iro_arr[] = { - 0x00000000, 0x00000000, 0x00080000, - 0x00003288, 0x00000088, 0x00880000, - 0x000058e8, 0x00000020, 0x00200000, - 0x00000b00, 0x00000008, 0x00040000, - 0x00000a80, 0x00000008, 0x00040000, - 0x00000000, 0x00000008, 0x00020000, - 0x00000080, 0x00000008, 0x00040000, - 0x00000084, 0x00000008, 0x00020000, - 0x00005718, 0x00000004, 0x00040000, - 0x00004dd0, 0x00000000, 0x00780000, - 0x00003e40, 0x00000000, 0x00780000, - 0x00004480, 0x00000000, 0x00780000, - 0x00003210, 0x00000000, 0x00780000, - 0x00003b50, 0x00000000, 0x00780000, - 0x00007f58, 0x00000000, 0x00780000, - 0x00005f58, 0x00000000, 0x00080000, - 0x00007100, 0x00000000, 0x00080000, - 0x0000aea0, 0x00000000, 0x00080000, - 0x00004398, 0x00000000, 0x00080000, - 0x0000a5a0, 0x00000000, 0x00080000, - 0x0000bde8, 0x00000000, 0x00080000, - 0x00000020, 0x00000004, 0x00040000, - 0x000056c8, 0x00000010, 0x00100000, - 0x0000c210, 0x00000030, 0x00300000, - 0x0000b088, 0x00000038, 0x00380000, - 0x00003d20, 0x00000080, 0x00400000, - 0x0000bf60, 0x00000000, 0x00040000, - 0x00004560, 0x00040080, 0x00040000, - 0x000001f8, 0x00000004, 0x00040000, - 0x00003d60, 0x00000080, 0x00200000, - 0x00008960, 0x00000040, 0x00300000, - 0x0000e840, 0x00000060, 0x00600000, - 0x00004618, 0x00000080, 0x00380000, - 0x00010738, 0x000000c0, 0x00c00000, - 0x000001f8, 0x00000002, 0x00020000, - 0x0000a2a0, 0x00000000, 0x01080000, - 0x0000a3a8, 0x00000008, 0x00080000, - 0x000001c0, 0x00000008, 0x00080000, - 0x000001f8, 0x00000008, 0x00080000, - 0x00000ac0, 0x00000008, 0x00080000, - 0x00002578, 0x00000008, 0x00080000, - 0x000024f8, 0x00000008, 0x00080000, - 0x00000280, 0x00000008, 0x00080000, - 0x00000680, 0x00080018, 0x00080000, - 0x00000b78, 0x00080018, 0x00020000, - 0x0000c640, 0x00000050, 0x003c0000, - 0x00012038, 0x00000018, 0x00100000, - 0x00011b00, 0x00000040, 0x00180000, - 0x000095d0, 0x00000050, 0x00200000, - 0x00008b10, 0x00000040, 0x00280000, - 0x00011640, 0x00000018, 0x00100000, - 0x0000c828, 0x00000048, 0x00380000, - 0x00011710, 0x00000020, 0x00200000, - 0x00004650, 0x00000080, 0x00100000, - 0x00003618, 0x00000010, 0x00100000, - 0x0000a968, 0x00000008, 0x00010000, - 0x000097a0, 0x00000008, 0x00010000, - 0x00011990, 0x00000008, 0x00010000, - 0x0000f018, 0x00000008, 0x00010000, - 0x00012628, 0x00000008, 0x00010000, - 0x00011da8, 0x00000008, 0x00010000, - 0x0000aa78, 0x00000030, 0x00100000, - 0x0000d768, 0x00000028, 0x00280000, - 0x00009a58, 0x00000018, 0x00180000, - 0x00009bd8, 0x00000008, 0x00080000, - 0x00013a18, 0x00000008, 0x00080000, - 0x000126e8, 0x00000018, 0x00180000, - 0x0000e608, 0x00500288, 0x00100000, - 0x00012970, 0x00000138, 0x00280000, -}; - /* Runtime array offsets */ #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 #define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 736702fb81b5..7e6c6389523b 100644 --- 
a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -43,6 +43,79 @@ static u32 pxp_global_win[] = { 0, }; +/* IRO Array */ +static const u32 iro_arr[] = { + 0x00000000, 0x00000000, 0x00080000, + 0x00003288, 0x00000088, 0x00880000, + 0x000058e8, 0x00000020, 0x00200000, + 0x00000b00, 0x00000008, 0x00040000, + 0x00000a80, 0x00000008, 0x00040000, + 0x00000000, 0x00000008, 0x00020000, + 0x00000080, 0x00000008, 0x00040000, + 0x00000084, 0x00000008, 0x00020000, + 0x00005718, 0x00000004, 0x00040000, + 0x00004dd0, 0x00000000, 0x00780000, + 0x00003e40, 0x00000000, 0x00780000, + 0x00004480, 0x00000000, 0x00780000, + 0x00003210, 0x00000000, 0x00780000, + 0x00003b50, 0x00000000, 0x00780000, + 0x00007f58, 0x00000000, 0x00780000, + 0x00005f58, 0x00000000, 0x00080000, + 0x00007100, 0x00000000, 0x00080000, + 0x0000aea0, 0x00000000, 0x00080000, + 0x00004398, 0x00000000, 0x00080000, + 0x0000a5a0, 0x00000000, 0x00080000, + 0x0000bde8, 0x00000000, 0x00080000, + 0x00000020, 0x00000004, 0x00040000, + 0x000056c8, 0x00000010, 0x00100000, + 0x0000c210, 0x00000030, 0x00300000, + 0x0000b088, 0x00000038, 0x00380000, + 0x00003d20, 0x00000080, 0x00400000, + 0x0000bf60, 0x00000000, 0x00040000, + 0x00004560, 0x00040080, 0x00040000, + 0x000001f8, 0x00000004, 0x00040000, + 0x00003d60, 0x00000080, 0x00200000, + 0x00008960, 0x00000040, 0x00300000, + 0x0000e840, 0x00000060, 0x00600000, + 0x00004618, 0x00000080, 0x00380000, + 0x00010738, 0x000000c0, 0x00c00000, + 0x000001f8, 0x00000002, 0x00020000, + 0x0000a2a0, 0x00000000, 0x01080000, + 0x0000a3a8, 0x00000008, 0x00080000, + 0x000001c0, 0x00000008, 0x00080000, + 0x000001f8, 0x00000008, 0x00080000, + 0x00000ac0, 0x00000008, 0x00080000, + 0x00002578, 0x00000008, 0x00080000, + 0x000024f8, 0x00000008, 0x00080000, + 0x00000280, 0x00000008, 0x00080000, + 0x00000680, 0x00080018, 0x00080000, + 0x00000b78, 0x00080018, 0x00020000, + 0x0000c640, 0x00000050, 0x003c0000, + 0x00012038, 0x00000018, 0x00100000, + 0x00011b00, 0x00000040, 0x00180000, + 0x000095d0, 0x00000050, 0x00200000, + 0x00008b10, 0x00000040, 0x00280000, + 0x00011640, 0x00000018, 0x00100000, + 0x0000c828, 0x00000048, 0x00380000, + 0x00011710, 0x00000020, 0x00200000, + 0x00004650, 0x00000080, 0x00100000, + 0x00003618, 0x00000010, 0x00100000, + 0x0000a968, 0x00000008, 0x00010000, + 0x000097a0, 0x00000008, 0x00010000, + 0x00011990, 0x00000008, 0x00010000, + 0x0000f018, 0x00000008, 0x00010000, + 0x00012628, 0x00000008, 0x00010000, + 0x00011da8, 0x00000008, 0x00010000, + 0x0000aa78, 0x00000030, 0x00100000, + 0x0000d768, 0x00000028, 0x00280000, + 0x00009a58, 0x00000018, 0x00180000, + 0x00009bd8, 0x00000008, 0x00080000, + 0x00013a18, 0x00000008, 0x00080000, + 0x000126e8, 0x00000018, 0x00180000, + 0x0000e608, 0x00500288, 0x00100000, + 0x00012970, 0x00000138, 0x00280000, +}; + void qed_init_iro_array(struct qed_dev *cdev) { cdev->iro_arr = iro_arr; From c6b7314d53874d5d9ea7f43bd774c6d4ee596bdd Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:14 +0300 Subject: [PATCH 0712/2056] net: qed: cleanup global structs declarations Fix several sparse warnings by moving structs declarations into the corresponding header files: drivers/net/ethernet/qlogic/qed/qed_dcbx.c:2402:32: warning: symbol 'qed_dcbnl_ops_pass' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_ll2.c:2754:26: warning: symbol 'qed_ll2_ops_pass' was not declared. Should it be static? 
drivers/net/ethernet/qlogic/qed/qed_ptp.c:449:30: warning: symbol 'qed_ptp_ops_pass' was not declared. Should it be static? drivers/net/ethernet/qlogic/qed/qed_sriov.c:5265:29: warning: symbol 'qed_iov_ops_pass' was not declared. Should it be static? (some of them were declared twice in different header files) Also make qed_hw_err_type_descr[] const while at it. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 2 ++ drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 5 ----- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 4 ---- drivers/net/ethernet/qlogic/qed/qed_l2.c | 12 ++---------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 2 ++ drivers/net/ethernet/qlogic/qed/qed_main.c | 4 ++-- drivers/net/ethernet/qlogic/qed/qed_ptp.c | 1 + drivers/net/ethernet/qlogic/qed/qed_ptp.h | 9 +++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 2 ++ 9 files changed, 20 insertions(+), 21 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_ptp.h diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index ba5f3927034c..e1798925b444 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -81,6 +81,8 @@ struct qed_dcbx_mib_meta_data { u32 addr; }; +extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; + #ifdef CONFIG_DCB int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 13c5ccfe06e7..19c85adf4ceb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -45,9 +45,4 @@ static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, } #endif /* CONFIG_QED_FCOE */ -#ifdef CONFIG_QED_LL2 -extern const struct qed_common_ops qed_common_ops_pass; -extern const struct qed_ll2_ops qed_ll2_ops_pass; -#endif - #endif /* _QED_FCOE_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index d6af7ea19bbb..dab7a5d09f87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -26,10 +26,6 @@ struct qed_iscsi_info { iscsi_event_cb_t event_cb; }; -#ifdef CONFIG_QED_LL2 -extern const struct qed_ll2_ops qed_ll2_ops_pass; -#endif - #if IS_ENABLED(CONFIG_QED_ISCSI) int qed_iscsi_alloc(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 03dc804c92a9..bf02748f5185 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -24,6 +24,7 @@ #include "qed.h" #include #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_dev_api.h" #include #include "qed_hsi.h" @@ -31,6 +32,7 @@ #include "qed_int.h" #include "qed_l2.h" #include "qed_mcp.h" +#include "qed_ptp.h" #include "qed_reg_addr.h" #include "qed_sp.h" #include "qed_sriov.h" @@ -2874,16 +2876,6 @@ static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac) return 0; } -#ifdef CONFIG_QED_SRIOV -extern const struct qed_iov_hv_ops qed_iov_ops_pass; -#endif - -#ifdef CONFIG_DCB -extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; -#endif - -extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; - static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h 
b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 8356c7d4a193..500d0c4f8077 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -116,6 +116,8 @@ struct qed_ll2_info { struct qed_ll2_cbs cbs; }; +extern const struct qed_ll2_ops qed_ll2_ops_pass; + /** * @brief qed_ll2_acquire_connection - allocate resources, * starts rx & tx (if relevant) queues pair. Provides diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 98527e42f918..236013da9453 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -2451,7 +2451,7 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) ops->schedule_recovery_handler(cookie); } -static char *qed_hw_err_type_descr[] = { +static const char * const qed_hw_err_type_descr[] = { [QED_HW_ERR_FAN_FAIL] = "Fan Failure", [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", [QED_HW_ERR_HW_ATTN] = "HW Attention", @@ -2466,7 +2466,7 @@ void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, { struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; void *cookie = p_hwfn->cdev->ops_cookie; - char *err_str; + const char *err_str; if (err_type > QED_HW_ERR_LAST) err_type = QED_HW_ERR_LAST; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c index 3bd2f8c961c9..2c62d732e5c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -10,6 +10,7 @@ #include "qed_hw.h" #include "qed_l2.h" #include "qed_mcp.h" +#include "qed_ptp.h" #include "qed_reg_addr.h" /* 16 nano second time quantas to wait before making a Drift adjustment */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h new file mode 100644 index 000000000000..40a11c0e1185 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2020 Marvell International Ltd. 
*/ + +#ifndef __QED_PTP_H +#define __QED_PTP_H + +extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; + +#endif /* __QED_PTP_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 552892c45670..eacd6457f195 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -246,6 +246,8 @@ enum qed_iov_wq_flag { QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, }; +extern const struct qed_iov_hv_ops qed_iov_ops_pass; + #ifdef CONFIG_QED_SRIOV /** * @brief Check if given VF ID @vfid is valid From 365cd2cee0c9813ccabe3e5b12c1b3bf499f8c27 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:15 +0300 Subject: [PATCH 0713/2056] net: qed: correct qed_hw_err_notify() prototype MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change the prototype of qed_hw_err_notify() with the following: * constify the "fmt" argument according to printk() declarations; * annotate it with the __cold attribute to move the function out of line; * annotate it with the __printf() attribute; This eliminates a W=1+ warning: drivers/net/ethernet/qlogic/qed/qed_hw.c: In function ‘qed_hw_err_notify’: drivers/net/ethernet/qlogic/qed/qed_hw.c:851:3: warning: function ‘qed_hw_err_notify’ might be a candidate for ‘gnu_printf’ format attribute [-Wsuggest-attribute=format] len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl); ^~~ as well as saving some code size: add/remove: 0/0 grow/shrink: 2/4 up/down: 40/-125 (-85) Function old new delta qed_dmae_execute_command 1680 1711 +31 qed_spq_post 1104 1113 +9 qed_int_sp_dpc 3554 3545 -9 qed_mcp_cmd_and_union 1896 1876 -20 qed_hw_err_notify 395 352 -43 qed_mcp_handle_events 2630 2577 -53 Total: Before=368645, After=368560, chg -0.02% __printf() will also be helpful with catching bad format strings and arguments. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hw.c | 5 ++--- drivers/net/ethernet/qlogic/qed/qed_hw.h | 7 ++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index e8c777f30207..554f30b0cfd5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -812,9 +812,8 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, return rc; } -void qed_hw_err_notify(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_hw_err_type err_type, char *fmt, ...) +void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_hw_err_type err_type, const char *fmt, ...) { char buf[QED_HW_ERR_MAX_STR_SIZE]; va_list vl; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 6a61b88b544d..2734f49956f7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -301,7 +301,8 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, * @param fmt - debug data buffer to send to the MFW * @param ...
- buffer format args */ -void qed_hw_err_notify(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_hw_err_type err_type, char *fmt, ...); +void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_hw_err_type err_type, + const char *fmt, ...); #endif From 71e11a3f5e57ccc4ede64a8bdc578e99e2455c4f Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:16 +0300 Subject: [PATCH 0714/2056] net: qed: address kernel-doc warnings Get rid of the kernel-doc warnings when building with W=1+ by rewriting the problematic doc comments according to the recommended format and style. Note that this only fixes problems found in C source files, headers aren't in scope for now. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 17 ++++--- drivers/net/ethernet/qlogic/qed/qed_int.c | 49 ++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 16 +++--- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 21 ++++---- 4 files changed, 55 insertions(+), 48 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 1eb48ea80484..2ce1f5180231 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -996,14 +996,17 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, } while (0) /** - * @brief qed_dmae_to_grc - is an internal function - writes from host to - * wide-bus registers (split registers are not supported yet) + * qed_dmae_to_grc() - Internal function for writing from host to + * wide-bus registers (split registers are not supported yet). * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param p_data - pointer to source data. - * @param addr - Destination register address. - * @param len_in_dwords - data length in DWARDS (u32) + * @p_hwfn: HW device data. + * @p_ptt: PTT window used for writing the registers. + * @p_data: Pointer to source data. + * @addr: Destination register address. + * @len_in_dwords: Data length in dwords (u32). + * + * Return: Length of the written data in dwords (u32) or -1 on invalid + * input. */ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 297983d66e2f..0da38c47a8cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -816,11 +816,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, } /** - * @brief qed_int_assertion - handles asserted attention bits + * qed_int_assertion() - Handle asserted attention bits. * - * @param p_hwfn - * @param asserted_bits newly asserted bits - * @return int + * @p_hwfn: HW device data. + * @asserted_bits: Newly asserted bits. + * + * Return: Zero value. */ static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) { @@ -880,16 +881,17 @@ static void qed_int_attn_print(struct qed_hwfn *p_hwfn, } /** - * @brief qed_int_deassertion_aeu_bit - handles the effects of a single - * cause of the attention + * qed_int_deassertion_aeu_bit() - Handles the effects of a single + * cause of the attention. * - * @param p_hwfn - * @param p_aeu - descriptor of an AEU bit which caused the attention - * @param aeu_en_reg - register offset of the AEU enable reg. 
which configured - * this bit to this group. - * @param bit_index - index of this bit in the aeu_en_reg + * @p_hwfn: HW device data. + * @p_aeu: Descriptor of an AEU bit which caused the attention. + * @aeu_en_reg: Register offset of the AEU enable reg. which configured + * this bit to this group. + * @p_bit_name: AEU bit description for logging purposes. + * @bitmask: Index of this bit in the aeu_en_reg. * - * @return int + * Return: Zero on success, negative errno otherwise. */ static int qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, @@ -938,12 +940,12 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, } /** - * @brief qed_int_deassertion_parity - handle a single parity AEU source + * qed_int_deassertion_parity() - Handle a single parity AEU source. * - * @param p_hwfn - * @param p_aeu - descriptor of an AEU bit which caused the parity - * @param aeu_en_reg - address of the AEU enable register - * @param bit_index + * @p_hwfn: HW device data. + * @p_aeu: Descriptor of an AEU bit which caused the parity. + * @aeu_en_reg: Address of the AEU enable register. + * @bit_index: Index (0-31) of an AEU bit. */ static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_aeu, @@ -976,12 +978,13 @@ static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, } /** - * @brief - handles deassertion of previously asserted attentions. + * qed_int_deassertion() - Handle deassertion of previously asserted + * attentions. * - * @param p_hwfn - * @param deasserted_bits - newly deasserted bits - * @return int + * @p_hwfn: HW device data. + * @deasserted_bits: newly deasserted bits. * + * Return: Zero value. */ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, u16 deasserted_bits) @@ -2241,9 +2244,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) } /** - * @brief Initialize igu runtime registers + * qed_int_igu_init_rt() - Initialize IGU runtime registers. * - * @param p_hwfn + * @p_hwfn: HW device data. */ void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index ee89a8f4f585..92ab029789e5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -642,18 +642,18 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) } /** - * @brief qed_spq_add_entry - adds a new entry to the pending - * list. Should be used while lock is being held. + * qed_spq_add_entry() - Add a new entry to the pending list. + * Should be used while lock is being held. * - * Addes an entry to the pending list is there is room (en empty + * @p_hwfn: HW device data. + * @p_ent: An entry to add. + * @priority: Desired priority. + * + * Adds an entry to the pending list is there is room (an empty * element is available in the free_pool), or else places the * entry in the unlimited_pending pool. * - * @param p_hwfn - * @param p_ent - * @param priority - * - * @return int + * Return: zero on success, -EINVAL on invalid @priority. 
*/ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index fcf4d82da161..3930958f0ffa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -823,16 +823,17 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, } /** - * @brief qed_iov_config_perm_table - configure the permission - * zone table. - * In E4, queue zone permission table size is 320x9. There - * are 320 VF queues for single engine device (256 for dual - * engine device), and each entry has the following format: - * {Valid, VF[7:0]} - * @param p_hwfn - * @param p_ptt - * @param vf - * @param enable + * qed_iov_config_perm_table() - Configure the permission zone table. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT window for writing the registers. + * @vf: VF info data. + * @enable: The actual permision for this VF. + * + * In E4, queue zone permission table size is 320x9. There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} */ static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, From 1451e467a3a5a26f3133214078d198cc6ade1ae0 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:17 +0300 Subject: [PATCH 0715/2056] net: qed: improve indentation of some parts of code To not mix functional and stylistic changes, correct indentation of code that will be modified in the subsequent commits. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_debug.c | 5 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 194 +++++++++--------- .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 40 ++-- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 7 +- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 10 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 7 +- drivers/net/ethernet/qlogic/qed/qed_sp.h | 9 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4 +- 9 files changed, 136 insertions(+), 144 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index e72d25854d79..e3fe982532d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -73,8 +73,8 @@ union type1_task_context { }; struct src_ent { - u8 opaque[56]; - u64 next; + u8 opaque[56]; + u64 next; }; #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 45cbe1c87106..f856bb9a3897 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -1122,9 +1122,8 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, dump, "fw-version", fw_ver_str); offset += qed_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str); - offset += qed_dump_num_param(dump_buf + offset, - dump, - "fw-timestamp", fw_info.ver.timestamp); + offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", + fw_info.ver.timestamp); return offset; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 1b5506c1364b..71809ff97a03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2793,34 
+2793,34 @@ struct fw_overlay_buf_hdr { /* init array header: raw */ struct init_array_raw_hdr { - u32 data; -#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF -#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 + u32 data; +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 }; /* init array header: standard */ struct init_array_standard_hdr { - u32 data; -#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF -#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 + u32 data; +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 }; /* init array header: zipped */ struct init_array_zipped_hdr { - u32 data; -#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF -#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 + u32 data; +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 }; /* init array header: pattern */ struct init_array_pattern_hdr { - u32 data; + u32 data; #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF @@ -2831,10 +2831,10 @@ struct init_array_pattern_hdr { /* init array header union */ union init_array_hdr { - struct init_array_raw_hdr raw; - struct init_array_standard_hdr standard; - struct init_array_zipped_hdr zipped; - struct init_array_pattern_hdr pattern; + struct init_array_raw_hdr raw; + struct init_array_standard_hdr standard; + struct init_array_zipped_hdr zipped; + struct init_array_pattern_hdr pattern; }; /* init array types */ @@ -2847,54 +2847,54 @@ enum init_array_types { /* init operation: callback */ struct init_callback_op { - u32 op_data; -#define INIT_CALLBACK_OP_OP_MASK 0xF -#define INIT_CALLBACK_OP_OP_SHIFT 0 -#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF -#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - u16 callback_id; - u16 block_id; + u32 op_data; +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + u16 callback_id; + u16 block_id; }; /* init operation: delay */ struct init_delay_op { - u32 op_data; -#define INIT_DELAY_OP_OP_MASK 0xF -#define INIT_DELAY_OP_OP_SHIFT 0 -#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF -#define INIT_DELAY_OP_RESERVED_SHIFT 4 - u32 delay; + u32 op_data; +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + u32 delay; }; /* init operation: if_mode */ struct init_if_mode_op { - u32 op_data; -#define INIT_IF_MODE_OP_OP_MASK 0xF -#define INIT_IF_MODE_OP_OP_SHIFT 0 -#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF -#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 -#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF -#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - u16 reserved2; - u16 modes_buf_offset; + u32 op_data; +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define 
INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + u16 reserved2; + u16 modes_buf_offset; }; /* init operation: if_phase */ struct init_if_phase_op { - u32 op_data; -#define INIT_IF_PHASE_OP_OP_MASK 0xF -#define INIT_IF_PHASE_OP_OP_SHIFT 0 -#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF -#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 -#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF -#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - u32 phase_data; -#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF -#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 -#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF -#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 -#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF -#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 + u32 op_data; +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 + u32 phase_data; +#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 }; /* init mode operators */ @@ -2907,67 +2907,67 @@ enum init_mode_ops { /* init operation: raw */ struct init_raw_op { - u32 op_data; -#define INIT_RAW_OP_OP_MASK 0xF -#define INIT_RAW_OP_OP_SHIFT 0 -#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF -#define INIT_RAW_OP_PARAM1_SHIFT 4 - u32 param2; + u32 op_data; +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF +#define INIT_RAW_OP_PARAM1_SHIFT 4 + u32 param2; }; /* init array params */ struct init_op_array_params { - u16 size; - u16 offset; + u16 size; + u16 offset; }; /* Write init operation arguments */ union init_write_args { - u32 inline_val; - u32 zeros_count; - u32 array_offset; - struct init_op_array_params runtime; + u32 inline_val; + u32 zeros_count; + u32 array_offset; + struct init_op_array_params runtime; }; /* init operation: write */ struct init_write_op { - u32 data; -#define INIT_WRITE_OP_OP_MASK 0xF -#define INIT_WRITE_OP_OP_SHIFT 0 -#define INIT_WRITE_OP_SOURCE_MASK 0x7 -#define INIT_WRITE_OP_SOURCE_SHIFT 4 -#define INIT_WRITE_OP_RESERVED_MASK 0x1 -#define INIT_WRITE_OP_RESERVED_SHIFT 7 -#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 -#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 -#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF -#define INIT_WRITE_OP_ADDRESS_SHIFT 9 - union init_write_args args; + u32 data; +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args; }; /* init operation: read */ struct init_read_op { - u32 op_data; -#define INIT_READ_OP_OP_MASK 0xF -#define INIT_READ_OP_OP_SHIFT 0 -#define INIT_READ_OP_POLL_TYPE_MASK 0xF -#define INIT_READ_OP_POLL_TYPE_SHIFT 4 -#define INIT_READ_OP_RESERVED_MASK 0x1 -#define INIT_READ_OP_RESERVED_SHIFT 8 -#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF -#define INIT_READ_OP_ADDRESS_SHIFT 9 - u32 expected_val; + u32 op_data; 
+#define INIT_READ_OP_OP_MASK 0xF +#define INIT_READ_OP_OP_SHIFT 0 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 8 +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 + u32 expected_val; }; /* Init operations union */ union init_op { - struct init_raw_op raw; - struct init_write_op write; - struct init_read_op read; - struct init_if_mode_op if_mode; - struct init_if_phase_op if_phase; - struct init_callback_op callback; - struct init_delay_op delay; + struct init_raw_op raw; + struct init_write_op write; + struct init_read_op read; + struct init_if_mode_op if_mode; + struct init_if_phase_op if_phase; + struct init_callback_op callback; + struct init_delay_op delay; }; /* Init command operation types */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 2ce1f5180231..775ef5eaefd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -156,23 +156,26 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { cmd ## _ ## field, \ value) -#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \ - ext_voq, wrr) \ - do { \ - typeof(map) __map; \ - memset(&__map, 0, sizeof(__map)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \ - rl_valid ? 1 : 0);\ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \ - vp_pq_id); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \ - SET_FIELD(__map.reg, \ - QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \ - STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ - *((u32 *)&__map)); \ - (map) = __map; \ +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ + rl_id, ext_voq, wrr) \ + do { \ + typeof(map) __map; \ + \ + memset(&__map, 0, sizeof(__map)); \ + \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + !!(rl_valid)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, \ + (vp_pq_id)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ + SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + (wrr)); \ + \ + STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ + *((u32 *)&__map)); \ + (map) = __map; \ } while (0) #define WRITE_PQ_INFO_TO_RAM 1 @@ -1008,8 +1011,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, * Return: Length of the written data in dwords (u32) or -1 on invalid * input. 
*/ -static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, +static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_data, u32 addr, u32 len_in_dwords) { struct qed_dmae_params params = {}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index f9d9e21cb66b..8abb31b63e4e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -117,10 +117,9 @@ struct qed_iscsi_conn { u8 abortive_dsconnect; }; -static int -qed_iscsi_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, - u16 echo, union event_ring_data *data, u8 fw_return_code) +static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + u16 echo, union event_ring_data *data, + u8 fw_return_code) { if (p_hwfn->p_iscsi_info->event_cb) { struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index b7a0a717ee6d..6f2cd5a18e5f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -59,9 +59,8 @@ struct mpa_v2_hdr { #define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ -static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, u16 echo, - union event_ring_data *data, +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code); /* Override devinfo with iWARP specific values */ @@ -3008,9 +3007,8 @@ qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) return true; } -static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, u16 echo, - union event_ring_data *data, +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + u16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index d5db07db65b1..871282187268 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -37,10 +37,9 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); -static int -qed_roce_async_event(struct qed_hwfn *p_hwfn, - u8 fw_event_code, - u16 echo, union event_ring_data *data, u8 fw_return_code) +static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + u16 echo, union event_ring_data *data, + u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 35539e951bee..f7f983a8bf44 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -154,12 +154,9 @@ struct qed_consq { struct qed_chain chain; }; -typedef int -(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, - u8 opcode, - u16 echo, - union event_ring_data *data, - u8 fw_return_code); +typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode, + u16 echo, union event_ring_data *data, + u8 fw_return_code); int qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 3930958f0ffa..b4e21e4792b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4037,9 +4037,7 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, - u8 opcode, - __le16 echo, +static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, u16 echo, union event_ring_data *data, u8 fw_return_code) { switch (opcode) { From a0f3266f4bf966eefd02123d3aacdf7df8d67c1c Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:18 +0300 Subject: [PATCH 0716/2056] net: qed: use ptr shortcuts to dedup field accessing in some parts Use intermediate pointers instead of multiple dereferencing to simplify and beautify parts of code that will be addressed in the next commit. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 18 +++--- drivers/net/ethernet/qlogic/qed/qed_l2.c | 59 ++++++++++--------- drivers/net/ethernet/qlogic/qed/qed_roce.c | 24 ++++---- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 24 ++++---- 5 files changed, 67 insertions(+), 63 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index e3fe982532d5..3a62358b9749 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2170,6 +2170,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, enum qed_cxt_elem_type elem_type, u32 iid) { u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; + struct tdif_task_context *tdif_context; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; struct qed_ptt *p_ptt; @@ -2252,7 +2253,9 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, for (elem_i = 0; elem_i < elems_per_p; elem_i++) { elem = (union type1_task_context *)elem_start; - SET_FIELD(elem->roce_ctx.tdif_context.flags1, + tdif_context = &elem->roce_ctx.tdif_context; + + SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 6f2cd5a18e5f..55e73a842507 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -808,6 +808,7 @@ static int qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) { struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct mpa_outgoing_params *common; struct qed_iwarp_info *iwarp_info; struct qed_sp_init_data init_data; dma_addr_t async_output_phys; @@ -840,16 +841,17 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) return rc; p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; + common = &p_mpa_ramrod->common; + out_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, out_pdata); - DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr, - out_pdata_phys); - p_mpa_ramrod->common.outgoing_ulp_buffer.len = - ep->cm_info.private_data_len; - p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; + DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys); - p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord; - p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird; + common->outgoing_ulp_buffer.len = ep->cm_info.private_data_len; + common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; + + common->out_rq.ord = ep->cm_info.ord; + common->out_rq.ird = ep->cm_info.ird; 
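The `common` shortcut above is the pattern this patch applies throughout: take one local pointer to the nested aggregate and write through it, instead of spelling out the full chain of dereferences on every assignment. A minimal sketch of the idea, using invented structure names rather than the real qed ramrod types:

	#include <linux/types.h>

	/* Illustrative only: the struct names below are made up for this sketch. */
	struct tpa_cfg {
		u16 max_aggs;
		u16 max_size;
	};

	struct ramrod {
		struct tpa_cfg tpa_param;
	};

	static void cfg_tpa(struct ramrod *p_ramrod)
	{
		struct tpa_cfg *tpa = &p_ramrod->tpa_param;	/* take the pointer once */

		/* before: p_ramrod->tpa_param.max_aggs = ...; repeated for every field */
		tpa->max_aggs = 8;
		tpa->max_size = 0xffff;
	}

Besides trimming the repetition, the local pointer keeps each assignment on a single line, which also makes the endianness conversions added on these same lines by the next patch easier to read.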
p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; @@ -873,7 +875,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) p_mpa_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; } else { - p_mpa_ramrod->common.reject = 1; + common->reject = 1; } iwarp_info = &p_hwfn->p_rdma_info->iwarp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index bf02748f5185..41afd15f4991 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -342,6 +342,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct qed_sp_vport_start_params *p_params) { struct vport_start_ramrod_data *p_ramrod = NULL; + struct eth_vport_tpa_param *tpa_param; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 abs_vport_id = 0; @@ -378,21 +379,21 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); /* TPA related fields */ - memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); + tpa_param = &p_ramrod->tpa_param; + memset(tpa_param, 0, sizeof(*tpa_param)); - p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; + tpa_param->max_buff_num = p_params->max_buffers_per_cqe; switch (p_params->tpa_mode) { case QED_TPA_MODE_GRO: - p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; - p_ramrod->tpa_param.tpa_max_size = (u16)-1; - p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2; - p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2; - p_ramrod->tpa_param.tpa_ipv4_en_flg = 1; - p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; - p_ramrod->tpa_param.tpa_pkt_split_flg = 1; - p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; - break; + tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + tpa_param->tpa_max_size = (u16)-1; + tpa_param->tpa_min_size_to_cont = p_params->mtu / 2; + tpa_param->tpa_min_size_to_start = p_params->mtu / 2; + tpa_param->tpa_ipv4_en_flg = 1; + tpa_param->tpa_ipv6_en_flg = 1; + tpa_param->tpa_pkt_split_flg = 1; + tpa_param->tpa_gro_consistent_flg = 1; default: break; } @@ -601,33 +602,33 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, static void qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, - struct qed_sge_tpa_params *p_params) + const struct qed_sge_tpa_params *param) { - struct eth_vport_tpa_param *p_tpa; + struct eth_vport_tpa_param *tpa; - if (!p_params) { + if (!param) { p_ramrod->common.update_tpa_param_flg = 0; p_ramrod->common.update_tpa_en_flg = 0; p_ramrod->common.update_tpa_param_flg = 0; return; } - p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; - p_tpa = &p_ramrod->tpa_param; - p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; - p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; - p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; - p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; + p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg; + tpa = &p_ramrod->tpa_param; + tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg; + tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg; + tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg; + tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg; - p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; - p_tpa->max_buff_num = p_params->max_buffers_per_cqe; - p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; - p_tpa->tpa_hdr_data_split_flg = 
p_params->tpa_hdr_data_split_flg; - p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; - p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; - p_tpa->tpa_max_size = p_params->tpa_max_size; - p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; - p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; + p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg; + tpa->max_buff_num = param->max_buffers_per_cqe; + tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg; + tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; + tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; + tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; + tpa->tpa_max_size = param->tpa_max_size; + tpa->tpa_min_size_to_start = param->tpa_min_size_to_start; + tpa->tpa_min_size_to_cont = param->tpa_min_size_to_cont; } static void diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 871282187268..5433e43a1930 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -42,29 +42,25 @@ static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + union rdma_eqe_data *rdata = &data->rdma_data; if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { - u16 icid = - (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid); + u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid); /* icid release in this async event can occur only if the icid * was offloaded to the FW. In case it wasn't offloaded this is * handled in qed_roce_sp_destroy_qp. */ qed_roce_free_real_icid(p_hwfn, icid); + } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)rdata->async_handle.lo; + + events.affiliated_event(events.context, fw_event_code, + &srq_id); } else { - if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || - fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { - u16 srq_id = (u16)data->rdma_data.async_handle.lo; - - events.affiliated_event(events.context, fw_event_code, - &srq_id); - } else { - union rdma_eqe_data rdata = data->rdma_data; - - events.affiliated_event(events.context, fw_event_code, - (void *)&rdata.async_handle); - } + events.affiliated_event(events.context, fw_event_code, + (void *)&rdata->async_handle); } return 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 745d76d13732..71ab57bca7c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -300,6 +300,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_tunnel_info *p_tunn, bool allow_npar_tx_switch) { + struct outer_tag_config_struct *outer_tag_config; struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); u8 sb_index = p_hwfn->p_eq->eq_sb_index; @@ -336,29 +337,30 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, else p_ramrod->mf_mode = MF_NPAR; - p_ramrod->outer_tag_config.outer_tag.tci = - cpu_to_le16(p_hwfn->hw_info.ovlan); + outer_tag_config = &p_ramrod->outer_tag_config; + outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { - p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + outer_tag_config->outer_tag.tpid = ETH_P_8021Q; } else if (test_bit(QED_MF_8021AD_TAGGING, 
&p_hwfn->cdev->mf_bits)) { - p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; - p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + outer_tag_config->outer_tag.tpid = ETH_P_8021AD; + outer_tag_config->enable_stag_pri_change = 1; } - p_ramrod->outer_tag_config.pri_map_valid = 1; + outer_tag_config->pri_map_valid = 1; for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) - p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + outer_tag_config->inner_to_outer_pri_map[i] = i; /* enable_stag_pri_change should be set if port is in BD mode or, * UFP with Host Control mode. */ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) - p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + outer_tag_config->enable_stag_pri_change = 1; else - p_ramrod->outer_tag_config.enable_stag_pri_change = 0; + outer_tag_config->enable_stag_pri_change = 0; - p_ramrod->outer_tag_config.outer_tag.tci |= + outer_tag_config->outer_tag.tci |= cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); } @@ -406,7 +408,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n", - sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci); + sb, sb_index, outer_tag_config->outer_tag.tci); rc = qed_spq_post(p_hwfn, p_ent, NULL); From 5ab903418ad14732131df0af0d63f19b73e377ae Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:19 +0300 Subject: [PATCH 0717/2056] net: qed: sanitize BE/LE data processing Current code assumes that both host and device operates in Little Endian in lots of places. While this is true for x86 platform, this doesn't mean we should not care about this. This commit addresses all parts of the code that were pointed out by sparse checker. All operations with restricted (__be*/__le*) types are now protected with explicit from/to CPU conversions, even if they're noops on common setups. I'm sure there are more such places, but this implies a deeper code investigation, and is a subject for future works. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 11 +- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 27 ++- drivers/net/ethernet/qlogic/qed/qed_debug.c | 49 +++-- drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 54 +++--- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 48 ++--- .../ethernet/qlogic/qed/qed_init_fw_funcs.c | 93 ++++----- drivers/net/ethernet/qlogic/qed/qed_int.c | 74 ++++---- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 43 +++-- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 132 ++++++++----- drivers/net/ethernet/qlogic/qed/qed_l2.c | 22 ++- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 9 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 12 +- drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 52 +++--- drivers/net/ethernet/qlogic/qed/qed_rdma.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_roce.c | 176 +++++++----------- drivers/net/ethernet/qlogic/qed/qed_sp.h | 2 +- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 4 +- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 4 +- include/linux/qed/qed_if.h | 15 +- 20 files changed, 434 insertions(+), 399 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 3a62358b9749..5362dc18b6c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -73,8 +73,8 @@ union type1_task_context { }; struct src_ent { - u8 opaque[56]; - u64 next; + __u8 opaque[56]; + __be64 next; }; #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ @@ -2177,6 +2177,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, dma_addr_t p_phys; u64 ilt_hw_entry; void *p_virt; + u32 flags1; int rc = 0; switch (elem_type) { @@ -2255,8 +2256,10 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, elem = (union type1_task_context *)elem_start; tdif_context = &elem->roce_ctx.tdif_context; - SET_FIELD(tdif_context->flags1, - TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); + flags1 = le32_to_cpu(tdif_context->flags1); + SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); + tdif_context->flags1 = cpu_to_le32(flags1); + elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 9f16a3a66007..17d5b649eb36 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -547,7 +547,8 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { - u32 bw_map[2], tsa_map[2], pri_map; + __be32 bw_map[2], tsa_map[2]; + u32 pri_map; int i; p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, @@ -573,11 +574,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, /* 8 bit tsa and bw data corresponding to each of the 8 TC's are * encoded in a type u32 array of size 2. 
*/ - bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]); - bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); - tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); - tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); + cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2); + cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2); pri_map = p_ets->pri_tc_tbl[0]; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; @@ -1054,7 +1054,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, struct dcbx_ets_feature *p_ets, struct qed_dcbx_params *p_params) { - u8 *bw_map, *tsa_map; + __be32 bw_map[2], tsa_map[2]; u32 val; int i; @@ -1076,22 +1076,21 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT; - bw_map = (u8 *)&p_ets->tc_bw_tbl[0]; - tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0]; p_ets->pri_tc_tbl[0] = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { - bw_map[i] = p_params->ets_tc_bw_tbl[i]; - tsa_map[i] = p_params->ets_tc_tsa_tbl[i]; + ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i]; + ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i]; + /* Copy the priority value to the corresponding 4 bits in the * traffic class table. */ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); p_ets->pri_tc_tbl[0] |= val; } - for (i = 0; i < 2; i++) { - p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); - p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); - } + + be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2); + be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2); } static void diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index f856bb9a3897..41ab23712bbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -972,7 +972,7 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, { struct storm_defs *storm = &s_storm_defs[storm_id]; struct fw_info_location fw_info_location; - u32 addr, i, *dest; + u32 addr, i, size, *dest; memset(&fw_info_location, 0, sizeof(fw_info_location)); memset(fw_info, 0, sizeof(*fw_info)); @@ -985,20 +985,29 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, sizeof(fw_info_location); dest = (u32 *)&fw_info_location; + size = BYTES_TO_DWORDS(sizeof(fw_info_location)); - for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location)); - i++, addr += BYTES_IN_DWORD) + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) dest[i] = qed_rd(p_hwfn, p_ptt, addr); + /* qed_rq() fetches data in CPU byteorder. Swap it back to + * the device's to get right structure layout. + */ + cpu_to_le32_array(dest, size); + /* Read FW version info from Storm RAM */ - if (fw_info_location.size > 0 && fw_info_location.size <= - sizeof(*fw_info)) { - addr = fw_info_location.grc_addr; - dest = (u32 *)fw_info; - for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size); - i++, addr += BYTES_IN_DWORD) - dest[i] = qed_rd(p_hwfn, p_ptt, addr); - } + size = le32_to_cpu(fw_info_location.size); + if (!size || size > sizeof(*fw_info)) + return; + + addr = le32_to_cpu(fw_info_location.grc_addr); + dest = (u32 *)fw_info; + size = BYTES_TO_DWORDS(size); + + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) + dest[i] = qed_rd(p_hwfn, p_ptt, addr); + + cpu_to_le32_array(dest, size); } /* Dumps the specified string to the specified buffer. 
@@ -1123,7 +1132,7 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, offset += qed_dump_str_param(dump_buf + offset, dump, "fw-image", fw_img_str); offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", - fw_info.ver.timestamp); + le32_to_cpu(fw_info.ver.timestamp)); return offset; } @@ -4440,9 +4449,11 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, continue; } + addr = le16_to_cpu(asserts->section_ram_line_offset); fw_asserts_section_addr = storm->sem_fast_mem_addr + - SEM_FAST_REG_INT_RAM + - RAM_LINES_TO_BYTES(asserts->section_ram_line_offset); + SEM_FAST_REG_INT_RAM + + RAM_LINES_TO_BYTES(addr); + next_list_idx_addr = fw_asserts_section_addr + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); @@ -7650,8 +7661,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, { struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; - u32 len_rounded, i; - __be32 val; + u32 len_rounded; int rc; *num_dumped_bytes = 0; @@ -7670,10 +7680,9 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ if (image_id != QED_NVM_IMAGE_NVM_META) - for (i = 0; i < len_rounded; i += 4) { - val = cpu_to_be32(*(u32 *)(buffer + i)); - *(u32 *)(buffer + i) = val; - } + cpu_to_be32_array((__force __be32 *)buffer, + (const u32 *)buffer, + len_rounded / sizeof(u32)); *num_dumped_bytes = len_rounded; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index a10e57bba6b9..b768f0698170 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -95,7 +95,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, struct qed_cxt_info cxt_info; u32 dummy_cid; int rc = 0; - u16 tmp; + __le16 tmp; u8 i; /* Get SPQ entry */ @@ -162,17 +162,13 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); p_data->q_params.cmdq_num_entries = tmp; - tmp = fcoe_pf_params->num_cqs; - p_data->q_params.num_queues = (u8)tmp; + p_data->q_params.num_queues = fcoe_pf_params->num_cqs; - tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; - p_data->q_params.queue_relative_offset = (u8)tmp; + tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (__force u8)tmp; for (i = 0; i < fcoe_pf_params->num_cqs; i++) { - u16 igu_sb_id; - - igu_sb_id = qed_get_igu_sb_id(p_hwfn, i); - tmp = cpu_to_le16(igu_sb_id); + tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i)); p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; } @@ -185,21 +181,21 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; - tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]; - p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]; - p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp); + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp; DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); 
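The hunks in this file all follow one recipe: a field the firmware reads keeps (or gains) a restricted type such as __le16/__le32 (or __be* where the wire format is big-endian), and the only value ever stored into it is the result of an explicit cpu_to_le*()/cpu_to_be*() call, so sparse can flag any raw host-order store. A minimal sketch of the pattern, with a hypothetical descriptor layout rather than the real q_params structures:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical device-visible descriptor; fields are little-endian on the wire. */
	struct hw_desc {
		__le16 threshold;
		__le32 base_lo;
	};

	static void fill_desc(struct hw_desc *d, u16 threshold, u32 base)
	{
		/* Explicit conversions document the byte order and satisfy sparse. */
		d->threshold = cpu_to_le16(threshold);
		d->base_lo = cpu_to_le32(base);

		/* A bare "d->threshold = threshold;" would now draw a sparse warning. */
	}

On little-endian hosts these helpers compile down to plain stores; the gain is that a sparse run (make C=1) now checks the driver's host/device boundaries, which is how the spots touched by this patch were found.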
p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; - tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; - p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; - p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp); - tmp = fcoe_pf_params->rq_buffer_size; - p_data->q_params.rq_buffer_size = cpu_to_le16(tmp); + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size); + p_data->q_params.rq_buffer_size = tmp; if (fcoe_pf_params->is_target) { SET_FIELD(p_data->q_params.q_validity, @@ -233,7 +229,8 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, struct fcoe_conn_offload_ramrod_data *p_data; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; - u16 physical_q0, tmp; + u16 physical_q0; + __le16 tmp; int rc; /* Get SPQ entry */ @@ -254,7 +251,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, /* Transmission PQ is the first of the PF */ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - p_conn->physical_q0 = cpu_to_le16(physical_q0); + p_conn->physical_q0 = physical_q0; p_data->physical_q0 = cpu_to_le16(physical_q0); p_data->conn_id = cpu_to_le16(p_conn->conn_id); @@ -553,8 +550,8 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) void qed_fcoe_setup(struct qed_hwfn *p_hwfn) { struct e4_fcoe_task_context *p_task_ctx = NULL; + u32 i, lc; int rc; - u32 i; spin_lock_init(&p_hwfn->p_fcoe_info->lock); for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { @@ -565,10 +562,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn) continue; memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context)); - SET_FIELD(p_task_ctx->timer_context.logical_client_0, - TIMERS_CONTEXT_VALIDLC0, 1); - SET_FIELD(p_task_ctx->timer_context.logical_client_1, - TIMERS_CONTEXT_VALIDLC1, 1); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); + p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1); + p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 71809ff97a03..6bb0bbc0013b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2793,7 +2793,7 @@ struct fw_overlay_buf_hdr { /* init array header: raw */ struct init_array_raw_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF @@ -2802,7 +2802,7 @@ struct init_array_raw_hdr { /* init array header: standard */ struct init_array_standard_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF @@ -2811,7 +2811,7 @@ struct init_array_standard_hdr { /* init array header: zipped */ struct init_array_zipped_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 
0xFFFFFFF @@ -2820,7 +2820,7 @@ struct init_array_zipped_hdr { /* init array header: pattern */ struct init_array_pattern_hdr { - u32 data; + __le32 data; #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF @@ -2847,48 +2847,48 @@ enum init_array_types { /* init operation: callback */ struct init_callback_op { - u32 op_data; + __le32 op_data; #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - u16 callback_id; - u16 block_id; + __le16 callback_id; + __le16 block_id; }; /* init operation: delay */ struct init_delay_op { - u32 op_data; + __le32 op_data; #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 #define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF #define INIT_DELAY_OP_RESERVED_SHIFT 4 - u32 delay; + __le32 delay; }; /* init operation: if_mode */ struct init_if_mode_op { - u32 op_data; + __le32 op_data; #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 #define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF #define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - u16 reserved2; - u16 modes_buf_offset; + __le16 reserved2; + __le16 modes_buf_offset; }; /* init operation: if_phase */ struct init_if_phase_op { - u32 op_data; + __le32 op_data; #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF #define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - u32 phase_data; + __le32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -2907,31 +2907,31 @@ enum init_mode_ops { /* init operation: raw */ struct init_raw_op { - u32 op_data; + __le32 op_data; #define INIT_RAW_OP_OP_MASK 0xF #define INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF #define INIT_RAW_OP_PARAM1_SHIFT 4 - u32 param2; + __le32 param2; }; /* init array params */ struct init_op_array_params { - u16 size; - u16 offset; + __le16 size; + __le16 offset; }; /* Write init operation arguments */ union init_write_args { - u32 inline_val; - u32 zeros_count; - u32 array_offset; + __le32 inline_val; + __le32 zeros_count; + __le32 array_offset; struct init_op_array_params runtime; }; /* init operation: write */ struct init_write_op { - u32 data; + __le32 data; #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 #define INIT_WRITE_OP_SOURCE_MASK 0x7 @@ -2947,7 +2947,7 @@ struct init_write_op { /* init operation: read */ struct init_read_op { - u32 op_data; + __le32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 #define INIT_READ_OP_POLL_TYPE_MASK 0xF @@ -2956,7 +2956,7 @@ struct init_read_op { #define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 - u32 expected_val; + __le32 expected_val; }; /* Init operations union */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 775ef5eaefd4..ea888a2c6ddb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -159,23 +159,22 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define 
QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \ rl_id, ext_voq, wrr) \ do { \ - typeof(map) __map; \ + u32 __reg = 0; \ \ - memset(&__map, 0, sizeof(__map)); \ + BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \ !!(rl_valid)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, \ - (vp_pq_id)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ - SET_FIELD(__map.reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \ (wrr)); \ \ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ - *((u32 *)&__map)); \ - (map) = __map; \ + __reg); \ + (map).reg = cpu_to_le32(__reg); \ } while (0) #define WRITE_PQ_INFO_TO_RAM 1 @@ -1012,9 +1011,10 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, * input. */ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 *p_data, u32 addr, u32 len_in_dwords) + __le32 *p_data, u32 addr, u32 len_in_dwords) { struct qed_dmae_params params = {}; + u32 *data_cpu; int rc; if (!p_data) @@ -1033,8 +1033,13 @@ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Failed writing to chip using DMAE, using GRC instead\n"); - /* write to registers using GRC */ - ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords); + + /* Swap to CPU byteorder and write to registers using GRC */ + data_cpu = (__force u32 *)p_data; + le32_to_cpu_array(data_cpu, len_in_dwords); + + ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords); + cpu_to_le32_array(data_cpu, len_in_dwords); } return len_in_dwords; @@ -1235,7 +1240,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); /* Zero ramline */ - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); } @@ -1247,8 +1252,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool udp, bool ipv4, bool ipv6, enum gft_profile_type profile_type) { - u32 reg_val, cam_line, search_non_ip_as_gft; - struct regpair ram_line = { }; + struct regpair ram_line; + u32 search_non_ip_as_gft; + u32 reg_val, cam_line; + u32 lo = 0, hi = 0; if (!ipv6 && !ipv4) DP_NOTICE(p_hwfn, @@ -1319,43 +1326,46 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, search_non_ip_as_gft = 0; /* Tunnel type */ - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(hi, 
GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1); + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { - SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1); - SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { - SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); /* Allow tunneled traffic without inner IP */ search_non_ip_as_gft = 1; } + ram_line.lo = cpu_to_le32(lo); + ram_line.hi = cpu_to_le32(hi); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, sizeof(ram_line) / REG_SIZE); /* Set default profile so that no filter match will happen */ - ram_line.lo = 0xffffffff; - ram_line.hi = 0x3ff; - qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line, + ram_line.lo = cpu_to_le32(0xffffffff); + ram_line.hi = cpu_to_le32(0x3ff); + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH, sizeof(ram_line) / REG_SIZE); @@ -1373,7 +1383,7 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) u8 crc, validation_byte = 0; static u8 crc8_table_valid; /* automatically initialized to 0 */ u32 validation_string = 0; - u32 data_to_crc; + __be32 data_to_crc; if (!crc8_table_valid) { crc8_populate_msb(cdu_crc8_table, 0x07); @@ -1395,10 +1405,9 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) validation_string |= (conn_type & 0xF); /* Convert to big-endian and calculate CRC8 */ - data_to_crc = be32_to_cpu(validation_string); - - crc = crc8(cdu_crc8_table, - (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE); + data_to_crc = cpu_to_be32(validation_string); + crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), + CRC8_INIT_VALUE); /* The validation byte [7:0] is composed: * for type A validation diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 0da38c47a8cf..9be40280eaaa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -1191,16 +1191,15 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn) static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, void __iomem *igu_addr, u32 ack_cons) { - struct igu_prod_cons_update igu_ack = { 0 }; + u32 igu_ack; - igu_ack.sb_id_and_flags = - ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (IGU_INT_NOP << 
IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_ATTN << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags); + DIRECT_REG_WR(igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. @@ -1414,16 +1413,16 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u8 pf_id, u16 vf_number, u8 vf_valid) { struct qed_dev *cdev = p_hwfn->cdev; - u32 cau_state; + u32 cau_state, params = 0, data = 0; u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); cau_state = CAU_HC_DISABLE_STATE; @@ -1442,7 +1441,8 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, timer_res = 1; else timer_res = 2; - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); if (cdev->tx_coalesce_usecs <= 0x7F) timer_res = 0; @@ -1450,10 +1450,13 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, timer_res = 1; else timer_res = 2; - SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); - SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); - SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + p_sb_entry->params = cpu_to_le32(params); + + SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state); + p_sb_entry->data = cpu_to_le32(data); } static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, @@ -1463,31 +1466,27 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, enum qed_coalescing_fsm coalescing_fsm, u8 timeset) { - struct cau_pi_entry pi_entry; u32 sb_offset, pi_offset; + u32 prod = 0; if (IS_VF(p_hwfn->cdev)) return; - sb_offset = igu_sb_id * PIS_PER_SB_E4; - memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); - - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset); if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0); else - SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); + sb_offset = igu_sb_id * PIS_PER_SB_E4; pi_offset = sb_offset + pi_index; - if (p_hwfn->hw_init_done) { + + if (p_hwfn->hw_init_done) qed_wr(p_hwfn, p_ptt, - CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), - *((u32 *)&(pi_entry))); - } else { - STORE_RT_REG(p_hwfn, - CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, - *((u32 *)&(pi_entry))); - } + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod); + else + STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + 
prod); } void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, @@ -2356,6 +2355,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 timer_res, u16 sb_id, bool tx) { struct cau_sb_entry sb_entry; + u32 params; int rc; if (!p_hwfn->hw_init_done) { @@ -2371,10 +2371,14 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + params = le32_to_cpu(sb_entry.params); + if (tx) - SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); else - SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + sb_entry.params = cpu_to_le32(params); rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 8abb31b63e4e..25d2c882d7ac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -118,7 +118,7 @@ struct qed_iscsi_conn { }; static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { if (p_hwfn->p_iscsi_info->event_cb) { @@ -270,6 +270,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, dma_addr_t xhq_pbl_addr; dma_addr_t uhq_pbl_addr; u16 physical_q; + __le16 tmp; int rc = 0; u32 dval; u16 wval; @@ -293,12 +294,12 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, /* Transmission PQ is the first of the PF */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); - p_conn->physical_q0 = cpu_to_le16(physical_q); + p_conn->physical_q0 = physical_q; p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q); /* iSCSI Pure-ACK PQ */ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); - p_conn->physical_q1 = cpu_to_le16(physical_q); + p_conn->physical_q1 = physical_q; p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q); p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); @@ -324,14 +325,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, p_tcp = &p_ramrod->tcp; p = (u16 *)p_conn->local_mac; - p_tcp->local_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; - p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->remote_mac_addr_lo = tmp; p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); @@ -390,14 +397,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; p = (u16 *)p_conn->local_mac; - p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + 
p_tcp2->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->local_mac_addr_lo = tmp; p = (u16 *)p_conn->remote_mac; - p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p)); - p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1)); - p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2)); + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp2->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->remote_mac_addr_lo = tmp; p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 55e73a842507..512cbef24097 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -60,7 +60,7 @@ struct mpa_v2_hdr { #define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code); /* Override devinfo with iWARP specific values */ @@ -246,14 +246,14 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, SET_FIELD(p_ramrod->flags, IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); - p_ramrod->pd = qp->pd; - p_ramrod->sq_num_pages = qp->sq_num_pages; - p_ramrod->rq_num_pages = qp->rq_num_pages; + p_ramrod->pd = cpu_to_le16(qp->pd); + p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); + p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid_for_sq = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); @@ -288,6 +288,7 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) struct iwarp_modify_qp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags, trans_to_state; int rc; /* Get SPQ entry */ @@ -303,12 +304,17 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) return rc; p_ramrod = &p_ent->ramrod.iwarp_modify_qp; - SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, - 0x1); + + flags = le16_to_cpu(p_ramrod->flags); + SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1); + p_ramrod->flags = cpu_to_le16(flags); + if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) - p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING; + trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING; else - p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR; + trans_to_state = IWARP_MODIFY_QP_STATE_ERROR; + + p_ramrod->transition_to_state = cpu_to_le16(trans_to_state); rc = qed_spq_post(p_hwfn, p_ent, NULL); @@ -621,6 +627,7 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) dma_addr_t async_output_phys; dma_addr_t in_pdata_phys; u16 physical_q; + u16 flags = 0; u8 tcp_flags; int rc; int i; @@ -673,13 +680,14 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->vlan_id = 
cpu_to_le16(ep->cm_info.vlan); tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; - tcp->flags = 0; - SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, + + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, !!(tcp_flags & QED_IWARP_TS_EN)); - SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, !!(tcp_flags & QED_IWARP_DA_EN)); + tcp->flags = cpu_to_le16(flags); tcp->ip_version = ep->cm_info.ip_version; for (i = 0; i < 4; i++) { @@ -695,10 +703,10 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) tcp->tos_or_tc = 0; tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; - tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss; + tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss); tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; - tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT; - tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL; + tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT); + tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL); tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; tcp->connect_mode = ep->connect_mode; @@ -729,6 +737,7 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) union async_output *async_data; u16 mpa_ord, mpa_ird; u8 mpa_hdr_size = 0; + u16 ulp_data_len; u8 mpa_rev; async_data = &ep->ep_buffer_virt->async_output; @@ -792,8 +801,8 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) /* Strip mpa v2 hdr from private data before sending to upper layer */ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; - ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len - - mpa_hdr_size; + ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size; params.event = QED_IWARP_EVENT_MPA_REQUEST; params.cm_info = &ep->cm_info; @@ -817,6 +826,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) dma_addr_t in_pdata_phys; struct qed_rdma_qp *qp; bool reject; + u32 val; int rc; if (!ep) @@ -847,13 +857,15 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) offsetof(struct qed_iwarp_ep_memory, out_pdata); DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys); - common->outgoing_ulp_buffer.len = ep->cm_info.private_data_len; + val = ep->cm_info.private_data_len; + common->outgoing_ulp_buffer.len = cpu_to_le16(val); common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; - common->out_rq.ord = ep->cm_info.ord; - common->out_rq.ird = ep->cm_info.ird; + common->out_rq.ord = cpu_to_le32(ep->cm_info.ord); + common->out_rq.ird = cpu_to_le32(ep->cm_info.ird); - p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + p_mpa_ramrod->tcp_cid = cpu_to_le32(val); in_pdata_phys = ep->ep_buffer_phys + offsetof(struct qed_iwarp_ep_memory, in_pdata); @@ -879,7 +891,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } iwarp_info = &p_hwfn->p_rdma_info->iwarp; - p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size; + p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size); p_mpa_ramrod->mode = ep->mpa_rev; SET_FIELD(p_mpa_ramrod->rtr_pref, IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); @@ -930,6 +942,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) union async_output *async_data; u16 mpa_ird, mpa_ord; u8 mpa_data_size = 0; + u16 ulp_data_len; if 
(MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { mpa_v2_params = @@ -941,11 +954,12 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); } - async_data = &ep->ep_buffer_virt->async_output; + async_data = &ep->ep_buffer_virt->async_output; ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; - ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len - - mpa_data_size; + + ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_data_size; } static void @@ -1822,7 +1836,7 @@ qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, goto out; } - mpa_len = ntohs(*((u16 *)(mpa_data))); + mpa_len = ntohs(*(__force __be16 *)mpa_data); fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); if (fpdu->fpdu_length <= tcp_payload_len) @@ -1844,11 +1858,13 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, struct unaligned_opaque_data *pkt_data, u16 tcp_payload_size, u8 placement_offset) { + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); + fpdu->mpa_buf = buf; fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; - fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; - fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; + fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset; + fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset; if (tcp_payload_size == 1) fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; @@ -1866,6 +1882,7 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, struct unaligned_opaque_data *pkt_data, struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) { + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; int rc; @@ -1886,13 +1903,11 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", fpdu->mpa_frag_virt, fpdu->mpa_frag_len, - (u8 *)(buf->data) + pkt_data->first_mpa_offset, - tcp_payload_size); + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); memcpy(tmp_buf + fpdu->mpa_frag_len, - (u8 *)(buf->data) + pkt_data->first_mpa_offset, - tcp_payload_size); + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); if (rc) @@ -2035,6 +2050,7 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) { struct qed_ll2_tx_pkt_info tx_pkt; + u16 first_mpa_offset; u8 ll2_handle; int rc; @@ -2086,11 +2102,13 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, if (!fpdu->incomplete_bytes) goto out; + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + /* Set third fragment to second part of the packet */ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, buf->data_phys_addr + - curr_pkt->first_mpa_offset, + first_mpa_offset, fpdu->incomplete_bytes); out: DP_VERBOSE(p_hwfn, @@ -2111,12 +2129,12 @@ qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, { u64 opaque_data; - opaque_data = HILO_64(opaque_data1, opaque_data0); + opaque_data = HILO_64(cpu_to_le32(opaque_data1), + cpu_to_le32(opaque_data0)); *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); - curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + - 
le16_to_cpu(curr_pkt->first_mpa_offset); - curr_pkt->cid = le32_to_cpu(curr_pkt->cid); + le16_add_cpu(&curr_pkt->first_mpa_offset, + curr_pkt->tcp_payload_offset); } /* This function is called when an unaligned or incomplete MPA packet arrives @@ -2131,18 +2149,22 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; enum qed_iwarp_mpa_pkt_type pkt_type; struct qed_iwarp_fpdu *fpdu; + u16 cid, first_mpa_offset; int rc = -EINVAL; u8 *mpa_data; - fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); + cid = le32_to_cpu(curr_pkt->cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (!fpdu) { /* something corrupt with cid, post rx back */ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", - curr_pkt->cid); + cid); goto err; } do { - mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + mpa_data = ((u8 *)(buf->data) + first_mpa_offset); pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, mpa_buf->tcp_payload_len, @@ -2188,7 +2210,8 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, } mpa_buf->tcp_payload_len -= fpdu->fpdu_length; - curr_pkt->first_mpa_offset += fpdu->fpdu_length; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->fpdu_length); break; case QED_IWARP_MPA_PKT_UNALIGNED: qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); @@ -2227,7 +2250,9 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, } mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; - curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->incomplete_bytes); + /* The framed PDU was sent - no more incomplete bytes */ fpdu->incomplete_bytes = 0; break; @@ -2278,6 +2303,7 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) struct qed_iwarp_ll2_mpa_buf *mpa_buf; struct qed_iwarp_info *iwarp_info; struct qed_hwfn *p_hwfn = cxt; + u16 first_mpa_offset; iwarp_info = &p_hwfn->p_rdma_info->iwarp; mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, @@ -2291,17 +2317,21 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, data->opaque_data_0, data->opaque_data_1); + first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", - data->length.packet_length, mpa_buf->data.first_mpa_offset, + data->length.packet_length, first_mpa_offset, mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, mpa_buf->data.cid); mpa_buf->ll2_buf = data->cookie; mpa_buf->tcp_payload_len = data->length.packet_length - - mpa_buf->data.first_mpa_offset; - mpa_buf->data.first_mpa_offset += data->u.placement_offset; + first_mpa_offset; + + first_mpa_offset += data->u.placement_offset; + mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset); mpa_buf->placement_offset = data->u.placement_offset; list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); @@ -2500,14 +2530,16 @@ qed_iwarp_ll2_slowpath(void *cxt, struct unaligned_opaque_data unalign_data; struct qed_hwfn *p_hwfn = cxt; struct qed_iwarp_fpdu *fpdu; + u32 cid; qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, opaque_data_0, opaque_data_1); - DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", - unalign_data.cid); + cid = le32_to_cpu(unalign_data.cid); - fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); + 
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); if (fpdu) memset(fpdu, 0, sizeof(*fpdu)); } @@ -3010,7 +3042,7 @@ qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) } static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 41afd15f4991..4c6ac8862744 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -345,8 +345,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, struct eth_vport_tpa_param *tpa_param; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; + u16 min_size, rx_mode = 0; u8 abs_vport_id = 0; - u16 rx_mode = 0; int rc; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); @@ -386,10 +386,12 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, switch (p_params->tpa_mode) { case QED_TPA_MODE_GRO: + min_size = p_params->mtu / 2; + tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; - tpa_param->tpa_max_size = (u16)-1; - tpa_param->tpa_min_size_to_cont = p_params->mtu / 2; - tpa_param->tpa_min_size_to_start = p_params->mtu / 2; + tpa_param->tpa_max_size = cpu_to_le16(U16_MAX); + tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size); + tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size); tpa_param->tpa_ipv4_en_flg = 1; tpa_param->tpa_ipv6_en_flg = 1; tpa_param->tpa_pkt_split_flg = 1; @@ -626,9 +628,9 @@ qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; - tpa->tpa_max_size = param->tpa_max_size; - tpa->tpa_min_size_to_start = param->tpa_min_size_to_start; - tpa->tpa_min_size_to_cont = param->tpa_min_size_to_cont; + tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size); + tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start); + tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont); } static void @@ -2090,7 +2092,8 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, return rc; } - timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES0); address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); @@ -2123,7 +2126,8 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, return rc; } - timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES1); address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index cce6fd27c042..6f4aec339cd4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1798,6 +1798,7 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, enum core_roce_flavor_type roce_flavor; enum core_tx_dest tx_dest; u16 bd_data = 0, frag_idx; + u16 bitfield1; roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? 
CORE_ROCE : CORE_RROCE; @@ -1829,9 +1830,11 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, pkt->remove_stag = true; } - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, - cpu_to_le16(pkt->l4_hdr_offset_w)); - SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); + bitfield1 = le16_to_cpu(start_bd->bitfield1); + SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); + SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); + start_bd->bitfield1 = cpu_to_le16(bitfield1); + bd_data |= pkt->bd_flags; SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 236013da9453..4c5f5bd91359 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1962,8 +1962,7 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, u32 *crc) { u8 *buf = NULL; - int rc, j; - u32 val; + int rc; /* Allocate a buffer for holding the nvram image */ buf = kzalloc(nvm_image->length, GFP_KERNEL); @@ -1981,15 +1980,14 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, /* Convert the buffer into big-endian format (excluding the * closing 4 bytes of CRC). */ - for (j = 0; j < nvm_image->length - 4; j += 4) { - val = cpu_to_be32(*(u32 *)&buf[j]); - *(u32 *)&buf[j] = val; - } + cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, + DIV_ROUND_UP(nvm_image->length - 4, 4)); /* Calc CRC for the "actual" image buffer, i.e. not including * the last 4 CRC bytes. */ - *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4))); + *crc = ~crc32(~0U, buf, nvm_image->length - 4); + *crc = (__force u32)cpu_to_be32p(crc); out: kfree(buf); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 1dd01e0373ab..3e3192a3ad9b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -1276,7 +1276,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) */ for (offset = 0; offset < size; offset += sizeof(u32)) { val = qed_rd(p_hwfn, p_ptt, addr + offset); - val = be32_to_cpu(val); + val = be32_to_cpu((__force __be32)val); memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); } @@ -1325,7 +1325,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) */ for (offset = 0; offset < size; offset += sizeof(u32)) { memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); - val = cpu_to_be32(val); + val = (__force u32)cpu_to_be32(val); qed_wr(p_hwfn, p_ptt, addr + offset, val); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 59d916693654..e5648ca2838b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1106,7 +1106,7 @@ static int qed_rdma_create_cq(void *rdma_cxt, p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + params->cnq_id; - p_ramrod->int_timeout = params->int_timeout; + p_ramrod->int_timeout = cpu_to_le16(params->int_timeout); /* toggle the bit for every resize or create cq for a given icid */ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); @@ -1206,7 +1206,7 @@ err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, return rc; } -void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac) +void qed_rdma_set_fw_mac(__le16 
*p_fw_mac, const u8 *p_qed_mac) { p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); @@ -1495,6 +1495,7 @@ qed_rdma_register_tid(void *rdma_cxt, struct qed_spq_entry *p_ent; enum rdma_tid_type tid_type; u8 fw_return_code; + u16 flags = 0; int rc; DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); @@ -1514,54 +1515,46 @@ qed_rdma_register_tid(void *rdma_cxt, if (p_hwfn->p_rdma_info->last_tid < params->itid) p_hwfn->p_rdma_info->last_tid = params->itid; - p_ramrod = &p_ent->ramrod.rdma_register_tid; - - p_ramrod->flags = 0; - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, params->pbl_two_level); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, + params->zbva); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); /* Don't initialize D/C field, as it may override other bits. */ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, params->page_size_log - 12); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, params->remote_read); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, params->remote_write); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, params->remote_atomic); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, params->local_write); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read); + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, + params->local_read); - SET_FIELD(p_ramrod->flags, - RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, params->mw_bind); + p_ramrod = &p_ent->ramrod.rdma_register_tid; + p_ramrod->flags = cpu_to_le16(flags); + SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, params->pbl_page_size_log - 12); - SET_FIELD(p_ramrod->flags2, - RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr); + SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, + params->dma_mr); switch (params->tid_type) { case QED_RDMA_TID_REGISTERED_MR: @@ -1579,8 +1572,9 @@ qed_rdma_register_tid(void *rdma_cxt, qed_sp_destroy_request(p_hwfn, p_ent); return rc; } - SET_FIELD(p_ramrod->flags1, - RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type); + + SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, + tid_type); p_ramrod->itid = cpu_to_le32(params->itid); p_ramrod->key = params->key; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h index fba43adbb68e..6a1de3a25257 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -201,7 +201,7 @@ qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); int qed_bmap_test_id(struct qed_hwfn 
*p_hwfn, struct qed_bmap *bmap, u32 id_num); -void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac); +void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac); bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index 5433e43a1930..a1423ec0edf7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -38,7 +38,7 @@ static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code) { struct qed_rdma_events events = p_hwfn->p_rdma_info->events; @@ -54,7 +54,7 @@ static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, qed_roce_free_real_icid(p_hwfn, icid); } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { - u16 srq_id = (u16)rdata->async_handle.lo; + u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo); events.affiliated_event(events.context, fw_event_code, &srq_id); @@ -217,9 +217,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, struct roce_create_qp_resp_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; - enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; enum protocol_type proto; + u32 flags = 0; int rc; u8 tc; @@ -252,45 +252,34 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, if (rc) goto err; - p_ramrod = &p_ent->ramrod.roce_create_qp_resp; + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); - p_ramrod->flags = 0; - - roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); - - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, qp->fmr_and_reserved_lkey); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, qp->min_rnr_nak_timer); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); + p_ramrod = &p_ent->ramrod.roce_create_qp_resp; + p_ramrod->flags = cpu_to_le32(flags); p_ramrod->max_ird = qp->max_rd_atomic_resp; p_ramrod->traffic_class = qp->traffic_class_tos; p_ramrod->hop_limit = qp->hop_limit_ttl; @@ -305,10 
+294,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); - p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); - p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); @@ -330,7 +319,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); - p_ramrod->udp_src_port = qp->udp_src_port; + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); @@ -366,9 +355,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, struct roce_create_qp_req_ramrod_data *p_ramrod; u16 regular_latency_queue, low_latency_queue; struct qed_sp_init_data init_data; - enum roce_flavor roce_flavor; struct qed_spq_entry *p_ent; enum protocol_type proto; + u16 flags = 0; int rc; u8 tc; @@ -402,34 +391,29 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, if (rc) goto err; - p_ramrod = &p_ent->ramrod.roce_create_qp_req; + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); - p_ramrod->flags = 0; - - roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor); - - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, qp->fmr_and_reserved_lkey); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, + qp->signal_all); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, + qp->retry_cnt); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, qed_rdma_is_xrc_qp(qp)); - SET_FIELD(p_ramrod->flags2, - ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode); + p_ramrod = &p_ent->ramrod.roce_create_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + + SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, + qp->edpm_mode); p_ramrod->max_ord = qp->max_rd_atomic_req; p_ramrod->traffic_class = qp->traffic_class_tos; @@ -446,10 +430,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, 
qp->orq_phys_addr); qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); - p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi); - p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo); - p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi); - p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); @@ -470,7 +454,7 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); - p_ramrod->udp_src_port = qp->udp_src_port; + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; @@ -502,6 +486,7 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, struct roce_modify_qp_resp_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags = 0; int rc; if (!qp->has_resp) @@ -526,53 +511,43 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, return rc; } - p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); - p_ramrod->flags = 0; - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, qp->incoming_rdma_read_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, qp->incoming_rdma_write_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, qp->incoming_atomic_en); - SET_FIELD(p_ramrod->flags, - ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, qp->e2e_flow_control_en); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); + p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; + p_ramrod->flags = 
cpu_to_le16(flags); + p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, @@ -599,6 +574,7 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, struct roce_modify_qp_req_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; + u16 flags = 0; int rc; if (!qp->has_req) @@ -623,54 +599,44 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, return rc; } - p_ramrod = &p_ent->ramrod.roce_modify_qp_req; + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); - p_ramrod->flags = 0; + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, + !!move_to_sqd); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err); - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd); - - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, qp->sqd_async); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, GET_FIELD(modify_flags, QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); - SET_FIELD(p_ramrod->flags, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); + p_ramrod = &p_ent->ramrod.roce_modify_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + p_ramrod->fields = 0; SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); - - SET_FIELD(p_ramrod->fields, - ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, qp->rnr_retry_cnt); p_ramrod->max_ord = qp->max_rd_atomic_req; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index f7f983a8bf44..993f1357b6fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -155,7 +155,7 @@ struct qed_consq { }; typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode, - u16 echo, union event_ring_data *data, + __le16 echo, union event_ring_data *data, u8 fw_return_code); int diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 71ab57bca7c9..8142f5669b26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -341,9 +341,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, outer_tag_config->outer_tag.tci = 
cpu_to_le16(p_hwfn->hw_info.ovlan); if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { - outer_tag_config->outer_tag.tpid = ETH_P_8021Q; + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q); } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { - outer_tag_config->outer_tag.tpid = ETH_P_8021AD; + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD); outer_tag_config->enable_stag_pri_change = 1; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index b4e21e4792b7..aa215eeeb4df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4005,7 +4005,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, /* List the physical address of the request so that handler * could later on copy the message from it. */ - p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); /* Mark the event and schedule the workqueue */ p_vf->vf_mbx.b_pending_msg = true; @@ -4037,7 +4037,7 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, } } -static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, u16 echo, +static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data, u8 fw_return_code) { switch (opcode) { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 5ca081cd2ed9..90e1060da02b 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1403,16 +1403,15 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, u8 upd_flg) { - struct igu_prod_cons_update igu_ack = { 0 }; + u32 igu_ack; - igu_ack.sb_id_and_flags = - ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_REG << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + DIRECT_REG_WR(sb_info->igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. From 50089be6bf61310a81e622599d13a68f2309c119 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:20 +0300 Subject: [PATCH 0718/2056] net: qede: fix kernel-doc for qede_ptp_adjfreq() One of the function arguments was renamed some time ago, but this wasn't reflected in its kernel-doc comment. Also add the description for return values. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 60a29a937a94..8c28fabb0ff6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -28,12 +28,12 @@ struct qede_ptp { }; /** - * qede_ptp_adjfreq - * @ptp: the ptp clock structure - * @ppb: parts per billion adjustment from base + * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter. * - * Adjust the frequency of the ptp cycle counter by the - * indicated ppb from the base frequency. + * @info: The PTP clock info structure. + * @ppb: Parts per billion adjustment from base. + * + * Return: Zero on success, negative errno otherwise. */ static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) { From fd0816628a5a60a6d3719281d5c14d3aa45eaba1 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 6 Jul 2020 18:38:21 +0300 Subject: [PATCH 0719/2056] net: qede: fix BE vs CPU comparison Flow Dissector's keys are mostly Network / Big Endian. U{16,32}_MAX are the same in either of byteorders, but let's make sparse happy with wrapping them into noops. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_filter.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index db17a675bd02..d8100434e340 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1764,8 +1764,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule, struct flow_match_ports match; flow_rule_match_ports(rule, &match); - if ((match.key->src && match.mask->src != U16_MAX) || - (match.key->dst && match.mask->dst != U16_MAX)) { + if ((match.key->src && match.mask->src != htons(U16_MAX)) || + (match.key->dst && match.mask->dst != htons(U16_MAX))) { DP_NOTICE(edev, "Do not support ports masks\n"); return -EINVAL; } @@ -1817,8 +1817,8 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule, struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); - if ((match.key->src && match.mask->src != U32_MAX) || - (match.key->dst && match.mask->dst != U32_MAX)) { + if ((match.key->src && match.mask->src != htonl(U32_MAX)) || + (match.key->dst && match.mask->dst != htonl(U32_MAX))) { DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n"); return -EINVAL; } From 7a6498ebcdc0fde4613a20ae405481d49166e8bb Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 6 Jul 2020 19:38:50 +0200 Subject: [PATCH 0720/2056] Replace HTTP links with HTTPS ones: IPv* Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. 
Miller --- net/ipv4/Kconfig | 8 ++++---- net/ipv4/cipso_ipv4.c | 4 ++-- net/ipv4/fib_trie.c | 2 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +- net/ipv4/tcp_highspeed.c | 2 +- net/ipv4/tcp_htcp.c | 2 +- net/ipv4/tcp_input.c | 2 +- net/ipv4/tcp_veno.c | 2 +- net/ipv6/Kconfig | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index e64e59b536d3..60db5a6487cc 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -10,7 +10,7 @@ config IP_MULTICAST intend to participate in the MBONE, a high bandwidth network on top of the Internet which carries audio and video broadcasts. More information about the MBONE is on the WWW at - . For most people, it's safe to say N. + . For most people, it's safe to say N. config IP_ADVANCED_ROUTER bool "IP: advanced router" @@ -73,7 +73,7 @@ config IP_MULTIPLE_TABLES If you need more information, see the Linux Advanced Routing and Traffic Control documentation at - + If unsure, say N. @@ -280,7 +280,7 @@ config SYN_COOKIES continue to connect, even when your machine is under attack. There is no need for the legitimate users to change their TCP/IP software; SYN cookies work transparently to them. For technical information - about SYN cookies, check out . + about SYN cookies, check out . If you are SYN flooded, the source address reported by the kernel is likely to have been forged by the attacker; it is only reported as @@ -525,7 +525,7 @@ config TCP_CONG_HSTCP A modification to TCP's congestion control mechanism for use with large congestion windows. A table indicates how much to increase the congestion window by when an ACK is received. - For more detail see http://www.icir.org/floyd/hstcp.html + For more detail see https://www.icir.org/floyd/hstcp.html config TCP_CONG_HYBLA tristate "TCP-Hybla congestion control algorithm" diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index a23094b050f8..0f1b9065c0a6 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -10,9 +10,9 @@ * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: - * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt + * https://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: - * http://www.itl.nist.gov/fipspubs/fip188.htm + * https://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore */ diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 248f1c1959a6..dcb0802a47d5 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -13,7 +13,7 @@ * * An experimental study of compression methods for dynamic tries * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. - * http://www.csc.kth.se/~snilsson/software/dyntrie2/ + * https://www.csc.kth.se/~snilsson/software/dyntrie2/ * * IP-address lookup using LC-tries. 
Stefan Nilsson and Gunnar Karlsson * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999 diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index f8755a4ae9d4..a8b980ad11d4 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -3,7 +3,7 @@ * (C) 2003-2004 by Harald Welte * based on ideas of Fabio Olive Leite * - * Development of this code funded by SuSE Linux AG, http://www.suse.com/ + * Development of this code funded by SuSE Linux AG, https://www.suse.com/ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index bfdfbb972c57..349069d6cd0a 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -2,7 +2,7 @@ /* * Sally Floyd's High Speed TCP (RFC 3649) congestion control * - * See http://www.icir.org/floyd/hstcp.html + * See https://www.icir.org/floyd/hstcp.html * * John Heffner */ diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 88e1f011afe0..55adcfcf96fe 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -4,7 +4,7 @@ * R.N.Shorten, D.J.Leith: * "H-TCP: TCP for high-speed and long-distance networks" * Proc. PFLDnet, Argonne, 2004. - * http://www.hamilton.ie/net/htcp3.pdf + * https://www.hamilton.ie/net/htcp3.pdf */ #include diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 12c26c9565b7..dc77309ea15b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -518,7 +518,7 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss); * * The algorithm for RTT estimation w/o timestamps is based on * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL. - * + * * * More detail on this code can be found at * , diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 50a9a6e2c4cd..cd50a61c9976 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -7,7 +7,7 @@ * "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks." * IEEE Journal on Selected Areas in Communication, * Feb. 2003. - * See http://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf + * See https://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf */ #include diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index f4f19e89af5e..76bff79d6fed 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -14,7 +14,7 @@ menuconfig IPV6 . For specific information about IPv6 under Linux, see Documentation/networking/ipv6.rst and read the HOWTO at - + To compile this protocol support as a module, choose M here: the module will be called ipv6. From d47a72152097d7be7cfc453d205196c0aa976c33 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Mon, 6 Jul 2020 21:06:12 +0200 Subject: [PATCH 0721/2056] mptcp: fix race in subflow_data_ready() syzkaller was able to make the kernel reach subflow_data_ready() for a server subflow that was closed before subflow_finish_connect() completed. In these cases we can avoid using the path for regular/fallback MPTCP data, and just wake the main socket, to avoid the following warning: WARNING: CPU: 0 PID: 9370 at net/mptcp/subflow.c:885 subflow_data_ready+0x1e6/0x290 net/mptcp/subflow.c:885 Kernel panic - not syncing: panic_on_warn set ... 
CPU: 0 PID: 9370 Comm: syz-executor.0 Not tainted 5.7.0 #106 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xb7/0xfe lib/dump_stack.c:118 panic+0x29e/0x692 kernel/panic.c:221 __warn.cold+0x2f/0x3d kernel/panic.c:582 report_bug+0x28b/0x2f0 lib/bug.c:195 fixup_bug arch/x86/kernel/traps.c:105 [inline] fixup_bug arch/x86/kernel/traps.c:100 [inline] do_error_trap+0x10f/0x180 arch/x86/kernel/traps.c:197 do_invalid_op+0x32/0x40 arch/x86/kernel/traps.c:216 invalid_op+0x1e/0x30 arch/x86/entry/entry_64.S:1027 RIP: 0010:subflow_data_ready+0x1e6/0x290 net/mptcp/subflow.c:885 Code: 04 02 84 c0 74 06 0f 8e 91 00 00 00 41 0f b6 5e 48 31 ff 83 e3 18 89 de e8 37 ec 3d fe 84 db 0f 85 65 ff ff ff e8 fa ea 3d fe <0f> 0b e9 59 ff ff ff e8 ee ea 3d fe 48 89 ee 4c 89 ef e8 f3 77 ff RSP: 0018:ffff88811b2099b0 EFLAGS: 00010206 RAX: ffff888111197000 RBX: 0000000000000000 RCX: ffffffff82fbc609 RDX: 0000000000000100 RSI: ffffffff82fbc616 RDI: 0000000000000001 RBP: ffff8881111bc800 R08: ffff888111197000 R09: ffffed10222a82af R10: ffff888111541577 R11: ffffed10222a82ae R12: 1ffff11023641336 R13: ffff888111541000 R14: ffff88810fd4ca00 R15: ffff888111541570 tcp_child_process+0x754/0x920 net/ipv4/tcp_minisocks.c:841 tcp_v4_do_rcv+0x749/0x8b0 net/ipv4/tcp_ipv4.c:1642 tcp_v4_rcv+0x2666/0x2e60 net/ipv4/tcp_ipv4.c:1999 ip_protocol_deliver_rcu+0x29/0x1f0 net/ipv4/ip_input.c:204 ip_local_deliver_finish net/ipv4/ip_input.c:231 [inline] NF_HOOK include/linux/netfilter.h:421 [inline] ip_local_deliver+0x2da/0x390 net/ipv4/ip_input.c:252 dst_input include/net/dst.h:441 [inline] ip_rcv_finish net/ipv4/ip_input.c:428 [inline] ip_rcv_finish net/ipv4/ip_input.c:414 [inline] NF_HOOK include/linux/netfilter.h:421 [inline] ip_rcv+0xef/0x140 net/ipv4/ip_input.c:539 __netif_receive_skb_one_core+0x197/0x1e0 net/core/dev.c:5268 __netif_receive_skb+0x27/0x1c0 net/core/dev.c:5382 process_backlog+0x1e5/0x6d0 net/core/dev.c:6226 napi_poll net/core/dev.c:6671 [inline] net_rx_action+0x3e3/0xd70 net/core/dev.c:6739 __do_softirq+0x18c/0x634 kernel/softirq.c:292 do_softirq_own_stack+0x2a/0x40 arch/x86/entry/entry_64.S:1082 do_softirq.part.0+0x26/0x30 kernel/softirq.c:337 do_softirq arch/x86/include/asm/preempt.h:26 [inline] __local_bh_enable_ip+0x46/0x50 kernel/softirq.c:189 local_bh_enable include/linux/bottom_half.h:32 [inline] rcu_read_unlock_bh include/linux/rcupdate.h:723 [inline] ip_finish_output2+0x78a/0x19c0 net/ipv4/ip_output.c:229 __ip_finish_output+0x471/0x720 net/ipv4/ip_output.c:306 dst_output include/net/dst.h:435 [inline] ip_local_out+0x181/0x1e0 net/ipv4/ip_output.c:125 __ip_queue_xmit+0x7a1/0x14e0 net/ipv4/ip_output.c:530 __tcp_transmit_skb+0x19dc/0x35e0 net/ipv4/tcp_output.c:1238 __tcp_send_ack.part.0+0x3c2/0x5b0 net/ipv4/tcp_output.c:3785 __tcp_send_ack net/ipv4/tcp_output.c:3791 [inline] tcp_send_ack+0x7d/0xa0 net/ipv4/tcp_output.c:3791 tcp_rcv_synsent_state_process net/ipv4/tcp_input.c:6040 [inline] tcp_rcv_state_process+0x36a4/0x49c2 net/ipv4/tcp_input.c:6209 tcp_v4_do_rcv+0x343/0x8b0 net/ipv4/tcp_ipv4.c:1651 sk_backlog_rcv include/net/sock.h:996 [inline] __release_sock+0x1ad/0x310 net/core/sock.c:2548 release_sock+0x54/0x1a0 net/core/sock.c:3064 inet_wait_for_connect net/ipv4/af_inet.c:594 [inline] __inet_stream_connect+0x57e/0xd50 net/ipv4/af_inet.c:686 inet_stream_connect+0x53/0xa0 net/ipv4/af_inet.c:725 mptcp_stream_connect+0x171/0x5f0 net/mptcp/protocol.c:1920 __sys_connect_file 
net/socket.c:1854 [inline] __sys_connect+0x267/0x2f0 net/socket.c:1871 __do_sys_connect net/socket.c:1882 [inline] __se_sys_connect net/socket.c:1879 [inline] __x64_sys_connect+0x6f/0xb0 net/socket.c:1879 do_syscall_64+0xb7/0x3d0 arch/x86/entry/common.c:295 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x7fb577d06469 Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d ff 49 2b 00 f7 d8 64 89 01 48 RSP: 002b:00007fb5783d5dd8 EFLAGS: 00000246 ORIG_RAX: 000000000000002a RAX: ffffffffffffffda RBX: 000000000068bfa0 RCX: 00007fb577d06469 RDX: 000000000000004d RSI: 0000000020000040 RDI: 0000000000000003 RBP: 00000000ffffffff R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 000000000041427c R14: 00007fb5783d65c0 R15: 0000000000000003 Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/39 Reported-by: Christoph Paasch Fixes: e1ff9e82e2ea ("net: mptcp: improve fallback to TCP") Suggested-by: Paolo Abeni Signed-off-by: Davide Caratti Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index e1e19c76e267..9f7f3772c13c 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -873,7 +873,7 @@ static void subflow_data_ready(struct sock *sk) struct mptcp_sock *msk; msk = mptcp_sk(parent); - if (inet_sk_state_load(sk) == TCP_LISTEN) { + if ((1 << inet_sk_state_load(sk)) & (TCPF_LISTEN | TCPF_CLOSE)) { set_bit(MPTCP_DATA_READY, &msk->flags); parent->sk_data_ready(parent); return; From 343ad3964d2f10646178ed3caa2db6e75fe84612 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 29 Jun 2020 20:05:45 +0200 Subject: [PATCH 0722/2056] dt-bindings: net: bluetooth: realtek: Fix uart-has-rtscts example uart-has-rtscts is a boolean property. These are defined as present (which means that this property evaluates to "true") or absent (which means that this property evaluates to "false"). Remove the numeric value from the example to make it comply with the boolean property bindings. Fixes: 1cc2d0e021f867 ("dt-bindings: net: bluetooth: Add rtl8723bs-bluetooth") Signed-off-by: Martin Blumenstingl Signed-off-by: Marcel Holtmann --- Documentation/devicetree/bindings/net/realtek-bluetooth.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml index f15a5e5e4859..c488f24ed38f 100644 --- a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml @@ -44,7 +44,7 @@ examples: uart1 { pinctrl-names = "default"; pinctrl-0 = <&uart1_pins>, <&uart1_rts_cts_pins>; - uart-has-rtscts = <1>; + uart-has-rtscts; bluetooth { compatible = "realtek,rtl8723bs-bt"; From 49b020c1d236a36a4533e7db6d2604cb57ed4c51 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 29 Jun 2020 16:11:00 +0000 Subject: [PATCH 0723/2056] Bluetooth: Adding a configurable autoconnect timeout This patch adds a configurable LE autoconnect timeout. 
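The timeout is held in jiffies inside hci_dev but exchanged with userspace in milliseconds, which is why the patch wraps the new parameter in a jiffies/msecs conversion. The fragment below is only an illustrative sketch of that round trip (the function name is made up; the conversion helpers are the standard kernel ones):

#include <linux/jiffies.h>
#include <linux/types.h>

/* Illustrative only: a mgmt value arrives in milliseconds, is stored as
 * jiffies, and is converted back to milliseconds when reported, mirroring
 * the HDEV_PARAM_U16_JIFFIES_TO_MSECS handling added by this patch.
 */
static u16 example_roundtrip_autoconn_timeout(u16 timeout_ms)
{
	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);

	return (u16)jiffies_to_msecs(timeout_jiffies);
}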
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 1 + net/bluetooth/hci_core.c | 1 + net/bluetooth/hci_event.c | 2 +- net/bluetooth/mgmt_config.c | 13 +++++++++++++ 4 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 836dc997ff94..34ad5b207598 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -376,6 +376,7 @@ struct hci_dev { __u16 def_br_lsto; __u16 def_page_timeout; __u16 def_multi_adv_rotation_duration; + __u16 def_le_autoconnect_timeout; __u16 pkt_type; __u16 esco_type; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 7959b851cc63..e6bf3d9f9d7a 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3569,6 +3569,7 @@ struct hci_dev *hci_alloc_dev(void) hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; + hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e060fc9ebb18..03a0759f2fc2 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5315,7 +5315,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, } conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, - HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER, + hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER, direct_rpa); if (!IS_ERR(conn)) { /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c index 8d01a8ff85e9..b30b571f8caf 100644 --- a/net/bluetooth/mgmt_config.c +++ b/net/bluetooth/mgmt_config.c @@ -17,6 +17,12 @@ { cpu_to_le16(hdev->_param_name_) } \ } +#define HDEV_PARAM_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ +{ \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + { cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) } \ +} + int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { @@ -59,6 +65,8 @@ int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, HDEV_PARAM_U16(0x0018, le_conn_max_interval), HDEV_PARAM_U16(0x0019, le_conn_latency), HDEV_PARAM_U16(0x001a, le_supv_timeout), + HDEV_PARAM_U16_JIFFIES_TO_MSECS(0x001b, + def_le_autoconnect_timeout), }; struct mgmt_rp_read_def_system_config *rp = (void *)params; @@ -129,6 +137,7 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, case 0x0018: case 0x0019: case 0x001a: + case 0x001b: if (len != sizeof(u16)) { bt_dev_warn(hdev, "invalid length %d, exp %zu for type %d", len, sizeof(u16), type); @@ -238,6 +247,10 @@ int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, case 0x0001a: hdev->le_supv_timeout = TLV_GET_LE16(buffer); break; + case 0x0001b: + hdev->def_le_autoconnect_timeout = + msecs_to_jiffies(TLV_GET_LE16(buffer)); + break; default: bt_dev_warn(hdev, "unsupported parameter %u", type); break; From d4edda0f791fccf4cbb8a88566a8f2b1228faaee Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Mon, 29 Jun 2020 17:04:15 +0000 Subject: [PATCH 0724/2056] Bluetooth: use configured default params for active scans This patch fixes active scans to use the configured default parameters. 
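Concretely, the discovery interval and window now come from per-controller fields (hdev->le_scan_int_discovery and hdev->le_scan_window_discovery) instead of the compile-time DISCOV_LE_SCAN_INT / DISCOV_LE_SCAN_WIN constants. A condensed, hypothetical sketch of the resulting call; the wrapper function is not in the patch, but the hci_req_start_scan() argument order matches the diff that follows:

/* Hypothetical wrapper: post-patch source of the scan parameters. */
static void start_active_scan(struct hci_request *req, u8 own_addr_type,
			      u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* interval/window are runtime defaults on hdev, not #defines */
	hci_req_start_scan(req, LE_SCAN_ACTIVE,
			   hdev->le_scan_int_discovery,
			   hdev->le_scan_window_discovery,
			   own_addr_type, filter_policy);
}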
Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 116207009dde..68a2ec36e1c1 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -2767,8 +2767,9 @@ static int active_scan(struct hci_request *req, unsigned long opt) if (err < 0) own_addr_type = ADDR_LE_DEV_PUBLIC; - hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN, - own_addr_type, filter_policy); + hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, + hdev->le_scan_window_discovery, own_addr_type, + filter_policy); return 0; } @@ -2815,18 +2816,18 @@ static void start_discovery(struct hci_dev *hdev, u8 *status) * to do BR/EDR inquiry. */ hci_req_sync(hdev, interleaved_discov, - DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT, + hdev->le_scan_int_discovery * 2, HCI_CMD_TIMEOUT, status); break; } timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); - hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, + hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; case DISCOV_TYPE_LE: timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); - hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, + hci_req_sync(hdev, active_scan, hdev->le_scan_int_discovery, HCI_CMD_TIMEOUT, status); break; default: From 461f95f04f19382dcfd17da2d8db37e0cdc719f2 Mon Sep 17 00:00:00 2001 From: Hilda Wu Date: Tue, 30 Jun 2020 21:09:40 +0800 Subject: [PATCH 0725/2056] Bluetooth: btusb: USB alternate setting 1 for WBS RTL8822CE supports transparent WBS to apply USB alternate setting 1. Add a flag to the device match data to apply alternate setting 1 which meet the transfer speed for WBS support. 
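The resulting selection in btusb_work() is: prefer USB alternate setting 6 for transparent audio, fall back to alternate setting 1 when the new BTUSB_USE_ALT1_FOR_WBS flag was set at probe time (Realtek devices that expose alt 1, e.g. RTL8822CE), and report an error otherwise. Distilled below as an illustrative helper; it is not a drop-in replacement for the btusb_work() hunk in the diff that follows:

/* Illustrative only: distilled alternate-setting choice for WBS. */
static int btusb_pick_wbs_alts(struct btusb_data *data, struct hci_dev *hdev)
{
	if (btusb_find_altsetting(data, 6)) {
		data->usb_alt6_packet_flow = true;
		return 6;	/* preferred for transparent audio */
	}

	if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags))
		return 1;	/* e.g. RTL8822CE */

	bt_dev_err(hdev, "Device does not support ALT setting 6");
	return 0;	/* no WBS-capable alternate setting found */
}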
Signed-off-by: Hilda Wu Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 34 ++++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0e143c0cecf2..faa863dd5d0a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -453,6 +453,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { #define BTUSB_HW_RESET_ACTIVE 12 #define BTUSB_TX_WAIT_VND_EVT 13 #define BTUSB_WAKEUP_DISABLE 14 +#define BTUSB_USE_ALT1_FOR_WBS 15 struct btusb_data { struct hci_dev *hdev; @@ -1682,14 +1683,15 @@ static void btusb_work(struct work_struct *work) new_alts = data->sco_num; } } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) { - - data->usb_alt6_packet_flow = true; - /* Check if Alt 6 is supported for Transparent audio */ - if (btusb_find_altsetting(data, 6)) + if (btusb_find_altsetting(data, 6)) { + data->usb_alt6_packet_flow = true; new_alts = 6; - else + } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) { + new_alts = 1; + } else { bt_dev_err(hdev, "Device does not support ALT setting 6"); + } } if (btusb_switch_alt_setting(hdev, new_alts) < 0) @@ -4004,6 +4006,15 @@ static int btusb_probe(struct usb_interface *intf, btusb_check_needs_reset_resume(intf); } + if (id->driver_info & BTUSB_AMP) { + /* AMP controllers do not support SCO packets */ + data->isoc = NULL; + } else { + /* Interface orders are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1); + data->isoc_ifnum = ifnum_base + 1; + } + if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) && (id->driver_info & BTUSB_REALTEK)) { hdev->setup = btrtl_setup_realtek; @@ -4015,21 +4026,16 @@ static int btusb_probe(struct usb_interface *intf, * (DEVICE_REMOTE_WAKEUP) */ set_bit(BTUSB_WAKEUP_DISABLE, &data->flags); + if (btusb_find_altsetting(data, 1)) + set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags); + else + bt_dev_err(hdev, "Device does not support ALT setting 1"); err = usb_autopm_get_interface(intf); if (err < 0) goto out_free_dev; } - if (id->driver_info & BTUSB_AMP) { - /* AMP controllers do not support SCO packets */ - data->isoc = NULL; - } else { - /* Interface orders are hardcoded in the specification */ - data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1); - data->isoc_ifnum = ifnum_base + 1; - } - if (!reset) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); From b83764f9220a4a14525657466f299850bbc98de9 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Mon, 29 Jun 2020 20:15:00 -0700 Subject: [PATCH 0726/2056] Bluetooth: Fix kernel oops triggered by hci_adv_monitors_clear() This fixes the kernel oops by removing unnecessary background scan update from hci_adv_monitors_clear() which shouldn't invoke any work queue. The following test was performed. - Run "rmmod btusb" and verify that no kernel oops is triggered. 
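hci_adv_monitors_clear() runs on the teardown path (the reproducer above is module removal), and hci_update_background_scan() ends up queueing deferred work, which this path must not do. Roughly what the function is expected to look like after the change; the loop header is reconstructed for illustration and only the lines visible in the diff that follows are authoritative:

void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(monitor);

	idr_destroy(&hdev->adv_monitors_idr);

	/* no hci_update_background_scan() here any more -- nothing on
	 * this path may schedule work
	 */
}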
Signed-off-by: Miao-chen Chou Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index e6bf3d9f9d7a..6509f785dd14 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3005,8 +3005,6 @@ void hci_adv_monitors_clear(struct hci_dev *hdev) hci_free_adv_monitor(monitor); idr_destroy(&hdev->adv_monitors_idr); - - hci_update_background_scan(hdev); } void hci_free_adv_monitor(struct adv_monitor *monitor) From 15d8ce05ebec37a0d701cde768bbf21349f2329d Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Tue, 7 Jul 2020 17:46:06 +0200 Subject: [PATCH 0727/2056] Bluetooth: le_simult_central_peripheral experimental feature This patch adds an le_simult_central_peripheral features which allows a clients to determine if the controller is able to support peripheral and central connections separately and at the same time. Signed-off-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/mgmt.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 5e9b9728eeac..d29da80e38fe 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -3753,12 +3753,19 @@ static const u8 debug_uuid[16] = { }; #endif +/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ +static const u8 simult_central_periph_uuid[16] = { + 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, + 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, +}; + static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[42]; + char buf[44]; struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; + u32 flags; bt_dev_dbg(hdev, "sock %p", sk); @@ -3766,7 +3773,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, #ifdef CONFIG_BT_FEATURE_DEBUG if (!hdev) { - u32 flags = bt_dbg_get() ? BIT(0) : 0; + flags = bt_dbg_get() ? BIT(0) : 0; memcpy(rp->features[idx].uuid, debug_uuid, 16); rp->features[idx].flags = cpu_to_le32(flags); @@ -3774,6 +3781,20 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, } #endif + if (hdev) { + if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) && + (hdev->le_states[4] & 0x08) && /* Central */ + (hdev->le_states[4] & 0x40) && /* Peripheral */ + (hdev->le_states[3] & 0x10)) /* Simultaneous */ + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable From 6e9fdb60d362a5db769d1a39a390288e1ee45afd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 6 Jul 2020 14:29:39 -0700 Subject: [PATCH 0728/2056] net: systemport: Add support for VLAN transmit acceleration SYSTEMPORT is capable of performing VLAN transmit acceleration, support that by configuring it appropriately, providing the VLAN ID and PCP/DEI where necessary. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 37 ++++++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b25356e21a1e..b470551bae4f 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -160,13 +160,26 @@ static void bcm_sysport_set_tx_csum(struct net_device *dev, /* Hardware transmit checksum requires us to enable the Transmit status * block prepended to the packet contents */ - priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_TX)); reg = tdma_readl(priv, TDMA_CONTROL); if (priv->tsb_en) reg |= tdma_control_bit(priv, TSB_EN); else reg &= ~tdma_control_bit(priv, TSB_EN); + /* Indicating that software inserts Broadcom tags is needed for the TX + * checksum to be computed correctly when using VLAN HW acceleration, + * else it has no effect, so it can always be turned on. + */ + if (netdev_uses_dsa(dev)) + reg |= tdma_control_bit(priv, SW_BRCM_TAG); + else + reg &= ~tdma_control_bit(priv, SW_BRCM_TAG); tdma_writel(priv, reg, TDMA_CONTROL); + + /* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */ + if (wanted & NETIF_F_HW_VLAN_CTAG_TX) + tdma_writel(priv, ETH_P_8021Q, TDMA_TPID); } static int bcm_sysport_set_features(struct net_device *dev, @@ -1236,6 +1249,12 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, /* Zero-out TSB by default */ memset(tsb, 0, sizeof(*tsb)); + if (skb_vlan_tag_present(skb)) { + tsb->pcp_dei_vid = (skb_vlan_tag_get_prio(skb) >> + VLAN_PRIO_SHIFT & PCP_DEI_MASK); + tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT; + } + if (skb->ip_summed == CHECKSUM_PARTIAL) { ip_ver = skb->protocol; switch (ip_ver) { @@ -1251,6 +1270,9 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, /* Get the checksum offset and the L4 (transport) offset */ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb); + /* Account for the HW inserted VLAN tag */ + if (skb_vlan_tag_present(skb)) + csum_start += VLAN_HLEN; csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK; csum_info |= (csum_start << L4_PTR_SHIFT); @@ -1330,6 +1352,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb, DESC_STATUS_SHIFT; if (skb->ip_summed == CHECKSUM_PARTIAL) len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT); + if (skb_vlan_tag_present(skb)) + len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT); ring->curr_desc++; if (ring->curr_desc == ring->size) @@ -1503,7 +1527,13 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, reg |= RING_IGNORE_STATUS; } tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); - tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); + reg = 0; + /* Adjust the packet size calculations if SYSTEMPORT is responsible + * for HW insertion of VLAN tags + */ + if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX) + reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT; + tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index)); /* Enable ACB algorithm 2 */ reg = tdma_readl(priv, TDMA_CONTROL); @@ -2523,7 +2553,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64); dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA | - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_TX; dev->hw_features |= 
dev->features; dev->vlan_features |= dev->features; From 14f5d8e3bb4cffce4590c04b5d9c6dc1fc397f85 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 6 Jul 2020 14:53:41 -0700 Subject: [PATCH 0729/2056] ice: add documentation for device-caps region The recent change by commit 8d7aab3515fa ("ice: implement snapshot for device capabilities") to implement the device-caps region for the ice driver forgot to document it. Add documentation to the ice devlink documentation file describing the new region and add some sample output to the shell commands provided as an example. Fixes: 8d7aab3515fa ("ice: implement snapshot for device capabilities") Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- Documentation/networking/devlink/ice.rst | 55 +++++++++++++++++++++++- 1 file changed, 53 insertions(+), 2 deletions(-) diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst index 72ea8d295724..237848d56f9b 100644 --- a/Documentation/networking/devlink/ice.rst +++ b/Documentation/networking/devlink/ice.rst @@ -84,8 +84,20 @@ The ``ice`` driver reports the following versions Regions ======= -The ``ice`` driver enables access to the contents of the Non Volatile Memory -flash chip via the ``nvm-flash`` region. +The ``ice`` driver implements the following regions for accessing internal +device data. + +.. list-table:: regions implemented + :widths: 15 85 + + * - Name + - Description + * - ``nvm-flash`` + - The contents of the entire flash chip, sometimes referred to as + the device's Non Volatile Memory. + * - ``device-caps`` + - The contents of the device firmware's capabilities buffer. Useful to + determine the current state and configuration of the device. Users can request an immediate capture of a snapshot via the ``DEVLINK_CMD_REGION_NEW`` @@ -105,3 +117,42 @@ Users can request an immediate capture of a snapshot via the 0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30 $ devlink region delete pci/0000:01:00.0/nvm-flash snapshot 1 + + $ devlink region new pci/0000:01:00.0/device-caps snapshot 1 + $ devlink region dump pci/0000:01:00.0/device-caps snapshot 1 + 0000000000000000 01 00 01 00 00 00 00 00 01 00 00 00 00 00 00 00 + 0000000000000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000020 02 00 02 01 32 03 00 00 0a 00 00 00 25 00 00 00 + 0000000000000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000040 04 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000060 05 00 01 00 03 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000080 06 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000000a0 08 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000000c0 12 00 01 00 01 00 00 00 01 00 01 00 00 00 00 00 + 00000000000000d0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000000e0 13 00 01 00 00 01 00 00 00 00 00 00 00 00 00 00 + 00000000000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000100 14 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000110 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000120 15 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000130 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000140 16 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 
0000000000000150 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000160 17 00 01 00 06 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000170 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000180 18 00 01 00 01 00 00 00 01 00 00 00 08 00 00 00 + 0000000000000190 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000001a0 22 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00 + 00000000000001b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000001c0 40 00 01 00 00 08 00 00 08 00 00 00 00 00 00 00 + 00000000000001d0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 00000000000001e0 41 00 01 00 00 08 00 00 00 00 00 00 00 00 00 00 + 00000000000001f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + 0000000000000200 42 00 01 00 00 08 00 00 00 00 00 00 00 00 00 00 + 0000000000000210 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + + $ devlink region delete pci/0000:01:00.0/device-caps snapshot 1 From 5411ca7178d40d94e009f3a00f10e20e6067037c Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Mon, 6 Jul 2020 18:27:07 -0700 Subject: [PATCH 0730/2056] net: sfp: Unique GPIO interrupt names Dynamically generate a unique GPIO interrupt name, based on the device name and the GPIO name. For example: 103: 0 sx1503q 12 Edge sff2-los 104: 0 sx1503q 13 Edge sff2-tx-fault The sffX indicates the SFP the los and tx-fault are associated with. v3: - reverse Christmas tree new variable - fix spaces vs tabs v2: - added net-next to PATCH part of subject line - switched to devm_kasprintf() Signed-off-by: Chris Healy Signed-off-by: David S. Miller --- drivers/net/phy/sfp.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 73c2969f11a4..7bdfcde98266 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2238,6 +2238,7 @@ static int sfp_probe(struct platform_device *pdev) { const struct sff_data *sff; struct i2c_adapter *i2c; + char *sfp_irq_name; struct sfp *sfp; int err, i; @@ -2349,12 +2350,16 @@ static int sfp_probe(struct platform_device *pdev) continue; } + sfp_irq_name = devm_kasprintf(sfp->dev, GFP_KERNEL, + "%s-%s", dev_name(sfp->dev), + gpio_of_names[i]); + err = devm_request_threaded_irq(sfp->dev, sfp->gpio_irq[i], NULL, sfp_irq, IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - dev_name(sfp->dev), sfp); + sfp_irq_name, sfp); if (err) { sfp->gpio_irq[i] = 0; sfp->need_poll = true; From 3f2edd309ff02c973bc3cb815c87def07a1ac804 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:33 +0200 Subject: [PATCH 0731/2056] net: phy: at803x: Avoid comparison is always false warning By placing the GENMASK value into an unsigned int and then passing it to PREF_FIELD, the type is reduces down from ULL. Given the reduced size of the type, the range checks in PREP_FAIL() are always true, and -Wtype-limits then gives a warning. By skipping the intermediate variable, the warning can be avoided. Reviewed-by: Jakub Kicinski Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/at803x.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 97cbe593f0ea..96c61aa75bd7 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -400,8 +400,8 @@ static int at803x_parse_dt(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; struct at803x_priv *priv = phydev->priv; - unsigned int sel, mask; u32 freq, strength; + unsigned int sel; int ret; if (!IS_ENABLED(CONFIG_OF_MDIO)) @@ -409,7 +409,6 @@ static int at803x_parse_dt(struct phy_device *phydev) ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq); if (!ret) { - mask = AT803X_CLK_OUT_MASK; switch (freq) { case 25000000: sel = AT803X_CLK_OUT_25MHZ_XTAL; @@ -428,8 +427,8 @@ static int at803x_parse_dt(struct phy_device *phydev) return -EINVAL; } - priv->clk_25m_reg |= FIELD_PREP(mask, sel); - priv->clk_25m_mask |= mask; + priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel); + priv->clk_25m_mask |= AT803X_CLK_OUT_MASK; /* Fixup for the AR8030/AR8035. This chip has another mask and * doesn't support the DSP reference. Eg. the lowest bit of the From 19c5a5fec30bde10d269a9f638566195016c90ed Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:34 +0200 Subject: [PATCH 0732/2056] net: phy: Fixup parameters in kerneldoc Correct the kerneldoc for a few structure and function calls, as reported by C=1 W=1. Cc: Alexandru Ardelean Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/adin.c | 12 ++++++------ drivers/net/phy/mdio-boardinfo.c | 3 ++- drivers/net/phy/mdio_device.c | 2 +- 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/phy/adin.c b/drivers/net/phy/adin.c index c7eabe4382fb..7471a8b90873 100644 --- a/drivers/net/phy/adin.c +++ b/drivers/net/phy/adin.c @@ -106,8 +106,8 @@ /** * struct adin_cfg_reg_map - map a config value to aregister value - * @cfg value in device configuration - * @reg value in the register + * @cfg: value in device configuration + * @reg: value in the register */ struct adin_cfg_reg_map { int cfg; @@ -135,9 +135,9 @@ static const struct adin_cfg_reg_map adin_rmii_fifo_depths[] = { /** * struct adin_clause45_mmd_map - map to convert Clause 45 regs to Clause 22 - * @devad device address used in Clause 45 access - * @cl45_regnum register address defined by Clause 45 - * @adin_regnum equivalent register address accessible via Clause 22 + * @devad: device address used in Clause 45 access + * @cl45_regnum: register address defined by Clause 45 + * @adin_regnum: equivalent register address accessible via Clause 22 */ struct adin_clause45_mmd_map { int devad; @@ -174,7 +174,7 @@ static const struct adin_hw_stat adin_hw_stats[] = { /** * struct adin_priv - ADIN PHY driver private data - * stats statistic counters for the PHY + * @stats: statistic counters for the PHY */ struct adin_priv { u64 stats[ARRAY_SIZE(adin_hw_stats)]; diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c index d9b54c67ef9f..033df435f76c 100644 --- a/drivers/net/phy/mdio-boardinfo.c +++ b/drivers/net/phy/mdio-boardinfo.c @@ -17,7 +17,8 @@ static DEFINE_MUTEX(mdio_board_lock); /** * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices * from pre-collected board specific MDIO information - * @mdiodev: MDIO device pointer + * @bus: Bus the board_info belongs to + * @cb: Callback to create device on bus * Context: can sleep */ void 
mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus, diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index be615504b829..0f625a1b1644 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -181,7 +181,7 @@ static int mdio_remove(struct device *dev) /** * mdio_driver_register - register an mdio_driver with the MDIO layer - * @new_driver: new mdio_driver to register + * @drv: new mdio_driver to register */ int mdio_driver_register(struct mdio_driver *drv) { From 3970ed49a46bd0b062870edcc0894b2bd820371d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:35 +0200 Subject: [PATCH 0733/2056] net: phy: Properly define genphy_c45_driver Avoid the W=1 warning that symbol 'genphy_c45_driver' was not declared. Should it be static? Declare it on the phy header file. Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 1 - include/linux/phy.h | 3 +++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index eb1068a77ce1..98be28567c65 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -227,7 +227,6 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev) } static struct phy_driver genphy_driver; -extern struct phy_driver genphy_c45_driver; static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); diff --git a/include/linux/phy.h b/include/linux/phy.h index 101a48fa6750..1592c3d0e12f 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1385,6 +1385,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); +/* Generic C45 PHY driver */ +extern struct phy_driver genphy_c45_driver; + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); From 4f2b38e3ea4a64ea23e313a78d9974ca6b5d7703 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:36 +0200 Subject: [PATCH 0734/2056] net: phy: Make phy_10gbit_fec_features_array static This array is not used outside of phy_device.c, so make it static. Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 98be28567c65..cf3505e2f587 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -106,10 +106,9 @@ const int phy_10gbit_features_array[1] = { }; EXPORT_SYMBOL_GPL(phy_10gbit_features_array); -const int phy_10gbit_fec_features_array[1] = { +static const int phy_10gbit_fec_features_array[1] = { ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, }; -EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array); __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_10gbit_full_features); From 82e7627fd4d8592be2acb908943fb943c440840d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:37 +0200 Subject: [PATCH 0735/2056] net: phy: dp83640: Fixup cast to restricted __be16 warning ntohs() expects to be passed a __be16. Correct the type of the variable holding the sequence ID. Cc: Richard Cochran Acked-by: Richard Cochran Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83640.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index ecbd5e0d685c..da31756f5a70 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -803,9 +803,10 @@ static int decode_evnt(struct dp83640_private *dp83640, static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) { - u16 *seqid, hash; unsigned int offset = 0; u8 *msgtype, *data = skb_mac_header(skb); + __be16 *seqid; + u16 hash; /* check sequenceID, messageType, 12 bit hash of offset 20-29 */ @@ -836,7 +837,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) if (rxts->msgtype != (*msgtype & 0xf)) return 0; - seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID); if (rxts->seqid != ntohs(*seqid)) return 0; From c7b04d1030f505f2d883612ed31a6ddd70182705 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:38 +0200 Subject: [PATCH 0736/2056] net: phy: cavium: Improve __iomem mess The MIPS low level register access functions seem to be missing __iomem annotation. This causes lots of sparse warnings, when code casts off the __iomem. Make the Cavium MDIO drivers cleaner by pushing the casts lower down into the helpers, allow the drivers to work as normal, with __iomem. bus->register_base is now an void *, rather than a u64. So forming the mii_bus->id string cannot use %llx any more. Use %px, so this kernel address is still exposed to user space, as it was before. v2: s/cases/causes/g Cc: Sunil Goutham Cc: Robert Richter Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-cavium.h | 14 +++++++------- drivers/net/phy/mdio-octeon.c | 5 ++--- drivers/net/phy/mdio-thunder.c | 2 +- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h index e33d3ea9a907..a2245d436f5d 100644 --- a/drivers/net/phy/mdio-cavium.h +++ b/drivers/net/phy/mdio-cavium.h @@ -90,7 +90,7 @@ union cvmx_smix_wr_dat { struct cavium_mdiobus { struct mii_bus *mii_bus; - u64 register_base; + void __iomem *register_base; enum cavium_mdiobus_mode mode; }; @@ -98,20 +98,20 @@ struct cavium_mdiobus { #include -static inline void oct_mdio_writeq(u64 val, u64 addr) +static inline void oct_mdio_writeq(u64 val, void __iomem *addr) { - cvmx_write_csr(addr, val); + cvmx_write_csr((u64 __force)addr, val); } -static inline u64 oct_mdio_readq(u64 addr) +static inline u64 oct_mdio_readq(void __iomem *addr) { - return cvmx_read_csr(addr); + return cvmx_read_csr((u64 __force)addr); } #else #include -#define oct_mdio_writeq(val, addr) writeq(val, (void *)addr) -#define oct_mdio_readq(addr) readq((void *)addr) +#define oct_mdio_writeq(val, addr) writeq(val, addr) +#define oct_mdio_readq(addr) readq(addr) #endif int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index 8327382aa568..a2f93948db97 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -44,8 +44,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) return -ENXIO; } - bus->register_base = - (u64)devm_ioremap(&pdev->dev, mdio_phys, regsize); + bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize); if (!bus->register_base) { dev_err(&pdev->dev, "dev_ioremap failed\n"); return -ENOMEM; @@ -56,7 +55,7 @@ static int 
octeon_mdiobus_probe(struct platform_device *pdev) oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); bus->mii_bus->name = KBUILD_MODNAME; - snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base); + snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base); bus->mii_bus->parent = &pdev->dev; bus->mii_bus->read = cavium_mdiobus_read; diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c index 2a97938d1972..3d7eda99d34e 100644 --- a/drivers/net/phy/mdio-thunder.c +++ b/drivers/net/phy/mdio-thunder.c @@ -84,7 +84,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, nexus->buses[i] = bus; i++; - bus->register_base = (u64)nexus->bar0 + + bus->register_base = nexus->bar0 + r.start - pci_resource_start(pdev, 0); smi_en.u64 = 0; From 791e5f61aec5a1332a8f760f43a6d32c416df602 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Tue, 7 Jul 2020 03:49:39 +0200 Subject: [PATCH 0737/2056] net: phy: mdio-octeon: Cleanup module loading dependencies To ensure that the octeon MDIO driver has been loaded, the Cavium ethernet drivers reference a dummy symbol in the MDIO driver. This forces it to be loaded first. And this symbol has not been cleanly implemented, resulting in warnings when build W=1 C=1. Since device tree is being used, and a phandle points to the PHY on the MDIO bus, we can make use of deferred probing. If the PHY fails to connect, it should be because the MDIO bus driver has not loaded yet. Return -EPROBE_DEFER so it will be tried again later. Additionally, add a MODULE_SOFTDEP() to give user space a hint as to what order it should load the modules. v2: s/octoen/octeon/ Add MODULE_SOFTDEP() Cc: Sunil Goutham Cc: Robert Richter Cc: Chris Packham Tested-by: Chris Packham Acked-by: Greg Kroah-Hartman Reviewed-by: Florian Fainelli Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 7 ++----- drivers/net/phy/mdio-octeon.c | 6 ------ drivers/staging/octeon/ethernet-mdio.c | 2 +- drivers/staging/octeon/ethernet-mdio.h | 2 -- drivers/staging/octeon/ethernet.c | 3 +-- 5 files changed, 4 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index cbaa1924afbe..3e17ce0d2314 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -961,7 +961,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) PHY_INTERFACE_MODE_MII); if (!phydev) - return -ENODEV; + return -EPROBE_DEFER; return 0; } @@ -1554,12 +1554,8 @@ static struct platform_driver octeon_mgmt_driver = { .remove = octeon_mgmt_remove, }; -extern void octeon_mdiobus_force_mod_depencency(void); - static int __init octeon_mgmt_mod_init(void) { - /* Force our mdiobus driver module to be loaded first. 
*/ - octeon_mdiobus_force_mod_depencency(); return platform_driver_register(&octeon_mgmt_driver); } @@ -1571,6 +1567,7 @@ static void __exit octeon_mgmt_mod_exit(void) module_init(octeon_mgmt_mod_init); module_exit(octeon_mgmt_mod_exit); +MODULE_SOFTDEP("pre: mdio-cavium"); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index a2f93948db97..d1e1009d51af 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -108,12 +108,6 @@ static struct platform_driver octeon_mdiobus_driver = { .remove = octeon_mdiobus_remove, }; -void octeon_mdiobus_force_mod_depencency(void) -{ - /* Let ethernet drivers force us to be loaded. */ -} -EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); - module_platform_driver(octeon_mdiobus_driver); MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver"); diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index c798672d61b2..cfb673a52b25 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c @@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) of_node_put(phy_node); if (!phydev) - return -ENODEV; + return -EPROBE_DEFER; priv->last_link = 0; phy_start(phydev); diff --git a/drivers/staging/octeon/ethernet-mdio.h b/drivers/staging/octeon/ethernet-mdio.h index e3771d48c49b..7f6716e3fad4 100644 --- a/drivers/staging/octeon/ethernet-mdio.h +++ b/drivers/staging/octeon/ethernet-mdio.h @@ -22,7 +22,5 @@ extern const struct ethtool_ops cvm_oct_ethtool_ops; -void octeon_mdiobus_force_mod_depencency(void); - int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int cvm_oct_phy_setup_device(struct net_device *dev); diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index f42c3816ce49..204f0b1e2739 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -689,8 +689,6 @@ static int cvm_oct_probe(struct platform_device *pdev) mtu_overhead += VLAN_HLEN; #endif - octeon_mdiobus_force_mod_depencency(); - pip = pdev->dev.of_node; if (!pip) { pr_err("Error: No 'pip' in /aliases\n"); @@ -987,6 +985,7 @@ static struct platform_driver cvm_oct_driver = { module_platform_driver(cvm_oct_driver); +MODULE_SOFTDEP("pre: mdio-cavium"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Cavium Networks "); MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver."); From 86fc3f7074d029ce23271d492a1c8ffee3067244 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 7 Jul 2020 13:11:22 +0530 Subject: [PATCH 0738/2056] sun/niu: add __maybe_unused attribute to PM functions The upgraded .suspend() and .resume() throw "defined but not used [-Wunused-function]" warning for certain configurations. Mark them with "__maybe_unused" attribute. Compile-tested only. Fixes: b0db0cc2f695 ("sun/niu: use generic power management") Reported-by: Stephen Rothwell Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/niu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 68541c823245..b4e20d15d138 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9873,7 +9873,7 @@ static void niu_pci_remove_one(struct pci_dev *pdev) } } -static int niu_suspend(struct device *dev_d) +static int __maybe_unused niu_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); @@ -9900,7 +9900,7 @@ static int niu_suspend(struct device *dev_d) return 0; } -static int niu_resume(struct device *dev_d) +static int __maybe_unused niu_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct niu *np = netdev_priv(dev); From 847d97e013dc8359312e2376c4805ef9d2a52f64 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 7 Jul 2020 18:55:43 +0800 Subject: [PATCH 0739/2056] sun/cassini: mark cas_resume() as __maybe_unused In certain configurations without power management support, gcc report the following warning: drivers/net/ethernet/sun/cassini.c:5206:12: warning: 'cas_resume' defined but not used [-Wunused-function] 5206 | static int cas_resume(struct device *dev_d) | ^~~~~~~~~~ Mark cas_resume() as __maybe_unused to make it clear. Fixes: f193f4ebde3d ("sun/cassini: use generic power management") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/cassini.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 5b11124450d6..3bab2cb700c7 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5203,7 +5203,7 @@ static int __maybe_unused cas_suspend(struct device *dev_d) return 0; } -static int cas_resume(struct device *dev_d) +static int __maybe_unused cas_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct cas *cp = netdev_priv(dev); From b416268b7a819c0508ed0dc81461e513b110f2ac Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 7 Jul 2020 14:40:48 +0200 Subject: [PATCH 0740/2056] mptcp: use mptcp worker for path management We can re-use the existing work queue to handle path management instead of a dedicated work queue. Just move pm_worker to protocol.c, call it from the mptcp worker and get rid of the msk lock (already held). Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/pm.c | 44 +------------------------------------------- net/mptcp/protocol.c | 27 ++++++++++++++++++++++++++- net/mptcp/protocol.h | 3 --- 3 files changed, 27 insertions(+), 47 deletions(-) diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c index 7de09fdd42a3..a8ad20559aaa 100644 --- a/net/mptcp/pm.c +++ b/net/mptcp/pm.c @@ -10,8 +10,6 @@ #include #include "protocol.h" -static struct workqueue_struct *pm_wq; - /* path manager command handlers */ int mptcp_pm_announce_addr(struct mptcp_sock *msk, @@ -78,7 +76,7 @@ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk, return false; msk->pm.status |= BIT(new_status); - if (queue_work(pm_wq, &msk->pm.work)) + if (schedule_work(&msk->work)) sock_hold((struct sock *)msk); return true; } @@ -181,35 +179,6 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) return mptcp_pm_nl_get_local_id(msk, skc); } -static void pm_worker(struct work_struct *work) -{ - struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data, - work); - struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm); - struct sock *sk = (struct sock *)msk; - - lock_sock(sk); - spin_lock_bh(&msk->pm.lock); - - pr_debug("msk=%p status=%x", msk, pm->status); - if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { - pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); - mptcp_pm_nl_add_addr_received(msk); - } - if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { - pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); - mptcp_pm_nl_fully_established(msk); - } - if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { - pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); - mptcp_pm_nl_subflow_established(msk); - } - - spin_unlock_bh(&msk->pm.lock); - release_sock(sk); - sock_put(sk); -} - void mptcp_pm_data_init(struct mptcp_sock *msk) { msk->pm.add_addr_signaled = 0; @@ -223,22 +192,11 @@ void mptcp_pm_data_init(struct mptcp_sock *msk) msk->pm.status = 0; spin_lock_init(&msk->pm.lock); - INIT_WORK(&msk->pm.work, pm_worker); mptcp_pm_nl_data_init(msk); } -void mptcp_pm_close(struct mptcp_sock *msk) -{ - if (cancel_work_sync(&msk->pm.work)) - sock_put((struct sock *)msk); -} - void __init mptcp_pm_init(void) { - pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8); - if (!pm_wq) - panic("Failed to allocate workqueue"); - mptcp_pm_nl_init(); } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3ab060e30038..dbe43e0cd734 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1214,6 +1214,29 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) return 0; } +static void pm_work(struct mptcp_sock *msk) +{ + struct mptcp_pm_data *pm = &msk->pm; + + spin_lock_bh(&msk->pm.lock); + + pr_debug("msk=%p status=%x", msk, pm->status); + if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) { + pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED); + mptcp_pm_nl_add_addr_received(msk); + } + if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) { + pm->status &= ~BIT(MPTCP_PM_ESTABLISHED); + mptcp_pm_nl_fully_established(msk); + } + if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) { + pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED); + mptcp_pm_nl_subflow_established(msk); + } + + spin_unlock_bh(&msk->pm.lock); +} + static void mptcp_worker(struct work_struct *work) { struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); @@ -1230,6 +1253,9 @@ static void mptcp_worker(struct work_struct *work) __mptcp_flush_join_list(msk); __mptcp_move_skbs(msk); + if (msk->pm.status) + pm_work(msk); + if (test_and_clear_bit(MPTCP_WORK_EOF, 
&msk->flags)) mptcp_check_for_eof(msk); @@ -1420,7 +1446,6 @@ static void mptcp_close(struct sock *sk, long timeout) } mptcp_cancel_work(sk); - mptcp_pm_close(msk); __skb_queue_purge(&sk->sk_receive_queue); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index a6412ff0fddb..39bfec3f1586 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -174,8 +174,6 @@ struct mptcp_pm_data { u8 local_addr_max; u8 subflows_max; u8 status; - - struct work_struct work; }; struct mptcp_data_frag { @@ -412,7 +410,6 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac); void __init mptcp_pm_init(void); void mptcp_pm_data_init(struct mptcp_sock *msk); -void mptcp_pm_close(struct mptcp_sock *msk); void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side); void mptcp_pm_fully_established(struct mptcp_sock *msk); bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk); From 4e48978cd28ce51945c08650e5c5502ca41e1fcc Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Tue, 7 Jul 2020 15:19:13 +0200 Subject: [PATCH 0741/2056] mvpp2: fix pointer check priv->page_pool is an array, so comparing against it will always return true. Do a meaningful check by checking priv->page_pool[0] instead. While at it, clear the page_pool pointers on deallocation, or when an allocation error happens during init. Reported-by: Colin Ian King Fixes: c2d6fe6163de ("mvpp2: XDP TX support") Signed-off-by: Matteo Croce Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index d8e238ed533e..6a3f356640a0 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -548,8 +548,10 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv, val |= MVPP2_BM_STOP_MASK; mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); - if (priv->percpu_pools) + if (priv->percpu_pools) { page_pool_destroy(priv->page_pool[bm_pool->id]); + priv->page_pool[bm_pool->id] = NULL; + } dma_free_coherent(dev, bm_pool->size_bytes, bm_pool->virt_addr, @@ -609,8 +611,15 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv) mvpp2_pools[pn].buf_num, mvpp2_pools[pn].pkt_size, dma_dir); - if (IS_ERR(priv->page_pool[i])) + if (IS_ERR(priv->page_pool[i])) { + int j; + + for (j = 0; j < i; j++) { + page_pool_destroy(priv->page_pool[j]); + priv->page_pool[j] = NULL; + } return PTR_ERR(priv->page_pool[i]); + } } } @@ -4486,7 +4495,7 @@ static int mvpp2_check_pagepool_dma(struct mvpp2_port *port) if (!priv->percpu_pools) return err; - if (!priv->page_pool) + if (!priv->page_pool[0]) return -ENOMEM; for (i = 0; i < priv->port_count; i++) { From 622594f2ad8be03d55a56f4f91c41ae5f3849703 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Fri, 3 Jul 2020 08:31:37 +0530 Subject: [PATCH 0742/2056] epic100: use generic power management Drivers should not use legacy power management as they have to manage power states and related operations, for the device, themselves. With generic PM, all essentials will be handled by the PCI core. Driver needs to do only device-specific operations. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/smsc/epic100.c | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 61ddee0c2a2e..d950b312c418 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -1512,12 +1512,9 @@ static void epic_remove_one(struct pci_dev *pdev) /* pci_power_off(pdev, -1); */ } - -#ifdef CONFIG_PM - -static int epic_suspend (struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused epic_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct epic_private *ep = netdev_priv(dev); void __iomem *ioaddr = ep->ioaddr; @@ -1531,9 +1528,9 @@ static int epic_suspend (struct pci_dev *pdev, pm_message_t state) } -static int epic_resume (struct pci_dev *pdev) +static int __maybe_unused epic_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); if (!netif_running(dev)) return 0; @@ -1542,18 +1539,14 @@ static int epic_resume (struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - +static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume); static struct pci_driver epic_driver = { .name = DRV_NAME, .id_table = epic_pci_tbl, .probe = epic_init_one, .remove = epic_remove_one, -#ifdef CONFIG_PM - .suspend = epic_suspend, - .resume = epic_resume, -#endif /* CONFIG_PM */ + .driver.pm = &epic_pm_ops, }; From 53fff2bfb3fcce7f7e8ac424da73070ac7a550da Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Fri, 3 Jul 2020 08:31:38 +0530 Subject: [PATCH 0743/2056] smsc9420: use generic power management Drivers should not use legacy power management as they have to manage power states and related operations, for the device, themselves. This driver was handling them with the help of PCI helper functions. With generic PM, all essentials will be handled by the PCI core. Driver needs to do only device-specific operations. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. 
Miller --- drivers/net/ethernet/smsc/smsc9420.c | 40 ++++++++-------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 7312e522c022..42bef04d65ba 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1422,11 +1422,9 @@ static int smsc9420_open(struct net_device *dev) return result; } -#ifdef CONFIG_PM - -static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused smsc9420_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct smsc9420_pdata *pd = netdev_priv(dev); u32 int_cfg; ulong flags; @@ -1451,34 +1449,21 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); } - pci_save_state(pdev); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + device_wakeup_disable(dev_d); return 0; } -static int smsc9420_resume(struct pci_dev *pdev) +static int __maybe_unused smsc9420_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - struct smsc9420_pdata *pd = netdev_priv(dev); + struct net_device *dev = dev_get_drvdata(dev_d); int err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); + pci_set_master(to_pci_dev(dev_d)); - err = pci_enable_device(pdev); - if (err) - return err; - - pci_set_master(pdev); - - err = pci_enable_wake(pdev, PCI_D0, 0); - if (err) - netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n", - err); + device_wakeup_disable(dev_d); + err = 0; if (netif_running(dev)) { /* FIXME: gross. It looks like ancient PM relic.*/ err = smsc9420_open(dev); @@ -1487,8 +1472,6 @@ static int smsc9420_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ - static const struct net_device_ops smsc9420_netdev_ops = { .ndo_open = smsc9420_open, .ndo_stop = smsc9420_stop, @@ -1658,15 +1641,14 @@ static void smsc9420_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static SIMPLE_DEV_PM_OPS(smsc9420_pm_ops, smsc9420_suspend, smsc9420_resume); + static struct pci_driver smsc9420_driver = { .name = DRV_NAME, .id_table = smsc9420_id_table, .probe = smsc9420_probe, .remove = smsc9420_remove, -#ifdef CONFIG_PM - .suspend = smsc9420_suspend, - .resume = smsc9420_resume, -#endif /* CONFIG_PM */ + .driver.pm = &smsc9420_pm_ops, }; static int __init smsc9420_init_module(void) From 1814cff26739de7d02db6193bc620d0a4bdea676 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Sun, 5 Jul 2020 11:55:47 +0200 Subject: [PATCH 0744/2056] net: phy: add a Kconfig option for mdio_devres If phylib is built as a module and CONFIG_MDIO_DEVICE is 'y', the mdio_device and mdio_bus code will be in the phylib module, not in the kernel image. 
Meanwhile we build mdio_devres depending on the CONFIG_MDIO_DEVICE symbol, so if it's 'y', it will go into the kernel and we'll hit the following linker error: ld: drivers/net/phy/mdio_devres.o: in function `devm_mdiobus_alloc_size': >> drivers/net/phy/mdio_devres.c:38: undefined reference to `mdiobus_alloc_size' ld: drivers/net/phy/mdio_devres.o: in function `devm_mdiobus_free': >> drivers/net/phy/mdio_devres.c:16: undefined reference to `mdiobus_free' ld: drivers/net/phy/mdio_devres.o: in function `__devm_mdiobus_register': >> drivers/net/phy/mdio_devres.c:87: undefined reference to `__mdiobus_register' ld: drivers/net/phy/mdio_devres.o: in function `devm_mdiobus_unregister': >> drivers/net/phy/mdio_devres.c:53: undefined reference to `mdiobus_unregister' ld: drivers/net/phy/mdio_devres.o: in function `devm_of_mdiobus_register': >> drivers/net/phy/mdio_devres.c:120: undefined reference to `of_mdiobus_register' Add a hidden Kconfig option for MDIO_DEVRES which will be currently selected by CONFIG_PHYLIB as there are no non-phylib users of these helpers. Reported-by: kernel test robot Fixes: ac3a68d56651 ("net: phy: don't abuse devres in devm_mdiobus_register()") Signed-off-by: Bartosz Golaszewski Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 4 ++++ drivers/net/phy/Makefile | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index e351d65533aa..7ffa8a4529a8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -21,6 +21,9 @@ config MDIO_BUS if MDIO_BUS +config MDIO_DEVRES + tristate + config MDIO_ASPEED tristate "ASPEED MDIO bus controller" depends on ARCH_ASPEED || COMPILE_TEST @@ -252,6 +255,7 @@ menuconfig PHYLIB tristate "PHY Device support and infrastructure" depends on NETDEVICES select MDIO_DEVICE + select MDIO_DEVRES help Ethernet controllers are usually attached to PHY devices. This option provides infrastructure for diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index c9a9adf194d5..d84bab489a53 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -4,7 +4,6 @@ libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ linkmode.o mdio-bus-y += mdio_bus.o mdio_device.o -mdio-devres-y += mdio_devres.o ifdef CONFIG_MDIO_DEVICE obj-y += mdio-boardinfo.o @@ -18,7 +17,7 @@ libphy-y += $(mdio-bus-y) else obj-$(CONFIG_MDIO_DEVICE) += mdio-bus.o endif -obj-$(CONFIG_MDIO_DEVICE) += mdio-devres.o +obj-$(CONFIG_MDIO_DEVRES) += mdio_devres.o libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o From 1a4d681634fad302157afcb53a178c4c59cb75ee Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 5 Jul 2020 19:55:44 -0700 Subject: [PATCH 0745/2056] Documentation: networking: fix ethtool-netlink table formats Fix table formatting to eliminate warnings. Documentation/networking/ethtool-netlink.rst:509: WARNING: Malformed table. Documentation/networking/ethtool-netlink.rst:522: WARNING: Malformed table. Documentation/networking/ethtool-netlink.rst:543: WARNING: Malformed table. Documentation/networking/ethtool-netlink.rst:555: WARNING: Malformed table. Documentation/networking/ethtool-netlink.rst:591: WARNING: Malformed table. Signed-off-by: Randy Dunlap Signed-off-by: David S. 
Miller --- Documentation/networking/ethtool-netlink.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 396390f4936b..459a0d11cfde 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -506,13 +506,13 @@ Link extended states: power required from cable or module ``ETHTOOL_LINK_EXT_STATE_OVERHEAT`` The module is overheated - ================================================= ============================================ + ================================================ ============================================ Link extended substates: Autoneg substates: - ============================================================== ================================ + =============================================================== ================================ ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED`` Peer side is down ``ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED`` Ack not received from peer side @@ -527,11 +527,11 @@ Link extended substates: in both sides are mismatched ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD`` No Highest Common Denominator - ============================================================== ================================ + =============================================================== ================================ Link training substates: - ========================================================================== ==================== + =========================================================================== ==================== ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED`` Frames were not recognized, the lock failed @@ -547,11 +547,11 @@ Link extended substates: ``ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT`` Remote side is not ready yet - ========================================================================== ==================== + =========================================================================== ==================== Link logical mismatch substates: - =============================================================== =============================== + ================================================================ =============================== ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK`` Physical coding sublayer was not locked in first phase - block lock @@ -568,7 +568,7 @@ Link extended substates: ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED`` RS forward error correction is not locked - =============================================================== =============================== + ================================================================ =============================== Bad signal integrity substates: @@ -585,11 +585,11 @@ Link extended substates: Cable issue substates: - ================================================== ============================================ + =================================================== ============================================ ``ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE`` Unsupported cable ``ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE`` Cable test failure - ================================================== ============================================ + =================================================== ============================================ DEBUG_GET ========= From 4895d7808e7030c831f2ef83a3cc6ec0d46c30b1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 
5 Jul 2020 21:27:56 -0700 Subject: [PATCH 0746/2056] net: ethtool: Introduce ethtool_phy_ops In order to decouple ethtool from its PHY library dependency, define an ethtool_phy_ops singleton which can be overridden by the PHY library when it loads with an appropriate set of function pointers. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/linux/ethtool.h | 25 +++++++++++++++++++++++++ net/ethtool/common.c | 11 +++++++++++ net/ethtool/common.h | 2 ++ 3 files changed, 38 insertions(+) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 48ad3b6a0150..0c139a93b67a 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -502,5 +502,30 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); +struct netlink_ext_ack; +struct phy_device; +struct phy_tdr_config; + +/** + * struct ethtool_phy_ops - Optional PHY device options + * @start_cable_test - Start a cable test + * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test + * + * All operations are optional (i.e. the function pointer may be set to %NULL) + * and callers must take this into account. Callers must hold the RTNL lock. + */ +struct ethtool_phy_ops { + int (*start_cable_test)(struct phy_device *phydev, + struct netlink_ext_ack *extack); + int (*start_cable_test_tdr)(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config); +}; + +/** + * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton + * @ops: Ethtool PHY operations to set + */ +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); #endif /* _LINUX_ETHTOOL_H */ diff --git a/net/ethtool/common.c b/net/ethtool/common.c index aaecfc916a4d..ce4dbae5a943 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -2,6 +2,7 @@ #include #include +#include #include "common.h" @@ -373,3 +374,13 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) return 0; } + +const struct ethtool_phy_ops *ethtool_phy_ops; + +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops) +{ + rtnl_lock(); + ethtool_phy_ops = ops; + rtnl_unlock(); +} +EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops); diff --git a/net/ethtool/common.h b/net/ethtool/common.h index a62f68ccc43a..b83bef38368c 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -37,4 +37,6 @@ bool convert_legacy_settings_to_link_ksettings( int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max); int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +extern const struct ethtool_phy_ops *ethtool_phy_ops; + #endif /* _ETHTOOL_COMMON_H */ From 55d8f053ce1bb2c010458c6e9883b9c7ac12adc3 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 5 Jul 2020 21:27:57 -0700 Subject: [PATCH 0747/2056] net: phy: Register ethtool PHY operations Utilize ethtool_set_ethtool_phy_ops to register a suitable set of PHY ethtool operations in a dynamic fashion such that ethtool will no longer directly reference PHY library symbols. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index cf3505e2f587..233334406f0f 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3027,6 +3027,11 @@ static struct phy_driver genphy_driver = { .set_loopback = genphy_loopback, }; +static const struct ethtool_phy_ops phy_ethtool_phy_ops = { + .start_cable_test = phy_start_cable_test, + .start_cable_test_tdr = phy_start_cable_test_tdr, +}; + static int __init phy_init(void) { int rc; @@ -3035,6 +3040,7 @@ static int __init phy_init(void) if (rc) return rc; + ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops); features_init(); rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE); @@ -3056,6 +3062,7 @@ static void __exit phy_exit(void) phy_driver_unregister(&genphy_c45_driver); phy_driver_unregister(&genphy_driver); mdio_bus_exit(); + ethtool_set_ethtool_phy_ops(NULL); } subsys_initcall(phy_init); From f3631ab08eeb67ad023ba1b37004ecba0158f93f Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 5 Jul 2020 21:27:58 -0700 Subject: [PATCH 0748/2056] net: ethtool: Remove PHYLIB direct dependency Now that we have introduced ethtool_phy_ops and the PHY library dynamically registers its operations with that function pointer, we can remove the direct PHYLIB dependency in favor of using dynamic operations. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/Kconfig | 1 - net/ethtool/cabletest.c | 18 ++++++++++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/net/Kconfig b/net/Kconfig index d1672280d6a4..3831206977a1 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -455,7 +455,6 @@ config FAILOVER config ETHTOOL_NETLINK bool "Netlink interface for ethtool" default y - depends on PHYLIB=y || PHYLIB=n help An alternative userspace interface for ethtool based on generic netlink. 
It provides better extensibility and some new features, diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 7194956aa09e..888f6e101f34 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -58,6 +58,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ETHTOOL_A_CABLE_TEST_MAX + 1]; struct ethnl_req_info req_info = {}; + const struct ethtool_phy_ops *ops; struct net_device *dev; int ret; @@ -81,11 +82,17 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info) } rtnl_lock(); + ops = ethtool_phy_ops; + if (!ops || !ops->start_cable_test) { + ret = -EOPNOTSUPP; + goto out_rtnl; + } + ret = ethnl_ops_begin(dev); if (ret < 0) goto out_rtnl; - ret = phy_start_cable_test(dev->phydev, info->extack); + ret = ops->start_cable_test(dev->phydev, info->extack); ethnl_ops_complete(dev); @@ -308,6 +315,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1]; struct ethnl_req_info req_info = {}; + const struct ethtool_phy_ops *ops; struct phy_tdr_config cfg; struct net_device *dev; int ret; @@ -337,11 +345,17 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info) goto out_dev_put; rtnl_lock(); + ops = ethtool_phy_ops; + if (!ops || !ops->start_cable_test_tdr) { + ret = -EOPNOTSUPP; + goto out_rtnl; + } + ret = ethnl_ops_begin(dev); if (ret < 0) goto out_rtnl; - ret = phy_start_cable_test_tdr(dev->phydev, info->extack, &cfg); + ret = ops->start_cable_test_tdr(dev->phydev, info->extack, &cfg); ethnl_ops_complete(dev); From 2291bde8c0cfc01090473802a7c03a38171daacb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 6 Jul 2020 13:12:40 +0100 Subject: [PATCH 0749/2056] bnx2x: fix spelling mistake "occurd" -> "occurred" There are spelling mistakes in various literal strings. Fix these. Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c index 48f63ef2e6ea..3f8bdad3351c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c @@ -2218,31 +2218,31 @@ static struct st_record st_database[ST_DB_LINES] = { /*line 352*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_OCCURRED, NA, NA, NA, pneq, NA, IDLE_CHK_ERROR, - "MISC: system kill occurd;", + "MISC: system kill occurred;", {NA, NA, 0, NA, NA, NA} }, /*line 353*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_0, NA, NA, NA, pneq, NA, IDLE_CHK_ERROR, - "MISC: system kill occurd; status_0 register", + "MISC: system kill occurred; status_0 register", {NA, NA, 0, NA, NA, NA} }, /*line 354*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_1, NA, NA, NA, pneq, NA, IDLE_CHK_ERROR, - "MISC: system kill occurd; status_1 register", + "MISC: system kill occurred; status_1 register", {NA, NA, 0, NA, NA, NA} }, /*line 355*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_2, NA, NA, NA, pneq, NA, IDLE_CHK_ERROR, - "MISC: system kill occurd; status_2 register", + "MISC: system kill occurred; status_2 register", {NA, NA, 0, NA, NA, NA} }, /*line 356*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_3, NA, NA, NA, pneq, NA, IDLE_CHK_ERROR, - "MISC: system kill occurd; status_3 register", + "MISC: system kill occurred; status_3 register", {NA, NA, 0, NA, NA, NA} }, /*line 357*/{(0x1E), 1, MISC_REG_PCIE_HOT_RESET, From 1fd52137d3cffcd17225574052e7f34b85f1572b Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 6 Jul 2020 22:50:18 +0200 Subject: [PATCH 0750/2056] Replace HTTP links with HTTPS ones: GRETH 10/100/1G Ethernet MAC device driver Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/net/ethernet/aeroflex/greth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index bf546118dbc6..9c5891bbfe61 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -8,7 +8,7 @@ * available in the GRLIB VHDL IP core library. * * Full documentation of both cores can be found here: - * http://www.gaisler.com/products/grlib/grip.pdf + * https://www.gaisler.com/products/grlib/grip.pdf * * The Gigabit version supports scatter/gather DMA, any alignment of * buffers and checksum offloading. From 535094a0c9c4908a31f3158a647e203ceeecf277 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 6 Jul 2020 22:50:30 +0200 Subject: [PATCH 0751/2056] Replace HTTP links with HTTPS ones: X.25 network layer Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: David S. Miller --- net/x25/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/x25/Kconfig b/net/x25/Kconfig index e3ed23245a82..68729aa3a5d5 100644 --- a/net/x25/Kconfig +++ b/net/x25/Kconfig @@ -17,7 +17,7 @@ config X25 if you want that) and the lower level data link layer protocol LAPB (say Y to "LAPB Data Link Driver" below if you want that). - You can read more about X.25 at and + You can read more about X.25 at and . Information about X.25 for Linux is contained in the files and From 964201de695b8afca80652f048a95dc2cb68fcb7 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 7 Jul 2020 12:21:38 -0500 Subject: [PATCH 0752/2056] net/sched: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/sched/act_csum.c | 3 ++- net/sched/act_ct.c | 2 +- net/sched/sch_cbq.c | 2 +- net/sched/sch_drr.c | 2 +- net/sched/sch_ets.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_hfsc.c | 2 +- net/sched/sch_htb.c | 2 +- net/sched/sch_multiq.c | 2 +- net/sched/sch_prio.c | 2 +- net/sched/sch_qfq.c | 2 +- net/sched/sch_sfb.c | 2 +- net/sched/sch_sfq.c | 2 +- 14 files changed, 15 insertions(+), 14 deletions(-) diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index cb8608f0a77a..9035355e867f 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -598,7 +598,8 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a, if (!tcf_csum_ipv6(skb, update_flags)) goto drop; break; - case cpu_to_be16(ETH_P_8021AD): /* fall through */ + case cpu_to_be16(ETH_P_8021AD): + fallthrough; case cpu_to_be16(ETH_P_8021Q): if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) { protocol = skb->protocol; diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 1b9c6d4a1b6b..fadfd6b0033b 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -783,7 +783,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct, } } /* Non-ICMP, fall thru to initialize if needed. */ - /* fall through */ + fallthrough; case IP_CT_NEW: /* Seen it before? This can happen for loopback, retrans, * or local packets. 
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 052d4a1af69a..9e16fdf47fe7 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -250,7 +250,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_STOLEN: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; case TC_ACT_RECLASSIFY: diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index 0d5c9a8ec61d..ad14df2ecf3a 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -324,7 +324,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_STOLEN: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 373dc5855d4e..1af86f30b18e 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -397,7 +397,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6bf979f95509..bca016ffc069 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -99,7 +99,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index a27a250ab8f9..741840b9994f 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -102,7 +102,7 @@ static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 7f6670044f0a..13ba8648bb63 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1136,7 +1136,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_STOLEN: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 52fc513688b1..b07f29059f47 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -239,7 +239,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_STOLEN: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 648611f5c105..56bdc4bcdc63 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -43,7 +43,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index a3e187f2603c..46b7ce81c6e3 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -46,7 +46,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + 
fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index ede854516825..3cfe6262eb00 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -699,7 +699,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_STOLEN: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return NULL; } diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index d2a6e78262bb..356f6d1d30db 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -265,7 +265,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return false; } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 46cdefd69e44..a1314510dc69 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -186,7 +186,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; - /* fall through */ + fallthrough; case TC_ACT_SHOT: return 0; } From 8cb601f15886f6d05479e46913d954e9ff237312 Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Tue, 7 Jul 2020 13:32:05 -0700 Subject: [PATCH 0753/2056] net: sfp: add error checking with sfp_irq_name Add error checking with sfp_irq_name before use. Signed-off-by: Chris Healy Signed-off-by: David S. Miller --- drivers/net/phy/sfp.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 7bdfcde98266..eef458ab0e5b 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2354,6 +2354,9 @@ static int sfp_probe(struct platform_device *pdev) "%s-%s", dev_name(sfp->dev), gpio_of_names[i]); + if (!sfp_irq_name) + return -ENOMEM; + err = devm_request_threaded_irq(sfp->dev, sfp->gpio_irq[i], NULL, sfp_irq, IRQF_ONESHOT | From f5836749c9c04a10decd2742845ad4870965fdef Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:25 -0700 Subject: [PATCH 0754/2056] bpf: Add BPF_CGROUP_INET_SOCK_RELEASE hook Sometimes it's handy to know when the socket gets freed. In particular, we'd like to try to use a smarter allocation of ports for bpf_bind and explore the possibility of limiting the number of SOCK_DGRAM sockets the process can have. Implement BPF_CGROUP_INET_SOCK_RELEASE hook that triggers on inet socket release. It triggers only for userspace sockets (not in-kernel ones) and therefore has the same semantics as the existing BPF_CGROUP_INET_SOCK_CREATE. 
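For illustration only, a minimal cgroup/sock_release program could look like the sketch below. It is not part of this patch: the SEC("cgroup/sock_release") section name is the one wired up by the libbpf patch later in this series, and the program and counter names are made up for the example. Returning 1 follows the same allow convention as the existing sock_create hook, and the hook only fires for userspace sockets.

// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch (not from this series): count AF_INET socket releases
 * observed by a cgroup/sock_release program.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

int released = 0;

SEC("cgroup/sock_release")
int count_release(struct bpf_sock *ctx)
{
	/* Atomically bump the counter; userspace can read it back
	 * through the object's global data (e.g. via a skeleton).
	 */
	__sync_fetch_and_add(&released, 1);

	/* 1 == allow, mirroring the sock_create convention. */
	return 1;
}

char _license[] SEC("license") = "GPL";
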
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-2-sdf@google.com --- include/linux/bpf-cgroup.h | 4 ++++ include/uapi/linux/bpf.h | 1 + kernel/bpf/syscall.c | 3 +++ net/core/filter.c | 1 + net/ipv4/af_inet.c | 3 +++ 5 files changed, 12 insertions(+) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c66c545e161a..2c6f26670acc 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -210,6 +210,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ + BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) @@ -401,6 +404,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 8da159936bab..156f51ffada2 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1981,6 +1981,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, case BPF_PROG_TYPE_CGROUP_SOCK: switch (expected_attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return 0; @@ -2779,6 +2780,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_CGROUP_SKB; break; case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_POST_BIND: case BPF_CGROUP_INET6_POST_BIND: return BPF_PROG_TYPE_CGROUP_SOCK; @@ -2929,6 +2931,7 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_CGROUP_INET_INGRESS: case BPF_CGROUP_INET_EGRESS: case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: case BPF_CGROUP_INET4_BIND: case BPF_CGROUP_INET6_BIND: case BPF_CGROUP_INET4_POST_BIND: diff --git a/net/core/filter.c b/net/core/filter.c index c5e696e6c315..ddcc0d6209e1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6890,6 +6890,7 @@ static bool __sock_filter_check_attach_type(int off, case offsetof(struct bpf_sock, priority): switch (attach_type) { case BPF_CGROUP_INET_SOCK_CREATE: + case BPF_CGROUP_INET_SOCK_RELEASE: goto full_access; default: return false; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ea6ed6d487ed..ff141d630bdf 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -411,6 +411,9 @@ int inet_release(struct socket *sock) if (sk) { long timeout; + if (!sk->sk_kern_sock) + BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk); + /* Applications forget to leave groups before exiting */ 
ip_mc_drop_socket(sk); From e8b012e9fabe7eafc1b2c72414e174547683860d Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:26 -0700 Subject: [PATCH 0755/2056] libbpf: Add support for BPF_CGROUP_INET_SOCK_RELEASE Add auto-detection for the cgroup/sock_release programs. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-3-sdf@google.com --- tools/include/uapi/linux/bpf.h | 1 + tools/lib/bpf/libbpf.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index da9bf35a26f8..548a749aebb3 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -226,6 +226,7 @@ enum bpf_attach_type { BPF_CGROUP_INET4_GETSOCKNAME, BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, + BPF_CGROUP_INET_SOCK_RELEASE, __MAX_BPF_ATTACH_TYPE }; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4ea7f4f1a691..88a483627a2b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6917,6 +6917,10 @@ static const struct bpf_sec_def section_defs[] = { BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS), BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB), + BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_CREATE), + BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK, + BPF_CGROUP_INET_SOCK_RELEASE), BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE), BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK, From db94cc0b4805968a0357ed2507730cdf77adf174 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:27 -0700 Subject: [PATCH 0756/2056] bpftool: Add support for BPF_CGROUP_INET_SOCK_RELEASE Support attaching to BPF_CGROUP_INET_SOCK_RELEASE and properly display attach type upon prog dump. Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-4-sdf@google.com --- tools/bpf/bpftool/common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 18e5604fe260..29f4e7611ae8 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -33,6 +33,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_CGROUP_INET_INGRESS] = "ingress", [BPF_CGROUP_INET_EGRESS] = "egress", [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create", + [BPF_CGROUP_INET_SOCK_RELEASE] = "sock_release", [BPF_CGROUP_SOCK_OPS] = "sock_ops", [BPF_CGROUP_DEVICE] = "device", [BPF_CGROUP_INET4_BIND] = "bind4", From 65ffd797861a44ff97081de1db01e4aef716ed46 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Mon, 6 Jul 2020 16:01:28 -0700 Subject: [PATCH 0757/2056] selftests/bpf: Test BPF_CGROUP_INET_SOCK_RELEASE Simple test that enforces a single SOCK_DGRAM socket per cgroup. 
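To make the enforced behaviour concrete, a rough userspace sketch of what an application inside the target cgroup would observe is shown below. This is illustrative only and not part of the selftest; the EPERM errno is an assumption based on how inet_create propagates a deny verdict from the sock_create program.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int first = socket(AF_INET, SOCK_DGRAM, 0);	/* allowed, in_use becomes 1 */
	int second = socket(AF_INET, SOCK_DGRAM, 0);	/* denied by the sock_create program */

	if (second < 0 && errno == EPERM)
		printf("second UDP socket rejected as expected\n");

	close(first);	/* sock_release runs and the quota is freed again */
	return 0;
}
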
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200706230128.4073544-5-sdf@google.com --- .../selftests/bpf/prog_tests/udp_limit.c | 75 +++++++++++++++++++ tools/testing/selftests/bpf/progs/udp_limit.c | 42 +++++++++++ 2 files changed, 117 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/udp_limit.c create mode 100644 tools/testing/selftests/bpf/progs/udp_limit.c diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c new file mode 100644 index 000000000000..2aba09d4d01b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/udp_limit.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "udp_limit.skel.h" + +#include +#include + +static int duration; + +void test_udp_limit(void) +{ + struct udp_limit *skel; + int fd1 = -1, fd2 = -1; + int cgroup_fd; + + cgroup_fd = test__join_cgroup("/udp_limit"); + if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno)) + return; + + skel = udp_limit__open_and_load(); + if (CHECK(!skel, "skel-load", "errno %d", errno)) + goto close_cgroup_fd; + + skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd); + skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd); + if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release), + "cg-attach", "sock %ld sock_release %ld", + PTR_ERR(skel->links.sock), + PTR_ERR(skel->links.sock_release))) + goto close_skeleton; + + /* BPF program enforces a single UDP socket per cgroup, + * verify that. + */ + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1", "errno %d", errno)) + goto close_skeleton; + + fd2 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd2 >= 0, "fd2", "errno %d", errno)) + goto close_skeleton; + + /* We can reopen again after close. */ + close(fd1); + fd1 = -1; + + fd1 = socket(AF_INET, SOCK_DGRAM, 0); + if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno)) + goto close_skeleton; + + /* Make sure the program was invoked the expected + * number of times: + * - open fd1 - BPF_CGROUP_INET_SOCK_CREATE + * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE + * - close fd1 - BPF_CGROUP_INET_SOCK_RELEASE + * - open fd1 again - BPF_CGROUP_INET_SOCK_CREATE + */ + if (CHECK(skel->bss->invocations != 4, "bss-invocations", + "invocations=%d", skel->bss->invocations)) + goto close_skeleton; + + /* We should still have a single socket in use */ + if (CHECK(skel->bss->in_use != 1, "bss-in_use", + "in_use=%d", skel->bss->in_use)) + goto close_skeleton; + +close_skeleton: + if (fd1 >= 0) + close(fd1); + if (fd2 >= 0) + close(fd2); + udp_limit__destroy(skel); +close_cgroup_fd: + close(cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c new file mode 100644 index 000000000000..8429b22525a7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include +#include + +int invocations = 0, in_use = 0; + +SEC("cgroup/sock_create") +int sock(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + + if (in_use > 0) { + /* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called + * when we return an error from the BPF + * program! 
+ */ + return 0; + } + + __sync_fetch_and_add(&in_use, 1); + return 1; +} + +SEC("cgroup/sock_release") +int sock_release(struct bpf_sock *ctx) +{ + __u32 key; + + if (ctx->type != SOCK_DGRAM) + return 1; + + __sync_fetch_and_add(&invocations, 1); + __sync_fetch_and_add(&in_use, -1); + return 1; +} From af9bd3e3331b8af42b6606c75797d041ab39380c Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:52 +0900 Subject: [PATCH 0758/2056] samples: bpf: Fix bpf programs with kprobe/sys_connect event Currently, BPF programs with kprobe/sys_connect does not work properly. Commit 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64") This commit modifies the bpf_load behavior of kprobe events in the x64 architecture. If the current kprobe event target starts with "sys_*", add the prefix "__x64_" to the front of the event. Appending "__x64_" prefix with kprobe/sys_* event was appropriate as a solution to most of the problems caused by the commit below. commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct pt_regs-based sys_*() to __x64_sys_*()") However, there is a problem with the sys_connect kprobe event that does not work properly. For __sys_connect event, parameters can be fetched normally, but for __x64_sys_connect, parameters cannot be fetched. ffffffff818d3520 <__x64_sys_connect>: ffffffff818d3520: e8 fb df 32 00 callq 0xffffffff81c01520 <__fentry__> ffffffff818d3525: 48 8b 57 60 movq 96(%rdi), %rdx ffffffff818d3529: 48 8b 77 68 movq 104(%rdi), %rsi ffffffff818d352d: 48 8b 7f 70 movq 112(%rdi), %rdi ffffffff818d3531: e8 1a ff ff ff callq 0xffffffff818d3450 <__sys_connect> ffffffff818d3536: 48 98 cltq ffffffff818d3538: c3 retq ffffffff818d3539: 0f 1f 80 00 00 00 00 nopl (%rax) As the assembly code for __x64_sys_connect shows, parameters should be fetched and set into rdi, rsi, rdx registers prior to calling __sys_connect. Because of this problem, this commit fixes the sys_connect event by first getting the value of the rdi register and then the value of the rdi, rsi, and rdx register through an offset based on that value. Fixes: 34745aed515c ("samples/bpf: fix kprobe attachment issue on x64") Signed-off-by: Daniel T. Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-2-danieltimlee@gmail.com --- samples/bpf/map_perf_test_kern.c | 9 ++++++--- samples/bpf/test_map_in_map_kern.c | 9 ++++++--- samples/bpf/test_probe_write_user_kern.c | 9 ++++++--- 3 files changed, 18 insertions(+), 9 deletions(-) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index 12e91ae64d4d..c9b31193ca12 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -11,6 +11,8 @@ #include #include "bpf_legacy.h" #include +#include +#include "trace_common.h" #define MAX_ENTRIES 1000 #define MAX_NR_CPUS 1024 @@ -154,9 +156,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int stress_lru_hmap_alloc(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); char fmt[] = "Failed at stress_lru_hmap_alloc. 
ret:%dn"; union { u16 dst6[8]; @@ -175,8 +178,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx) long val = 1; u32 key = 0; - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); - addrlen = (int)PT_REGS_PARM3(ctx); + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); if (addrlen != sizeof(*in6)) return 0; diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c index 6cee61e8ce9b..36a203e69064 100644 --- a/samples/bpf/test_map_in_map_kern.c +++ b/samples/bpf/test_map_in_map_kern.c @@ -13,6 +13,8 @@ #include #include "bpf_legacy.h" #include +#include +#include "trace_common.h" #define MAX_NR_PORTS 65536 @@ -102,9 +104,10 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port) return result ? *result : -ENOENT; } -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int trace_sys_connect(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); struct sockaddr_in6 *in6; u16 test_case, port, dst6[8]; int addrlen, ret, inline_ret, ret_key = 0; @@ -112,8 +115,8 @@ int trace_sys_connect(struct pt_regs *ctx) void *outer_map, *inner_map; bool inline_hash = false; - in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx); - addrlen = (int)PT_REGS_PARM3(ctx); + in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs); + addrlen = (int)PT_REGS_PARM3_CORE(real_regs); if (addrlen != sizeof(*in6)) return 0; diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c index f033f36a13a3..fd651a65281e 100644 --- a/samples/bpf/test_probe_write_user_kern.c +++ b/samples/bpf/test_probe_write_user_kern.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include "trace_common.h" struct bpf_map_def SEC("maps") dnat_map = { .type = BPF_MAP_TYPE_HASH, @@ -26,13 +28,14 @@ struct bpf_map_def SEC("maps") dnat_map = { * This example sits on a syscall, and the syscall ABI is relatively stable * of course, across platforms, and over time, the ABI may change. */ -SEC("kprobe/sys_connect") +SEC("kprobe/" SYSCALL(sys_connect)) int bpf_prog1(struct pt_regs *ctx) { + struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx); + void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs); + int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs); struct sockaddr_in new_addr, orig_addr = {}; struct sockaddr_in *mapped_addr; - void *sockaddr_arg = (void *)PT_REGS_PARM2(ctx); - int sockaddr_len = (int)PT_REGS_PARM3(ctx); if (sockaddr_len > sizeof(orig_addr)) return 0; From 88795b4adb01a30fbfd75ef1c1ef73b4442e38b2 Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:53 +0900 Subject: [PATCH 0759/2056] samples: bpf: Refactor BPF map in map test with libbpf From commit 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support"), a way to define internal map in BTF-defined map has been added. Instead of using previous 'inner_map_idx' definition, the structure to be used for the inner map can be directly defined using array directive. __array(values, struct inner_map) This commit refactors map in map test program with libbpf by explicitly defining inner map with BTF-defined format. Signed-off-by: Daniel T. 
Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-3-danieltimlee@gmail.com --- samples/bpf/Makefile | 2 +- samples/bpf/test_map_in_map_kern.c | 85 +++++++++++++++--------------- samples/bpf/test_map_in_map_user.c | 53 +++++++++++++++++-- 3 files changed, 91 insertions(+), 49 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 8403e4762306..f87ee02073ba 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS) tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o -test_map_in_map-objs := bpf_load.o test_map_in_map_user.o +test_map_in_map-objs := test_map_in_map_user.o per_socket_stats_example-objs := cookie_uid_helper_example.o xdp_redirect-objs := xdp_redirect_user.o xdp_redirect_map-objs := xdp_redirect_map_user.o diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c index 36a203e69064..8def45c5b697 100644 --- a/samples/bpf/test_map_in_map_kern.c +++ b/samples/bpf/test_map_in_map_kern.c @@ -11,7 +11,6 @@ #include #include #include -#include "bpf_legacy.h" #include #include #include "trace_common.h" @@ -19,60 +18,60 @@ #define MAX_NR_PORTS 65536 /* map #0 */ -struct bpf_map_def_legacy SEC("maps") port_a = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = MAX_NR_PORTS, -}; +struct inner_a { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, int); + __uint(max_entries, MAX_NR_PORTS); +} port_a SEC(".maps"); /* map #1 */ -struct bpf_map_def_legacy SEC("maps") port_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct inner_h { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} port_h SEC(".maps"); /* map #2 */ -struct bpf_map_def_legacy SEC("maps") reg_result_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} reg_result_h SEC(".maps"); /* map #3 */ -struct bpf_map_def_legacy SEC("maps") inline_result_h = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(int), - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, int); + __uint(max_entries, 1); +} inline_result_h SEC(".maps"); /* map #4 */ /* Test case #0 */ -struct bpf_map_def_legacy SEC("maps") a_of_port_a = { - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 0, /* map_fd[0] is port_a */ - .max_entries = MAX_NR_PORTS, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_NR_PORTS); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_a); /* use inner_a as inner map */ +} a_of_port_a SEC(".maps"); /* map #5 */ /* Test case #1 */ -struct bpf_map_def_legacy SEC("maps") h_of_port_a = { - .type = BPF_MAP_TYPE_HASH_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 0, /* map_fd[0] is port_a */ - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 1); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_a); /* use inner_a as inner map */ +} h_of_port_a SEC(".maps"); /* map #6 
*/ /* Test case #2 */ -struct bpf_map_def_legacy SEC("maps") h_of_port_h = { - .type = BPF_MAP_TYPE_HASH_OF_MAPS, - .key_size = sizeof(u32), - .inner_map_idx = 1, /* map_fd[1] is port_h */ - .max_entries = 1, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS); + __uint(max_entries, 1); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_h); /* use inner_h as inner map */ +} h_of_port_h SEC(".maps"); static __always_inline int do_reg_lookup(void *inner_map, u32 port) { diff --git a/samples/bpf/test_map_in_map_user.c b/samples/bpf/test_map_in_map_user.c index eb29bcb76f3f..98656de56b83 100644 --- a/samples/bpf/test_map_in_map_user.c +++ b/samples/bpf/test_map_in_map_user.c @@ -11,7 +11,9 @@ #include #include #include -#include "bpf_load.h" +#include + +static int map_fd[7]; #define PORT_A (map_fd[0]) #define PORT_H (map_fd[1]) @@ -113,18 +115,59 @@ static void test_map_in_map(void) int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + struct bpf_link *link = NULL; + struct bpf_program *prog; + struct bpf_object *obj; char filename[256]; - assert(!setrlimit(RLIMIT_MEMLOCK, &r)); + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) { + fprintf(stderr, "ERROR: opening BPF object file failed\n"); + return 0; + } - if (load_bpf_file(filename)) { - printf("%s", bpf_log_buf); - return 1; + prog = bpf_object__find_program_by_name(obj, "trace_sys_connect"); + if (!prog) { + printf("finding a prog in obj file failed\n"); + goto cleanup; + } + + /* load BPF program */ + if (bpf_object__load(obj)) { + fprintf(stderr, "ERROR: loading BPF object file failed\n"); + goto cleanup; + } + + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a"); + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h"); + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h"); + map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h"); + map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a"); + map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a"); + map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h"); + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 || + map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + link = bpf_program__attach(prog); + if (libbpf_get_error(link)) { + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); + link = NULL; + goto cleanup; } test_map_in_map(); +cleanup: + bpf_link__destroy(link); + bpf_object__close(obj); return 0; } From cc7f641d637bef0b31194d28667553f77c4ef947 Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:54 +0900 Subject: [PATCH 0760/2056] samples: bpf: Refactor BPF map performance test with libbpf Previously, in order to set the numa_node attribute at the time of map creation using "libbpf", it was necessary to call bpf_create_map_node() directly (bpf_load approach), instead of calling bpf_object_load() that handles everything on its own, including map creation. And because of this problem, this sample had problems with refactoring from bpf_load to libbbpf. However, by commit 1bdb6c9a1c43 ("libbpf: Add a bunch of attribute getters/setters for map definitions") added the numa_node attribute and allowed it to be set in the map. 
By using libbpf instead of bpf_load, the inner map definition has been explicitly declared with BTF-defined format. Also, the element of ARRAY_OF_MAPS was also statically specified using the BTF format. And for this reason some logic in fixup_map() was not needed and changed or removed. Signed-off-by: Daniel T. Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-4-danieltimlee@gmail.com --- samples/bpf/map_perf_test_kern.c | 167 ++++++++++++++++--------------- samples/bpf/map_perf_test_user.c | 162 +++++++++++++++++++----------- 2 files changed, 189 insertions(+), 140 deletions(-) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index c9b31193ca12..8773f22b6a98 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -9,7 +9,6 @@ #include #include #include -#include "bpf_legacy.h" #include #include #include "trace_common.h" @@ -17,89 +16,93 @@ #define MAX_ENTRIES 1000 #define MAX_NR_CPUS 1024 -struct bpf_map_def_legacy SEC("maps") hash_map = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, 10000); +} lru_hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, 10000); + __uint(map_flags, BPF_F_NO_COMMON_LRU); +} nocommon_lru_hash_map SEC(".maps"); + +struct inner_lru { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NUMA_NODE); + __uint(numa_node, 0); +} inner_lru_hash_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); + __uint(max_entries, MAX_NR_CPUS); + __uint(key_size, sizeof(u32)); + __array(values, struct inner_lru); /* use inner_lru as inner map */ +} array_of_lru_hashs SEC(".maps") = { + /* statically initialize the first element */ + .values = { &inner_lru_hash_map }, }; -struct bpf_map_def_legacy SEC("maps") lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 10000, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(long)); + __uint(max_entries, MAX_ENTRIES); +} percpu_hash_map SEC(".maps"); -struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = 10000, - .map_flags = BPF_F_NO_COMMON_LRU, -}; +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NO_PREALLOC); +} hash_map_alloc SEC(".maps"); -struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NUMA_NODE, - .numa_node = 0, -}; +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(key_size, sizeof(u32)); + __uint(value_size, sizeof(long)); + __uint(max_entries, MAX_ENTRIES); + __uint(map_flags, BPF_F_NO_PREALLOC); +} percpu_hash_map_alloc SEC(".maps"); -struct bpf_map_def_legacy 
SEC("maps") array_of_lru_hashs = { - .type = BPF_MAP_TYPE_ARRAY_OF_MAPS, - .key_size = sizeof(u32), - .max_entries = MAX_NR_CPUS, -}; +struct { + __uint(type, BPF_MAP_TYPE_LPM_TRIE); + __uint(key_size, 8); + __uint(value_size, sizeof(long)); + __uint(max_entries, 10000); + __uint(map_flags, BPF_F_NO_PREALLOC); +} lpm_trie_map_alloc SEC(".maps"); -struct bpf_map_def_legacy SEC("maps") percpu_hash_map = { - .type = BPF_MAP_TYPE_PERCPU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} array_map SEC(".maps"); -struct bpf_map_def_legacy SEC("maps") hash_map_alloc = { - .type = BPF_MAP_TYPE_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NO_PREALLOC, -}; +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __type(key, u32); + __type(value, long); + __uint(max_entries, MAX_ENTRIES); +} lru_hash_lookup_map SEC(".maps"); -struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = { - .type = BPF_MAP_TYPE_PERCPU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, - .map_flags = BPF_F_NO_PREALLOC, -}; - -struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = { - .type = BPF_MAP_TYPE_LPM_TRIE, - .key_size = 8, - .value_size = sizeof(long), - .max_entries = 10000, - .map_flags = BPF_F_NO_PREALLOC, -}; - -struct bpf_map_def_legacy SEC("maps") array_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; - -struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = { - .type = BPF_MAP_TYPE_LRU_HASH, - .key_size = sizeof(u32), - .value_size = sizeof(long), - .max_entries = MAX_ENTRIES, -}; - -SEC("kprobe/sys_getuid") +SEC("kprobe/" SYSCALL(sys_getuid)) int stress_hmap(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -114,7 +117,7 @@ int stress_hmap(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_geteuid") +SEC("kprobe/" SYSCALL(sys_geteuid)) int stress_percpu_hmap(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -128,7 +131,7 @@ int stress_percpu_hmap(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getgid") +SEC("kprobe/" SYSCALL(sys_getgid)) int stress_hmap_alloc(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -142,7 +145,7 @@ int stress_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getegid") +SEC("kprobe/" SYSCALL(sys_getegid)) int stress_percpu_hmap_alloc(struct pt_regs *ctx) { u32 key = bpf_get_current_pid_tgid(); @@ -236,7 +239,7 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_gettid") +SEC("kprobe/" SYSCALL(sys_gettid)) int stress_lpm_trie_map_alloc(struct pt_regs *ctx) { union { @@ -258,7 +261,7 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getpgid") +SEC("kprobe/" SYSCALL(sys_getpgid)) int stress_hash_map_lookup(struct pt_regs *ctx) { u32 key = 1, i; @@ -271,7 +274,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx) return 0; } -SEC("kprobe/sys_getppid") +SEC("kprobe/" SYSCALL(sys_getppid)) int stress_array_map_lookup(struct pt_regs *ctx) { u32 key = 1, i; diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index fe5564bff39b..8b13230b4c46 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -11,7 +11,6 @@ #include #include #include -#include 
#include #include #include @@ -19,7 +18,7 @@ #include #include -#include "bpf_load.h" +#include #define TEST_BIT(t) (1U << (t)) #define MAX_NR_CPUS 1024 @@ -61,12 +60,18 @@ const char *test_map_names[NR_TESTS] = { [LRU_HASH_LOOKUP] = "lru_hash_lookup_map", }; +enum map_idx { + array_of_lru_hashs_idx, + hash_map_alloc_idx, + lru_hash_lookup_idx, + NR_IDXES, +}; + +static int map_fd[NR_IDXES]; + static int test_flags = ~0; static uint32_t num_map_entries; static uint32_t inner_lru_hash_size; -static int inner_lru_hash_idx = -1; -static int array_of_lru_hashs_idx = -1; -static int lru_hash_lookup_idx = -1; static int lru_hash_lookup_test_entries = 32; static uint32_t max_cnt = 1000000; @@ -122,30 +127,30 @@ static void do_test_lru(enum test_type test, int cpu) __u64 start_time; int i, ret; - if (test == INNER_LRU_HASH_PREALLOC) { + if (test == INNER_LRU_HASH_PREALLOC && cpu) { + /* If CPU is not 0, create inner_lru hash map and insert the fd + * value into the array_of_lru_hash map. In case of CPU 0, + * 'inner_lru_hash_map' was statically inserted on the map init + */ int outer_fd = map_fd[array_of_lru_hashs_idx]; unsigned int mycpu, mynode; assert(cpu < MAX_NR_CPUS); - if (cpu) { - ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); - assert(!ret); + ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); + assert(!ret); - inner_lru_map_fds[cpu] = - bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, - test_map_names[INNER_LRU_HASH_PREALLOC], - sizeof(uint32_t), - sizeof(long), - inner_lru_hash_size, 0, - mynode); - if (inner_lru_map_fds[cpu] == -1) { - printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", - strerror(errno), errno); - exit(1); - } - } else { - inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx]; + inner_lru_map_fds[cpu] = + bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, + test_map_names[INNER_LRU_HASH_PREALLOC], + sizeof(uint32_t), + sizeof(long), + inner_lru_hash_size, 0, + mynode); + if (inner_lru_map_fds[cpu] == -1) { + printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", + strerror(errno), errno); + exit(1); } ret = bpf_map_update_elem(outer_fd, &cpu, @@ -377,7 +382,8 @@ static void fill_lpm_trie(void) key->data[1] = rand() & 0xff; key->data[2] = rand() & 0xff; key->data[3] = rand() & 0xff; - r = bpf_map_update_elem(map_fd[6], key, &value, 0); + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], + key, &value, 0); assert(!r); } @@ -388,59 +394,52 @@ static void fill_lpm_trie(void) key->data[3] = 1; value = 128; - r = bpf_map_update_elem(map_fd[6], key, &value, 0); + r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0); assert(!r); } -static void fixup_map(struct bpf_map_data *map, int idx) +static void fixup_map(struct bpf_object *obj) { + struct bpf_map *map; int i; - if (!strcmp("inner_lru_hash_map", map->name)) { - inner_lru_hash_idx = idx; - inner_lru_hash_size = map->def.max_entries; - } + bpf_object__for_each_map(map, obj) { + const char *name = bpf_map__name(map); - if (!strcmp("array_of_lru_hashs", map->name)) { - if (inner_lru_hash_idx == -1) { - printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n"); - exit(1); + /* Only change the max_entries for the enabled test(s) */ + for (i = 0; i < NR_TESTS; i++) { + if (!strcmp(test_map_names[i], name) && + (check_test_flags(i))) { + bpf_map__resize(map, num_map_entries); + continue; + } } - map->def.inner_map_idx = inner_lru_hash_idx; - array_of_lru_hashs_idx = idx; } - if (!strcmp("lru_hash_lookup_map", map->name)) - lru_hash_lookup_idx = idx; - - if (num_map_entries <= 0) - return; - 
inner_lru_hash_size = num_map_entries; - - /* Only change the max_entries for the enabled test(s) */ - for (i = 0; i < NR_TESTS; i++) { - if (!strcmp(test_map_names[i], map->name) && - (check_test_flags(i))) { - map->def.max_entries = num_map_entries; - } - } } int main(int argc, char **argv) { struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); + struct bpf_link *links[8]; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_map *map; char filename[256]; - int num_cpu = 8; + int i = 0; - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - setrlimit(RLIMIT_MEMLOCK, &r); + if (setrlimit(RLIMIT_MEMLOCK, &r)) { + perror("setrlimit(RLIMIT_MEMLOCK)"); + return 1; + } if (argc > 1) test_flags = atoi(argv[1]) ? : test_flags; if (argc > 2) - num_cpu = atoi(argv[2]) ? : num_cpu; + nr_cpus = atoi(argv[2]) ? : nr_cpus; if (argc > 3) num_map_entries = atoi(argv[3]); @@ -448,14 +447,61 @@ int main(int argc, char **argv) if (argc > 4) max_cnt = atoi(argv[4]); - if (load_bpf_file_fixup_map(filename, fixup_map)) { - printf("%s", bpf_log_buf); - return 1; + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + obj = bpf_object__open_file(filename, NULL); + if (libbpf_get_error(obj)) { + fprintf(stderr, "ERROR: opening BPF object file failed\n"); + return 0; + } + + map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map"); + if (libbpf_get_error(map)) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + inner_lru_hash_size = bpf_map__max_entries(map); + if (!inner_lru_hash_size) { + fprintf(stderr, "ERROR: failed to get map attribute\n"); + goto cleanup; + } + + /* resize BPF map prior to loading */ + if (num_map_entries > 0) + fixup_map(obj); + + /* load BPF program */ + if (bpf_object__load(obj)) { + fprintf(stderr, "ERROR: loading BPF object file failed\n"); + goto cleanup; + } + + map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs"); + map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc"); + map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map"); + if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) { + fprintf(stderr, "ERROR: finding a map in obj file failed\n"); + goto cleanup; + } + + bpf_object__for_each_program(prog, obj) { + links[i] = bpf_program__attach(prog); + if (libbpf_get_error(links[i])) { + fprintf(stderr, "ERROR: bpf_program__attach failed\n"); + links[i] = NULL; + goto cleanup; + } + i++; } fill_lpm_trie(); - run_perf_test(num_cpu); + run_perf_test(nr_cpus); +cleanup: + for (i--; i >= 0; i--) + bpf_link__destroy(links[i]); + + bpf_object__close(obj); return 0; } From 5cfd607b49db2b7c05fccc2915a528d97bc8f17d Mon Sep 17 00:00:00 2001 From: "Daniel T. Lee" Date: Wed, 8 Jul 2020 03:48:55 +0900 Subject: [PATCH 0761/2056] selftests: bpf: Remove unused bpf_map_def_legacy struct samples/bpf no longer use bpf_map_def_legacy and instead use the libbpf's bpf_map_def or new BTF-defined MAP format. This commit removes unused bpf_map_def_legacy struct from selftests/bpf/bpf_legacy.h. Signed-off-by: Daniel T. 
Lee Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200707184855.30968-5-danieltimlee@gmail.com --- tools/testing/selftests/bpf/bpf_legacy.h | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tools/testing/selftests/bpf/bpf_legacy.h b/tools/testing/selftests/bpf/bpf_legacy.h index 6f8988738bc1..719ab56cdb5d 100644 --- a/tools/testing/selftests/bpf/bpf_legacy.h +++ b/tools/testing/selftests/bpf/bpf_legacy.h @@ -2,20 +2,6 @@ #ifndef __BPF_LEGACY__ #define __BPF_LEGACY__ -/* - * legacy bpf_map_def with extra fields supported only by bpf_load(), do not - * use outside of samples/bpf - */ -struct bpf_map_def_legacy { - unsigned int type; - unsigned int key_size; - unsigned int value_size; - unsigned int max_entries; - unsigned int map_flags; - unsigned int inner_map_idx; - unsigned int numa_node; -}; - #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val) \ struct ____btf_map_##name { \ type_key key; \ From 51b64c476a5ddc66d3459f14e98e5de9211e9e24 Mon Sep 17 00:00:00 2001 From: Miao-chen Chou Date: Tue, 7 Jul 2020 15:52:28 -0700 Subject: [PATCH 0762/2056] Bluetooth: Use whitelist for scan policy when suspending Even with one advertisement monitor in place, the scan policy should use the whitelist while the system is going to suspend to prevent waking by random advertisements. The following test was performed. - With a paired device, register one advertisement monitor, suspend the system and verify that the host was not awakened by random advertisements. Signed-off-by: Miao-chen Chou Reviewed-by: Abhishek Pandit-Subedi Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 68a2ec36e1c1..770b93758112 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -800,9 +800,10 @@ static u8 update_white_list(struct hci_request *req) /* Once the controller offloading of advertisement monitor is in place, * the if condition should include the support of MSFT extension - * support. + * support. If suspend is ongoing, whitelist should be the default to + * prevent waking by random advertisements. */ - if (!idr_is_empty(&hdev->adv_monitors_idr)) + if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended) return 0x00; /* Select filter policy to use white list */ From 93165ecbe1d0298bdaa9ddc4b4e4ff01831dc6c9 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 7 Jul 2020 21:45:13 -0700 Subject: [PATCH 0763/2056] net: dsa: loop: Print when registration is successful We have a number of error conditions that can lead to the driver not probing successfully; move the print when we are sure dsa_register_switch() has succeeded. This avoids repeated prints in case of probe deferral for instance. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/dsa_loop.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 400207c5c7de..f8bc85a6e670 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -280,13 +280,11 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data; struct dsa_loop_priv *ps; struct dsa_switch *ds; + int ret; if (!pdata) return -ENODEV; - dev_info(&mdiodev->dev, "%s: 0x%0x\n", - pdata->name, pdata->enabled_ports); - ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; @@ -311,7 +309,12 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, ds); - return dsa_register_switch(ds); + ret = dsa_register_switch(ds); + if (!ret) + dev_info(&mdiodev->dev, "%s: 0x%0x\n", + pdata->name, pdata->enabled_ports); + + return ret; } static void dsa_loop_drv_remove(struct mdio_device *mdiodev) From 5d75c04306f7f906c1200bef42406ccb71d4b75f Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Wed, 8 Jul 2020 12:35:19 +0200 Subject: [PATCH 0764/2056] Replace HTTP links with HTTPS ones: ATMEL MACB ETHERNET DRIVER Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Acked-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_pci.c | 2 +- drivers/net/ethernet/cadence/macb_ptp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 617b3b728dd0..cd7d0332cba3 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -2,7 +2,7 @@ /** * Cadence GEM PCI wrapper. * - * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com + * Copyright (C) 2016 Cadence Design Systems - https://www.cadence.com * * Authors: Rafal Ozieblo * Bartosz Folta diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 43a3f0dbf857..31ebf3ee7ec0 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -2,7 +2,7 @@ /** * 1588 PTP support for Cadence GEM device. * - * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com + * Copyright (C) 2017 Cadence Design Systems - https://www.cadence.com * * Authors: Rafal Ozieblo * Bartosz Folta From bd36ed1c935144a0e3b788bba659258f666e7b5b Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Jul 2020 09:46:24 -0700 Subject: [PATCH 0765/2056] net: phy: Define PHY statistics ethtool_phy_ops Extend ethtool_phy_ops to include the 3 function pointers necessary for implementing PHY statistics. In a subsequent change we will uninline those functions. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy_device.c | 3 +++ include/linux/ethtool.h | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 233334406f0f..7cda95330aea 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3028,6 +3028,9 @@ static struct phy_driver genphy_driver = { }; static const struct ethtool_phy_ops phy_ethtool_phy_ops = { + .get_sset_count = phy_ethtool_get_sset_count, + .get_strings = phy_ethtool_get_strings, + .get_stats = phy_ethtool_get_stats, .start_cable_test = phy_start_cable_test, .start_cable_test_tdr = phy_start_cable_test_tdr, }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 0c139a93b67a..969a80211df6 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -508,6 +508,9 @@ struct phy_tdr_config; /** * struct ethtool_phy_ops - Optional PHY device options + * @get_sset_count: Get number of strings that @get_strings will write. + * @get_strings: Return a set of strings that describe the requested objects + * @get_stats: Return extended statistics about the PHY device. * @start_cable_test - Start a cable test * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test * @@ -515,6 +518,10 @@ struct phy_tdr_config; * and callers must take this into account. Callers must hold the RTNL lock. */ struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *dev); + int (*get_strings)(struct phy_device *dev, u8 *data); + int (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); int (*start_cable_test)(struct phy_device *phydev, struct netlink_ext_ack *extack); int (*start_cable_test_tdr)(struct phy_device *phydev, From 17809516a03a045565ad6a80e6241754615871ac Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 8 Jul 2020 09:46:25 -0700 Subject: [PATCH 0766/2056] net: phy: Uninline PHY ethtool statistics operations Now that we have moved the PHY ethtool statistics to be dynamically registered, we no longer need to inline those for ethtool. This used to be done to avoid cross symbol referencing and allow ethtool to be decoupled from PHYLIB entirely. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy.c | 48 ++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 49 ++++--------------------------------------- net/ethtool/ioctl.c | 23 +++++++++++++------- net/ethtool/strset.c | 11 ++++++---- 4 files changed, 74 insertions(+), 57 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 56cfae950472..79b4f35d151e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -489,6 +489,54 @@ static void phy_abort_cable_test(struct phy_device *phydev) phydev_err(phydev, "Error while aborting cable test"); } +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_strings(phydev, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_strings); + +int phy_ethtool_get_sset_count(struct phy_device *phydev) +{ + int ret; + + if (!phydev->drv) + return -EIO; + + if (phydev->drv->get_sset_count && + phydev->drv->get_strings && + phydev->drv->get_stats) { + mutex_lock(&phydev->lock); + ret = phydev->drv->get_sset_count(phydev); + mutex_unlock(&phydev->lock); + + return ret; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(phy_ethtool_get_sset_count); + +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data) +{ + if (!phydev->drv) + return -EIO; + + mutex_lock(&phydev->lock); + phydev->drv->get_stats(phydev, stats, data); + mutex_unlock(&phydev->lock); + + return 0; +} +EXPORT_SYMBOL(phy_ethtool_get_stats); + int phy_start_cable_test(struct phy_device *phydev, struct netlink_ext_ack *extack) { diff --git a/include/linux/phy.h b/include/linux/phy.h index 1592c3d0e12f..0403eb799913 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1474,51 +1474,10 @@ int __init mdio_bus_init(void); void mdio_bus_exit(void); #endif -/* Inline function for use within net/core/ethtool.c (built-in) */ -static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_strings(phydev, data); - mutex_unlock(&phydev->lock); - - return 0; -} - -static inline int phy_ethtool_get_sset_count(struct phy_device *phydev) -{ - int ret; - - if (!phydev->drv) - return -EIO; - - if (phydev->drv->get_sset_count && - phydev->drv->get_strings && - phydev->drv->get_stats) { - mutex_lock(&phydev->lock); - ret = phydev->drv->get_sset_count(phydev); - mutex_unlock(&phydev->lock); - - return ret; - } - - return -EOPNOTSUPP; -} - -static inline int phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data) -{ - if (!phydev->drv) - return -EIO; - - mutex_lock(&phydev->lock); - phydev->drv->get_stats(phydev, stats, data); - mutex_unlock(&phydev->lock); - - return 0; -} +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); +int phy_ethtool_get_sset_count(struct phy_device *phydev); +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data); static inline int phy_package_read(struct phy_device *phydev, u32 regnum) { diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 83f22196d64c..441794e0034f 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -135,6 +135,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr) static int __ethtool_get_sset_count(struct net_device *dev, int sset) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = 
dev->ethtool_ops; if (sset == ETH_SS_FEATURES) @@ -150,8 +151,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) return ARRAY_SIZE(phy_tunable_strings); if (sset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - return phy_ethtool_get_sset_count(dev->phydev); + !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_sset_count) + return phy_ops->get_sset_count(dev->phydev); if (sset == ETH_SS_LINK_MODES) return __ETHTOOL_LINK_MODE_MASK_NBITS; @@ -165,6 +167,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset) static void __ethtool_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; if (stringset == ETH_SS_FEATURES) @@ -178,8 +181,9 @@ static void __ethtool_get_strings(struct net_device *dev, else if (stringset == ETH_SS_PHY_TUNABLES) memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings)); else if (stringset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - phy_ethtool_get_strings(dev->phydev, data); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(dev->phydev, data); else if (stringset == ETH_SS_LINK_MODES) memcpy(data, link_mode_names, __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN); @@ -1929,6 +1933,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; struct ethtool_stats stats; @@ -1938,8 +1943,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count)) return -EOPNOTSUPP; - if (dev->phydev && !ops->get_ethtool_phy_stats) - n_stats = phy_ethtool_get_sset_count(dev->phydev); + if (dev->phydev && !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_sset_count) + n_stats = phy_ops->get_sset_count(dev->phydev); else n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS); if (n_stats < 0) @@ -1958,8 +1964,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr) if (!data) return -ENOMEM; - if (dev->phydev && !ops->get_ethtool_phy_stats) { - ret = phy_ethtool_get_stats(dev->phydev, &stats, data); + if (dev->phydev && !ops->get_ethtool_phy_stats && + phy_ops && phy_ops->get_stats) { + ret = phy_ops->get_stats(dev->phydev, &stats, data); if (ret < 0) goto out; } else { diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 0eed4e4909ab..773634b6b048 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -209,13 +209,15 @@ static void strset_cleanup_data(struct ethnl_reply_data *reply_base) static int strset_prepare_set(struct strset_info *info, struct net_device *dev, unsigned int id, bool counts_only) { + const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops; const struct ethtool_ops *ops = dev->ethtool_ops; void *strings; int count, ret; if (id == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - ret = phy_ethtool_get_sset_count(dev->phydev); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_sset_count) + ret = phy_ops->get_sset_count(dev->phydev); else if (ops->get_sset_count && ops->get_strings) ret = ops->get_sset_count(dev, id); else @@ -231,8 +233,9 @@ static int strset_prepare_set(struct strset_info *info, 
struct net_device *dev, if (!strings) return -ENOMEM; if (id == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - phy_ethtool_get_strings(dev->phydev, strings); + !ops->get_ethtool_phy_stats && phy_ops && + phy_ops->get_strings) + phy_ops->get_strings(dev->phydev, strings); else ops->get_strings(dev, id, strings); info->strings = strings; From 34370d2435f9853ac882056faa98f3263c537c36 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:53 -0400 Subject: [PATCH 0767/2056] bnxt_en: Set up the chip specific RSS table size. Currently, we allocate one page for the hardware DMA RSS indirection table. While the size is currently big enough for all chips, future chip variations may support bigger sizes, so it is better to calculate and store the chip specific size and allocate accordingly. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 +++++++ 2 files changed, 15 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a884df44612..4afc1df74129 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3538,7 +3538,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp) } if (vnic->rss_table) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, + dma_free_coherent(&pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); vnic->rss_table = NULL; @@ -3603,7 +3603,13 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) continue; /* Allocate rss table and hash key */ - vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); + if (bp->flags & BNXT_FLAG_CHIP_P5) + size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); + + vnic->rss_table_size = size + HW_HASH_KEY_SIZE; + vnic->rss_table = dma_alloc_coherent(&pdev->dev, + vnic->rss_table_size, &vnic->rss_table_dma_addr, GFP_KERNEL); if (!vnic->rss_table) { @@ -3611,8 +3617,6 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp) goto out; } - size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16)); - vnic->rss_hash_key = ((void *)vnic->rss_table) + size; vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 78e2fd63ac3d..5883b2462db2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1017,6 +1017,13 @@ struct bnxt_vnic_info { __le16 *rss_table; dma_addr_t rss_hash_key_dma_addr; u64 *rss_hash_key; + int rss_table_size; +#define BNXT_RSS_TABLE_ENTRIES_P5 64 +#define BNXT_RSS_TABLE_SIZE_P5 (BNXT_RSS_TABLE_ENTRIES_P5 * 4) +#define BNXT_RSS_TABLE_MAX_TBL_P5 8 +#define BNXT_MAX_RSS_TABLE_SIZE_P5 \ + (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) + u32 rx_mask; u8 *mc_list; From b73c1d08a0ec33f2ddafdd21d3a48614da4e6853 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:54 -0400 Subject: [PATCH 0768/2056] bnxt_en: Fix up bnxt_get_rxfh_indir_size(). Fix up bnxt_get_rxfh_indir_size() to return the proper current RSS table size for P5 chips. Change it to non-static so that bnxt.c can use it to get the table size. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 6 +++++- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b88143af5ea..995de937358a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1273,8 +1273,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return rc; } -static u32 bnxt_get_rxfh_indir_size(struct net_device *dev) +u32 bnxt_get_rxfh_indir_size(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); + + if (bp->flags & BNXT_FLAG_CHIP_P5) + return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5); return HW_HASH_INDEX_SIZE; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index ce7585ff9e4d..dddbca1d052c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -86,6 +86,7 @@ struct hwrm_dbg_cmn_output { extern const struct ethtool_ops bnxt_ethtool_ops; +u32 bnxt_get_rxfh_indir_size(struct net_device *dev); u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); u32 bnxt_fw_to_ethtool_speed(u16); u16 bnxt_get_fw_auto_link_speeds(u32); From 1667cbf6a4ebe0901bd93ef0d6defd35006fd2be Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:55 -0400 Subject: [PATCH 0769/2056] bnxt_en: Add logical RSS indirection table structure. The driver currently does not keep track of the logical RSS indirection table. The hardware RSS table is set up with standard default ring distribution when initializing the chip. This makes it difficult to support user sepcified indirection table entries. As a first step, add the logical table in the main bnxt structure and allocate it according to chip specific table size. Add a function that sets up default RSS distribution based on the number of RX rings. v4: Use bnxt_get_rxfh_indir_size() for the current RSS table size. v2: Use kmalloc_array() since we init. all entries afterwards. Use ALIGN() to roundup the RSS table size. Use ethtool_rxfh_indir_default() to init. the default entries. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 48 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 ++ 2 files changed, 52 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4afc1df74129..228ba6664083 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4830,6 +4830,45 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) } } +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) +{ + int entries; + + if (bp->flags & BNXT_FLAG_CHIP_P5) + entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; + else + entries = HW_HASH_INDEX_SIZE; + + bp->rss_indir_tbl_entries = entries; + bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), + GFP_KERNEL); + if (!bp->rss_indir_tbl) + return -ENOMEM; + return 0; +} + +static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) +{ + u16 max_rings, max_entries, pad, i; + + if (!bp->rx_nr_rings) + return; + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + + max_entries = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < max_entries; i++) + bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings); + + pad = bp->rss_indir_tbl_entries - max_entries; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { u32 i, j, max_rings; @@ -11514,6 +11553,8 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; bnxt_free_port_stats(bp); free_netdev(dev); } @@ -12034,6 +12075,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + rc = bnxt_alloc_rss_indir_tbl(bp); + if (rc) + goto init_err_pci_clean; + bnxt_set_dflt_rss_indir_tbl(bp); + if (BNXT_PF(bp)) { if (!bnxt_pf_wq) { bnxt_pf_wq = @@ -12078,6 +12124,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; + kfree(bp->rss_indir_tbl); + bp->rss_indir_tbl = NULL; init_err_free: free_netdev(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5883b2462db2..6de281302f2e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1023,6 +1023,8 @@ struct bnxt_vnic_info { #define BNXT_RSS_TABLE_MAX_TBL_P5 8 #define BNXT_MAX_RSS_TABLE_SIZE_P5 \ (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) +#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \ + (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5) u32 rx_mask; @@ -1655,6 +1657,8 @@ struct bnxt { struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; int nr_vnics; + u16 *rss_indir_tbl; + u16 rss_indir_tbl_entries; u32 rss_hash_cfg; u16 max_mtu; From f9f6a3fbb5eb89e738ebdf16ac56437177537b28 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:56 -0400 Subject: [PATCH 0770/2056] bnxt_en: Add helper function to return the number of RSS contexts. On some chips, this varies based on the number of RX rings. Add this helper function and refactor the existing code to use it. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 +++++++++++-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 228ba6664083..3d0bb4380f9f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4869,6 +4869,15 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); } +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5); + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 2; + return 1; +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { u32 i, j, max_rings; @@ -4924,7 +4933,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0, k = 0; i < nr_ctxs; i++) { __le16 *ring_tbl = vnic->rss_table; int rc; @@ -7677,7 +7686,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id) { int rc, i, nr_ctxs; - nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64); + nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); for (i = 0; i < nr_ctxs; i++) { rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i); if (rc) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6de281302f2e..58909130ca4d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2039,6 +2039,7 @@ int hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); +int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); int bnxt_nq_rings_in_use(struct bnxt *bp); From f33a305d09388880ec92db8de3c38448db36b629 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:57 -0400 Subject: [PATCH 0771/2056] bnxt_en: Fill HW RSS table from the RSS logical indirection table. Now that we have the logical table, we can fill the HW RSS table using the logical table's entries and converting them to the HW specific format. Re-initialize the logical table to standard distribution if the number of RX rings changes during ring reservation. v4: Use bnxt_get_rxfh_indir_size() to get the RSS table size. v2: Use ALIGN() to roundup the RSS table size. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 88 +++++++++++++---------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 3d0bb4380f9f..fcb7bf28778f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4878,9 +4878,51 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) return 1; } +static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); + u16 i, j; + + /* Fill the RSS indirection table with ring group ids */ + for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { + if (!no_rss) + j = bp->rss_indir_tbl[i]; + vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); + } +} + +static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + __le16 *ring_tbl = vnic->rss_table; + struct bnxt_rx_ring_info *rxr; + u16 tbl_size, i; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + + for (i = 0; i < tbl_size; i++) { + u16 ring_id, j; + + j = bp->rss_indir_tbl[i]; + rxr = &bp->rx_ring[j]; + + ring_id = rxr->rx_ring_struct.fw_ring_id; + *ring_tbl++ = cpu_to_le16(ring_id); + ring_id = bnxt_cp_ring_for_rx(bp, rxr); + *ring_tbl++ = cpu_to_le16(ring_id); + } +} + +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + if (bp->flags & BNXT_FLAG_CHIP_P5) + __bnxt_fill_hw_rss_tbl_p5(bp, vnic); + else + __bnxt_fill_hw_rss_tbl(bp, vnic); +} + static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { - u32 i, j, max_rings; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; @@ -4890,24 +4932,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); if (set_rss) { + bnxt_fill_hw_rss_tbl(bp, vnic); req.hash_type = cpu_to_le32(bp->rss_hash_cfg); req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - if (vnic->flags & BNXT_VNIC_RSS_FLAG) { - if (BNXT_CHIP_TYPE_NITRO_A0(bp)) - max_rings = bp->rx_nr_rings - 1; - else - max_rings = bp->rx_nr_rings; - } else { - max_rings = 1; - } - - /* Fill the RSS indirection table with ring group ids */ - for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { - if (j == max_rings) - j = 0; - vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]); - } - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); @@ -4919,9 +4946,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings; - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; struct hwrm_vnic_rss_cfg_input req = {0}; + dma_addr_t ring_tbl_map; + u32 i, nr_ctxs; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -4929,31 +4956,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); return 0; } + bnxt_fill_hw_rss_tbl(bp, vnic); req.hash_type = cpu_to_le32(bp->rss_hash_cfg); req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); req.hash_key_tbl_addr = 
cpu_to_le64(vnic->rss_hash_key_dma_addr); + ring_tbl_map = vnic->rss_table_dma_addr; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); - for (i = 0, k = 0; i < nr_ctxs; i++) { - __le16 *ring_tbl = vnic->rss_table; + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { int rc; + req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); req.ring_table_pair_index = i; req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - for (j = 0; j < 64; j++) { - u16 ring_id; - - ring_id = rxr->rx_ring_struct.fw_ring_id; - *ring_tbl++ = cpu_to_le16(ring_id); - ring_id = bnxt_cp_ring_for_rx(bp, rxr); - *ring_tbl++ = cpu_to_le16(ring_id); - rxr++; - k++; - if (k == max_rings) { - k = 0; - rxr = &bp->rx_ring[0]; - } - } rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return rc; @@ -8248,6 +8262,8 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); } + bnxt_set_dflt_rss_indir_tbl(bp); + if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); return rc; From adc38ac66745949ce12c1861c1a25f3ef93df1f8 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:58 -0400 Subject: [PATCH 0772/2056] bnxt_en: Return correct RSS indirection table entries to ethtool -x. Now that we have the logical indirection table, we can return these proper logical indices directly to ethtool -x instead of the physical IDs. Reported-by: Jakub Kicinski Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 995de937358a..1fe7c61042f4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1292,7 +1292,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, { struct bnxt *bp = netdev_priv(dev); struct bnxt_vnic_info *vnic; - int i = 0; + u32 i, tbl_size; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; @@ -1301,9 +1301,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; vnic = &bp->vnic_info[0]; - if (indir && vnic->rss_table) { - for (i = 0; i < HW_HASH_INDEX_SIZE; i++) - indir[i] = le16_to_cpu(vnic->rss_table[i]); + if (indir && bp->rss_indir_tbl) { + tbl_size = bnxt_get_rxfh_indir_size(dev); + for (i = 0; i < tbl_size; i++) + indir[i] = bp->rss_indir_tbl[i]; } if (key && vnic->rss_hash_key) From bd3191b5d87d5ebc1d4149bbbb42a64ec3d469bf Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 8 Jul 2020 07:53:59 -0400 Subject: [PATCH 0773/2056] bnxt_en: Implement ethtool -X to set indirection table. With the new infrastructure in place, we can now support the setting of the indirection table from ethtool. When changing channels, in a rare case that firmware cannot reserve the rings that were promised, we will still try to keep the RSS map and only revert to default when absolutely necessary. v4: Revert RSS map to default during ring change only when absolutely necessary. v3: Add warning messages when firmware cannot reserve the requested RX rings, and when the RSS table entries have to change to default. v2: When changing channels, if the RSS table size changes and RSS map is non-default, return error. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 +++++++++++++++- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 37 +++++++++++++++++++ 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fcb7bf28778f..f3e45f3de88e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4869,6 +4869,19 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp) memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); } +static u16 bnxt_get_max_rss_ring(struct bnxt *bp) +{ + u16 i, tbl_size, max_ring = 0; + + if (!bp->rss_indir_tbl) + return 0; + + tbl_size = bnxt_get_rxfh_indir_size(bp->dev); + for (i = 0; i < tbl_size; i++) + max_ring = max(max_ring, bp->rss_indir_tbl[i]); + return max_ring; +} + int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_CHIP_P5) @@ -6058,6 +6071,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp) rx = rx_rings << 1; cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings; bp->tx_nr_rings = tx; + + /* If we cannot reserve all the RX rings, reset the RSS map only + * if absolutely necessary + */ + if (rx_rings != bp->rx_nr_rings) { + netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n", + rx_rings, bp->rx_nr_rings); + if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) && + (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) != + bnxt_get_nr_rss_ctxs(bp, rx_rings) || + bnxt_get_max_rss_ring(bp) >= rx_rings)) { + netdev_warn(bp->dev, "RSS table entries reverting to default\n"); + bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED; + } + } bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; @@ -8262,7 +8290,8 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init) rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); } - bnxt_set_dflt_rss_indir_tbl(bp); + if (!netif_is_rxfh_configured(bp->dev)) + bnxt_set_dflt_rss_indir_tbl(bp); if (rc) { netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1fe7c61042f4..538c976200f7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -926,6 +926,13 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } + if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != + bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && + (dev->priv_flags & IFF_RXFH_CONFIGURED)) { + netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n"); + return -EINVAL; + } + if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1313,6 +1320,35 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, return 0; } +static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + if (hfunc && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (key) + return -EOPNOTSUPP; + + if (indir) { + u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev); + + for (i = 0; i < tbl_size; i++) + bp->rss_indir_tbl[i] = indir[i]; + pad = bp->rss_indir_tbl_entries - tbl_size; + if (pad) + memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + } + + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + rc = bnxt_open_nic(bp, false, false); + } + return rc; +} + static void bnxt_get_drvinfo(struct 
net_device *dev, struct ethtool_drvinfo *info) { @@ -3619,6 +3655,7 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_rxfh_indir_size = bnxt_get_rxfh_indir_size, .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, + .set_rxfh = bnxt_set_rxfh, .flash_device = bnxt_flash_device, .get_eeprom_len = bnxt_get_eeprom_len, .get_eeprom = bnxt_get_eeprom, From a196e96bb68fbc7a319f45df1d529b807216a03a Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Wed, 8 Jul 2020 07:54:00 -0400 Subject: [PATCH 0774/2056] bnxt_en: clean up VLAN feature bit handling The hardware VLAN offload feature on our NIC does not have separate knobs for handling customer and service tags on RX. Either offloading of both must be enabled or both must be disabled. Introduce definitions for the combined feature set in order to clean up the code and make this constraint more clear. Technically these features can be separately enabled on TX, however, since the default is to turn both on, the combined TX feature set is also introduced for code consistency. Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 34 +++++++++-------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 ++++ 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f3e45f3de88e..749dc7cf8d64 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u16 vlan_proto = tpa_info->metadata >> RX_CMP_FLAGS2_METADATA_TPID_SFT; u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; @@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && - (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; @@ -9932,24 +9932,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, /* Both CTAG and STAG VLAN accelaration on the RX side have to be * turned on or off together. 
*/ - vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX)) { - if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); + vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX; + if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) { + if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; else if (vlan_features) - features |= NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX; + features |= BNXT_HW_FEATURE_VLAN_ALL_RX; } #ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp)) { - if (bp->vf.vlan) { - features &= ~(NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_STAG_RX); - } - } + if (BNXT_VF(bp) && bp->vf.vlan) + features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX; #endif return features; } @@ -9972,7 +9964,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) if (bp->flags & BNXT_FLAG_NO_AGG_RINGS) flags &= ~BNXT_FLAG_TPA; - if (features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & BNXT_HW_FEATURE_VLAN_ALL_RX) flags |= BNXT_FLAG_STRIP_VLAN; if (features & NETIF_F_NTUPLE) @@ -12060,8 +12052,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX | + BNXT_HW_FEATURE_VLAN_ALL_TX; if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; @@ -12117,7 +12109,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_fw_init_one_p3(bp); - if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX) + if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX) bp->flags |= BNXT_FLAG_STRIP_VLAN; rc = bnxt_init_int_mode(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 58909130ca4d..13c40645f884 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1906,6 +1906,11 @@ struct bnxt { #define BNXT_PCIE_STATS_OFFSET(counter) \ (offsetof(struct pcie_ctx_hw_stats, counter) / 8) +#define BNXT_HW_FEATURE_VLAN_ALL_RX \ + (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) +#define BNXT_HW_FEATURE_VLAN_ALL_TX \ + (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX) + #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFF_DIAG_SUPPORT_OFFSET 0x5c From 1da63ddd0e155277bf613dfc7062af95d90452f2 Mon Sep 17 00:00:00 2001 From: Edwin Peer Date: Wed, 8 Jul 2020 07:54:01 -0400 Subject: [PATCH 0775/2056] bnxt_en: allow firmware to disable VLAN offloads Bare-metal use cases require giving firmware and the embedded application processor control over VLAN offloads. The driver should not attempt to override or utilize this feature in such scenarios since it will not work as expected. Signed-off-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 22 +++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 +++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 749dc7cf8d64..43956232b0a4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5218,6 +5218,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (flags & VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; + + /* Older P5 fw before EXT_HW_STATS support did not set + * VLAN_STRIP_CAP properly. + */ + if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) || + ((bp->flags & BNXT_FLAG_CHIP_P5) && + !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) + bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); if (bp->max_tpa_v2) bp->hw_ring_stats_size = @@ -7049,7 +7057,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) struct hwrm_func_qcaps_input req = {0}; struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u32 flags; + u32 flags, flags_ext; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); req.fid = cpu_to_le16(0xffff); @@ -7074,6 +7082,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY; if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD) bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD; + if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED)) + bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT; + + flags_ext = le32_to_cpu(resp->flags_ext); + if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -12052,8 +12066,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; - dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX | - BNXT_HW_FEATURE_VLAN_ALL_TX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX; + if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT) + dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX; if (BNXT_SUPPORTS_TPA(bp)) dev->hw_features |= NETIF_F_GRO_HW; dev->features |= dev->hw_features | NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 13c40645f884..d556e5660a02 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1716,6 +1716,9 @@ struct bnxt { #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_FW_CAP_SHARED_PORT_CFG 0x00400000 + #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 + #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 + #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; From 065e0d42a0a728d7f6c2aec7c9f3e5dc7b715394 Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Mon, 6 Jul 2020 20:42:32 -0700 Subject: [PATCH 0776/2056] ethtool: Add support for 100Gbps per lane link modes Define 100G, 200G and 400G link modes using 100Gbps per lane LR, ER and FR are defined as a single link mode because they are using same technology and by design 
are fully interoperable. EEPROM content indicates if the module is LR, ER, or FR, and the user space ethtool decoder is planned to support decoding these modes in the EEPROM. Signed-off-by: Meir Lichtinger CC: Andrew Lunn Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/phy/phy-core.c | 17 ++++++++++++++++- include/uapi/linux/ethtool.h | 15 +++++++++++++++ net/ethtool/common.c | 15 +++++++++++++++ net/ethtool/linkmodes.c | 15 +++++++++++++++ 4 files changed, 61 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 46bd68e9ecfa..ff8e14b01eeb 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -8,7 +8,7 @@ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 75, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. " "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -78,12 +78,22 @@ static const struct phy_setting settings[] = { PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full ), PHY_SETTING( 400000, FULL, 400000baseDR8_Full ), PHY_SETTING( 400000, FULL, 400000baseSR8_Full ), + PHY_SETTING( 400000, FULL, 400000baseCR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseKR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseDR4_Full ), + PHY_SETTING( 400000, FULL, 400000baseSR4_Full ), /* 200G */ PHY_SETTING( 200000, FULL, 200000baseCR4_Full ), PHY_SETTING( 200000, FULL, 200000baseKR4_Full ), PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ), PHY_SETTING( 200000, FULL, 200000baseDR4_Full ), PHY_SETTING( 200000, FULL, 200000baseSR4_Full ), + PHY_SETTING( 200000, FULL, 200000baseCR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseKR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseDR2_Full ), + PHY_SETTING( 200000, FULL, 200000baseSR2_Full ), /* 100G */ PHY_SETTING( 100000, FULL, 100000baseCR4_Full ), PHY_SETTING( 100000, FULL, 100000baseKR4_Full ), @@ -94,6 +104,11 @@ static const struct phy_setting settings[] = { PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ), PHY_SETTING( 100000, FULL, 100000baseDR2_Full ), PHY_SETTING( 100000, FULL, 100000baseSR2_Full ), + PHY_SETTING( 100000, FULL, 100000baseCR_Full ), + PHY_SETTING( 100000, FULL, 100000baseKR_Full ), + PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full ), + PHY_SETTING( 100000, FULL, 100000baseDR_Full ), + PHY_SETTING( 100000, FULL, 100000baseSR_Full ), /* 56G */ PHY_SETTING( 56000, FULL, 56000baseCR4_Full ), PHY_SETTING( 56000, FULL, 56000baseKR4_Full ), diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index d1413538ef30..60856e0f9618 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1600,6 +1600,21 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72, ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73, ETHTOOL_LINK_MODE_FEC_LLRS_BIT = 74, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT = 75, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT = 76, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT = 77, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT = 78, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT = 79, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT = 80, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT = 81, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82, + 
ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT = 83, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT = 84, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT = 85, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT = 86, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS }; diff --git a/net/ethtool/common.c b/net/ethtool/common.c index ce4dbae5a943..c54166713797 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -176,6 +176,21 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(400000, DR8, Full), __DEFINE_LINK_MODE_NAME(400000, CR8, Full), __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"), + __DEFINE_LINK_MODE_NAME(100000, KR, Full), + __DEFINE_LINK_MODE_NAME(100000, SR, Full), + __DEFINE_LINK_MODE_NAME(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_NAME(100000, DR, Full), + __DEFINE_LINK_MODE_NAME(100000, CR, Full), + __DEFINE_LINK_MODE_NAME(200000, KR2, Full), + __DEFINE_LINK_MODE_NAME(200000, SR2, Full), + __DEFINE_LINK_MODE_NAME(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_NAME(200000, DR2, Full), + __DEFINE_LINK_MODE_NAME(200000, CR2, Full), + __DEFINE_LINK_MODE_NAME(400000, KR4, Full), + __DEFINE_LINK_MODE_NAME(400000, SR4, Full), + __DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_NAME(400000, DR4, Full), + __DEFINE_LINK_MODE_NAME(400000, CR4, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index fd4f3e58c6f6..317a93129551 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -257,6 +257,21 @@ static const struct link_mode_info link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full), __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full), __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS), + __DEFINE_LINK_MODE_PARAMS(100000, KR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, SR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, DR, Full), + __DEFINE_LINK_MODE_PARAMS(100000, CR, Full), + __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full), + __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full), + __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full), + __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full), }; static const struct nla_policy From 12fdafb817c6cde87a4fa0e674e66b0226a0889d Mon Sep 17 00:00:00 2001 From: Meir Lichtinger Date: Mon, 6 Jul 2020 20:42:33 -0700 Subject: [PATCH 0777/2056] net/mlx5: Added support for 100Gbps per lane link modes This patch exposes new link modes using 100Gbps per lane, including 100G, 200G and 400G modes. Signed-off-by: Meir Lichtinger Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en/port.c | 3 +++ .../ethernet/mellanox/mlx5/core/en_ethtool.c | 21 ++++++++++++++++++- include/linux/mlx5/port.h | 3 +++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 2a8950b3056f..be83db63aca0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -76,6 +76,9 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = { [MLX5E_100GAUI_2_100GBASE_CR2_KR2] = 100000, [MLX5E_200GAUI_4_200GBASE_CR4_KR4] = 200000, [MLX5E_400GAUI_8] = 400000, + [MLX5E_100GAUI_1_100GBASE_CR_KR] = 100000, + [MLX5E_200GAUI_2_200GBASE_CR2_KR2] = 200000, + [MLX5E_400GAUI_4_400GBASE_CR4_KR4] = 400000, }; static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ec5658bbe3c5..6183bee7d21b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -194,6 +194,24 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext, + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_2_200GBASE_CR2_KR2, ext, + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_4_400GBASE_CR4_KR4, ext, + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT); } static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev, @@ -1012,7 +1030,8 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes) unsigned long modes[2]; for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) { - if (*ptys2ext_ethtool_table[i].advertised == 0) + if (ptys2ext_ethtool_table[i].advertised[0] == 0 && + ptys2ext_ethtool_table[i].advertised[1] == 0) continue; memset(modes, 0, sizeof(modes)); bitmap_and(modes, ptys2ext_ethtool_table[i].advertised, diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index de9a272c9f3d..2d45a6af52a4 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -104,8 +104,11 @@ enum mlx5e_ext_link_mode { MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, + MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, + MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, MLX5E_400GAUI_8 = 15, + MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, MLX5E_EXT_LINK_MODES_NUMBER, }; From 625eb8e85e913273cd9441329a82b4e3496b30cd Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Wed, 8 Jul 2020 13:08:27 +0200 Subject: [PATCH 0778/2056] bpf: Fix another bpftool segfault without skeleton code enabled emit_obj_refs_json needs 
to added the same as with emit_obj_refs_plain to prevent segfaults, similar to Commit "8ae4121bd89e bpf: Fix bpftool without skeleton code enabled"). See the error below: # ./bpftool -p prog { "error": "bpftool built without PID iterator support" },[{ "id": 2, "type": "cgroup_skb", "tag": "7be49e3934a125ba", "gpl_compatible": true, "loaded_at": 1594052789, "uid": 0, "bytes_xlated": 296, "jited": true, "bytes_jited": 203, "bytes_memlock": 4096, "map_ids": [2,3 Segmentation fault (core dumped) The same happens for ./bpftool -p map, as well as ./bpftool -j prog/map. Fixes: d53dee3fe013 ("tools/bpftool: Show info for processes holding BPF map/prog/link/btf FDs") Signed-off-by: Louis Peens Signed-off-by: Daniel Borkmann Reviewed-by: Simon Horman Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200708110827.7673-1-louis.peens@netronome.com --- tools/bpf/bpftool/pids.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index 7d5416667c85..c0d23ce4a6f4 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -20,6 +20,7 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) } void delete_obj_refs_table(struct obj_refs_table *table) {} void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {} +void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {} #else /* BPFTOOL_WITHOUT_SKELETONS */ From 3220fb667842a9725cbb71656f406eadb03c094b Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 7 Jul 2020 09:12:19 +0200 Subject: [PATCH 0779/2056] selftests/bpf: test_progs use another shell exit on non-actions This is a follow up adjustment to commit 6c92bd5cd465 ("selftests/bpf: Test_progs indicate to shell on non-actions"), that returns shell exit indication EXIT_FAILURE (value 1) when user selects a non-existing test. The problem with using EXIT_FAILURE is that a shell script cannot tell the difference between a non-existing test and the test failing. This patch uses value 2 as shell exit indication. (Aside note unrecognized option parameters use value 64). Fixes: 6c92bd5cd465 ("selftests/bpf: Test_progs indicate to shell on non-actions") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159410593992.1093222.90072558386094370.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 104e833d0087..65d3f8686e29 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -12,6 +12,8 @@ #include #include /* backtrace */ +#define EXIT_NO_TEST 2 + /* defined in test_progs.h */ struct test_env env = {}; @@ -740,7 +742,7 @@ int main(int argc, char **argv) close(env.saved_netns_fd); if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0) - return EXIT_FAILURE; + return EXIT_NO_TEST; return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; } From b8c50df0cb3eb9008f8372e4ff0317eee993b8d1 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 7 Jul 2020 09:12:25 +0200 Subject: [PATCH 0780/2056] selftests/bpf: test_progs avoid minus shell exit codes There are a number of places in test_progs that use minus-1 as the argument to exit(). This is confusing as a process exit status is masked to be a number between 0 and 255 as defined in man exit(3). 
Thus, users will see status 255 instead of minus-1. This patch uses the positive exit code 3 instead of minus-1. These cases are put in the same group of infrastructure setup errors. Fixes: fd27b1835e70 ("selftests/bpf: Reset process and thread affinity after each test/sub-test") Fixes: 811d7e375d08 ("bpf: selftests: Restore netns after each test") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159410594499.1093222.11080787853132708654.stgit@firesoul --- tools/testing/selftests/bpf/test_progs.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 65d3f8686e29..b1e4dadacd9b 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -13,6 +13,7 @@ #include /* backtrace */ #define EXIT_NO_TEST 2 +#define EXIT_ERR_SETUP_INFRA 3 /* defined in test_progs.h */ struct test_env env = {}; @@ -113,13 +114,13 @@ static void reset_affinity() { if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset process affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset); if (err < 0) { stdio_restore(); fprintf(stderr, "Failed to reset thread affinity: %d!\n", err); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } @@ -128,7 +129,7 @@ static void save_netns(void) { env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY); if (env.saved_netns_fd == -1) { perror("open(/proc/self/ns/net)"); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } @@ -137,7 +138,7 @@ static void restore_netns(void) { if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) { stdio_restore(); perror("setns(CLONE_NEWNS)"); - exit(-1); + exit(EXIT_ERR_SETUP_INFRA); } } From efd7fe68f0c6c9649757bf80cbc382fd21e764c9 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 8 Jul 2020 14:25:36 +0200 Subject: [PATCH 0781/2056] net: dsa: tag_rtl4_a: Implement Realtek 4 byte A tag This implements the known parts of the Realtek 4 byte tag protocol version 0xA, as found in the RTL8366RB DSA switch. It is designated as protocol version 0xA as a different Realtek 4 byte tag format with protocol version 0x9 is known to exist in the Realtek RTL8306 chips. The tag and switch chip lack public documentation, so the tag format has been reverse-engineered from packet dumps. As only ingress traffic has been available for analysis, an egress tag has not been possible to develop (even using educated guesses about bit fields) so this is as far as it gets. It is not known if the switch even supports egress tagging. Excessive attempts to figure out the egress tag format were made. When nothing else worked, I just tried all bit combinations with 0xannp where a is protocol and p is port. I looped through all values several times trying to get a response from ping, without any positive result. Using just these ingress tags, however, the switch functionality is vastly improved and the packets find their way into the destination port without any tricky VLAN configuration. On the D-Link DIR-685 the LAN ports now come up and respond to ping without any command line configuration so this is a real improvement for users. Egress packets need to be restricted to the proper target ports using VLAN, which the RTL8366RB DSA switch driver already sets up. 
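A rough standalone illustration, not part of the patch: the reverse-engineered 2-byte tag word that follows the 0x8899 Ethertype splits into a protocol nibble and a port number, mirroring the parsing added in tag_rtl4_a.c below; the helper name here is made up for the sketch.

#include <stdint.h>

/* hdr points at the 4-byte Realtek header: 0x88 0x99, then the tag word */
static void rtl4a_decode_tag(const uint8_t *hdr, uint8_t *prot, uint8_t *port)
{
        uint16_t w = ((uint16_t)hdr[2] << 8) | hdr[3];  /* big-endian tag word */

        *prot = (w >> 12) & 0x0f;   /* protocol, 0xa on the RTL8366RB */
        *port = w & 0xff;           /* ingress switch port */
}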
Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- include/net/dsa.h | 2 + net/dsa/Kconfig | 7 +++ net/dsa/Makefile | 1 + net/dsa/tag_rtl4_a.c | 130 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 140 insertions(+) create mode 100644 net/dsa/tag_rtl4_a.c diff --git a/include/net/dsa.h b/include/net/dsa.h index 4046ccd1945d..b28c95c76762 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -44,6 +44,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_KSZ8795_VALUE 14 #define DSA_TAG_PROTO_OCELOT_VALUE 15 #define DSA_TAG_PROTO_AR9331_VALUE 16 +#define DSA_TAG_PROTO_RTL4_A_VALUE 17 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -63,6 +64,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE, DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE, DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE, + DSA_TAG_PROTO_RTL4_A = DSA_TAG_PROTO_RTL4_A_VALUE, }; struct packet_type; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index d5bc6ac599ef..1f9b9b11008c 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -86,6 +86,13 @@ config NET_DSA_TAG_KSZ Say Y if you want to enable support for tagging frames for the Microchip 8795/9477/9893 families of switches. +config NET_DSA_TAG_RTL4_A + tristate "Tag driver for Realtek 4 byte protocol A tags" + help + Say Y or M if you want to enable support for tagging frames for the + Realtek switches with 4 byte protocol A tags, sich as found in + the Realtek RTL8366RB. + config NET_DSA_TAG_OCELOT tristate "Tag driver for Ocelot family of switches" select PACKING diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 108486cfdeef..4f47b2025ff5 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -11,6 +11,7 @@ obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o +obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c new file mode 100644 index 000000000000..7b63010fa87b --- /dev/null +++ b/net/dsa/tag_rtl4_a.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Handler for Realtek 4 byte DSA switch tags + * Currently only supports protocol "A" found in RTL8366RB + * Copyright (c) 2020 Linus Walleij + * + * This "proprietary tag" header looks like so: + * + * ------------------------------------------------- + * | MAC DA | MAC SA | 0x8899 | 2 bytes tag | Type | + * ------------------------------------------------- + * + * The 2 bytes tag form a 16 bit big endian word. The exact + * meaning has been guessed from packet dumps from ingress + * frames, as no working egress traffic has been available + * we do not know the format of the egress tags or if they + * are even supported. 
+ */ + +#include +#include + +#include "dsa_priv.h" + +#define RTL4_A_HDR_LEN 4 +#define RTL4_A_ETHERTYPE 0x8899 +#define RTL4_A_PROTOCOL_SHIFT 12 +/* + * 0x1 = Realtek Remote Control protocol (RRCP) + * 0x2/0x3 seems to be used for loopback testing + * 0x9 = RTL8306 DSA protocol + * 0xa = RTL8366RB DSA protocol + */ +#define RTL4_A_PROTOCOL_RTL8366RB 0xa + +static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + /* + * Just let it pass thru, we don't know if it is possible + * to tag a frame with the 0x8899 ethertype and direct it + * to a specific port, all attempts at reverse-engineering have + * ended up with the frames getting dropped. + * + * The VLAN set-up needs to restrict the frames to the right port. + * + * If you have documentation on the tagging format for RTL8366RB + * (tag type A) then please contribute. + */ + return skb; +} + +static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb, + struct net_device *dev, + struct packet_type *pt) +{ + u16 protport; + __be16 *p; + u16 etype; + u8 *tag; + u8 prot; + u8 port; + + if (unlikely(!pskb_may_pull(skb, RTL4_A_HDR_LEN))) + return NULL; + + /* The RTL4 header has its own custom Ethertype 0x8899 and that + * starts right at the beginning of the packet, after the src + * ethernet addr. Apparantly skb->data always points 2 bytes in, + * behind the Ethertype. + */ + tag = skb->data - 2; + p = (__be16 *)tag; + etype = ntohs(*p); + if (etype != RTL4_A_ETHERTYPE) { + /* Not custom, just pass through */ + netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype); + return skb; + } + p = (__be16 *)(tag + 2); + protport = ntohs(*p); + /* The 4 upper bits are the protocol */ + prot = (protport >> RTL4_A_PROTOCOL_SHIFT) & 0x0f; + if (prot != RTL4_A_PROTOCOL_RTL8366RB) { + netdev_err(dev, "unknown realtek protocol 0x%01x\n", prot); + return NULL; + } + port = protport & 0xff; + + skb->dev = dsa_master_find_slave(dev, 0, port); + if (!skb->dev) { + netdev_dbg(dev, "could not find slave for port %d\n", port); + return NULL; + } + + /* Remove RTL4 tag and recalculate checksum */ + skb_pull_rcsum(skb, RTL4_A_HDR_LEN); + + /* Move ethernet DA and SA in front of the data */ + memmove(skb->data - ETH_HLEN, + skb->data - ETH_HLEN - RTL4_A_HDR_LEN, + 2 * ETH_ALEN); + + skb->offload_fwd_mark = 1; + + return skb; +} + +static int rtl4a_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, + int *offset) +{ + *offset = RTL4_A_HDR_LEN; + /* Skip past the tag and fetch the encapsulated Ethertype */ + *proto = ((__be16 *)skb->data)[1]; + + return 0; +} + +static const struct dsa_device_ops rtl4a_netdev_ops = { + .name = "rtl4a", + .proto = DSA_TAG_PROTO_RTL4_A, + .xmit = rtl4a_tag_xmit, + .rcv = rtl4a_tag_rcv, + .flow_dissect = rtl4a_tag_flow_dissect, + .overhead = RTL4_A_HDR_LEN, +}; +module_dsa_tag_driver(rtl4a_netdev_ops); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A); From a20fafb92bd84b9ab585ef4743339c96865cada2 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 8 Jul 2020 14:25:37 +0200 Subject: [PATCH 0782/2056] net: dsa: rtl8366rb: Support the CPU DSA tag This activates the support to use the CPU tag to properly direct ingress traffic to the right port. Bit 15 in register RTL8368RB_CPU_CTRL_REG can be set to 1 to disable the insertion of the CPU tag which is what the code currently does. 
The bit 15 define calls this setting RTL8368RB_CPU_INSTAG which is confusing since the inverse meaning is implied: programmers may think that setting this bit to 1 will *enable* inserting the tag rather than disabling it, so rename this setting in bit 15 to RTL8368RB_CPU_NO_TAG which is more to the point. After this e.g. ping works out-of-the-box with the RTL8366RB. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/dsa/Kconfig | 1 + drivers/net/dsa/rtl8366rb.c | 31 ++++++++----------------------- 2 files changed, 9 insertions(+), 23 deletions(-) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index d0024cb30a7b..468b3c4273c5 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -70,6 +70,7 @@ config NET_DSA_QCA8K config NET_DSA_REALTEK_SMI tristate "Realtek SMI Ethernet switch family support" depends on NET_DSA + select NET_DSA_TAG_RTL4_A select FIXED_PHY select IRQ_DOMAIN select REALTEK_PHY diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c index fd1977590cb4..48f1ff746799 100644 --- a/drivers/net/dsa/rtl8366rb.c +++ b/drivers/net/dsa/rtl8366rb.c @@ -109,8 +109,8 @@ /* CPU port control reg */ #define RTL8368RB_CPU_CTRL_REG 0x0061 #define RTL8368RB_CPU_PORTS_MSK 0x00FF -/* Enables inserting custom tag length/type 0x8899 */ -#define RTL8368RB_CPU_INSTAG BIT(15) +/* Disables inserting custom tag length/type 0x8899 */ +#define RTL8368RB_CPU_NO_TAG BIT(15) #define RTL8366RB_SMAR0 0x0070 /* bits 0..15 */ #define RTL8366RB_SMAR1 0x0071 /* bits 16..31 */ @@ -844,16 +844,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds) if (ret) return ret; - /* Enable CPU port and enable inserting CPU tag + /* Enable CPU port with custom DSA tag 8899. * - * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour - * of the switch totally and it will start talking Realtek RRCP - * internally. It is probably possible to experiment with this, - * but then the kernel needs to understand and handle RRCP first. + * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this registers + * the custom tag is turned off. */ ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG, 0xFFFF, - RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port)); + BIT(smi->cpu_port)); if (ret) return ret; @@ -967,21 +965,8 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) { - /* For now, the RTL switches are handled without any custom tags. - * - * It is possible to turn on "custom tags" by removing the - * RTL8368RB_CPU_INSTAG flag when enabling the port but what it - * does is unfamiliar to DSA: ethernet frames of type 8899, the Realtek - * Remote Control Protocol (RRCP) start to appear on the CPU port of - * the device. So this is not the ordinary few extra bytes in the - * frame. Instead it appears that the switch starts to talk Realtek - * RRCP internally which means a pretty complex RRCP implementation - * decoding and responding the RRCP protocol is needed to exploit this. - * - * The OpenRRCP project (dormant since 2009) have reverse-egineered - * parts of the protocol. 
- */ - return DSA_TAG_PROTO_NONE; + /* This switch uses the 4 byte protocol A Realtek DSA tag */ + return DSA_TAG_PROTO_RTL4_A; } static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port, From 5cd24cbe7dca62089ac6228f1dd14729d7da6ed8 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Wed, 8 Jul 2020 13:46:31 -0400 Subject: [PATCH 0783/2056] bonding: deal with xfrm state in all modes and add more error-checking It's possible that device removal happens when the bond is in non-AB mode, and addition happens in AB mode, so bond_ipsec_del_sa() never gets called, which leaves security associations in an odd state if bond_ipsec_add_sa() then gets called after switching the bond into AB. Just call add and delete universally for all modes to keep things consistent. However, it's also possible that this code gets called when the system is shutting down, and the xfrm subsystem has already been disconnected from the bond device, so we need to do some error-checking and bail, lest we hit a null ptr deref. Fixes: a3b658cfb664 ("bonding: allow xfrm offload setup post-module-load") CC: Huy Nguyen CC: Saeed Mahameed CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 39 +++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2adf6ce20a38..f886d97c4359 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -383,9 +383,14 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, static int bond_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave = rtnl_dereference(bond->curr_active_slave); + struct bonding *bond; + struct slave *slave; + if (!bond_dev) + return -EINVAL; + + bond = netdev_priv(bond_dev); + slave = rtnl_dereference(bond->curr_active_slave); xs->xso.real_dev = slave->dev; bond->xs = xs; @@ -405,8 +410,14 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) static void bond_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *slave = rtnl_dereference(bond->curr_active_slave); + struct bonding *bond; + struct slave *slave; + + if (!bond_dev) + return; + + bond = netdev_priv(bond_dev); + slave = rtnl_dereference(bond->curr_active_slave); if (!slave) return; @@ -960,12 +971,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) if (old_active == new_active) return; - if (new_active) { #ifdef CONFIG_XFRM_OFFLOAD - if ((BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) && bond->xs) - bond_ipsec_del_sa(bond->xs); + if (old_active && bond->xs) + bond_ipsec_del_sa(bond->xs); #endif /* CONFIG_XFRM_OFFLOAD */ + if (new_active) { new_active->last_link_up = jiffies; if (new_active->link == BOND_LINK_BACK) { @@ -1028,13 +1039,6 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) bond_should_notify_peers(bond); } -#ifdef CONFIG_XFRM_OFFLOAD - if (old_active && bond->xs) { - xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); - bond_ipsec_add_sa(bond->xs); - } -#endif /* CONFIG_XFRM_OFFLOAD */ - call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, 
bond->dev); if (should_notify_peers) { bond->send_peer_notif--; @@ -1044,6 +1048,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } } +#ifdef CONFIG_XFRM_OFFLOAD + if (new_active && bond->xs) { + xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); + bond_ipsec_add_sa(bond->xs); + } +#endif /* CONFIG_XFRM_OFFLOAD */ + /* resend IGMP joins since active slave has changed or * all were sent on curr_active_slave. * resend only if bond is brought up with the affected From a6b9580b4936d2bb36c29e849ce5e9f10e599b0e Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 8 Jul 2020 18:24:21 -0300 Subject: [PATCH 0784/2056] dt-bindings: dp83867: Fix the type of device DP83867 is an Ethernet PHY, not a charger, so fix the documentation accordingly. Fixes: 74ac28f16486 ("dt-bindings: dp83867: Convert DP83867 to yaml") Signed-off-by: Fabio Estevam Acked-by: Dan Murphy Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/ti,dp83867.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.yaml b/Documentation/devicetree/bindings/net/ti,dp83867.yaml index 554dcd7a40a9..c6716ac6cbcc 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83867.yaml +++ b/Documentation/devicetree/bindings/net/ti,dp83867.yaml @@ -24,7 +24,7 @@ description: | IEEE 802.3 Standard Media Independent Interface (MII), the IEEE 802.3 Gigabit Media Independent Interface (GMII) or Reduced GMII (RGMII). - Specifications about the charger can be found at: + Specifications about the Ethernet PHY can be found at: https://www.ti.com/lit/gpn/dp83867ir properties: From 7d25e14eb2c0f07d10815a524542cdf681cb472c Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 8 Jul 2020 18:24:22 -0300 Subject: [PATCH 0785/2056] dt-bindings: dp83869: Fix the type of device DP83869 is an Ethernet PHY, not a charger, so fix the documentation accordingly. Fixes: 4d66c56f7efe ("dt-bindings: net: dp83869: Add TI dp83869 phy") Signed-off-by: Fabio Estevam Acked-by: Dan Murphy Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/ti,dp83869.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml index 71e90a3e4652..cf40b469c719 100644 --- a/Documentation/devicetree/bindings/net/ti,dp83869.yaml +++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml @@ -24,7 +24,7 @@ description: | conversions. The DP83869HM can also support Bridge Conversion from RGMII to SGMII and SGMII to RGMII. - Specifications about the charger can be found at: + Specifications about the Ethernet PHY can be found at: http://www.ti.com/lit/ds/symlink/dp83869hm.pdf properties: From bfc96656a766bd0566565d8a7b6232b2b036fdfb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:13 -0700 Subject: [PATCH 0786/2056] libbpf: Make BTF finalization strict With valid ELF and valid BTF, there is no reason (apart from bugs) why BTF finalization should fail. So make it strict and return error if it fails. This makes CO-RE relocation more reliable, as they are not going to be just silently skipped, if BTF finalization failed. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-2-andriin@fb.com --- tools/lib/bpf/libbpf.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 88a483627a2b..efd857ebd5ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2473,19 +2473,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) return 0; err = btf__finalize_data(obj, obj->btf); - if (!err) - return 0; - - pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - - if (libbpf_needs_btf(obj)) { - pr_warn("BTF is required, but is missing or corrupted.\n"); - return -ENOENT; + if (err) { + pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); + return err; } + return 0; } From 81372e121802fd57892a0b44d93cc747d9568627 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:14 -0700 Subject: [PATCH 0787/2056] libbpf: Add btf__set_fd() for more control over loaded BTF FD Add setter for BTF FD to allow application more fine-grained control in more advanced scenarios. Storing BTF FD inside `struct btf` provides little benefit and probably would be better done differently (e.g., btf__load() could just return FD on success), but we are stuck with this due to backwards compatibility. The main problem is that it's impossible to load BTF and then free user-space memory, but keep FD intact, because `struct btf` assumes ownership of that FD upon successful load and will attempt to close it during btf__free(). To allow callers (e.g., libbpf itself for BTF sanitization) to have more control over this, add btf__set_fd() to allow resetting the FD arbitrarily, if necessary. 
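A minimal sketch of the ownership-transfer pattern this setter enables (the same pattern the BTF sanitization patch below relies on); the function and variable names are illustrative, not libbpf API:

#include <bpf/btf.h>

/* keep the kernel FD from a temporary struct btf, then free the struct */
static int adopt_btf_fd(struct btf *orig, struct btf *tmp)
{
        int err = btf__load(tmp);        /* on success, tmp owns the FD */

        if (err)
                return err;
        btf__set_fd(orig, btf__fd(tmp)); /* orig keeps the loaded FD */
        btf__set_fd(tmp, -1);            /* detach it from tmp ... */
        btf__free(tmp);                  /* ... so freeing won't close it */
        return 0;
}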
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-3-andriin@fb.com --- tools/lib/bpf/btf.c | 7 ++++++- tools/lib/bpf/btf.h | 1 + tools/lib/bpf/libbpf.map | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index bfef3d606b54..c8861c9e3635 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -389,7 +389,7 @@ void btf__free(struct btf *btf) if (!btf) return; - if (btf->fd != -1) + if (btf->fd >= 0) close(btf->fd); free(btf->data); @@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf) return btf->fd; } +void btf__set_fd(struct btf *btf, int fd) +{ + btf->fd = fd; +} + const void *btf__get_raw_data(const struct btf *btf, __u32 *size) { *size = btf->data_size; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 06cd1731c154..173eff23c472 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); +LIBBPF_API void btf__set_fd(struct btf *btf, int fd); LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6544d2cd1ed6..c5d5c7664c3b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -288,4 +288,5 @@ LIBBPF_0.1.0 { bpf_map__value_size; bpf_program__autoload; bpf_program__set_autoload; + btf__set_fd; } LIBBPF_0.0.9; From 0f0e55d8247c0742a9b026fc097fcc605383b9db Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:15 -0700 Subject: [PATCH 0788/2056] libbpf: Improve BTF sanitization handling Change sanitization process to preserve original BTF, which might be used by libbpf itself for Kconfig externs, CO-RE relocs, etc, even if kernel is old and doesn't support BTF. To achieve that, if libbpf detects the need for BTF sanitization, it would clone original BTF, sanitize it in-place, attempt to load it into kernel, and if successful, will preserve loaded BTF FD in original `struct btf`, while freeing sanitized local copy. If kernel doesn't support any BTF, original btf and btf_ext will still be preserved to be used later for CO-RE relocation and other BTF-dependent libbpf features, which don't depend on kernel BTF support. The patch takes care not to specify BTF and BTF.ext features when loading BPF programs and/or maps, if it was detected that kernel doesn't support BTF features. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-4-andriin@fb.com --- tools/lib/bpf/libbpf.c | 103 +++++++++++++++++++++++------------------ 1 file changed, 58 insertions(+), 45 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index efd857ebd5ee..460e5790a95f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2338,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx) return false; } -static void bpf_object__sanitize_btf(struct bpf_object *obj) +static bool btf_needs_sanitization(struct bpf_object *obj) +{ + bool has_func_global = obj->caps.btf_func_global; + bool has_datasec = obj->caps.btf_datasec; + bool has_func = obj->caps.btf_func; + + return !has_func || !has_datasec || !has_func_global; +} + +static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) { bool has_func_global = obj->caps.btf_func_global; bool has_datasec = obj->caps.btf_datasec; bool has_func = obj->caps.btf_func; - struct btf *btf = obj->btf; struct btf_type *t; int i, j, vlen; - if (!obj->btf || (has_func && has_datasec && has_func_global)) - return; - for (i = 1; i <= btf__get_nr_types(btf); i++) { t = (struct btf_type *)btf__type_by_id(btf, i); @@ -2402,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj) } } -static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) -{ - if (!obj->btf_ext) - return; - - if (!obj->caps.btf_func) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } -} - static bool libbpf_needs_btf(const struct bpf_object *obj) { return obj->efile.btf_maps_shndx >= 0 || @@ -2530,30 +2524,50 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj) static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) { + struct btf *kern_btf = obj->btf; + bool btf_mandatory, sanitize; int err = 0; if (!obj->btf) return 0; - bpf_object__sanitize_btf(obj); - bpf_object__sanitize_btf_ext(obj); + sanitize = btf_needs_sanitization(obj); + if (sanitize) { + const void *orig_data; + void *san_data; + __u32 sz; - err = btf__load(obj->btf); - if (err) { - pr_warn("Error loading %s into kernel: %d.\n", - BTF_ELF_SEC, err); - btf__free(obj->btf); - obj->btf = NULL; - /* btf_ext can't exist without btf, so free it as well */ - if (obj->btf_ext) { - btf_ext__free(obj->btf_ext); - obj->btf_ext = NULL; - } + /* clone BTF to sanitize a copy and leave the original intact */ + orig_data = btf__get_raw_data(obj->btf, &sz); + san_data = malloc(sz); + if (!san_data) + return -ENOMEM; + memcpy(san_data, orig_data, sz); + kern_btf = btf__new(san_data, sz); + if (IS_ERR(kern_btf)) + return PTR_ERR(kern_btf); - if (kernel_needs_btf(obj)) - return err; + bpf_object__sanitize_btf(obj, kern_btf); } - return 0; + + err = btf__load(kern_btf); + if (sanitize) { + if (!err) { + /* move fd to libbpf's BTF */ + btf__set_fd(obj->btf, btf__fd(kern_btf)); + btf__set_fd(kern_btf, -1); + } + btf__free(kern_btf); + } + if (err) { + btf_mandatory = kernel_needs_btf(obj); + pr_warn("Error loading .BTF into kernel: %d. %s\n", err, + btf_mandatory ? "BTF is mandatory, can't proceed." 
+ : "BTF is optional, ignoring."); + if (!btf_mandatory) + err = 0; + } + return err; } static int bpf_object__elf_collect(struct bpf_object *obj) @@ -3777,7 +3791,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map) create_attr.btf_fd = 0; create_attr.btf_key_type_id = 0; create_attr.btf_value_type_id = 0; - if (obj->btf && !bpf_map_find_btf_info(obj, map)) { + if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; @@ -5361,18 +5375,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.kern_version = kern_version; load_attr.prog_ifindex = prog->prog_ifindex; } - /* if .BTF.ext was loaded, kernel supports associated BTF for prog */ - if (prog->obj->btf_ext) - btf_fd = bpf_object__btf_fd(prog->obj); - else - btf_fd = -1; - load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0; - load_attr.func_info = prog->func_info; - load_attr.func_info_rec_size = prog->func_info_rec_size; - load_attr.func_info_cnt = prog->func_info_cnt; - load_attr.line_info = prog->line_info; - load_attr.line_info_rec_size = prog->line_info_rec_size; - load_attr.line_info_cnt = prog->line_info_cnt; + /* specify func_info/line_info only if kernel supports them */ + btf_fd = bpf_object__btf_fd(prog->obj); + if (btf_fd >= 0 && prog->obj->caps.btf_func) { + load_attr.prog_btf_fd = btf_fd; + load_attr.func_info = prog->func_info; + load_attr.func_info_rec_size = prog->func_info_rec_size; + load_attr.func_info_cnt = prog->func_info_cnt; + load_attr.line_info = prog->line_info; + load_attr.line_info_rec_size = prog->line_info_rec_size; + load_attr.line_info_cnt = prog->line_info_cnt; + } load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; From fcda189a5133b6fe50eb63d1062a15be4df9e3de Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:16 -0700 Subject: [PATCH 0789/2056] selftests/bpf: Add test relying only on CO-RE and no recent kernel features Add a test that relies on CO-RE, but doesn't expect any of the recent features, not available on old kernels. This is useful for Travis CI tests running against very old kernels (e.g., libbpf has 4.9 kernel testing now), to verify that CO-RE still works, even if kernel itself doesn't support BTF yet, as long as there is .BTF embedded into vmlinux image by pahole. Given most of CO-RE doesn't require any kernel awareness of BTF, it is a useful test to validate that libbpf's BTF sanitization is working well even with ancient kernels. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-5-andriin@fb.com --- .../selftests/bpf/prog_tests/core_retro.c | 33 +++++++++++++++++++ .../selftests/bpf/progs/test_core_retro.c | 30 +++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/core_retro.c create mode 100644 tools/testing/selftests/bpf/progs/test_core_retro.c diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c new file mode 100644 index 000000000000..78e30d3a23d5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include +#include "test_core_retro.skel.h" + +void test_core_retro(void) +{ + int err, zero = 0, res, duration = 0; + struct test_core_retro *skel; + + /* load program */ + skel = test_core_retro__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) + goto out_close; + + /* attach probe */ + err = test_core_retro__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) + goto out_close; + + /* trigger */ + usleep(1); + + err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res); + if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) + goto out_close; + + CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid()); + +out_close: + test_core_retro__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_core_retro.c b/tools/testing/selftests/bpf/progs/test_core_retro.c new file mode 100644 index 000000000000..75c60c3c29cf --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_core_retro.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include +#include +#include + +struct task_struct { + int tgid; +} __attribute__((preserve_access_index)); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} results SEC(".maps"); + +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) +{ + struct task_struct *task = (void *)bpf_get_current_task(); + int tgid = BPF_CORE_READ(task, tgid); + int zero = 0; + + bpf_map_update_elem(&results, &zero, &tgid, 0); + + return 0; +} + +char _license[] SEC("license") = "GPL"; From 0e289487308236903b19273f2ddb4f0adf732b9e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:17 -0700 Subject: [PATCH 0790/2056] libbpf: Handle missing BPF_OBJ_GET_INFO_BY_FD gracefully in perf_buffer perf_buffer__new() is relying on BPF_OBJ_GET_INFO_BY_FD availability for few sanity checks. OBJ_GET_INFO for maps is actually much more recent feature than perf_buffer support itself, so this causes unnecessary problems on old kernels before BPF_OBJ_GET_INFO_BY_FD was added. This patch makes those sanity checks optional and just assumes best if command is not supported. If user specified something incorrectly (e.g., wrong map type), kernel will reject it later anyway, except user won't get a nice explanation as to why it failed. This seems like a good trade off for supporting perf_buffer on old kernels. 
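The same best-effort idea as a small user-space sketch (assumed helper name, not libbpf code): treat EINVAL from BPF_OBJ_GET_INFO_BY_FD as "kernel too old, assume the map is fine" and fail only on real errors:

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* > 0: looks like a perf event array, 0: wrong type, < 0: real error */
static int map_is_perf_event_array(int map_fd)
{
        struct bpf_map_info info;
        __u32 len = sizeof(info);

        memset(&info, 0, sizeof(info));
        if (bpf_obj_get_info_by_fd(map_fd, &info, &len)) {
                if (errno == EINVAL)    /* command not supported: assume best */
                        return 1;
                return -errno;          /* EBADF/EFAULT/E2BIG: real failure */
        }
        return info.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}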
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-6-andriin@fb.com --- tools/lib/bpf/libbpf.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 460e5790a95f..6602eb479596 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8591,7 +8591,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, struct perf_buffer_params *p) { const char *online_cpus_file = "/sys/devices/system/cpu/online"; - struct bpf_map_info map = {}; + struct bpf_map_info map; char msg[STRERR_BUFSIZE]; struct perf_buffer *pb; bool *online = NULL; @@ -8604,19 +8604,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, return ERR_PTR(-EINVAL); } + /* best-effort sanity checks */ + memset(&map, 0, sizeof(map)); map_info_len = sizeof(map); err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); if (err) { err = -errno; - pr_warn("failed to get map info for map FD %d: %s\n", - map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); - return ERR_PTR(err); - } - - if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { - pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", - map.name); - return ERR_PTR(-EINVAL); + /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return + * -EBADFD, -EFAULT, or -E2BIG on real error + */ + if (err != -EINVAL) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(err, msg, sizeof(msg))); + return ERR_PTR(err); + } + pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n", + map_fd); + } else { + if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { + pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n", + map.name); + return ERR_PTR(-EINVAL); + } } pb = calloc(1, sizeof(*pb)); @@ -8648,7 +8657,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, err = pb->cpu_cnt; goto error; } - if (map.max_entries < pb->cpu_cnt) + if (map.max_entries && map.max_entries < pb->cpu_cnt) pb->cpu_cnt = map.max_entries; } From 6984cbc6dfa280687367b9660d8c830518239851 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 7 Jul 2020 18:53:18 -0700 Subject: [PATCH 0791/2056] selftests/bpf: Switch perf_buffer test to tracepoint and skeleton Switch perf_buffer test to use skeleton to avoid use of bpf_prog_load() and make test a bit more succinct. Also switch BPF program to use tracepoint instead of kprobe, as that allows to support older kernels, which had tracepoint support before kprobe support in the form that libbpf expects (i.e., libbpf expects /sys/bus/event_source/devices/kprobe/type, which doesn't always exist on old kernels). 
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200708015318.3827358-7-andriin@fb.com --- .../selftests/bpf/prog_tests/perf_buffer.c | 42 ++++++------------- .../selftests/bpf/progs/test_perf_buffer.c | 4 +- 2 files changed, 14 insertions(+), 32 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c index a122ce3b360e..c33ec180b3f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c +++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c @@ -4,6 +4,7 @@ #include #include #include +#include "test_perf_buffer.skel.h" #include "bpf/libbpf_internal.h" /* AddressSanitizer sometimes crashes due to data dereference below, due to @@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_perf_buffer(void) { - int err, prog_fd, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; - const char *prog_name = "kprobe/sys_nanosleep"; - const char *file = "./test_perf_buffer.o"; + int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0; struct perf_buffer_opts pb_opts = {}; - struct bpf_map *perf_buf_map; + struct test_perf_buffer *skel; cpu_set_t cpu_set, cpu_seen; - struct bpf_program *prog; - struct bpf_object *obj; struct perf_buffer *pb; - struct bpf_link *link; bool *online; nr_cpus = libbpf_num_possible_cpus(); @@ -51,33 +47,21 @@ void test_perf_buffer(void) nr_on_cpus++; /* load program */ - err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd); - if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) { - obj = NULL; - goto out_close; - } - - prog = bpf_object__find_program_by_title(obj, prog_name); - if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name)) + skel = test_perf_buffer__open_and_load(); + if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; - /* load map */ - perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map"); - if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) - goto out_close; - - /* attach kprobe */ - link = bpf_program__attach_kprobe(prog, false /* retprobe */, - SYS_NANOSLEEP_KPROBE_NAME); - if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link))) + /* attach probe */ + err = test_perf_buffer__attach(skel); + if (CHECK(err, "attach_kprobe", "err %d\n", err)) goto out_close; /* set up perf buffer */ pb_opts.sample_cb = on_sample; pb_opts.ctx = &cpu_seen; - pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts); + pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts); if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) - goto out_detach; + goto out_close; /* trigger kprobe on every CPU */ CPU_ZERO(&cpu_seen); @@ -94,7 +78,7 @@ void test_perf_buffer(void) &cpu_set); if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", i, err)) - goto out_detach; + goto out_close; usleep(1); } @@ -110,9 +94,7 @@ void test_perf_buffer(void) out_free_pb: perf_buffer__free(pb); -out_detach: - bpf_link__destroy(link); out_close: - bpf_object__close(obj); + test_perf_buffer__destroy(skel); free(online); } diff --git a/tools/testing/selftests/bpf/progs/test_perf_buffer.c b/tools/testing/selftests/bpf/progs/test_perf_buffer.c index ad59c4c9aba8..8207a2dc2f9d 100644 --- a/tools/testing/selftests/bpf/progs/test_perf_buffer.c +++ b/tools/testing/selftests/bpf/progs/test_perf_buffer.c @@ -12,8 +12,8 @@ struct { __uint(value_size, sizeof(int)); } perf_buf_map SEC(".maps"); 
-SEC("kprobe/sys_nanosleep") -int BPF_KPROBE(handle_sys_nanosleep_entry) +SEC("tp/raw_syscalls/sys_enter") +int handle_sys_enter(void *ctx) { int cpu = bpf_get_smp_processor_id(); From f548a476268d621846bb0146af861bb56250ae37 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Wed, 8 Jul 2020 18:58:49 -0400 Subject: [PATCH 0792/2056] bonding: don't need RTNL for ipsec helpers The bond_ipsec_* helpers don't need RTNL, and can potentially get called without it being held, so switch from rtnl_dereference() to rcu_dereference() to access bond struct data. Lightly tested with xfrm bonding, no problems found, should address the syzkaller bug referenced below. Reported-by: syzbot+582c98032903dcc04816@syzkaller.appspotmail.com CC: Huy Nguyen CC: Saeed Mahameed CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: "David S. Miller" CC: Jeff Kirsher CC: Jakub Kicinski CC: Steffen Klassert CC: Herbert Xu CC: netdev@vger.kernel.org CC: intel-wired-lan@lists.osuosl.org Signed-off-by: Jarod Wilson Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f886d97c4359..e2d491c4378c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -390,7 +390,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; bond = netdev_priv(bond_dev); - slave = rtnl_dereference(bond->curr_active_slave); + slave = rcu_dereference(bond->curr_active_slave); xs->xso.real_dev = slave->dev; bond->xs = xs; @@ -417,7 +417,7 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) return; bond = netdev_priv(bond_dev); - slave = rtnl_dereference(bond->curr_active_slave); + slave = rcu_dereference(bond->curr_active_slave); if (!slave) return; @@ -442,7 +442,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; struct bonding *bond = netdev_priv(bond_dev); - struct slave *curr_active = rtnl_dereference(bond->curr_active_slave); + struct slave *curr_active = rcu_dereference(bond->curr_active_slave); struct net_device *slave_dev = curr_active->dev; if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) From 1475ee0ac9a16dd5df23ca8abe1039eb6086eb66 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:29 +0800 Subject: [PATCH 0793/2056] xfrm: add is_ipip to struct xfrm_input_afinfo This patch is to add a new member is_ipip to struct xfrm_input_afinfo, to allow another group family of callback functions to be registered with is_ipip set. This will be used for doing a callback for struct xfrm(6)_tunnel of ipip/ipv6 tunnels in xfrm_input() by calling xfrm_rcv_cb(), which is needed by ipip/ipv6 tunnels' support in ip(6)_vti and xfrm interface in the next patches. 
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 3 ++- net/xfrm/xfrm_input.c | 24 +++++++++++++----------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e20b2b27ec48..4666bc9e59ab 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -373,7 +373,8 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); struct xfrm_input_afinfo { - unsigned int family; + u8 family; + bool is_ipip; int (*callback)(struct sk_buff *skb, u8 protocol, int err); }; diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index bd984ff17c2d..37456d022cfa 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -42,7 +42,7 @@ struct xfrm_trans_cb { #define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0])) static DEFINE_SPINLOCK(xfrm_input_afinfo_lock); -static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1]; +static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1]; static struct gro_cells gro_cells; static struct net_device xfrm_napi_dev; @@ -53,14 +53,14 @@ int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo) { int err = 0; - if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo))) + if (WARN_ON(afinfo->family > AF_INET6)) return -EAFNOSUPPORT; spin_lock_bh(&xfrm_input_afinfo_lock); - if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL)) + if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) err = -EEXIST; else - rcu_assign_pointer(xfrm_input_afinfo[afinfo->family], afinfo); + rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo); spin_unlock_bh(&xfrm_input_afinfo_lock); return err; } @@ -71,11 +71,11 @@ int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) int err = 0; spin_lock_bh(&xfrm_input_afinfo_lock); - if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) { - if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo)) + if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) { + if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo)) err = -EINVAL; else - RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->family], NULL); + RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL); } spin_unlock_bh(&xfrm_input_afinfo_lock); synchronize_rcu(); @@ -83,15 +83,15 @@ int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo) } EXPORT_SYMBOL(xfrm_input_unregister_afinfo); -static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family) +static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip) { const struct xfrm_input_afinfo *afinfo; - if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo))) + if (WARN_ON_ONCE(family > AF_INET6)) return NULL; rcu_read_lock(); - afinfo = rcu_dereference(xfrm_input_afinfo[family]); + afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]); if (unlikely(!afinfo)) rcu_read_unlock(); return afinfo; @@ -100,9 +100,11 @@ static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol, int err) { + bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6); + const struct xfrm_input_afinfo *afinfo; int ret; - const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family); + afinfo = xfrm_input_get_afinfo(family, is_ipip); if 
(!afinfo) return -EAFNOSUPPORT; From 6df2db5d37ba3df8c80d90c15f1e20480be43f75 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:30 +0800 Subject: [PATCH 0794/2056] tunnel4: add cb_handler to struct xfrm_tunnel This patch is to register a callback function tunnel4_rcv_cb with is_ipip set in a xfrm_input_afinfo object for tunnel4 and tunnel64. It will be called by xfrm_rcv_cb() from xfrm_input() when family is AF_INET and proto is IPPROTO_IPIP or IPPROTO_IPV6. v1->v2: - Fix a sparse warning caused by the missing "__rcu", as Jakub noticed. - Handle the err returned by xfrm_input_register_afinfo() in tunnel4_init/fini(), as Sabrina noticed. v2->v3: - Add "#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)" to fix the build error when xfrm is disabled, reported by kbuild test robot. Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv4/tunnel4.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 4666bc9e59ab..c1ec6294d773 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1416,6 +1416,7 @@ struct xfrm6_protocol { /* XFRM tunnel handlers. */ struct xfrm_tunnel { int (*handler)(struct sk_buff *skb); + int (*cb_handler)(struct sk_buff *skb, int err); int (*err_handler)(struct sk_buff *skb, u32 info); struct xfrm_tunnel __rcu *next; diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index c4b2ccbeba04..e44aaf41a138 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c @@ -110,6 +110,33 @@ static int tunnel4_rcv(struct sk_buff *skb) return 0; } +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +static int tunnel4_rcv_cb(struct sk_buff *skb, u8 proto, int err) +{ + struct xfrm_tunnel __rcu *head; + struct xfrm_tunnel *handler; + int ret; + + head = (proto == IPPROTO_IPIP) ? tunnel4_handlers : tunnel64_handlers; + + for_each_tunnel_rcu(head, handler) { + if (handler->cb_handler) { + ret = handler->cb_handler(skb, err); + if (ret <= 0) + return ret; + } + } + + return 0; +} + +static const struct xfrm_input_afinfo tunnel4_input_afinfo = { + .family = AF_INET, + .is_ipip = true, + .callback = tunnel4_rcv_cb, +}; +#endif + #if IS_ENABLED(CONFIG_IPV6) static int tunnel64_rcv(struct sk_buff *skb) { @@ -230,6 +257,18 @@ static int __init tunnel4_init(void) #endif goto err; } +#endif +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + if (xfrm_input_register_afinfo(&tunnel4_input_afinfo)) { + inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); +#if IS_ENABLED(CONFIG_IPV6) + inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6); +#endif +#if IS_ENABLED(CONFIG_MPLS) + inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS); +#endif + goto err; + } #endif return 0; @@ -240,6 +279,10 @@ static int __init tunnel4_init(void) static void __exit tunnel4_fini(void) { +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + if (xfrm_input_unregister_afinfo(&tunnel4_input_afinfo)) + pr_err("tunnel4 close: can't remove input afinfo\n"); +#endif #if IS_ENABLED(CONFIG_MPLS) if (inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS)) pr_err("tunnelmpls4 close: can't remove protocol\n"); From 86afc7031826147407e96412668d343e0f1bd6fd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:31 +0800 Subject: [PATCH 0795/2056] tunnel6: add tunnel6_input_afinfo for ipip and ipv6 tunnels This patch is to register a callback function tunnel6_rcv_cb with is_ipip set in a xfrm_input_afinfo object for tunnel6 and tunnel46. 
It will be called by xfrm_rcv_cb() from xfrm_input() when family is AF_INET6 and proto is IPPROTO_IPIP or IPPROTO_IPV6. v1->v2: - Fix a sparse warning caused by the missing "__rcu", as Jakub noticed. - Handle the err returned by xfrm_input_register_afinfo() in tunnel6_init/fini(), as Sabrina noticed. v2->v3: - Add "#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL)" to fix the build error when xfrm is disabled, reported by kbuild test robot Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 1 + net/ipv6/tunnel6.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c1ec6294d773..83a532dda1bd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1425,6 +1425,7 @@ struct xfrm_tunnel { struct xfrm6_tunnel { int (*handler)(struct sk_buff *skb); + int (*cb_handler)(struct sk_buff *skb, int err); int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info); struct xfrm6_tunnel __rcu *next; diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c index 06c02ebe6b9b..00e8d8b1c9a7 100644 --- a/net/ipv6/tunnel6.c +++ b/net/ipv6/tunnel6.c @@ -155,6 +155,33 @@ static int tunnel6_rcv(struct sk_buff *skb) return 0; } +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +static int tunnel6_rcv_cb(struct sk_buff *skb, u8 proto, int err) +{ + struct xfrm6_tunnel __rcu *head; + struct xfrm6_tunnel *handler; + int ret; + + head = (proto == IPPROTO_IPV6) ? tunnel6_handlers : tunnel46_handlers; + + for_each_tunnel_rcu(head, handler) { + if (handler->cb_handler) { + ret = handler->cb_handler(skb, err); + if (ret <= 0) + return ret; + } + } + + return 0; +} + +static const struct xfrm_input_afinfo tunnel6_input_afinfo = { + .family = AF_INET6, + .is_ipip = true, + .callback = tunnel6_rcv_cb, +}; +#endif + static int tunnel46_rcv(struct sk_buff *skb) { struct xfrm6_tunnel *handler; @@ -245,11 +272,25 @@ static int __init tunnel6_init(void) inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP); return -EAGAIN; } +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + if (xfrm_input_register_afinfo(&tunnel6_input_afinfo)) { + pr_err("%s: can't add input afinfo\n", __func__); + inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6); + inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP); + if (xfrm6_tunnel_mpls_supported()) + inet6_del_protocol(&tunnelmpls6_protocol, IPPROTO_MPLS); + return -EAGAIN; + } +#endif return 0; } static void __exit tunnel6_fini(void) { +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + if (xfrm_input_unregister_afinfo(&tunnel6_input_afinfo)) + pr_err("%s: can't remove input afinfo\n", __func__); +#endif if (inet6_del_protocol(&tunnel46_protocol, IPPROTO_IPIP)) pr_err("%s: can't remove protocol\n", __func__); if (inet6_del_protocol(&tunnel6_protocol, IPPROTO_IPV6)) From 87e66b9682d7067eb7db08040dae36b608a4d971 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:32 +0800 Subject: [PATCH 0796/2056] ip_vti: support IPIP tunnel processing with .cb_handler With tunnel4_input_afinfo added, IPIP tunnel processing in ip_vti can be easily done with .cb_handler. So replace the processing by calling ip_tunnel_rcv() with it. v1->v2: - no change. v2-v3: - enable it only when CONFIG_INET_XFRM_TUNNEL is defined, to fix the build error, reported by kbuild test robot. 
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 51 +++++++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 30 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 1d9c8cff5ac3..68177f065117 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -91,32 +91,6 @@ static int vti_rcv_proto(struct sk_buff *skb) return vti_rcv(skb, 0, false); } -static int vti_rcv_tunnel(struct sk_buff *skb) -{ - struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); - const struct iphdr *iph = ip_hdr(skb); - struct ip_tunnel *tunnel; - - tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, - iph->saddr, iph->daddr, 0); - if (tunnel) { - struct tnl_ptk_info tpi = { - .proto = htons(ETH_P_IP), - }; - - if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) - goto drop; - if (iptunnel_pull_header(skb, 0, tpi.proto, false)) - goto drop; - return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); - } - - return -EINVAL; -drop: - kfree_skb(skb); - return 0; -} - static int vti_rcv_cb(struct sk_buff *skb, int err) { unsigned short family; @@ -495,11 +469,22 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { .priority = 100, }; -static struct xfrm_tunnel ipip_handler __read_mostly = { +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +static int vti_rcv_tunnel(struct sk_buff *skb) +{ + XFRM_SPI_SKB_CB(skb)->family = AF_INET; + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); + + return vti_input(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr, 0, false); +} + +static struct xfrm_tunnel vti_ipip_handler __read_mostly = { .handler = vti_rcv_tunnel, + .cb_handler = vti_rcv_cb, .err_handler = vti4_err, .priority = 0, }; +#endif static int __net_init vti_init_net(struct net *net) { @@ -669,10 +654,12 @@ static int __init vti_init(void) if (err < 0) goto xfrm_proto_comp_failed; +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) msg = "ipip tunnel"; - err = xfrm4_tunnel_register(&ipip_handler, AF_INET); + err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET); if (err < 0) goto xfrm_tunnel_failed; +#endif msg = "netlink interface"; err = rtnl_link_register(&vti_link_ops); @@ -682,8 +669,10 @@ static int __init vti_init(void) return err; rtnl_link_failed: - xfrm4_tunnel_deregister(&ipip_handler, AF_INET); +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); xfrm_tunnel_failed: +#endif xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); xfrm_proto_comp_failed: xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); @@ -699,7 +688,9 @@ static int __init vti_init(void) static void __exit vti_fini(void) { rtnl_link_unregister(&vti_link_ops); - xfrm4_tunnel_deregister(&ipip_handler, AF_INET); +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); +#endif xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); From e6ce64570f2451684b4f9bcbaee6c40c4a7dff82 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:33 +0800 Subject: [PATCH 0797/2056] ip_vti: support IPIP6 tunnel processing For IPIP6 tunnel processing, the functions called will be the same as that for IPIP tunnel's. So reuse it and register it with family == AF_INET6. 
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 68177f065117..c0b97b8f6fbd 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -658,7 +658,12 @@ static int __init vti_init(void) msg = "ipip tunnel"; err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET); if (err < 0) - goto xfrm_tunnel_failed; + goto xfrm_tunnel_ipip_failed; +#if IS_ENABLED(CONFIG_IPV6) + err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET6); + if (err < 0) + goto xfrm_tunnel_ipip6_failed; +#endif #endif msg = "netlink interface"; @@ -670,8 +675,12 @@ static int __init vti_init(void) rtnl_link_failed: #if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_ENABLED(CONFIG_IPV6) + xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6); +xfrm_tunnel_ipip6_failed: +#endif xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); -xfrm_tunnel_failed: +xfrm_tunnel_ipip_failed: #endif xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); xfrm_proto_comp_failed: @@ -689,6 +698,9 @@ static void __exit vti_fini(void) { rtnl_link_unregister(&vti_link_ops); #if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_ENABLED(CONFIG_IPV6) + xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6); +#endif xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); #endif xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); From 08622869ed3f167db9b2250ab1bb055f55293401 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:34 +0800 Subject: [PATCH 0798/2056] ip6_vti: support IP6IP6 tunnel processing with .cb_handler Similar to IPIP tunnel's processing, this patch is to support IP6IP6 tunnel processing with .cb_handler. v1->v2: - no change. v2-v3: - enable it only when CONFIG_INET6_XFRM_TUNNEL is defined, to fix the build error, reported by kbuild test robot. 
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 1147f647b9a0..39efe41f7b48 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1218,6 +1218,26 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { .priority = 100, }; +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +static int vti6_rcv_tunnel(struct sk_buff *skb) +{ + const xfrm_address_t *saddr; + __be32 spi; + + saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr; + spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr); + + return vti6_input_proto(skb, IPPROTO_IPV6, spi, 0); +} + +static struct xfrm6_tunnel vti_ipv6_handler __read_mostly = { + .handler = vti6_rcv_tunnel, + .cb_handler = vti6_rcv_cb, + .err_handler = vti6_err, + .priority = 0, +}; +#endif + /** * vti6_tunnel_init - register protocol and reserve needed resources * @@ -1243,6 +1263,12 @@ static int __init vti6_tunnel_init(void) err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + msg = "ipv6 tunnel"; + err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET6); + if (err < 0) + goto vti_tunnel_failed; +#endif msg = "netlink interface"; err = rtnl_link_register(&vti6_link_ops); @@ -1252,6 +1278,10 @@ static int __init vti6_tunnel_init(void) return 0; rtnl_link_failed: +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); +vti_tunnel_failed: +#endif xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP); xfrm_proto_comp_failed: xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); @@ -1270,6 +1300,9 @@ static int __init vti6_tunnel_init(void) static void __exit vti6_tunnel_cleanup(void) { rtnl_link_unregister(&vti6_link_ops); +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); +#endif xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP); xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); From 2ab110cbb0c0cb05c64f37f42b78f5bc11699b0e Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:35 +0800 Subject: [PATCH 0799/2056] ip6_vti: support IP6IP tunnel processing For IP6IP tunnel processing, the functions called will be the same as that for IP6IP6 tunnel's. So reuse it and register it with family == AF_INET. 
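With two registrations of the same handler now stacked, the error-unwind labels in the diff below follow the usual kernel shape: each successful step gets its own label, failures fall through the labels in reverse order, and module exit repeats the same teardown. Schematically (my_handler stands for the xfrm6_tunnel object sketched in the earlier note; config guards omitted):

static int __init my_init(void)
{
	int err;

	err = xfrm6_tunnel_register(&my_handler, AF_INET6);	/* IPv6 in IPv6 */
	if (err < 0)
		goto ipv6_failed;

	err = xfrm6_tunnel_register(&my_handler, AF_INET);	/* IPv4 in IPv6 */
	if (err < 0)
		goto ip6ip_failed;

	return 0;

ip6ip_failed:
	xfrm6_tunnel_deregister(&my_handler, AF_INET6);
ipv6_failed:
	return err;
}

static void __exit my_exit(void)
{
	/* Teardown mirrors the setup order, reversed. */
	xfrm6_tunnel_deregister(&my_handler, AF_INET);
	xfrm6_tunnel_deregister(&my_handler, AF_INET6);
}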
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 39efe41f7b48..dfa93bc857d2 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1267,7 +1267,10 @@ static int __init vti6_tunnel_init(void) msg = "ipv6 tunnel"; err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET6); if (err < 0) - goto vti_tunnel_failed; + goto vti_tunnel_ipv6_failed; + err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET); + if (err < 0) + goto vti_tunnel_ip6ip_failed; #endif msg = "netlink interface"; @@ -1279,8 +1282,10 @@ static int __init vti6_tunnel_init(void) rtnl_link_failed: #if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET); +vti_tunnel_ip6ip_failed: err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); -vti_tunnel_failed: +vti_tunnel_ipv6_failed: #endif xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP); xfrm_proto_comp_failed: @@ -1301,6 +1306,7 @@ static void __exit vti6_tunnel_cleanup(void) { rtnl_link_unregister(&vti6_link_ops); #if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET); xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); #endif xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP); From d5a7a5057387d79b91a6e2fd78a76ccd53f91e6c Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:36 +0800 Subject: [PATCH 0800/2056] ipcomp: assign if_id to child tunnel from parent tunnel The child tunnel if_id will be used for xfrm interface's lookup when processing the IP(6)IP(6) packets in the next patches. Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv4/ipcomp.c | 1 + net/ipv6/ipcomp6.c | 1 + 2 files changed, 2 insertions(+) diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index 59bfa3825810..b42683212c65 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -72,6 +72,7 @@ static struct xfrm_state *ipcomp_tunnel_create(struct xfrm_state *x) t->props.flags = x->props.flags; t->props.extra_flags = x->props.extra_flags; memcpy(&t->mark, &x->mark, sizeof(t->mark)); + t->if_id = x->if_id; if (xfrm_init_state(t)) goto error; diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index 99668bfebd85..daef890460b7 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -91,6 +91,7 @@ static struct xfrm_state *ipcomp6_tunnel_create(struct xfrm_state *x) t->props.mode = x->props.mode; memcpy(t->props.saddr.a6, x->props.saddr.a6, sizeof(struct in6_addr)); memcpy(&t->mark, &x->mark, sizeof(t->mark)); + t->if_id = x->if_id; if (xfrm_init_state(t)) goto error; From d7b360c2869f9ce2418510d14baf0f9696fcf1e9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:37 +0800 Subject: [PATCH 0801/2056] xfrm: interface: support IP6IP6 and IP6IP tunnels processing with .cb_handler Similar to ip6_vti, IP6IP6 and IP6IP tunnels processing can easily be done with .cb_handler for xfrm interface. v1->v2: - no change. v2-v3: - enable it only when CONFIG_INET6_XFRM_TUNNEL is defined, to fix the build error, reported by kbuild test robot. 
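The one-line if_id copy in the ipcomp patch above is what makes the xfrm-interface support below work end to end: once the IP6IP6/IP6IP packet has passed xfrm input, the interface driver's .cb_handler selects the receiving device by the state's if_id, so the child IPIP state created by ipcomp has to carry the same if_id as its parent. A rough sketch of such a callback follows; my_xfrmi_lookup() is hypothetical, standing in for the driver's per-netns interface lookup, and the return-value conventions are simplified:

static int my_rcv_cb(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct net_device *dev;

	if (err)
		return 0;

	/* Pick the xfrm interface whose if_id matches the state's if_id. */
	dev = my_xfrmi_lookup(xs_net(x), x->if_id);
	if (!dev)
		return -ENODEV;

	skb->dev = dev;
	return 0;
}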
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index c407ecbc5d46..b9ef496d3d7c 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -798,6 +798,26 @@ static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = { .priority = 10, }; +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +static int xfrmi6_rcv_tunnel(struct sk_buff *skb) +{ + const xfrm_address_t *saddr; + __be32 spi; + + saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr; + spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr); + + return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL); +} + +static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = { + .handler = xfrmi6_rcv_tunnel, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi6_err, + .priority = -1, +}; +#endif + static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = { .handler = xfrm4_rcv, .input_handler = xfrm_input, @@ -866,9 +886,23 @@ static int __init xfrmi6_init(void) err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6); + if (err < 0) + goto xfrm_tunnel_ipv6_failed; + err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET); + if (err < 0) + goto xfrm_tunnel_ip6ip_failed; +#endif return 0; +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +xfrm_tunnel_ip6ip_failed: + xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6); +xfrm_tunnel_ipv6_failed: + xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); +#endif xfrm_proto_comp_failed: xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH); xfrm_proto_ah_failed: @@ -879,6 +913,10 @@ static int __init xfrmi6_init(void) static void xfrmi6_fini(void) { +#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) + xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET); + xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6); +#endif xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP); From da9bbf0598c9e66b8a46ceabaa6172596795acf2 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 6 Jul 2020 20:01:38 +0800 Subject: [PATCH 0802/2056] xfrm: interface: support IPIP and IPIP6 tunnels processing with .cb_handler Similar to ip_vti, IPIP and IPIP6 tunnels processing can easily be done with .cb_handler for xfrm interface. v1->v2: - no change. v2-v3: - enable it only when CONFIG_INET_XFRM_TUNNEL is defined, to fix the build error, reported by kbuild test robot. 
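On the build guard mentioned in the changelog: IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) is true for both =y and =m, and wrapping the handler together with every call that references it keeps the object file free of references to the xfrm tunnel infrastructure when that support is not built. In schematic form (placeholder names; my_rcv_cb/my_err are the callbacks from the earlier sketches, and the real registration sites are in the diff below):

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
static int my_rcv_tunnel(struct sk_buff *skb)
{
	/* IPIP/IPIP6 carry no SPI; the outer source address is passed in
	 * its place for the state lookup, as the diff below does.
	 */
	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
}

static struct xfrm_tunnel my_ipip_handler __read_mostly = {
	.handler	= my_rcv_tunnel,
	.cb_handler	= my_rcv_cb,
	.err_handler	= my_err,
	.priority	= -1,
};
#endif

static int __init my_init(void)
{
	int err = 0;

#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL)
	/* Compiled out together with the handler above when the xfrm
	 * tunnel core is not available.
	 */
	err = xfrm4_tunnel_register(&my_ipip_handler, AF_INET);
#endif
	return err;
}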
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index b9ef496d3d7c..a79eb49a4e0d 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -842,6 +842,20 @@ static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = { .priority = 10, }; +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +static int xfrmi4_rcv_tunnel(struct sk_buff *skb) +{ + return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr); +} + +static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = { + .handler = xfrmi4_rcv_tunnel, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi4_err, + .priority = -1, +}; +#endif + static int __init xfrmi4_init(void) { int err; @@ -855,9 +869,23 @@ static int __init xfrmi4_init(void) err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET); + if (err < 0) + goto xfrm_tunnel_ipip_failed; + err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET6); + if (err < 0) + goto xfrm_tunnel_ipip6_failed; +#endif return 0; +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +xfrm_tunnel_ipip6_failed: + xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET); +xfrm_tunnel_ipip_failed: + xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); +#endif xfrm_proto_comp_failed: xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH); xfrm_proto_ah_failed: @@ -868,6 +896,10 @@ static int __init xfrmi4_init(void) static void xfrmi4_fini(void) { +#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) + xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET6); + xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET); +#endif xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP); From 5ca670e58dc4452b8f47158f0e1238d7d756e8b0 Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Thu, 9 Jul 2020 06:48:55 +0000 Subject: [PATCH 0803/2056] net: enetc: use eth_broadcast_addr() to assign broadcast This patch is to use eth_broadcast_addr() to assign broadcast address insetad of memset(). Signed-off-by: Xu Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index b8b336179d82..a9939550fa8f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -511,7 +511,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv, cbd.addr[0] = lower_32_bits(dma); cbd.addr[1] = upper_32_bits(dma); - memset(si_data->dmac, 0xff, ETH_ALEN); + eth_broadcast_addr(si_data->dmac); si_data->vid_vidm_tg = cpu_to_le16(ENETC_CBDR_SID_VID_MASK + ((0x3 << 14) | ENETC_CBDR_SID_VIDM)); From 3f935c75eb52dd968351dba824adf466fb9c9429 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:39 +0200 Subject: [PATCH 0804/2056] inet_diag: support for wider protocol numbers After commit bf9765145b85 ("sock: Make sk_protocol a 16-bit value") the current size of 'sdiag_protocol' is not sufficient to represent the possible protocol values. 
This change introduces a new inet diag request attribute to let user space specify the relevant protocol number using u32 values. The attribute is parsed by inet diag core on get/dump command and the extended protocol value, if available, is preferred to 'sdiag_protocol' to lookup the diag handler. The parse attributed are exposed to all the diag handlers via the cb->data. Note that inet_diag_dump_one_icsk() is left unmodified, as it will not be used by protocol using the extended attribute. Suggested-by: David S. Miller Co-developed-by: Christoph Paasch Signed-off-by: Christoph Paasch Acked-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- include/uapi/linux/inet_diag.h | 1 + net/core/sock.c | 1 + net/ipv4/inet_diag.c | 65 +++++++++++++++++++++++++--------- 3 files changed, 50 insertions(+), 17 deletions(-) diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index e6f183ee8417..5ba122c1949a 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -65,6 +65,7 @@ enum { INET_DIAG_REQ_NONE, INET_DIAG_REQ_BYTECODE, INET_DIAG_REQ_SK_BPF_STORAGES, + INET_DIAG_REQ_PROTOCOL, __INET_DIAG_REQ_MAX, }; diff --git a/net/core/sock.c b/net/core/sock.c index f5b5fdd61c88..de26fe4ea19f 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3566,6 +3566,7 @@ int sock_load_diag_module(int family, int protocol) #ifdef CONFIG_INET if (family == AF_INET && protocol != IPPROTO_RAW && + protocol < MAX_INET_PROTOS && !rcu_access_pointer(inet_protos[protocol])) return -ENOENT; #endif diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 125f4f8a36b4..4a98dd736270 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -52,6 +52,11 @@ static DEFINE_MUTEX(inet_diag_table_mutex); static const struct inet_diag_handler *inet_diag_lock_handler(int proto) { + if (proto < 0 || proto >= IPPROTO_MAX) { + mutex_lock(&inet_diag_table_mutex); + return ERR_PTR(-ENOENT); + } + if (!inet_diag_table[proto]) sock_load_diag_module(AF_INET, proto); @@ -181,6 +186,28 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); +static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr **req_nlas) +{ + struct nlattr *nla; + int remaining; + + nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) { + int type = nla_type(nla); + + if (type < __INET_DIAG_REQ_MAX) + req_nlas[type] = nla; + } +} + +static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req, + const struct inet_diag_dump_data *data) +{ + if (data->req_nlas[INET_DIAG_REQ_PROTOCOL]) + return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]); + return req->sdiag_protocol; +} + #define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, @@ -198,7 +225,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, void *info = NULL; cb_data = cb->data; - handler = inet_diag_table[req->sdiag_protocol]; + handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)]; BUG_ON(!handler); nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, @@ -539,20 +566,25 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk); static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb, const struct nlmsghdr *nlh, + int hdrlen, const struct inet_diag_req_v2 *req) { const struct inet_diag_handler *handler; - int err; + struct inet_diag_dump_data dump_data; + int err, 
protocol; - handler = inet_diag_lock_handler(req->sdiag_protocol); + memset(&dump_data, 0, sizeof(dump_data)); + inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas); + protocol = inet_diag_get_protocol(req, &dump_data); + + handler = inet_diag_lock_handler(protocol); if (IS_ERR(handler)) { err = PTR_ERR(handler); } else if (cmd == SOCK_DIAG_BY_FAMILY) { - struct inet_diag_dump_data empty_dump_data = {}; struct netlink_callback cb = { .nlh = nlh, .skb = in_skb, - .data = &empty_dump_data, + .data = &dump_data, }; err = handler->dump_one(&cb, req); } else if (cmd == SOCK_DESTROY && handler->destroy) { @@ -1103,13 +1135,16 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_icsk); static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *r) { + struct inet_diag_dump_data *cb_data = cb->data; const struct inet_diag_handler *handler; u32 prev_min_dump_alloc; - int err = 0; + int protocol, err = 0; + + protocol = inet_diag_get_protocol(r, cb_data); again: prev_min_dump_alloc = cb->min_dump_alloc; - handler = inet_diag_lock_handler(r->sdiag_protocol); + handler = inet_diag_lock_handler(protocol); if (!IS_ERR(handler)) handler->dump(skb, cb, r); else @@ -1139,19 +1174,13 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) struct inet_diag_dump_data *cb_data; struct sk_buff *skb = cb->skb; struct nlattr *nla; - int rem, err; + int err; cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL); if (!cb_data) return -ENOMEM; - nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen), - nlmsg_attrlen(nlh, hdrlen), rem) { - int type = nla_type(nla); - - if (type < __INET_DIAG_REQ_MAX) - cb_data->req_nlas[type] = nla; - } + inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas); nla = cb_data->inet_diag_nla_bc; if (nla) { @@ -1237,7 +1266,8 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb, req.idiag_states = rc->idiag_states; req.id = rc->id; - return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req); + return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, + sizeof(struct inet_diag_req), &req); } static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) @@ -1279,7 +1309,8 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h) return netlink_dump_start(net->diag_nlsk, skb, h, &c); } - return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h)); + return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen, + nlmsg_data(h)); } static From 96d890daad05a3e47e914451f07b79275b325c95 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:40 +0200 Subject: [PATCH 0805/2056] mptcp: add msk interations helper mptcp_token_iter_next() allow traversing all the MPTCP sockets inside the token container belonging to the given network namespace with a quite standard iterator semantic. That will be used by the next patch, but keep the API generic, as we plan to use this later for PM's sake. Additionally export mptcp_token_get_sock(), as it also will be used by the diag module. Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/mptcp/protocol.h | 2 ++ net/mptcp/token.c | 61 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 39bfec3f1586..e5baaef5ec89 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -391,6 +391,8 @@ int mptcp_token_new_connect(struct sock *sk); void mptcp_token_accept(struct mptcp_subflow_request_sock *r, struct mptcp_sock *msk); struct mptcp_sock *mptcp_token_get_sock(u32 token); +struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, + long *s_num); void mptcp_token_destroy(struct mptcp_sock *msk); void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn); diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 66a4990bd897..7d8106026081 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -238,6 +238,66 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token) rcu_read_unlock(); return msk; } +EXPORT_SYMBOL_GPL(mptcp_token_get_sock); + +/** + * mptcp_token_iter_next - iterate over the token container from given pos + * @net: namespace to be iterated + * @s_slot: start slot number + * @s_num: start number inside the given lock + * + * This function returns the first mptcp connection structure found inside the + * token container starting from the specified position, or NULL. + * + * On successful iteration, the iterator is move to the next position and the + * the acquires a reference to the returned socket. + */ +struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, + long *s_num) +{ + struct mptcp_sock *ret = NULL; + struct hlist_nulls_node *pos; + int slot, num; + + for (slot = *s_slot; slot <= token_mask; *s_num = 0, slot++) { + struct token_bucket *bucket = &token_hash[slot]; + struct sock *sk; + + num = 0; + + if (hlist_nulls_empty(&bucket->msk_chain)) + continue; + + rcu_read_lock(); + sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) { + ++num; + if (!net_eq(sock_net(sk), net)) + continue; + + if (num <= *s_num) + continue; + + if (!refcount_inc_not_zero(&sk->sk_refcnt)) + continue; + + if (!net_eq(sock_net(sk), net)) { + sock_put(sk); + continue; + } + + ret = mptcp_sk(sk); + rcu_read_unlock(); + goto out; + } + rcu_read_unlock(); + } + +out: + *s_slot = slot; + *s_num = num; + return ret; +} +EXPORT_SYMBOL_GPL(mptcp_token_iter_next); /** * mptcp_token_destroy_request - remove mptcp connection/token @@ -312,7 +372,6 @@ void __init mptcp_token_init(void) EXPORT_SYMBOL_GPL(mptcp_token_new_request); EXPORT_SYMBOL_GPL(mptcp_token_new_connect); EXPORT_SYMBOL_GPL(mptcp_token_accept); -EXPORT_SYMBOL_GPL(mptcp_token_get_sock); EXPORT_SYMBOL_GPL(mptcp_token_destroy_request); EXPORT_SYMBOL_GPL(mptcp_token_destroy); #endif From ac3b45f6095452a9731f8825be1513d326dbfa15 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:41 +0200 Subject: [PATCH 0806/2056] mptcp: add MPTCP socket diag interface exposes basic inet socket attribute, plus some MPTCP socket fields comprising PM status and MPTCP-level sequence numbers. Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/uapi/linux/mptcp.h | 17 ++++ net/mptcp/Kconfig | 4 + net/mptcp/Makefile | 2 + net/mptcp/mptcp_diag.c | 169 +++++++++++++++++++++++++++++++++++++ 4 files changed, 192 insertions(+) create mode 100644 net/mptcp/mptcp_diag.c diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index 5f2c77082d9e..9762660df741 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -86,4 +86,21 @@ enum { __MPTCP_PM_CMD_AFTER_LAST }; +#define MPTCP_INFO_FLAG_FALLBACK _BITUL(0) +#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1) + +struct mptcp_info { + __u8 mptcpi_subflows; + __u8 mptcpi_add_addr_signal; + __u8 mptcpi_add_addr_accepted; + __u8 mptcpi_subflows_max; + __u8 mptcpi_add_addr_signal_max; + __u8 mptcpi_add_addr_accepted_max; + __u32 mptcpi_flags; + __u32 mptcpi_token; + __u64 mptcpi_write_seq; + __u64 mptcpi_snd_una; + __u64 mptcpi_rcv_nxt; +}; + #endif /* _UAPI_MPTCP_H */ diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig index af84fce70bb0..698bc3525160 100644 --- a/net/mptcp/Kconfig +++ b/net/mptcp/Kconfig @@ -13,6 +13,10 @@ config MPTCP if MPTCP +config INET_MPTCP_DIAG + depends on INET_DIAG + def_tristate INET_DIAG + config MPTCP_IPV6 bool "MPTCP: IPv6 support for Multipath TCP" select IPV6 diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index c53f9b845523..2360cbd27d59 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -4,6 +4,8 @@ obj-$(CONFIG_MPTCP) += mptcp.o mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o +obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o + mptcp_crypto_test-objs := crypto_test.o mptcp_token_test-objs := token_test.o obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o mptcp_token_test.o diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c new file mode 100644 index 000000000000..5f390a97f556 --- /dev/null +++ b/net/mptcp/mptcp_diag.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP socket monitoring support + * + * Copyright (c) 2020 Red Hat + * + * Author: Paolo Abeni + */ + +#include +#include +#include +#include +#include +#include "protocol.h" + +static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, + struct netlink_callback *cb, + const struct inet_diag_req_v2 *req, + struct nlattr *bc, bool net_admin) +{ + if (!inet_diag_bc_sk(bc, sk)) + return 0; + + return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI, + net_admin); +} + +static int mptcp_diag_dump_one(struct netlink_callback *cb, + const struct inet_diag_req_v2 *req) +{ + struct sk_buff *in_skb = cb->skb; + struct mptcp_sock *msk = NULL; + struct sk_buff *rep; + int err = -ENOENT; + struct net *net; + struct sock *sk; + + net = sock_net(in_skb->sk); + msk = mptcp_token_get_sock(req->id.idiag_cookie[0]); + if (!msk) + goto out_nosk; + + err = -ENOMEM; + sk = (struct sock *)msk; + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) + + inet_diag_msg_attrs_size() + + nla_total_size(sizeof(struct mptcp_info)) + + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64, + GFP_KERNEL); + if (!rep) + goto out; + + err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0, + netlink_net_capable(in_skb, CAP_NET_ADMIN)); + if (err < 0) { + WARN_ON(err == -EMSGSIZE); + kfree_skb(rep); + goto out; + } + err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid, + MSG_DONTWAIT); + if (err > 0) + err = 0; +out: + sock_put(sk); + +out_nosk: + return err; +} + +static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, + const 
struct inet_diag_req_v2 *r) +{ + bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN); + struct net *net = sock_net(skb->sk); + struct inet_diag_dump_data *cb_data; + struct mptcp_sock *msk; + struct nlattr *bc; + + cb_data = cb->data; + bc = cb_data->inet_diag_nla_bc; + + while ((msk = mptcp_token_iter_next(net, &cb->args[0], &cb->args[1])) != + NULL) { + struct inet_sock *inet = (struct inet_sock *)msk; + struct sock *sk = (struct sock *)msk; + int ret = 0; + + if (!(r->idiag_states & (1 << sk->sk_state))) + goto next; + if (r->sdiag_family != AF_UNSPEC && + sk->sk_family != r->sdiag_family) + goto next; + if (r->id.idiag_sport != inet->inet_sport && + r->id.idiag_sport) + goto next; + if (r->id.idiag_dport != inet->inet_dport && + r->id.idiag_dport) + goto next; + + ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin); +next: + sock_put(sk); + if (ret < 0) { + /* will retry on the same position */ + cb->args[1]--; + break; + } + cond_resched(); + } +} + +static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *_info) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + struct mptcp_info *info = _info; + u32 flags = 0; + bool slow; + u8 val; + + r->idiag_rqueue = sk_rmem_alloc_get(sk); + r->idiag_wqueue = sk_wmem_alloc_get(sk); + if (!info) + return; + + slow = lock_sock_fast(sk); + info->mptcpi_subflows = READ_ONCE(msk->pm.subflows); + info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled); + info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted); + info->mptcpi_subflows_max = READ_ONCE(msk->pm.subflows_max); + val = READ_ONCE(msk->pm.add_addr_signal_max); + info->mptcpi_add_addr_signal_max = val; + val = READ_ONCE(msk->pm.add_addr_accept_max); + info->mptcpi_add_addr_accepted_max = val; + if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) + flags |= MPTCP_INFO_FLAG_FALLBACK; + if (READ_ONCE(msk->can_ack)) + flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED; + info->mptcpi_flags = flags; + info->mptcpi_token = READ_ONCE(msk->token); + info->mptcpi_write_seq = READ_ONCE(msk->write_seq); + info->mptcpi_snd_una = atomic64_read(&msk->snd_una); + info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq); + unlock_sock_fast(sk, slow); +} + +static const struct inet_diag_handler mptcp_diag_handler = { + .dump = mptcp_diag_dump, + .dump_one = mptcp_diag_dump_one, + .idiag_get_info = mptcp_diag_get_info, + .idiag_type = IPPROTO_MPTCP, + .idiag_info_size = sizeof(struct mptcp_info), +}; + +static int __init mptcp_diag_init(void) +{ + return inet_diag_register(&mptcp_diag_handler); +} + +static void __exit mptcp_diag_exit(void) +{ + inet_diag_unregister(&mptcp_diag_handler); +} + +module_init(mptcp_diag_init); +module_exit(mptcp_diag_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-262 /* AF_INET - IPPROTO_MPTCP */); From df62f2ec3df698e16bdc3dd44de1d337a9eac6b3 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 9 Jul 2020 15:12:42 +0200 Subject: [PATCH 0807/2056] selftests/mptcp: add diag interface tests basic functional test, triggering the msk diag interface code. Require appropriate iproute2 support, skip elsewhere. Reviewed-by: Mat Martineau Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/mptcp/Makefile | 2 +- tools/testing/selftests/net/mptcp/diag.sh | 121 ++++++++++++++++++ .../selftests/net/mptcp/mptcp_connect.c | 22 +++- 3 files changed, 140 insertions(+), 5 deletions(-) create mode 100755 tools/testing/selftests/net/mptcp/diag.sh diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index f50976ee7d44..aa254aefc2c3 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -5,7 +5,7 @@ KSFT_KHDR_INSTALL := 1 CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include -TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh +TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh TEST_GEN_FILES = mptcp_connect pm_nl_ctl diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh new file mode 100755 index 000000000000..39edce4f541c --- /dev/null +++ b/tools/testing/selftests/net/mptcp/diag.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +rndh=$(printf %x $sec)-$(mktemp -u XXXXXX) +ns="ns1-$rndh" +ksft_skip=4 +test_cnt=1 +ret=0 +pids=() + +flush_pids() +{ + # mptcp_connect in join mode will sleep a bit before completing, + # give it some time + sleep 1.1 + + for pid in ${pids[@]}; do + [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1 + done + pids=() +} + +cleanup() +{ + ip netns del $ns + for pid in ${pids[@]}; do + [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1 + done +} + +ip -Version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without ip tool" + exit $ksft_skip +fi +ss -h | grep -q MPTCP +if [ $? -ne 0 ];then + echo "SKIP: ss tool does not support MPTCP" + exit $ksft_skip +fi + +__chk_nr() +{ + local condition="$1" + local expected=$2 + local msg nr + + shift 2 + msg=$* + nr=$(ss -inmHMN $ns | $condition) + + printf "%-50s" "$msg" + if [ $nr != $expected ]; then + echo "[ fail ] expected $expected found $nr" + ret=$test_cnt + else + echo "[ ok ]" + fi + test_cnt=$((test_cnt+1)) +} + +chk_msk_nr() +{ + __chk_nr "grep -c token:" $* +} + +chk_msk_fallback_nr() +{ + __chk_nr "grep -c fallback" $* +} + +chk_msk_remote_key_nr() +{ + __chk_nr "grep -c remote_key" $* +} + + +trap cleanup EXIT +ip netns add $ns +ip -n $ns link set dev lo up + +echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null & +sleep 0.1 +pids[0]=$! +chk_msk_nr 0 "no msk on netns creation" + +echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null & +sleep 0.1 +pids[1]=$! +chk_msk_nr 2 "after MPC handshake " +chk_msk_remote_key_nr 2 "....chk remote_key" +chk_msk_fallback_nr 0 "....chk no fallback" +flush_pids + + +echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null & +pids[0]=$! +sleep 0.1 +echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null & +pids[1]=$! +sleep 0.1 +chk_msk_fallback_nr 1 "check fallback" +flush_pids + +NR_CLIENTS=100 +for I in `seq 1 $NR_CLIENTS`; do + echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null & + pids[$((I*2))]=$! +done +sleep 0.1 + +for I in `seq 1 $NR_CLIENTS`; do + echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null & + pids[$((I*2 + 1))]=$! 
+done +sleep 1.5 + +chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present" +flush_pids + +exit $ret diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index cedee5b952ba..cad6f73a5fd0 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -36,6 +37,7 @@ extern int optind; static int poll_timeout = 10 * 1000; static bool listen_mode; +static bool quit; enum cfg_mode { CFG_MODE_POLL, @@ -52,11 +54,12 @@ static int pf = AF_INET; static int cfg_sndbuf; static int cfg_rcvbuf; static bool cfg_join; +static int cfg_wait; static void die_usage(void) { fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]" - "[-l] connect_address\n"); + "[-l] [-w sec] connect_address\n"); fprintf(stderr, "\t-6 use ipv6\n"); fprintf(stderr, "\t-t num -- set poll timeout to num\n"); fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n"); @@ -65,9 +68,15 @@ static void die_usage(void) fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n"); fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n"); fprintf(stderr, "\t-u -- check mptcp ulp\n"); + fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n"); exit(1); } +static void handle_signal(int nr) +{ + quit = true; +} + static const char *getxinfo_strerr(int err) { if (err == EAI_SYSTEM) @@ -418,8 +427,8 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd) } /* leave some time for late join/announce */ - if (cfg_join) - usleep(400000); + if (cfg_wait) + usleep(cfg_wait); close(peerfd); return 0; @@ -812,11 +821,12 @@ static void parse_opts(int argc, char **argv) { int c; - while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:")) != -1) { + while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:w:")) != -1) { switch (c) { case 'j': cfg_join = true; cfg_mode = CFG_MODE_POLL; + cfg_wait = 400000; break; case 'l': listen_mode = true; @@ -850,6 +860,9 @@ static void parse_opts(int argc, char **argv) case 'R': cfg_rcvbuf = parse_int(optarg); break; + case 'w': + cfg_wait = atoi(optarg)*1000000; + break; } } @@ -865,6 +878,7 @@ int main(int argc, char *argv[]) { init_rng(); + signal(SIGUSR1, handle_signal); parse_opts(argc, argv); if (tcpulp_audit) From e3cbdaf146159d2e708e51d168fd5fab793f766b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 8 Jul 2020 19:37:23 +0100 Subject: [PATCH 0808/2056] net: systemport: fix double shift of a vlan_tci by VLAN_PRIO_SHIFT Currently the u16 skb->vlan_tci is being right shifted twice by VLAN_PRIO_SHIFT, once in the macro skb_vlan_tag_get_pri and explicitly by VLAN_PRIO_SHIFT afterwards. The combined shift amount is larger than the u16 so the end result is always zero. Remove the second explicit shift as this is extraneous. Fixes: 6e9fdb60d362 ("net: systemport: Add support for VLAN transmit acceleration") Addresses-Coverity: ("Operands don't affect result") Signed-off-by: Colin Ian King Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b470551bae4f..dfed9ade6950 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1250,8 +1250,7 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, memset(tsb, 0, sizeof(*tsb)); if (skb_vlan_tag_present(skb)) { - tsb->pcp_dei_vid = (skb_vlan_tag_get_prio(skb) >> - VLAN_PRIO_SHIFT & PCP_DEI_MASK); + tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK; tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT; } From faea30ed717d78e890f3fc0d96ceb6aba2baa4c4 Mon Sep 17 00:00:00 2001 From: kernel test robot Date: Fri, 10 Jul 2020 00:06:18 +0800 Subject: [PATCH 0809/2056] net: phy: mscc: fix ptr_ret.cocci warnings drivers/net/phy/mscc/mscc_ptp.c:1496:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Fixes: 7d272e63e097 ("net: phy: mscc: timestamping and PHC support") CC: Antoine Tenart Signed-off-by: kernel test robot Signed-off-by: David S. Miller --- drivers/net/phy/mscc/mscc_ptp.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index ef3441747348..b97ee79f3cdf 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1494,10 +1494,7 @@ static int __vsc8584_init_ptp(struct phy_device *phydev) vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps, &phydev->mdio.dev); - if (IS_ERR(vsc8531->ptp->ptp_clock)) - return PTR_ERR(vsc8531->ptp->ptp_clock); - - return 0; + return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock); } void vsc8584_config_ts_intr(struct phy_device *phydev) From 10a429bab4462581bbda3fd7f41d4ec0ddc5e682 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:14 +0300 Subject: [PATCH 0810/2056] devlink: Move set attribute of devlink_port_attrs to devlink_port The struct devlink_port_attrs holds the attributes of devlink_port. The 'set' field is not devlink_port's attribute as opposed to most of the others. Move 'set' to be devlink_port's field called 'attrs_set'. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/devlink.h | 4 ++-- net/core/devlink.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 428f55f8197c..28f8d92c5741 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -65,8 +65,7 @@ struct devlink_port_pci_vf_attrs { }; struct devlink_port_attrs { - u8 set:1, - split:1, + u8 split:1, switch_port:1; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; @@ -90,6 +89,7 @@ struct devlink_port { enum devlink_port_type desired_type; void *type_dev; struct devlink_port_attrs attrs; + u8 attrs_set:1; struct delayed_work type_warn_dw; }; diff --git a/net/core/devlink.c b/net/core/devlink.c index 6ae36808c152..f28ae63cdb6b 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -528,7 +528,7 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, { struct devlink_port_attrs *attrs = &devlink_port->attrs; - if (!attrs->set) + if (!devlink_port->attrs_set) return 0; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; @@ -7518,7 +7518,7 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, if (WARN_ON(devlink_port->registered)) return -EEXIST; - attrs->set = true; + devlink_port->attrs_set = true; attrs->flavour = flavour; if (switch_id) { attrs->switch_port = true; @@ -7626,7 +7626,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, struct devlink_port_attrs *attrs = &devlink_port->attrs; int n = 0; - if (!attrs->set) + if (!devlink_port->attrs_set) return -EOPNOTSUPP; switch (attrs->flavour) { From 46737a194945e540e3e2eb1fc870207928a9c2eb Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:15 +0300 Subject: [PATCH 0811/2056] devlink: Move switch_port attribute of devlink_port_attrs to devlink_port The struct devlink_port_attrs holds the attributes of devlink_port. Similarly to the previous patch, 'switch_port' attribute is another exception. Move 'switch_port' to be devlink_port's field. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- include/net/devlink.h | 6 +++--- net/core/devlink.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 28f8d92c5741..de4b5dcdb4a5 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -65,8 +65,7 @@ struct devlink_port_pci_vf_attrs { }; struct devlink_port_attrs { - u8 split:1, - switch_port:1; + u8 split:1; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { @@ -89,7 +88,8 @@ struct devlink_port { enum devlink_port_type desired_type; void *type_dev; struct devlink_port_attrs attrs; - u8 attrs_set:1; + u8 attrs_set:1, + switch_port:1; struct delayed_work type_warn_dw; }; diff --git a/net/core/devlink.c b/net/core/devlink.c index f28ae63cdb6b..452b2f8a054e 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7521,13 +7521,13 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, devlink_port->attrs_set = true; attrs->flavour = flavour; if (switch_id) { - attrs->switch_port = true; + devlink_port->switch_port = true; if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) switch_id_len = MAX_PHYS_ITEM_ID_LEN; memcpy(attrs->switch_id.id, switch_id, switch_id_len); attrs->switch_id.id_len = switch_id_len; } else { - attrs->switch_port = false; + devlink_port->switch_port = false; } return 0; } @@ -9461,7 +9461,7 @@ int devlink_compat_switch_id_get(struct net_device *dev, * any devlink lock as only permanent values are accessed. */ devlink_port = netdev_to_devlink_port(dev); - if (!devlink_port || !devlink_port->attrs.switch_port) + if (!devlink_port || !devlink_port->switch_port) return -EOPNOTSUPP; memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid)); From 71ad8d55f8e5ea101069b552422f392655e2ffb6 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:16 +0300 Subject: [PATCH 0812/2056] devlink: Replace devlink_port_attrs_set parameters with a struct Currently, devlink_port_attrs_set accepts a long list of parameters, that most of them are devlink port's attributes. Use the devlink_port_attrs struct to replace the relevant parameters. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/broadcom/bnxt/bnxt_devlink.c | 9 ++-- drivers/net/ethernet/intel/ice/ice_devlink.c | 6 ++- .../ethernet/mellanox/mlx5/core/en/devlink.c | 19 ++++--- .../net/ethernet/mellanox/mlx5/core/en_rep.c | 16 +++--- drivers/net/ethernet/mellanox/mlxsw/core.c | 11 ++-- .../net/ethernet/netronome/nfp/nfp_devlink.c | 11 ++-- .../ethernet/pensando/ionic/ionic_devlink.c | 5 +- drivers/net/netdevsim/dev.c | 10 ++-- include/net/devlink.h | 20 ++++--- net/core/devlink.c | 54 ++++--------------- net/dsa/dsa2.c | 17 +++--- 11 files changed, 82 insertions(+), 96 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 2bd610fafc58..3a854195d5b0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -691,6 +691,7 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) int bnxt_dl_register(struct bnxt *bp) { + struct devlink_port_attrs attrs = {}; struct devlink *dl; int rc; @@ -719,9 +720,11 @@ int bnxt_dl_register(struct bnxt *bp) if (!BNXT_PF(bp)) return 0; - devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - bp->pf.port_id, false, 0, bp->dsn, - sizeof(bp->dsn)); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = bp->pf.port_id; + memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn)); + attrs.switch_id.id_len = sizeof(bp->dsn); + devlink_port_attrs_set(&bp->dl_port, &attrs); rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id); if (rc) { netdev_err(bp->dev, "devlink_port_register failed\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 3ea470e8cfa2..43da2dcb0cbc 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -312,6 +312,7 @@ int ice_devlink_create_port(struct ice_pf *pf) struct devlink *devlink = priv_to_devlink(pf); struct ice_vsi *vsi = ice_get_main_vsi(pf); struct device *dev = ice_pf_to_dev(pf); + struct devlink_port_attrs attrs = {}; int err; if (!vsi) { @@ -319,8 +320,9 @@ int ice_devlink_create_port(struct ice_pf *pf) return -EIO; } - devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - pf->hw.pf_id, false, 0, NULL, 0); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.pf_id; + devlink_port_attrs_set(&pf->devlink_port, &attrs); err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id); if (err) { dev_err(dev, "devlink_port_register failed: %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index f8b2de4b04be..a69c62d72d16 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -6,17 +6,16 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv) { struct devlink *devlink = priv_to_devlink(priv->mdev); + struct devlink_port_attrs attrs = {}; - if (mlx5_core_is_pf(priv->mdev)) - devlink_port_attrs_set(&priv->dl_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, - PCI_FUNC(priv->mdev->pdev->devfn), - false, 0, - NULL, 0); - else - devlink_port_attrs_set(&priv->dl_port, - DEVLINK_PORT_FLAVOUR_VIRTUAL, - 0, false, 0, NULL, 0); + if (mlx5_core_is_pf(priv->mdev)) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn); + } else { + attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; + } + + 
devlink_port_attrs_set(&priv->dl_port, &attrs); return devlink_port_register(devlink, &priv->dl_port, 1); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ed2430677b12..0a69f10ac30c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1185,6 +1185,7 @@ static int register_devlink_port(struct mlx5_core_dev *dev, { struct devlink *devlink = priv_to_devlink(dev); struct mlx5_eswitch_rep *rep = rpriv->rep; + struct devlink_port_attrs attrs = {}; struct netdev_phys_item_id ppid = {}; unsigned int dl_port_index = 0; u16 pfnum; @@ -1195,19 +1196,16 @@ static int register_devlink_port(struct mlx5_core_dev *dev, mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport); pfnum = PCI_FUNC(dev->pdev->devfn); - + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pfnum; + memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len); + attrs.switch_id.id_len = ppid.id_len; if (rep->vport == MLX5_VPORT_UPLINK) - devlink_port_attrs_set(&rpriv->dl_port, - DEVLINK_PORT_FLAVOUR_PHYSICAL, - pfnum, false, 0, - &ppid.id[0], ppid.id_len); + devlink_port_attrs_set(&rpriv->dl_port, &attrs); else if (rep->vport == MLX5_VPORT_PF) - devlink_port_attrs_pci_pf_set(&rpriv->dl_port, - &ppid.id[0], ppid.id_len, - pfnum); + devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum); else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) devlink_port_attrs_pci_vf_set(&rpriv->dl_port, - &ppid.id[0], ppid.id_len, pfnum, rep->vport - 1); return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e9ccd333f61d..bbe7358d4ea5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2129,12 +2129,17 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, struct mlxsw_core_port *mlxsw_core_port = &mlxsw_core->ports[local_port]; struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + struct devlink_port_attrs attrs = {}; int err; + attrs.split = split; + attrs.flavour = flavour; + attrs.phys.port_number = port_number; + attrs.phys.split_subport_number = split_port_subnumber; + memcpy(attrs.switch_id.id, switch_id, switch_id_len); + attrs.switch_id.id_len = switch_id_len; mlxsw_core_port->local_port = local_port; - devlink_port_attrs_set(devlink_port, flavour, port_number, - split, split_port_subnumber, - switch_id, switch_id_len); + devlink_port_attrs_set(devlink_port, &attrs); err = devlink_port_register(devlink, devlink_port, local_port); if (err) memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port)); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 07dbf4d72227..71f4e624b3db 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -353,6 +353,7 @@ const struct devlink_ops nfp_devlink_ops = { int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) { + struct devlink_port_attrs attrs = {}; struct nfp_eth_table_port eth_port; struct devlink *devlink; const u8 *serial; @@ -365,10 +366,14 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) if (ret) return ret; + attrs.split = eth_port.is_split; + attrs.flavour = 
DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = eth_port.label_port; + attrs.phys.split_subport_number = eth_port.label_subport; serial_len = nfp_cpp_serial(port->app->cpp, &serial); - devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - eth_port.label_port, eth_port.is_split, - eth_port.label_subport, serial, serial_len); + memcpy(attrs.switch_id.id, serial, serial_len); + attrs.switch_id.id_len = serial_len; + devlink_port_attrs_set(&port->dl_port, &attrs); devlink = priv_to_devlink(app->pf); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index 2d590e571133..c4f4fd469fe3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -69,6 +69,7 @@ void ionic_devlink_free(struct ionic *ionic) int ionic_devlink_register(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); + struct devlink_port_attrs attrs = {}; int err; err = devlink_register(dl, ionic->dev); @@ -77,8 +78,8 @@ int ionic_devlink_register(struct ionic *ionic) return err; } - devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - 0, false, 0, NULL, 0); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); if (err) dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index ec6b6f7818ac..0dc2c66a5d56 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -889,6 +889,7 @@ static const struct devlink_ops nsim_dev_devlink_ops = { static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, unsigned int port_index) { + struct devlink_port_attrs attrs = {}; struct nsim_dev_port *nsim_dev_port; struct devlink_port *devlink_port; int err; @@ -899,10 +900,11 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, nsim_dev_port->port_index = port_index; devlink_port = &nsim_dev_port->devlink_port; - devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, - port_index + 1, 0, 0, - nsim_dev->switch_id.id, - nsim_dev->switch_id.id_len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = port_index + 1; + memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len); + attrs.switch_id.id_len = nsim_dev->switch_id.id_len; + devlink_port_attrs_set(devlink_port, &attrs); err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port, port_index); if (err) diff --git a/include/net/devlink.h b/include/net/devlink.h index de4b5dcdb4a5..8f9db991192d 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -52,7 +52,7 @@ struct devlink_port_phys_attrs { * A physical port which is visible to the user * for a given port flavour. */ - u32 split_subport_number; + u32 split_subport_number; /* If the port is split, this is the number of subport. */ }; struct devlink_port_pci_pf_attrs { @@ -64,6 +64,12 @@ struct devlink_port_pci_vf_attrs { u16 vf; /* Associated PCI VF for of the PCI PF for this port. 
*/ }; +/** + * struct devlink_port_attrs - devlink port object + * @flavour: flavour of the port + * @split: indicates if this is split port + * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL + */ struct devlink_port_attrs { u8 split:1; enum devlink_port_flavour flavour; @@ -1180,17 +1186,9 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port, struct ib_device *ibdev); void devlink_port_type_clear(struct devlink_port *devlink_port); void devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - u32 port_number, bool split, - u32 split_subport_number, - const unsigned char *switch_id, - unsigned char switch_id_len); -void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf); + struct devlink_port_attrs *devlink_port_attrs); +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf); void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf, u16 vf); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, diff --git a/net/core/devlink.c b/net/core/devlink.c index 452b2f8a054e..266936c38357 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7510,9 +7510,7 @@ void devlink_port_type_clear(struct devlink_port *devlink_port) EXPORT_SYMBOL_GPL(devlink_port_type_clear); static int __devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - const unsigned char *switch_id, - unsigned char switch_id_len) + enum devlink_port_flavour flavour) { struct devlink_port_attrs *attrs = &devlink_port->attrs; @@ -7520,12 +7518,10 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, return -EEXIST; devlink_port->attrs_set = true; attrs->flavour = flavour; - if (switch_id) { + if (attrs->switch_id.id_len) { devlink_port->switch_port = true; - if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN)) - switch_id_len = MAX_PHYS_ITEM_ID_LEN; - memcpy(attrs->switch_id.id, switch_id, switch_id_len); - attrs->switch_id.id_len = switch_id_len; + if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN)) + attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN; } else { devlink_port->switch_port = false; } @@ -7536,33 +7532,17 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port, * devlink_port_attrs_set - Set port attributes * * @devlink_port: devlink port - * @flavour: flavour of the port - * @port_number: number of the port that is facing user, for example - * the front panel port number - * @split: indicates if this is split port - * @split_subport_number: if the port is split, this is the number - * of subport. 
- * @switch_id: if the port is part of switch, this is buffer with ID, - * otwerwise this is NULL - * @switch_id_len: length of the switch_id buffer + * @attrs: devlink port attrs */ void devlink_port_attrs_set(struct devlink_port *devlink_port, - enum devlink_port_flavour flavour, - u32 port_number, bool split, - u32 split_subport_number, - const unsigned char *switch_id, - unsigned char switch_id_len) + struct devlink_port_attrs *attrs) { - struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; - ret = __devlink_port_attrs_set(devlink_port, flavour, - switch_id, switch_id_len); + devlink_port->attrs = *attrs; + ret = __devlink_port_attrs_set(devlink_port, attrs->flavour); if (ret) return; - attrs->split = split; - attrs->phys.port_number = port_number; - attrs->phys.split_subport_number = split_subport_number; } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); @@ -7571,20 +7551,14 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_set); * * @devlink_port: devlink port * @pf: associated PF for the devlink port instance - * @switch_id: if the port is part of switch, this is buffer with ID, - * otherwise this is NULL - * @switch_id_len: length of the switch_id buffer */ -void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf) +void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf) { struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; ret = __devlink_port_attrs_set(devlink_port, - DEVLINK_PORT_FLAVOUR_PCI_PF, - switch_id, switch_id_len); + DEVLINK_PORT_FLAVOUR_PCI_PF); if (ret) return; @@ -7598,21 +7572,15 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set); * @devlink_port: devlink port * @pf: associated PF for the devlink port instance * @vf: associated VF of a PF for the devlink port instance - * @switch_id: if the port is part of switch, this is buffer with ID, - * otherwise this is NULL - * @switch_id_len: length of the switch_id buffer */ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, - const unsigned char *switch_id, - unsigned char switch_id_len, u16 pf, u16 vf) { struct devlink_port_attrs *attrs = &devlink_port->attrs; int ret; ret = __devlink_port_attrs_set(devlink_port, - DEVLINK_PORT_FLAVOUR_PCI_VF, - switch_id, switch_id_len); + DEVLINK_PORT_FLAVOUR_PCI_VF); if (ret) return; attrs->pci_vf.pf = pf; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 076908fdd29b..e055efff390b 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -261,10 +261,15 @@ static int dsa_port_setup(struct dsa_port *dp) struct devlink_port *dlp = &dp->devlink_port; bool dsa_port_link_registered = false; bool devlink_port_registered = false; + struct devlink_port_attrs attrs = {}; struct devlink *dl = ds->devlink; bool dsa_port_enabled = false; int err = 0; + attrs.phys.port_number = dp->index; + memcpy(attrs.switch_id.id, id, len); + attrs.switch_id.id_len = len; + if (dp->setup) return 0; @@ -274,8 +279,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_CPU: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_CPU, - dp->index, false, 0, id, len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; @@ -294,8 +299,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_DSA: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_DSA, - dp->index, false, 0, id, len); + 
attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; @@ -314,8 +319,8 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_USER: memset(dlp, 0, sizeof(*dlp)); - devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL, - dp->index, false, 0, id, len); + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + devlink_port_attrs_set(dlp, &attrs); err = devlink_port_register(dl, dlp, dp->index); if (err) break; From 622d3e9201072aaa94d6291b8dee05e3dd50c3af Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:17 +0300 Subject: [PATCH 0813/2056] mlxsw: Set number of port lanes attribute in driver Currently, port attributes like flavour, port number and whether the port was split are set when initializing a port. Set the number of lanes of the port as well so that it could be easily passed to devlink in the next patch. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 + drivers/net/ethernet/mellanox/mlxsw/minimal.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +++- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2 +- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index bbe7358d4ea5..f44cb1a537f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2122,6 +2122,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_port_subnumber, + u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len) { @@ -2159,13 +2160,14 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, u32 port_number, bool split, u32 split_port_subnumber, + u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len) { return __mlxsw_core_port_init(mlxsw_core, local_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, port_number, split, split_port_subnumber, - switch_id, switch_id_len); + lanes, switch_id, switch_id_len); } EXPORT_SYMBOL(mlxsw_core_port_init); @@ -2186,7 +2188,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, DEVLINK_PORT_FLAVOUR_CPU, - 0, false, 0, + 0, false, 0, 0, switch_id, switch_id_len); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 22b0dfa7cfae..b5c02e6e6865 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -193,6 +193,7 @@ void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, u32 port_number, bool split, u32 split_port_subnumber, + u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index c4caeeadcba9..f9c9d7baf3be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -164,7 +164,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) int err; err = mlxsw_core_port_init(mlxsw_m->core, local_port, - module + 1, false, 0, + module + 1, false, 0, 0, mlxsw_m->base_mac, sizeof(mlxsw_m->base_mac)); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 81bb9ea1479b..35d677e0911e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1733,12 +1733,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; bool split = !!split_base_local_port; struct mlxsw_sp_port *mlxsw_sp_port; + u32 lanes = port_mapping->width; struct net_device *dev; int err; err = mlxsw_core_port_init(mlxsw_sp->core, local_port, port_mapping->module + 1, split, - port_mapping->lane / port_mapping->width, + port_mapping->lane / lanes, + lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index 4ff1e623aa76..1b446e6071b2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -281,7 +281,7 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port, int err; err = mlxsw_core_port_init(mlxsw_sib->core, local_port, - module + 1, false, 0, + module + 1, false, 0, 0, mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id)); if (err) { dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n", diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index b438f5576e18..ac4cd7c23439 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1107,7 +1107,7 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, int err; err = mlxsw_core_port_init(mlxsw_sx->core, local_port, - module + 1, false, 0, + module + 1, false, 0, 0, mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id)); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", From a21cf0a8330bba60e44ca6c99e1591042f336ff5 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:18 +0300 Subject: [PATCH 0814/2056] devlink: Add a new devlink port lanes attribute and pass to netlink Add a new devlink port attribute that indicates the port's number of lanes. Drivers are expected to set it via devlink_port_attrs_set(), before registering the port. The attribute is not passed to user space in case the number of lanes is invalid (0). Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 1 + include/net/devlink.h | 2 ++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 4 ++++ 4 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f44cb1a537f3..6cde196f6b70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2134,6 +2134,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, int err; attrs.split = split; + attrs.lanes = lanes; attrs.flavour = flavour; attrs.phys.port_number = port_number; attrs.phys.split_subport_number = split_port_subnumber; diff --git a/include/net/devlink.h b/include/net/devlink.h index 8f9db991192d..91a9f8770d08 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -68,10 +68,12 @@ struct devlink_port_pci_vf_attrs { * struct devlink_port_attrs - devlink port object * @flavour: flavour of the port * @split: indicates if this is split port + * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL */ struct devlink_port_attrs { u8 split:1; + u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; union { diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 87c83a82991b..f741ab8d9cf0 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -455,6 +455,8 @@ enum devlink_attr { DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ + DEVLINK_ATTR_PORT_LANES, /* u32 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 266936c38357..7f26d1054974 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -530,6 +530,10 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (!devlink_port->attrs_set) return 0; + if (attrs->lanes) { + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes)) + return -EMSGSIZE; + } if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; switch (devlink_port->attrs.flavour) { From 1b604efb6c28902df306fce610fba761e9c056d2 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:19 +0300 Subject: [PATCH 0815/2056] mlxsw: Set port split ability attribute in driver Currently, port attributes like flavour, port number and whether the port was split are set when initializing a port. Set the split ability of the port as well, based on port_mapping->width field and split attribute of devlink port in spectrum, so that it could be easily passed to devlink in the next patch. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 9 +++++---- drivers/net/ethernet/mellanox/mlxsw/core.h | 5 ++--- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 +++- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2 +- 6 files changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 6cde196f6b70..f85f5d88d331 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2122,7 +2122,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, enum devlink_port_flavour flavour, u32 port_number, bool split, u32 split_port_subnumber, - u32 lanes, + bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len) { @@ -2161,14 +2161,15 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port) int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, u32 port_number, bool split, u32 split_port_subnumber, - u32 lanes, + bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len) { return __mlxsw_core_port_init(mlxsw_core, local_port, DEVLINK_PORT_FLAVOUR_PHYSICAL, port_number, split, split_port_subnumber, - lanes, switch_id, switch_id_len); + splittable, lanes, + switch_id, switch_id_len); } EXPORT_SYMBOL(mlxsw_core_port_init); @@ -2189,7 +2190,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core, err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT, DEVLINK_PORT_FLAVOUR_CPU, - 0, false, 0, 0, + 0, false, 0, false, 0, switch_id, switch_id_len); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index b5c02e6e6865..7d6b0a232789 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -191,9 +191,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port); int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, - u32 port_number, bool split, - u32 split_port_subnumber, - u32 lanes, + u32 port_number, bool split, u32 split_port_subnumber, + bool splittable, u32 lanes, const unsigned char *switch_id, unsigned char switch_id_len); void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index f9c9d7baf3be..c010db2c9dba 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -164,8 +164,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module) int err; err = mlxsw_core_port_init(mlxsw_m->core, local_port, - module + 1, false, 0, 0, - mlxsw_m->base_mac, + module + 1, false, 0, false, + 0, mlxsw_m->base_mac, sizeof(mlxsw_m->base_mac)); if (err) { dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n", diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 35d677e0911e..9cd2dc3afb13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1735,12 +1735,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, struct mlxsw_sp_port *mlxsw_sp_port; u32 
lanes = port_mapping->width; struct net_device *dev; + bool splittable; int err; + splittable = lanes > 1 && !split; err = mlxsw_core_port_init(mlxsw_sp->core, local_port, port_mapping->module + 1, split, port_mapping->lane / lanes, - lanes, + splittable, lanes, mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac)); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index 1b446e6071b2..1e561132eb1e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -281,7 +281,7 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port, int err; err = mlxsw_core_port_init(mlxsw_sib->core, local_port, - module + 1, false, 0, 0, + module + 1, false, 0, false, 0, mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id)); if (err) { dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n", diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index ac4cd7c23439..6f9a725662fb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1107,7 +1107,7 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port, int err; err = mlxsw_core_port_init(mlxsw_sx->core, local_port, - module + 1, false, 0, 0, + module + 1, false, 0, false, 0, mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id)); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", From a0f49b54865273c895be3826d6d59cbc5ad725c2 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:20 +0300 Subject: [PATCH 0816/2056] devlink: Add a new devlink port split ability attribute and pass to netlink Add a new attribute that indicates the split ability of devlink port. Drivers are expected to set it via devlink_port_attrs_set(), before registering the port. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 1 + include/net/devlink.h | 4 +++- include/uapi/linux/devlink.h | 1 + net/core/devlink.c | 3 +++ 5 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f85f5d88d331..8b3791d73c99 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -2135,6 +2135,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port, attrs.split = split; attrs.lanes = lanes; + attrs.splittable = splittable; attrs.flavour = flavour; attrs.phys.port_number = port_number; attrs.phys.split_subport_number = split_port_subnumber; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index 71f4e624b3db..b6a10565309a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -367,6 +367,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port) return ret; attrs.split = eth_port.is_split; + attrs.splittable = !attrs.split; attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; attrs.phys.port_number = eth_port.label_port; attrs.phys.split_subport_number = eth_port.label_subport; diff --git a/include/net/devlink.h b/include/net/devlink.h index 91a9f8770d08..746bed538664 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -68,11 +68,13 @@ struct devlink_port_pci_vf_attrs { * struct devlink_port_attrs - devlink port object * @flavour: flavour of the port * @split: indicates if this is split port + * @splittable: indicates if the port can be split. * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink. * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL */ struct devlink_port_attrs { - u8 split:1; + u8 split:1, + splittable:1; u32 lanes; enum devlink_port_flavour flavour; struct netdev_phys_item_id switch_id; diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index f741ab8d9cf0..cfef4245ea5a 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -456,6 +456,7 @@ enum devlink_attr { DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER, /* string */ DEVLINK_ATTR_PORT_LANES, /* u32 */ + DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */ /* add new attributes above here, update the policy in devlink.c */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 7f26d1054974..94c797b74378 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -534,6 +534,8 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes)) return -EMSGSIZE; } + if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable)) + return -EMSGSIZE; if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour)) return -EMSGSIZE; switch (devlink_port->attrs.flavour) { @@ -7547,6 +7549,7 @@ void devlink_port_attrs_set(struct devlink_port *devlink_port, ret = __devlink_port_attrs_set(devlink_port, attrs->flavour); if (ret) return; + WARN_ON(attrs->splittable && attrs->split); } EXPORT_SYMBOL_GPL(devlink_port_attrs_set); From 82901ad16905832b7a79ff4316302bb59ec4b4ba Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:21 +0300 Subject: [PATCH 0817/2056] devlink: Move input checks from driver to devlink Currently, all the input checks are done in driver. 
After adding the split capability to devlink port, move the checks to devlink. Signed-off-by: Danielle Ratson Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 17 ++-------------- .../net/ethernet/netronome/nfp/nfp_devlink.c | 5 +---- net/core/devlink.c | 20 +++++++++++++++++++ 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9cd2dc3afb13..eeeafd1d82ce 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2236,13 +2236,6 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return -EINVAL; } - /* Split ports cannot be split. */ - if (mlxsw_sp_port->split) { - netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); - NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further"); - return -EINVAL; - } - max_width = mlxsw_core_module_max_width(mlxsw_core, mlxsw_sp_port->mapping.module); if (max_width < 0) { @@ -2251,19 +2244,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, return max_width; } - /* Split port with non-max and 1 module width cannot be split. */ - if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) { + /* Split port with non-max cannot be split. */ + if (mlxsw_sp_port->mapping.width != max_width) { netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n"); NL_SET_ERR_MSG_MOD(extack, "Port cannot be split"); return -EINVAL; } - if (count == 1 || !is_power_of_2(count) || count > max_width) { - netdev_err(mlxsw_sp_port->dev, "Invalid split count\n"); - NL_SET_ERR_MSG_MOD(extack, "Invalid split count"); - return -EINVAL; - } - offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width); if (offset < 0) { netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c index b6a10565309a..be52510d446b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c @@ -70,9 +70,6 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, unsigned int lanes; int ret; - if (count < 2) - return -EINVAL; - mutex_lock(&pf->lock); rtnl_lock(); @@ -81,7 +78,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index, if (ret) goto out; - if (eth_port.is_split || eth_port.port_lanes % count) { + if (eth_port.port_lanes % count) { ret = -EINVAL; goto out; } diff --git a/net/core/devlink.c b/net/core/devlink.c index 94c797b74378..346d385ba09f 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -940,6 +940,7 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; + struct devlink_port *devlink_port; u32 port_index; u32 count; @@ -947,8 +948,27 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb, !info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]) return -EINVAL; + devlink_port = devlink_port_get_from_info(devlink, info); port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]); count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]); + + if (IS_ERR(devlink_port)) + return -EINVAL; + + if (!devlink_port->attrs.splittable) { + /* Split ports cannot be split. 
*/ + if (devlink_port->attrs.split) + NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further"); + else + NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split"); + return -EINVAL; + } + + if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) { + NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count"); + return -EINVAL; + } + return devlink_port_split(devlink, port_index, count, info->extack); } From f3348a82e72795ce218264fd00ef5cf5137836a9 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 9 Jul 2020 16:18:22 +0300 Subject: [PATCH 0818/2056] selftests: net: Add port split test Test port split configuration using previously added number of port lanes attribute. Check that all the splittable ports are successfully split to their maximum number of lanes and below, and that those which are not splittable fail to be split. Test output example: TEST: swp4 is unsplittable [ OK ] TEST: split port swp53 into 4 [ OK ] TEST: Unsplit port pci/0000:03:00.0/25 [ OK ] TEST: split port swp53 into 2 [ OK ] TEST: Unsplit port pci/0000:03:00.0/25 [ OK ] Signed-off-by: Danielle Ratson Reviewed-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- tools/testing/selftests/net/Makefile | 1 + .../selftests/net/devlink_port_split.py | 277 ++++++++++++++++++ 2 files changed, 278 insertions(+) create mode 100755 tools/testing/selftests/net/devlink_port_split.py diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index bfacb960450f..9491bbaa0831 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -18,6 +18,7 @@ TEST_PROGS += reuseaddr_ports_exhausted.sh TEST_PROGS += txtimestamp.sh TEST_PROGS += vrf-xfrm-tests.sh TEST_PROGS += rxtimestamp.sh +TEST_PROGS += devlink_port_split.py TEST_PROGS_EXTENDED := in_netns.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py new file mode 100755 index 000000000000..58bb7e9b88ce --- /dev/null +++ b/tools/testing/selftests/net/devlink_port_split.py @@ -0,0 +1,277 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0 + +from subprocess import PIPE, Popen +import json +import time +import argparse +import collections +import sys + +# +# Test port split configuration using devlink-port lanes attribute. +# The test is skipped in case the attribute is not available. +# +# First, check that all the ports with 1 lane fail to split. +# Second, check that all the ports with more than 1 lane can be split +# to all valid configurations (e.g., split to 2, split to 4 etc.) +# + + +Port = collections.namedtuple('Port', 'bus_info name') + + +def run_command(cmd, should_fail=False): + """ + Run a command in subprocess. + Return: Tuple of (stdout, stderr). + """ + + p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) + stdout, stderr = p.communicate() + stdout, stderr = stdout.decode(), stderr.decode() + + if stderr != "" and not should_fail: + print("Error sending command: %s" % cmd) + print(stdout) + print(stderr) + return stdout, stderr + + +class devlink_ports(object): + """ + Class that holds information on the devlink ports, required to the tests; + if_names: A list of interfaces in the devlink ports. + """ + + def get_if_names(dev): + """ + Get a list of physical devlink ports. + Return: Array of tuples (bus_info/port, if_name). 
+ """ + + arr = [] + + cmd = "devlink -j port show" + stdout, stderr = run_command(cmd) + assert stderr == "" + ports = json.loads(stdout)['port'] + + for port in ports: + if dev in port: + if ports[port]['flavour'] == 'physical': + arr.append(Port(bus_info=port, name=ports[port]['netdev'])) + + return arr + + def __init__(self, dev): + self.if_names = devlink_ports.get_if_names(dev) + + +def get_max_lanes(port): + """ + Get the $port's maximum number of lanes. + Return: number of lanes, e.g. 1, 2, 4 and 8. + """ + + cmd = "devlink -j port show %s" % port + stdout, stderr = run_command(cmd) + assert stderr == "" + values = list(json.loads(stdout)['port'].values())[0] + + if 'lanes' in values: + lanes = values['lanes'] + else: + lanes = 0 + return lanes + + +def get_split_ability(port): + """ + Get the $port split ability. + Return: split ability, true or false. + """ + + cmd = "devlink -j port show %s" % port.name + stdout, stderr = run_command(cmd) + assert stderr == "" + values = list(json.loads(stdout)['port'].values())[0] + + return values['splittable'] + + +def split(k, port, should_fail=False): + """ + Split $port into $k ports. + If should_fail == True, the split should fail. Otherwise, should pass. + Return: Array of sub ports after splitting. + If the $port wasn't split, the array will be empty. + """ + + cmd = "devlink port split %s count %s" % (port.bus_info, k) + stdout, stderr = run_command(cmd, should_fail=should_fail) + + if should_fail: + if not test(stderr != "", "%s is unsplittable" % port.name): + print("split an unsplittable port %s" % port.name) + return create_split_group(port, k) + else: + if stderr == "": + return create_split_group(port, k) + print("didn't split a splittable port %s" % port.name) + + return [] + + +def unsplit(port): + """ + Unsplit $port. + """ + + cmd = "devlink port unsplit %s" % port + stdout, stderr = run_command(cmd) + test(stderr == "", "Unsplit port %s" % port) + + +def exists(port, dev): + """ + Check if $port exists in the devlink ports. + Return: True is so, False otherwise. + """ + + return any(dev_port.name == port + for dev_port in devlink_ports.get_if_names(dev)) + + +def exists_and_lanes(ports, lanes, dev): + """ + Check if every port in the list $ports exists in the devlink ports and has + $lanes number of lanes after splitting. + Return: True if both are True, False otherwise. + """ + + for port in ports: + max_lanes = get_max_lanes(port) + if not exists(port, dev): + print("port %s doesn't exist in devlink ports" % port) + return False + if max_lanes != lanes: + print("port %s has %d lanes, but %s were expected" + % (port, lanes, max_lanes)) + return False + return True + + +def test(cond, msg): + """ + Check $cond and print a message accordingly. + Return: True is pass, False otherwise. + """ + + if cond: + print("TEST: %-60s [ OK ]" % msg) + else: + print("TEST: %-60s [FAIL]" % msg) + + return cond + + +def create_split_group(port, k): + """ + Create the split group for $port. + Return: Array with $k elements, which are the split port group. + """ + + return list(port.name + "s" + str(i) for i in range(k)) + + +def split_unsplittable_port(port, k): + """ + Test that splitting of unsplittable port fails. + """ + + # split to max + new_split_group = split(k, port, should_fail=True) + + if new_split_group != []: + unsplit(port.bus_info) + + +def split_splittable_port(port, k, lanes, dev): + """ + Test that splitting of splittable port passes correctly. 
+ """ + + new_split_group = split(k, port) + + # Once the split command ends, it takes some time to the sub ifaces' + # to get their names. Use udevadm to continue only when all current udev + # events are handled. + cmd = "udevadm settle" + stdout, stderr = run_command(cmd) + assert stderr == "" + + if new_split_group != []: + test(exists_and_lanes(new_split_group, lanes/k, dev), + "split port %s into %s" % (port.name, k)) + + unsplit(port.bus_info) + + +def make_parser(): + parser = argparse.ArgumentParser(description='A test for port splitting.') + parser.add_argument('--dev', + help='The devlink handle of the device under test. ' + + 'The default is the first registered devlink ' + + 'handle.') + + return parser + + +def main(cmdline=None): + parser = make_parser() + args = parser.parse_args(cmdline) + + dev = args.dev + if not dev: + cmd = "devlink -j dev show" + stdout, stderr = run_command(cmd) + assert stderr == "" + + devs = json.loads(stdout)['dev'] + dev = list(devs.keys())[0] + + cmd = "devlink dev show %s" % dev + stdout, stderr = run_command(cmd) + if stderr != "": + print("devlink device %s can not be found" % dev) + sys.exit(1) + + ports = devlink_ports(dev) + + for port in ports.if_names: + max_lanes = get_max_lanes(port.name) + + # If max lanes is 0, do not test port splitting at all + if max_lanes == 0: + continue + + # If 1 lane, shouldn't be able to split + elif max_lanes == 1: + test(not get_split_ability(port), + "%s should not be able to split" % port.name) + split_unsplittable_port(port, max_lanes) + + # Else, splitting should pass and all the split ports should exist. + else: + lane = max_lanes + test(get_split_ability(port), + "%s should be able to split" % port.name) + while lane > 1: + split_splittable_port(port, lane, max_lanes, dev) + + lane //= 2 + + +if __name__ == "__main__": + main() From e9716afdcae4ceea3a093e5b2fac1a5df0f943ca Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Wed, 24 Jun 2020 05:56:25 -0500 Subject: [PATCH 0819/2056] net/mlx5: E-switch, When eswitch is unsupported, return -EOPNOTSUPP When eswitch is unsupported, currently -EPERM error code is returned instead of -EOPNOTSUPP. Due to this VF device's devlink virtual port is not enumerated because port_function_get() callback returned -EPERM instead of -EOPNOTSUPP. Hence, return the error code -EOPNOTSUPP when eswitch is unsupported. Fixes: bd93975353d5 ("net/mlx5: E-switch, Introduce and use eswitch support check helper") Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c656c9f081c1..d70543ea57dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -69,7 +69,7 @@ static int mlx5_eswitch_check(const struct mlx5_core_dev *dev) return -EOPNOTSUPP; if (!MLX5_ESWITCH_MANAGER(dev)) - return -EPERM; + return -EOPNOTSUPP; return 0; } From bc562be9674bce9c42dd6d057558d498698a801a Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Sun, 29 Mar 2020 13:07:49 +0300 Subject: [PATCH 0820/2056] net/mlx5e: CT: Save ct entries tuples in hashtables Save original tuple and natted tuple in two new hashtables. This is a pre-step for restoring ct state after hw miss by performing a 5-tuple lookup on the hash tables. 
Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 196 ++++++++++++++++++ 1 file changed, 196 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index c7107da03212..55402b1739ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -39,6 +39,8 @@ struct mlx5_tc_ct_priv { struct idr fte_ids; struct xarray tuple_ids; struct rhashtable zone_ht; + struct rhashtable ct_tuples_ht; + struct rhashtable ct_tuples_nat_ht; struct mlx5_flow_table *ct; struct mlx5_flow_table *ct_nat; struct mlx5_flow_table *post_ct; @@ -82,12 +84,38 @@ struct mlx5_ct_ft { struct mlx5_tc_ct_pre pre_ct_nat; }; +struct mlx5_ct_tuple { + u16 addr_type; + __be16 n_proto; + u8 ip_proto; + struct { + union { + __be32 src_v4; + struct in6_addr src_v6; + }; + union { + __be32 dst_v4; + struct in6_addr dst_v6; + }; + } ip; + struct { + __be16 src; + __be16 dst; + } port; + + u16 zone; +}; + struct mlx5_ct_entry { u16 zone; struct rhash_head node; + struct rhash_head tuple_node; + struct rhash_head tuple_nat_node; struct mlx5_fc *counter; unsigned long cookie; unsigned long restore_cookie; + struct mlx5_ct_tuple tuple; + struct mlx5_ct_tuple tuple_nat; struct mlx5_ct_zone_rule zone_rules[2]; }; @@ -106,6 +134,22 @@ static const struct rhashtable_params zone_params = { .automatic_shrinking = true, }; +static const struct rhashtable_params tuples_ht_params = { + .head_offset = offsetof(struct mlx5_ct_entry, tuple_node), + .key_offset = offsetof(struct mlx5_ct_entry, tuple), + .key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple), + .automatic_shrinking = true, + .min_size = 16 * 1024, +}; + +static const struct rhashtable_params tuples_nat_ht_params = { + .head_offset = offsetof(struct mlx5_ct_entry, tuple_nat_node), + .key_offset = offsetof(struct mlx5_ct_entry, tuple_nat), + .key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple_nat), + .automatic_shrinking = true, + .min_size = 16 * 1024, +}; + static struct mlx5_tc_ct_priv * mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) { @@ -118,6 +162,115 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv) return uplink_priv->ct_priv; } +static int +mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule) +{ + struct flow_match_control control; + struct flow_match_basic basic; + + flow_rule_match_basic(rule, &basic); + flow_rule_match_control(rule, &control); + + tuple->n_proto = basic.key->n_proto; + tuple->ip_proto = basic.key->ip_proto; + tuple->addr_type = control.key->addr_type; + + if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs match; + + flow_rule_match_ipv4_addrs(rule, &match); + tuple->ip.src_v4 = match.key->src; + tuple->ip.dst_v4 = match.key->dst; + } else if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs match; + + flow_rule_match_ipv6_addrs(rule, &match); + tuple->ip.src_v6 = match.key->src; + tuple->ip.dst_v6 = match.key->dst; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports match; + + flow_rule_match_ports(rule, &match); + switch (tuple->ip_proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + tuple->port.src = match.key->src; + tuple->port.dst = match.key->dst; + break; + default: + return -EOPNOTSUPP; + } + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +static int 
+mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple, + struct flow_rule *rule) +{ + struct flow_action *flow_action = &rule->action; + struct flow_action_entry *act; + u32 offset, val, ip6_offset; + int i; + + flow_action_for_each(i, act, flow_action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; + + offset = act->mangle.offset; + val = act->mangle.val; + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + if (offset == offsetof(struct iphdr, saddr)) + tuple->ip.src_v4 = cpu_to_be32(val); + else if (offset == offsetof(struct iphdr, daddr)) + tuple->ip.dst_v4 = cpu_to_be32(val); + else + return -EOPNOTSUPP; + break; + + case FLOW_ACT_MANGLE_HDR_TYPE_IP6: + ip6_offset = (offset - offsetof(struct ipv6hdr, saddr)); + ip6_offset /= 4; + if (ip6_offset < 8) + tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val); + else + return -EOPNOTSUPP; + break; + + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + if (offset == offsetof(struct tcphdr, source)) + tuple->port.src = cpu_to_be16(val); + else if (offset == offsetof(struct tcphdr, dest)) + tuple->port.dst = cpu_to_be16(val); + else + return -EOPNOTSUPP; + break; + + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + if (offset == offsetof(struct udphdr, source)) + tuple->port.src = cpu_to_be16(val); + else if (offset == offsetof(struct udphdr, dest)) + tuple->port.dst = cpu_to_be16(val); + else + return -EOPNOTSUPP; + break; + + default: + return -EOPNOTSUPP; + } + } + + return 0; +} + static int mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_rule *rule) @@ -614,9 +767,33 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, return -ENOMEM; entry->zone = ft->zone; + entry->tuple.zone = ft->zone; entry->cookie = flow->cookie; entry->restore_cookie = meta_action->ct_metadata.cookie; + err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule); + if (err) + goto err_set; + + memcpy(&entry->tuple_nat, &entry->tuple, sizeof(entry->tuple)); + err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule); + if (err) + goto err_set; + + err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); + if (err) + goto err_tuple; + + if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) { + err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + if (err) + goto err_tuple_nat; + } + err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry); if (err) goto err_rules; @@ -631,6 +808,15 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, err_insert: mlx5_tc_ct_entry_del_rules(ct_priv, entry); err_rules: + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, tuples_nat_ht_params); +err_tuple_nat: + if (entry->tuple_node.next) + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); +err_tuple: +err_set: kfree(entry); netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err); @@ -650,6 +836,12 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, return -ENOENT; mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry); + if (entry->tuple_node.next) + rhashtable_remove_fast(&ft->ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + rhashtable_remove_fast(&ft->ct_priv->ct_tuples_ht, &entry->tuple_node, + tuples_ht_params); WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params)); @@ -1563,6 +1755,8 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) 
xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1); mutex_init(&ct_priv->control_lock); rhashtable_init(&ct_priv->zone_ht, &zone_params); + rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); + rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params); /* Done, set ct_priv to know it initializted */ uplink_priv->ct_priv = ct_priv; @@ -1593,6 +1787,8 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct); + rhashtable_destroy(&ct_priv->ct_tuples_ht); + rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); rhashtable_destroy(&ct_priv->zone_ht); mutex_destroy(&ct_priv->control_lock); xa_destroy(&ct_priv->tuple_ids); From a7c119bd82a1d937db5b86fcfe1d5f991469c52d Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Sun, 3 May 2020 16:45:02 +0300 Subject: [PATCH 0821/2056] net/mlx5e: CT: Allow header rewrite of 5-tuple and ct clear action With ct clear we don't jump to the ct tables, so header rewrite of 5-tuple can be done in place (and not moved to after the CT action). Check for ct clear action, and if so, allow 5-tuple header rewrite. Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index bc9c0ac15f99..5674dbb682de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3191,13 +3191,14 @@ static bool actions_match_supported(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { - bool ct_flow; + bool ct_flow = false, ct_clear = false; u32 actions; - ct_flow = flow_flag_test(flow, CT); if (mlx5e_is_eswitch_flow(flow)) { actions = flow->esw_attr->action; - + ct_clear = flow->esw_attr->ct_attr.ct_action & + TCA_CT_ACT_CLEAR; + ct_flow = flow_flag_test(flow, CT) && !ct_clear; if (flow->esw_attr->split_count && ct_flow) { /* All registers used by ct are cleared when using * split rules. From 3d486ec4fa87a80a57bc588d4520f7d2e866fbc0 Mon Sep 17 00:00:00 2001 From: Oz Shlomo Date: Mon, 1 Jun 2020 17:08:55 +0000 Subject: [PATCH 0822/2056] net/mlx5e: Use netdev_info instead of pr_info The next patch will pass the mlx5e_priv struct to the modify_header_match_supported method. Use this opportunity to refactor the existing pr_info call to a netdev_info call. 
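The practical difference is that netdev_info() attaches the message to a specific net_device, so the log line identifies the driver and interface instead of being an anonymous kernel message. A rough illustration; the log prefix shown in the comment is an example, not output from this patch:

	/* pr_info(): no device context in the log line. */
	pr_info("can't offload re-write of ip proto %d\n", ip_proto);

	/* netdev_info(): prefixed with the device, e.g.
	 * "mlx5_core 0000:03:00.0 eth0: can't offload re-write of ip proto 6".
	 */
	netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
		    ip_proto);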
Signed-off-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 5674dbb682de..a6cb5d81f08b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3138,7 +3138,8 @@ static int is_action_keys_supported(const struct flow_action_entry *act, return 0; } -static bool modify_header_match_supported(struct mlx5_flow_spec *spec, +static bool modify_header_match_supported(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, struct flow_action *flow_action, u32 actions, bool ct_flow, struct netlink_ext_ack *extack) @@ -3177,7 +3178,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of non TCP/UDP"); - pr_info("can't offload re-write of ip proto %d\n", ip_proto); + netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n", + ip_proto); return false; } @@ -3212,7 +3214,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, } if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) - return modify_header_match_supported(&parse_attr->spec, + return modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, ct_flow, extack); From 7e36feeb04676c7eed0b3fe9196543608bc7ecae Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Wed, 22 Apr 2020 18:00:25 +0300 Subject: [PATCH 0823/2056] net/mlx5e: CT: Don't offload tuple rewrites for established tuples Next patches will remove the tupleid registers that is used to restore the ct state on miss, and instead use the tuple on the missed packet to lookup which state to restore. Disable tuple rewrites after connection tracking. For tuple rewrites, inject a ct_state=-trk match so it won't change the tuple for established flows (+trk) that passed connection tracking, and instead miss to software. 
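At the register level, a ct_state=-trk match means the tracked bit of the ct-state metadata register must be zero. A simplified sketch of that match using helpers already present in this series (the real mlx5_tc_ct_add_no_trk_match() added below additionally verifies that no ct_state match was set beforehand):

	/* Require the tracked bit to be 0: the packet has not gone through
	 * connection tracking yet, so rewriting its tuple cannot break the
	 * tuple-based ct restore on miss.
	 */
	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
				    0,                      /* value: -trk */
				    MLX5_CT_STATE_TRK_BIT); /* mask: trk bit only */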
Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 18 +++++ .../ethernet/mellanox/mlx5/core/en/tc_ct.h | 10 +++ .../net/ethernet/mellanox/mlx5/core/en_tc.c | 74 ++++++++++++++++--- .../net/ethernet/mellanox/mlx5/core/en_tc.h | 5 ++ 4 files changed, 95 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 55402b1739ae..08ebce35b2fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -894,6 +894,24 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data, return -EOPNOTSUPP; } +int +mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec) +{ + u32 ctstate = 0, ctstate_mask = 0; + + mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG, + &ctstate, &ctstate_mask); + if (ctstate_mask) + return -EOPNOTSUPP; + + ctstate_mask |= MLX5_CT_STATE_TRK_BIT; + mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, + ctstate, ctstate_mask); + + return 0; +} + int mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 626f6c04882e..94f74cf71ce4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -91,6 +91,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct flow_cls_offload *f, struct netlink_ext_ack *extack); int +mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec); +int mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, const struct flow_action_entry *act, @@ -140,6 +143,13 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, return -EOPNOTSUPP; } +static inline int +mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec) +{ + return 0; +} + static inline int mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr *attr, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a6cb5d81f08b..fd984ef234b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -219,6 +219,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2; } +void +mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, + enum mlx5e_tc_attr_to_reg type, + u32 *data, + u32 *mask) +{ + int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset; + int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen; + void *headers_c = spec->match_criteria; + void *headers_v = spec->match_value; + void *fmask, *fval; + + fmask = headers_c + soffset; + fval = headers_v + soffset; + + memcpy(mask, fmask, match_len); + memcpy(data, fval, match_len); + + *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8)))); + *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8)))); +} + int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, @@ -3086,6 +3108,7 @@ struct ipv6_hoplimit_word { static int is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow, bool *modify_ip_header, + bool *modify_tuple, struct netlink_ext_ack *extack) { u32 mask, offset; @@ -3108,7 +3131,10 @@ 
static int is_action_keys_supported(const struct flow_action_entry *act, *modify_ip_header = true; } - if (ct_flow && offset >= offsetof(struct iphdr, saddr)) { + if (offset >= offsetof(struct iphdr, saddr)) + *modify_tuple = true; + + if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv4 address with action ct"); return -EOPNOTSUPP; @@ -3123,16 +3149,22 @@ static int is_action_keys_supported(const struct flow_action_entry *act, *modify_ip_header = true; } - if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) { + if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) + *modify_tuple = true; + + if (ct_flow && *modify_tuple) { NL_SET_ERR_MSG_MOD(extack, "can't offload re-write of ipv6 address with action ct"); return -EOPNOTSUPP; } - } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || - htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) { - NL_SET_ERR_MSG_MOD(extack, - "can't offload re-write of transport header ports with action ct"); - return -EOPNOTSUPP; + } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP || + htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) { + *modify_tuple = true; + if (ct_flow) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload re-write of transport header ports with action ct"); + return -EOPNOTSUPP; + } } return 0; @@ -3142,10 +3174,11 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_action *flow_action, u32 actions, bool ct_flow, + bool ct_clear, struct netlink_ext_ack *extack) { const struct flow_action_entry *act; - bool modify_ip_header; + bool modify_ip_header, modify_tuple; void *headers_c; void *headers_v; u16 ethertype; @@ -3162,17 +3195,32 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv, goto out_ok; modify_ip_header = false; + modify_tuple = false; flow_action_for_each(i, act, flow_action) { if (act->id != FLOW_ACTION_MANGLE && act->id != FLOW_ACTION_ADD) continue; err = is_action_keys_supported(act, ct_flow, - &modify_ip_header, extack); + &modify_ip_header, + &modify_tuple, extack); if (err) return err; } + /* Add ct_state=-trk match so it will be offloaded for non ct flows + * (or after clear action), as otherwise, since the tuple is changed, + * we can't restore ct state + */ + if (!ct_clear && modify_tuple && + mlx5_tc_ct_add_no_trk_match(priv, spec)) { + NL_SET_ERR_MSG_MOD(extack, + "can't offload tuple modify header with ct matches"); + netdev_info(priv->netdev, + "can't offload tuple modify header with ct matches"); + return false; + } + ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) { @@ -3216,7 +3264,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv, if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) return modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, - ct_flow, extack); + ct_flow, ct_clear, + extack); return true; } @@ -4483,11 +4532,12 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev); + /* actions validation depends on parsing the ct matches first */ + err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack); if (err) goto err_free; - err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev); if (err) goto err_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 1561eaa89ffd..68d49b945184 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -164,6 +164,11 @@ void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec, u32 data, u32 mask); +void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec, + enum mlx5e_tc_attr_to_reg type, + u32 *data, + u32 *mask); + int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev, int namespace, struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); From a8eb919ba659adcbed8fd782b3e9a949c3a65b9c Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Sun, 29 Mar 2020 13:50:47 +0300 Subject: [PATCH 0824/2056] net/mlx5e: CT: Restore ct state from lookup in zone instead of tupleid Remove tupleid, and replace it with zone_restore, which is the zone an established tuple sets after match. On miss, Use this zone + tuple taken from the skb, to lookup the ct entry and restore it. This improves flow insertion rate by avoiding the allocation of a header rewrite context. Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/rep/tc.c | 6 +- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 113 ++++++++++++------ .../ethernet/mellanox/mlx5/core/en/tc_ct.h | 17 +-- .../net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- .../net/ethernet/mellanox/mlx5/core/en_tc.h | 2 +- 5 files changed, 90 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index eefeb1cdc2ee..e27963b80c11 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -594,7 +594,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct mlx5e_tc_update_priv *tc_priv) { #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id; + u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone; struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; struct tc_skb_ext *tc_skb_ext; @@ -631,11 +631,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, tc_skb_ext->chain = chain; - tuple_id = reg_c1 & TUPLE_ID_MAX; + zone = reg_c1 & ZONE_RESTORE_MAX; uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; - if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id)) + if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, zone)) return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 08ebce35b2fc..c6e92f818ecf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -60,7 +60,6 @@ struct mlx5_ct_flow { struct mlx5_ct_zone_rule { struct mlx5_flow_handle *rule; struct mlx5_esw_flow_attr attr; - int tupleid; bool nat; }; @@ -107,7 +106,6 @@ struct mlx5_ct_tuple { }; struct mlx5_ct_entry { - u16 zone; struct rhash_head node; struct rhash_head tuple_node; struct rhash_head tuple_nat_node; @@ -396,11 +394,10 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_esw_flow_attr *attr = &zone_rule->attr; struct mlx5_eswitch *esw = ct_priv->esw; - ct_dbg("Deleting ct entry rule in zone %d", entry->zone); + ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone); mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); - 
xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid); } static void @@ -434,7 +431,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, u8 ct_state, u32 mark, u32 label, - u32 tupleid) + u16 zone) { struct mlx5_eswitch *esw = ct_priv->esw; int err; @@ -455,7 +452,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, return err; err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, - TUPLEID_TO_REG, tupleid); + ZONE_RESTORE_TO_REG, zone + 1); if (err) return err; @@ -582,8 +579,7 @@ static int mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_esw_flow_attr *attr, struct flow_rule *flow_rule, - u32 tupleid, - bool nat) + u16 zone, bool nat) { struct mlx5e_tc_mod_hdr_acts mod_acts = {}; struct mlx5_eswitch *esw = ct_priv->esw; @@ -617,7 +613,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, ct_state, meta->ct_metadata.mark, meta->ct_metadata.labels[0], - tupleid); + zone); if (err) goto err_mapping; @@ -648,7 +644,6 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_esw_flow_attr *attr = &zone_rule->attr; struct mlx5_eswitch *esw = ct_priv->esw; struct mlx5_flow_spec *spec = NULL; - u32 tupleid; int err; zone_rule->nat = nat; @@ -657,18 +652,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, if (!spec) return -ENOMEM; - /* Get tuple unique id */ - err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule, - XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL); - if (err) { - netdev_warn(ct_priv->netdev, - "Failed to allocate tuple id, err: %d\n", err); - goto err_xa_alloc; - } - zone_rule->tupleid = tupleid; - err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, - tupleid, nat); + entry->tuple.zone, nat); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -686,7 +671,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule); mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, - entry->zone & MLX5_CT_ZONE_MASK, + entry->tuple.zone & MLX5_CT_ZONE_MASK, MLX5_CT_ZONE_MASK); zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); @@ -697,15 +682,13 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, } kfree(spec); - ct_dbg("Offloaded ct entry rule in zone %d", entry->zone); + ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone); return 0; err_rule: mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); err_mod_hdr: - xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid); -err_xa_alloc: kfree(spec); return err; } @@ -766,7 +749,6 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, if (!entry) return -ENOMEM; - entry->zone = ft->zone; entry->tuple.zone = ft->zone; entry->cookie = flow->cookie; entry->restore_cookie = meta_action->ct_metadata.cookie; @@ -894,6 +876,48 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data, return -EOPNOTSUPP; } +static bool +mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple, + u16 zone) +{ + struct flow_keys flow_keys; + + skb_reset_network_header(skb); + skb_flow_dissect_flow_keys(skb, &flow_keys, 0); + + tuple->zone = zone; + + if (flow_keys.basic.ip_proto != IPPROTO_TCP && + flow_keys.basic.ip_proto != IPPROTO_UDP) + return false; + + tuple->port.src = flow_keys.ports.src; + tuple->port.dst = flow_keys.ports.dst; + tuple->n_proto = flow_keys.basic.n_proto; + tuple->ip_proto = flow_keys.basic.ip_proto; + + switch (flow_keys.basic.n_proto) { + case htons(ETH_P_IP): + tuple->addr_type = 
FLOW_DISSECTOR_KEY_IPV4_ADDRS; + tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src; + tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst; + break; + + case htons(ETH_P_IPV6): + tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src; + tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst; + break; + default: + goto out; + } + + return true; + +out: + return false; +} + int mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec) @@ -978,7 +1002,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, } if (mask->ct_zone) - mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, + mlx5e_tc_match_to_reg_match(spec, ZONE_RESTORE_TO_REG, key->ct_zone, MLX5_CT_ZONE_MASK); if (ctstate_mask) mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, @@ -1008,6 +1032,18 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + /* To mark that the need restore ct state on a skb, we mark the + * packet with the zone restore register. To distinguise from an + * uninitalized 0 value for this register, we write zone + 1 on the + * packet. + * + * This restricts us to a max zone of 0xFFFE. + */ + if (act->ct.zone == (u16)~0) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported ct zone"); + return -EOPNOTSUPP; + } + attr->ct_attr.zone = act->ct.zone; attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; @@ -1349,6 +1385,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | set mark * | set label * | set established + * | set zone_restore * | do nat (if needed) * v * +--------------+ @@ -1770,7 +1807,6 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) } idr_init(&ct_priv->fte_ids); - xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1); mutex_init(&ct_priv->control_lock); rhashtable_init(&ct_priv->zone_ht, &zone_params); rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); @@ -1809,7 +1845,6 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); rhashtable_destroy(&ct_priv->zone_ht); mutex_destroy(&ct_priv->control_lock); - xa_destroy(&ct_priv->tuple_ids); idr_destroy(&ct_priv->fte_ids); kfree(ct_priv); @@ -1818,22 +1853,26 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, - struct sk_buff *skb, u32 tupleid) + struct sk_buff *skb, u16 zone) { struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv; - struct mlx5_ct_zone_rule *zone_rule; + struct mlx5_ct_tuple tuple = {}; struct mlx5_ct_entry *entry; - if (!ct_priv || !tupleid) + if (!ct_priv || !zone) return true; - zone_rule = xa_load(&ct_priv->tuple_ids, tupleid); - if (!zone_rule) + if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone - 1)) return false; - entry = container_of(zone_rule, struct mlx5_ct_entry, - zone_rules[zone_rule->nat]); - tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie); + entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple, + tuples_ht_params); + if (!entry) + entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht, + &tuple, tuples_nat_ht_params); + if (!entry) + return false; + tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie); return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 94f74cf71ce4..b5e07bff843b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -67,16 +67,17 @@ struct mlx5_ct_attr { 
misc_parameters_2.metadata_reg_c_5),\ } -#define tupleid_to_reg_ct {\ +#define zone_restore_to_reg_ct {\ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\ .moffset = 0,\ - .mlen = 3,\ + .mlen = 2,\ .soffset = MLX5_BYTE_OFF(fte_match_param,\ misc_parameters_2.metadata_reg_c_1),\ } -#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8) -#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0) +#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen) +#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8) +#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0) #if IS_ENABLED(CONFIG_MLX5_TC_CT) @@ -112,7 +113,7 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, - struct sk_buff *skb, u32 tupleid); + struct sk_buff *skb, u16 zone); #else /* CONFIG_MLX5_TC_CT */ @@ -180,10 +181,10 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, static inline bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, - struct sk_buff *skb, u32 tupleid) + struct sk_buff *skb, u16 zone) { - if (!tupleid) - return true; + if (!zone) + return true; return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd984ef234b2..090069e936b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -186,11 +186,11 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { misc_parameters_2.metadata_reg_c_1), }, [ZONE_TO_REG] = zone_to_reg_ct, + [ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct, [CTSTATE_TO_REG] = ctstate_to_reg_ct, [MARK_TO_REG] = mark_to_reg_ct, [LABELS_TO_REG] = labels_to_reg_ct, [FTEID_TO_REG] = fteid_to_reg_ct, - [TUPLEID_TO_REG] = tupleid_to_reg_ct, }; static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 68d49b945184..856e034b92f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -129,10 +129,10 @@ enum mlx5e_tc_attr_to_reg { TUNNEL_TO_REG, CTSTATE_TO_REG, ZONE_TO_REG, + ZONE_RESTORE_TO_REG, MARK_TO_REG, LABELS_TO_REG, FTEID_TO_REG, - TUPLEID_TO_REG, }; struct mlx5e_tc_attr_to_reg_mapping { From b2fdf3d04714d4eea2f8a5c011cdd84b428c7df5 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 17 Mar 2020 16:32:21 +0200 Subject: [PATCH 0825/2056] net/mlx5e: Export sharing of mod headers to a new file Refactor sharing of mod headers to new file and while there, remove spin lock and flows list, as this is only used for warn on. 
Use the generic API in the next patch to re-use tuple modify headers for identical modify actions, Signed-off-by: Paul Blakey Reviewed-by: Roi Dayan Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/en/fs.h | 2 + .../ethernet/mellanox/mlx5/core/en/mod_hdr.c | 157 +++++++++++++++ .../ethernet/mellanox/mlx5/core/en/mod_hdr.h | 31 +++ .../net/ethernet/mellanox/mlx5/core/en_tc.c | 189 +++--------------- .../net/ethernet/mellanox/mlx5/core/en_tc.h | 6 - .../net/ethernet/mellanox/mlx5/core/eswitch.c | 6 +- 7 files changed, 222 insertions(+), 171 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1e7c7f10db6e..124caec65a34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \ - en_rep.o en/rep/bond.o + en_rep.o en/rep/bond.o en/mod_hdr.o mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ en/mapping.o esw/chains.o en/tc_tun.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 385cbff1caf1..6f4767324044 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -4,6 +4,8 @@ #ifndef __MLX5E_FLOW_STEER_H__ #define __MLX5E_FLOW_STEER_H__ +#include "mod_hdr.h" + enum { MLX5E_TC_FT_LEVEL = 0, MLX5E_TC_TTC_FT_LEVEL, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c new file mode 100644 index 000000000000..7edde4d536fd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2020 Mellanox Technologies + +#include +#include "mod_hdr.h" + +#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) + +struct mod_hdr_key { + int num_actions; + void *actions; +}; + +struct mlx5e_mod_hdr_handle { + /* a node of a hash table which keeps all the mod_hdr entries */ + struct hlist_node mod_hdr_hlist; + + struct mod_hdr_key key; + + struct mlx5_modify_hdr *modify_hdr; + + refcount_t refcnt; + struct completion res_ready; + int compl_result; +}; + +static u32 hash_mod_hdr_info(struct mod_hdr_key *key) +{ + return jhash(key->actions, + key->num_actions * MLX5_MH_ACT_SZ, 0); +} + +static int cmp_mod_hdr_info(struct mod_hdr_key *a, struct mod_hdr_key *b) +{ + if (a->num_actions != b->num_actions) + return 1; + + return memcmp(a->actions, b->actions, + a->num_actions * MLX5_MH_ACT_SZ); +} + +void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl) +{ + mutex_init(&tbl->lock); + hash_init(tbl->hlist); +} + +void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl) +{ + mutex_destroy(&tbl->lock); +} + +static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl, + struct mod_hdr_key *key, + u32 hash_key) +{ + struct mlx5e_mod_hdr_handle *mh, *found = NULL; + + 
hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) { + if (!cmp_mod_hdr_info(&mh->key, key)) { + refcount_inc(&mh->refcnt); + found = mh; + break; + } + } + + return found; +} + +struct mlx5e_mod_hdr_handle * +mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev, + struct mod_hdr_tbl *tbl, + enum mlx5_flow_namespace_type namespace, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts) +{ + int num_actions, actions_size, err; + struct mlx5e_mod_hdr_handle *mh; + struct mod_hdr_key key; + u32 hash_key; + + num_actions = mod_hdr_acts->num_actions; + actions_size = MLX5_MH_ACT_SZ * num_actions; + + key.actions = mod_hdr_acts->actions; + key.num_actions = num_actions; + + hash_key = hash_mod_hdr_info(&key); + + mutex_lock(&tbl->lock); + mh = mod_hdr_get(tbl, &key, hash_key); + if (mh) { + mutex_unlock(&tbl->lock); + wait_for_completion(&mh->res_ready); + + if (mh->compl_result < 0) { + err = -EREMOTEIO; + goto attach_header_err; + } + goto attach_header; + } + + mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL); + if (!mh) { + mutex_unlock(&tbl->lock); + return ERR_PTR(-ENOMEM); + } + + mh->key.actions = (void *)mh + sizeof(*mh); + memcpy(mh->key.actions, key.actions, actions_size); + mh->key.num_actions = num_actions; + refcount_set(&mh->refcnt, 1); + init_completion(&mh->res_ready); + + hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key); + mutex_unlock(&tbl->lock); + + mh->modify_hdr = mlx5_modify_header_alloc(mdev, namespace, + mh->key.num_actions, + mh->key.actions); + if (IS_ERR(mh->modify_hdr)) { + err = PTR_ERR(mh->modify_hdr); + mh->compl_result = err; + goto alloc_header_err; + } + mh->compl_result = 1; + complete_all(&mh->res_ready); + +attach_header: + return mh; + +alloc_header_err: + complete_all(&mh->res_ready); +attach_header_err: + mlx5e_mod_hdr_detach(mdev, tbl, mh); + return ERR_PTR(err); +} + +void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev, + struct mod_hdr_tbl *tbl, + struct mlx5e_mod_hdr_handle *mh) +{ + if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock)) + return; + hash_del(&mh->mod_hdr_hlist); + mutex_unlock(&tbl->lock); + + if (mh->compl_result > 0) + mlx5_modify_header_dealloc(mdev, mh->modify_hdr); + + kfree(mh); +} + +struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh) +{ + return mh->modify_hdr; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h new file mode 100644 index 000000000000..33b23d8f9182 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies */ + +#ifndef __MLX5E_EN_MOD_HDR_H__ +#define __MLX5E_EN_MOD_HDR_H__ + +#include +#include + +struct mlx5e_mod_hdr_handle; + +struct mlx5e_tc_mod_hdr_acts { + int num_actions; + int max_actions; + void *actions; +}; + +struct mlx5e_mod_hdr_handle * +mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev, + struct mod_hdr_tbl *tbl, + enum mlx5_flow_namespace_type namespace, + struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts); +void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev, + struct mod_hdr_tbl *tbl, + struct mlx5e_mod_hdr_handle *mh); +struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh); + +void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl); +void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl); + +#endif /* __MLX5E_EN_MOD_HDR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 
090069e936b7..3814c70b5230 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -63,6 +63,7 @@ #include "en/tc_tun.h" #include "en/mapping.h" #include "en/tc_ct.h" +#include "en/mod_hdr.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "diag/en_tc_tracepoint.h" @@ -140,8 +141,7 @@ struct mlx5e_tc_flow { */ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5e_tc_flow *peer_flow; - struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */ - struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ + struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ @@ -309,29 +309,6 @@ struct mlx5e_hairpin_entry { struct completion res_ready; }; -struct mod_hdr_key { - int num_actions; - void *actions; -}; - -struct mlx5e_mod_hdr_entry { - /* a node of a hash table which keeps all the mod_hdr entries */ - struct hlist_node mod_hdr_hlist; - - /* protects flows list */ - spinlock_t flows_lock; - /* flows sharing the same mod_hdr entry */ - struct list_head flows; - - struct mod_hdr_key key; - - struct mlx5_modify_hdr *modify_hdr; - - refcount_t refcnt; - struct completion res_ready; - int compl_result; -}; - static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow); @@ -408,148 +385,43 @@ static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow) return flow_flag_test(flow, OFFLOADED); } -static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key) -{ - return jhash(key->actions, - key->num_actions * MLX5_MH_ACT_SZ, 0); -} - -static inline int cmp_mod_hdr_info(struct mod_hdr_key *a, - struct mod_hdr_key *b) -{ - if (a->num_actions != b->num_actions) - return 1; - - return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ); -} - -static struct mod_hdr_tbl * -get_mod_hdr_table(struct mlx5e_priv *priv, int namespace) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr : - &priv->fs.tc.mod_hdr; -} - -static struct mlx5e_mod_hdr_entry * -mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key) -{ - struct mlx5e_mod_hdr_entry *mh, *found = NULL; - - hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) { - if (!cmp_mod_hdr_info(&mh->key, key)) { - refcount_inc(&mh->refcnt); - found = mh; - break; - } - } - - return found; -} - -static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv, - struct mlx5e_mod_hdr_entry *mh, - int namespace) -{ - struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace); - - if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock)) - return; - hash_del(&mh->mod_hdr_hlist); - mutex_unlock(&tbl->lock); - - WARN_ON(!list_empty(&mh->flows)); - if (mh->compl_result > 0) - mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr); - - kfree(mh); -} - static int get_flow_name_space(struct mlx5e_tc_flow *flow) { return mlx5e_is_eswitch_flow(flow) ? MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL; } + +static struct mod_hdr_tbl * +get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ? 
+ &esw->offloads.mod_hdr : + &priv->fs.tc.mod_hdr; +} + static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5e_tc_flow_parse_attr *parse_attr) { - int num_actions, actions_size, namespace, err; - struct mlx5e_mod_hdr_entry *mh; - struct mod_hdr_tbl *tbl; - struct mod_hdr_key key; - u32 hash_key; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5e_mod_hdr_handle *mh; - num_actions = parse_attr->mod_hdr_acts.num_actions; - actions_size = MLX5_MH_ACT_SZ * num_actions; + mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow), + get_flow_name_space(flow), + &parse_attr->mod_hdr_acts); + if (IS_ERR(mh)) + return PTR_ERR(mh); - key.actions = parse_attr->mod_hdr_acts.actions; - key.num_actions = num_actions; - - hash_key = hash_mod_hdr_info(&key); - - namespace = get_flow_name_space(flow); - tbl = get_mod_hdr_table(priv, namespace); - - mutex_lock(&tbl->lock); - mh = mlx5e_mod_hdr_get(tbl, &key, hash_key); - if (mh) { - mutex_unlock(&tbl->lock); - wait_for_completion(&mh->res_ready); - - if (mh->compl_result < 0) { - err = -EREMOTEIO; - goto attach_header_err; - } - goto attach_flow; - } - - mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL); - if (!mh) { - mutex_unlock(&tbl->lock); - return -ENOMEM; - } - - mh->key.actions = (void *)mh + sizeof(*mh); - memcpy(mh->key.actions, key.actions, actions_size); - mh->key.num_actions = num_actions; - spin_lock_init(&mh->flows_lock); - INIT_LIST_HEAD(&mh->flows); - refcount_set(&mh->refcnt, 1); - init_completion(&mh->res_ready); - - hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key); - mutex_unlock(&tbl->lock); - - mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace, - mh->key.num_actions, - mh->key.actions); - if (IS_ERR(mh->modify_hdr)) { - err = PTR_ERR(mh->modify_hdr); - mh->compl_result = err; - goto alloc_header_err; - } - mh->compl_result = 1; - complete_all(&mh->res_ready); - -attach_flow: - flow->mh = mh; - spin_lock(&mh->flows_lock); - list_add(&flow->mod_hdr, &mh->flows); - spin_unlock(&mh->flows_lock); + modify_hdr = mlx5e_mod_hdr_get(mh); if (mlx5e_is_eswitch_flow(flow)) - flow->esw_attr->modify_hdr = mh->modify_hdr; + flow->esw_attr->modify_hdr = modify_hdr; else - flow->nic_attr->modify_hdr = mh->modify_hdr; + flow->nic_attr->modify_hdr = modify_hdr; + flow->mh = mh; return 0; - -alloc_header_err: - complete_all(&mh->res_ready); -attach_header_err: - mlx5e_mod_hdr_put(priv, mh, namespace); - return err; } static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, @@ -559,11 +431,8 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv, if (!flow->mh) return; - spin_lock(&flow->mh->flows_lock); - list_del(&flow->mod_hdr); - spin_unlock(&flow->mh->flows_lock); - - mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow)); + mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow), + flow->mh); flow->mh = NULL; } @@ -4460,7 +4329,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->priv = priv; for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) INIT_LIST_HEAD(&flow->encaps[out_index].list); - INIT_LIST_HEAD(&flow->mod_hdr); INIT_LIST_HEAD(&flow->hairpin); INIT_LIST_HEAD(&flow->l3_to_l2_reformat); refcount_set(&flow->refcnt, 1); @@ -5064,9 +4932,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) struct mlx5e_tc_table *tc = &priv->fs.tc; int err; + mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); mutex_init(&tc->t_lock); - mutex_init(&tc->mod_hdr.lock); - hash_init(tc->mod_hdr.hlist); mutex_init(&tc->hairpin_tbl_lock); 
hash_init(tc->hairpin_tbl); @@ -5104,7 +4971,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) &tc->netdevice_nb, &tc->netdevice_nn); - mutex_destroy(&tc->mod_hdr.lock); + mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr); mutex_destroy(&tc->hairpin_tbl_lock); rhashtable_destroy(&tc->ht); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 856e034b92f3..b69f0e376ec0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -148,12 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[]; bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, struct net_device *out_dev); -struct mlx5e_tc_mod_hdr_acts { - int num_actions; - int max_actions; - void *actions; -}; - int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev, struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts, enum mlx5e_tc_attr_to_reg type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d70543ea57dd..c181f6b63f59 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -42,6 +42,7 @@ #include "fs_core.h" #include "devlink.h" #include "ecpf.h" +#include "en/mod_hdr.h" enum { MLX5_ACTION_NONE = 0, @@ -1748,10 +1749,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) mutex_init(&esw->offloads.encap_tbl_lock); hash_init(esw->offloads.encap_tbl); - mutex_init(&esw->offloads.mod_hdr.lock); - hash_init(esw->offloads.mod_hdr.hlist); mutex_init(&esw->offloads.decap_tbl_lock); hash_init(esw->offloads.decap_tbl); + mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr); atomic64_set(&esw->offloads.num_flows, 0); ida_init(&esw->offloads.vport_metadata_ida); mutex_init(&esw->state_lock); @@ -1793,7 +1793,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) mutex_destroy(&esw->mode_lock); mutex_destroy(&esw->state_lock); ida_destroy(&esw->offloads.vport_metadata_ida); - mutex_destroy(&esw->offloads.mod_hdr.lock); + mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr); mutex_destroy(&esw->offloads.encap_tbl_lock); mutex_destroy(&esw->offloads.decap_tbl_lock); kfree(esw->vports); From 6702d393557406e986bd6cfe250472b46a76e804 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 18 Feb 2020 10:24:07 +0200 Subject: [PATCH 0826/2056] net/mlx5e: CT: Re-use tuple modify headers for identical modify actions After removing the tupleid register which changed per tuple, tuple modify headers set the ct_state, zone, mark, and label registers. For non-natted tuples going through the same tc rules path, their values will be the same, and all their modify headers will be the same. Re-use tuple modify headers when possible, by adding each new modify header to a hashtable, and looking up identical ones before creating a new one.
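For illustration, a minimal sketch of how a flow-setup path is expected to consume the shared table from en/mod_hdr.c; this is not part of the patch, the names my_attach_ct_mod_hdr()/my_detach_ct_mod_hdr() are invented, and error unwinding is trimmed:

/* Two callers that build byte-identical action lists get the same
 * refcounted handle, and therefore the same HW modify_hdr object.
 */
static int my_attach_ct_mod_hdr(struct mlx5_core_dev *mdev,
				struct mod_hdr_tbl *tbl,
				struct mlx5e_tc_mod_hdr_acts *mod_acts,
				struct mlx5_esw_flow_attr *attr,
				struct mlx5e_mod_hdr_handle **mh)
{
	/* hashes mod_acts->actions and reuses an existing entry if found */
	*mh = mlx5e_mod_hdr_attach(mdev, tbl, MLX5_FLOW_NAMESPACE_FDB,
				   mod_acts);
	if (IS_ERR(*mh))
		return PTR_ERR(*mh);

	attr->modify_hdr = mlx5e_mod_hdr_get(*mh);	/* HW object for the rule */
	return 0;
}

static void my_detach_ct_mod_hdr(struct mlx5_core_dev *mdev,
				 struct mod_hdr_tbl *tbl,
				 struct mlx5e_mod_hdr_handle *mh)
{
	/* the HW modify header is only deallocated on the last detach */
	mlx5e_mod_hdr_detach(mdev, tbl, mh);
}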
Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 25 +++++++++++-------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index c6e92f818ecf..7b5963593bf1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -16,6 +16,7 @@ #include "esw/chains.h" #include "en/tc_ct.h" +#include "en/mod_hdr.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" @@ -59,6 +60,7 @@ struct mlx5_ct_flow { struct mlx5_ct_zone_rule { struct mlx5_flow_handle *rule; + struct mlx5e_mod_hdr_handle *mh; struct mlx5_esw_flow_attr attr; bool nat; }; @@ -397,7 +399,8 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone); mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); - mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); + mlx5e_mod_hdr_detach(ct_priv->esw->dev, + &esw->offloads.mod_hdr, zone_rule->mh); } static void @@ -579,11 +582,10 @@ static int mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_esw_flow_attr *attr, struct flow_rule *flow_rule, + struct mlx5e_mod_hdr_handle **mh, u16 zone, bool nat) { struct mlx5e_tc_mod_hdr_acts mod_acts = {}; - struct mlx5_eswitch *esw = ct_priv->esw; - struct mlx5_modify_hdr *mod_hdr; struct flow_action_entry *meta; u16 ct_state = 0; int err; @@ -617,14 +619,15 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, if (err) goto err_mapping; - mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, - mod_acts.num_actions, - mod_acts.actions); - if (IS_ERR(mod_hdr)) { - err = PTR_ERR(mod_hdr); + *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev, + &ct_priv->esw->offloads.mod_hdr, + MLX5_FLOW_NAMESPACE_FDB, + &mod_acts); + if (IS_ERR(*mh)) { + err = PTR_ERR(*mh); goto err_mapping; } - attr->modify_hdr = mod_hdr; + attr->modify_hdr = mlx5e_mod_hdr_get(*mh); dealloc_mod_hdr_actions(&mod_acts); return 0; @@ -653,6 +656,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, return -ENOMEM; err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, + &zone_rule->mh, entry->tuple.zone, nat); if (err) { ct_dbg("Failed to create ct entry mod hdr"); @@ -687,7 +691,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, return 0; err_rule: - mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr); + mlx5e_mod_hdr_detach(ct_priv->esw->dev, + &esw->offloads.mod_hdr, zone_rule->mh); err_mod_hdr: kfree(spec); return err; From 8f5b3c3ec10cb896c4949b5c26060bd610025dd8 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 5 May 2020 16:37:22 +0300 Subject: [PATCH 0827/2056] net/mlx5e: CT: Use mapping for zone restore register Use a single byte mapping for zone restore register (zone matching remains 16 bit). This makes room for using the freed 8 bits on register C1 for mapping more tunnels and tunnel options. 
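As a sketch of the zone restore id life cycle after this change (identifiers such as ct_priv, ft, mod_acts, esw and reg_c1 mirror the patch; the fragment is not compilable on its own, and error handling and locking are omitted):

/* 1) ct flow table creation: map the 16-bit zone to a small id */
err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);

/* 2) tuple offload: write the id into the zone-restore byte of reg C1 */
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts,
				ZONE_RESTORE_TO_REG, ft->zone_restore_id);

/* 3) RX restore path: mask out the id and translate it back to the zone
 *    before the tuple lookup (id 0 means there is no ct state to restore)
 */
zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
	return false;

/* 4) ct flow table teardown: release the id */
mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);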
Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/rep/tc.c | 7 +- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 67 ++++++++++++------- .../ethernet/mellanox/mlx5/core/en/tc_ct.h | 10 +-- 3 files changed, 50 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index e27963b80c11..ece8f535ce80 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -594,7 +594,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct mlx5e_tc_update_priv *tc_priv) { #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone; + u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id; struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; struct tc_skb_ext *tc_skb_ext; @@ -631,11 +631,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, tc_skb_ext->chain = chain; - zone = reg_c1 & ZONE_RESTORE_MAX; + zone_restore_id = reg_c1 & ZONE_RESTORE_MAX; uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; - if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, zone)) + if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, + zone_restore_id)) return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 7b5963593bf1..3802a26e944c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -17,6 +17,7 @@ #include "esw/chains.h" #include "en/tc_ct.h" #include "en/mod_hdr.h" +#include "en/mapping.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" @@ -46,6 +47,7 @@ struct mlx5_tc_ct_priv { struct mlx5_flow_table *ct_nat; struct mlx5_flow_table *post_ct; struct mutex control_lock; /* guards parallel adds/dels */ + struct mapping_ctx *zone_mapping; }; struct mlx5_ct_flow { @@ -77,6 +79,7 @@ struct mlx5_tc_ct_pre { struct mlx5_ct_ft { struct rhash_head node; u16 zone; + u32 zone_restore_id; refcount_t refcount; struct nf_flowtable *nf_ft; struct mlx5_tc_ct_priv *ct_priv; @@ -434,7 +437,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, u8 ct_state, u32 mark, u32 label, - u16 zone) + u8 zone_restore_id) { struct mlx5_eswitch *esw = ct_priv->esw; int err; @@ -455,7 +458,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, return err; err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, - ZONE_RESTORE_TO_REG, zone + 1); + ZONE_RESTORE_TO_REG, zone_restore_id); if (err) return err; @@ -583,7 +586,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_esw_flow_attr *attr, struct flow_rule *flow_rule, struct mlx5e_mod_hdr_handle **mh, - u16 zone, bool nat) + u8 zone_restore_id, bool nat) { struct mlx5e_tc_mod_hdr_acts mod_acts = {}; struct flow_action_entry *meta; @@ -615,7 +618,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, ct_state, meta->ct_metadata.mark, meta->ct_metadata.labels[0], - zone); + zone_restore_id); if (err) goto err_mapping; @@ -641,7 +644,7 @@ static int mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, struct flow_rule *flow_rule, struct mlx5_ct_entry *entry, - bool nat) + bool nat, u8 zone_restore_id) { struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat]; struct mlx5_esw_flow_attr *attr = &zone_rule->attr; @@ -657,7 +660,7 @@ 
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &zone_rule->mh, - entry->tuple.zone, nat); + zone_restore_id, nat); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -701,7 +704,8 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, static int mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, struct flow_rule *flow_rule, - struct mlx5_ct_entry *entry) + struct mlx5_ct_entry *entry, + u8 zone_restore_id) { struct mlx5_eswitch *esw = ct_priv->esw; int err; @@ -713,11 +717,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, return err; } - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false); + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); if (err) goto err_orig; - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true); + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); if (err) goto err_nat; @@ -781,7 +787,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, goto err_tuple_nat; } - err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry); + err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry, + ft->zone_restore_id); if (err) goto err_rules; @@ -1007,7 +1014,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, } if (mask->ct_zone) - mlx5e_tc_match_to_reg_match(spec, ZONE_RESTORE_TO_REG, + mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, key->ct_zone, MLX5_CT_ZONE_MASK); if (ctstate_mask) mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, @@ -1037,18 +1044,6 @@ mlx5_tc_ct_parse_action(struct mlx5e_priv *priv, return -EOPNOTSUPP; } - /* To mark that the need restore ct state on a skb, we mark the - * packet with the zone restore register. To distinguise from an - * uninitalized 0 value for this register, we write zone + 1 on the - * packet. - * - * This restricts us to a max zone of 0xFFFE. 
- */ - if (act->ct.zone == (u16)~0) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported ct zone"); - return -EOPNOTSUPP; - } - attr->ct_attr.zone = act->ct.zone; attr->ct_attr.ct_action = act->ct.action; attr->ct_attr.nf_ft = act->ct.flow_table; @@ -1305,6 +1300,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, if (!ft) return ERR_PTR(-ENOMEM); + err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id); + if (err) + goto err_mapping; + ft->zone = zone; ft->nf_ft = nf_ft; ft->ct_priv = ct_priv; @@ -1337,6 +1336,8 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone, err_init: mlx5_tc_ct_free_pre_ct_tables(ft); err_alloc_pre_ct: + mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); +err_mapping: kfree(ft); return ERR_PTR(err); } @@ -1363,6 +1364,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) mlx5_tc_ct_flush_ft_entry, ct_priv); mlx5_tc_ct_free_pre_ct_tables(ft); + mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id); kfree(ft); } @@ -1786,6 +1788,12 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) goto err_alloc; } + ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true); + if (IS_ERR(ct_priv->zone_mapping)) { + err = PTR_ERR(ct_priv->zone_mapping); + goto err_mapping; + } + ct_priv->esw = esw; ct_priv->netdev = rpriv->netdev; ct_priv->ct = mlx5_esw_chains_create_global_table(esw); @@ -1827,6 +1835,8 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) err_ct_nat_tbl: mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct); err_ct_tbl: + mapping_destroy(ct_priv->zone_mapping); +err_mapping: kfree(ct_priv); err_alloc: err_support: @@ -1845,6 +1855,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct); mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct); + mapping_destroy(ct_priv->zone_mapping); rhashtable_destroy(&ct_priv->ct_tuples_ht); rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); @@ -1858,16 +1869,20 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, - struct sk_buff *skb, u16 zone) + struct sk_buff *skb, u8 zone_restore_id) { struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv; struct mlx5_ct_tuple tuple = {}; struct mlx5_ct_entry *entry; + u16 zone; - if (!ct_priv || !zone) + if (!ct_priv || !zone_restore_id) return true; - if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone - 1)) + if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone)) + return false; + + if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone)) return false; entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index b5e07bff843b..5e10a72f5f24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -70,9 +70,9 @@ struct mlx5_ct_attr { #define zone_restore_to_reg_ct {\ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\ .moffset = 0,\ - .mlen = 2,\ + .mlen = 1,\ .soffset = MLX5_BYTE_OFF(fte_match_param,\ - misc_parameters_2.metadata_reg_c_1),\ + misc_parameters_2.metadata_reg_c_1) + 3,\ } #define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen) @@ -113,7 +113,7 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv 
*uplink_priv, - struct sk_buff *skb, u16 zone); + struct sk_buff *skb, u8 zone_restore_id); #else /* CONFIG_MLX5_TC_CT */ @@ -181,9 +181,9 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, static inline bool mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv, - struct sk_buff *skb, u16 zone) + struct sk_buff *skb, u8 zone_restore_id) { - if (!zone) + if (!zone_restore_id) return true; return false; From d12f4521d3eff83206a0bb4a9135252ffbf1c3b5 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Tue, 5 May 2020 16:41:02 +0300 Subject: [PATCH 0828/2056] net/mlx5e: CT: Expand tunnel register mappings Reg_c1 is 32 bits wide. Originally, 24 bit were allocated for the tuple_id, 6 bits for tunnel mapping and 2 bits for tunnel options mappings. Restoring the ct state from zone lookup instead of tuple id requires reg_c1 to store 8 bits mapping the ct zone, leaving 24 bits for tunnel mappings. Expand tunnel and tunnel options register mappings to 12 bit each. Signed-off-by: Paul Blakey Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3814c70b5230..fa41c49691a7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -180,8 +180,8 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { }, [TUNNEL_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1, - .moffset = 3, - .mlen = 1, + .moffset = 1, + .mlen = 3, .soffset = MLX5_BYTE_OFF(fte_match_param, misc_parameters_2.metadata_reg_c_1), }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index b69f0e376ec0..437f680728fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -70,9 +70,9 @@ struct tunnel_match_enc_opts { * Upper TUNNEL_INFO_BITS for general tunnel info. * Lower ENC_OPTS_BITS bits for enc_opts. */ -#define TUNNEL_INFO_BITS 6 +#define TUNNEL_INFO_BITS 12 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0) -#define ENC_OPTS_BITS 2 +#define ENC_OPTS_BITS 12 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0) #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS) #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0) From 2acc4551d412607d23fd392d819f3b5bd320de48 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 4 May 2020 15:52:14 -0700 Subject: [PATCH 0829/2056] net/mlx5e: CT: Return err_ptr from internal functions Instead of having to deal with converting between int and ERR_PTR for return values in mlx5_tc_ct_flow_offload(), make the internal helper functions return a ptr to mlx5_flow_handle instead of passing it as output param, this will also avoid gcc confusion and false alarms, thus we remove the redundant ERR_PTR rule initialization. 
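To make the calling convention concrete, a minimal sketch of the pattern this patch converges on: internal helpers return either a valid handle or an ERR_PTR()-encoded errno, so callers need no separate int return plus output parameter and no dummy ERR_PTR(-EINVAL) initialization. my_offload_one() is an invented name, not driver code:

static struct mlx5_flow_handle *
my_offload_one(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);	/* errno travels in the pointer */

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	kvfree(spec);	/* spec is not needed once the rule is installed */
	return rule;	/* valid handle on success, ERR_PTR() on failure */
}

/* caller side */
rule = my_offload_one(esw, attr);
if (IS_ERR(rule))
	return PTR_ERR(rule);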
Signed-off-by: Saeed Mahameed Suggested-by: Jason Gunthorpe Reviewed-by: Roi Dayan --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 37 +++++++------------ 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 3802a26e944c..709ad0012c24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1400,12 +1400,11 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * + fte_id match +------------------------> * +--------------+ */ -static int +static struct mlx5_flow_handle * __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *orig_spec, - struct mlx5_esw_flow_attr *attr, - struct mlx5_flow_handle **flow_rule) + struct mlx5_esw_flow_attr *attr) { struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT; @@ -1425,7 +1424,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, if (!post_ct_spec || !ct_flow) { kfree(post_ct_spec); kfree(ct_flow); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } /* Register for CT established events */ @@ -1546,11 +1545,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, } attr->ct_attr.ct_flow = ct_flow; - *flow_rule = ct_flow->post_ct_rule; dealloc_mod_hdr_actions(&pre_mod_acts); kfree(post_ct_spec); - return 0; + return rule; err_insert_orig: mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule, @@ -1568,16 +1566,15 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, kfree(post_ct_spec); kfree(ct_flow); netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); - return err; + return ERR_PTR(err); } -static int +static struct mlx5_flow_handle * __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *orig_spec, struct mlx5_esw_flow_attr *attr, - struct mlx5e_tc_mod_hdr_acts *mod_acts, - struct mlx5_flow_handle **flow_rule) + struct mlx5e_tc_mod_hdr_acts *mod_acts) { struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); struct mlx5_eswitch *esw = ct_priv->esw; @@ -1589,7 +1586,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv, ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); if (!ct_flow) - return -ENOMEM; + return ERR_PTR(-ENOMEM); /* Base esw attributes on original rule attribute */ pre_ct_attr = &ct_flow->pre_ct_attr; @@ -1624,16 +1621,14 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv, attr->ct_attr.ct_flow = ct_flow; ct_flow->pre_ct_rule = rule; - *flow_rule = rule; - - return 0; + return rule; err_insert: mlx5_modify_header_dealloc(priv->mdev, mod_hdr); err_set_registers: netdev_warn(priv->netdev, "Failed to offload ct clear flow, err %d\n", err); - return err; + return ERR_PTR(err); } struct mlx5_flow_handle * @@ -1645,22 +1640,18 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, { bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR; struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); - struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL); - int err; + struct mlx5_flow_handle *rule; if (!ct_priv) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&ct_priv->control_lock); + if (clear_action) - err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr, - mod_hdr_acts, &rule); + rule = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr, mod_hdr_acts); else - err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr, 
- &rule); + rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr); mutex_unlock(&ct_priv->control_lock); - if (err) - return ERR_PTR(err); return rule; } From de96d5732a0832b1f69ff7bc573d63ccae999934 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 4 May 2020 15:53:06 -0700 Subject: [PATCH 0830/2056] net/mlx5e: CT: Remove unused function param "flow" parameter is not used in __mlx5_tc_ct_flow_offload_clear(), remove it. Signed-off-by: Saeed Mahameed Reviewed-by: Roi Dayan --- drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 709ad0012c24..96225e897064 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -1571,7 +1571,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, static struct mlx5_flow_handle * __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *orig_spec, struct mlx5_esw_flow_attr *attr, struct mlx5e_tc_mod_hdr_acts *mod_acts) @@ -1648,7 +1647,7 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv, mutex_lock(&ct_priv->control_lock); if (clear_action) - rule = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr, mod_hdr_acts); + rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts); else rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr); mutex_unlock(&ct_priv->control_lock); From bbe1124944de2f78eaf3141d05f957f8391e7899 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Tue, 30 Jun 2020 15:40:37 +0300 Subject: [PATCH 0831/2056] net/mlx5e: CT: Fix releasing ft entries Before this commit, on ft flush, ft entries were not removed from the ct_tuple hashtables. Fix it. 
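The general shape of the fix, sketched below: an entry linked into several hashtables must be unlinked from all of them by the flush callback too, through the same helper the explicit delete path uses. Every name here (my_ctx, my_entry, my_del_entry, my_flush_cb, entries_ht) is invented for illustration; only rhashtable_free_and_destroy() is the real kernel API:

static void my_flush_cb(void *ptr, void *arg)
{
	struct my_ctx *ctx = arg;
	struct my_entry *entry = ptr;

	/* shared teardown: delete the HW rules and remove the entry from
	 * the other (tuple) hashtables it is a member of
	 */
	my_del_entry(ctx, entry);
	kfree(entry);
}

/* on flow table destruction, instead of only deleting the HW rules: */
rhashtable_free_and_destroy(&ft->entries_ht, my_flush_cb, ctx);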
Fixes: ac991b48d43c ("net/mlx5e: CT: Offload established flows") Signed-off-by: Roi Dayan Signed-off-by: Eli Britstein Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 24 ++++++++++++------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 96225e897064..4c65677feaab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -817,6 +817,19 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, return err; } +static void +mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv, + struct mlx5_ct_entry *entry) +{ + mlx5_tc_ct_entry_del_rules(ct_priv, entry); + if (entry->tuple_node.next) + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, + tuples_ht_params); +} + static int mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, struct flow_cls_offload *flow) @@ -829,13 +842,7 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft, if (!entry) return -ENOENT; - mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry); - if (entry->tuple_node.next) - rhashtable_remove_fast(&ft->ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, - tuples_nat_ht_params); - rhashtable_remove_fast(&ft->ct_priv->ct_tuples_ht, &entry->tuple_node, - tuples_ht_params); + mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry); WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params)); @@ -1348,7 +1355,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg) struct mlx5_tc_ct_priv *ct_priv = arg; struct mlx5_ct_entry *entry = ptr; - mlx5_tc_ct_entry_del_rules(ct_priv, entry); + mlx5_tc_ct_del_ft_entry(ct_priv, entry); + kfree(entry); } static void From 5c3320d7fece4612d4a413aa3c8e82cdb5b49fcb Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 9 Jul 2020 18:10:23 -0700 Subject: [PATCH 0832/2056] libbpf: Fix memory leak and optimize BTF sanitization Coverity's static analysis helpfully reported a memory leak introduced by 0f0e55d8247c ("libbpf: Improve BTF sanitization handling"). While fixing it, I realized that btf__new() already creates a memory copy, so there is no need to do this. So this patch also fixes misleading btf__new() signature to make data into a `const void *` input parameter. And it avoids unnecessary memory allocation and copy in BTF sanitization code altogether. 
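A hedged userspace sketch of what the new signature allows: cloning a BTF object without the intermediate malloc()/memcpy(), since btf__new() copies the raw data it is given. clone_btf() is an invented name and error checking is omitted:

#include <bpf/btf.h>

static struct btf *clone_btf(const struct btf *orig)
{
	const void *raw;
	__u32 sz;

	raw = btf__get_raw_data(orig, &sz);	/* borrowed, owned by orig */
	return btf__new(raw, sz);		/* libbpf makes its own copy */
}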
Fixes: 0f0e55d8247c ("libbpf: Improve BTF sanitization handling") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200710011023.1655008-1-andriin@fb.com --- tools/lib/bpf/btf.c | 2 +- tools/lib/bpf/btf.h | 2 +- tools/lib/bpf/libbpf.c | 11 +++-------- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c8861c9e3635..c9e760e120dc 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -397,7 +397,7 @@ void btf__free(struct btf *btf) free(btf); } -struct btf *btf__new(__u8 *data, __u32 size) +struct btf *btf__new(const void *data, __u32 size) { struct btf *btf; int err; diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 173eff23c472..a3b7ef9b737f 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -63,7 +63,7 @@ struct btf_ext_header { }; LIBBPF_API void btf__free(struct btf *btf); -LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); +LIBBPF_API struct btf *btf__new(const void *data, __u32 size); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6602eb479596..25e4f77be8d7 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2533,17 +2533,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) sanitize = btf_needs_sanitization(obj); if (sanitize) { - const void *orig_data; - void *san_data; + const void *raw_data; __u32 sz; /* clone BTF to sanitize a copy and leave the original intact */ - orig_data = btf__get_raw_data(obj->btf, &sz); - san_data = malloc(sz); - if (!san_data) - return -ENOMEM; - memcpy(san_data, orig_data, sz); - kern_btf = btf__new(san_data, sz); + raw_data = btf__get_raw_data(obj->btf, &sz); + kern_btf = btf__new(raw_data, sz); if (IS_ERR(kern_btf)) return PTR_ERR(kern_btf); From 51c19bf3d5cfaa66571e4b88ba2a6f6295311101 Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 12:09:15 -0400 Subject: [PATCH 0833/2056] Bluetooth: Fix slab-out-of-bounds read in hci_extended_inquiry_result_evt() Check upon `num_rsp` is insufficient. A malformed event packet with a large `num_rsp` number makes hci_extended_inquiry_result_evt() go out of bounds. Fix it. This patch fixes the following syzbot bug: https://syzkaller.appspot.com/bug?id=4bf11aa05c4ca51ce0df86e500fce486552dc8d2 Reported-by: syzbot+d8489a79b781849b9c46@syzkaller.appspotmail.com Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Acked-by: Greg Kroah-Hartman Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 03a0759f2fc2..13d8802b8137 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4375,7 +4375,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) From 19186c7b45c134820ea6fde3165a2cf30c1ace47 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Jul 2020 15:18:23 -0500 Subject: [PATCH 0834/2056] Bluetooth: core: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. 
Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 4 ++-- net/bluetooth/hci_sock.c | 3 +-- net/bluetooth/l2cap_core.c | 19 +++++++++---------- net/bluetooth/l2cap_sock.c | 4 ++-- net/bluetooth/mgmt.c | 4 ++-- net/bluetooth/smp.c | 2 +- 6 files changed, 17 insertions(+), 19 deletions(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 13d8802b8137..927bde511170 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2825,7 +2825,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) case HCI_AUTO_CONN_LINK_LOSS: if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) break; - /* Fall through */ + fallthrough; case HCI_AUTO_CONN_DIRECT: case HCI_AUTO_CONN_ALWAYS: @@ -4320,7 +4320,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, if (hci_setup_sync(conn, conn->link->handle)) goto unlock; } - /* fall through */ + fallthrough; default: conn->state = BT_CLOSED; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index d5627967fc25..fad842750442 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -443,8 +443,7 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) case HCI_DEV_SETUP: if (hdev->manufacturer == 0xffff) return NULL; - - /* fall through */ + fallthrough; case HCI_DEV_UP: skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 35d2bc569a2d..ade83e224567 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -666,8 +666,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); - - /* fall through */ + fallthrough; case L2CAP_MODE_STREAMING: skb_queue_purge(&chan->tx_q); @@ -872,7 +871,8 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) else return HCI_AT_NO_BONDING; } - /* fall through */ + fallthrough; + default: switch (chan->sec_level) { case BT_SECURITY_HIGH: @@ -2983,8 +2983,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, break; case L2CAP_EV_RECV_REQSEQ_AND_FBIT: l2cap_process_reqseq(chan, control->reqseq); - - /* Fall through */ + fallthrough; case L2CAP_EV_RECV_FBIT: if (control && control->final) { @@ -3311,7 +3310,7 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) case L2CAP_MODE_ERTM: if (l2cap_mode_supported(mode, remote_feat_mask)) return mode; - /* fall through */ + fallthrough; default: return L2CAP_MODE_BASIC; } @@ -3447,7 +3446,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data if (__l2cap_efs_supported(chan->conn)) set_bit(FLAG_EFS_ENABLE, &chan->flags); - /* fall through */ + fallthrough; default: chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); break; @@ -4539,7 +4538,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, goto done; break; } - /* fall through */ + fallthrough; default: l2cap_chan_set_err(chan, ECONNRESET); @@ -7719,7 +7718,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) conn->mtu = hcon->hdev->le_mtu; break; } - /* fall through */ + fallthrough; default: conn->mtu = hcon->hdev->acl_mtu; break; @@ -7841,7 +7840,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, 
__le16 psm, u16 cid, case L2CAP_MODE_STREAMING: if (!disable_ertm) break; - /* fall through */ + fallthrough; default: err = -EOPNOTSUPP; goto done; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a995d2c51fa7..738a5345fa21 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -284,7 +284,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog) case L2CAP_MODE_STREAMING: if (!disable_ertm) break; - /* fall through */ + fallthrough; default: err = -EOPNOTSUPP; goto done; @@ -760,7 +760,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, case L2CAP_MODE_STREAMING: if (!disable_ertm) break; - /* fall through */ + fallthrough; default: err = -EINVAL; break; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index d29da80e38fe..686ef4792831 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -4525,7 +4525,7 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, *mgmt_status = mgmt_le_support(hdev); if (*mgmt_status) return false; - /* Intentional fall-through */ + fallthrough; case DISCOV_TYPE_BREDR: *mgmt_status = mgmt_bredr_support(hdev); if (*mgmt_status) @@ -5901,7 +5901,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, case MGMT_LTK_P256_DEBUG: authenticated = 0x00; type = SMP_LTK_P256_DEBUG; - /* fall through */ + fallthrough; default: continue; } diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 6fd9ddb2d85c..c5c812e8130e 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1658,7 +1658,7 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) memset(smp->tk, 0, sizeof(smp->tk)); BT_DBG("PassKey: %d", value); put_unaligned_le32(value, smp->tk); - /* Fall Through */ + fallthrough; case MGMT_OP_USER_CONFIRM_REPLY: set_bit(SMP_FLAG_TK_VALID, &smp->flags); break; From 710a9194610ace9db8ea9ac44677ed69602b1ad9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Jul 2020 15:25:05 -0500 Subject: [PATCH 0835/2056] Bluetooth: RFCOMM: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: Marcel Holtmann --- net/bluetooth/rfcomm/core.c | 2 +- net/bluetooth/rfcomm/sock.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 2e20af317cea..f2bacb464ccf 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -479,7 +479,7 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) /* if closing a dlc in a session that hasn't been started, * just close and unlink the dlc */ - /* fall through */ + fallthrough; default: rfcomm_dlc_clear_timer(d); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index df14eebe80da..0afc4bc5ab41 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -218,7 +218,7 @@ static void __rfcomm_sock_close(struct sock *sk) case BT_CONFIG: case BT_CONNECTED: rfcomm_dlc_close(d, 0); - /* fall through */ + fallthrough; default: sock_set_flag(sk, SOCK_ZAPPED); From a3b4cbfc078d9b97b500d78f225c6b53c43ff745 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Wed, 8 Jul 2020 15:26:50 -0500 Subject: [PATCH 0836/2056] Bluetooth: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bcm203x.c | 2 +- drivers/bluetooth/bluecard_cs.c | 2 -- drivers/bluetooth/hci_ll.c | 2 +- drivers/bluetooth/hci_qca.c | 8 +------- 4 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c index 3b176257b993..e667933c3d70 100644 --- a/drivers/bluetooth/bcm203x.c +++ b/drivers/bluetooth/bcm203x.c @@ -106,7 +106,7 @@ static void bcm203x_complete(struct urb *urb) } data->state = BCM203X_LOAD_FIRMWARE; - /* fall through */ + fallthrough; case BCM203X_LOAD_FIRMWARE: if (data->fw_sent == data->fw_size) { usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index cc6e56223656..36eabf61717f 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -295,7 +295,6 @@ static void bluecard_write_wakeup(struct bluecard_info *info) baud_reg = REG_CONTROL_BAUD_RATE_115200; break; case PKT_BAUD_RATE_57600: - /* Fall through... */ default: baud_reg = REG_CONTROL_BAUD_RATE_57600; break; @@ -585,7 +584,6 @@ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200; break; case 57600: - /* Fall through... */ default: cmd[4] = 0x03; hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index d9a4c6c691e0..8bfe024d1fcd 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -219,7 +219,7 @@ static void ll_device_want_to_wakeup(struct hci_uart *hu) * perfectly safe to always send one. */ BT_DBG("dual wake-up-indication"); - /* fall through */ + fallthrough; case HCILL_ASLEEP: /* acknowledge device wake up */ if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) { diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 99d14c777105..7e395469ca4f 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -472,8 +472,6 @@ static void hci_ibs_tx_idle_timeout(struct timer_list *t) case HCI_IBS_TX_ASLEEP: case HCI_IBS_TX_WAKING: - /* Fall through */ - default: BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; @@ -516,8 +514,6 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t) case HCI_IBS_TX_ASLEEP: case HCI_IBS_TX_AWAKE: - /* Fall through */ - default: BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; @@ -835,8 +831,6 @@ static void device_woke_up(struct hci_uart *hu) break; case HCI_IBS_TX_ASLEEP: - /* Fall through */ - default: BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d", qca->tx_ibs_state); @@ -2072,7 +2066,7 @@ static int __maybe_unused qca_suspend(struct device *dev) switch (qca->tx_ibs_state) { case HCI_IBS_TX_WAKING: del_timer(&qca->wake_retrans_timer); - /* Fall through */ + fallthrough; case HCI_IBS_TX_AWAKE: del_timer(&qca->tx_idle_timer); From d5baf620e5ba31bfd0205bcda15f79a0fa2021ab Mon Sep 17 00:00:00 2001 From: "Alexander A. 
Klimov" Date: Wed, 8 Jul 2020 15:36:38 +0200 Subject: [PATCH 0837/2056] Replace HTTP links with HTTPS ones: BLUETOOTH SUBSYSTEM Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Marcel Holtmann --- net/bluetooth/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 1d6d243cdde9..e2497d764e97 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -21,7 +21,7 @@ menuconfig BT It was designed as a replacement for cables and other short-range technologies like IrDA. Bluetooth operates in personal area range that typically extends up to 10 meters. More information about - Bluetooth can be found at . + Bluetooth can be found at . Linux Bluetooth subsystem consist of several layers: Bluetooth Core From a2b992c828f7651db369ba8f0eb0818d70232636 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:44 -0700 Subject: [PATCH 0838/2056] debugfs: make sure we can remove u32_array files cleanly debugfs_create_u32_array() allocates a small structure to wrap the data and size information about the array. If users ever try to remove the file this leads to a leak since nothing ever frees this wrapper. That said there are no upstream users of debugfs_create_u32_array() that'd remove a u32 array file (we only have one u32 array user in CMA), so there is no real bug here. Make callers pass a wrapper they allocated. This way the lifetime management of the wrapper is on the caller, and we can avoid the potential leak in debugfs. CC: Chucheng Luo Signed-off-by: Jakub Kicinski Reviewed-by: Greg Kroah-Hartman Signed-off-by: David S. Miller --- Documentation/filesystems/debugfs.rst | 12 ++++++++---- fs/debugfs/file.c | 27 +++++++-------------------- include/linux/debugfs.h | 12 +++++++++--- mm/cma.h | 3 +++ mm/cma_debug.c | 7 ++++--- 5 files changed, 31 insertions(+), 30 deletions(-) diff --git a/Documentation/filesystems/debugfs.rst b/Documentation/filesystems/debugfs.rst index 1da7a4b7383d..728ab57a611a 100644 --- a/Documentation/filesystems/debugfs.rst +++ b/Documentation/filesystems/debugfs.rst @@ -185,13 +185,17 @@ byte offsets over a base for the register block. If you want to dump an u32 array in debugfs, you can create file with:: + struct debugfs_u32_array { + u32 *array; + u32 n_elements; + }; + void debugfs_create_u32_array(const char *name, umode_t mode, struct dentry *parent, - u32 *array, u32 elements); + struct debugfs_u32_array *array); -The "array" argument provides data, and the "elements" argument is -the number of elements in the array. Note: Once array is created its -size can not be changed. +The "array" argument wraps a pointer to the array's data and the number +of its elements. Note: Once array is created its size can not be changed. 
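A minimal caller sketch may help here; the foo_stats structure and function names below are purely illustrative and are not part of this patch. The point is that the caller now owns the wrapper and must keep it alive for as long as the debugfs file exists, which is what the cma hunks later in this patch do by embedding struct debugfs_u32_array in struct cma::

	#include <linux/debugfs.h>
	#include <linux/kernel.h>

	struct foo_stats {
		u32 counters[16];
		/* wrapper must outlive the debugfs file created below */
		struct debugfs_u32_array dfs_counters;
	};

	static void foo_debugfs_init(struct foo_stats *st, struct dentry *dir)
	{
		st->dfs_counters.array = st->counters;
		st->dfs_counters.n_elements = ARRAY_SIZE(st->counters);
		debugfs_create_u32_array("counters", 0444, dir,
					 &st->dfs_counters);
	}
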
There is a helper function to create device related seq_file:: diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index ae49a55bda00..d0ed71f37511 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -918,11 +918,6 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_blob); -struct array_data { - void *array; - u32 elements; -}; - static size_t u32_format_array(char *buf, size_t bufsize, u32 *array, int array_size) { @@ -943,8 +938,8 @@ static size_t u32_format_array(char *buf, size_t bufsize, static int u32_array_open(struct inode *inode, struct file *file) { - struct array_data *data = inode->i_private; - int size, elements = data->elements; + struct debugfs_u32_array *data = inode->i_private; + int size, elements = data->n_elements; char *buf; /* @@ -959,7 +954,7 @@ static int u32_array_open(struct inode *inode, struct file *file) buf[size] = 0; file->private_data = buf; - u32_format_array(buf, size, data->array, data->elements); + u32_format_array(buf, size, data->array, data->n_elements); return nonseekable_open(inode, file); } @@ -996,8 +991,7 @@ static const struct file_operations u32_array_fops = { * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the debugfs filesystem. - * @array: u32 array that provides data. - * @elements: total number of elements in the array. + * @array: wrapper struct containing data pointer and size of the array. * * This function creates a file in debugfs with the given name that exports * @array as data. If the @mode variable is so set it can be read from. @@ -1005,17 +999,10 @@ static const struct file_operations u32_array_fops = { * Once array is created its size can not be changed. 
*/ void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { - struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL); - - if (data == NULL) - return; - - data->array = array; - data->elements = elements; - - debugfs_create_file_unsafe(name, mode, parent, data, &u32_array_fops); + debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops); } EXPORT_SYMBOL_GPL(debugfs_create_u32_array); diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 63cb3606dea7..851dd1f9a8a5 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -38,6 +38,11 @@ struct debugfs_regset32 { struct device *dev; /* Optional device for Runtime PM */ }; +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + extern struct dentry *arch_debugfs_dir; #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ @@ -136,7 +141,8 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements); + struct dentry *parent, + struct debugfs_u32_array *array); struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, @@ -316,8 +322,8 @@ static inline bool debugfs_initialized(void) } static inline void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, - u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { } diff --git a/mm/cma.h b/mm/cma.h index 33c0b517733c..6698fa63279b 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -2,6 +2,8 @@ #ifndef __MM_CMA_H__ #define __MM_CMA_H__ +#include + struct cma { unsigned long base_pfn; unsigned long count; @@ -11,6 +13,7 @@ struct cma { #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; + struct debugfs_u32_array dfs_bitmap; #endif const char *name; }; diff --git a/mm/cma_debug.c b/mm/cma_debug.c index 4e6cbe2f586e..d5bf8aa34fdc 100644 --- a/mm/cma_debug.c +++ b/mm/cma_debug.c @@ -164,7 +164,6 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) { struct dentry *tmp; char name[16]; - int u32s; scnprintf(name, sizeof(name), "cma-%s", cma->name); @@ -180,8 +179,10 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry) debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); - u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32)); - debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s); + cma->dfs_bitmap.array = (u32 *)cma->bitmap; + cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma), + BITS_PER_BYTE * sizeof(u32)); + debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap); } static int __init cma_debugfs_init(void) From 84a4160e5a5951357947ad296932b433de3e34a0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:45 -0700 Subject: [PATCH 0839/2056] udp_tunnel: re-number the offload tunnel types Make it possible to use tunnel types as flags more easily. There doesn't appear to be any user using the type as an array index, so this should make no difference. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- include/net/udp_tunnel.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index e7312ceb2794..0615e25f041c 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -106,9 +106,9 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, * call this function to perform Tx offloads on outgoing traffic. */ enum udp_parsable_tunnel_type { - UDP_TUNNEL_TYPE_VXLAN, /* RFC 7348 */ - UDP_TUNNEL_TYPE_GENEVE, /* draft-ietf-nvo3-geneve */ - UDP_TUNNEL_TYPE_VXLAN_GPE, /* draft-ietf-nvo3-vxlan-gpe */ + UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */ + UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */ + UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */ }; struct udp_tunnel_info { From cc4e3835eff474aa274d6e1d18f69d9d296d3b76 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:46 -0700 Subject: [PATCH 0840/2056] udp_tunnel: add central NIC RX port offload infrastructure Cater to devices which: (a) may want to sleep in the callbacks; (b) only have IPv4 support; (c) need all the programming to happen while the netdev is up. Drivers attach UDP tunnel offload info struct to their netdevs, where they declare how many UDP ports of various tunnel types they support. Core takes care of tracking which ports to offload. Use a fixed-size array since this matches what almost all drivers do, and avoids a complexity and uncertainty around memory allocations in an atomic context. Make sure that tunnel drivers don't try to replay the ports when new NIC netdev is registered. Automatic replays would mess up reference counting, and will be removed completely once all drivers are converted. v4: - use a #define NULL to avoid build issues with CONFIG_INET=n. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 6 +- drivers/net/vxlan.c | 6 +- include/linux/netdevice.h | 8 + include/net/udp_tunnel.h | 137 ++++ net/ipv4/Makefile | 3 +- net/ipv4/{udp_tunnel.c => udp_tunnel_core.c} | 0 net/ipv4/udp_tunnel_nic.c | 821 +++++++++++++++++++ net/ipv4/udp_tunnel_stub.c | 7 + 8 files changed, 983 insertions(+), 5 deletions(-) rename net/ipv4/{udp_tunnel.c => udp_tunnel_core.c} (100%) create mode 100644 net/ipv4/udp_tunnel_nic.c create mode 100644 net/ipv4/udp_tunnel_stub.c diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index e3d074008da2..49b00def2eef 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1796,9 +1796,11 @@ static int geneve_netdevice_event(struct notifier_block *unused, event == NETDEV_UDP_TUNNEL_DROP_INFO) { geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); } else if (event == NETDEV_UNREGISTER) { - geneve_offload_rx_ports(dev, false); + if (!dev->udp_tunnel_nic_info) + geneve_offload_rx_ports(dev, false); } else if (event == NETDEV_REGISTER) { - geneve_offload_rx_ports(dev, true); + if (!dev->udp_tunnel_nic_info) + geneve_offload_rx_ports(dev, true); } return NOTIFY_DONE; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 89d85dcb200e..a43c97b13924 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -4477,10 +4477,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused, struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); if (event == NETDEV_UNREGISTER) { - vxlan_offload_rx_ports(dev, false); + if (!dev->udp_tunnel_nic_info) + vxlan_offload_rx_ports(dev, false); vxlan_handle_lowerdev_unregister(vn, dev); } else if (event == NETDEV_REGISTER) { - vxlan_offload_rx_ports(dev, true); + if (!dev->udp_tunnel_nic_info) + vxlan_offload_rx_ports(dev, true); } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || event == NETDEV_UDP_TUNNEL_DROP_INFO) { vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 39e28e11863c..ac2cd3f49aba 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -65,6 +65,8 @@ struct wpan_dev; struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; +struct udp_tunnel_nic_info; +struct udp_tunnel_nic; struct bpf_prog; struct xdp_buff; @@ -1836,6 +1838,10 @@ enum netdev_priv_flags { * * @macsec_ops: MACsec offloading ops * + * @udp_tunnel_nic_info: static structure describing the UDP tunnel + * offload capabilities of the device + * @udp_tunnel_nic: UDP tunnel offload state + * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
*/ @@ -2134,6 +2140,8 @@ struct net_device { /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; }; #define to_net_dev(d) container_of(d, struct net_device, dev) diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 0615e25f041c..ee34619e4cfa 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -115,6 +115,7 @@ struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; + u8 hw_priv; }; /* Notify network devices of offloadable types */ @@ -181,4 +182,140 @@ static inline void udp_tunnel_encap_enable(struct socket *sock) udp_encap_enable(); } +#define UDP_TUNNEL_NIC_MAX_TABLES 4 + +enum udp_tunnel_nic_info_flags { + /* Device callbacks may sleep */ + UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0), + /* Device only supports offloads when it's open, all ports + * will be removed before close and re-added after open. + */ + UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), + /* Device supports only IPv4 tunnels */ + UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), +}; + +/** + * struct udp_tunnel_nic_info - driver UDP tunnel offload information + * @set_port: callback for adding a new port + * @unset_port: callback for removing a port + * @sync_table: callback for syncing the entire port table at once + * @flags: device flags from enum udp_tunnel_nic_info_flags + * @tables: UDP port tables this device has + * @tables.n_entries: number of entries in this table + * @tables.tunnel_types: types of tunnels this table accepts + * + * Drivers are expected to provide either @set_port and @unset_port callbacks + * or the @sync_table callback. Callbacks are invoked with rtnl lock held. + * + * Known limitations: + * - UDP tunnel port notifications are fundamentally best-effort - + * it is likely the driver will both see skbs which use a UDP tunnel port, + * while not being a tunneled skb, and tunnel skbs from other ports - + * drivers should only use these ports for non-critical RX-side offloads, + * e.g. the checksum offload; + * - none of the devices care about the socket family at present, so we don't + * track it. Please extend this code if you care. + */ +struct udp_tunnel_nic_info { + /* one-by-one */ + int (*set_port)(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti); + int (*unset_port)(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti); + + /* all at once */ + int (*sync_table)(struct net_device *dev, unsigned int table); + + unsigned int flags; + + struct udp_tunnel_nic_table_info { + unsigned int n_entries; + unsigned int tunnel_types; + } tables[UDP_TUNNEL_NIC_MAX_TABLES]; +}; + +/* UDP tunnel module dependencies + * + * Tunnel drivers are expected to have a hard dependency on the udp_tunnel + * module. NIC drivers are not, they just attach their + * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come. + * Loading a tunnel driver will cause the udp_tunnel module to be loaded + * and only then will all the required state structures be allocated. + * Since we want a weak dependency from the drivers and the core to udp_tunnel + * we call things through the following stubs. 
+ */ +struct udp_tunnel_nic_ops { + void (*get_port)(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti); + void (*set_port_priv)(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv); + void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); + void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); + void (*reset_ntf)(struct net_device *dev); +}; + +#ifdef CONFIG_INET +extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; +#else +#define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL) +#endif + +static inline void +udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti) +{ + /* This helper is used from .sync_table, we indicate empty entries + * by zero'ed @ti. Drivers which need to know the details of a port + * when it gets deleted should use the .set_port / .unset_port + * callbacks. + * Zero out here, otherwise !CONFIG_INET causes uninitilized warnings. + */ + memset(ti, 0, sizeof(*ti)); + + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->get_port(dev, table, idx, ti); +} + +static inline void +udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv); +} + +static inline void +udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->add_port(dev, ti); +} + +static inline void +udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->del_port(dev, ti); +} + +/** + * udp_tunnel_nic_reset_ntf() - device-originating reset notification + * @dev: network interface device structure + * + * Called by the driver to inform the core that the entire UDP tunnel port + * state has been lost, usually due to device reset. Core will assume device + * forgot all the ports and issue .set_port and .sync_table callbacks as + * necessary. + * + * This function must be called with rtnl lock held, and will issue all + * the callbacks before returning. 
+ */ +static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) +{ + if (udp_tunnel_nic_ops) + udp_tunnel_nic_ops->reset_ntf(dev); +} #endif diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile index 9e1a186a3671..5b77a46885b9 100644 --- a/net/ipv4/Makefile +++ b/net/ipv4/Makefile @@ -14,7 +14,7 @@ obj-y := route.o inetpeer.o protocol.o \ udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \ fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \ inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \ - metrics.o netlink.o nexthop.o + metrics.o netlink.o nexthop.o udp_tunnel_stub.o obj-$(CONFIG_BPFILTER) += bpfilter/ @@ -29,6 +29,7 @@ gre-y := gre_demux.o obj-$(CONFIG_NET_FOU) += fou.o obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o obj-$(CONFIG_NET_IPGRE) += ip_gre.o +udp_tunnel-y := udp_tunnel_core.o udp_tunnel_nic.o obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o obj-$(CONFIG_NET_IPVTI) += ip_vti.o obj-$(CONFIG_SYN_COOKIES) += syncookies.o diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel_core.c similarity index 100% rename from net/ipv4/udp_tunnel.c rename to net/ipv4/udp_tunnel_core.c diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c new file mode 100644 index 000000000000..056cfe0b770e --- /dev/null +++ b/net/ipv4/udp_tunnel_nic.c @@ -0,0 +1,821 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2020 Facebook Inc. + +#include +#include +#include +#include +#include + +enum udp_tunnel_nic_table_entry_flags { + UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0), + UDP_TUNNEL_NIC_ENTRY_DEL = BIT(1), + UDP_TUNNEL_NIC_ENTRY_OP_FAIL = BIT(2), + UDP_TUNNEL_NIC_ENTRY_FROZEN = BIT(3), +}; + +struct udp_tunnel_nic_table_entry { + __be16 port; + u8 type; + u8 use_cnt; + u8 flags; + u8 hw_priv; +}; + +/** + * struct udp_tunnel_nic - UDP tunnel port offload state + * @work: async work for talking to hardware from process context + * @dev: netdev pointer + * @need_sync: at least one port start changed + * @need_replay: space was freed, we need a replay of all ports + * @work_pending: @work is currently scheduled + * @n_tables: number of tables under @entries + * @missed: bitmap of tables which overflown + * @entries: table of tables of ports currently offloaded + */ +struct udp_tunnel_nic { + struct work_struct work; + + struct net_device *dev; + + u8 need_sync:1; + u8 need_replay:1; + u8 work_pending:1; + + unsigned int n_tables; + unsigned long missed; + struct udp_tunnel_nic_table_entry **entries; +}; + +/* We ensure all work structs are done using driver state, but not the code. + * We need a workqueue we can flush before module gets removed. 
+ */ +static struct workqueue_struct *udp_tunnel_nic_workqueue; + +static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type) +{ + switch (type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + case UDP_TUNNEL_TYPE_VXLAN_GPE: + return "vxlan-gpe"; + default: + return "unknown"; + } +} + +static bool +udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->use_cnt == 0 && !entry->flags; +} + +static bool +udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static void +udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry) +{ + if (!udp_tunnel_nic_entry_is_free(entry)) + entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static void +udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry) +{ + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN; +} + +static bool +udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD | + UDP_TUNNEL_NIC_ENTRY_DEL); +} + +static void +udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn, + struct udp_tunnel_nic_table_entry *entry, + unsigned int flag) +{ + entry->flags |= flag; + utn->need_sync = 1; +} + +static void +udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry, + struct udp_tunnel_info *ti) +{ + memset(ti, 0, sizeof(*ti)); + ti->port = entry->port; + ti->type = entry->type; + ti->hw_priv = entry->hw_priv; +} + +static bool +udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j])) + return false; + return true; +} + +static bool +udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + if (!utn->missed) + return false; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!test_bit(i, &utn->missed)) + continue; + + for (j = 0; j < table->n_entries; j++) + if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j])) + return true; + } + + return false; +} + +static void +__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, + unsigned int idx, struct udp_tunnel_info *ti) +{ + struct udp_tunnel_nic_table_entry *entry; + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + entry = &utn->entries[table][idx]; + + if (entry->use_cnt) + udp_tunnel_nic_ti_from_entry(entry, ti); +} + +static void +__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, + unsigned int idx, u8 priv) +{ + dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv; +} + +static void +udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry, + int err) +{ + bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + + WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD && + entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL); + + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD && + (!err || (err == -EEXIST && dodgy))) + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD; + + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL && + (!err || (err == -ENOENT && dodgy))) + entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL; + + if (!err) + entry->flags &= 
~UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + else + entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL; +} + +static void +udp_tunnel_nic_device_sync_one(struct net_device *dev, + struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx) +{ + struct udp_tunnel_nic_table_entry *entry; + struct udp_tunnel_info ti; + int err; + + entry = &utn->entries[table][idx]; + if (!udp_tunnel_nic_entry_is_queued(entry)) + return; + + udp_tunnel_nic_ti_from_entry(entry, &ti); + if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD) + err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti); + else + err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx, + &ti); + udp_tunnel_nic_entry_update_done(entry, err); + + if (err) + netdev_warn(dev, + "UDP tunnel port sync failed port %d type %s: %d\n", + be16_to_cpu(entry->port), + udp_tunnel_nic_tunnel_type_name(entry->type), + err); +} + +static void +udp_tunnel_nic_device_sync_by_port(struct net_device *dev, + struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_device_sync_one(dev, utn, i, j); +} + +static void +udp_tunnel_nic_device_sync_by_table(struct net_device *dev, + struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + int err; + + for (i = 0; i < utn->n_tables; i++) { + /* Find something that needs sync in this table */ + for (j = 0; j < info->tables[i].n_entries; j++) + if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j])) + break; + if (j == info->tables[i].n_entries) + continue; + + err = info->sync_table(dev, i); + if (err) + netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n", + i, err); + + for (j = 0; j < info->tables[i].n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + if (udp_tunnel_nic_entry_is_queued(entry)) + udp_tunnel_nic_entry_update_done(entry, err); + } + } +} + +static void +__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + if (!utn->need_sync) + return; + + if (dev->udp_tunnel_nic_info->sync_table) + udp_tunnel_nic_device_sync_by_table(dev, utn); + else + udp_tunnel_nic_device_sync_by_port(dev, utn); + + utn->need_sync = 0; + /* Can't replay directly here, in case we come from the tunnel driver's + * notification - trying to replay may deadlock inside tunnel driver. + */ + utn->need_replay = udp_tunnel_nic_should_replay(dev, utn); +} + +static void +udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + bool may_sleep; + + if (!utn->need_sync) + return; + + /* Drivers which sleep in the callback need to update from + * the workqueue, if we come from the tunnel driver's notification. 
+ */ + may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP; + if (!may_sleep) + __udp_tunnel_nic_device_sync(dev, utn); + if (may_sleep || utn->need_replay) { + queue_work(udp_tunnel_nic_workqueue, &utn->work); + utn->work_pending = 1; + } +} + +static bool +udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table, + struct udp_tunnel_info *ti) +{ + return table->tunnel_types & ti->type; +} + +static bool +udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i; + + /* Special case IPv4-only NICs */ + if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY && + ti->sa_family != AF_INET) + return false; + + for (i = 0; i < utn->n_tables; i++) + if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti)) + return true; + return false; +} + +static int +udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic_table_entry *entry; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + entry = &utn->entries[i][j]; + + if (!udp_tunnel_nic_entry_is_free(entry) && + entry->port == ti->port && + entry->type != ti->type) { + __set_bit(i, &utn->missed); + return true; + } + } + return false; +} + +static void +udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx, int use_cnt_adj) +{ + struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx]; + bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL; + unsigned int from, to; + + /* If not going from used to unused or vice versa - all done. + * For dodgy entries make sure we try to sync again (queue the entry). + */ + entry->use_cnt += use_cnt_adj; + if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj)) + return; + + /* Cancel the op before it was sent to the device, if possible, + * otherwise we'd need to take special care to issue commands + * in the same order the ports arrived. + */ + if (use_cnt_adj < 0) { + from = UDP_TUNNEL_NIC_ENTRY_ADD; + to = UDP_TUNNEL_NIC_ENTRY_DEL; + } else { + from = UDP_TUNNEL_NIC_ENTRY_DEL; + to = UDP_TUNNEL_NIC_ENTRY_ADD; + } + + if (entry->flags & from) { + entry->flags &= ~from; + if (!dodgy) + return; + } + + udp_tunnel_nic_entry_queue(utn, entry, to); +} + +static bool +udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn, + unsigned int table, unsigned int idx, + struct udp_tunnel_info *ti, int use_cnt_adj) +{ + struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx]; + + if (udp_tunnel_nic_entry_is_free(entry) || + entry->port != ti->port || + entry->type != ti->type) + return false; + + if (udp_tunnel_nic_entry_is_frozen(entry)) + return true; + + udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj); + return true; +} + +/* Try to find existing matching entry and adjust its use count, instead of + * adding a new one. Returns true if entry was found. In case of delete the + * entry may have gotten removed in the process, in which case it will be + * queued for removal. 
+ */ +static bool +udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti, int use_cnt_adj) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!udp_tunnel_nic_table_is_capable(table, ti)) + continue; + + for (j = 0; j < table->n_entries; j++) + if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti, + use_cnt_adj)) + return true; + } + + return false; +} + +static bool +udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + return udp_tunnel_nic_try_existing(dev, utn, ti, +1); +} + +static bool +udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + return udp_tunnel_nic_try_existing(dev, utn, ti, -1); +} + +static bool +udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn, + struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_table_info *table; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) { + table = &dev->udp_tunnel_nic_info->tables[i]; + if (!udp_tunnel_nic_table_is_capable(table, ti)) + continue; + + for (j = 0; j < table->n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + if (!udp_tunnel_nic_entry_is_free(entry)) + continue; + + entry->port = ti->port; + entry->type = ti->type; + entry->use_cnt = 1; + udp_tunnel_nic_entry_queue(utn, entry, + UDP_TUNNEL_NIC_ENTRY_ADD); + return true; + } + + /* The different table may still fit this port in, but there + * are no devices currently which have multiple tables accepting + * the same tunnel type, and false positives are okay. + */ + __set_bit(i, &utn->missed); + } + + return false; +} + +static void +__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY) + return; + + if (!udp_tunnel_nic_is_capable(dev, utn, ti)) + return; + + /* It may happen that a tunnel of one type is removed and different + * tunnel type tries to reuse its port before the device was informed. + * Rely on utn->missed to re-add this port later. 
+ */ + if (udp_tunnel_nic_has_collision(dev, utn, ti)) + return; + + if (!udp_tunnel_nic_add_existing(dev, utn, ti)) + udp_tunnel_nic_add_new(dev, utn, ti); + + udp_tunnel_nic_device_sync(dev, utn); +} + +static void +__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) +{ + struct udp_tunnel_nic *utn; + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + + if (!udp_tunnel_nic_is_capable(dev, utn, ti)) + return; + + udp_tunnel_nic_del_existing(dev, utn, ti); + + udp_tunnel_nic_device_sync(dev, utn); +} + +static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int i, j; + + ASSERT_RTNL(); + + utn = dev->udp_tunnel_nic; + if (!utn) + return; + + utn->need_sync = false; + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + struct udp_tunnel_nic_table_entry *entry; + + entry = &utn->entries[i][j]; + + entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL | + UDP_TUNNEL_NIC_ENTRY_OP_FAIL); + /* We don't release rtnl across ops */ + WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN); + if (!entry->use_cnt) + continue; + + udp_tunnel_nic_entry_queue(utn, entry, + UDP_TUNNEL_NIC_ENTRY_ADD); + } + + __udp_tunnel_nic_device_sync(dev, utn); +} + +static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { + .get_port = __udp_tunnel_nic_get_port, + .set_port_priv = __udp_tunnel_nic_set_port_priv, + .add_port = __udp_tunnel_nic_add_port, + .del_port = __udp_tunnel_nic_del_port, + .reset_ntf = __udp_tunnel_nic_reset_ntf, +}; + +static void +udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) { + int adj_cnt = -utn->entries[i][j].use_cnt; + + if (adj_cnt) + udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt); + } + + __udp_tunnel_nic_device_sync(dev, utn); + + for (i = 0; i < utn->n_tables; i++) + memset(utn->entries[i], 0, array_size(info->tables[i].n_entries, + sizeof(**utn->entries))); + WARN_ON(utn->need_sync); + utn->need_replay = 0; +} + +static void +udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + unsigned int i, j; + + /* Freeze all the ports we are already tracking so that the replay + * does not double up the refcount. 
+ */ + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]); + utn->missed = 0; + utn->need_replay = 0; + + udp_tunnel_get_rx_info(dev); + + for (i = 0; i < utn->n_tables; i++) + for (j = 0; j < info->tables[i].n_entries; j++) + udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]); +} + +static void udp_tunnel_nic_device_sync_work(struct work_struct *work) +{ + struct udp_tunnel_nic *utn = + container_of(work, struct udp_tunnel_nic, work); + + rtnl_lock(); + utn->work_pending = 0; + __udp_tunnel_nic_device_sync(utn->dev, utn); + + if (utn->need_replay) + udp_tunnel_nic_replay(utn->dev, utn); + rtnl_unlock(); +} + +static struct udp_tunnel_nic * +udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info, + unsigned int n_tables) +{ + struct udp_tunnel_nic *utn; + unsigned int i; + + utn = kzalloc(sizeof(*utn), GFP_KERNEL); + if (!utn) + return NULL; + utn->n_tables = n_tables; + INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work); + + utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL); + if (!utn->entries) + goto err_free_utn; + + for (i = 0; i < n_tables; i++) { + utn->entries[i] = kcalloc(info->tables[i].n_entries, + sizeof(*utn->entries[i]), GFP_KERNEL); + if (!utn->entries[i]) + goto err_free_prev_entries; + } + + return utn; + +err_free_prev_entries: + while (i--) + kfree(utn->entries[i]); + kfree(utn->entries); +err_free_utn: + kfree(utn); + return NULL; +} + +static int udp_tunnel_nic_register(struct net_device *dev) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int n_tables, i; + + BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE < + UDP_TUNNEL_NIC_MAX_TABLES); + + if (WARN_ON(!info->set_port != !info->unset_port) || + WARN_ON(!info->set_port == !info->sync_table) || + WARN_ON(!info->tables[0].n_entries)) + return -EINVAL; + + n_tables = 1; + for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + continue; + + n_tables++; + if (WARN_ON(!info->tables[i - 1].n_entries)) + return -EINVAL; + } + + utn = udp_tunnel_nic_alloc(info, n_tables); + if (!utn) + return -ENOMEM; + + utn->dev = dev; + dev_hold(dev); + dev->udp_tunnel_nic = utn; + + if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) + udp_tunnel_get_rx_info(dev); + + return 0; +} + +static void +udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn) +{ + unsigned int i; + + /* Flush before we check work, so we don't waste time adding entries + * from the work which we will boot immediately. + */ + udp_tunnel_nic_flush(dev, utn); + + /* Wait for the work to be done using the state, netdev core will + * retry unregister until we give up our reference on this device. 
+ */ + if (utn->work_pending) + return; + + for (i = 0; i < utn->n_tables; i++) + kfree(utn->entries[i]); + kfree(utn->entries); + kfree(utn); + dev->udp_tunnel_nic = NULL; + dev_put(dev); +} + +static int +udp_tunnel_nic_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + const struct udp_tunnel_nic_info *info; + struct udp_tunnel_nic *utn; + + info = dev->udp_tunnel_nic_info; + if (!info) + return NOTIFY_DONE; + + if (event == NETDEV_REGISTER) { + int err; + + err = udp_tunnel_nic_register(dev); + if (err) + netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err); + return notifier_from_errno(err); + } + /* All other events will need the udp_tunnel_nic state */ + utn = dev->udp_tunnel_nic; + if (!utn) + return NOTIFY_DONE; + + if (event == NETDEV_UNREGISTER) { + udp_tunnel_nic_unregister(dev, utn); + return NOTIFY_OK; + } + + /* All other events only matter if NIC has to be programmed open */ + if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) + return NOTIFY_DONE; + + if (event == NETDEV_UP) { + WARN_ON(!udp_tunnel_nic_is_empty(dev, utn)); + udp_tunnel_get_rx_info(dev); + return NOTIFY_OK; + } + if (event == NETDEV_GOING_DOWN) { + udp_tunnel_nic_flush(dev, utn); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = { + .notifier_call = udp_tunnel_nic_netdevice_event, +}; + +static int __init udp_tunnel_nic_init_module(void) +{ + int err; + + udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0); + if (!udp_tunnel_nic_workqueue) + return -ENOMEM; + + rtnl_lock(); + udp_tunnel_nic_ops = &__udp_tunnel_nic_ops; + rtnl_unlock(); + + err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block); + if (err) + goto err_unset_ops; + + return 0; + +err_unset_ops: + rtnl_lock(); + udp_tunnel_nic_ops = NULL; + rtnl_unlock(); + destroy_workqueue(udp_tunnel_nic_workqueue); + return err; +} +late_initcall(udp_tunnel_nic_init_module); + +static void __exit udp_tunnel_nic_cleanup_module(void) +{ + unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block); + + rtnl_lock(); + udp_tunnel_nic_ops = NULL; + rtnl_unlock(); + + destroy_workqueue(udp_tunnel_nic_workqueue); +} +module_exit(udp_tunnel_nic_cleanup_module); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/udp_tunnel_stub.c b/net/ipv4/udp_tunnel_stub.c new file mode 100644 index 000000000000..c4b2888f5fef --- /dev/null +++ b/net/ipv4/udp_tunnel_stub.c @@ -0,0 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2020 Facebook Inc. + +#include + +const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; +EXPORT_SYMBOL_GPL(udp_tunnel_nic_ops); From c7d759eb7b12f91a25f4d3cd03ff5209046ddfc2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:47 -0700 Subject: [PATCH 0841/2056] ethtool: add tunnel info interface Add an interface to report offloaded UDP ports via ethtool netlink. Now that core takes care of tracking which UDP tunnel ports the NICs are aware of we can quite easily export this information out to user space. The responsibility of writing the netlink dumps is split between ethtool code and udp_tunnel_nic.c - since udp_tunnel module may not always be loaded, yet we should always report the capabilities of the NIC. 
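For illustration, the dump below is the shape of output produced by a NIC driver that advertises two port tables roughly like the following sketch. The foo_* names are invented for this example (the real in-tree user is the netdevsim driver added later in this series); only the struct layout, flags and UDP_TUNNEL_* constants come from this patch set:

	static const struct udp_tunnel_nic_info foo_udp_tunnels = {
		.set_port	= foo_udp_tunnel_set_port,   /* driver callback */
		.unset_port	= foo_udp_tunnel_unset_port, /* driver callback */
		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
		.tables		= {
			{
				.n_entries	= 4,
				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
			},
			{
				.n_entries	= 4,
				.tunnel_types	= UDP_TUNNEL_TYPE_GENEVE |
						  UDP_TUNNEL_TYPE_VXLAN_GPE,
			},
		},
	};

	/* attached in the driver's probe path, before register_netdev(): */
	dev->udp_tunnel_nic_info = &foo_udp_tunnels;
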
$ ethtool --show-tunnels eth0 Tunnel information for eth0: UDP port table 0: Size: 4 Types: vxlan No entries UDP port table 1: Size: 4 Types: geneve, vxlan-gpe Entries (1): port 1230, vxlan-gpe v4: - back to v2, build fix is now directly in udp_tunnel.h v3: - don't compile ETHTOOL_MSG_TUNNEL_INFO_GET in if CONFIG_INET not set. v2: - fix string set count, - reorder enums in the uAPI, - fix type of ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES to bitset in docs and comments. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- Documentation/networking/ethtool-netlink.rst | 33 +++ include/net/udp_tunnel.h | 21 ++ include/uapi/linux/ethtool.h | 2 + include/uapi/linux/ethtool_netlink.h | 55 ++++ net/ethtool/Makefile | 3 +- net/ethtool/common.c | 9 + net/ethtool/common.h | 1 + net/ethtool/netlink.c | 12 + net/ethtool/netlink.h | 4 + net/ethtool/strset.c | 5 + net/ethtool/tunnels.c | 259 +++++++++++++++++++ net/ipv4/udp_tunnel_nic.c | 69 +++++ 12 files changed, 472 insertions(+), 1 deletion(-) create mode 100644 net/ethtool/tunnels.c diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 459a0d11cfde..7d75f1e32152 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1230,6 +1230,39 @@ used to report the amplitude of the reflection for a given pair. | | | ``ETHTOOL_A_CABLE_AMPLITUDE_mV`` | s16 | Reflection amplitude | +-+-+-----------------------------------------+--------+----------------------+ +TUNNEL_INFO +=========== + +Gets information about the tunnel state NIC is aware of. + +Request contents: + + ===================================== ====== ========================== + ``ETHTOOL_A_TUNNEL_INFO_HEADER`` nested request header + ===================================== ====== ========================== + +Kernel response contents: + + +---------------------------------------------+--------+---------------------+ + | ``ETHTOOL_A_TUNNEL_INFO_HEADER`` | nested | reply header | + +---------------------------------------------+--------+---------------------+ + | ``ETHTOOL_A_TUNNEL_INFO_UDP_PORTS`` | nested | all UDP port tables | + +-+-------------------------------------------+--------+---------------------+ + | | ``ETHTOOL_A_TUNNEL_UDP_TABLE`` | nested | one UDP port table | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE`` | u32 | max size of the | + | | | | | table | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES`` | bitset | tunnel types which | + | | | | | table can hold | + +-+-+-----------------------------------------+--------+---------------------+ + | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY`` | nested | offloaded UDP port | + +-+-+-+---------------------------------------+--------+---------------------+ + | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT`` | be16 | UDP port | + +-+-+-+---------------------------------------+--------+---------------------+ + | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE`` | u32 | tunnel type | + +-+-+-+---------------------------------------+--------+---------------------+ + Request translation =================== diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index ee34619e4cfa..dd20ce99740c 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -255,6 +255,10 @@ struct udp_tunnel_nic_ops { void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); void 
(*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*reset_ntf)(struct net_device *dev); + + size_t (*dump_size)(struct net_device *dev, unsigned int table); + int (*dump_write)(struct net_device *dev, unsigned int table, + struct sk_buff *skb); }; #ifdef CONFIG_INET @@ -318,4 +322,21 @@ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->reset_ntf(dev); } + +static inline size_t +udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) +{ + if (!udp_tunnel_nic_ops) + return 0; + return udp_tunnel_nic_ops->dump_size(dev, table); +} + +static inline int +udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, + struct sk_buff *skb) +{ + if (!udp_tunnel_nic_ops) + return 0; + return udp_tunnel_nic_ops->dump_write(dev, table, skb); +} #endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 60856e0f9618..b4f2d134e713 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -669,6 +669,7 @@ enum ethtool_link_ext_substate_cable_issue { * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags * @ETH_SS_TS_TX_TYPES: timestamping Tx types * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters + * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types */ enum ethtool_stringset { ETH_SS_TEST = 0, @@ -686,6 +687,7 @@ enum ethtool_stringset { ETH_SS_SOF_TIMESTAMPING, ETH_SS_TS_TX_TYPES, ETH_SS_TS_RX_FILTERS, + ETH_SS_UDP_TUNNEL_TYPES, /* add new constants above here */ ETH_SS_COUNT diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index c12ce4df4b6b..5dcd24cb33ea 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -41,6 +41,7 @@ enum { ETHTOOL_MSG_TSINFO_GET, ETHTOOL_MSG_CABLE_TEST_ACT, ETHTOOL_MSG_CABLE_TEST_TDR_ACT, + ETHTOOL_MSG_TUNNEL_INFO_GET, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -556,6 +557,60 @@ enum { ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1 }; +/* TUNNEL INFO */ + +enum { + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN, + ETHTOOL_UDP_TUNNEL_TYPE_GENEVE, + ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE, + + __ETHTOOL_UDP_TUNNEL_TYPE_CNT +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, /* be16 */ + ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, /* u32 */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT, + ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, /* u32 */ + ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, /* bitset */ + ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY, /* nest - _UDP_ENTRY_* */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT, + ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_UDP_UNSPEC, + + ETHTOOL_A_TUNNEL_UDP_TABLE, /* nest - _UDP_TABLE_* */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_UDP_CNT, + ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1) +}; + +enum { + ETHTOOL_A_TUNNEL_INFO_UNSPEC, + ETHTOOL_A_TUNNEL_INFO_HEADER, /* nest - _A_HEADER_* */ + + ETHTOOL_A_TUNNEL_INFO_UDP_PORTS, /* nest - _UDP_TABLE */ + + /* add new constants above here */ + __ETHTOOL_A_TUNNEL_INFO_CNT, + ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile 
index 0c2b94f20499..7a849ff22dad 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_ETHTOOL_NETLINK) += ethtool_nl.o ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ - channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o + channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ + tunnels.o diff --git a/net/ethtool/common.c b/net/ethtool/common.c index c54166713797..ed19573fccd7 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include #include #include #include @@ -272,6 +273,14 @@ const char ts_rx_filter_names[][ETH_GSTRING_LEN] = { }; static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT); +const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = { + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN] = "vxlan", + [ETHTOOL_UDP_TUNNEL_TYPE_GENEVE] = "geneve", + [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE] = "vxlan-gpe", +}; +static_assert(ARRAY_SIZE(udp_tunnel_type_names) == + __ETHTOOL_UDP_TUNNEL_TYPE_CNT); + /* return false if legacy contained non-0 deprecated fields * maxtxpkt/maxrxpkt. rest of ksettings always updated */ diff --git a/net/ethtool/common.h b/net/ethtool/common.h index b83bef38368c..3d9251c95a8b 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -28,6 +28,7 @@ extern const char wol_mode_names[][ETH_GSTRING_LEN]; extern const char sof_timestamping_names[][ETH_GSTRING_LEN]; extern const char ts_tx_type_names[][ETH_GSTRING_LEN]; extern const char ts_rx_filter_names[][ETH_GSTRING_LEN]; +extern const char udp_tunnel_type_names[][ETH_GSTRING_LEN]; int __ethtool_get_link(struct net_device *dev); diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index 88fd07f47040..fb9d096faaa4 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -181,6 +181,12 @@ struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, return NULL; } +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd) +{ + return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &ethtool_genl_family, 0, cmd); +} + void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd) { return genlmsg_put(skb, 0, ++ethnl_bcast_seq, &ethtool_genl_family, 0, @@ -849,6 +855,12 @@ static const struct genl_ops ethtool_genl_ops[] = { .flags = GENL_UNS_ADMIN_PERM, .doit = ethnl_act_cable_test_tdr, }, + { + .cmd = ETHTOOL_MSG_TUNNEL_INFO_GET, + .doit = ethnl_tunnel_info_doit, + .start = ethnl_tunnel_info_start, + .dumpit = ethnl_tunnel_info_dumpit, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 9a96b6e90dc2..e2085005caac 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -19,6 +19,7 @@ int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev, struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, u16 hdr_attrtype, struct genl_info *info, void **ehdrp); +void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd); void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd); int ethnl_multicast(struct sk_buff *skb, struct net_device *dev); @@ -361,5 +362,8 @@ int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info); int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info 
*info); +int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info); +int ethnl_tunnel_info_start(struct netlink_callback *cb); +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); #endif /* _NET_ETHTOOL_NETLINK_H */ diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c index 773634b6b048..82707b662fe4 100644 --- a/net/ethtool/strset.c +++ b/net/ethtool/strset.c @@ -75,6 +75,11 @@ static const struct strset_info info_template[] = { .count = __HWTSTAMP_FILTER_CNT, .strings = ts_rx_filter_names, }, + [ETH_SS_UDP_TUNNEL_TYPES] = { + .per_dev = false, + .count = __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + .strings = udp_tunnel_type_names, + }, }; struct strset_req_info { diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c new file mode 100644 index 000000000000..6b89255f1231 --- /dev/null +++ b/net/ethtool/tunnels.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#include "bitset.h" +#include "common.h" +#include "netlink.h" + +static const struct nla_policy +ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = { + [ETHTOOL_A_TUNNEL_INFO_UNSPEC] = { .type = NLA_REJECT }, + [ETHTOOL_A_TUNNEL_INFO_HEADER] = { .type = NLA_NESTED }, +}; + +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE)); +static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE == + ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE)); + +static ssize_t +ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, + struct netlink_ext_ack *extack) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + unsigned int i; + size_t size; + int ret; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) { + NL_SET_ERR_MSG(extack, + "device does not report tunnel offload info"); + return -EOPNOTSUPP; + } + + size = nla_total_size(0); /* _INFO_UDP_PORTS */ + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + return size; + + size += nla_total_size(0); /* _UDP_TABLE */ + size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ + ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact); + if (ret < 0) + return ret; + size += ret; + + size += udp_tunnel_nic_dump_size(req_base->dev, i); + } + + return size; +} + +static int +ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, + struct sk_buff *skb) +{ + bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; + const struct udp_tunnel_nic_info *info; + struct nlattr *ports, *table; + unsigned int i; + + info = req_base->dev->udp_tunnel_nic_info; + if (!info) + return -EOPNOTSUPP; + + ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS); + if (!ports) + return -EMSGSIZE; + + for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { + if (!info->tables[i].n_entries) + break; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, + info->tables[i].n_entries)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &info->tables[i].tunnel_types, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + if (udp_tunnel_nic_dump_write(req_base->dev, i, skb)) + goto err_cancel_table; + + nla_nest_end(skb, table); + } + + nla_nest_end(skb, ports); + + return 
0; + +err_cancel_table: + nla_nest_cancel(skb, table); +err_cancel_ports: + nla_nest_cancel(skb, ports); + return -EMSGSIZE; +} + +static int +ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info, + const struct nlmsghdr *nlhdr, struct net *net, + struct netlink_ext_ack *extack, bool require_dev) +{ + struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1]; + int ret; + + ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX, + ethtool_tunnel_info_policy, extack); + if (ret < 0) + return ret; + + return ethnl_parse_header_dev_get(req_info, + tb[ETHTOOL_A_TUNNEL_INFO_HEADER], + net, extack, require_dev); +} + +int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct sk_buff *rskb; + void *reply_payload; + int reply_len; + int ret; + + ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr, + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + + rtnl_lock(); + ret = ethnl_tunnel_info_reply_size(&req_info, info->extack); + if (ret < 0) + goto err_unlock_rtnl; + reply_len = ret + ethnl_reply_header_size(); + + rskb = ethnl_reply_init(reply_len, req_info.dev, + ETHTOOL_MSG_TUNNEL_INFO_GET, + ETHTOOL_A_TUNNEL_INFO_HEADER, + info, &reply_payload); + if (!rskb) { + ret = -ENOMEM; + goto err_unlock_rtnl; + } + + ret = ethnl_tunnel_info_fill_reply(&req_info, rskb); + if (ret) + goto err_free_msg; + rtnl_unlock(); + dev_put(req_info.dev); + genlmsg_end(rskb, reply_payload); + + return genlmsg_reply(rskb, info); + +err_free_msg: + nlmsg_free(rskb); +err_unlock_rtnl: + rtnl_unlock(); + dev_put(req_info.dev); + return ret; +} + +struct ethnl_tunnel_info_dump_ctx { + struct ethnl_req_info req_info; + int pos_hash; + int pos_idx; +}; + +int ethnl_tunnel_info_start(struct netlink_callback *cb) +{ + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + int ret; + + BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx)); + + memset(ctx, 0, sizeof(*ctx)); + + ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh, + sock_net(cb->skb->sk), cb->extack, + false); + if (ctx->req_info.dev) { + dev_put(ctx->req_info.dev); + ctx->req_info.dev = NULL; + } + + return ret; +} + +int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx; + struct net *net = sock_net(skb->sk); + int s_idx = ctx->pos_idx; + int h, idx = 0; + int ret = 0; + void *ehdr; + + rtnl_lock(); + cb->seq = net->dev_base_seq; + for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { + struct hlist_head *head; + struct net_device *dev; + + head = &net->dev_index_head[h]; + idx = 0; + hlist_for_each_entry(dev, head, index_hlist) { + if (idx < s_idx) + goto cont; + + ehdr = ethnl_dump_put(skb, cb, + ETHTOOL_MSG_TUNNEL_INFO_GET); + if (!ehdr) { + ret = -EMSGSIZE; + goto out; + } + + ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER); + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + goto out; + } + + ctx->req_info.dev = dev; + ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb); + ctx->req_info.dev = NULL; + if (ret < 0) { + genlmsg_cancel(skb, ehdr); + if (ret == -EOPNOTSUPP) + goto cont; + goto out; + } + genlmsg_end(skb, ehdr); +cont: + idx++; + } + } +out: + rtnl_unlock(); + + ctx->pos_hash = h; + ctx->pos_idx = idx; + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); + + if (ret == -EMSGSIZE && skb->len) + return skb->len; + return ret; +} diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index 056cfe0b770e..f0dbd9905a53 
100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only // Copyright (c) 2020 Facebook Inc. +#include #include #include #include @@ -72,6 +73,12 @@ udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry) return entry->use_cnt == 0 && !entry->flags; } +static bool +udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry) +{ + return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN); +} + static bool udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry) { @@ -564,12 +571,74 @@ static void __udp_tunnel_nic_reset_ntf(struct net_device *dev) __udp_tunnel_nic_device_sync(dev, utn); } +static size_t +__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + unsigned int j; + size_t size; + + utn = dev->udp_tunnel_nic; + if (!utn) + return 0; + + size = 0; + for (j = 0; j < info->tables[table].n_entries; j++) { + if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j])) + continue; + + size += nla_total_size(0) + /* _TABLE_ENTRY */ + nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */ + nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */ + } + + return size; +} + +static int +__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, + struct sk_buff *skb) +{ + const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; + struct udp_tunnel_nic *utn; + struct nlattr *nest; + unsigned int j; + + utn = dev->udp_tunnel_nic; + if (!utn) + return 0; + + for (j = 0; j < info->tables[table].n_entries; j++) { + if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j])) + continue; + + nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); + + if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + utn->entries[table][j].port) || + nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + ilog2(utn->entries[table][j].type))) + goto err_cancel; + + nla_nest_end(skb, nest); + } + + return 0; + +err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = { .get_port = __udp_tunnel_nic_get_port, .set_port_priv = __udp_tunnel_nic_set_port_priv, .add_port = __udp_tunnel_nic_add_port, .del_port = __udp_tunnel_nic_del_port, .reset_ntf = __udp_tunnel_nic_reset_ntf, + .dump_size = __udp_tunnel_nic_dump_size, + .dump_write = __udp_tunnel_nic_dump_write, }; static void From 424be63ad831fbd5fb04eb6576de44f4aa7661e2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:48 -0700 Subject: [PATCH 0842/2056] netdevsim: add UDP tunnel port offload support Add UDP tunnel port handlers to our fake driver so we can test the core infra. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/netdevsim/Makefile | 2 +- drivers/net/netdevsim/dev.c | 1 + drivers/net/netdevsim/netdev.c | 12 +- drivers/net/netdevsim/netdevsim.h | 19 +++ drivers/net/netdevsim/udp_tunnels.c | 192 ++++++++++++++++++++++++++++ 5 files changed, 224 insertions(+), 2 deletions(-) create mode 100644 drivers/net/netdevsim/udp_tunnels.c diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile index f4d8f62f28c2..4dfb389dbfd8 100644 --- a/drivers/net/netdevsim/Makefile +++ b/drivers/net/netdevsim/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_NETDEVSIM) += netdevsim.o netdevsim-objs := \ - netdev.o dev.o fib.o bus.o health.o + netdev.o dev.o fib.o bus.o health.o udp_tunnels.o ifeq ($(CONFIG_BPF_SYSCALL),y) netdevsim-objs += \ diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 0dc2c66a5d56..ce719c830a77 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -225,6 +225,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) debugfs_create_bool("fail_trap_policer_counter_get", 0600, nsim_dev->ddir, &nsim_dev->fail_trap_policer_counter_get); + nsim_udp_tunnels_debugfs_create(nsim_dev); return 0; } diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c index 2908e0a0d6e1..9d0d18026434 100644 --- a/drivers/net/netdevsim/netdev.c +++ b/drivers/net/netdevsim/netdev.c @@ -22,6 +22,7 @@ #include #include #include +#include #include "netdevsim.h" @@ -257,6 +258,8 @@ static const struct net_device_ops nsim_netdev_ops = { .ndo_setup_tc = nsim_setup_tc, .ndo_set_features = nsim_set_features, .ndo_bpf = nsim_bpf, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_get_devlink_port = nsim_get_devlink_port, }; @@ -299,10 +302,14 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev); dev->netdev_ops = &nsim_netdev_ops; + err = nsim_udp_tunnels_info_create(nsim_dev, dev); + if (err) + goto err_free_netdev; + rtnl_lock(); err = nsim_bpf_init(ns); if (err) - goto err_free_netdev; + goto err_utn_destroy; nsim_ipsec_init(ns); @@ -317,6 +324,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port) nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); rtnl_unlock(); +err_utn_destroy: + nsim_udp_tunnels_info_destroy(dev); err_free_netdev: free_netdev(dev); return ERR_PTR(err); @@ -331,6 +340,7 @@ void nsim_destroy(struct netdevsim *ns) nsim_ipsec_teardown(ns); nsim_bpf_uninit(ns); rtnl_unlock(); + nsim_udp_tunnels_info_destroy(dev); free_netdev(dev); } diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 4ded54a21e1e..d164052e0393 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -13,6 +13,7 @@ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
*/ +#include #include #include #include @@ -29,6 +30,7 @@ #define NSIM_IPSEC_MAX_SA_COUNT 33 #define NSIM_IPSEC_VALID BIT(31) +#define NSIM_UDP_TUNNEL_N_PORTS 4 struct nsim_sa { struct xfrm_state *xs; @@ -72,12 +74,23 @@ struct netdevsim { bool bpf_map_accept; struct nsim_ipsec ipsec; + struct { + u32 inject_error; + u32 sleep; + u32 ports[2][NSIM_UDP_TUNNEL_N_PORTS]; + struct debugfs_u32_array dfs_ports[2]; + } udp_ports; }; struct netdevsim * nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port); void nsim_destroy(struct netdevsim *ns); +void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev); +int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, + struct net_device *dev); +void nsim_udp_tunnels_info_destroy(struct net_device *dev); + #ifdef CONFIG_BPF_SYSCALL int nsim_bpf_dev_init(struct nsim_dev *nsim_dev); void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev); @@ -183,6 +196,12 @@ struct nsim_dev { bool fail_trap_group_set; bool fail_trap_policer_set; bool fail_trap_policer_counter_get; + struct { + bool sync_all; + bool open_only; + bool ipv4_only; + u32 sleep; + } udp_ports; }; static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev) diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c new file mode 100644 index 000000000000..22c06a76033c --- /dev/null +++ b/drivers/net/netdevsim/udp_tunnels.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (c) 2020 Facebook Inc. + +#include +#include +#include +#include + +#include "netdevsim.h" + +static int +nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + struct netdevsim *ns = netdev_priv(dev); + int ret; + + ret = -ns->udp_ports.inject_error; + ns->udp_ports.inject_error = 0; + + if (ns->udp_ports.sleep) + msleep(ns->udp_ports.sleep); + + if (!ret) { + if (ns->udp_ports.ports[table][entry]) + ret = -EBUSY; + else + ns->udp_ports.ports[table][entry] = + be16_to_cpu(ti->port) << 16 | ti->type; + } + + netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n", + table, entry, ti->type, ti->sa_family, ntohs(ti->port), + ret); + return ret; +} + +static int +nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + struct netdevsim *ns = netdev_priv(dev); + int ret; + + ret = -ns->udp_ports.inject_error; + ns->udp_ports.inject_error = 0; + + if (ns->udp_ports.sleep) + msleep(ns->udp_ports.sleep); + if (!ret) { + u32 val = be16_to_cpu(ti->port) << 16 | ti->type; + + if (val == ns->udp_ports.ports[table][entry]) + ns->udp_ports.ports[table][entry] = 0; + else + ret = -ENOENT; + } + + netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n", + table, entry, ti->type, ti->sa_family, ntohs(ti->port), + ret); + return ret; +} + +static int +nsim_udp_tunnel_sync_table(struct net_device *dev, unsigned int table) +{ + struct netdevsim *ns = netdev_priv(dev); + struct udp_tunnel_info ti; + unsigned int i; + int ret; + + ret = -ns->udp_ports.inject_error; + ns->udp_ports.inject_error = 0; + + for (i = 0; i < NSIM_UDP_TUNNEL_N_PORTS; i++) { + udp_tunnel_nic_get_port(dev, table, i, &ti); + ns->udp_ports.ports[table][i] = + be16_to_cpu(ti.port) << 16 | ti.type; + } + + return ret; +} + +static const struct udp_tunnel_nic_info nsim_udp_tunnel_info = { + .set_port = nsim_udp_tunnel_set_port, + .unset_port = nsim_udp_tunnel_unset_port, + .sync_table = nsim_udp_tunnel_sync_table, + + .tables = { + { + 
.n_entries = NSIM_UDP_TUNNEL_N_PORTS, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, + }, + { + .n_entries = NSIM_UDP_TUNNEL_N_PORTS, + .tunnel_types = UDP_TUNNEL_TYPE_GENEVE | + UDP_TUNNEL_TYPE_VXLAN_GPE, + }, + }, +}; + +static ssize_t +nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data, + size_t count, loff_t *ppos) +{ + struct net_device *dev = file->private_data; + struct netdevsim *ns = netdev_priv(dev); + + memset(&ns->udp_ports.ports, 0, sizeof(ns->udp_ports.ports)); + rtnl_lock(); + udp_tunnel_nic_reset_ntf(dev); + rtnl_unlock(); + + return count; +} + +static const struct file_operations nsim_udp_tunnels_info_reset_fops = { + .open = simple_open, + .write = nsim_udp_tunnels_info_reset_write, + .llseek = generic_file_llseek, +}; + +int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, + struct net_device *dev) +{ + struct netdevsim *ns = netdev_priv(dev); + struct udp_tunnel_nic_info *info; + + debugfs_create_u32("udp_ports_inject_error", 0600, + ns->nsim_dev_port->ddir, + &ns->udp_ports.inject_error); + + ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0]; + ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS; + debugfs_create_u32_array("udp_ports_table0", 0400, + ns->nsim_dev_port->ddir, + &ns->udp_ports.dfs_ports[0]); + + ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1]; + ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS; + debugfs_create_u32_array("udp_ports_table1", 0400, + ns->nsim_dev_port->ddir, + &ns->udp_ports.dfs_ports[1]); + + debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir, + dev, &nsim_udp_tunnels_info_reset_fops); + + /* Note: it's not normal to allocate the info struct like this! + * Drivers are expected to use a static const one, here we're testing. + */ + info = kmemdup(&nsim_udp_tunnel_info, sizeof(nsim_udp_tunnel_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + ns->udp_ports.sleep = nsim_dev->udp_ports.sleep; + + if (nsim_dev->udp_ports.sync_all) { + info->set_port = NULL; + info->unset_port = NULL; + } else { + info->sync_table = NULL; + } + + if (ns->udp_ports.sleep) + info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP; + if (nsim_dev->udp_ports.open_only) + info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY; + if (nsim_dev->udp_ports.ipv4_only) + info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY; + + dev->udp_tunnel_nic_info = info; + return 0; +} + +void nsim_udp_tunnels_info_destroy(struct net_device *dev) +{ + kfree(dev->udp_tunnel_nic_info); + dev->udp_tunnel_nic_info = NULL; +} + +void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev) +{ + debugfs_create_bool("udp_ports_sync_all", 0600, nsim_dev->ddir, + &nsim_dev->udp_ports.sync_all); + debugfs_create_bool("udp_ports_open_only", 0600, nsim_dev->ddir, + &nsim_dev->udp_ports.open_only); + debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir, + &nsim_dev->udp_ports.ipv4_only); + debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir, + &nsim_dev->udp_ports.sleep); +} From 91f430b2c49d2d8f4445824f70a8cf7b73e1094c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:49 -0700 Subject: [PATCH 0843/2056] selftests: net: add a test for UDP tunnel info infra Add validating the UDP tunnel infra works. $ ./udp_tunnel_nic.sh PASSED all 383 checks Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../drivers/net/netdevsim/udp_tunnel_nic.sh | 786 ++++++++++++++++++ 1 file changed, 786 insertions(+) create mode 100644 tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh new file mode 100644 index 000000000000..ba1d53b9f815 --- /dev/null +++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh @@ -0,0 +1,786 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-only + +VNI_GEN=$RANDOM +NSIM_ID=$((RANDOM % 1024)) +NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID +NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID +NSIM_NETDEV= +HAS_ETHTOOL= +EXIT_STATUS=0 +num_cases=0 +num_errors=0 + +clean_up_devs=( ) + +function err_cnt { + echo "ERROR:" $@ + EXIT_STATUS=1 + ((num_errors++)) + ((num_cases++)) +} + +function pass_cnt { + ((num_cases++)) +} + +function cleanup_tuns { + for dev in "${clean_up_devs[@]}"; do + [ -e /sys/class/net/$dev ] && ip link del dev $dev + done + clean_up_devs=( ) +} + +function cleanup_nsim { + if [ -e $NSIM_DEV_SYS ]; then + echo $NSIM_ID > /sys/bus/netdevsim/del_device + fi +} + +function cleanup { + cleanup_tuns + cleanup_nsim +} + +trap cleanup EXIT + +function new_vxlan { + local dev=$1 + local dstport=$2 + local lower=$3 + local ipver=$4 + local flags=$5 + + local group ipfl + + [ "$ipver" != '6' ] && group=239.1.1.1 || group=fff1::1 + [ "$ipver" != '6' ] || ipfl="-6" + + [[ ! "$flags" =~ "external" ]] && flags="$flags id $((VNI_GEN++))" + + ip $ipfl link add $dev type vxlan \ + group $group \ + dev $lower \ + dstport $dstport \ + $flags + + ip link set dev $dev up + + clean_up_devs=("${clean_up_devs[@]}" $dev) + + check_tables +} + +function new_geneve { + local dev=$1 + local dstport=$2 + local ipver=$3 + local flags=$4 + + local group ipfl + + [ "$ipver" != '6' ] && remote=1.1.1.2 || group=::2 + [ "$ipver" != '6' ] || ipfl="-6" + + [[ ! "$flags" =~ "external" ]] && flags="$flags vni $((VNI_GEN++))" + + ip $ipfl link add $dev type geneve \ + remote $remote \ + dstport $dstport \ + $flags + + ip link set dev $dev up + + clean_up_devs=("${clean_up_devs[@]}" $dev) + + check_tables +} + +function del_dev { + local dev=$1 + + ip link del dev $dev + check_tables +} + +# Helpers for netdevsim port/type encoding +function mke { + local port=$1 + local type=$2 + + echo $((port << 16 | type)) +} + +function pre { + local val=$1 + + echo -e "port: $((val >> 16))\ttype: $((val & 0xffff))" +} + +function pre_ethtool { + local val=$1 + local port=$((val >> 16)) + local type=$((val & 0xffff)) + + case $type in + 1) + type_name="vxlan" + ;; + 2) + type_name="geneve" + ;; + 4) + type_name="vxlan-gpe" + ;; + *) + type_name="bit X" + ;; + esac + + echo "port $port, $type_name" +} + +function check_table { + local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + local -n expected=$2 + local last=$3 + + read -a have < $path + + if [ ${#expected[@]} -ne ${#have[@]} ]; then + echo "check_table: BAD NUMBER OF ITEMS" + return 0 + fi + + for i in "${!expected[@]}"; do + if [ -n "$HAS_ETHTOOL" -a ${expected[i]} -ne 0 ]; then + pp_expected=`pre_ethtool ${expected[i]}` + ethtool --show-tunnels $NSIM_NETDEV | grep "$pp_expected" >/dev/null + if [ $? 
-ne 0 -a $last -ne 0 ]; then + err_cnt "ethtool table $1 on port $port: $pfx - $msg" + echo " check_table: ethtool does not contain '$pp_expected'" + ethtool --show-tunnels $NSIM_NETDEV + return 0 + + fi + fi + + if [ ${expected[i]} != ${have[i]} ]; then + if [ $last -ne 0 ]; then + err_cnt "table $1 on port $port: $pfx - $msg" + echo " check_table: wrong entry $i" + echo " expected: `pre ${expected[i]}`" + echo " have: `pre ${have[i]}`" + return 0 + fi + return 1 + fi + done + + pass_cnt + return 0 +} + +function check_tables { + # Need retries in case we have workqueue making the changes + local retries=10 + + while ! check_table 0 exp0 $((retries == 0)); do + sleep 0.02 + ((retries--)) + done + while ! check_table 1 exp1 $((retries == 0)); do + sleep 0.02 + ((retries--)) + done +} + +function print_table { + local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + read -a have < $path + + tree $NSIM_DEV_DFS/ + + echo "Port $port table $1:" + + for i in "${!have[@]}"; do + echo " `pre ${have[i]}`" + done + +} + +function print_tables { + print_table 0 + print_table 1 +} + +function get_netdev_name { + local -n old=$1 + + new=$(ls /sys/class/net) + + for netdev in $new; do + for check in $old; do + [ $netdev == $check ] && break + done + + if [ $netdev != $check ]; then + echo $netdev + break + fi + done +} + +### +### Code start +### + +# Probe ethtool support +ethtool -h | grep show-tunnels 2>&1 >/dev/null && HAS_ETHTOOL=y + +modprobe netdevsim + +# Basic test +pfx="basic" + +for port in 0 1; do + old_netdevs=$(ls /sys/class/net) + if [ $port -eq 0 ]; then + echo $NSIM_ID > /sys/bus/netdevsim/new_device + else + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + echo 1 > $NSIM_DEV_SYS/new_port + fi + NSIM_NETDEV=`get_netdev_name old_netdevs` + + msg="new NIC device created" + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + check_tables + + msg="VxLAN v4 devices" + exp0=( `mke 4789 1` 0 0 0 ) + new_vxlan vxlan0 4789 $NSIM_NETDEV + new_vxlan vxlan1 4789 $NSIM_NETDEV + + msg="VxLAN v4 devices go down" + exp0=( 0 0 0 0 ) + ifconfig vxlan1 down + ifconfig vxlan0 down + check_tables + + msg="VxLAN v6 devices" + exp0=( `mke 4789 1` 0 0 0 ) + new_vxlan vxlanA 4789 $NSIM_NETDEV 6 + + for ifc in vxlan0 vxlan1; do + ifconfig $ifc up + done + + new_vxlan vxlanB 4789 $NSIM_NETDEV 6 + + msg="another VxLAN v6 devices" + exp0=( `mke 4789 1` `mke 4790 1` 0 0 ) + new_vxlan vxlanC 4790 $NSIM_NETDEV 6 + + msg="Geneve device" + exp1=( `mke 6081 2` 0 0 0 ) + new_geneve gnv0 6081 + + msg="NIC device goes down" + ifconfig $NSIM_NETDEV down + if [ $port -eq 1 ]; then + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + fi + check_tables + msg="NIC device goes up again" + ifconfig $NSIM_NETDEV up + exp0=( `mke 4789 1` `mke 4790 1` 0 0 ) + exp1=( `mke 6081 2` 0 0 0 ) + check_tables + + cleanup_tuns + + msg="tunnels destroyed" + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + check_tables + + modprobe -r geneve + modprobe -r vxlan + modprobe -r udp_tunnel + + check_tables +done + +modprobe -r netdevsim + +# Module tests +pfx="module tests" + +if modinfo netdevsim | grep udp_tunnel >/dev/null; then + err_cnt "netdevsim depends on udp_tunnel" +else + pass_cnt +fi + +modprobe netdevsim + +old_netdevs=$(ls /sys/class/net) +port=0 +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1000 > $NSIM_DEV_DFS/udp_ports_sleep +echo 0 > $NSIM_DEV_SYS/new_port +NSIM_NETDEV=`get_netdev_name old_netdevs` + +msg="create VxLANs" +exp0=( 0 0 0 0 ) # sleep is longer than out wait +new_vxlan vxlan0 
10000 $NSIM_NETDEV + +modprobe -r vxlan +modprobe -r udp_tunnel + +msg="remove tunnels" +exp0=( 0 0 0 0 ) +check_tables + +msg="create VxLANs" +exp0=( 0 0 0 0 ) # sleep is longer than out wait +new_vxlan vxlan0 10000 $NSIM_NETDEV + +exp0=( 0 0 0 0 ) + +modprobe -r netdevsim +modprobe netdevsim + +# Overflow the table + +function overflow_table0 { + local pfx=$1 + + msg="create VxLANs 1/5" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="create VxLANs 2/5" + exp0=( `mke 10000 1` `mke 10001 1` 0 0 ) + new_vxlan vxlan1 10001 $NSIM_NETDEV + + msg="create VxLANs 3/5" + exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` 0 ) + new_vxlan vxlan2 10002 $NSIM_NETDEV + + msg="create VxLANs 4/5" + exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` `mke 10003 1` ) + new_vxlan vxlan3 10003 $NSIM_NETDEV + + msg="create VxLANs 5/5" + new_vxlan vxlan4 10004 $NSIM_NETDEV +} + +function overflow_table1 { + local pfx=$1 + + msg="create GENEVE 1/5" + exp1=( `mke 20000 2` 0 0 0 ) + new_geneve gnv0 20000 + + msg="create GENEVE 2/5" + exp1=( `mke 20000 2` `mke 20001 2` 0 0 ) + new_geneve gnv1 20001 + + msg="create GENEVE 3/5" + exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` 0 ) + new_geneve gnv2 20002 + + msg="create GENEVE 4/5" + exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` `mke 20003 2` ) + new_geneve gnv3 20003 + + msg="create GENEVE 5/5" + new_geneve gnv4 20004 +} + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "overflow NIC table" + overflow_table1 "overflow NIC table" + + msg="replace VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` ) + del_dev vxlan1 + + msg="vacate VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` ) + del_dev vxlan2 + + msg="replace GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` `mke 20002 2` `mke 20003 2` ) + del_dev gnv1 + + msg="vacate GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` ) + del_dev gnv2 + + msg="table sharing - share" + exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` ) + new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external" + + msg="table sharing - overflow" + new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external" + msg="table sharing - overflow v6" + new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external" + + exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` ) + del_dev gnv4 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# Sync all +pfx="sync all" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1 > $NSIM_DEV_DFS/udp_ports_sync_all + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "overflow NIC table" + overflow_table1 "overflow NIC table" + + msg="replace VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` ) + del_dev vxlan1 + + msg="vacate VxLAN in overflow table" + exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` ) + del_dev vxlan2 + + msg="replace GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 
20004 2` `mke 20002 2` `mke 20003 2` ) + del_dev gnv1 + + msg="vacate GENEVE in overflow table" + exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` ) + del_dev gnv2 + + msg="table sharing - share" + exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` ) + new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external" + + msg="table sharing - overflow" + new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external" + msg="table sharing - overflow v6" + new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external" + + exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` ) + del_dev gnv4 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# Destroy full NIC +pfx="destroy full" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + overflow_table0 "destroy NIC" + overflow_table1 "destroy NIC" + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# IPv4 only +pfx="IPv4 only" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port +echo 1 > $NSIM_DEV_DFS/udp_ports_ipv4_only + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v6" + new_vxlan vxlanA1 10000 $NSIM_NETDEV 6 + + ip link set dev vxlanA0 down + ip link set dev vxlanA0 up + check_tables + + msg="create VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="down VxLANs v4" + exp0=( 0 0 0 0 ) + ip link set dev vxlan0 down + check_tables + + msg="up VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + ip link set dev vxlan0 up + check_tables + + msg="destroy VxLANs v4" + exp0=( 0 0 0 0 ) + del_dev vxlan0 + + msg="recreate VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + del_dev vxlanA0 + del_dev vxlanA1 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# Failures +pfx="error injection" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + + msg="1 - create VxLANs v6" + exp0=( 0 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="1 - create VxLANs v4" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="1 - remove VxLANs v4" + del_dev vxlan0 + + msg="1 - remove VxLANs v6" + exp0=( 0 0 0 0 ) + del_dev vxlanA0 + + msg="2 - create GENEVE" + exp1=( `mke 20000 2` 0 0 0 ) + new_geneve gnv0 20000 + + msg="2 - destroy GENEVE" + echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + exp1=( `mke 20000 2` 0 0 0 ) + del_dev gnv0 + + msg="2 - create second GENEVE" + exp1=( 0 `mke 20001 2` 0 0 ) + new_geneve gnv0 20001 + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 
0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# netdev flags +pfx="netdev flags" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v4" + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="turn off" + exp0=( 0 0 0 0 ) + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off + check_tables + + msg="turn on" + exp0=( `mke 10000 1` 0 0 0 ) + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on + check_tables + + msg="remove both" + del_dev vxlanA0 + exp0=( 0 0 0 0 ) + del_dev vxlan0 + check_tables + + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off + + msg="create VxLANs v4 - off" + exp0=( 0 0 0 0 ) + new_vxlan vxlan0 10000 $NSIM_NETDEV + + msg="created off - turn on" + exp0=( `mke 10000 1` 0 0 0 ) + ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on + check_tables + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +cleanup_nsim + +# device initiated reset +pfx="reset notification" + +echo $NSIM_ID > /sys/bus/netdevsim/new_device +echo 0 > $NSIM_DEV_SYS/del_port + +for port in 0 1; do + if [ $port -ne 0 ]; then + echo 1 > $NSIM_DEV_DFS/udp_ports_open_only + echo 1 > $NSIM_DEV_DFS/udp_ports_sleep + fi + + echo $port > $NSIM_DEV_SYS/new_port + ifconfig $NSIM_NETDEV up + + msg="create VxLANs v6" + exp0=( `mke 10000 1` 0 0 0 ) + new_vxlan vxlanA0 10000 $NSIM_NETDEV 6 + + msg="create VxLANs v4" + new_vxlan vxlan0 10000 $NSIM_NETDEV + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="NIC device goes down" + ifconfig $NSIM_NETDEV down + if [ $port -eq 1 ]; then + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) + fi + check_tables + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="NIC device goes up again" + ifconfig $NSIM_NETDEV up + exp0=( `mke 10000 1` 0 0 0 ) + check_tables + + msg="remove both" + del_dev vxlanA0 + exp0=( 0 0 0 0 ) + del_dev vxlan0 + check_tables + + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + check_tables + + msg="destroy NIC" + echo $port > $NSIM_DEV_SYS/del_port + + cleanup_tuns + exp0=( 0 0 0 0 ) + exp1=( 0 0 0 0 ) +done + +modprobe -r netdevsim + +if [ $num_errors -eq 0 ]; then + echo "PASSED all $num_cases checks" +else + echo "FAILED $num_errors/$num_cases checks" +fi + +exit $EXIT_STATUS From abc0c78c0ab29afca40a549aef107b596915e312 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:50 -0700 Subject: [PATCH 0844/2056] ixgbe: don't clear UDP tunnel ports when RXCSUM is disabled It appears the clearing of UDP tunnel ports when RXCSUM is disabled is unnecessary. Driver will not pay attention to checksum bits if RXCSUM is not set, so we can let the hardware parse the packets. Note that the UDP tunnel port NDO handlers don't pay attention to the state of RXCSUM, so the ports could had been re-programmed, anyway. This cleanup simplifies later conversion patch. v2: - break this out of the following patch. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 20 ------------------- 1 file changed, 20 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f5d3d6230786..acdf525272a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9784,26 +9784,6 @@ static int ixgbe_set_features(struct net_device *netdev, netdev->features = features; - if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } - } - - if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) { - if (features & NETIF_F_RXCSUM) { - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - } else { - u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - } - } - if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1) ixgbe_reset_l2fw_offload(adapter); else if (need_reset) From dc221851ffd1e6ebb709f85e60f93262413a488a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:51 -0700 Subject: [PATCH 0845/2056] ixgbe: convert to new udp_tunnel_nic infra Make use of new common udp_tunnel_nic infra. ixgbe supports IPv4 only, and only single VxLAN and Geneve ports (one each). v2: - split out the RXCSUM feature handling to separate change; - declare structs separately; - use ti.type instead of assuming table 0 is VxLAN; - move setting netdev->udp_tunnel_nic_info to its own switch. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 182 +++++------------- 2 files changed, 45 insertions(+), 140 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index debbcf216134..1e8a809233a0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -588,11 +588,9 @@ struct ixgbe_adapter { #define IXGBE_FLAG_FCOE_ENABLED BIT(21) #define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) #define IXGBE_FLAG_SRIOV_ENABLED BIT(23) -#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) #define IXGBE_FLAG_DCB_CAPABLE BIT(27) -#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(28) u32 flags2; #define IXGBE_FLAG2_RSC_CAPABLE BIT(0) @@ -606,7 +604,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) #define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) #define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) -#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) #define IXGBE_FLAG2_EEE_CAPABLE BIT(14) #define IXGBE_FLAG2_EEE_ENABLED BIT(15) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index acdf525272a3..4d898ff21a46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4994,25 +4994,42 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) napi_disable(&adapter->q_vector[q_idx]->napi); } -static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) +static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table) { + struct ixgbe_adapter *adapter = 
netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - u32 vxlanctrl; + struct udp_tunnel_info ti; - if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | - IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) - return; + udp_tunnel_nic_get_port(dev, table, 0, &ti); + if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + adapter->vxlan_port = ti.port; + else + adapter->geneve_port = ti.port; - vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); - - if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) - adapter->vxlan_port = 0; - - if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) - adapter->geneve_port = 0; + IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, + ntohs(adapter->vxlan_port) | + ntohs(adapter->geneve_port) << + IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT); + return 0; } +static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = { + .sync_table = ixgbe_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + +static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = { + .sync_table = ixgbe_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + #ifdef CONFIG_IXGBE_DCB /** * ixgbe_configure_dcb - Configure DCB hardware @@ -6332,7 +6349,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_x550em_a: - adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: @@ -6359,7 +6375,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif - adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; break; default: break; @@ -6798,8 +6813,7 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); - ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); - udp_tunnel_get_rx_info(netdev); + udp_tunnel_nic_reset_ntf(netdev); return 0; @@ -7921,12 +7935,6 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } - if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { - rtnl_lock(); - adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; - udp_tunnel_get_rx_info(adapter->netdev); - rtnl_unlock(); - } ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@ -9795,118 +9803,6 @@ static int ixgbe_set_features(struct net_device *netdev, return 1; } -/** - * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_add_udp_tunnel_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_hw *hw = &adapter->hw; - __be16 port = ti->port; - u32 port_shift = 0; - u32 reg; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port == port) - return; - - if (adapter->vxlan_port) { - netdev_info(dev, - "VXLAN port %d set, not adding port %d\n", - ntohs(adapter->vxlan_port), - ntohs(port)); - return; - } - - adapter->vxlan_port = port; - break; - case 
UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port == port) - return; - - if (adapter->geneve_port) { - netdev_info(dev, - "GENEVE port %d set, not adding port %d\n", - ntohs(adapter->geneve_port), - ntohs(port)); - return; - } - - port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; - adapter->geneve_port = port; - break; - default: - return; - } - - reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; - IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); -} - -/** - * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports - * @dev: The port's netdev - * @ti: Tunnel endpoint information - **/ -static void ixgbe_del_udp_tunnel_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct ixgbe_adapter *adapter = netdev_priv(dev); - u32 port_mask; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN && - ti->type != UDP_TUNNEL_TYPE_GENEVE) - return; - - if (ti->sa_family != AF_INET) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) - return; - - if (adapter->vxlan_port != ti->port) { - netdev_info(dev, "VXLAN port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - if (adapter->geneve_port != ti->port) { - netdev_info(dev, "GENEVE port %d not found\n", - ntohs(ti->port)); - return; - } - - port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; - break; - default: - return; - } - - ixgbe_clear_udp_tunnel_port(adapter, port_mask); - adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; -} - static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, @@ -10396,8 +10292,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, - .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = ixgbe_features_check, .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, @@ -10842,6 +10738,18 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550; + break; + case ixgbe_mac_x550em_a: + netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a; + break; + default: + break; + } + /* Make sure the SWFW semaphore is in a valid state */ if (hw->mac.ops.init_swfw_sync) hw->mac.ops.init_swfw_sync(hw); From 442a35a5a7aa7277ace9a2671260dbff1a04e029 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:52 -0700 Subject: [PATCH 0846/2056] bnxt: convert to new udp_tunnel_nic infra Convert to new infra, taking advantage of sleeping in callbacks. v2: - use bp->*_fw_dst_port_id != INVALID_HW_RING_ID as indication that the offload is active. Signed-off-by: Jakub Kicinski Reviewed-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 141 ++++++---------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 12 +- 2 files changed, 40 insertions(+), 113 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 43956232b0a4..0911eb3b8007 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4509,10 +4509,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = bp->nge_fw_dst_port_id; + req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; @@ -4547,10 +4549,11 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, switch (tunnel_type) { case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: - bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->vxlan_fw_dst_port_id = + le16_to_cpu(resp->tunnel_dst_port_id); break; case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: - bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; + bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id); break; default: break; @@ -7578,16 +7581,12 @@ static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { - if (bp->vxlan_port_cnt) { + if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - bp->vxlan_port_cnt = 0; - if (bp->nge_port_cnt) { + if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - bp->nge_port_cnt = 0; } static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) @@ -9305,7 +9304,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } if (irq_re_init) - udp_tunnel_get_rx_info(bp->dev); + udp_tunnel_nic_reset_ntf(bp->dev); set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -10456,24 +10455,6 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_cfg_ntp_filters(bp); if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); - if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->vxlan_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); - } - if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_alloc( - bp, bp->nge_port, - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } - if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_tunnel_dst_port_free( - bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); - } if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); bnxt_hwrm_port_qstats_ext(bp); @@ -11070,6 +11051,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) timer_setup(&bp->timer, bnxt_timer, 0); bp->current_interval = BNXT_TIMER_INTERVAL; + bp->vxlan_fw_dst_port_id = 
INVALID_HW_RING_ID; + bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; + clear_bit(BNXT_STATE_OPEN, &bp->state); return 0; @@ -11397,84 +11381,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) #endif /* CONFIG_RFS_ACCEL */ -static void bnxt_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) +static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table) { - struct bnxt *bp = netdev_priv(dev); + struct bnxt *bp = netdev_priv(netdev); + struct udp_tunnel_info ti; + unsigned int cmd; - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; + udp_tunnel_nic_get_port(netdev, table, 0, &ti); + if (ti.type == UDP_TUNNEL_TYPE_VXLAN) + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN; + else + cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE; - if (!netif_running(dev)) - return; + if (ti.port) + return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd); - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) - return; - - bp->vxlan_port_cnt++; - if (bp->vxlan_port_cnt == 1) { - bp->vxlan_port = ti->port; - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (bp->nge_port_cnt && bp->nge_port != ti->port) - return; - - bp->nge_port_cnt++; - if (bp->nge_port_cnt == 1) { - bp->nge_port = ti->port; - set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); - } - break; - default: - return; - } - - bnxt_queue_sp_work(bp); + return bnxt_hwrm_tunnel_dst_port_free(bp, cmd); } -static void bnxt_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct bnxt *bp = netdev_priv(dev); - - if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) - return; - - if (!netif_running(dev)) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) - return; - bp->vxlan_port_cnt--; - - if (bp->vxlan_port_cnt != 0) - return; - - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!bp->nge_port_cnt || bp->nge_port != ti->port) - return; - bp->nge_port_cnt--; - - if (bp->nge_port_cnt != 0) - return; - - set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); - break; - default: - return; - } - - bnxt_queue_sp_work(bp); -} +static const struct udp_tunnel_nic_info bnxt_udp_tunnels = { + .sync_table = bnxt_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, u32 filter_mask, @@ -11572,8 +11505,8 @@ static const struct net_device_ops bnxt_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif - .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, - .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_bpf = bnxt_xdp, .ndo_xdp_xmit = bnxt_xdp_xmit, .ndo_bridge_getlink = bnxt_bridge_getlink, @@ -12063,6 +11996,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL; + dev->udp_tunnel_nic_info = &bnxt_udp_tunnels; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | 
NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d556e5660a02..2acd7f958246 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1765,12 +1765,8 @@ struct bnxt { ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv)) #define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48) - __be16 vxlan_port; - u8 vxlan_port_cnt; - __le16 vxlan_fw_dst_port_id; - __be16 nge_port; - u8 nge_port_cnt; - __le16 nge_fw_dst_port_id; + u16 vxlan_fw_dst_port_id; + u16 nge_fw_dst_port_id; u8 port_partition_type; u8 port_count; u16 br_mode; @@ -1790,16 +1786,12 @@ struct bnxt { #define BNXT_RX_NTP_FLTR_SP_EVENT 1 #define BNXT_LINK_CHNG_SP_EVENT 2 #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3 -#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4 -#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5 #define BNXT_RESET_TASK_SP_EVENT 6 #define BNXT_RST_RING_SP_EVENT 7 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 #define BNXT_PERIODIC_STATS_SP_EVENT 9 #define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 #define BNXT_RESET_TASK_SILENT_SP_EVENT 11 -#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 -#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 #define BNXT_FLOW_STATS_SP_EVENT 15 #define BNXT_UPDATE_PHY_SP_EVENT 16 From fb6f8970bd9e6ecce03fbe2453fe03592595ebc9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 9 Jul 2020 17:42:53 -0700 Subject: [PATCH 0847/2056] mlx4: convert to new udp_tunnel_nic infra Convert to new infra, make use of the ability to sleep in the callback. Signed-off-by: Jakub Kicinski Acked-by: Tariq Toukan Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlx4/en_netdev.c | 107 ++++-------------- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 - 2 files changed, 25 insertions(+), 84 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5bd3cd37d50f..2b8608f8f0a9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1816,7 +1816,7 @@ int mlx4_en_start_port(struct net_device *dev) queue_work(mdev->workqueue, &priv->rx_mode_task); if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - udp_tunnel_get_rx_info(dev); + udp_tunnel_nic_reset_ntf(dev); priv->port_up = true; @@ -2628,89 +2628,32 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev, return 0; } -static void mlx4_en_add_vxlan_offloads(struct work_struct *work) +static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table) { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct udp_tunnel_info ti; int ret; - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - vxlan_add_task); + + udp_tunnel_nic_get_port(dev, table, 0, &ti); + priv->vxlan_port = ti.port; ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port); if (ret) - goto out; + return ret; - ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, - VXLAN_STEER_BY_OUTER_MAC, 1); -out: - if (ret) { - en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); - return; - } + return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, + VXLAN_STEER_BY_OUTER_MAC, + !!priv->vxlan_port); } -static void mlx4_en_del_vxlan_offloads(struct work_struct *work) -{ - int ret; - struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, - vxlan_del_task); - ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, 
priv->port, - VXLAN_STEER_BY_OUTER_MAC, 0); - if (ret) - en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); - - priv->vxlan_port = 0; -} - -static void mlx4_en_add_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - __be16 port = ti->port; - __be16 current_port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (ti->sa_family != AF_INET) - return; - - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - return; - - current_port = priv->vxlan_port; - if (current_port && current_port != port) { - en_warn(priv, "vxlan port %d configured, can't add port %d\n", - ntohs(current_port), ntohs(port)); - return; - } - - priv->vxlan_port = port; - queue_work(priv->mdev->workqueue, &priv->vxlan_add_task); -} - -static void mlx4_en_del_vxlan_port(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - __be16 port = ti->port; - __be16 current_port; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (ti->sa_family != AF_INET) - return; - - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - return; - - current_port = priv->vxlan_port; - if (current_port != port) { - en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port)); - return; - } - - queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); -} +static const struct udp_tunnel_nic_info mlx4_udp_tunnels = { + .sync_table = mlx4_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, struct net_device *dev, @@ -2914,8 +2857,8 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, - .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, .ndo_bpf = mlx4_xdp, @@ -2948,8 +2891,8 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, - .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, - .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, .ndo_bpf = mlx4_xdp, @@ -3250,8 +3193,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); - INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); - INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); #ifdef CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@ -3406,6 +3347,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_PARTIAL; + + dev->udp_tunnel_nic_info = &mlx4_udp_tunnels; } dev->vlan_features = dev->hw_features; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h 
b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9f5603612960..a46efe37cfa9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -599,8 +599,6 @@ struct mlx4_en_priv { struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; - struct work_struct vxlan_add_task; - struct work_struct vxlan_del_task; struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_counter_stats pf_stats; From eef8a42d6ce087d1c81c960ae0d14f955b742feb Mon Sep 17 00:00:00 2001 From: Wenbo Zhang Date: Fri, 10 Jul 2020 05:20:35 -0400 Subject: [PATCH 0848/2056] bpf: Fix fds_example SIGSEGV error The value of `BPF_LOG_BUF_SIZE` is `UINT32_MAX >> 8`, so defining an array of that size on the stack caused an overflow. Signed-off-by: Wenbo Zhang Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200710092035.28919-1-ethercflow@gmail.com --- samples/bpf/fds_example.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/samples/bpf/fds_example.c b/samples/bpf/fds_example.c index d5992f787232..59f45fef5110 100644 --- a/samples/bpf/fds_example.c +++ b/samples/bpf/fds_example.c @@ -30,6 +30,8 @@ #define BPF_M_MAP 1 #define BPF_M_PROG 2 +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + static void usage(void) { printf("Usage: fds_example [...]\n"); @@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object) BPF_EXIT_INSN(), }; size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn); - char bpf_log_buf[BPF_LOG_BUF_SIZE]; struct bpf_object *obj; int prog_fd; From c57544b3dec4480c1b455a43f18be5ec91ab3136 Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:07 +0300 Subject: [PATCH 0849/2056] devlink: Refactor devlink health reporter constructor Prepare a common routine in devlink_health_reporter_create() for use in similar functions for devlink port health reporters. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S.
Miller --- net/core/devlink.c | 45 ++++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 346d385ba09f..a203d35fb56f 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5321,6 +5321,31 @@ devlink_health_reporter_find_by_name(struct devlink *devlink, return NULL; } +static struct devlink_health_reporter * +__devlink_health_reporter_create(struct devlink *devlink, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv) +{ + struct devlink_health_reporter *reporter; + + if (WARN_ON(graceful_period && !ops->recover)) + return ERR_PTR(-EINVAL); + + reporter = kzalloc(sizeof(*reporter), GFP_KERNEL); + if (!reporter) + return ERR_PTR(-ENOMEM); + + reporter->priv = priv; + reporter->ops = ops; + reporter->devlink = devlink; + reporter->graceful_period = graceful_period; + reporter->auto_recover = !!ops->recover; + reporter->auto_dump = !!ops->dump; + mutex_init(&reporter->dump_lock); + refcount_set(&reporter->refcount, 1); + return reporter; +} + /** * devlink_health_reporter_create - create devlink health reporter * @@ -5342,25 +5367,11 @@ devlink_health_reporter_create(struct devlink *devlink, goto unlock; } - if (WARN_ON(graceful_period && !ops->recover)) { - reporter = ERR_PTR(-EINVAL); + reporter = __devlink_health_reporter_create(devlink, ops, + graceful_period, priv); + if (IS_ERR(reporter)) goto unlock; - } - reporter = kzalloc(sizeof(*reporter), GFP_KERNEL); - if (!reporter) { - reporter = ERR_PTR(-ENOMEM); - goto unlock; - } - - reporter->priv = priv; - reporter->ops = ops; - reporter->devlink = devlink; - reporter->graceful_period = graceful_period; - reporter->auto_recover = !!ops->recover; - reporter->auto_dump = !!ops->dump; - mutex_init(&reporter->dump_lock); - refcount_set(&reporter->refcount, 1); list_add_tail(&reporter->list, &devlink->reporter_list); unlock: mutex_unlock(&devlink->reporters_lock); From 3c5584bf0a0493e8d232ade65f4b9c5e995f3a0c Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:08 +0300 Subject: [PATCH 0850/2056] devlink: Rework devlink health reporter destructor Devlink keeps its own reference to every reporter in a list and inits refcount to 1 upon reporter's creation. Existing destructor waits to free the memory indefinitely using msleep() until all references except devlink's own are put. Rework this mechanism by moving memory free routine to a separate function, which is called when the last reporter reference is put. Besides, it allows to call __devlink_health_reporter_destroy() while locked on a reporters list mutex in symmetry to __devlink_health_reporter_create(), which is required in follow-up patch. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/core/devlink.c | 37 ++++++++++++++++++++++++------------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index a203d35fb56f..b85f2113398d 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5379,6 +5379,29 @@ devlink_health_reporter_create(struct devlink *devlink, } EXPORT_SYMBOL_GPL(devlink_health_reporter_create); +static void +devlink_health_reporter_free(struct devlink_health_reporter *reporter) +{ + mutex_destroy(&reporter->dump_lock); + if (reporter->dump_fmsg) + devlink_fmsg_free(reporter->dump_fmsg); + kfree(reporter); +} + +static void +devlink_health_reporter_put(struct devlink_health_reporter *reporter) +{ + if (refcount_dec_and_test(&reporter->refcount)) + devlink_health_reporter_free(reporter); +} + +static void +__devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) +{ + list_del(&reporter->list); + devlink_health_reporter_put(reporter); +} + /** * devlink_health_reporter_destroy - destroy devlink health reporter * @@ -5388,14 +5411,8 @@ void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) { mutex_lock(&reporter->devlink->reporters_lock); - list_del(&reporter->list); + __devlink_health_reporter_destroy(reporter); mutex_unlock(&reporter->devlink->reporters_lock); - while (refcount_read(&reporter->refcount) > 1) - msleep(100); - mutex_destroy(&reporter->dump_lock); - if (reporter->dump_fmsg) - devlink_fmsg_free(reporter->dump_fmsg); - kfree(reporter); } EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); @@ -5665,12 +5682,6 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb) return NULL; } -static void -devlink_health_reporter_put(struct devlink_health_reporter *reporter) -{ - refcount_dec(&reporter->refcount); -} - void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state) From bd8210055c36a453358d75017dce7522d950dd38 Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:09 +0300 Subject: [PATCH 0851/2056] devlink: Create generic devlink health reporter search function Add a generic __devlink_health_reporter_find_by_name() that can be used with arbitrary devlink health reporter list. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/core/devlink.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index b85f2113398d..4e995de65b36 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5309,18 +5309,28 @@ devlink_health_reporter_priv(struct devlink_health_reporter *reporter) EXPORT_SYMBOL_GPL(devlink_health_reporter_priv); static struct devlink_health_reporter * -devlink_health_reporter_find_by_name(struct devlink *devlink, - const char *reporter_name) +__devlink_health_reporter_find_by_name(struct list_head *reporter_list, + struct mutex *list_lock, + const char *reporter_name) { struct devlink_health_reporter *reporter; - lockdep_assert_held(&devlink->reporters_lock); - list_for_each_entry(reporter, &devlink->reporter_list, list) + lockdep_assert_held(list_lock); + list_for_each_entry(reporter, reporter_list, list) if (!strcmp(reporter->ops->name, reporter_name)) return reporter; return NULL; } +static struct devlink_health_reporter * +devlink_health_reporter_find_by_name(struct devlink *devlink, + const char *reporter_name) +{ + return __devlink_health_reporter_find_by_name(&devlink->reporter_list, + &devlink->reporters_lock, + reporter_name); +} + static struct devlink_health_reporter * __devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, From f4f541660121aeb91a1b462ab3f3c5a86ab7c3dd Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:10 +0300 Subject: [PATCH 0852/2056] devlink: Implement devlink health reporters on per-port basis Add devlink-health reporter support on per-port basis. The main difference existing devlink-health is that port reporters are stored in per-devlink_port lists. Upon creation of such health reporter the reference to a port it belongs to is stored in reporter struct. Fill the port index attribute in devlink-health response to allow devlink userspace utility to distinguish between device and port reporters. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 2 + net/core/devlink.c | 94 +++++++++++++++++++++++++++++++++++-------- 2 files changed, 79 insertions(+), 17 deletions(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 746bed538664..bb1139752405 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -101,6 +101,8 @@ struct devlink_port { u8 attrs_set:1, switch_port:1; struct delayed_work type_warn_dw; + struct list_head reporter_list; + struct mutex reporters_lock; /* Protects reporter_list */ }; struct devlink_sb_pool_info { diff --git a/net/core/devlink.c b/net/core/devlink.c index 4e995de65b36..b4a231ca7135 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -386,19 +386,21 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) return NULL; } -#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) -#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) -#define DEVLINK_NL_FLAG_NEED_SB BIT(2) +#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) +#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) +#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(2) +#define DEVLINK_NL_FLAG_NEED_SB BIT(3) /* The per devlink instance lock is taken by default in the pre-doit * operation, yet several commands do not require this. The global * devlink lock is taken and protects from disruption by user-calls. 
*/ -#define DEVLINK_NL_FLAG_NO_LOCK BIT(3) +#define DEVLINK_NL_FLAG_NO_LOCK BIT(4) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { + struct devlink_port *devlink_port; struct devlink *devlink; int err; @@ -413,14 +415,17 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) { info->user_ptr[0] = devlink; } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { - struct devlink_port *devlink_port; - devlink_port = devlink_port_get_from_info(devlink, info); if (IS_ERR(devlink_port)) { err = PTR_ERR(devlink_port); goto unlock; } info->user_ptr[0] = devlink_port; + } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) { + info->user_ptr[0] = devlink; + devlink_port = devlink_port_get_from_info(devlink, info); + if (!IS_ERR(devlink_port)) + info->user_ptr[1] = devlink_port; } if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { struct devlink_sb *devlink_sb; @@ -5287,6 +5292,7 @@ struct devlink_health_reporter { void *priv; const struct devlink_health_reporter_ops *ops; struct devlink *devlink; + struct devlink_port *devlink_port; struct devlink_fmsg *dump_fmsg; struct mutex dump_lock; /* lock parallel read/write from dump buffers */ u64 graceful_period; @@ -5331,6 +5337,15 @@ devlink_health_reporter_find_by_name(struct devlink *devlink, reporter_name); } +static struct devlink_health_reporter * +devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port, + const char *reporter_name) +{ + return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list, + &devlink_port->reporters_lock, + reporter_name); +} + static struct devlink_health_reporter * __devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, @@ -5443,6 +5458,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg, if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; + if (reporter->devlink_port) { + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index)) + goto genlmsg_cancel; + } reporter_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_HEALTH_REPORTER); if (!reporter_attr) @@ -5650,17 +5669,28 @@ devlink_health_reporter_get_from_attrs(struct devlink *devlink, struct nlattr **attrs) { struct devlink_health_reporter *reporter; + struct devlink_port *devlink_port; char *reporter_name; if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]) return NULL; reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]); - mutex_lock(&devlink->reporters_lock); - reporter = devlink_health_reporter_find_by_name(devlink, reporter_name); - if (reporter) - refcount_inc(&reporter->refcount); - mutex_unlock(&devlink->reporters_lock); + devlink_port = devlink_port_get_from_attrs(devlink, attrs); + if (IS_ERR(devlink_port)) { + mutex_lock(&devlink->reporters_lock); + reporter = devlink_health_reporter_find_by_name(devlink, reporter_name); + if (reporter) + refcount_inc(&reporter->refcount); + mutex_unlock(&devlink->reporters_lock); + } else { + mutex_lock(&devlink_port->reporters_lock); + reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name); + if (reporter) + refcount_inc(&reporter->refcount); + mutex_unlock(&devlink_port->reporters_lock); + } + return reporter; } @@ -5748,6 +5778,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { struct devlink_health_reporter *reporter; + struct devlink_port *port; struct devlink *devlink; int 
start = cb->args[0]; int idx = 0; @@ -5778,6 +5809,31 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg, } mutex_unlock(&devlink->reporters_lock); } + + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + list_for_each_entry(port, &devlink->port_list, list) { + mutex_lock(&port->reporters_lock); + list_for_each_entry(reporter, &port->reporter_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_health_reporter_fill(msg, devlink, reporter, + DEVLINK_CMD_HEALTH_REPORTER_GET, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&port->reporters_lock); + goto out; + } + idx++; + } + mutex_unlock(&port->reporters_lock); + } + } out: mutex_unlock(&devlink_mutex); @@ -7157,7 +7213,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_get_doit, .dumpit = devlink_nl_cmd_health_reporter_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, /* can be retrieved by unprivileged users */ }, @@ -7166,7 +7222,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7174,7 +7230,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_recover_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7182,7 +7238,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_diagnose_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7191,7 +7247,7 @@ static const struct genl_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7199,7 +7255,7 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_health_reporter_dump_clear_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT | DEVLINK_NL_FLAG_NO_LOCK, }, { @@ -7459,6 +7515,8 @@ int devlink_port_register(struct devlink *devlink, list_add_tail(&devlink_port->list, &devlink->port_list); INIT_LIST_HEAD(&devlink_port->param_list); mutex_unlock(&devlink->lock); + INIT_LIST_HEAD(&devlink_port->reporter_list); + mutex_init(&devlink_port->reporters_lock); INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn); devlink_port_type_warn_schedule(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); @@ -7475,6 +7533,8 @@ void devlink_port_unregister(struct devlink_port *devlink_port) { 
struct devlink *devlink = devlink_port->devlink; + WARN_ON(!list_empty(&devlink_port->reporter_list)); + mutex_destroy(&devlink_port->reporters_lock); devlink_port_type_warn_cancel(devlink_port); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); mutex_lock(&devlink->lock); From 15c724b997a8fe1a677cf11797fb29c0bdecc63f Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:11 +0300 Subject: [PATCH 0853/2056] devlink: Add devlink health port reporters API In order to use new devlink port health reporters infrastructure, add corresponding constructor and destructor functions. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 9 ++++++++ net/core/devlink.c | 50 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/include/net/devlink.h b/include/net/devlink.h index bb1139752405..913e8679ae35 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1338,9 +1338,18 @@ struct devlink_health_reporter * devlink_health_reporter_create(struct devlink *devlink, const struct devlink_health_reporter_ops *ops, u64 graceful_period, void *priv); + +struct devlink_health_reporter * +devlink_port_health_reporter_create(struct devlink_port *port, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv); + void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter); +void +devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter); + void * devlink_health_reporter_priv(struct devlink_health_reporter *reporter); int devlink_health_report(struct devlink_health_reporter *reporter, diff --git a/net/core/devlink.c b/net/core/devlink.c index b4a231ca7135..20a83aace642 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5371,6 +5371,42 @@ __devlink_health_reporter_create(struct devlink *devlink, return reporter; } +/** + * devlink_port_health_reporter_create - create devlink health reporter for + * specified port instance + * + * @port: devlink_port which should contain the new reporter + * @ops: ops + * @graceful_period: to avoid recovery loops, in msecs + * @priv: priv + */ +struct devlink_health_reporter * +devlink_port_health_reporter_create(struct devlink_port *port, + const struct devlink_health_reporter_ops *ops, + u64 graceful_period, void *priv) +{ + struct devlink_health_reporter *reporter; + + mutex_lock(&port->reporters_lock); + if (__devlink_health_reporter_find_by_name(&port->reporter_list, + &port->reporters_lock, ops->name)) { + reporter = ERR_PTR(-EEXIST); + goto unlock; + } + + reporter = __devlink_health_reporter_create(port->devlink, ops, + graceful_period, priv); + if (IS_ERR(reporter)) + goto unlock; + + reporter->devlink_port = port; + list_add_tail(&reporter->list, &port->reporter_list); +unlock: + mutex_unlock(&port->reporters_lock); + return reporter; +} +EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create); + /** * devlink_health_reporter_create - create devlink health reporter * @@ -5441,6 +5477,20 @@ devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) } EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); +/** + * devlink_port_health_reporter_destroy - destroy devlink port health reporter + * + * @reporter: devlink health reporter to destroy + */ +void +devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter) +{ + mutex_lock(&reporter->devlink_port->reporters_lock); + 
__devlink_health_reporter_destroy(reporter); + mutex_unlock(&reporter->devlink_port->reporters_lock); +} +EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy); + static int devlink_nl_health_reporter_fill(struct sk_buff *msg, struct devlink *devlink, From 4d54d3251ea3d85f9f2b12de308ddae68fb7546a Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:12 +0300 Subject: [PATCH 0854/2056] net/mlx5e: Move devlink port register and unregister calls Register devlink ports upon NIC init. TX and RX health reporters handle errors which may occur early on at driver initialization. And because these reporters are to be moved to port context, they require devlink ports to be already registered. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b04c8572adea..e054726b2ac6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5086,6 +5086,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_build_nic_netdev(netdev); + err = mlx5e_devlink_port_register(priv); + if (err) + mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); mlx5e_health_create_reporters(priv); return 0; @@ -5094,6 +5097,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { mlx5e_health_destroy_reporters(priv); + mlx5e_devlink_port_unregister(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); mlx5e_netdev_cleanup(priv->netdev, priv); @@ -5526,16 +5530,10 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) goto err_destroy_netdev; } - err = mlx5e_devlink_port_register(priv); - if (err) { - mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); - goto err_detach; - } - err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_devlink_port_unregister; + goto err_detach; } mlx5e_devlink_port_type_eth_set(priv); @@ -5543,8 +5541,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) mlx5e_dcbnl_init_app(priv); return priv; -err_devlink_port_unregister: - mlx5e_devlink_port_unregister(priv); err_detach: mlx5e_detach(mdev, priv); err_destroy_netdev: @@ -5565,7 +5561,6 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) priv = vpriv; mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); - mlx5e_devlink_port_unregister(priv); mlx5e_detach(mdev, vpriv); mlx5e_destroy_netdev(priv); } From b7e93bb6b10434c09837fbe3fce26c6b66592bc9 Mon Sep 17 00:00:00 2001 From: Vladyslav Tarasiuk Date: Fri, 10 Jul 2020 15:25:13 +0300 Subject: [PATCH 0855/2056] net/mlx5e: Move devlink-health rx and tx reporters to devlink port Utilize new devlink-health port reporters API to move rx and tx reporters from device to port. Signed-off-by: Vladyslav Tarasiuk Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlx5/core/en/reporter_rx.c | 9 +++------ .../net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 11 +++-------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c index 32ed1067e6dc..9913647a1faf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c @@ -611,13 +611,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = { void mlx5e_reporter_rx_create(struct mlx5e_priv *priv) { - struct devlink *devlink = priv_to_devlink(priv->mdev); struct devlink_health_reporter *reporter; - reporter = devlink_health_reporter_create(devlink, - &mlx5_rx_reporter_ops, - MLX5E_REPORTER_RX_GRACEFUL_PERIOD, - priv); + reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops, + MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n", PTR_ERR(reporter)); @@ -631,5 +628,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv) if (!priv->rx_reporter) return; - devlink_health_reporter_destroy(priv->rx_reporter); + devlink_port_health_reporter_destroy(priv->rx_reporter); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 826584380216..8be6eaa3eeb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -440,14 +440,9 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = { void mlx5e_reporter_tx_create(struct mlx5e_priv *priv) { struct devlink_health_reporter *reporter; - struct mlx5_core_dev *mdev = priv->mdev; - struct devlink *devlink; - devlink = priv_to_devlink(mdev); - reporter = - devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops, - MLX5_REPORTER_TX_GRACEFUL_PERIOD, - priv); + reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops, + MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv); if (IS_ERR(reporter)) { netdev_warn(priv->netdev, "Failed to create tx reporter, err = %ld\n", @@ -462,5 +457,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv) if (!priv->tx_reporter) return; - devlink_health_reporter_destroy(priv->tx_reporter); + devlink_port_health_reporter_destroy(priv->tx_reporter); } From a594920f8747fa032c784c3660d6cd5a8ab291f8 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Sat, 11 Jul 2020 00:57:59 +0900 Subject: [PATCH 0856/2056] inet: Remove an unnecessary argument of syn_ack_recalc(). Commit 0c3d79bce48034018e840468ac5a642894a521a3 ("tcp: reduce SYN-ACK retrans for TCP_DEFER_ACCEPT") introduces syn_ack_recalc() which decides if a minisock is held and a SYN+ACK is retransmitted or not. If rskq_defer_accept is not zero in syn_ack_recalc(), max_retries always has the same value because max_retries is overwritten by rskq_defer_accept in reqsk_timer_handler(). This commit adds three changes: - remove redundant non-zero check for rskq_defer_accept in reqsk_timer_handler(). - remove max_retries from the arguments of syn_ack_recalc() and use rskq_defer_accept instead. - rename thresh to max_syn_ack_retries for readability. Signed-off-by: Kuniyuki Iwashima Reviewed-by: Benjamin Herrenschmidt CC: Julian Anastasov Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/ipv4/inet_connection_sock.c | 33 +++++++++++++-------------------- 1 file changed, 13 insertions(+), 20 deletions(-) diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index afaf582a5aa9..22b0e7336360 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -648,20 +648,19 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, EXPORT_SYMBOL_GPL(inet_csk_route_child_sock); /* Decide when to expire the request and when to resend SYN-ACK */ -static inline void syn_ack_recalc(struct request_sock *req, const int thresh, - const int max_retries, - const u8 rskq_defer_accept, - int *expire, int *resend) +static void syn_ack_recalc(struct request_sock *req, + const int max_syn_ack_retries, + const u8 rskq_defer_accept, + int *expire, int *resend) { if (!rskq_defer_accept) { - *expire = req->num_timeout >= thresh; + *expire = req->num_timeout >= max_syn_ack_retries; *resend = 1; return; } - *expire = req->num_timeout >= thresh && - (!inet_rsk(req)->acked || req->num_timeout >= max_retries); - /* - * Do not resend while waiting for data after ACK, + *expire = req->num_timeout >= max_syn_ack_retries && + (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept); + /* Do not resend while waiting for data after ACK, * start to resend on end of deferring period to give * last chance for data or ACK to create established socket. */ @@ -720,15 +719,12 @@ static void reqsk_timer_handler(struct timer_list *t) struct net *net = sock_net(sk_listener); struct inet_connection_sock *icsk = inet_csk(sk_listener); struct request_sock_queue *queue = &icsk->icsk_accept_queue; - int qlen, expire = 0, resend = 0; - int max_retries, thresh; - u8 defer_accept; + int max_syn_ack_retries, qlen, expire = 0, resend = 0; if (inet_sk_state_load(sk_listener) != TCP_LISTEN) goto drop; - max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries; - thresh = max_retries; + max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries; /* Normally all the openreqs are young and become mature * (i.e. converted to established socket) for first timeout. * If synack was not acknowledged for 1 second, it means @@ -750,17 +746,14 @@ static void reqsk_timer_handler(struct timer_list *t) if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) { int young = reqsk_queue_len_young(queue) << 1; - while (thresh > 2) { + while (max_syn_ack_retries > 2) { if (qlen < young) break; - thresh--; + max_syn_ack_retries--; young <<= 1; } } - defer_accept = READ_ONCE(queue->rskq_defer_accept); - if (defer_accept) - max_retries = defer_accept; - syn_ack_recalc(req, thresh, max_retries, defer_accept, + syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept), &expire, &resend); req->rsk_ops->syn_ack_timeout(req); if (!expire && From 94339443686b36d3223bc032b7947267474e2679 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sat, 11 Jul 2020 18:05:04 +0300 Subject: [PATCH 0857/2056] net: bridge: notify on vlan tunnel changes done via the old api If someone uses the old vlan API to configure tunnel mappings we'll only generate the old-style full port notification. That would be a problem if we are monitoring the new vlan notifications for changes. The patch resolves the issue by adding vlan notifications to the old tunnel netlink code. 
As usual we try to compress the notifications for as many vlans in a range as possible, thus a vlan tunnel change is considered able to enter the "current" vlan notification range if: 1. vlan exists 2. it has actually changed (curr_change == true) 3. it passes all standard vlan notification range checks done by br_vlan_can_enter_range() such as option equality, id continuity etc Note that vlan tunnel changes (add/del) are considered a part of vlan options so only RTM_NEWVLAN notification is generated with the relevant information inside. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_netlink_tunnel.c | 49 ++++++++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index 162998e2f039..8914290c75d4 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -250,6 +250,36 @@ int br_parse_vlan_tunnel_info(struct nlattr *attr, return 0; } +/* send a notification if v_curr can't enter the range and start a new one */ +static void __vlan_tunnel_handle_range(const struct net_bridge_port *p, + struct net_bridge_vlan **v_start, + struct net_bridge_vlan **v_end, + int v_curr, bool curr_change) +{ + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + + vg = nbp_vlan_group(p); + if (!vg) + return; + + v = br_vlan_find(vg, v_curr); + + if (!*v_start) + goto out_init; + + if (v && curr_change && br_vlan_can_enter_range(v, *v_end)) { + *v_end = v; + return; + } + + br_vlan_notify(p->br, p, (*v_start)->vid, (*v_end)->vid, RTM_NEWVLAN); +out_init: + /* we start a range only if there are any changes to notify about */ + *v_start = curr_change ? v : NULL; + *v_end = *v_start; +} + int br_process_vlan_tunnel_info(const struct net_bridge *br, const struct net_bridge_port *p, int cmd, struct vtunnel_info *tinfo_curr, @@ -263,6 +293,7 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, return -EINVAL; memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info)); } else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) { + struct net_bridge_vlan *v_start = NULL, *v_end = NULL; int t, v; if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN)) @@ -272,11 +303,24 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, return -EINVAL; t = tinfo_last->tunid; for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) { - err = br_vlan_tunnel_info(p, cmd, v, t, changed); + bool curr_change = false; + + err = br_vlan_tunnel_info(p, cmd, v, t, &curr_change); if (err) - return err; + break; t++; + + if (curr_change) + *changed = curr_change; + __vlan_tunnel_handle_range(p, &v_start, &v_end, v, + curr_change); } + if (v_start && v_end) + br_vlan_notify(br, p, v_start->vid, v_end->vid, + RTM_NEWVLAN); + if (err) + return err; + memset(tinfo_last, 0, sizeof(struct vtunnel_info)); memset(tinfo_curr, 0, sizeof(struct vtunnel_info)); } else { @@ -286,6 +330,7 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, tinfo_curr->tunid, changed); if (err) return err; + br_vlan_notify(br, p, tinfo_curr->vid, 0, RTM_NEWVLAN); memset(tinfo_last, 0, sizeof(struct vtunnel_info)); memset(tinfo_curr, 0, sizeof(struct vtunnel_info)); } From 2a550aec36543b20f087e4b3063882e9465f7175 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 11 Jul 2020 22:32:05 +0200 Subject: [PATCH 0858/2056] net: dsa: mv88e6xxx: Implement MTU change The Marvell Switches support jumbo packages. So implement the callbacks needed for changing the MTU. 
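For illustration, the two DSA callbacks named above have a small, fixed contract: .port_max_mtu reports the largest MTU a port can accept and .port_change_mtu applies or rejects a new value. A minimal sketch of that contract follows, assuming a hypothetical switch with a flat 10240-byte jumbo limit; the function names are invented, and the real mv88e6xxx implementation is in the diff below.

#include <linux/errno.h>
#include <net/dsa.h>

static int example_port_max_mtu(struct dsa_switch *ds, int port)
{
        /* Largest MTU the hardware can be programmed with on this port. */
        return 10240;
}

static int example_port_change_mtu(struct dsa_switch *ds, int port,
                                   int new_mtu)
{
        /* Accept anything up to the jumbo limit; a real driver would
         * write the per-port maximum frame size register here.
         */
        if (new_mtu > 10240)
                return -EINVAL;
        return 0;
}

static const struct dsa_switch_ops example_switch_ops = {
        .port_max_mtu           = example_port_max_mtu,
        .port_change_mtu        = example_port_change_mtu,
        /* remaining mandatory ops omitted from this sketch */
};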
Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d995f5bf0d40..6f019955ae42 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2693,6 +2693,31 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN, 0); } +static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_chip *chip = ds->priv; + + if (chip->info->ops->port_set_jumbo_size) + return 10240; + return 1522; +} + +static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int ret = 0; + + mv88e6xxx_reg_lock(chip); + if (chip->info->ops->port_set_jumbo_size) + ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu); + else + if (new_mtu > 1522) + ret = -EINVAL; + mv88e6xxx_reg_unlock(chip); + + return ret; +} + static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port, struct phy_device *phydev) { @@ -5525,6 +5550,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .port_enable = mv88e6xxx_port_enable, .port_disable = mv88e6xxx_port_disable, + .port_max_mtu = mv88e6xxx_get_max_mtu, + .port_change_mtu = mv88e6xxx_change_mtu, .get_mac_eee = mv88e6xxx_get_mac_eee, .set_mac_eee = mv88e6xxx_set_mac_eee, .get_eeprom_len = mv88e6xxx_get_eeprom_len, From 5919305351475dda04c0f6f28f388c89ffafcd2b Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 11 Jul 2020 22:32:06 +0200 Subject: [PATCH 0859/2056] net: fec: Set max MTU size to allow the MTU to be changed The FEC allocates 2K buffers, but looses some of it due to alignment. It can however support an MTU bigger than the default. This is particularly interesting when used in combination with Ethernet switches supporting DSA, which have extra headers. The DSA core will try to increase the MTU to support these extra headers. If the max size defaults to that of standard Ethernet we get a warning. By setting the max to what the driver actually supports, we avoid this warning. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 795693874e31..a93381284f08 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3678,6 +3678,8 @@ fec_probe(struct platform_device *pdev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&pdev->dev); + ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; + ret = register_netdev(ndev); if (ret) goto failed_register; From 6d905436a2098dadb7831f928cf311e28c84ef25 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 11 Jul 2020 22:35:18 +0200 Subject: [PATCH 0860/2056] net: skge: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'skge_up()', GFP_KERNEL can be used because some other memory allocations done a few lines below in 'skge_ring_alloc()' already use this flag. 
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/skge.c | 76 ++++++++++++++--------------- 1 file changed, 36 insertions(+), 40 deletions(-) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 3c89206f18a7..b792f6306a64 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -939,10 +939,10 @@ static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, struct skge_rx_desc *rd = e->desc; dma_addr_t map; - map = pci_map_single(skge->hw->pdev, skb->data, bufsize, - PCI_DMA_FROMDEVICE); + map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize, + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(skge->hw->pdev, map)) + if (dma_mapping_error(&skge->hw->pdev->dev, map)) return -1; rd->dma_lo = lower_32_bits(map); @@ -990,10 +990,10 @@ static void skge_rx_clean(struct skge_port *skge) struct skge_rx_desc *rd = e->desc; rd->control = 0; if (e->skb) { - pci_unmap_single(hw->pdev, + dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), dma_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(e->skb); e->skb = NULL; } @@ -2547,14 +2547,15 @@ static int skge_up(struct net_device *dev) rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); skge->mem_size = tx_size + rx_size; - skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); + skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size, + &skge->dma, GFP_KERNEL); if (!skge->mem) return -ENOMEM; BUG_ON(skge->dma & 7); if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { - dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); + dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n"); err = -EINVAL; goto free_pci_mem; } @@ -2625,7 +2626,8 @@ static int skge_up(struct net_device *dev) skge_rx_clean(skge); kfree(skge->rx_ring.start); free_pci_mem: - pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, + skge->dma); skge->mem = NULL; return err; @@ -2715,7 +2717,8 @@ static int skge_down(struct net_device *dev) kfree(skge->rx_ring.start); kfree(skge->tx_ring.start); - pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem, + skge->dma); skge->mem = NULL; return 0; } @@ -2749,8 +2752,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, BUG_ON(td->control & BMU_OWN); e->skb = skb; len = skb_headlen(skb); - map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(hw->pdev, map)) + map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) goto mapping_error; dma_unmap_addr_set(e, mapaddr, map); @@ -2830,16 +2833,12 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, mapping_unwind: e = skge->tx_ring.to_use; - pci_unmap_single(hw->pdev, - dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_TODEVICE); + dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); while (i-- > 0) { e = e->next; - pci_unmap_page(hw->pdev, - dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_TODEVICE); + dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); } mapping_error: @@ -2856,13 +2855,11 @@ static inline void 
skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, { /* skb header vs. fragment */ if (control & BMU_STF) - pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); else - pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), DMA_TO_DEVICE); } /* Free all buffers in transmit ring */ @@ -3072,15 +3069,15 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, if (!skb) goto resubmit; - pci_dma_sync_single_for_cpu(skge->hw->pdev, - dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&skge->hw->pdev->dev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + DMA_FROM_DEVICE); skb_copy_from_linear_data(e->skb, skb->data, len); - pci_dma_sync_single_for_device(skge->hw->pdev, - dma_unmap_addr(e, mapaddr), - dma_unmap_len(e, maplen), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&skge->hw->pdev->dev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + DMA_FROM_DEVICE); skge_rx_reuse(e, skge->rx_buf_size); } else { struct skge_element ee; @@ -3100,10 +3097,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, goto resubmit; } - pci_unmap_single(skge->hw->pdev, + dma_unmap_single(&skge->hw->pdev->dev, dma_unmap_addr(&ee, mapaddr), - dma_unmap_len(&ee, maplen), - PCI_DMA_FROMDEVICE); + dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE); } skb_put(skb, len); @@ -3895,12 +3891,12 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { using_dac = 0; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (err) { From c86768cf5cf6bfb2ce3742daaf09446465e96349 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 11 Jul 2020 22:49:44 +0200 Subject: [PATCH 0861/2056] net: sky2: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'sky2_alloc_buffers()', GFP_KERNEL can be used because some other memory allocations in the same function already use this flag. When memory is allocated in 'sky2_probe()', GFP_KERNEL can be used because another memory allocations in the same function already uses this flag. 
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/sky2.c | 87 +++++++++++++++-------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fe54764caea9..cec8124301c7 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -1209,8 +1209,9 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, struct sk_buff *skb = re->skb; int i; - re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, re->data_addr)) + re->data_addr = dma_map_single(&pdev->dev, skb->data, size, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, re->data_addr)) goto mapping_error; dma_unmap_len_set(re, data_size, size); @@ -1229,13 +1230,13 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re, map_page_error: while (--i >= 0) { - pci_unmap_page(pdev, re->frag_addr[i], + dma_unmap_page(&pdev->dev, re->frag_addr[i], skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } - pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, re->data_addr, + dma_unmap_len(re, data_size), DMA_FROM_DEVICE); mapping_error: if (net_ratelimit()) @@ -1249,13 +1250,13 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re) struct sk_buff *skb = re->skb; int i; - pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size), - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, re->data_addr, + dma_unmap_len(re, data_size), DMA_FROM_DEVICE); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - pci_unmap_page(pdev, re->frag_addr[i], + dma_unmap_page(&pdev->dev, re->frag_addr[i], skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } /* Tell chip where to start receive checksum. 
@@ -1592,10 +1593,9 @@ static int sky2_alloc_buffers(struct sky2_port *sky2) struct sky2_hw *hw = sky2->hw; /* must be power of 2 */ - sky2->tx_le = pci_alloc_consistent(hw->pdev, - sky2->tx_ring_size * - sizeof(struct sky2_tx_le), - &sky2->tx_le_map); + sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + &sky2->tx_le_map, GFP_KERNEL); if (!sky2->tx_le) goto nomem; @@ -1604,8 +1604,8 @@ static int sky2_alloc_buffers(struct sky2_port *sky2) if (!sky2->tx_ring) goto nomem; - sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES, - &sky2->rx_le_map); + sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES, + &sky2->rx_le_map, GFP_KERNEL); if (!sky2->rx_le) goto nomem; @@ -1626,14 +1626,14 @@ static void sky2_free_buffers(struct sky2_port *sky2) sky2_rx_clean(sky2); if (sky2->rx_le) { - pci_free_consistent(hw->pdev, RX_LE_BYTES, - sky2->rx_le, sky2->rx_le_map); + dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le, + sky2->rx_le_map); sky2->rx_le = NULL; } if (sky2->tx_le) { - pci_free_consistent(hw->pdev, - sky2->tx_ring_size * sizeof(struct sky2_tx_le), - sky2->tx_le, sky2->tx_le_map); + dma_free_coherent(&hw->pdev->dev, + sky2->tx_ring_size * sizeof(struct sky2_tx_le), + sky2->tx_le, sky2->tx_le_map); sky2->tx_le = NULL; } kfree(sky2->tx_ring); @@ -1806,13 +1806,11 @@ static unsigned tx_le_req(const struct sk_buff *skb) static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) { if (re->flags & TX_MAP_SINGLE) - pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr), - dma_unmap_len(re, maplen), - PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), DMA_TO_DEVICE); else if (re->flags & TX_MAP_PAGE) - pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr), - dma_unmap_len(re, maplen), - PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr), + dma_unmap_len(re, maplen), DMA_TO_DEVICE); re->flags = 0; } @@ -1840,9 +1838,10 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb, return NETDEV_TX_BUSY; len = skb_headlen(skb); - mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&hw->pdev->dev, skb->data, len, + DMA_TO_DEVICE); - if (pci_dma_mapping_error(hw->pdev, mapping)) + if (dma_mapping_error(&hw->pdev->dev, mapping)) goto mapping_error; slot = sky2->tx_prod; @@ -2464,16 +2463,17 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb = netdev_alloc_skb_ip_align(sky2->netdev, length); if (likely(skb)) { - pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, - length, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr, + length, DMA_FROM_DEVICE); skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; skb_copy_hash(skb, re->skb); __vlan_hwaccel_copy_tag(skb, re->skb); - pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, - length, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&sky2->hw->pdev->dev, + re->data_addr, length, + DMA_FROM_DEVICE); __vlan_hwaccel_clear_tag(re->skb); skb_clear_hash(re->skb); re->skb->ip_summed = CHECKSUM_NONE; @@ -4985,16 +4985,16 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); if (sizeof(dma_addr_t) > sizeof(u32) && - !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) { + !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) { using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, 
DMA_BIT_MASK(64)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (err < 0) { dev_err(&pdev->dev, "unable to obtain 64 bit DMA " "for consistent allocations\n"); goto err_out_free_regions; } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_out_free_regions; @@ -5038,8 +5038,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* ring for status responses */ hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING); - hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), - &hw->st_dma); + hw->st_le = dma_alloc_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + &hw->st_dma, GFP_KERNEL); if (!hw->st_le) { err = -ENOMEM; goto err_out_reset; @@ -5119,8 +5120,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_disable_msi(pdev); free_netdev(dev); err_out_free_pci: - pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), - hw->st_le, hw->st_dma); + dma_free_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); err_out_reset: sky2_write8(hw, B0_CTST, CS_RST_SET); err_out_iounmap: @@ -5164,8 +5166,9 @@ static void sky2_remove(struct pci_dev *pdev) if (hw->flags & SKY2_HW_USE_MSI) pci_disable_msi(pdev); - pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le), - hw->st_le, hw->st_dma); + dma_free_coherent(&pdev->dev, + hw->st_size * sizeof(struct sky2_status_le), + hw->st_le, hw->st_dma); pci_release_regions(pdev); pci_disable_device(pdev); From 2749c69734298905aedb0629f2bc66346f0031f9 Mon Sep 17 00:00:00 2001 From: Eyal Birger Date: Thu, 9 Jul 2020 13:16:51 +0300 Subject: [PATCH 0862/2056] xfrm interface: avoid xi lookup in xfrmi_decode_session() The xfrmi context exists in the netdevice priv context. Avoid looking for it in a separate list. 
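Taken in isolation, the pattern this relies on is a pointer comparison: every xfrm interface shares one net_device_ops table, so a device can be recognised without any driver-private list. A minimal sketch (the helper name is invented; the actual change is in the diff that follows):

#include <linux/netdevice.h>

static const struct net_device_ops xfrmi_netdev_ops;   /* provided by the driver */

static bool netdev_is_xfrmi(const struct net_device *dev)
{
        /* One shared ops table per driver makes the pointer itself a
         * reliable "is this one of ours?" test.
         */
        return dev && dev->netdev_ops == &xfrmi_netdev_ops;
}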
Signed-off-by: Eyal Birger Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index a79eb49a4e0d..36a765eac034 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -47,6 +47,7 @@ static int xfrmi_dev_init(struct net_device *dev); static void xfrmi_dev_setup(struct net_device *dev); static struct rtnl_link_ops xfrmi_link_ops __read_mostly; static unsigned int xfrmi_net_id __read_mostly; +static const struct net_device_ops xfrmi_netdev_ops; struct xfrmi_net { /* lists for storing interfaces in use */ @@ -73,8 +74,7 @@ static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x) static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb, unsigned short family) { - struct xfrmi_net *xfrmn; - struct xfrm_if *xi; + struct net_device *dev; int ifindex = 0; if (!secpath_exists(skb) || !skb->dev) @@ -88,18 +88,21 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb, ifindex = inet_sdif(skb); break; } - if (!ifindex) - ifindex = skb->dev->ifindex; - xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); + if (ifindex) { + struct net *net = xs_net(xfrm_input_state(skb)); - for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { - if (ifindex == xi->dev->ifindex && - (xi->dev->flags & IFF_UP)) - return xi; + dev = dev_get_by_index_rcu(net, ifindex); + } else { + dev = skb->dev; } - return NULL; + if (!dev || !(dev->flags & IFF_UP)) + return NULL; + if (dev->netdev_ops != &xfrmi_netdev_ops) + return NULL; + + return netdev_priv(dev); } static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi) From e98e44562ba2ee994f4fd1e32be7e327edd263ca Mon Sep 17 00:00:00 2001 From: Eyal Birger Date: Thu, 9 Jul 2020 13:16:52 +0300 Subject: [PATCH 0863/2056] xfrm interface: store xfrmi contexts in a hash by if_id xfrmi_lookup() is called on every packet. Using a single list for looking up if_id becomes a bottleneck when having many xfrm interfaces. 
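The bucketing itself is a single hash_32() call; a standalone sketch with the same parameters the patch uses (8 hash bits, hence 256 buckets):

#include <linux/bits.h>
#include <linux/hash.h>

#define XFRMI_HASH_BITS 8
#define XFRMI_HASH_SIZE BIT(XFRMI_HASH_BITS)    /* 256 chains */

static u32 xfrmi_hash(u32 if_id)
{
        /* Pick one of XFRMI_HASH_SIZE chains so a lookup walks only the
         * few interfaces sharing this bucket, not every xfrm interface
         * in the namespace.
         */
        return hash_32(if_id, XFRMI_HASH_BITS);
}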
Signed-off-by: Eyal Birger Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 36a765eac034..96496fdfe3ce 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -49,20 +49,28 @@ static struct rtnl_link_ops xfrmi_link_ops __read_mostly; static unsigned int xfrmi_net_id __read_mostly; static const struct net_device_ops xfrmi_netdev_ops; +#define XFRMI_HASH_BITS 8 +#define XFRMI_HASH_SIZE BIT(XFRMI_HASH_BITS) + struct xfrmi_net { /* lists for storing interfaces in use */ - struct xfrm_if __rcu *xfrmi[1]; + struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE]; }; #define for_each_xfrmi_rcu(start, xi) \ for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next)) +static u32 xfrmi_hash(u32 if_id) +{ + return hash_32(if_id, XFRMI_HASH_BITS); +} + static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x) { struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); struct xfrm_if *xi; - for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { + for_each_xfrmi_rcu(xfrmn->xfrmi[xfrmi_hash(x->if_id)], xi) { if (x->if_id == xi->p.if_id && (xi->dev->flags & IFF_UP)) return xi; @@ -107,7 +115,7 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb, static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi) { - struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0]; + struct xfrm_if __rcu **xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)]; rcu_assign_pointer(xi->next , rtnl_dereference(*xip)); rcu_assign_pointer(*xip, xi); @@ -118,7 +126,7 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi) struct xfrm_if __rcu **xip; struct xfrm_if *iter; - for (xip = &xfrmn->xfrmi[0]; + for (xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)]; (iter = rtnl_dereference(*xip)) != NULL; xip = &iter->next) { if (xi == iter) { @@ -162,7 +170,7 @@ static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p) struct xfrm_if *xi; struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); - for (xip = &xfrmn->xfrmi[0]; + for (xip = &xfrmn->xfrmi[xfrmi_hash(p->if_id)]; (xi = rtnl_dereference(*xip)) != NULL; xip = &xi->next) if (xi->p.if_id == p->if_id) @@ -761,11 +769,14 @@ static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); struct xfrm_if __rcu **xip; struct xfrm_if *xi; + int i; - for (xip = &xfrmn->xfrmi[0]; - (xi = rtnl_dereference(*xip)) != NULL; - xip = &xi->next) - unregister_netdevice_queue(xi->dev, &list); + for (i = 0; i < XFRMI_HASH_SIZE; i++) { + for (xip = &xfrmn->xfrmi[i]; + (xi = rtnl_dereference(*xip)) != NULL; + xip = &xi->next) + unregister_netdevice_queue(xi->dev, &list); + } } unregister_netdevice_many(&list); rtnl_unlock(); From 33bfd94a05abb5a63e323dd1454bc580d4bf992c Mon Sep 17 00:00:00 2001 From: Joseph Hwang Date: Mon, 13 Jul 2020 15:45:29 +0800 Subject: [PATCH 0864/2056] Bluetooth: btusb: add Realtek 8822CE to usb_device_id table This patch adds the Realtek 8822CE controller to the usb_device_id table to support the wideband speech capability. 
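The driver_info value attached to a usb_device_id entry is an opaque per-entry cookie the driver reads back at probe time; btusb treats it as a bitmask of quirks, and the wideband-speech bit is expected to surface as an HCI quirk roughly along the lines sketched below. This is a paraphrase for illustration, not a quote of the driver: BTUSB_WIDEBAND_SPEECH is a driver-local flag and its value here is a placeholder.

#include <linux/bits.h>
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define BTUSB_WIDEBAND_SPEECH   BIT(21) /* illustrative value; defined in btusb.c */

static void example_apply_table_quirks(struct hci_dev *hdev,
                                       const struct usb_device_id *id)
{
        /* id->driver_info carries whatever the matching table entry set,
         * e.g. BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH for the new 8822CE
         * entry above.
         */
        if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
                set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
}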
Signed-off-by: Joseph Hwang Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index faa863dd5d0a..aa0bc9942afd 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -359,6 +359,10 @@ static const struct usb_device_id blacklist_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), .driver_info = BTUSB_IGNORE }, + /* Realtek 8822CE Bluetooth devices */ + { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), .driver_info = BTUSB_REALTEK }, From 3344537f614b966f726c1ec044d1c70a8cabe178 Mon Sep 17 00:00:00 2001 From: Venkata Lakshmi Narayana Gubba Date: Sat, 11 Jul 2020 17:01:12 +0530 Subject: [PATCH 0865/2056] Bluetooth: hci_qca: Bug fixes for SSR 1.During SSR for command time out if BT SoC goes to inresponsive state, power cycling of BT SoC was not happening. Given the fix by sending hw error event to reset the BT SoC. 2.If SSR is triggered then ignore the transmit data requests to BT SoC until SSR is completed. Signed-off-by: Venkata Lakshmi Narayana Gubba Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 40 +++++++++++++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 7e395469ca4f..3d1300286993 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -72,7 +72,8 @@ enum qca_flags { QCA_DROP_VENDOR_EVENT, QCA_SUSPENDING, QCA_MEMDUMP_COLLECTION, - QCA_HW_ERROR_EVENT + QCA_HW_ERROR_EVENT, + QCA_SSR_TRIGGERED }; enum qca_capabilities { @@ -854,6 +855,13 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, qca->tx_ibs_state); + if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { + /* As SSR is in progress, ignore the packets */ + bt_dev_dbg(hu->hdev, "SSR is in progress"); + kfree_skb(skb); + return 0; + } + /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); @@ -1123,6 +1131,7 @@ static int qca_controller_memdump_event(struct hci_dev *hdev, struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; + set_bit(QCA_SSR_TRIGGERED, &qca->flags); skb_queue_tail(&qca->rx_memdump_q, skb); queue_work(qca->workqueue, &qca->ctrl_memdump_evt); @@ -1481,6 +1490,7 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code) struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; + set_bit(QCA_SSR_TRIGGERED, &qca->flags); set_bit(QCA_HW_ERROR_EVENT, &qca->flags); bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); @@ -1529,10 +1539,30 @@ static void qca_cmd_timeout(struct hci_dev *hdev) struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; - if (qca->memdump_state == QCA_MEMDUMP_IDLE) + set_bit(QCA_SSR_TRIGGERED, &qca->flags); + if (qca->memdump_state == QCA_MEMDUMP_IDLE) { + set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); qca_send_crashbuffer(hu); - else - bt_dev_info(hdev, "Dump collection is in process"); + qca_wait_for_dump_collection(hdev); + } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { + /* Let us wait here until memory dump collected or + * memory dump timer expired. 
+ */ + bt_dev_info(hdev, "waiting for dump to complete"); + qca_wait_for_dump_collection(hdev); + } + + mutex_lock(&qca->hci_memdump_lock); + if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { + qca->memdump_state = QCA_MEMDUMP_TIMEOUT; + if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { + /* Inject hw error event to reset the device + * and driver. + */ + hci_reset_dev(hu->hdev); + } + } + mutex_unlock(&qca->hci_memdump_lock); } static int qca_wcn3990_init(struct hci_uart *hu) @@ -1643,6 +1673,8 @@ static int qca_setup(struct hci_uart *hu) if (ret) return ret; + clear_bit(QCA_SSR_TRIGGERED, &qca->flags); + if (qca_is_wcn399x(soc_type)) { set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); From 75bbd2ea50ba1c5d9da878a17e92eac02fe0fd3a Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 17:39:18 -0400 Subject: [PATCH 0866/2056] Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_evt() Check `num_rsp` before using it as for-loop counter. Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 927bde511170..b97d0247983c 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2517,7 +2517,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) BT_DBG("%s num_rsp %d", hdev->name, num_rsp); - if (!num_rsp) + if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1) return; if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) From 629b49c848ee71244203934347bd7730b0ddee8d Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 10 Jul 2020 17:45:26 -0400 Subject: [PATCH 0867/2056] Bluetooth: Prevent out-of-bounds read in hci_inquiry_result_with_rssi_evt() Check `num_rsp` before using it as for-loop counter. Add `unlock` label. Cc: stable@vger.kernel.org Signed-off-by: Peilin Ye Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index b97d0247983c..61f8c4d12028 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -4159,6 +4159,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct inquiry_info_with_rssi_and_pscan_mode *info; info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4180,6 +4183,9 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } else { struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); + if (skb->len < num_rsp * sizeof(*info) + 1) + goto unlock; + for (; num_rsp; num_rsp--, info++) { u32 flags; @@ -4200,6 +4206,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, } } +unlock: hci_dev_unlock(hdev); } From fbbb68de80a45ad66b66600bee275485c5073aa7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:21 +0200 Subject: [PATCH 0868/2056] bpf: Add resolve_btfids tool to resolve BTF IDs in ELF object The resolve_btfids tool scans elf object for .BTF_ids section and resolves its symbols with BTF ID values. It will be used to during linking time to resolve arrays of BTF ID values used in verifier, so these IDs do not need to be resolved in runtime. The expected layout of .BTF_ids section is described in main.c header. Related kernel changes are coming in following changes. Build issue reported by 0-DAY CI Kernel Test Service. 
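To make the symbol naming convention concrete before the sources below, here is a small userspace sketch that decomposes one such symbol into its kind and type name. demo_parse is a hypothetical helper; the real handling lives in is_btf_id(), get_id() and add_symbol() in main.c.

#include <stdio.h>
#include <string.h>

/* "__BTF_ID__func__vfs_close__1" -> kind "func", name "vfs_close" */
static int demo_parse(const char *sym, char *kind, size_t klen,
		      char *name, size_t nlen)
{
	const char *p, *sep, *uniq;

	if (strncmp(sym, "__BTF_ID__", 10))
		return -1;
	p = sym + 10;
	sep = strstr(p, "__");
	if (!sep)
		return -1;
	snprintf(kind, klen, "%.*s", (int)(sep - p), p);

	/* drop the trailing "__<counter>" that keeps each symbol unique */
	uniq = strrchr(sep + 2, '_');
	if (!uniq || uniq - (sep + 2) < 2 || uniq[-1] != '_')
		return -1;
	snprintf(name, nlen, "%.*s", (int)((uniq - 1) - (sep + 2)), sep + 2);
	return 0;
}

int main(void)
{
	char kind[16], name[64];

	if (!demo_parse("__BTF_ID__struct__sk_buff__1",
			kind, sizeof(kind), name, sizeof(name)))
		printf("kind=%s name=%s\n", kind, name); /* struct / sk_buff */
	return 0;
}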
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200711215329.41165-2-jolsa@kernel.org --- tools/bpf/resolve_btfids/Build | 10 + tools/bpf/resolve_btfids/Makefile | 77 ++++ tools/bpf/resolve_btfids/main.c | 721 ++++++++++++++++++++++++++++++ 3 files changed, 808 insertions(+) create mode 100644 tools/bpf/resolve_btfids/Build create mode 100644 tools/bpf/resolve_btfids/Makefile create mode 100644 tools/bpf/resolve_btfids/main.c diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build new file mode 100644 index 000000000000..ae82da03f9bf --- /dev/null +++ b/tools/bpf/resolve_btfids/Build @@ -0,0 +1,10 @@ +resolve_btfids-y += main.o +resolve_btfids-y += rbtree.o +resolve_btfids-y += zalloc.o +resolve_btfids-y += string.o +resolve_btfids-y += ctype.o +resolve_btfids-y += str_error_r.o + +$(OUTPUT)%.o: ../../lib/%.c FORCE + $(call rule_mkdir) + $(call if_changed_dep,cc_o_c) diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile new file mode 100644 index 000000000000..948378ca73d4 --- /dev/null +++ b/tools/bpf/resolve_btfids/Makefile @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: GPL-2.0-only +include ../../scripts/Makefile.include + +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + +ifeq ($(V),1) + Q = + msg = +else + Q = @ + msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))"; + MAKEFLAGS=--no-print-directory +endif + +OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ + +LIBBPF_SRC := $(srctree)/tools/lib/bpf/ +SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ + +BPFOBJ := $(OUTPUT)/libbpf.a +SUBCMDOBJ := $(OUTPUT)/libsubcmd.a + +BINARY := $(OUTPUT)/resolve_btfids +BINARY_IN := $(BINARY)-in.o + +all: $(BINARY) + +$(OUTPUT): + $(call msg,MKDIR,,$@) + $(Q)mkdir -p $(OUTPUT) + +$(SUBCMDOBJ): fixdep FORCE + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) + +$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT) + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) + +CFLAGS := -g \ + -I$(srctree)/tools/include \ + -I$(srctree)/tools/include/uapi \ + -I$(LIBBPF_SRC) \ + -I$(SUBCMD_SRC) + +LIBS = -lelf -lz + +export srctree OUTPUT CFLAGS Q +include $(srctree)/tools/build/Makefile.include + +$(BINARY_IN): fixdep FORCE + $(Q)$(MAKE) $(build)=resolve_btfids + +$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN) + $(call msg,LINK,$@) + $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS) + +libsubcmd-clean: + $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean + +libbpf-clean: + $(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean + +clean: libsubcmd-clean libbpf-clean fixdep-clean + $(call msg,CLEAN,$(BINARY)) + $(Q)$(RM) -f $(BINARY); \ + find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM) + +tags: + $(call msg,GEN,,tags) + $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC) + +FORCE: + +.PHONY: all FORCE clean tags diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c new file mode 100644 index 000000000000..6956b6350cad --- /dev/null +++ b/tools/bpf/resolve_btfids/main.c @@ -0,0 +1,721 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +/* + * resolve_btfids scans Elf object for .BTF_ids section and resolves + * its symbols with BTF ID values. 
+ * + * Each symbol points to 4 bytes data and is expected to have + * following name syntax: + * + * __BTF_ID____[__] + * + * type is: + * + * func - lookup BTF_KIND_FUNC symbol with name + * and store its ID into the data: + * + * __BTF_ID__func__vfs_close__1: + * .zero 4 + * + * struct - lookup BTF_KIND_STRUCT symbol with name + * and store its ID into the data: + * + * __BTF_ID__struct__sk_buff__1: + * .zero 4 + * + * union - lookup BTF_KIND_UNION symbol with name + * and store its ID into the data: + * + * __BTF_ID__union__thread_union__1: + * .zero 4 + * + * typedef - lookup BTF_KIND_TYPEDEF symbol with name + * and store its ID into the data: + * + * __BTF_ID__typedef__pid_t__1: + * .zero 4 + * + * set - store symbol size into first 4 bytes and sort following + * ID list + * + * __BTF_ID__set__list: + * .zero 4 + * list: + * __BTF_ID__func__vfs_getattr__3: + * .zero 4 + * __BTF_ID__func__vfs_fallocate__4: + * .zero 4 + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define BTF_IDS_SECTION ".BTF_ids" +#define BTF_ID "__BTF_ID__" + +#define BTF_STRUCT "struct" +#define BTF_UNION "union" +#define BTF_TYPEDEF "typedef" +#define BTF_FUNC "func" +#define BTF_SET "set" + +#define ADDR_CNT 100 + +struct btf_id { + struct rb_node rb_node; + char *name; + union { + int id; + int cnt; + }; + int addr_cnt; + Elf64_Addr addr[ADDR_CNT]; +}; + +struct object { + const char *path; + const char *btf; + + struct { + int fd; + Elf *elf; + Elf_Data *symbols; + Elf_Data *idlist; + int symbols_shndx; + int idlist_shndx; + size_t strtabidx; + unsigned long idlist_addr; + } efile; + + struct rb_root sets; + struct rb_root structs; + struct rb_root unions; + struct rb_root typedefs; + struct rb_root funcs; + + int nr_funcs; + int nr_structs; + int nr_unions; + int nr_typedefs; +}; + +static int verbose; + +int eprintf(int level, int var, const char *fmt, ...) +{ + va_list args; + int ret; + + if (var >= level) { + va_start(args, fmt); + ret = vfprintf(stderr, fmt, args); + va_end(args); + } + return ret; +} + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_debug(fmt, ...) \ + eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debugN(n, fmt, ...) \ + eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__) + +static bool is_btf_id(const char *name) +{ + return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1); +} + +static struct btf_id *btf_id__find(struct rb_root *root, const char *name) +{ + struct rb_node *p = root->rb_node; + struct btf_id *id; + int cmp; + + while (p) { + id = rb_entry(p, struct btf_id, rb_node); + cmp = strcmp(id->name, name); + if (cmp < 0) + p = p->rb_left; + else if (cmp > 0) + p = p->rb_right; + else + return id; + } + return NULL; +} + +static struct btf_id* +btf_id__add(struct rb_root *root, char *name, bool unique) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct btf_id *id; + int cmp; + + while (*p != NULL) { + parent = *p; + id = rb_entry(parent, struct btf_id, rb_node); + cmp = strcmp(id->name, name); + if (cmp < 0) + p = &(*p)->rb_left; + else if (cmp > 0) + p = &(*p)->rb_right; + else + return unique ? 
NULL : id; + } + + id = zalloc(sizeof(*id)); + if (id) { + pr_debug("adding symbol %s\n", name); + id->name = name; + rb_link_node(&id->rb_node, parent, p); + rb_insert_color(&id->rb_node, root); + } + return id; +} + +static char *get_id(const char *prefix_end) +{ + /* + * __BTF_ID__func__vfs_truncate__0 + * prefix_end = ^ + */ + char *p, *id = strdup(prefix_end + sizeof("__") - 1); + + if (id) { + /* + * __BTF_ID__func__vfs_truncate__0 + * id = ^ + * + * cut the unique id part + */ + p = strrchr(id, '_'); + p--; + if (*p != '_') { + free(id); + return NULL; + } + *p = '\0'; + } + return id; +} + +static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size) +{ + char *id; + + id = get_id(name + size); + if (!id) { + pr_err("FAILED to parse symbol name: %s\n", name); + return NULL; + } + + return btf_id__add(root, id, false); +} + +static int elf_collect(struct object *obj) +{ + Elf_Scn *scn = NULL; + size_t shdrstrndx; + int idx = 0; + Elf *elf; + int fd; + + fd = open(obj->path, O_RDWR, 0666); + if (fd == -1) { + pr_err("FAILED cannot open %s: %s\n", + obj->path, strerror(errno)); + return -1; + } + + elf_version(EV_CURRENT); + + elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL); + if (!elf) { + pr_err("FAILED cannot create ELF descriptor: %s\n", + elf_errmsg(-1)); + return -1; + } + + obj->efile.fd = fd; + obj->efile.elf = elf; + + elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT); + + if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) { + pr_err("FAILED cannot get shdr str ndx\n"); + return -1; + } + + /* + * Scan all the elf sections and look for save data + * from .BTF_ids section and symbols. + */ + while ((scn = elf_nextscn(elf, scn)) != NULL) { + Elf_Data *data; + GElf_Shdr sh; + char *name; + + idx++; + if (gelf_getshdr(scn, &sh) != &sh) { + pr_err("FAILED get section(%d) header\n", idx); + return -1; + } + + name = elf_strptr(elf, shdrstrndx, sh.sh_name); + if (!name) { + pr_err("FAILED get section(%d) name\n", idx); + return -1; + } + + data = elf_getdata(scn, 0); + if (!data) { + pr_err("FAILED to get section(%d) data from %s\n", + idx, name); + return -1; + } + + pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n", + idx, name, (unsigned long) data->d_size, + (int) sh.sh_link, (unsigned long) sh.sh_flags, + (int) sh.sh_type); + + if (sh.sh_type == SHT_SYMTAB) { + obj->efile.symbols = data; + obj->efile.symbols_shndx = idx; + obj->efile.strtabidx = sh.sh_link; + } else if (!strcmp(name, BTF_IDS_SECTION)) { + obj->efile.idlist = data; + obj->efile.idlist_shndx = idx; + obj->efile.idlist_addr = sh.sh_addr; + } + } + + return 0; +} + +static int symbols_collect(struct object *obj) +{ + Elf_Scn *scn = NULL; + int n, i, err = 0; + GElf_Shdr sh; + char *name; + + scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx); + if (!scn) + return -1; + + if (gelf_getshdr(scn, &sh) != &sh) + return -1; + + n = sh.sh_size / sh.sh_entsize; + + /* + * Scan symbols and look for the ones starting with + * __BTF_ID__* over .BTF_ids section. 
+ */ + for (i = 0; !err && i < n; i++) { + char *tmp, *prefix; + struct btf_id *id; + GElf_Sym sym; + int err = -1; + + if (!gelf_getsym(obj->efile.symbols, i, &sym)) + return -1; + + if (sym.st_shndx != obj->efile.idlist_shndx) + continue; + + name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, + sym.st_name); + + if (!is_btf_id(name)) + continue; + + /* + * __BTF_ID__TYPE__vfs_truncate__0 + * prefix = ^ + */ + prefix = name + sizeof(BTF_ID) - 1; + + /* struct */ + if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) { + obj->nr_structs++; + id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1); + /* union */ + } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) { + obj->nr_unions++; + id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1); + /* typedef */ + } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) { + obj->nr_typedefs++; + id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1); + /* func */ + } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) { + obj->nr_funcs++; + id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1); + /* set */ + } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) { + id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1); + /* + * SET objects store list's count, which is encoded + * in symbol's size, together with 'cnt' field hence + * that - 1. + */ + if (id) + id->cnt = sym.st_size / sizeof(int) - 1; + } else { + pr_err("FAILED unsupported prefix %s\n", prefix); + return -1; + } + + if (!id) + return -ENOMEM; + + if (id->addr_cnt >= ADDR_CNT) { + pr_err("FAILED symbol %s crossed the number of allowed lists", + id->name); + return -1; + } + id->addr[id->addr_cnt++] = sym.st_value; + } + + return 0; +} + +static struct btf *btf__parse_raw(const char *file) +{ + struct btf *btf; + struct stat st; + __u8 *buf; + FILE *f; + + if (stat(file, &st)) + return NULL; + + f = fopen(file, "rb"); + if (!f) + return NULL; + + buf = malloc(st.st_size); + if (!buf) { + btf = ERR_PTR(-ENOMEM); + goto exit_close; + } + + if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) { + btf = ERR_PTR(-EINVAL); + goto exit_free; + } + + btf = btf__new(buf, st.st_size); + +exit_free: + free(buf); +exit_close: + fclose(f); + return btf; +} + +static bool is_btf_raw(const char *file) +{ + __u16 magic = 0; + int fd, nb_read; + + fd = open(file, O_RDONLY); + if (fd < 0) + return false; + + nb_read = read(fd, &magic, sizeof(magic)); + close(fd); + return nb_read == sizeof(magic) && magic == BTF_MAGIC; +} + +static struct btf *btf_open(const char *path) +{ + if (is_btf_raw(path)) + return btf__parse_raw(path); + else + return btf__parse_elf(path, NULL); +} + +static int symbols_resolve(struct object *obj) +{ + int nr_typedefs = obj->nr_typedefs; + int nr_structs = obj->nr_structs; + int nr_unions = obj->nr_unions; + int nr_funcs = obj->nr_funcs; + int err, type_id; + struct btf *btf; + __u32 nr; + + btf = btf_open(obj->btf ?: obj->path); + err = libbpf_get_error(btf); + if (err) { + pr_err("FAILED: load BTF from %s: %s", + obj->path, strerror(err)); + return -1; + } + + err = -1; + nr = btf__get_nr_types(btf); + + /* + * Iterate all the BTF types and search for collected symbol IDs. 
+ */ + for (type_id = 1; type_id <= nr; type_id++) { + const struct btf_type *type; + struct rb_root *root; + struct btf_id *id; + const char *str; + int *nr; + + type = btf__type_by_id(btf, type_id); + if (!type) { + pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n", + type_id); + goto out; + } + + if (btf_is_func(type) && nr_funcs) { + nr = &nr_funcs; + root = &obj->funcs; + } else if (btf_is_struct(type) && nr_structs) { + nr = &nr_structs; + root = &obj->structs; + } else if (btf_is_union(type) && nr_unions) { + nr = &nr_unions; + root = &obj->unions; + } else if (btf_is_typedef(type) && nr_typedefs) { + nr = &nr_typedefs; + root = &obj->typedefs; + } else + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n", + type_id); + goto out; + } + + id = btf_id__find(root, str); + if (id) { + id->id = type_id; + (*nr)--; + } + } + + err = 0; +out: + btf__free(btf); + return err; +} + +static int id_patch(struct object *obj, struct btf_id *id) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + int i; + + if (!id->id) { + pr_err("FAILED unresolved symbol %s\n", id->name); + return -EINVAL; + } + + for (i = 0; i < id->addr_cnt; i++) { + unsigned long addr = id->addr[i]; + unsigned long idx = addr - obj->efile.idlist_addr; + + pr_debug("patching addr %5lu: ID %7d [%s]\n", + idx, id->id, id->name); + + if (idx >= data->d_size) { + pr_err("FAILED patching index %lu out of bounds %lu\n", + idx, data->d_size); + return -1; + } + + idx = idx / sizeof(int); + ptr[idx] = id->id; + } + + return 0; +} + +static int __symbols_patch(struct object *obj, struct rb_root *root) +{ + struct rb_node *next; + struct btf_id *id; + + next = rb_first(root); + while (next) { + id = rb_entry(next, struct btf_id, rb_node); + + if (id_patch(obj, id)) + return -1; + + next = rb_next(next); + } + return 0; +} + +static int cmp_id(const void *pa, const void *pb) +{ + const int *a = pa, *b = pb; + + return *a - *b; +} + +static int sets_patch(struct object *obj) +{ + Elf_Data *data = obj->efile.idlist; + int *ptr = data->d_buf; + struct rb_node *next; + + next = rb_first(&obj->sets); + while (next) { + unsigned long addr, idx; + struct btf_id *id; + int *base; + int cnt; + + id = rb_entry(next, struct btf_id, rb_node); + addr = id->addr[0]; + idx = addr - obj->efile.idlist_addr; + + /* sets are unique */ + if (id->addr_cnt != 1) { + pr_err("FAILED malformed data for set '%s'\n", + id->name); + return -1; + } + + idx = idx / sizeof(int); + base = &ptr[idx] + 1; + cnt = ptr[idx]; + + pr_debug("sorting addr %5lu: cnt %6d [%s]\n", + (idx + 1) * sizeof(int), cnt, id->name); + + qsort(base, cnt, sizeof(int), cmp_id); + + next = rb_next(next); + } +} + +static int symbols_patch(struct object *obj) +{ + int err; + + if (__symbols_patch(obj, &obj->structs) || + __symbols_patch(obj, &obj->unions) || + __symbols_patch(obj, &obj->typedefs) || + __symbols_patch(obj, &obj->funcs) || + __symbols_patch(obj, &obj->sets)) + return -1; + + if (sets_patch(obj)) + return -1; + + elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY); + + err = elf_update(obj->efile.elf, ELF_C_WRITE); + if (err < 0) { + pr_err("FAILED elf_update(WRITE): %s\n", + elf_errmsg(-1)); + } + + pr_debug("update %s for %s\n", + err >= 0 ? "ok" : "failed", obj->path); + return err < 0 ? 
-1 : 0; +} + +static const char * const resolve_btfids_usage[] = { + "resolve_btfids [] ", + NULL +}; + +int main(int argc, const char **argv) +{ + bool no_fail = false; + struct object obj = { + .efile = { + .idlist_shndx = -1, + .symbols_shndx = -1, + }, + .structs = RB_ROOT, + .unions = RB_ROOT, + .typedefs = RB_ROOT, + .funcs = RB_ROOT, + .sets = RB_ROOT, + }; + struct option btfid_options[] = { + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show errors, etc)"), + OPT_STRING(0, "btf", &obj.btf, "BTF data", + "BTF data"), + OPT_BOOLEAN(0, "no-fail", &no_fail, + "do not fail if " BTF_IDS_SECTION " section is not found"), + OPT_END() + }; + int err = -1; + + argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage, + PARSE_OPT_STOP_AT_NON_OPTION); + if (argc != 1) + usage_with_options(resolve_btfids_usage, btfid_options); + + obj.path = argv[0]; + + if (elf_collect(&obj)) + goto out; + + /* + * We did not find .BTF_ids section or symbols section, + * nothing to do.. + */ + if (obj.efile.idlist_shndx == -1 || + obj.efile.symbols_shndx == -1) { + if (no_fail) + return 0; + pr_err("FAILED to find needed sections\n"); + return -1; + } + + if (symbols_collect(&obj)) + goto out; + + if (symbols_resolve(&obj)) + goto out; + + if (symbols_patch(&obj)) + goto out; + + err = 0; +out: + if (obj.efile.elf) + elf_end(obj.efile.elf); + close(obj.efile.fd); + return err; +} From 33a57ce0a54d498275f432db04850001175dfdfa Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:22 +0200 Subject: [PATCH 0869/2056] bpf: Compile resolve_btfids tool at kernel compilation start The resolve_btfids tool will be used during the vmlinux linking, so it's necessary it's ready for it. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-3-jolsa@kernel.org --- Makefile | 22 ++++++++++++++++++---- tools/Makefile | 3 +++ tools/bpf/Makefile | 9 ++++++++- 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index ac2c61c37a73..017e775b3288 100644 --- a/Makefile +++ b/Makefile @@ -1053,9 +1053,10 @@ export mod_sign_cmd HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf) +has_libelf = $(call try-run,\ + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) + ifdef CONFIG_STACK_VALIDATION - has_libelf := $(call try-run,\ - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0) ifeq ($(has_libelf),1) objtool_target := tools/objtool FORCE else @@ -1064,6 +1065,14 @@ ifdef CONFIG_STACK_VALIDATION endif endif +ifdef CONFIG_DEBUG_INFO_BTF + ifeq ($(has_libelf),1) + resolve_btfids_target := tools/bpf/resolve_btfids FORCE + else + ERROR_RESOLVE_BTFIDS := 1 + endif +endif + PHONY += prepare0 export MODORDER := $(extmod-prefix)modules.order @@ -1175,7 +1184,7 @@ prepare0: archprepare $(Q)$(MAKE) $(build)=. # All the preparing.. 
-prepare: prepare0 prepare-objtool +prepare: prepare0 prepare-objtool prepare-resolve_btfids # Support for using generic headers in asm-generic asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj @@ -1188,7 +1197,7 @@ uapi-asm-generic: $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \ generic=include/uapi/asm-generic -PHONY += prepare-objtool +PHONY += prepare-objtool prepare-resolve_btfids prepare-objtool: $(objtool_target) ifeq ($(SKIP_STACK_VALIDATION),1) ifdef CONFIG_UNWINDER_ORC @@ -1199,6 +1208,11 @@ else endif endif +prepare-resolve_btfids: $(resolve_btfids_target) +ifeq ($(ERROR_RESOLVE_BTFIDS),1) + @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2 + @false +endif # Generate some files # --------------------------------------------------------------------------- diff --git a/tools/Makefile b/tools/Makefile index bd778812e915..85af6ebbce91 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -67,6 +67,9 @@ cpupower: FORCE cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE $(call descend,$@) +bpf/%: FORCE + $(call descend,$@) + liblockdep: FORCE $(call descend,lib/lockdep) diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile index 6df1850f8353..74bc9a105225 100644 --- a/tools/bpf/Makefile +++ b/tools/bpf/Makefile @@ -123,5 +123,12 @@ runqslower_install: runqslower_clean: $(call descend,runqslower,clean) +resolve_btfids: + $(call descend,resolve_btfids) + +resolve_btfids_clean: + $(call descend,resolve_btfids,clean) + .PHONY: all install clean bpftool bpftool_install bpftool_clean \ - runqslower runqslower_install runqslower_clean + runqslower runqslower_install runqslower_clean \ + resolve_btfids resolve_btfids_clean From 5a2798ab32ba2952cfe25701ee460bccbd434c75 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:23 +0200 Subject: [PATCH 0870/2056] bpf: Add BTF_ID_LIST/BTF_ID/BTF_ID_UNUSED macros Adding support to generate .BTF_ids section that will hold BTF ID lists for verifier. Adding macros that will help to define lists of BTF ID values placed in .BTF_ids section. They are initially filled with zeros (during compilation) and resolved later during the linking phase by resolve_btfids tool. Following defines list of one BTF ID value: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) It also defines following variable to access the list: extern u32 bpf_skb_output_btf_ids[]; The BTF_ID_UNUSED macro defines 4 zero bytes. It's used when we want to define 'unused' entry in BTF_ID_LIST, like: BTF_ID_LIST(bpf_skb_output_btf_ids) BTF_ID(struct, sk_buff) BTF_ID_UNUSED BTF_ID(struct, task_struct) Suggested-by: Andrii Nakryiko Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-4-jolsa@kernel.org --- include/asm-generic/vmlinux.lds.h | 4 ++ include/linux/btf_ids.h | 87 +++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 include/linux/btf_ids.h diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index db600ef218d7..0be2ee265931 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -641,6 +641,10 @@ __start_BTF = .; \ *(.BTF) \ __stop_BTF = .; \ + } \ + . 
= ALIGN(4); \ + .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ + *(.BTF_ids) \ } #else #define BTF diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h new file mode 100644 index 000000000000..fe019774f8a7 --- /dev/null +++ b/include/linux/btf_ids.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#include /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name) \ +extern u32 name[]; + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + + +#endif From c9a0f3b85e09dd16665b639cb884490410619434 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:24 +0200 Subject: [PATCH 0871/2056] bpf: Resolve BTF IDs in vmlinux image Using BTF_ID_LIST macro to define lists for several helpers using BTF arguments. And running resolve_btfids on vmlinux elf object during linking, so the .BTF_ids section gets the IDs resolved. 
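Taken out of context, the define-then-consume pattern that the hunks below switch the helpers to looks roughly like this. It is a sketch that assumes the <linux/btf_ids.h> header introduced earlier in the series, uses hypothetical demo_* names, and is not meant to build outside the kernel tree.

#include <linux/types.h>
#include <linux/btf_ids.h>

/* emits one 4-byte slot in .BTF_ids per BTF_ID(); resolve_btfids fills
 * the slot with the BTF type ID of 'struct seq_file' at link time */
BTF_ID_LIST(demo_seq_file_btf_ids)
BTF_ID(struct, seq_file)

/* at runtime the slot already holds the resolved ID */
static u32 demo_seq_file_type_id(void)
{
	return demo_seq_file_btf_ids[0];
}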
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-5-jolsa@kernel.org --- Makefile | 3 ++- kernel/bpf/stackmap.c | 5 ++++- kernel/trace/bpf_trace.c | 9 +++++++-- net/core/filter.c | 9 +++++++-- scripts/link-vmlinux.sh | 6 ++++++ 5 files changed, 26 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 017e775b3288..462f1a75fcb3 100644 --- a/Makefile +++ b/Makefile @@ -448,6 +448,7 @@ OBJSIZE = $(CROSS_COMPILE)size STRIP = $(CROSS_COMPILE)strip endif PAHOLE = pahole +RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids LEX = flex YACC = bison AWK = awk @@ -510,7 +511,7 @@ GCC_PLUGINS_CFLAGS := CLANG_FLAGS := export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index a6c361ed7937..48d8e739975f 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "percpu_freelist.h" #define STACK_CREATE_FLAG_MASK \ @@ -576,7 +577,9 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, return __bpf_get_stack(regs, task, buf, size, flags); } -static int bpf_get_task_stack_btf_ids[5]; +BTF_ID_LIST(bpf_get_task_stack_btf_ids) +BTF_ID(struct, task_struct) + const struct bpf_func_proto bpf_get_task_stack_proto = { .func = bpf_get_task_stack, .gpl_only = false, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e0b7775039ab..e178e8e32b33 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -13,6 +13,7 @@ #include #include #include +#include #include @@ -710,7 +711,9 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, return err; } -static int bpf_seq_printf_btf_ids[5]; +BTF_ID_LIST(bpf_seq_printf_btf_ids) +BTF_ID(struct, seq_file) + static const struct bpf_func_proto bpf_seq_printf_proto = { .func = bpf_seq_printf, .gpl_only = true, @@ -728,7 +731,9 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len) return seq_write(m, data, len) ? 
-EOVERFLOW : 0; } -static int bpf_seq_write_btf_ids[5]; +BTF_ID_LIST(bpf_seq_write_btf_ids) +BTF_ID(struct, seq_file) + static const struct bpf_func_proto bpf_seq_write_proto = { .func = bpf_seq_write, .gpl_only = true, diff --git a/net/core/filter.c b/net/core/filter.c index ddcc0d6209e1..4e572441e64a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -75,6 +75,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -3779,7 +3780,9 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; -static int bpf_skb_output_btf_ids[5]; +BTF_ID_LIST(bpf_skb_output_btf_ids) +BTF_ID(struct, sk_buff) + const struct bpf_func_proto bpf_skb_output_proto = { .func = bpf_skb_event_output, .gpl_only = true, @@ -4173,7 +4176,9 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = { .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; -static int bpf_xdp_output_btf_ids[5]; +BTF_ID_LIST(bpf_xdp_output_btf_ids) +BTF_ID(struct, xdp_buff) + const struct bpf_func_proto bpf_xdp_output_proto = { .func = bpf_xdp_event_output, .gpl_only = true, diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh index 92dd745906f4..e26f02dbedee 100755 --- a/scripts/link-vmlinux.sh +++ b/scripts/link-vmlinux.sh @@ -336,6 +336,12 @@ fi vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o} +# fill in BTF IDs +if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then +info BTFIDS vmlinux +${RESOLVE_BTFIDS} vmlinux +fi + if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then info SORTTAB vmlinux if ! sorttable vmlinux; then From 138b9a0511c789f2451ff1d80e7fd3f9eef3a9e3 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:25 +0200 Subject: [PATCH 0872/2056] bpf: Remove btf_id helpers resolving Now when we moved the helpers btf_id arrays into .BTF_ids section, we can remove the code that resolve those IDs in runtime. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-6-jolsa@kernel.org --- kernel/bpf/btf.c | 89 +++--------------------------------------------- 1 file changed, 5 insertions(+), 84 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 4c3007f428b1..71140b73ae3c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -4079,96 +4079,17 @@ int btf_struct_access(struct bpf_verifier_log *log, return -EINVAL; } -static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn, - int arg) -{ - char fnname[KSYM_SYMBOL_LEN + 4] = "btf_"; - const struct btf_param *args; - const struct btf_type *t; - const char *tname, *sym; - u32 btf_id, i; - - if (IS_ERR(btf_vmlinux)) { - bpf_log(log, "btf_vmlinux is malformed\n"); - return -EINVAL; - } - - sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4); - if (!sym) { - bpf_log(log, "kernel doesn't have kallsyms\n"); - return -EFAULT; - } - - for (i = 1; i <= btf_vmlinux->nr_types; i++) { - t = btf_type_by_id(btf_vmlinux, i); - if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) - continue; - tname = __btf_name_by_offset(btf_vmlinux, t->name_off); - if (!strcmp(tname, fnname)) - break; - } - if (i > btf_vmlinux->nr_types) { - bpf_log(log, "helper %s type is not found\n", fnname); - return -ENOENT; - } - - t = btf_type_by_id(btf_vmlinux, t->type); - if (!btf_type_is_ptr(t)) - return -EFAULT; - t = btf_type_by_id(btf_vmlinux, t->type); - if (!btf_type_is_func_proto(t)) - return -EFAULT; - - args = (const struct btf_param *)(t + 1); - if (arg >= btf_type_vlen(t)) { - bpf_log(log, "bpf helper %s doesn't have %d-th argument\n", - fnname, arg); - return -EINVAL; - } - - t = btf_type_by_id(btf_vmlinux, args[arg].type); - if (!btf_type_is_ptr(t) || !t->type) { - /* anything but the pointer to struct is a helper config bug */ - bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n"); - return -EFAULT; - } - btf_id = t->type; - t = btf_type_by_id(btf_vmlinux, t->type); - /* skip modifiers */ - while (btf_type_is_modifier(t)) { - btf_id = t->type; - t = btf_type_by_id(btf_vmlinux, t->type); - } - if (!btf_type_is_struct(t)) { - bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n"); - return -EFAULT; - } - bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4, - arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off)); - return btf_id; -} - int btf_resolve_helper_id(struct bpf_verifier_log *log, const struct bpf_func_proto *fn, int arg) { - int *btf_id = &fn->btf_id[arg]; - int ret; + int id; if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID) return -EINVAL; - - ret = READ_ONCE(*btf_id); - if (ret) - return ret; - /* ok to race the search. The result is the same */ - ret = __btf_resolve_helper_id(log, fn->func, arg); - if (!ret) { - /* Function argument cannot be type 'void' */ - bpf_log(log, "BTF resolution bug\n"); - return -EFAULT; - } - WRITE_ONCE(*btf_id, ret); - return ret; + id = fn->btf_id[arg]; + if (!id || id > btf_vmlinux->nr_types) + return -EINVAL; + return id; } static int __get_type_size(struct btf *btf, u32 btf_id, From 49f4e6720748c778795df62d222d0200b61345be Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:26 +0200 Subject: [PATCH 0873/2056] bpf: Use BTF_ID to resolve bpf_ctx_convert struct This way the ID is resolved during compile time, and we can remove the runtime name search. 
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-7-jolsa@kernel.org --- kernel/bpf/btf.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 71140b73ae3c..a710e3ee1f18 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3621,12 +3622,15 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log, return kern_ctx_type->type; } +BTF_ID_LIST(bpf_ctx_convert_btf_id) +BTF_ID(struct, bpf_ctx_convert) + struct btf *btf_parse_vmlinux(void) { struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; struct btf *btf = NULL; - int err, btf_id; + int err; env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); if (!env) @@ -3659,14 +3663,8 @@ struct btf *btf_parse_vmlinux(void) if (err) goto errout; - /* find struct bpf_ctx_convert for type checking later */ - btf_id = btf_find_by_name_kind(btf, "bpf_ctx_convert", BTF_KIND_STRUCT); - if (btf_id < 0) { - err = btf_id; - goto errout; - } /* btf_parse_vmlinux() runs under bpf_verifier_lock */ - bpf_ctx_convert.t = btf_type_by_id(btf, btf_id); + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); /* find bpf map structs for map_ptr access checking */ err = btf_vmlinux_map_ids_init(btf, log); From 232ce4be295743deb2cf9a7cf9c60a4fde367964 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:27 +0200 Subject: [PATCH 0874/2056] bpf: Add info about .BTF_ids section to btf.rst Updating btf.rst doc with info about .BTF_ids section Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-8-jolsa@kernel.org --- Documentation/bpf/btf.rst | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst index 4d565d202ce3..b5361b8621c9 100644 --- a/Documentation/bpf/btf.rst +++ b/Documentation/bpf/btf.rst @@ -691,6 +691,42 @@ kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the beginning of section (``btf_ext_info_sec->sec_name_off``). +4.2 .BTF_ids section +==================== + +The .BTF_ids section encodes BTF ID values that are used within the kernel. + +This section is created during the kernel compilation with the help of +macros defined in ``include/linux/btf_ids.h`` header file. Kernel code can +use them to create lists and sets (sorted lists) of BTF ID values. + +The ``BTF_ID_LIST`` and ``BTF_ID`` macros define unsorted list of BTF ID values, +with following syntax:: + + BTF_ID_LIST(list) + BTF_ID(type1, name1) + BTF_ID(type2, name2) + +resulting in following layout in .BTF_ids section:: + + __BTF_ID__type1__name1__1: + .zero 4 + __BTF_ID__type2__name2__2: + .zero 4 + +The ``u32 list[];`` variable is defined to access the list. + +The ``BTF_ID_UNUSED`` macro defines 4 zero bytes. It's used when we +want to define unused entry in BTF_ID_LIST, like:: + + BTF_ID_LIST(bpf_skb_output_btf_ids) + BTF_ID(struct, sk_buff) + BTF_ID_UNUSED + BTF_ID(struct, task_struct) + +All the BTF ID lists and sets are compiled in the .BTF_ids section and +resolved during the linking phase of kernel build by ``resolve_btfids`` tool. + 5. 
Using BTF ************ From e5a0516ec9681daf5c1f0d05144d21430b6ca6d7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:28 +0200 Subject: [PATCH 0875/2056] tools headers: Adopt verbatim copy of btf_ids.h from kernel sources It will be needed by bpf selftest for resolve_btfids tool. Also adding __PASTE macro as btf_ids.h dependency, which is defined in: include/linux/compiler_types.h but because tools/include do not have this header, I'm putting the macro into linux/compiler.h header. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-9-jolsa@kernel.org --- tools/include/linux/btf_ids.h | 87 ++++++++++++++++++++++++++++++++++ tools/include/linux/compiler.h | 4 ++ 2 files changed, 91 insertions(+) create mode 100644 tools/include/linux/btf_ids.h diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h new file mode 100644 index 000000000000..fe019774f8a7 --- /dev/null +++ b/tools/include/linux/btf_ids.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#include /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", @object; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name) \ +extern u32 name[]; + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + + +#endif diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 9f9002734e19..6eac24d44e81 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -201,4 +201,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s # define __fallthrough #endif +/* Indirect macros required for expanded argument pasting, eg. __LINE__. 
*/ +#define ___PASTE(a, b) a##b +#define __PASTE(a, b) ___PASTE(a, b) + #endif /* _TOOLS_LINUX_COMPILER_H */ From cc15a20d5f3abc3cbd7911b70156b7b9e2bc7d41 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sat, 11 Jul 2020 23:53:29 +0200 Subject: [PATCH 0876/2056] selftests/bpf: Add test for resolve_btfids Adding resolve_btfids test under test_progs suite. It's possible to use btf_ids.h header and its logic in user space application, so we can add easy test for it. The test defines BTF_ID_LIST and checks it gets properly resolved. For this reason the test_progs binary (and other binaries that use TRUNNER* macros) is processed with resolve_btfids tool, which resolves BTF IDs in .BTF_ids section. The BTF data are taken from btf_data.o object rceated from progs/btf_data.c. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200711215329.41165-10-jolsa@kernel.org --- tools/testing/selftests/bpf/Makefile | 15 ++- .../selftests/bpf/prog_tests/resolve_btfids.c | 111 ++++++++++++++++++ tools/testing/selftests/bpf/progs/btf_data.c | 50 ++++++++ 3 files changed, 175 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/resolve_btfids.c create mode 100644 tools/testing/selftests/bpf/progs/btf_data.c diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 1f9c696b3edf..e7a8cf83ba48 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools BUILD_DIR := $(SCRATCH_DIR)/build INCLUDE_DIR := $(SCRATCH_DIR)/include BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a +RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids # Define simple and short `make test_progs`, `make test_sysctl`, etc targets # to build individual tests. @@ -177,7 +178,7 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \ $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \ DESTDIR=$(SCRATCH_DIR) prefix= all install_headers -$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR): +$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR): $(call msg,MKDIR,,$@) mkdir -p $@ @@ -190,6 +191,16 @@ else cp "$(VMLINUX_H)" $@ endif +$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \ + $(TOOLSDIR)/bpf/resolve_btfids/main.c \ + $(TOOLSDIR)/lib/rbtree.c \ + $(TOOLSDIR)/lib/zalloc.c \ + $(TOOLSDIR)/lib/string.c \ + $(TOOLSDIR)/lib/ctype.c \ + $(TOOLSDIR)/lib/str_error_r.c + $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \ + OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ) + # Get Clang's default includes on this system, as opposed to those seen by # '-target bpf'. This fixes "missing" files on some architectures/distros, # such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc. 
@@ -352,9 +363,11 @@ endif $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \ $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ) \ + $(RESOLVE_BTFIDS) \ | $(TRUNNER_BINARY)-extras $$(call msg,BINARY,,$$@) $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@ + $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@ endef diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c new file mode 100644 index 000000000000..403be6f36cba --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include "test_progs.h" + +static int duration; + +struct symbol { + const char *name; + int type; + int id; +}; + +struct symbol test_symbols[] = { + { "unused", BTF_KIND_UNKN, 0 }, + { "S", BTF_KIND_TYPEDEF, -1 }, + { "T", BTF_KIND_TYPEDEF, -1 }, + { "U", BTF_KIND_TYPEDEF, -1 }, + { "S", BTF_KIND_STRUCT, -1 }, + { "U", BTF_KIND_UNION, -1 }, + { "func", BTF_KIND_FUNC, -1 }, +}; + +BTF_ID_LIST(test_list) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +static int +__resolve_symbol(struct btf *btf, int type_id) +{ + const struct btf_type *type; + const char *str; + unsigned int i; + + type = btf__type_by_id(btf, type_id); + if (!type) { + PRINT_FAIL("Failed to get type for ID %d\n", type_id); + return -1; + } + + for (i = 0; i < ARRAY_SIZE(test_symbols); i++) { + if (test_symbols[i].id != -1) + continue; + + if (BTF_INFO_KIND(type->info) != test_symbols[i].type) + continue; + + str = btf__name_by_offset(btf, type->name_off); + if (!str) { + PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id); + return -1; + } + + if (!strcmp(str, test_symbols[i].name)) + test_symbols[i].id = type_id; + } + + return 0; +} + +static int resolve_symbols(void) +{ + struct btf *btf; + int type_id; + __u32 nr; + + btf = btf__parse_elf("btf_data.o", NULL); + if (CHECK(libbpf_get_error(btf), "resolve", + "Failed to load BTF from btf_data.o\n")) + return -1; + + nr = btf__get_nr_types(btf); + + for (type_id = 1; type_id <= nr; type_id++) { + if (__resolve_symbol(btf, type_id)) + break; + } + + btf__free(btf); + return 0; +} + +int test_resolve_btfids(void) +{ + unsigned int i; + int ret = 0; + + if (resolve_symbols()) + return -1; + + /* Check BTF_ID_LIST(test_list) IDs */ + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", test_symbols[i].name, + test_list[i], test_symbols[i].id); + } + + return ret; +} diff --git a/tools/testing/selftests/bpf/progs/btf_data.c b/tools/testing/selftests/bpf/progs/btf_data.c new file mode 100644 index 000000000000..baa525275bde --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf_data.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) + +struct S { + int a; + int b; + int c; +}; + +union U { + int a; + int b; + int c; +}; + +struct S1 { + int a; + int b; + int c; +}; + +union U1 { + int a; + int b; + int c; +}; + +typedef int T; +typedef int S; +typedef int U; +typedef int T1; +typedef int S1; +typedef int U1; + +struct root_struct { + S m_1; + T m_2; + U m_3; + S1 m_4; + T1 m_5; + U1 m_6; + struct S m_7; + struct S1 m_8; + union U m_9; + union U1 m_10; +}; + +int func(struct root_struct *root) +{ + return 0; +} From 528ae84a34ffd40da5d3fbff740d28d6dc2c8f8a Mon Sep 17 00:00:00 2001 
From: Nikolay Aleksandrov Date: Mon, 13 Jul 2020 10:55:46 +0300 Subject: [PATCH 0877/2056] net: bridge: fix undefined br_vlan_can_enter_range in tunnel code If bridge vlan filtering is not defined we won't have br_vlan_can_enter_range and thus will get a compile error as was reported by Stephen and the build bot. So let's define a stub for when vlan filtering is not used. Fixes: 94339443686b ("net: bridge: notify on vlan tunnel changes done via the old api") Reported-by: Stephen Rothwell Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_private.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index a6f348bea29a..baa1500f384f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -1200,6 +1200,12 @@ static inline void br_vlan_notify(const struct net_bridge *br, int cmd) { } + +static inline bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr, + const struct net_bridge_vlan *range_end) +{ + return true; +} #endif /* br_vlan_options.c */ From c19b05b84ddece7708ed0537a92d1dfabdfd98fb Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Mon, 13 Jul 2020 13:53:30 +0900 Subject: [PATCH 0878/2056] net: fddi: skfp: Remove addr_to_string(). kbuild test robot found that addr_to_string() is available only when DEBUG is defined. And I found that what that function is doing is what %pM will do. Thus, replace %s with %pM and remove thread-unsafe addr_to_string() function. Reported-by: kbuild test robot Signed-off-by: Tetsuo Handa Signed-off-by: David S. Miller --- drivers/net/fddi/skfp/ess.c | 10 +++--- drivers/net/fddi/skfp/h/cmtdef.h | 1 - drivers/net/fddi/skfp/smt.c | 60 ++++++++++++-------------------- 3 files changed, 27 insertions(+), 44 deletions(-) diff --git a/drivers/net/fddi/skfp/ess.c b/drivers/net/fddi/skfp/ess.c index a546eaf071f7..afd5ca39f43b 100644 --- a/drivers/net/fddi/skfp/ess.c +++ b/drivers/net/fddi/skfp/ess.c @@ -148,7 +148,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, DB_ESSN(2, "fc %x ft %x", sm->smt_class, sm->smt_type); DB_ESSN(2, "ver %x tran %x", sm->smt_version, sm->smt_tid); - DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source)); + DB_ESSN(2, "stn_id %pM", &sm->smt_source); DB_ESSN(2, "infolen %x res %lx", sm->smt_len, msg_res_type); DB_ESSN(2, "sbacmd %x", cmd->sba_cmd); @@ -308,8 +308,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, p = (void *) sm_to_para(smc,sm,SMT_P3210) ; overhead = ((struct smt_p_3210 *)p)->mib_overhead ; - DB_ESSN(2, "ESS: Change Request from %s", - addr_to_string(&sm->smt_source)); + DB_ESSN(2, "ESS: Change Request from %pM", + &sm->smt_source); DB_ESSN(2, "payload= %lx overhead= %lx", payload, overhead); @@ -339,8 +339,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm, return fs; } - DB_ESSN(2, "ESS: Report Request from %s", - addr_to_string(&sm->smt_source)); + DB_ESSN(2, "ESS: Report Request from %pM", + &sm->smt_source); /* * verify that the resource type is sync bw only diff --git a/drivers/net/fddi/skfp/h/cmtdef.h b/drivers/net/fddi/skfp/h/cmtdef.h index 3a1ceb7cb8d2..4dd590d65d76 100644 --- a/drivers/net/fddi/skfp/h/cmtdef.h +++ b/drivers/net/fddi/skfp/h/cmtdef.h @@ -640,7 +640,6 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text); #define dump_smt(smc,sm,text) #endif -char* addr_to_string(struct fddi_addr *addr); #ifdef DEBUG void dump_hex(char *p, int len); #endif diff --git 
a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c index 47c48202a68c..b8c59d803ce6 100644 --- a/drivers/net/fddi/skfp/smt.c +++ b/drivers/net/fddi/skfp/smt.c @@ -520,8 +520,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * ignore any packet with NSA and A-indicator set */ if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) { - DB_SMT("SMT : ignoring NSA with A-indicator set from %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : ignoring NSA with A-indicator set from %pM", + &sm->smt_source); smt_free_mbuf(smc,mb) ; return ; } @@ -552,8 +552,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) break ; } if (illegal) { - DB_SMT("SMT : version = %d, dest = %s", - sm->smt_version, addr_to_string(&sm->smt_source)); + DB_SMT("SMT : version = %d, dest = %pM", + sm->smt_version, &sm->smt_source); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ; smt_free_mbuf(smc,mb) ; return ; @@ -582,8 +582,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) if (!is_equal( &smc->mib.m[MAC0].fddiMACUpstreamNbr, &sm->smt_source)) { - DB_SMT("SMT : updated my UNA = %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : updated my UNA = %pM", + &sm->smt_source); if (!is_equal(&smc->mib.m[MAC0]. fddiMACUpstreamNbr,&SMT_Unknown)){ /* Do not update unknown address */ @@ -612,8 +612,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) is_individual(&sm->smt_source) && ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) || (m_fc(mb) != FC_SMT_NSA))) { - DB_SMT("SMT : replying to NIF request %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : replying to NIF request %pM", + &sm->smt_source); smt_send_nif(smc,&sm->smt_source, FC_SMT_INFO, sm->smt_tid, @@ -621,8 +621,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) } break ; case SMT_REPLY : - DB_SMT("SMT : received NIF response from %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : received NIF response from %pM", + &sm->smt_source); if (fs & A_INDICATOR) { smc->sm.pend[SMT_TID_NIF] = 0 ; DB_SMT("SMT : duplicate address"); @@ -682,23 +682,23 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) case SMT_SIF_CONFIG : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Config request from %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : replying to SIF Config request from %pM", + &sm->smt_source); smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_SIF_OPER : /* station information */ if (sm->smt_type != SMT_REQUEST) break ; - DB_SMT("SMT : replying to SIF Operation request from %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT : replying to SIF Operation request from %pM", + &sm->smt_source); smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ; break ; case SMT_ECF : /* echo frame */ switch (sm->smt_type) { case SMT_REPLY : smc->mib.priv.fddiPRIVECF_Reply_Rx++ ; - DB_SMT("SMT: received ECF reply from %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT: received ECF reply from %pM", + &sm->smt_source); if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) { DB_SMT("SMT: ECHODATA missing"); break ; @@ -727,8 +727,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) local) ; break ; } - DB_SMT("SMT - sending ECF reply to %s", - addr_to_string(&sm->smt_source)); + DB_SMT("SMT - sending ECF reply to %pM", + &sm->smt_source); /* set destination addr. 
& reply */ sm->smt_dest = sm->smt_source ; @@ -794,8 +794,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs) * we need to send a RDF frame according to 8.1.3.1.1, * only if it is a REQUEST. */ - DB_SMT("SMT : class = %d, send RDF to %s", - sm->smt_class, addr_to_string(&sm->smt_source)); + DB_SMT("SMT : class = %d, send RDF to %pM", + sm->smt_class, &sm->smt_source); smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ; break ; @@ -864,8 +864,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason, if (sm->smt_type != SMT_REQUEST) return ; - DB_SMT("SMT: sending RDF to %s,reason = 0x%x", - addr_to_string(&sm->smt_source), reason); + DB_SMT("SMT: sending RDF to %pM,reason = 0x%x", + &sm->smt_source, reason); /* @@ -1715,22 +1715,6 @@ void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest) } #endif -#ifdef DEBUG -char *addr_to_string(struct fddi_addr *addr) -{ - int i ; - static char string[6*3] = "****" ; - - for (i = 0 ; i < 6 ; i++) { - string[i * 3] = hex_asc_hi(addr->a[i]); - string[i * 3 + 1] = hex_asc_lo(addr->a[i]); - string[i * 3 + 2] = ':'; - } - string[5 * 3 + 2] = 0; - return string; -} -#endif - /* * return static mac index */ From 266f312845857994f761699e72b1023f576e5f13 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 09:51:08 +0200 Subject: [PATCH 0879/2056] dccp: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- net/dccp/Kconfig | 2 +- net/dccp/ccids/Kconfig | 4 ++-- net/dccp/ccids/ccid3.c | 2 +- net/dccp/ccids/ccid3.h | 2 +- net/dccp/ccids/lib/packet_history.c | 2 +- net/dccp/ccids/lib/packet_history.h | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig index 51ac2631fb48..0c7d2f66ba27 100644 --- a/net/dccp/Kconfig +++ b/net/dccp/Kconfig @@ -5,7 +5,7 @@ menuconfig IP_DCCP help Datagram Congestion Control Protocol (RFC 4340) - From http://www.ietf.org/rfc/rfc4340.txt: + From https://www.ietf.org/rfc/rfc4340.txt: The Datagram Congestion Control Protocol (DCCP) is a transport protocol that implements bidirectional, unicast connections of diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig index 4d7771f36eff..a3eeb84d16f9 100644 --- a/net/dccp/ccids/Kconfig +++ b/net/dccp/ccids/Kconfig @@ -26,13 +26,13 @@ config IP_DCCP_CCID3 relatively smooth sending rate is of importance. CCID-3 is further described in RFC 4342, - http://www.ietf.org/rfc/rfc4342.txt + https://www.ietf.org/rfc/rfc4342.txt The TFRC congestion control algorithms were initially described in RFC 5348. This text was extracted from RFC 4340 (sec. 10.2), - http://www.ietf.org/rfc/rfc4340.txt + https://www.ietf.org/rfc/rfc4340.txt If in doubt, say N. diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index 9ef9bee9610f..aef72f6a2829 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -7,7 +7,7 @@ * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND - * research group. 
For further information please see http://www.wand.net.nz/ + * research group. For further information please see https://www.wand.net.nz/ * * This code also uses code from Lulea University, rereleased as GPL by its * authors: diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h index 081c195e7f7d..02e0fc9f6334 100644 --- a/net/dccp/ccids/ccid3.h +++ b/net/dccp/ccids/ccid3.h @@ -6,7 +6,7 @@ * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND - * research group. For further information please see http://www.wand.net.nz/ + * research group. For further information please see https://www.wand.net.nz/ * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index 2d41bb036271..0bef57b908fb 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c @@ -6,7 +6,7 @@ * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND - * research group. For further information please see http://www.wand.net.nz/ + * research group. For further information please see https://www.wand.net.nz/ * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index a157d874840b..159cc9326eab 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -6,7 +6,7 @@ * Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand. * * This code has been developed by the University of Waikato WAND - * research group. For further information please see http://www.wand.net.nz/ + * research group. For further information please see https://www.wand.net.nz/ * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz * * This code also uses code from Lulea University, rereleased as GPL by its From 2be53e0e4690a764b6af18536eb78b811f40eacc Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 11:02:51 +0200 Subject: [PATCH 0880/2056] AX.25 Kconfig: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- net/ax25/Kconfig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/ax25/Kconfig b/net/ax25/Kconfig index 97d686d115c0..d3a9843a043d 100644 --- a/net/ax25/Kconfig +++ b/net/ax25/Kconfig @@ -8,7 +8,7 @@ menuconfig HAMRADIO bool "Amateur Radio support" help If you want to connect your Linux box to an amateur radio, answer Y - here. You want to read + here. You want to read and more specifically about AX.25 on Linux . @@ -39,11 +39,11 @@ config AX25 Information about where to get supporting software for Linux amateur radio as well as information about how to configure an AX.25 port is contained in the AX25-HOWTO, available from - . You might also want to + . You might also want to check out the file in the kernel source. 
More information about digital amateur radio in general is on the WWW at - . + . To compile this driver as a module, choose M here: the module will be called ax25. @@ -90,7 +90,7 @@ config NETROM . You also might want to check out the file . More information about digital amateur radio in general is on the WWW at - . + . To compile this driver as a module, choose M here: the module will be called netrom. @@ -109,7 +109,7 @@ config ROSE . You also might want to check out the file . More information about digital amateur radio in general is on the WWW at - . + . To compile this driver as a module, choose M here: the module will be called rose. From 8aa5a33578e9685d06020bd10d1637557423e945 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:33 +0000 Subject: [PATCH 0881/2056] xsk: Add new statistics It can be useful for the user to know the reason behind a dropped packet. Introduce new counters which track drops on the receive path caused by: 1. rx ring being full 2. fill ring being empty Also, on the tx path introduce a counter which tracks the number of times we attempt pull from the tx ring when it is empty. Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-2-ciara.loftus@intel.com --- include/net/xdp_sock.h | 4 ++++ include/uapi/linux/if_xdp.h | 5 ++++- net/xdp/xsk.c | 36 ++++++++++++++++++++++++++----- net/xdp/xsk_buff_pool.c | 1 + net/xdp/xsk_queue.h | 6 ++++++ tools/include/uapi/linux/if_xdp.h | 5 ++++- 6 files changed, 50 insertions(+), 7 deletions(-) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 96bfc5f5f24e..c9d87cc40c11 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -69,7 +69,11 @@ struct xdp_sock { spinlock_t tx_completion_lock; /* Protects generic receive. */ spinlock_t rx_lock; + + /* Statistics */ u64 rx_dropped; + u64 rx_queue_full; + struct list_head map_list; /* Protects map_list */ spinlock_t map_list_lock; diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 3700266229f6..26e3bba8c204 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) addr = xp_get_handle(xskb); err = xskq_prod_reserve_desc(xs->rx, addr, len); if (err) { - xs->rx_dropped++; + xs->rx_queue_full++; return err; } @@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) rcu_read_lock(); list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) { - if (!xskq_cons_peek_desc(xs->tx, desc, umem)) + if (!xskq_cons_peek_desc(xs->tx, desc, umem)) { + xs->tx->queue_empty_descs++; continue; + } /* This is the backpressure mechanism for the Tx path. 
* Reserve space in the completion queue and only proceed @@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk) sent_frame = true; } + xs->tx->queue_empty_descs++; + out: if (sent_frame) sk->sk_write_space(sk); @@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) ring->desc = offsetof(struct xdp_umem_ring, desc); } +struct xdp_statistics_v1 { + __u64 rx_dropped; + __u64 rx_invalid_descs; + __u64 tx_invalid_descs; +}; + static int xsk_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname, case XDP_STATISTICS: { struct xdp_statistics stats; + bool extra_stats = true; + size_t stats_size; - if (len < sizeof(stats)) + if (len < sizeof(struct xdp_statistics_v1)) { return -EINVAL; + } else if (len < sizeof(stats)) { + extra_stats = false; + stats_size = sizeof(struct xdp_statistics_v1); + } else { + stats_size = sizeof(stats); + } mutex_lock(&xs->mutex); stats.rx_dropped = xs->rx_dropped; + if (extra_stats) { + stats.rx_ring_full = xs->rx_queue_full; + stats.rx_fill_ring_empty_descs = + xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); + } else { + stats.rx_dropped += xs->rx_queue_full; + } stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); mutex_unlock(&xs->mutex); - if (copy_to_user(optval, &stats, sizeof(stats))) + if (copy_to_user(optval, &stats, stats_size)) return -EFAULT; - if (put_user(sizeof(stats), optlen)) + if (put_user(stats_size, optlen)) return -EFAULT; return 0; diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index 540ed75e4482..89cf3551d3e9 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -235,6 +235,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool) for (;;) { if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) { + pool->fq->queue_empty_descs++; xp_release(xskb); return NULL; } diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 5b5d24d2dd37..bf42cfd74b89 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -38,6 +38,7 @@ struct xsk_queue { u32 cached_cons; struct xdp_ring *ring; u64 invalid_descs; + u64 queue_empty_descs; }; /* The structure of the shared state of the rings are the same as the @@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) return q ? q->invalid_descs : 0; } +static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q) +{ + return q ? 
q->queue_empty_descs : 0; +} + struct xsk_queue *xskq_create(u32 nentries, bool umem_queue); void xskq_destroy(struct xsk_queue *q_ops); diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h index be328c59389d..a78a8096f4ce 100644 --- a/tools/include/uapi/linux/if_xdp.h +++ b/tools/include/uapi/linux/if_xdp.h @@ -73,9 +73,12 @@ struct xdp_umem_reg { }; struct xdp_statistics { - __u64 rx_dropped; /* Dropped for reasons other than invalid desc */ + __u64 rx_dropped; /* Dropped for other reasons */ __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */ __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */ + __u64 rx_ring_full; /* Dropped due to rx ring being full */ + __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */ + __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */ }; struct xdp_options { From b36c3206f9ef3ea2844e9092c12d29c0d1f56c54 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:34 +0000 Subject: [PATCH 0882/2056] samples: bpf: Add an option for printing extra statistics in xdpsock Introduce the --extra-stats (or simply -x) flag to the xdpsock application which prints additional statistics alongside the regular rx and tx counters. The new statistics printed report error conditions eg. rx ring full, invalid descriptors, etc. Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-3-ciara.loftus@intel.com --- samples/bpf/xdpsock_user.c | 87 +++++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 2 deletions(-) diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c index c91e91362a0c..19c679456a0e 100644 --- a/samples/bpf/xdpsock_user.c +++ b/samples/bpf/xdpsock_user.c @@ -77,6 +77,7 @@ static u32 opt_batch_size = 64; static int opt_pkt_count; static u16 opt_pkt_size = MIN_PKT_SIZE; static u32 opt_pkt_fill_pattern = 0x12345678; +static bool opt_extra_stats; static int opt_poll; static int opt_interval = 1; static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP; @@ -103,8 +104,20 @@ struct xsk_socket_info { struct xsk_socket *xsk; unsigned long rx_npkts; unsigned long tx_npkts; + unsigned long rx_dropped_npkts; + unsigned long rx_invalid_npkts; + unsigned long tx_invalid_npkts; + unsigned long rx_full_npkts; + unsigned long rx_fill_empty_npkts; + unsigned long tx_empty_npkts; unsigned long prev_rx_npkts; unsigned long prev_tx_npkts; + unsigned long prev_rx_dropped_npkts; + unsigned long prev_rx_invalid_npkts; + unsigned long prev_tx_invalid_npkts; + unsigned long prev_rx_full_npkts; + unsigned long prev_rx_fill_empty_npkts; + unsigned long prev_tx_empty_npkts; u32 outstanding_tx; }; @@ -147,6 +160,30 @@ static void print_benchmark(bool running) } } +static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk) +{ + struct xdp_statistics stats; + socklen_t optlen; + int err; + + optlen = sizeof(stats); + err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); + if (err) + return err; + + if (optlen == sizeof(struct xdp_statistics)) { + xsk->rx_dropped_npkts = stats.rx_dropped; + xsk->rx_invalid_npkts = stats.rx_invalid_descs; + xsk->tx_invalid_npkts = stats.tx_invalid_descs; + xsk->rx_full_npkts = stats.rx_ring_full; + xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs; + xsk->tx_empty_npkts = stats.tx_ring_empty_descs; + return 0; + } + + return -EINVAL; +} + static void dump_stats(void) { unsigned long now = get_nsecs(); @@ -157,7 +194,8 @@ static void dump_stats(void) 
for (i = 0; i < num_socks && xsks[i]; i++) { char *fmt = "%-15s %'-11.0f %'-11lu\n"; - double rx_pps, tx_pps; + double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps, + tx_invalid_pps, tx_empty_pps; rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) * 1000000000. / dt; @@ -175,6 +213,46 @@ static void dump_stats(void) xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts; xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts; + + if (opt_extra_stats) { + if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) { + dropped_pps = (xsks[i]->rx_dropped_npkts - + xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt; + rx_invalid_pps = (xsks[i]->rx_invalid_npkts - + xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt; + tx_invalid_pps = (xsks[i]->tx_invalid_npkts - + xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt; + full_pps = (xsks[i]->rx_full_npkts - + xsks[i]->prev_rx_full_npkts) * 1000000000. / dt; + fill_empty_pps = (xsks[i]->rx_fill_empty_npkts - + xsks[i]->prev_rx_fill_empty_npkts) + * 1000000000. / dt; + tx_empty_pps = (xsks[i]->tx_empty_npkts - + xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt; + + printf(fmt, "rx dropped", dropped_pps, + xsks[i]->rx_dropped_npkts); + printf(fmt, "rx invalid", rx_invalid_pps, + xsks[i]->rx_invalid_npkts); + printf(fmt, "tx invalid", tx_invalid_pps, + xsks[i]->tx_invalid_npkts); + printf(fmt, "rx queue full", full_pps, + xsks[i]->rx_full_npkts); + printf(fmt, "fill ring empty", fill_empty_pps, + xsks[i]->rx_fill_empty_npkts); + printf(fmt, "tx ring empty", tx_empty_pps, + xsks[i]->tx_empty_npkts); + + xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts; + xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts; + xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts; + xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts; + xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts; + xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts; + } else { + printf("%-15s\n", "Error retrieving extra stats"); + } + } } } @@ -630,6 +708,7 @@ static struct option long_options[] = { {"tx-pkt-count", required_argument, 0, 'C'}, {"tx-pkt-size", required_argument, 0, 's'}, {"tx-pkt-pattern", required_argument, 0, 'P'}, + {"extra-stats", no_argument, 0, 'x'}, {0, 0, 0, 0} }; @@ -664,6 +743,7 @@ static void usage(const char *prog) " (Default: %d bytes)\n" " Min size: %d, Max size %d.\n" " -P, --tx-pkt-pattern=nPacket fill pattern. 
Default: 0x%x\n" + " -x, --extra-stats Display extra statistics.\n" "\n"; fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE, opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE, @@ -679,7 +759,7 @@ static void parse_command_line(int argc, char **argv) opterr = 0; for (;;) { - c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:", + c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x", long_options, &option_index); if (c == -1) break; @@ -760,6 +840,9 @@ static void parse_command_line(int argc, char **argv) case 'P': opt_pkt_fill_pattern = strtol(optarg, NULL, 16); break; + case 'x': + opt_extra_stats = 1; + break; default: usage(basename(argv[0])); } From 0d80cb4612aa32dc0faa17fa3ab6f96f33e2b4a7 Mon Sep 17 00:00:00 2001 From: Ciara Loftus Date: Wed, 8 Jul 2020 07:28:35 +0000 Subject: [PATCH 0883/2056] xsk: Add xdp statistics to xsk_diag Add xdp statistics to the information dumped through the xsk_diag interface Signed-off-by: Ciara Loftus Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200708072835.4427-4-ciara.loftus@intel.com --- include/uapi/linux/xdp_diag.h | 11 +++++++++++ net/xdp/xsk_diag.c | 17 +++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h index 78b2591a7782..66b9973b4f4c 100644 --- a/include/uapi/linux/xdp_diag.h +++ b/include/uapi/linux/xdp_diag.h @@ -30,6 +30,7 @@ struct xdp_diag_msg { #define XDP_SHOW_RING_CFG (1 << 1) #define XDP_SHOW_UMEM (1 << 2) #define XDP_SHOW_MEMINFO (1 << 3) +#define XDP_SHOW_STATS (1 << 4) enum { XDP_DIAG_NONE, @@ -41,6 +42,7 @@ enum { XDP_DIAG_UMEM_FILL_RING, XDP_DIAG_UMEM_COMPLETION_RING, XDP_DIAG_MEMINFO, + XDP_DIAG_STATS, __XDP_DIAG_MAX, }; @@ -69,4 +71,13 @@ struct xdp_diag_umem { __u32 refs; }; +struct xdp_diag_stats { + __u64 n_rx_dropped; + __u64 n_rx_invalid; + __u64 n_rx_full; + __u64 n_fill_ring_empty; + __u64 n_tx_invalid; + __u64 n_tx_ring_empty; +}; + #endif /* _LINUX_XDP_DIAG_H */ diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index 0163b26aaf63..21e9c2d123ee 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) return err; } +static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb) +{ + struct xdp_diag_stats du = {}; + + du.n_rx_dropped = xs->rx_dropped; + du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx); + du.n_rx_full = xs->rx_queue_full; + du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0; + du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx); + du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx); + return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du); +} + static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, struct xdp_diag_req *req, struct user_namespace *user_ns, @@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb, sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) goto out_nlmsg_trim; + if ((req->xdiag_show & XDP_SHOW_STATS) && + xsk_diag_put_stats(xs, nlskb)) + goto out_nlmsg_trim; + mutex_unlock(&xs->mutex); nlmsg_end(nlskb, nlh); return 0; From 93776cb9ee91aeed43ba53dcec97ffed4ae6f1f7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 10 Jul 2020 16:26:04 -0700 Subject: [PATCH 0884/2056] tools/bpftool: Remove warning about PID iterator support Don't emit warning that bpftool was built without PID iterator support. This error garbles JSON output of otherwise perfectly valid show commands. 
Reported-by: Andrey Ignatov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200710232605.20918-1-andriin@fb.com --- tools/bpf/bpftool/pids.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c index c0d23ce4a6f4..e3b116325403 100644 --- a/tools/bpf/bpftool/pids.c +++ b/tools/bpf/bpftool/pids.c @@ -15,7 +15,6 @@ int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type) { - p_err("bpftool built without PID iterator support"); return -ENOTSUP; } void delete_obj_refs_table(struct obj_refs_table *table) {} From ac5a72ea5c8989871e61f6bb0852e0f91de51ebe Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 13 Jul 2020 12:52:33 +0100 Subject: [PATCH 0885/2056] bpf: Use dedicated bpf_trace_printk event instead of trace_printk() The bpf helper bpf_trace_printk() uses trace_printk() under the hood. This leads to an alarming warning message originating from trace buffer allocation which occurs the first time a program using bpf_trace_printk() is loaded. We can instead create a trace event for bpf_trace_printk() and enable it in-kernel when/if we encounter a program using the bpf_trace_printk() helper. With this approach, trace_printk() is not used directly and no warning message appears. This work was started by Steven (see Link) and finished by Alan; added Steven's Signed-off-by with his permission. Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Alan Maguire Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20200628194334.6238b933@oasis.local.home Link: https://lore.kernel.org/bpf/1594641154-18897-2-git-send-email-alan.maguire@oracle.com --- kernel/trace/Makefile | 2 ++ kernel/trace/bpf_trace.c | 42 +++++++++++++++++++++++++++++++++++----- kernel/trace/bpf_trace.h | 34 ++++++++++++++++++++++++++++++++ 3 files changed, 73 insertions(+), 5 deletions(-) create mode 100644 kernel/trace/bpf_trace.h diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 6575bb0a0434..aeba5ee7325a 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -31,6 +31,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE GCOV_PROFILE := y endif +CFLAGS_bpf_trace.o := -I$(src) + CFLAGS_trace_benchmark.o := -I$(src) CFLAGS_trace_events_filter.o := -I$(src) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index e178e8e32b33..3cc0dcb60ca2 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -20,6 +21,9 @@ #include "trace_probe.h" #include "trace.h" +#define CREATE_TRACE_POINTS +#include "bpf_trace.h" + #define bpf_event_rcu_dereference(p) \ rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex)) @@ -375,6 +379,30 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, } } +static DEFINE_RAW_SPINLOCK(trace_printk_lock); + +#define BPF_TRACE_PRINTK_SIZE 1024 + +static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...) 
+{ + static char buf[BPF_TRACE_PRINTK_SIZE]; + unsigned long flags; + va_list ap; + int ret; + + raw_spin_lock_irqsave(&trace_printk_lock, flags); + va_start(ap, fmt); + ret = vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + /* vsnprintf() will not append null for zero-length strings */ + if (ret == 0) + buf[0] = '\0'; + trace_bpf_trace_printk(buf); + raw_spin_unlock_irqrestore(&trace_printk_lock, flags); + + return ret; +} + /* * Only limited trace_printk() conversion specifiers allowed: * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s @@ -484,8 +512,7 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, */ #define __BPF_TP_EMIT() __BPF_ARG3_TP() #define __BPF_TP(...) \ - __trace_printk(0 /* Fake ip */, \ - fmt, ##__VA_ARGS__) + bpf_do_trace_printk(fmt, ##__VA_ARGS__) #define __BPF_ARG1_TP(...) \ ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \ @@ -522,10 +549,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { const struct bpf_func_proto *bpf_get_trace_printk_proto(void) { /* - * this program might be calling bpf_trace_printk, - * so allocate per-cpu printk buffers + * This program might be calling bpf_trace_printk, + * so enable the associated bpf_trace/bpf_trace_printk event. + * Repeat this each time as it is possible a user has + * disabled bpf_trace_printk events. By loading a program + * calling bpf_trace_printk() however the user has expressed + * the intent to see such events. */ - trace_printk_init_buffers(); + if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) + pr_warn_ratelimited("could not enable bpf_trace_printk events"); return &bpf_trace_printk_proto; } diff --git a/kernel/trace/bpf_trace.h b/kernel/trace/bpf_trace.h new file mode 100644 index 000000000000..9acbc11ac7bb --- /dev/null +++ b/kernel/trace/bpf_trace.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM bpf_trace + +#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) + +#define _TRACE_BPF_TRACE_H + +#include + +TRACE_EVENT(bpf_trace_printk, + + TP_PROTO(const char *bpf_string), + + TP_ARGS(bpf_string), + + TP_STRUCT__entry( + __string(bpf_string, bpf_string) + ), + + TP_fast_assign( + __assign_str(bpf_string, bpf_string); + ), + + TP_printk("%s", __get_str(bpf_string)) +); + +#endif /* _TRACE_BPF_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE bpf_trace + +#include From 59e8b60bf068180fcadb0ae06ce8f6f835132ce6 Mon Sep 17 00:00:00 2001 From: Alan Maguire Date: Mon, 13 Jul 2020 12:52:34 +0100 Subject: [PATCH 0886/2056] selftests/bpf: Add selftests verifying bpf_trace_printk() behaviour Simple selftests that verifies bpf_trace_printk() returns a sensible value and tracing messages appear. 
Signed-off-by: Alan Maguire Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/1594641154-18897-3-git-send-email-alan.maguire@oracle.com --- .../selftests/bpf/prog_tests/trace_printk.c | 75 +++++++++++++++++++ .../selftests/bpf/progs/trace_printk.c | 21 ++++++ 2 files changed, 96 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/trace_printk.c create mode 100644 tools/testing/selftests/bpf/progs/trace_printk.c diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c new file mode 100644 index 000000000000..39b0decb1bb2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/trace_printk.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020, Oracle and/or its affiliates. */ + +#include + +#include "trace_printk.skel.h" + +#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe" +#define SEARCHMSG "testing,testing" + +void test_trace_printk(void) +{ + int err, iter = 0, duration = 0, found = 0; + struct trace_printk__bss *bss; + struct trace_printk *skel; + char *buf = NULL; + FILE *fp = NULL; + size_t buflen; + + skel = trace_printk__open(); + if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) + return; + + err = trace_printk__load(skel); + if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) + goto cleanup; + + bss = skel->bss; + + err = trace_printk__attach(skel); + if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err)) + goto cleanup; + + fp = fopen(TRACEBUF, "r"); + if (CHECK(fp == NULL, "could not open trace buffer", + "error %d opening %s", errno, TRACEBUF)) + goto cleanup; + + /* We do not want to wait forever if this test fails... */ + fcntl(fileno(fp), F_SETFL, O_NONBLOCK); + + /* wait for tracepoint to trigger */ + usleep(1); + trace_printk__detach(skel); + + if (CHECK(bss->trace_printk_ran == 0, + "bpf_trace_printk never ran", + "ran == %d", bss->trace_printk_ran)) + goto cleanup; + + if (CHECK(bss->trace_printk_ret <= 0, + "bpf_trace_printk returned <= 0 value", + "got %d", bss->trace_printk_ret)) + goto cleanup; + + /* verify our search string is in the trace buffer */ + while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) { + if (strstr(buf, SEARCHMSG) != NULL) + found++; + if (found == bss->trace_printk_ran) + break; + if (++iter > 1000) + break; + } + + if (CHECK(!found, "message from bpf_trace_printk not found", + "no instance of %s in %s", SEARCHMSG, TRACEBUF)) + goto cleanup; + +cleanup: + trace_printk__destroy(skel); + free(buf); + if (fp) + fclose(fp); +} diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c new file mode 100644 index 000000000000..8ca7f399b670 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/trace_printk.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020, Oracle and/or its affiliates. + +#include "vmlinux.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +int trace_printk_ret = 0; +int trace_printk_ran = 0; + +SEC("tp/raw_syscalls/sys_enter") +int sys_enter(void *ctx) +{ + static const char fmt[] = "testing,testing %d\n"; + + trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt), + ++trace_printk_ran); + return 0; +} From ab2749592147cc310624c27470c2b63f820abaa9 Mon Sep 17 00:00:00 2001 From: "Alexander A. 
Klimov" Date: Mon, 13 Jul 2020 11:33:23 +0200 Subject: [PATCH 0887/2056] WAN: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/net/wan/c101.c | 2 +- drivers/net/wan/n2.c | 2 +- drivers/net/wan/pc300too.c | 2 +- drivers/net/wan/pci200syn.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c index cb57f9124ab1..c354a5143e99 100644 --- a/drivers/net/wan/c101.c +++ b/drivers/net/wan/c101.c @@ -4,7 +4,7 @@ * * Copyright (C) 2000-2003 Krzysztof Halasa * - * For information see + * For information see * * Sources of information: * Hitachi HD64570 SCA User's Manual diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c index f9e7fc7e9978..5bf4463873b1 100644 --- a/drivers/net/wan/n2.c +++ b/drivers/net/wan/n2.c @@ -4,7 +4,7 @@ * * Copyright (C) 1998-2003 Krzysztof Halasa * - * For information see + * For information see * * Note: integrated CSU/DSU/DDS are not supported by this driver * diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index 190735604b2e..001fd378d417 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -4,7 +4,7 @@ * * Copyright (C) 2000-2008 Krzysztof Halasa * - * For information see . + * For information see . * * Sources of information: * Hitachi HD64572 SCA-II User's Manual diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c index b5f8aaca5f06..d0062224b216 100644 --- a/drivers/net/wan/pci200syn.c +++ b/drivers/net/wan/pci200syn.c @@ -4,7 +4,7 @@ * * Copyright (C) 2002-2008 Krzysztof Halasa * - * For information see + * For information see * * Sources of information: * Hitachi HD64572 SCA-II User's Manual From ed757328c34015be4ec51861a90bb3bcc807ad58 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 12:24:18 +0200 Subject: [PATCH 0888/2056] atm: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. 
Miller --- drivers/atm/solos-pci.c | 2 +- include/uapi/linux/atmioc.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index c32f7dd9879a..b7646ae55942 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Driver for the Solos PCI ADSL2+ card, designed to support Linux by - * Traverse Technologies -- http://www.traverse.com.au/ + * Traverse Technologies -- https://www.traverse.com.au/ * Xrio Limited -- http://www.xrio.com/ * * Copyright © 2008 Traverse Technologies diff --git a/include/uapi/linux/atmioc.h b/include/uapi/linux/atmioc.h index cd7655e40c77..a9030bcc8d56 100644 --- a/include/uapi/linux/atmioc.h +++ b/include/uapi/linux/atmioc.h @@ -5,7 +5,7 @@ /* - * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of + * See https://icawww1.epfl.ch/linux-atm/magic.html for the complete list of * "magic" ioctl numbers. */ From 7c819e701382d4969ca4837b8cbe157895f5d0bf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:08 -0700 Subject: [PATCH 0889/2056] libbpf: Support stripping modifiers for btf_dump One important use case when emitting const/volatile/restrict is undesirable is BPF skeleton generation of DATASEC layout. These are further memory-mapped and can be written/read from user-space directly. For important case of .rodata variables, bpftool strips away first-level modifiers, to make their use on user-space side simple and not requiring extra type casts to override compiler complaining about writing to const variables. This logic works mostly fine, but breaks in some more complicated cases. E.g.: const volatile int params[10]; Because in BTF it's a chain of ARRAY -> CONST -> VOLATILE -> INT, bpftool stops at ARRAY and doesn't strip CONST and VOLATILE. In skeleton this variable will be emitted as is. So when used from user-space, compiler will complain about writing to const array. This is problematic, as also mentioned in [0]. To solve this for arrays and other non-trivial cases (e.g., inner const/volatile fields inside the struct), teach btf_dump to strip away any modifier, when requested. This is done as an extra option on btf_dump__emit_type_decl() API. 
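As a minimal sketch of how a caller opts in per emit call (illustrative only, not part of this patch; the wrapper name and variables are hypothetical), stripping modifiers when emitting a .rodata variable's declaration could look like:

#include <bpf/btf.h>

/* Illustrative sketch: emit the C declaration of BTF type `type_id`
 * with const/volatile/restrict dropped, as skeleton generation wants
 * for .rodata variables.  `d` is an existing struct btf_dump created
 * with btf_dump__new().
 */
static int emit_rodata_decl(struct btf_dump *d, __u32 type_id,
			    const char *var_name)
{
	DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
		.field_name = var_name,
		.indent_level = 2,
		.strip_mods = true,	/* the option added by this patch */
	);

	return btf_dump__emit_type_decl(d, type_id, &opts);
}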
Reported-by: Anton Protopopov Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-2-andriin@fb.com --- tools/lib/bpf/btf.h | 2 ++ tools/lib/bpf/btf_dump.c | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index a3b7ef9b737f..227d3f844801 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -144,6 +144,8 @@ struct btf_dump_emit_type_decl_opts { * necessary indentation already */ int indent_level; + /* strip all the const/volatile/restrict mods */ + bool strip_mods; }; #define btf_dump_emit_type_decl_opts__last_field indent_level diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index bbb430317260..e1c344504cae 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -60,6 +60,7 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + bool strip_mods; /* per-type auxiliary state */ struct btf_dump_type_aux_state *type_states; @@ -1032,7 +1033,9 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, fname = OPTS_GET(opts, field_name, ""); lvl = OPTS_GET(opts, indent_level, 0); + d->strip_mods = OPTS_GET(opts, strip_mods, false); btf_dump_emit_type_decl(d, id, fname, lvl); + d->strip_mods = false; return 0; } @@ -1045,6 +1048,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, stack_start = d->decl_stack_cnt; for (;;) { + t = btf__type_by_id(d->btf, id); + if (d->strip_mods && btf_is_mod(t)) + goto skip_mod; + err = btf_dump_push_decl_stack_id(d, id); if (err < 0) { /* @@ -1056,12 +1063,11 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id, d->decl_stack_cnt = stack_start; return; } - +skip_mod: /* VOID */ if (id == 0) break; - t = btf__type_by_id(d->btf, id); switch (btf_kind(t)) { case BTF_KIND_PTR: case BTF_KIND_VOLATILE: From 0b20933d8cfe2bf6473e9b581b5d1ed9a2117ecc Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 13 Jul 2020 16:24:09 -0700 Subject: [PATCH 0890/2056] tools/bpftool: Strip away modifiers from global variables Reliably remove all the type modifiers from read-only (.rodata) global variable definitions, including cases of inner field const modifiers and arrays of const values. Also modify one of selftests to ensure that const volatile struct doesn't prevent user-space from modifying .rodata variable. 
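For the updated test_skeleton.c below, the .rodata part of the generated skeleton then comes out roughly as follows (a hedged sketch of bpftool's output, not its verbatim code), which is what allows the selftest to assign rodata->in.in6 before load:

/* Sketch of the skeleton's .rodata section type with modifiers
 * stripped; in the real skeleton this pointer is a member of
 * struct test_skeleton.
 */
struct test_skeleton__rodata {
	struct {
		int in6;	/* was "const int in6" in the BPF object */
	} in;
} *rodata;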
Fixes: 985ead416df3 ("bpftool: Add skeleton codegen command") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200713232409.3062144-3-andriin@fb.com --- tools/bpf/bpftool/gen.c | 23 ++++++++----------- tools/lib/bpf/btf.h | 2 +- .../selftests/bpf/prog_tests/skeleton.c | 6 ++--- .../selftests/bpf/progs/test_skeleton.c | 6 +++-- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 10de76b296ba..b59d26e89367 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -88,7 +88,7 @@ static const char *get_map_ident(const struct bpf_map *map) return NULL; } -static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args) +static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args) { vprintf(fmt, args); } @@ -104,17 +104,20 @@ static int codegen_datasec_def(struct bpf_object *obj, int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec); const char *sec_ident; char var_ident[256]; + bool strip_mods = false; - if (strcmp(sec_name, ".data") == 0) + if (strcmp(sec_name, ".data") == 0) { sec_ident = "data"; - else if (strcmp(sec_name, ".bss") == 0) + } else if (strcmp(sec_name, ".bss") == 0) { sec_ident = "bss"; - else if (strcmp(sec_name, ".rodata") == 0) + } else if (strcmp(sec_name, ".rodata") == 0) { sec_ident = "rodata"; - else if (strcmp(sec_name, ".kconfig") == 0) + strip_mods = true; + } else if (strcmp(sec_name, ".kconfig") == 0) { sec_ident = "kconfig"; - else + } else { return 0; + } printf(" struct %s__%s {\n", obj_name, sec_ident); for (i = 0; i < vlen; i++, sec_var++) { @@ -123,16 +126,10 @@ static int codegen_datasec_def(struct bpf_object *obj, DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts, .field_name = var_ident, .indent_level = 2, + .strip_mods = strip_mods, ); int need_off = sec_var->offset, align_off, align; __u32 var_type_id = var->type; - const struct btf_type *t; - - t = btf__type_by_id(btf, var_type_id); - while (btf_is_mod(t)) { - var_type_id = t->type; - t = btf__type_by_id(btf, var_type_id); - } if (off > need_off) { p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n", diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 227d3f844801..491c7b41ffdc 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -147,7 +147,7 @@ struct btf_dump_emit_type_decl_opts { /* strip all the const/volatile/restrict mods */ bool strip_mods; }; -#define btf_dump_emit_type_decl_opts__last_field indent_level +#define btf_dump_emit_type_decl_opts__last_field strip_mods LIBBPF_API int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, diff --git a/tools/testing/selftests/bpf/prog_tests/skeleton.c b/tools/testing/selftests/bpf/prog_tests/skeleton.c index fa153cf67b1b..fe87b77af459 100644 --- a/tools/testing/selftests/bpf/prog_tests/skeleton.c +++ b/tools/testing/selftests/bpf/prog_tests/skeleton.c @@ -41,7 +41,7 @@ void test_skeleton(void) CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL); CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL); - CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0); + CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0); CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0); /* validate we can pre-setup global variables, even in .bss */ @@ -49,7 +49,7 @@ void test_skeleton(void) data->in2 = 11; bss->in3 = 12; bss->in4 = 13; - rodata->in6 = 14; + rodata->in.in6 = 14; 
err = test_skeleton__load(skel); if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err)) @@ -60,7 +60,7 @@ void test_skeleton(void) CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL); CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12); CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL); - CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14); + CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14); /* now set new values and attach to get them into outX variables */ data->in1 = 1; diff --git a/tools/testing/selftests/bpf/progs/test_skeleton.c b/tools/testing/selftests/bpf/progs/test_skeleton.c index 77ae86f44db5..374ccef704e1 100644 --- a/tools/testing/selftests/bpf/progs/test_skeleton.c +++ b/tools/testing/selftests/bpf/progs/test_skeleton.c @@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0; struct s in5 = {}; /* .rodata section */ -const volatile int in6 = 0; +const volatile struct { + const int in6; +} in = {}; /* .data section */ int out1 = -1; @@ -46,7 +48,7 @@ int handler(const void *ctx) out3 = in3; out4 = in4; out5 = in5; - out6 = in6; + out6 = in.in6; bpf_syscall = CONFIG_BPF_SYSCALL; kern_ver = LINUX_KERNEL_VERSION; From e3eea08e641345a6df6f4d5cb2b325874de1a757 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 13 Jul 2020 07:24:18 -0500 Subject: [PATCH 0891/2056] net: ipa: fix kerneldoc comments This commit affects comments (and in one case, whitespace) only. Throughout the IPA code, return statements are documented using "@Return:", whereas they should use "Return:" instead. Fix these mistakes. In function definitions, some parameters are missing their comment to describe them. And in structure definitions, some fields are missing their comment to describe them. Add these missing descriptions. Some arguments changed name and type along the way, but their descriptions were not updated (an endpoint pointer is now used in many places that previously used an endpoint ID). Fix these incorrect parameter descriptions. In the description for the ipa_clock structure, one field had a semicolon instead of a colon in its description. Fix this. Add a missing function description for ipa_gsi_endpoint_data_empty(). All of these issues were identified when building with "W=1". Signed-off-by: Alex Elder Signed-off-by: David S. 
Miller --- drivers/net/ipa/gsi.c | 6 +++--- drivers/net/ipa/gsi.h | 12 ++++++------ drivers/net/ipa/gsi_private.h | 6 +++--- drivers/net/ipa/gsi_trans.h | 12 ++++++------ drivers/net/ipa/ipa_clock.c | 2 +- drivers/net/ipa/ipa_clock.h | 2 +- drivers/net/ipa/ipa_cmd.h | 10 +++++----- drivers/net/ipa/ipa_endpoint.c | 9 +++++++-- drivers/net/ipa/ipa_gsi.h | 13 +++++++++++-- drivers/net/ipa/ipa_interrupt.h | 2 +- drivers/net/ipa/ipa_main.c | 8 +++++--- drivers/net/ipa/ipa_mem.c | 7 +++++-- drivers/net/ipa/ipa_smp2p.h | 2 +- drivers/net/ipa/ipa_table.c | 3 ++- drivers/net/ipa/ipa_table.h | 4 ++-- drivers/net/ipa/ipa_uc.c | 5 +++++ 16 files changed, 64 insertions(+), 39 deletions(-) diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 1ab41e0e134e..0e63d35320aa 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -1363,7 +1363,7 @@ static void gsi_channel_update(struct gsi_channel *channel) * gsi_channel_poll_one() - Return a single completed transaction on a channel * @channel: Channel to be polled * - * @Return: Transaction pointer, or null if none are available + * Return: Transaction pointer, or null if none are available * * This function returns the first entry on a channel's completed transaction * list. If that list is empty, the hardware is consulted to determine @@ -1393,8 +1393,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel) * gsi_channel_poll() - NAPI poll function for a channel * @napi: NAPI structure for the channel * @budget: Budget supplied by NAPI core - - * @Return: Number of items polled (<= budget) + * + * Return: Number of items polled (<= budget) * * Single transactions completed by hardware are polled until either * the budget is exhausted, or there are no more. Each transaction diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h index 90a02194e7ad..061312773df0 100644 --- a/drivers/net/ipa/gsi.h +++ b/drivers/net/ipa/gsi.h @@ -167,7 +167,7 @@ struct gsi { * @gsi: Address of GSI structure embedded in an IPA structure * @legacy: Set up for legacy hardware * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code * * Performs initialization that must wait until the GSI hardware is * ready (including firmware loaded). 
@@ -185,7 +185,7 @@ void gsi_teardown(struct gsi *gsi); * @gsi: GSI pointer * @channel_id: Channel whose limit is to be returned * - * @Return: The maximum number of TREs oustanding on the channel + * Return: The maximum number of TREs oustanding on the channel */ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id); @@ -194,7 +194,7 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id); * @gsi: GSI pointer * @channel_id: Channel whose limit is to be returned * - * @Return: The maximum TRE count per transaction on the channel + * Return: The maximum TRE count per transaction on the channel */ u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id); @@ -203,7 +203,7 @@ u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id); * @gsi: GSI pointer * @channel_id: Channel to start * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int gsi_channel_start(struct gsi *gsi, u32 channel_id); @@ -212,7 +212,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id); * @gsi: GSI pointer returned by gsi_setup() * @channel_id: Channel to stop * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int gsi_channel_stop(struct gsi *gsi, u32 channel_id); @@ -238,7 +238,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start); * @gsi: Address of GSI structure embedded in an IPA structure * @pdev: IPA platform device * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code * * Early stage initialization of the GSI subsystem, performing tasks * that can be done before the GSI hardware is ready to use. diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h index b57d0198ebc1..1785c9d3344d 100644 --- a/drivers/net/ipa/gsi_private.h +++ b/drivers/net/ipa/gsi_private.h @@ -44,7 +44,7 @@ void gsi_trans_complete(struct gsi_trans *trans); * @channel: Channel associated with the transaction * @index: Index of the TRE having a transaction * - * @Return: The GSI transaction pointer associated with the TRE index + * Return: The GSI transaction pointer associated with the TRE index */ struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index); @@ -53,7 +53,7 @@ struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel, * gsi_channel_trans_complete() - Return a channel's next completed transaction * @channel: Channel whose next transaction is to be returned * - * @Return: The next completed transaction, or NULL if nothing new + * Return: The next completed transaction, or NULL if nothing new */ struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel); @@ -76,7 +76,7 @@ void gsi_channel_trans_cancel_pending(struct gsi_channel *channel); * @gsi: GSI pointer * @channel_id: Channel number * - * @Return: 0 if successful, or -ENOMEM on allocation failure + * Return: 0 if successful, or -ENOMEM on allocation failure * * Creates and sets up information for managing transactions on a channel */ diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h index 1477fc15b30a..4d4606b5fa95 100644 --- a/drivers/net/ipa/gsi_trans.h +++ b/drivers/net/ipa/gsi_trans.h @@ -75,7 +75,7 @@ struct gsi_trans { * @count: Minimum number of elements in the pool * @max_alloc: Maximum number of elements allocated at a time from pool * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int 
gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, u32 max_alloc); @@ -85,7 +85,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count, * @pool: Pool pointer * @count: Number of elements to allocate from the pool * - * @Return: Virtual address of element(s) allocated from the pool + * Return: Virtual address of element(s) allocated from the pool */ void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count); @@ -103,7 +103,7 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool); * @count: Minimum number of elements in the pool * @max_alloc: Maximum number of elements allocated at a time from pool * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code * * Structures in this pool reside in DMA-coherent memory. */ @@ -115,7 +115,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool, * @pool: DMA pool pointer * @addr: DMA address "handle" associated with the allocation * - * @Return: Virtual address of element allocated from the pool + * Return: Virtual address of element allocated from the pool * * Only one element at a time may be allocated from a DMA pool. */ @@ -134,7 +134,7 @@ void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool); * @tre_count: Number of elements in the transaction * @direction: DMA direction for entire SGL (or DMA_NONE) * - * @Return: A GSI transaction structure, or a null pointer if all + * Return: A GSI transaction structure, or a null pointer if all * available transactions are in use */ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id, @@ -175,7 +175,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size, * @trans: Transaction * @skb: Socket buffer for transfer (outbound) * - * @Return: 0, or -EMSGSIZE if socket data won't fit in transaction. + * Return: 0, or -EMSGSIZE if socket data won't fit in transaction. 
*/ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb); diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c index 0fbc8b1bdf41..398f2e47043d 100644 --- a/drivers/net/ipa/ipa_clock.c +++ b/drivers/net/ipa/ipa_clock.c @@ -44,7 +44,7 @@ /** * struct ipa_clock - IPA clocking information * @count: Clocking reference count - * @mutex; Protects clock enable/disable + * @mutex: Protects clock enable/disable * @core: IPA core clock * @memory_path: Memory interconnect * @imem_path: Internal memory interconnect diff --git a/drivers/net/ipa/ipa_clock.h b/drivers/net/ipa/ipa_clock.h index 0f4e2877a6df..1d70f1de3875 100644 --- a/drivers/net/ipa/ipa_clock.h +++ b/drivers/net/ipa/ipa_clock.h @@ -22,7 +22,7 @@ u32 ipa_clock_rate(struct ipa *ipa); * ipa_clock_init() - Initialize IPA clocking * @dev: IPA device * - * @Return: A pointer to an ipa_clock structure, or a pointer-coded error + * Return: A pointer to an ipa_clock structure, or a pointer-coded error */ struct ipa_clock *ipa_clock_init(struct device *dev); diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h index 1a646e0264a0..f7e6f87facf7 100644 --- a/drivers/net/ipa/ipa_cmd.h +++ b/drivers/net/ipa/ipa_cmd.h @@ -61,7 +61,7 @@ struct ipa_cmd_info { * @ipv6: - Whether the table is for IPv6 or IPv4 * @hashed: - Whether the table is hashed or non-hashed * - * @Return: true if region is valid, false otherwise + * Return: true if region is valid, false otherwise */ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route, bool ipv6, bool hashed); @@ -70,7 +70,7 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, * ipa_cmd_data_valid() - Validate command-realted configuration is valid * @ipa: - IPA pointer * - * @Return: true if assumptions required for command are valid + * Return: true if assumptions required for command are valid */ bool ipa_cmd_data_valid(struct ipa *ipa); @@ -95,7 +95,7 @@ static inline bool ipa_cmd_data_valid(struct ipa *ipa) * @channel: AP->IPA command TX GSI channel pointer * @tre_count: Number of pool elements to allocate * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int ipa_cmd_pool_init(struct gsi_channel *gsi_channel, u32 tre_count); @@ -166,7 +166,7 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans); /** * ipa_cmd_tag_process_add_count() - Number of commands in a tag process * - * @Return: The number of elements to allocate in a transaction + * Return: The number of elements to allocate in a transaction * to hold tag process commands */ u32 ipa_cmd_tag_process_count(void); @@ -184,7 +184,7 @@ void ipa_cmd_tag_process(struct ipa *ipa); * @ipa: IPA pointer * @tre_count: Number of elements in the transaction * - * @Return: A GSI transaction structure, or a null pointer if all + * Return: A GSI transaction structure, or a null pointer if all * available transactions are in use */ struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count); diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9de81d85682e..b7efd7c95e9c 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -349,7 +349,7 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) /** * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt - * @endpoint_id: Endpoint on which to emulate a suspend + * @endpoint: Endpoint on which to emulate a suspend * * Emulate suspend IPA interrupt to unsuspend an endpoint suspended * 
with an open aggregation frame. This is to work around a hardware @@ -499,6 +499,9 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) } /** + * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register + * @endpoint: Endpoint pointer + * * We program QMAP endpoints so each packet received is preceded by a QMAP * header structure. The QMAP header contains a 1-byte mux_id and 2-byte * packet size field, and we have the IPA hardware populate both for each @@ -921,6 +924,8 @@ static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) /** * ipa_endpoint_replenish() - Replenish the Rx packets cache. + * @endpoint: Endpoint to be replenished + * @count: Number of buffers to send to hardware * * Allocate RX packet wrapper structures with maximal socket buffers * for an endpoint. These are supplied to the hardware, which fills @@ -1231,7 +1236,7 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa) * on its underlying GSI channel, a special sequence of actions must be * taken to ensure the IPA pipeline is properly cleared. * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint) { diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h index 0a40f3dc55fc..c02cb6f3a2e1 100644 --- a/drivers/net/ipa/ipa_gsi.h +++ b/drivers/net/ipa/ipa_gsi.h @@ -43,9 +43,9 @@ void ipa_gsi_trans_release(struct gsi_trans *trans); */ void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count, u32 byte_count); + /** - * ipa_gsi_trans_complete() - GSI transaction completion callback -ipa_gsi_channel_tx_completed() + * ipa_gsi_channel_tx_completed() - GSI transaction completion callback * @gsi: GSI pointer * @channel_id: Channel number * @count: Number of transactions completed since last report @@ -57,6 +57,15 @@ ipa_gsi_channel_tx_completed() void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count, u32 byte_count); +/* ipa_gsi_endpoint_data_empty() - Empty endpoint config data test + * @data: endpoint configuration data + * + * Determines whether an endpoint configuration data entry is empty, + * meaning it contains no valid configuration information and should + * be ignored. 
+ * + * Return: true if empty; false otherwise + */ bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data); #endif /* _IPA_GSI_TRANS_H_ */ diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h index d4f4c1c9f0b1..727e9c5044d1 100644 --- a/drivers/net/ipa/ipa_interrupt.h +++ b/drivers/net/ipa/ipa_interrupt.h @@ -104,7 +104,7 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt); * ipa_interrupt_setup() - Set up the IPA interrupt framework * @ipa: IPA pointer * - * @Return: Pointer to IPA SMP2P info, or a pointer-coded error + * Return: Pointer to IPA SMP2P info, or a pointer-coded error */ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa); diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 27a869df3a4b..1fdfec41e442 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -277,6 +277,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa, /** * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA + * @ipa: IPA pointer * * Configures when the IPA signals it is idle to the global clock * controller, which can respond by scalling down the clock to @@ -495,6 +496,7 @@ static void ipa_resource_deconfig(struct ipa *ipa) /** * ipa_config() - Configure IPA hardware * @ipa: IPA pointer + * @data: IPA configuration data * * Perform initialization requiring IPA clock to be enabled. */ @@ -686,7 +688,7 @@ static void ipa_validate_build(void) * ipa_probe() - IPA platform driver probe function * @pdev: Platform device pointer * - * @Return: 0 if successful, or a negative error code (possibly + * Return: 0 if successful, or a negative error code (possibly * EPROBE_DEFER) * * This is the main entry point for the IPA driver. Initialization proceeds @@ -902,7 +904,7 @@ static int ipa_remove(struct platform_device *pdev) * ipa_suspend() - Power management system suspend callback * @dev: IPA device structure * - * @Return: Zero + * Return: Always returns zero * * Called by the PM framework when a system suspend operation is invoked. */ @@ -920,7 +922,7 @@ static int ipa_suspend(struct device *dev) * ipa_resume() - Power management system resume callback * @dev: IPA device structure * - * @Return: Always returns 0 + * Return: Always returns 0 * * Called by the PM framework when a system resume operation is invoked. */ diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 3ef814119aab..2d45c444a67f 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -41,6 +41,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem) /** * ipa_mem_setup() - Set up IPA AP and modem shared memory areas + * @ipa: IPA pointer * * Set up the shared memory regions in IPA local memory. This involves * zero-filling memory regions, and in the case of header memory, telling @@ -52,7 +53,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem) * The AP informs the modem where its portions of memory are located * in a QMI exchange that occurs at modem startup. 
* - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int ipa_mem_setup(struct ipa *ipa) { @@ -137,8 +138,9 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id) /** * ipa_mem_config() - Configure IPA shared memory + * @ipa: IPA pointer * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code */ int ipa_mem_config(struct ipa *ipa) { @@ -238,6 +240,7 @@ void ipa_mem_deconfig(struct ipa *ipa) /** * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem + * @ipa: IPA pointer * * Zero regions of IPA-local memory used by the modem. These are configured * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h index 1f65cdc9d406..bf0e4063cfd9 100644 --- a/drivers/net/ipa/ipa_smp2p.h +++ b/drivers/net/ipa/ipa_smp2p.h @@ -15,7 +15,7 @@ struct ipa; * @ipa: IPA pointer * @modem_init: Whether the modem is responsible for GSI initialization * - * @Return: 0 if successful, or a negative error code + * Return: 0 if successful, or a negative error code * */ int ipa_smp2p_init(struct ipa *ipa, bool modem_init); diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 9df2a3e78c98..2098ca2f2c90 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -505,7 +505,7 @@ void ipa_table_teardown(struct ipa *ipa) /** * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple - * @endpoint_id: Endpoint whose filter hash tuple should be zeroed + * @endpoint: Endpoint whose filter hash tuple should be zeroed * * Endpoint must be for the AP (not modem) and support filtering. Updates * the filter hash values without changing route ones. @@ -560,6 +560,7 @@ static bool ipa_route_id_modem(u32 route_id) /** * ipa_route_tuple_zero() - Zero a hashed route table entry tuple + * @ipa: IPA pointer * @route_id: Route table entry whose hash tuple should be zeroed * * Updates the route hash values without changing filter ones. 
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h index 64ea0221441a..78038d14fcea 100644 --- a/drivers/net/ipa/ipa_table.h +++ b/drivers/net/ipa/ipa_table.h @@ -25,7 +25,7 @@ struct ipa; * ipa_table_valid() - Validate route and filter table memory regions * @ipa: IPA pointer - * @Return: true if all regions are valid, false otherwise + * Return: true if all regions are valid, false otherwise */ bool ipa_table_valid(struct ipa *ipa); @@ -33,7 +33,7 @@ bool ipa_table_valid(struct ipa *ipa); * ipa_filter_map_valid() - Validate a filter table endpoint bitmap * @ipa: IPA pointer * - * @Return: true if all regions are valid, false otherwise + * Return: true if all regions are valid, false otherwise */ bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask); diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c index 9f9980ec2ed3..1a0b04e0ab74 100644 --- a/drivers/net/ipa/ipa_uc.c +++ b/drivers/net/ipa/ipa_uc.c @@ -41,19 +41,24 @@ /** * struct ipa_uc_mem_area - AP/microcontroller shared memory area * @command: command code (AP->microcontroller) + * @reserved0: reserved bytes; avoid reading or writing * @command_param: low 32 bits of command parameter (AP->microcontroller) * @command_param_hi: high 32 bits of command parameter (AP->microcontroller) * * @response: response code (microcontroller->AP) + * @reserved1: reserved bytes; avoid reading or writing * @response_param: response parameter (microcontroller->AP) * * @event: event code (microcontroller->AP) + * @reserved2: reserved bytes; avoid reading or writing * @event_param: event parameter (microcontroller->AP) * * @first_error_address: address of first error-source on SNOC * @hw_state: state of hardware (including error type information) * @warning_counter: counter of non-fatal hardware errors + * @reserved3: reserved bytes; avoid reading or writing * @interface_version: hardware-reported interface version + * @reserved4: reserved bytes; avoid reading or writing * * A shared memory area at the base of IPA resident memory is used for * communication with the microcontroller. The region is 128 bytes in From 15e522a7b110777b20612937c4424b5c448e7431 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:14:57 +0200 Subject: [PATCH 0892/2056] net: 9p: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Eric Van Hensbergen Cc: Latchesar Ionkov Cc: Dominique Martinet Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/9p/client.c | 2 +- net/9p/trans_rdma.c | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/net/9p/client.c b/net/9p/client.c index fc1f3635e5dd..09f1ec589b80 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -811,7 +811,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
* @uodata: source for zero copy write * @inlen: read buffer size * @olen: write buffer size - * @hdrlen: reader header size, This is the size of response protocol data + * @in_hdrlen: reader header size, This is the size of response protocol data * @fmt: protocol format string (see protocol.c) * * Returns request structure (which client must free using p9_tag_remove) diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index b21c3c209815..2885ff9c76f0 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c @@ -94,14 +94,15 @@ struct p9_trans_rdma { struct completion cm_done; }; +struct p9_rdma_req; + /** - * p9_rdma_context - Keeps track of in-process WR + * struct p9_rdma_context - Keeps track of in-process WR * * @busa: Bus address to unmap when the WR completes * @req: Keeps track of requests (send) * @rc: Keepts track of replies (receive) */ -struct p9_rdma_req; struct p9_rdma_context { struct ib_cqe cqe; dma_addr_t busa; @@ -112,7 +113,7 @@ struct p9_rdma_context { }; /** - * p9_rdma_opts - Collection of mount options + * struct p9_rdma_opts - Collection of mount options * @port: port of connection * @sq_depth: The requested depth of the SQ. This really doesn't need * to be any deeper than the number of threads used in the client From e0a7f1fe0c60ae48101b0170a8780c1e40ffb94d Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:14:58 +0200 Subject: [PATCH 0893/2056] net: can: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Oliver Hartkopp Cc: Marc Kleine-Budde Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/can/af_can.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/can/af_can.c b/net/can/af_can.c index 128d37a4c2e0..5c06404bdf3e 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c @@ -410,6 +410,7 @@ static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask, /** * can_rx_register - subscribe CAN frames from a specific interface + * @net: the applicable net namespace * @dev: pointer to netdevice (NULL => subcribe from 'all' CAN devices list) * @can_id: CAN identifier (see description) * @mask: CAN mask (see description) @@ -498,6 +499,7 @@ static void can_rx_delete_receiver(struct rcu_head *rp) /** * can_rx_unregister - unsubscribe CAN frames from a specific interface + * @net: the applicable net namespace * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list) * @can_id: CAN identifier * @mask: CAN mask From 8842500dd056b8ef9da77ba2bfc3275fd7e0a98f Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:14:59 +0200 Subject: [PATCH 0894/2056] net: core: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/core/dev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/dev.c b/net/core/dev.c index c02bae927812..eab4ebe3c21c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7908,6 +7908,7 @@ EXPORT_SYMBOL(netdev_bonding_info_change); /** * netdev_get_xmit_slave - Get the xmit slave of master device + * @dev: device * @skb: The packet * @all_slaves: assume all the slaves are active * From d0b1101bb5c1224881bb58460311d458ff1350d0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:00 +0200 Subject: [PATCH 0895/2056] net: dccp: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Gerrit Renker Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/dccp/ccids/lib/packet_history.c | 2 ++ net/dccp/feat.c | 6 ++++++ net/dccp/input.c | 1 + net/dccp/ipv4.c | 2 ++ net/dccp/options.c | 4 ++++ net/dccp/timer.c | 2 ++ 6 files changed, 17 insertions(+) diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c index 0bef57b908fb..af08e2df7108 100644 --- a/net/dccp/ccids/lib/packet_history.c +++ b/net/dccp/ccids/lib/packet_history.c @@ -365,6 +365,7 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h) /** * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against + * @h: The non-empty RX history object */ static inline struct tfrc_rx_hist_entry * tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h) @@ -374,6 +375,7 @@ static inline struct tfrc_rx_hist_entry * /** * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry + * @h: The non-empty RX history object */ static inline struct tfrc_rx_hist_entry * tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 9c3b5e056234..afc071ea1271 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -165,6 +165,8 @@ static const struct { /** * dccp_feat_index - Hash function to map feature number into array position + * @feat_num: feature to hash, one of %dccp_feature_numbers + * * Returns consecutive array index or -1 if the feature is not understood. */ static int dccp_feat_index(u8 feat_num) @@ -567,6 +569,8 @@ int dccp_feat_clone_list(struct list_head const *from, struct list_head *to) /** * dccp_feat_valid_nn_length - Enforce length constraints on NN options + * @feat_num: feature to return length of, one of %dccp_feature_numbers + * * Length is between 0 and %DCCP_OPTVAL_MAXLEN. Used for outgoing packets only, * incoming options are accepted as long as their values are valid. */ @@ -1429,6 +1433,8 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, /** * dccp_feat_init - Seed feature negotiation with host-specific defaults + * @sk: Socket to initialize. + * * This initialises global defaults, depending on the value of the sysctls. * These can later be overridden by registering changes via setsockopt calls. * The last link in the chain is finalise_settings, to make sure that between diff --git a/net/dccp/input.c b/net/dccp/input.c index 6dce68a55964..bd9cfdb67436 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -715,6 +715,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process); /** * dccp_sample_rtt - Validate and finalise computation of RTT sample + * @sk: socket structure * @delta: number of microseconds between packet and acknowledgment * * The routine is kept generic to work in different contexts. It should be diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index d19557c6d04b..a7e989919c53 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -694,6 +694,8 @@ EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); /** * dccp_invalid_packet - check for malformed packets + * @skb: Packet to validate + * * Implements RFC 4340, 8.5: Step 1: Check header basics * Packets that fail these checks are ignored and do not receive Resets. 
*/ diff --git a/net/dccp/options.c b/net/dccp/options.c index 3b42f5c6a63d..daa9eed92646 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -43,6 +43,7 @@ u64 dccp_decode_value_var(const u8 *bf, const u8 len) * dccp_parse_options - Parse DCCP options present in @skb * @sk: client|server|listening dccp socket (when @dreq != NULL) * @dreq: request socket to use during connection setup, or NULL + * @skb: frame to parse */ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, struct sk_buff *skb) @@ -471,6 +472,8 @@ static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) /** * dccp_insert_option_mandatory - Mandatory option (5.8.2) + * @skb: frame into which to insert option + * * Note that since we are using skb_push, this function needs to be called * _after_ inserting the option it is supposed to influence (stack order). */ @@ -486,6 +489,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb) /** * dccp_insert_fn_opt - Insert single Feature-Negotiation option into @skb + * @skb: frame to insert feature negotiation option into * @type: %DCCPO_CHANGE_L, %DCCPO_CHANGE_R, %DCCPO_CONFIRM_L, %DCCPO_CONFIRM_R * @feat: one out of %dccp_feature_numbers * @val: NN value or SP array (preferred element first) to copy diff --git a/net/dccp/timer.c b/net/dccp/timer.c index c0b3672637c4..0e06dfc32273 100644 --- a/net/dccp/timer.c +++ b/net/dccp/timer.c @@ -216,6 +216,8 @@ static void dccp_delack_timer(struct timer_list *t) /** * dccp_write_xmitlet - Workhorse for CCID packet dequeueing interface + * @data: Socket to act on + * * See the comments above %ccid_dequeueing_decision for supported modes. */ static void dccp_write_xmitlet(unsigned long data) From aff53b23a9a74cf6729a6022e5dd22dd698bd9d9 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:01 +0200 Subject: [PATCH 0896/2056] net: decnet: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/decnet/dn_route.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 9eb7e4b62d9b..4cac31d22a50 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -494,6 +494,8 @@ static int dn_return_long(struct sk_buff *skb) /** * dn_route_rx_packet - Try and find a route for an incoming packet + * @net: The applicable net namespace + * @sk: Socket packet transmitted on * @skb: The packet to find a route for * * Returns: result of input function if route is found, error code otherwise From 3628e3cbf9edc8381961394ccd6fb0bf049fdb69 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:02 +0200 Subject: [PATCH 0897/2056] net: ipv4: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Paul Moore Cc: Alexey Kuznetsov Cc: Eric Dumazet Signed-off-by: Andrew Lunn Acked-by: Paul Moore Signed-off-by: David S. 
Miller --- net/ipv4/cipso_ipv4.c | 6 ++++-- net/ipv4/ipmr.c | 3 +++ net/ipv4/tcp_input.c | 1 - net/ipv4/tcp_output.c | 2 ++ net/ipv4/tcp_timer.c | 2 +- net/ipv4/udp.c | 6 +++--- 6 files changed, 13 insertions(+), 7 deletions(-) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 0f1b9065c0a6..2eb71579f4d2 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -283,7 +283,7 @@ static int cipso_v4_cache_check(const unsigned char *key, /** * cipso_v4_cache_add - Add an entry to the CIPSO cache - * @skb: the packet + * @cipso_ptr: pointer to CIPSO IP option * @secattr: the packet's security attributes * * Description: @@ -1535,6 +1535,7 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb) /** * cipso_v4_validate - Validate a CIPSO option + * @skb: the packet * @option: the start of the option, on error it is set to point to the error * * Description: @@ -2066,7 +2067,7 @@ void cipso_v4_sock_delattr(struct sock *sk) /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket - * @reg: the request socket + * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present. @@ -2158,6 +2159,7 @@ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet + * @doi_def: the DOI structure * @secattr: the security attributes * * Description: diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index f5c7a58844a4..678639c01e48 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -636,7 +636,10 @@ static int call_ipmr_mfc_entry_notifiers(struct net *net, /** * vif_delete - Delete a VIF entry + * @mrt: Table to delete from + * @vifi: VIF identifier to delete * @notify: Set to 1, if the caller is a notifier_call + * @head: if unregistering the VIF, place it on this queue */ static int vif_delete(struct mr_table *mrt, int vifi, int notify, struct list_head *head) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 61c808864f6b..b03ca68d4111 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4448,7 +4448,6 @@ static void tcp_sack_remove(struct tcp_sock *tp) /** * tcp_try_coalesce - try to merge skb to prior one * @sk: socket - * @dest: destination queue * @to: prior buffer * @from: buffer to add in queue * @fragstolen: pointer to boolean diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 2d563efcee0d..dc0117013ba5 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3337,6 +3337,8 @@ int tcp_send_synack(struct sock *sk) * sk: listener socket * dst: dst entry attached to the SYNACK * req: request_sock pointer + * foc: cookie for tcp fast open + * synack_type: Type of synback to prepare * * Allocate one skb and build a SYNACK packet. * @dst is consumed : Caller should not use it again. diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ada046f425d2..0c08c420fbc2 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -314,7 +314,7 @@ void tcp_delack_timer_handler(struct sock *sk) /** * tcp_delack_timer() - The TCP delayed ACK timeout handler - * @data: Pointer to the current socket. (gets casted to struct sock *) + * @t: Pointer to the timer. (gets casted to struct sock *) * * This function gets (indirectly) called when the kernel timer for a TCP packet * of this socket expires. Calls tcp_delack_timer_handler() to do the actual work. 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 31530129f137..073d346f515c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2743,9 +2743,9 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname, #endif /** * udp_poll - wait for a UDP event. - * @file - file struct - * @sock - socket - * @wait - poll table + * @file: - file struct + * @sock: - socket + * @wait: - poll table * * This is same as datagram poll, except for the special case of * blocking sockets. If application is using a blocking fd From b51cd7c834dba0ec9300337e16e5aa5bf65bd04c Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:03 +0200 Subject: [PATCH 0898/2056] net: ipv6: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Alexey Kuznetsov Cc: Hideaki YOSHIFUJI Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/ipv6/exthdrs.c | 1 - net/ipv6/ip6_output.c | 6 ++++-- net/ipv6/ip6_tunnel.c | 10 ++++++---- net/ipv6/udp.c | 3 +++ 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index e9b366994475..374105e4394f 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -1232,7 +1232,6 @@ static void ipv6_renew_option(int renewtype, * @opt: original options * @newtype: option type to replace in @opt * @newopt: new option of type @newtype to replace (user-mem) - * @newoptlen: length of @newopt * * Returns a new set of options which is a copy of @opt with the * option type @newtype replaced with @newopt. diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8a8c2d0cfcc8..c78e67d7747f 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1118,6 +1118,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, /** * ip6_dst_lookup - perform route lookup on flow + * @net: Network namespace to perform lookup in * @sk: socket which provides route info * @dst: pointer to dst_entry * for result * @fl6: flow to lookup @@ -1136,6 +1137,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup); /** * ip6_dst_lookup_flow - perform route lookup on flow with ipsec + * @net: Network namespace to perform lookup in * @sk: socket which provides route info * @fl6: flow to lookup * @final_dst: final destination address for ipsec lookup @@ -1202,11 +1204,11 @@ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); * @skb: Packet for which lookup is done * @dev: Tunnel device * @net: Network namespace of tunnel device - * @sk: Socket which provides route info + * @sock: Socket which provides route info * @saddr: Memory to store the src ip address * @info: Tunnel information * @protocol: IP protocol - * @use_cahce: Flag to enable cache usage + * @use_cache: Flag to enable cache usage * This function performs a route lookup on a tunnel * * It returns a valid dst pointer and stores src address to be used in diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a18c378ca5f4..f635914f42ec 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -124,8 +124,12 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) return &dev->stats; } +#define for_each_ip6_tunnel_rcu(start) \ + for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) + /** * ip6_tnl_lookup - fetch tunnel matching the end-point addresses + * @net: network namespace * @link: ifindex of underlying interface * @remote: the address of the tunnel exit-point * @local: the address of the tunnel entry-point @@ -136,9 +140,6 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev) * else %NULL **/ 
-#define for_each_ip6_tunnel_rcu(start) \ - for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) - static struct ip6_tnl * ip6_tnl_lookup(struct net *net, int link, const struct in6_addr *remote, const struct in6_addr *local) @@ -302,8 +303,8 @@ static int ip6_tnl_create2(struct net_device *dev) /** * ip6_tnl_create - create a new tunnel + * @net: network namespace * @p: tunnel parameters - * @pt: pointer to new tunnel * * Description: * Create tunnel matching given parameters. @@ -351,6 +352,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) /** * ip6_tnl_locate - find or create tunnel matching given parameters + * @net: network namespace * @p: tunnel parameters * @create: != 0 if allowed to create new tunnel if no match found * diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7d4151747340..38c0d9350c6b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1059,6 +1059,9 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr, * @sk: socket we are sending on * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) + * @saddr: source address + * @daddr: destination address + * @len: length of packet */ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb, const struct in6_addr *saddr, From 74c950c966c180c599d0af3043ab8959e74493d5 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:04 +0200 Subject: [PATCH 0899/2056] net: llc: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/llc/af_llc.c | 1 - net/llc/llc_conn.c | 7 ++++--- net/llc/llc_input.c | 1 + net/llc/llc_pdu.c | 2 +- net/llc/llc_sap.c | 3 +++ 5 files changed, 9 insertions(+), 5 deletions(-) diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6e53e43c1907..6140a3e46c26 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -984,7 +984,6 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) * llc_ui_getname - return the address info of a socket * @sock: Socket to get address of. * @uaddr: Address structure to return information. - * @uaddrlen: Length of address structure. * @peer: Does user want local or remote address information. * * Return the address information of a socket. diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 7b620acaca9e..1144cda2a0fc 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -284,8 +284,8 @@ out:; /** * llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue * @sk: active connection - * nr: NR - * how_many_unacked: size of pdu_unack_q after removing acked pdus + * @nr: NR + * @how_many_unacked: size of pdu_unack_q after removing acked pdus * * Removes acknowledged pdus from transmit queue (pdu_unack_q). Returns * the number of pdus that removed from queue. 
@@ -906,6 +906,7 @@ static void llc_sk_init(struct sock *sk) /** * llc_sk_alloc - Allocates LLC sock + * @net: network namespace * @family: upper layer protocol family * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) * @@ -951,7 +952,7 @@ void llc_sk_stop_all_timers(struct sock *sk, bool sync) /** * llc_sk_free - Frees a LLC socket - * @sk - socket to free + * @sk: - socket to free * * Frees a LLC socket */ diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 82cb93f66b9b..c309b72a5877 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -144,6 +144,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb) * @skb: received pdu * @dev: device that receive pdu * @pt: packet type + * @orig_dev: the original receive net device * * When the system receives a 802.2 frame this function is called. It * checks SAP and connection of received pdu and passes frame to diff --git a/net/llc/llc_pdu.c b/net/llc/llc_pdu.c index 2e6cb79196bb..792d195c8bae 100644 --- a/net/llc/llc_pdu.c +++ b/net/llc/llc_pdu.c @@ -25,7 +25,7 @@ void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type) /** * pdu_set_pf_bit - sets poll/final bit in LLC header - * @pdu_frame: input frame that p/f bit must be set into it. + * @skb: Frame to set bit in * @bit_value: poll/final bit (0 or 1). * * This function sets poll/final bit in LLC header (based on type of PDU). diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index be419062e19a..6805ce43a055 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -37,6 +37,7 @@ static int llc_mac_header_len(unsigned short devtype) /** * llc_alloc_frame - allocates sk_buff for frame + * @sk: socket to allocate frame to * @dev: network device this skb will be sent over * @type: pdu type to allocate * @data_size: data size to allocate @@ -273,6 +274,7 @@ void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb, * llc_sap_rcv - sends received pdus to the sap state machine * @sap: current sap component structure. * @skb: received frame. + * @sk: socket to associate to frame * * Sends received pdus to the sap state machine. */ @@ -379,6 +381,7 @@ static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb, * llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets. * @sap: SAP * @laddr: address of local LLC (MAC + SAP) + * @skb: PDU to deliver * * Search socket list of the SAP and finds connections with same sap. * Deliver clone to each. From 9fd00b4d0eccdb859ba8a0c3228fadfb081441a0 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:05 +0200 Subject: [PATCH 0900/2056] net: mac80211: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Johannes Berg Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/mac80211/mesh_pathtbl.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 117519bf33d6..fe4e853c61f4 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -72,7 +72,6 @@ static void mesh_table_free(struct mesh_table *tbl) } /** - * * mesh_path_assign_nexthop - update mesh path next hop * * @mpath: mesh path to update @@ -140,7 +139,6 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, } /** - * * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another * * This function is used to transfer or copy frames from an unresolved mpath to @@ -152,7 +150,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr, * * The gate mpath must be an active mpath with a valid mpath->next_hop. * - * @mpath: An active mpath the frames will be sent to (i.e. the gate) + * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate) * @from_mpath: The failed mpath * @copy: When true, copy all the frames to the new mpath queue. When false, * move them. From 3db86c397f608b58b842d7f0f3d7ce305b46e6ad Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:06 +0200 Subject: [PATCH 0901/2056] net: netfilter: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Pablo Neira Ayuso Cc: Jozsef Kadlecsik Cc: Florian Westphal Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/netfilter/nf_conntrack_core.c | 2 +- net/netfilter/nf_tables_api.c | 8 ++++---- net/netfilter/nft_set_pipapo.c | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f33d72c5b06e..358108ff0833 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1006,7 +1006,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx) * * @skb: skb that causes the clash * @h: tuplehash of the clashing entry already in table - * @hash_reply: hash slot for reply direction + * @reply_hash: hash slot for reply direction * * A conntrack entry can be inserted to the connection tracking table * if there is no existing entry with an identical tuple. diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index f96785586f64..6708a4f2eec8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2375,7 +2375,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, /** * nft_register_expr - register nf_tables expr type - * @ops: expr type + * @type: expr type * * Registers the expr type for use with nf_tables. Returns zero on * success or a negative errno code otherwise. @@ -2394,7 +2394,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr); /** * nft_unregister_expr - unregister nf_tables expr type - * @ops: expr type + * @type: expr type * * Unregisters the expr typefor use with nf_tables. */ @@ -5595,7 +5595,7 @@ struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set, /** * nft_register_obj- register nf_tables stateful object type - * @obj: object type + * @obj_type: object type * * Registers the object type for use with nf_tables. Returns zero on * success or a negative errno code otherwise. @@ -5614,7 +5614,7 @@ EXPORT_SYMBOL_GPL(nft_register_obj); /** * nft_unregister_obj - unregister nf_tables object type - * @obj: object type + * @obj_type: object type * * Unregisters the object type for use with nf_tables. 
*/ diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 313de1d73168..cc6082a5f7ad 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -401,7 +401,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst, * nft_pipapo_lookup() - Lookup function * @net: Network namespace * @set: nftables API set representation - * @elem: nftables API element representation containing key data + * @key: nftables API element representation containing key data * @ext: nftables API extension pointer, filled with matching reference * * For more details, see DOC: Theory of Operation. @@ -1075,7 +1075,7 @@ static int pipapo_expand(struct nft_pipapo_field *f, * @m: Matching data, including mapping table * @map: Table of rule maps: array of first rule and amount of rules * in next field a given rule maps to, for each field - * @ext: For last field, nft_set_ext pointer matching rules map to + * @e: For last field, nft_set_ext pointer matching rules map to */ static void pipapo_map(struct nft_pipapo_match *m, union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS], @@ -1099,7 +1099,7 @@ static void pipapo_map(struct nft_pipapo_match *m, /** * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results * @clone: Copy of matching data with pending insertions and deletions - * @bsize_max Maximum bucket size, scratch maps cover two buckets + * @bsize_max: Maximum bucket size, scratch maps cover two buckets * * Return: 0 on success, -ENOMEM on failure. */ @@ -1447,7 +1447,7 @@ static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules, /** * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map * @m: Matching data - * @rulemap Table of rule maps, arrays of first rule and amount of rules + * @rulemap: Table of rule maps, arrays of first rule and amount of rules * in next field a given entry maps to, for each field * * For each rule in lookup table buckets mapping to this set of rules, drop From 26c3baaa0956ffabc1e3ede7f6933bd0dc9cb373 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:07 +0200 Subject: [PATCH 0902/2056] net: netlabel: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Paul Moore Signed-off-by: Andrew Lunn Acked-by: Paul Moore Signed-off-by: David S. Miller --- net/netlabel/netlabel_domainhash.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index a1f2320ecc16..d07de2c0fbc7 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -92,7 +92,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry) /** * netlbl_domhsh_hash - Hashing function for the domain hash table - * @domain: the domain name to hash + * @key: the domain name to hash * * Description: * This is the hashing function for the domain hash table, it returns the From ffbab1c93b52b3405183eb3643664d8a2e9b38ea Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:08 +0200 Subject: [PATCH 0903/2056] net: nfc: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/nfc/core.c | 3 +-- net/nfc/nci/core.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/net/nfc/core.c b/net/nfc/core.c index c5f9c3ee82f8..eb377f87bcae 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -704,7 +704,6 @@ EXPORT_SYMBOL(nfc_tm_deactivated); * nfc_alloc_send_skb - allocate a skb for data exchange responses * * @size: size to allocate - * @gfp: gfp flags */ struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk, unsigned int flags, unsigned int size, @@ -749,7 +748,7 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb); * * @dev: The nfc device that found the targets * @targets: array of nfc targets found - * @ntargets: targets array size + * @n_targets: targets array size * * The device driver must call this function when one or many nfc targets * are found. After calling this function, the device driver must stop diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index 7cd524884304..f7b7dc5fe84a 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1182,7 +1182,7 @@ EXPORT_SYMBOL(nci_free_device); /** * nci_register_device - register a nci device in the nfc subsystem * - * @dev: The nci device to register + * @ndev: The nci device to register */ int nci_register_device(struct nci_dev *ndev) { @@ -1246,7 +1246,7 @@ EXPORT_SYMBOL(nci_register_device); /** * nci_unregister_device - unregister a nci device in the nfc subsystem * - * @dev: The nci device to unregister + * @ndev: The nci device to unregister */ void nci_unregister_device(struct nci_dev *ndev) { From 966785142342f473015f92be9e62554a29105599 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:09 +0200 Subject: [PATCH 0904/2056] net: openvswitch: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Pravin B Shelar Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/openvswitch/flow_netlink.c | 6 +++--- net/openvswitch/vport.c | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 79252d4887ff..9d3e50c4d29f 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1763,11 +1763,11 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val) * does not include any don't care bit. * @net: Used to determine per-namespace field support. * @match: receives the extracted flow match information. - * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute + * @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. The fields should of the packet that triggered the creation * of this flow. - * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink - * attribute specifies the mask field of the wildcarded flow. + * @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* + * Netlink attribute specifies the mask field of the wildcarded flow. * @log: Boolean to allow kernel error logging. Normally true, but when * probing for feature compatibility this should be passed in as false to * suppress unnecessary error logging. diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 47febb4504f0..0d44c5c013fa 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -87,6 +87,7 @@ EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister); /** * ovs_vport_locate - find a port that has already been created * + * @net: network namespace * @name: name of port to find * * Must be called with ovs or RCU read lock. 
@@ -418,7 +419,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb) * * @vport: vport that received the packet * @skb: skb that was received - * @tun_key: tunnel (if any) that carried packet + * @tun_info: tunnel (if any) that carried packet * * Must be called with rcu_read_lock. The packet cannot be shared and * skb->data should point to the Ethernet header. From 76f2fe73c5b888fd009aa16efad9a6b563a1aa1f Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:10 +0200 Subject: [PATCH 0905/2056] net: rxrpc: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: David Howells Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/rxrpc/af_rxrpc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 394189b81849..cd7d0d204c74 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -267,7 +267,7 @@ static int rxrpc_listen(struct socket *sock, int backlog) * @gfp: The allocation constraints * @notify_rx: Where to send notifications instead of socket queue * @upgrade: Request service upgrade for call - * @intr: The call is interruptible + * @interruptibility: The call is interruptible, or can be canceled. * @debug_id: The debug ID for tracing to be assigned to the call * * Allow a kernel service to begin a call on the nominated socket. This just From 90ac5d0301db7cefc4129f89a858f66913a1c8f4 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:11 +0200 Subject: [PATCH 0906/2056] net: sched: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Jamal Hadi Salim Cc: Cong Wang Cc: Jiri Pirko Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/sched/em_canid.c | 1 + net/sched/ematch.c | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c index b9a94fdf9397..5ea84decec19 100644 --- a/net/sched/em_canid.c +++ b/net/sched/em_canid.c @@ -40,6 +40,7 @@ struct canid_match { /** * em_canid_get_id() - Extracts Can ID out of the sk_buff structure. + * @skb: buffer to extract Can ID from */ static canid_t em_canid_get_id(struct sk_buff *skb) { diff --git a/net/sched/ematch.c b/net/sched/ematch.c index dd3b8c11a2e0..f885bea5b452 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -389,7 +389,6 @@ EXPORT_SYMBOL(tcf_em_tree_validate); /** * tcf_em_tree_destroy - destroy an ematch tree * - * @tp: classifier kind handle * @tree: ematch tree to be deleted * * This functions destroys an ematch tree previously created by @@ -425,7 +424,7 @@ EXPORT_SYMBOL(tcf_em_tree_destroy); * tcf_em_tree_dump - dump ematch tree into a rtnl message * * @skb: skb holding the rtnl message - * @t: ematch tree to be dumped + * @tree: ematch tree to be dumped * @tlv: TLV type to be used to encapsulate the tree * * This function dumps a ematch tree into a rtnl message. It is valid to From 9a8ad9ac810aaddbb805c048511624a4972b24cc Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:12 +0200 Subject: [PATCH 0907/2056] net: socket: Move kerneldoc next to function it documents Fix the warning "Function parameter or member 'inode' not described in '__sock_release'' due to the kerneldoc being placed before __sock_release() not sock_release(), which does not take an inode parameter. Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/socket.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/net/socket.c b/net/socket.c index d87812a9ed4b..770503c4ca76 100644 --- a/net/socket.c +++ b/net/socket.c @@ -586,15 +586,6 @@ struct socket *sock_alloc(void) } EXPORT_SYMBOL(sock_alloc); -/** - * sock_release - close a socket - * @sock: socket to close - * - * The socket is released from the protocol stack if it has a release - * callback, and the inode is then released if the socket is bound to - * an inode not a file. - */ - static void __sock_release(struct socket *sock, struct inode *inode) { if (sock->ops) { @@ -620,6 +611,14 @@ static void __sock_release(struct socket *sock, struct inode *inode) sock->file = NULL; } +/** + * sock_release - close a socket + * @sock: socket to close + * + * The socket is released from the protocol stack if it has a release + * callback, and the inode is then released if the socket is bound to + * an inode not a file. + */ void sock_release(struct socket *sock) { __sock_release(sock, NULL); From c8af73f0b23b75468e0f511287d15bf3051e88c8 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:13 +0200 Subject: [PATCH 0908/2056] net: switchdev: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Jiri Pirko Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/switchdev/switchdev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index f25604d68337..865f3e037425 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -304,8 +304,8 @@ static int switchdev_port_obj_add_defer(struct net_device *dev, * switchdev_port_obj_add - Add port object * * @dev: port device - * @id: object ID * @obj: object to add + * @extack: netlink extended ack * * Use a 2-phase prepare-commit transaction model to ensure * system is not left in a partially updated state due to @@ -357,7 +357,6 @@ static int switchdev_port_obj_del_defer(struct net_device *dev, * switchdev_port_obj_del - Delete port object * * @dev: port device - * @id: object ID * @obj: object to delete * * rtnl_lock must be held and must not be in atomic section, From d8141208b032eaee0efeacaadf1734f65db73ac5 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:14 +0200 Subject: [PATCH 0909/2056] net: tipc: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Jon Maloy Cc: Ying Xue Signed-off-by: Andrew Lunn Acked-by: Jon Maloy Signed-off-by: David S. 
Miller --- net/tipc/bearer.c | 2 +- net/tipc/discover.c | 5 ++--- net/tipc/link.c | 6 +++--- net/tipc/msg.c | 2 +- net/tipc/node.c | 4 ++-- net/tipc/socket.c | 8 +++----- net/tipc/udp_media.c | 2 +- 7 files changed, 13 insertions(+), 16 deletions(-) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index e366ec9a7e4d..808b147df7d5 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -595,7 +595,7 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, /** * tipc_l2_rcv_msg - handle incoming TIPC message from an interface - * @buf: the received packet + * @skb: the received message * @dev: the net device that the packet was received on * @pt: the packet_type structure which was used to register this handler * @orig_dev: the original receive net device in case the device is a bond diff --git a/net/tipc/discover.c b/net/tipc/discover.c index bfe43da127c0..d4ecacddb40c 100644 --- a/net/tipc/discover.c +++ b/net/tipc/discover.c @@ -74,7 +74,7 @@ struct tipc_discoverer { /** * tipc_disc_init_msg - initialize a link setup message * @net: the applicable net namespace - * @type: message type (request or response) + * @mtyp: message type (request or response) * @b: ptr to bearer issuing message */ static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb, @@ -339,7 +339,7 @@ static void tipc_disc_timeout(struct timer_list *t) * @net: the applicable net namespace * @b: ptr to bearer issuing requests * @dest: destination address for request messages - * @dest_domain: network domain to which links can be established + * @skb: pointer to created frame * * Returns 0 if successful, otherwise -errno. */ @@ -393,7 +393,6 @@ void tipc_disc_delete(struct tipc_discoverer *d) * tipc_disc_reset - reset object to send periodic link setup requests * @net: the applicable net namespace * @b: ptr to bearer issuing requests - * @dest_domain: network domain to which links can be established */ void tipc_disc_reset(struct net *net, struct tipc_bearer *b) { diff --git a/net/tipc/link.c b/net/tipc/link.c index f1d9c33dae72..d46d4ee5c4fd 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -445,7 +445,7 @@ u32 tipc_link_state(struct tipc_link *l) /** * tipc_link_create - create a new link - * @n: pointer to associated node + * @net: pointer to associated network namespace * @if_name: associated interface name * @bearer_id: id (index) of associated bearer * @tolerance: link tolerance to be used by link @@ -530,7 +530,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, /** * tipc_link_bc_create - create new link to be used for broadcast - * @n: pointer to associated node + * @net: pointer to associated network namespace * @mtu: mtu to be used initially if no peers * @window: send window to be used * @inputq: queue to put messages ready for delivery @@ -989,7 +989,7 @@ void tipc_link_reset(struct tipc_link *l) /** * tipc_link_xmit(): enqueue buffer list according to queue situation - * @link: link to use + * @l: link to use * @list: chain of buffers containing message * @xmitq: returned list of packets to be sent by caller * diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 01b64869a173..848fae674532 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -202,7 +202,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) /** * tipc_msg_append(): Append data to tail of an existing buffer queue - * @hdr: header to be used + * @_hdr: header to be used * @m: the data to be appended * @mss: max allowable size of buffer * @dlen: size of data to be appended diff --git 
a/net/tipc/node.c b/net/tipc/node.c index 030a51c4d1fa..4edcee3088da 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1515,7 +1515,7 @@ static void node_lost_contact(struct tipc_node *n, * tipc_node_get_linkname - get the name of a link * * @bearer_id: id of the bearer - * @node: peer node address + * @addr: peer node address * @linkname: link name output buffer * * Returns 0 on success @@ -2022,7 +2022,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb, * tipc_rcv - process TIPC packets/messages arriving from off-node * @net: the applicable net namespace * @skb: TIPC packet - * @bearer: pointer to bearer message arrived on + * @b: pointer to bearer message arrived on * * Invoked with no locks held. Bearer pointer must point to a valid bearer * structure (i.e. cannot be NULL), but bearer can be inactive. diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a94f38333698..fc388cef6471 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -711,7 +711,6 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr, * tipc_getname - get port ID of socket or peer socket * @sock: socket structure * @uaddr: area for returned socket address - * @uaddr_len: area for returned length of socket address * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID * * Returns 0 on success, errno otherwise @@ -1053,7 +1052,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, /** * tipc_send_group_bcast - send message to all members in communication group - * @sk: socket structure + * @sock: socket structure * @m: message to send * @dlen: total length of message data * @timeout: timeout to wait for wakeup @@ -1673,7 +1672,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, /** * tipc_sk_set_orig_addr - capture sender's address for received message * @m: descriptor for message info - * @hdr: received message header + * @skb: received message * * Note: Address is not captured if not requested by receiver. */ @@ -2095,7 +2094,6 @@ static void tipc_write_space(struct sock *sk) /** * tipc_data_ready - wake up threads to indicate messages have been received * @sk: socket - * @len: the length of messages */ static void tipc_data_ready(struct sock *sk) { @@ -2677,7 +2675,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) /** * tipc_accept - wait for connection request * @sock: listening socket - * @newsock: new socket that is to be connected + * @new_sock: new socket that is to be connected * @flags: file-related flags associated with socket * * Returns 0 on success, errno otherwise diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 28a283f26a8d..d91b7c543e39 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -565,7 +565,7 @@ int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b) /** * tipc_parse_udp_addr - build udp media address from netlink data - * @nlattr: netlink attribute containing sockaddr storage aligned address + * @nla: netlink attribute containing sockaddr storage aligned address * @addr: tipc media address to fill with address, port and protocol type * @scope_id: IPv6 scope id pointer, not NULL indicates it's required */ From 726e6af9af44b39cb0860667095fbce3a489ef42 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:15 +0200 Subject: [PATCH 0910/2056] net: wireless: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Johannes Berg Signed-off-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/wireless/reg.c | 4 +++- net/wireless/wext-compat.c | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 0d74a31ef0ab..35b8847a2f6d 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -2384,7 +2384,7 @@ static void reg_set_request_processed(void) /** * reg_process_hint_core - process core regulatory requests - * @pending_request: a pending core regulatory request + * @core_request: a pending core regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by the regulatory core. @@ -2493,6 +2493,7 @@ __reg_process_hint_driver(struct regulatory_request *driver_request) /** * reg_process_hint_driver - process driver regulatory requests + * @wiphy: the wireless device for the regulatory request * @driver_request: a pending driver regulatory request * * The wireless subsystem can use this function to process @@ -2593,6 +2594,7 @@ __reg_process_hint_country_ie(struct wiphy *wiphy, /** * reg_process_hint_country_ie - process regulatory requests from country IEs + * @wiphy: the wireless device for the regulatory request * @country_ie_request: a regulatory request from a country IE * * The wireless subsystem can use this function to process diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index cac9e28d852b..aa918d7ff6bd 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c @@ -220,7 +220,6 @@ EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange); /** * cfg80211_wext_freq - get wext frequency for non-"auto" - * @dev: the net device * @freq: the wext freq encoding * * Returns a frequency, or a negative error code, or 0 for auto. From 62c89238b182101dab0ae106f4c68061512c41f5 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Mon, 13 Jul 2020 01:15:16 +0200 Subject: [PATCH 0911/2056] net: x25: kerneldoc fixes Simple fixes which require no deep knowledge of the code. Cc: Andrew Hendry Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- net/x25/x25_link.c | 2 +- net/x25/x25_route.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 7d02532aad0d..fdae054b7dc1 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -270,7 +270,7 @@ void x25_link_device_up(struct net_device *dev) /** * __x25_remove_neigh - remove neighbour from x25_neigh_list - * @nb - neigh to remove + * @nb: - neigh to remove * * Remove neighbour from x25_neigh_list. If it was there. * Caller must hold x25_neigh_list_lock. diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c index b8e94d58d0f1..00e46c9a5280 100644 --- a/net/x25/x25_route.c +++ b/net/x25/x25_route.c @@ -142,7 +142,7 @@ struct net_device *x25_dev_get(char *devname) /** * x25_get_route - Find a route given an X.25 address. - * @addr - address to find a route for + * @addr: - address to find a route for * * Find a route given an X.25 address. */ From c40f4e50b6cfc7c66f69d12c6b3fbcd954f1ded5 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:03 +0300 Subject: [PATCH 0912/2056] net: sched: Pass qdisc reference in struct flow_block_offload Previously, shared blocks were only relevant for the pseudo-qdiscs ingress and clsact. Recently, a qevent facility was introduced, which allows to bind blocks to well-defined slots of a qdisc instance. RED in particular got two qevents: early_drop and mark. Drivers that wish to offload these blocks will be sent the usual notification, and need to know which qdisc it is related to. 
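For illustration only (not part of the upstream change description): a driver-side indirect block callback under the extended interface described next could look roughly like the sketch below. The foo_* names and struct foo_priv are hypothetical stand-ins; the parameter list mirrors the driver callbacks (bnxt, mlx5, nfp) updated further down in this patch.

/* Sketch only: foo_setup_indr_cb(), foo_setup_indr_block() and struct foo_priv
 * are hypothetical stand-ins for a driver's own indirect-block callback,
 * block-setup helper and private data.
 */
static int foo_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch,
			     void *cb_priv, enum tc_setup_type type,
			     void *type_data, void *data,
			     void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct foo_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_BLOCK:
		/* The owning qdisc is now passed alongside the netdevice, so a
		 * block bound to a qevent (e.g. RED early_drop or mark) can be
		 * associated with the right qdisc instance, not just the port.
		 */
		return foo_setup_indr_block(netdev, sch, priv, type_data,
					    data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
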
To that end, extend flow_block_offload with a "sch" pointer, and initialize as appropriate. This prompts changes in the indirect block facility, which now tracks the scheduler in addition to the netdevice. Update signatures of several functions similarly. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 9 ++++----- .../net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 10 +++++----- drivers/net/ethernet/netronome/nfp/flower/main.h | 2 +- .../net/ethernet/netronome/nfp/flower/offload.c | 8 ++++---- include/net/flow_offload.h | 9 ++++++--- net/core/flow_offload.c | 12 +++++++----- net/netfilter/nf_flow_table_offload.c | 2 +- net/netfilter/nf_tables_offload.c | 2 +- net/sched/cls_api.c | 16 +++++++++------- 9 files changed, 38 insertions(+), 32 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0a9a4467d7c7..e82e5cf64d61 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -1888,7 +1888,7 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv) kfree(priv); } -static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, +static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp, struct flow_block_offload *f, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -1911,7 +1911,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp, block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb, cb_priv, cb_priv, bnxt_tc_setup_indr_rel, f, - netdev, data, bp, cleanup); + netdev, sch, data, bp, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1946,7 +1946,7 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev) return netif_is_vxlan(netdev); } -static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, +static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -1956,8 +1956,7 @@ static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: - return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data, data, - cleanup); + return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup); default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index ece8f535ce80..f8af109d34cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -404,7 +404,7 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv) static LIST_HEAD(mlx5e_block_cb_list); static int -mlx5e_rep_indr_setup_block(struct net_device *netdev, +mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, struct mlx5e_rep_priv *rpriv, struct flow_block_offload *f, flow_setup_cb_t *setup_cb, @@ -442,7 +442,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv, mlx5e_rep_indr_block_unbind, - f, netdev, data, rpriv, + f, netdev, sch, data, rpriv, cleanup); if (IS_ERR(block_cb)) { list_del(&indr_priv->list); @@ -472,18 +472,18 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, } static -int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv, +int 
mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { switch (type) { case TC_SETUP_BLOCK: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, mlx5e_rep_indr_setup_tc_cb, data, cleanup); case TC_SETUP_FT: - return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data, + return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data, mlx5e_rep_indr_setup_ft_cb, data, cleanup); default: diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 7f54a620acad..3bf9c1afa45e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -458,7 +458,7 @@ void nfp_flower_qos_cleanup(struct nfp_app *app); int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_matchall_offload *flow); void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb); -int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, +int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)); diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3af27bb5f4b0..4651fe417b7f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -1646,7 +1646,7 @@ void nfp_flower_setup_indr_tc_release(void *cb_priv) } static int -nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app, struct flow_block_offload *f, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -1680,7 +1680,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, cb_priv, cb_priv, nfp_flower_setup_indr_tc_release, - f, netdev, data, app, cleanup); + f, netdev, sch, data, app, cleanup); if (IS_ERR(block_cb)) { list_del(&cb_priv->list); kfree(cb_priv); @@ -1711,7 +1711,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, } int -nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -1721,7 +1721,7 @@ nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, switch (type) { case TC_SETUP_BLOCK: - return nfp_flower_setup_indr_tc_block(netdev, cb_priv, + return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv, type_data, data, cleanup); default: return -EOPNOTSUPP; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index de395498440d..9f88a7b730a8 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -444,6 +444,7 @@ struct flow_block_offload { struct list_head cb_list; struct list_head *driver_block_list; struct netlink_ext_ack *extack; + struct Qdisc *sch; }; enum tc_setup_type; @@ -455,6 +456,7 @@ struct flow_block_cb; struct flow_block_indr { struct list_head list; struct net_device 
*dev; + struct Qdisc *sch; enum flow_block_binder_type binder_type; void *data; void *cb_priv; @@ -479,7 +481,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, + struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)); void flow_block_cb_free(struct flow_block_cb *block_cb); @@ -553,7 +556,7 @@ static inline void flow_block_init(struct flow_block *flow_block) INIT_LIST_HEAD(&flow_block->cb_list); } -typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, +typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, void *data, void (*cleanup)(struct flow_block_cb *block_cb)); @@ -561,7 +564,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, void (*release)(void *cb_priv)); -int flow_indr_dev_setup_offload(struct net_device *dev, +int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index b739cfab796e..b8cf6ff5f961 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -429,7 +429,7 @@ EXPORT_SYMBOL(flow_indr_dev_unregister); static void flow_block_indr_init(struct flow_block_cb *flow_block, struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, struct Qdisc *sch, void *data, void *cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -437,6 +437,7 @@ static void flow_block_indr_init(struct flow_block_cb *flow_block, flow_block->indr.data = data; flow_block->indr.cb_priv = cb_priv; flow_block->indr.dev = dev; + flow_block->indr.sch = sch; flow_block->indr.cleanup = cleanup; } @@ -444,7 +445,8 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, void *cb_ident, void *cb_priv, void (*release)(void *cb_priv), struct flow_block_offload *bo, - struct net_device *dev, void *data, + struct net_device *dev, + struct Qdisc *sch, void *data, void *indr_cb_priv, void (*cleanup)(struct flow_block_cb *block_cb)) { @@ -454,7 +456,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, if (IS_ERR(block_cb)) goto out; - flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup); + flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup); list_add(&block_cb->indr.list, &flow_block_indr_list); out: @@ -462,7 +464,7 @@ struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb, } EXPORT_SYMBOL(flow_indr_block_cb_alloc); -int flow_indr_dev_setup_offload(struct net_device *dev, +int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)) @@ -471,7 +473,7 @@ int flow_indr_dev_setup_offload(struct net_device *dev, mutex_lock(&flow_indr_block_lock); list_for_each_entry(this, &flow_block_indr_dev_list, list) - this->cb(dev, this->cb_priv, type, bo, data, cleanup); + this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup); mutex_unlock(&flow_indr_block_lock); 
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 5fff1e040168..2a6993fa40d7 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -964,7 +964,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo, nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable, extack); - return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo, + return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo, nf_flow_table_indr_cleanup); } diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index c7cf1cde46de..9ef37c1b7b3b 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -312,7 +312,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain, nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack); - err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo, + err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo, nft_indr_block_cleanup); if (err < 0) return err; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e617f3e27ec0..322b279154de 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -622,7 +622,7 @@ static int tcf_block_setup(struct tcf_block *block, struct flow_block_offload *bo); static void tcf_block_offload_init(struct flow_block_offload *bo, - struct net_device *dev, + struct net_device *dev, struct Qdisc *sch, enum flow_block_command command, enum flow_block_binder_type binder_type, struct flow_block *flow_block, @@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo, bo->block = flow_block; bo->block_shared = shared; bo->extack = extack; + bo->sch = sch; INIT_LIST_HEAD(&bo->cb_list); } @@ -644,10 +645,11 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) { struct tcf_block *block = block_cb->indr.data; struct net_device *dev = block_cb->indr.dev; + struct Qdisc *sch = block_cb->indr.sch; struct netlink_ext_ack extack = {}; struct flow_block_offload bo; - tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND, + tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, block_cb->indr.binder_type, &block->flow_block, tcf_block_shared(block), &extack); @@ -666,14 +668,14 @@ static bool tcf_block_offload_in_use(struct tcf_block *block) } static int tcf_block_offload_cmd(struct tcf_block *block, - struct net_device *dev, + struct net_device *dev, struct Qdisc *sch, struct tcf_block_ext_info *ei, enum flow_block_command command, struct netlink_ext_ack *extack) { struct flow_block_offload bo = {}; - tcf_block_offload_init(&bo, dev, command, ei->binder_type, + tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, &block->flow_block, tcf_block_shared(block), extack); @@ -690,7 +692,7 @@ static int tcf_block_offload_cmd(struct tcf_block *block, return tcf_block_setup(block, &bo); } - flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo, + flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, tc_block_indr_cleanup); tcf_block_setup(block, &bo); @@ -717,7 +719,7 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, goto err_unlock; } - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack); + err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; if (err) @@ -744,7 +746,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, 
struct Qdisc *q, int err; down_write(&block->cb_lock); - err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL); + err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; up_write(&block->cb_lock); From 951b84d4aedde4144d254281ceba51bb3ca370fe Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Sat, 11 Jul 2020 00:55:04 +0300 Subject: [PATCH 0913/2056] mlxsw: reg: Add Monitoring Mirror Trigger Enable Register This register is used to configure the mirror enable for different mirror reasons. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 50 +++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b76c839223b5..aa2fd7debec2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9502,6 +9502,55 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1); */ MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); +/* MOMTE - Monitoring Mirror Trigger Enable Register + * ------------------------------------------------- + * This register is used to configure the mirror enable for different mirror + * reasons. + */ +#define MLXSW_REG_MOMTE_ID 0x908D +#define MLXSW_REG_MOMTE_LEN 0x10 + +MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN); + +/* reg_momte_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8); + +enum mlxsw_reg_momte_type { + MLXSW_REG_MOMTE_TYPE_WRED = 0x20, + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS = 0x31, + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS_DESCRIPTORS = 0x32, + MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_EGRESS_PORT = 0x33, + MLXSW_REG_MOMTE_TYPE_ING_CONG = 0x40, + MLXSW_REG_MOMTE_TYPE_EGR_CONG = 0x50, + MLXSW_REG_MOMTE_TYPE_ECN = 0x60, + MLXSW_REG_MOMTE_TYPE_HIGH_LATENCY = 0x70, +}; + +/* reg_momte_type + * Type of mirroring. + * Access: Index + */ +MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8); + +/* reg_momte_tclass_en + * TClass/PG mirror enable. Each bit represents corresponding tclass. + * 0: disable (default) + * 1: enable + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1); + +static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port, + enum mlxsw_reg_momte_type type) +{ + MLXSW_REG_ZERO(momte, payload); + mlxsw_reg_momte_local_port_set(payload, local_port); + mlxsw_reg_momte_type_set(payload, type); +} + /* MTPPPC - Time Precision Packet Port Configuration * ------------------------------------------------- * This register serves for configuration of which PTP messages should be @@ -10853,6 +10902,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mgpc), MLXSW_REG(mprs), MLXSW_REG(mogcr), + MLXSW_REG(momte), MLXSW_REG(mtpppc), MLXSW_REG(mtpptr), MLXSW_REG(mtptpt), From c0e3969b07dc05eafcd0c35d20c134d3f7d360ed Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Sat, 11 Jul 2020 00:55:05 +0300 Subject: [PATCH 0914/2056] mlxsw: reg: Add Monitoring Port Analyzer Global Register This register is used for global port analyzer configurations. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 52 +++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index aa2fd7debec2..76f61bef03f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9502,6 +9502,57 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1); */ MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); +/* MPAGR - Monitoring Port Analyzer Global Register + * ------------------------------------------------ + * This register is used for global port analyzer configurations. + * Note: This register is not supported by current FW versions for Spectrum-1. + */ +#define MLXSW_REG_MPAGR_ID 0x9089 +#define MLXSW_REG_MPAGR_LEN 0x0C + +MLXSW_REG_DEFINE(mpagr, MLXSW_REG_MPAGR_ID, MLXSW_REG_MPAGR_LEN); + +enum mlxsw_reg_mpagr_trigger { + MLXSW_REG_MPAGR_TRIGGER_EGRESS, + MLXSW_REG_MPAGR_TRIGGER_INGRESS, + MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED, + MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER, + MLXSW_REG_MPAGR_TRIGGER_INGRESS_ING_CONG, + MLXSW_REG_MPAGR_TRIGGER_INGRESS_EGR_CONG, + MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN, + MLXSW_REG_MPAGR_TRIGGER_EGRESS_HIGH_LATENCY, +}; + +/* reg_mpagr_trigger + * Mirror trigger. + * Access: Index + */ +MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4); + +/* reg_mpagr_pa_id + * Port analyzer ID. + * Access: RW + */ +MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4); + +/* reg_mpagr_probability_rate + * Sampling rate. + * Valid values are: 1 to 3.5*10^9 + * Value of 1 means "sample all". Default is 1. + * Access: RW + */ +MLXSW_ITEM32(reg, mpagr, probability_rate, 0x08, 0, 32); + +static inline void mlxsw_reg_mpagr_pack(char *payload, + enum mlxsw_reg_mpagr_trigger trigger, + u8 pa_id, u32 probability_rate) +{ + MLXSW_REG_ZERO(mpagr, payload); + mlxsw_reg_mpagr_trigger_set(payload, trigger); + mlxsw_reg_mpagr_pa_id_set(payload, pa_id); + mlxsw_reg_mpagr_probability_rate_set(payload, probability_rate); +} + /* MOMTE - Monitoring Mirror Trigger Enable Register * ------------------------------------------------- * This register is used to configure the mirror enable for different mirror @@ -10902,6 +10953,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(mgpc), MLXSW_REG(mprs), MLXSW_REG(mogcr), + MLXSW_REG(mpagr), MLXSW_REG(momte), MLXSW_REG(mtpppc), MLXSW_REG(mtpptr), From 4bafb85ae24418cfa42ad3cb9f67031b1813fea8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 11 Jul 2020 00:55:06 +0300 Subject: [PATCH 0915/2056] mlxsw: spectrum_span: Move SPAN operations out of global file The per-ASIC SPAN operations are relevant to the SPAN module and therefore should be implemented there and not in the main driver file. Move them. These operations will be extended later on. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 50 ------------------- .../net/ethernet/mellanox/mlxsw/spectrum.h | 1 - .../ethernet/mellanox/mlxsw/spectrum_span.c | 47 +++++++++++++++++ .../ethernet/mellanox/mlxsw/spectrum_span.h | 8 +++ 4 files changed, 55 insertions(+), 51 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index eeeafd1d82ce..636dd09cbbbc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -175,10 +175,6 @@ struct mlxsw_sp_mlxfw_dev { struct mlxsw_sp *mlxsw_sp; }; -struct mlxsw_sp_span_ops { - u32 (*buffsize_get)(int mtu, u32 speed); -}; - static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev, u16 component_index, u32 *p_max_size, u8 *p_align_bits, u16 *p_max_write_size) @@ -2812,52 +2808,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = { .get_stats = mlxsw_sp2_get_stats, }; -static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) -{ - return mtu * 5 / 2; -} - -static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { - .buffsize_get = mlxsw_sp1_span_buffsize_get, -}; - -#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 -#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 - -static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) -{ - return 3 * mtu + buffer_factor * speed / 1000; -} - -static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) -{ - int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; - - return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); -} - -static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { - .buffsize_get = mlxsw_sp2_span_buffsize_get, -}; - -static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) -{ - int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; - - return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); -} - -static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { - .buffsize_get = mlxsw_sp3_span_buffsize_get, -}; - -u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed) -{ - u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu); - - return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; -} - static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 1d6b2bc2774c..18c64f7b265d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -539,7 +539,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int *p_counter_index); void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp, unsigned int counter_index); -u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed); bool mlxsw_sp_port_dev_check(const struct net_device *dev); struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 92351a79addc..49e2a417ec0e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -766,6 +766,14 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, return 0; } +static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, + u32 speed) +{ + u32 buffsize = 
mlxsw_sp->span_ops->buffsize_get(speed, mtu); + + return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1; +} + static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { @@ -1207,3 +1215,42 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry); } + +static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) +{ + return mtu * 5 / 2; +} + +const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { + .buffsize_get = mlxsw_sp1_span_buffsize_get, +}; + +#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 +#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 + +static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor) +{ + return 3 * mtu + buffer_factor * speed / 1000; +} + +static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) +{ + int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR; + + return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); +} + +const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { + .buffsize_get = mlxsw_sp2_span_buffsize_get, +}; + +static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) +{ + int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR; + + return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); +} + +const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { + .buffsize_get = mlxsw_sp3_span_buffsize_get, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 9f6dd2d0f4e6..440551ec0dba 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -34,6 +34,10 @@ struct mlxsw_sp_span_trigger_parms { struct mlxsw_sp_span_entry_ops; +struct mlxsw_sp_span_ops { + u32 (*buffsize_get)(int mtu, u32 speed); +}; + struct mlxsw_sp_span_entry { const struct net_device *to_dev; const struct mlxsw_sp_span_entry_ops *ops; @@ -82,4 +86,8 @@ mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_port *mlxsw_sp_port, const struct mlxsw_sp_span_trigger_parms *parms); +extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops; +extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops; +extern const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops; + #endif From 08a3641f266276cafb14e7497e7954d6ee32a016 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 11 Jul 2020 00:55:07 +0300 Subject: [PATCH 0916/2056] mlxsw: spectrum_span: Prepare for global mirroring triggers Currently, a SPAN agent can only be bound to a per-port trigger where the trigger is either an incoming packet (INGRESS) or an outgoing packet (EGRESS) to / from the port. The subsequent patch will introduce the concept of global mirroring triggers. The binding / unbinding of global triggers is different than that of per-port triggers. Such triggers also need to be enabled / disabled on a per-{port, TC} basis and are only supported from Spectrum-2 onwards. Add trigger operations that allow us to abstract these differences. Only implement the operations for per-port triggers. Next patch will implement the operations for global triggers. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 119 +++++++++++++++--- .../ethernet/mellanox/mlxsw/spectrum_span.h | 1 + 2 files changed, 103 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 49e2a417ec0e..b20422dde147 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -21,6 +21,7 @@ struct mlxsw_sp_span { struct work_struct work; struct mlxsw_sp *mlxsw_sp; + const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr; struct list_head analyzed_ports_list; struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ struct list_head trigger_entries_list; @@ -38,12 +39,26 @@ struct mlxsw_sp_span_analyzed_port { struct mlxsw_sp_span_trigger_entry { struct list_head list; /* Member of trigger_entries_list */ + struct mlxsw_sp_span *span; + const struct mlxsw_sp_span_trigger_ops *ops; refcount_t ref_count; u8 local_port; enum mlxsw_sp_span_trigger trigger; struct mlxsw_sp_span_trigger_parms parms; }; +enum mlxsw_sp_span_trigger_type { + MLXSW_SP_SPAN_TRIGGER_TYPE_PORT, +}; + +struct mlxsw_sp_span_trigger_ops { + int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry); + void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry); + bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry, + enum mlxsw_sp_span_trigger trigger, + struct mlxsw_sp_port *mlxsw_sp_port); +}; + static void mlxsw_sp_span_respin_work(struct work_struct *work); static u64 mlxsw_sp_span_occ_get(void *priv) @@ -57,7 +72,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_span *span; - int i, entries_count; + int i, entries_count, err; if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN)) return -EIO; @@ -77,11 +92,20 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) for (i = 0; i < mlxsw_sp->span->entries_count; i++) mlxsw_sp->span->entries[i].id = i; + err = mlxsw_sp->span_ops->init(mlxsw_sp); + if (err) + goto err_init; + devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN, mlxsw_sp_span_occ_get, mlxsw_sp); INIT_WORK(&span->work, mlxsw_sp_span_respin_work); return 0; + +err_init: + mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock); + kfree(mlxsw_sp->span); + return err; } void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) @@ -1059,9 +1083,9 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port, } static int -__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span, - struct mlxsw_sp_span_trigger_entry * - trigger_entry, bool enable) +__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span, + struct mlxsw_sp_span_trigger_entry * + trigger_entry, bool enable) { char mpar_pl[MLXSW_REG_MPAR_LEN]; enum mlxsw_reg_mpar_i_e i_e; @@ -1084,19 +1108,60 @@ __mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span, } static int -mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span, - struct mlxsw_sp_span_trigger_entry * - trigger_entry) +mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry * + trigger_entry) { - return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true); + return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, + trigger_entry, true); } static void -mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span, - struct mlxsw_sp_span_trigger_entry * - trigger_entry) +mlxsw_sp_span_trigger_port_unbind(struct 
mlxsw_sp_span_trigger_entry * + trigger_entry) { - __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false); + __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry, + false); +} + +static bool +mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + enum mlxsw_sp_span_trigger trigger, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + return trigger_entry->trigger == trigger && + trigger_entry->local_port == mlxsw_sp_port->local_port; +} + +static const struct mlxsw_sp_span_trigger_ops +mlxsw_sp_span_trigger_port_ops = { + .bind = mlxsw_sp_span_trigger_port_bind, + .unbind = mlxsw_sp_span_trigger_port_unbind, + .matches = mlxsw_sp_span_trigger_port_matches, +}; + +static const struct mlxsw_sp_span_trigger_ops * +mlxsw_sp_span_trigger_ops_arr[] = { + [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops, +}; + +static void +mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry) +{ + struct mlxsw_sp_span *span = trigger_entry->span; + enum mlxsw_sp_span_trigger_type type; + + switch (trigger_entry->trigger) { + case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_EGRESS: + type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT; + break; + default: + WARN_ON_ONCE(1); + return; + } + + trigger_entry->ops = span->span_trigger_ops_arr[type]; } static struct mlxsw_sp_span_trigger_entry * @@ -1114,12 +1179,15 @@ mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span, return ERR_PTR(-ENOMEM); refcount_set(&trigger_entry->ref_count, 1); - trigger_entry->local_port = mlxsw_sp_port->local_port; + trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port : + 0; trigger_entry->trigger = trigger; memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms)); + trigger_entry->span = span; + mlxsw_sp_span_trigger_ops_set(trigger_entry); list_add_tail(&trigger_entry->list, &span->trigger_entries_list); - err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry); + err = trigger_entry->ops->bind(trigger_entry); if (err) goto err_trigger_entry_bind; @@ -1136,7 +1204,7 @@ mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span, struct mlxsw_sp_span_trigger_entry * trigger_entry) { - mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry); + trigger_entry->ops->unbind(trigger_entry); list_del(&trigger_entry->list); kfree(trigger_entry); } @@ -1149,8 +1217,8 @@ mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span, struct mlxsw_sp_span_trigger_entry *trigger_entry; list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) { - if (trigger_entry->trigger == trigger && - trigger_entry->local_port == mlxsw_sp_port->local_port) + if (trigger_entry->ops->matches(trigger_entry, trigger, + mlxsw_sp_port)) return trigger_entry; } @@ -1216,15 +1284,30 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry); } +static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp_span_trigger_ops_arr; + + return 0; +} + static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) { return mtu * 5 / 2; } const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { + .init = mlxsw_sp1_span_init, .buffsize_get = mlxsw_sp1_span_buffsize_get, }; +static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp_span_trigger_ops_arr; + + return 0; +} + #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38 #define 
MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50 @@ -1241,6 +1324,7 @@ static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) } const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { + .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp2_span_buffsize_get, }; @@ -1252,5 +1336,6 @@ static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) } const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { + .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp3_span_buffsize_get, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 440551ec0dba..b9acecaf6ee2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -35,6 +35,7 @@ struct mlxsw_sp_span_trigger_parms { struct mlxsw_sp_span_entry_ops; struct mlxsw_sp_span_ops { + int (*init)(struct mlxsw_sp *mlxsw_sp); u32 (*buffsize_get)(int mtu, u32 speed); }; From ab8c06b7b42cf9e756a720931a0939cd0d56786a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 11 Jul 2020 00:55:08 +0300 Subject: [PATCH 0917/2056] mlxsw: spectrum_span: Add support for global mirroring triggers Global mirroring triggers are triggers that are only keyed by their trigger, as opposed to per-port triggers, which are keyed by their trigger and port. Such triggers allow mirroring packets that were tail/early dropped or ECN marked to a SPAN agent. Implement the previously added trigger operations for these global triggers. Since such triggers are only supported from Spectrum-2 onwards, have the Spectrum-1 operations return an error. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 104 +++++++++++++++++- .../ethernet/mellanox/mlxsw/spectrum_span.h | 3 + 2 files changed, 104 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index b20422dde147..fa223c1351b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -49,6 +49,7 @@ struct mlxsw_sp_span_trigger_entry { enum mlxsw_sp_span_trigger_type { MLXSW_SP_SPAN_TRIGGER_TYPE_PORT, + MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL, }; struct mlxsw_sp_span_trigger_ops { @@ -1140,9 +1141,101 @@ mlxsw_sp_span_trigger_port_ops = { .matches = mlxsw_sp_span_trigger_port_matches, }; +static int +mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry * + trigger_entry) +{ + return -EOPNOTSUPP; +} + +static void +mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry * + trigger_entry) +{ +} + +static bool +mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + enum mlxsw_sp_span_trigger trigger, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + WARN_ON_ONCE(1); + return false; +} + +static const struct mlxsw_sp_span_trigger_ops +mlxsw_sp1_span_trigger_global_ops = { + .bind = mlxsw_sp1_span_trigger_global_bind, + .unbind = mlxsw_sp1_span_trigger_global_unbind, + .matches = mlxsw_sp1_span_trigger_global_matches, +}; + static const struct mlxsw_sp_span_trigger_ops * -mlxsw_sp_span_trigger_ops_arr[] = { +mlxsw_sp1_span_trigger_ops_arr[] = { [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops, + [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] = + &mlxsw_sp1_span_trigger_global_ops, +}; + +static int +mlxsw_sp2_span_trigger_global_bind(struct 
mlxsw_sp_span_trigger_entry * + trigger_entry) +{ + struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp; + enum mlxsw_reg_mpagr_trigger trigger; + char mpagr_pl[MLXSW_REG_MPAGR_LEN]; + + switch (trigger_entry->trigger) { + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER; + break; + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: + trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED; + break; + case MLXSW_SP_SPAN_TRIGGER_ECN: + trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id, + 1); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl); +} + +static void +mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry * + trigger_entry) +{ + /* There is no unbinding for global triggers. The trigger should be + * disabled on all ports by now. + */ +} + +static bool +mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + enum mlxsw_sp_span_trigger trigger, + struct mlxsw_sp_port *mlxsw_sp_port) +{ + return trigger_entry->trigger == trigger; +} + +static const struct mlxsw_sp_span_trigger_ops +mlxsw_sp2_span_trigger_global_ops = { + .bind = mlxsw_sp2_span_trigger_global_bind, + .unbind = mlxsw_sp2_span_trigger_global_unbind, + .matches = mlxsw_sp2_span_trigger_global_matches, +}; + +static const struct mlxsw_sp_span_trigger_ops * +mlxsw_sp2_span_trigger_ops_arr[] = { + [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops, + [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] = + &mlxsw_sp2_span_trigger_global_ops, }; static void @@ -1156,6 +1249,11 @@ mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry) case MLXSW_SP_SPAN_TRIGGER_EGRESS: type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT; break; + case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */ + case MLXSW_SP_SPAN_TRIGGER_ECN: + type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL; + break; default: WARN_ON_ONCE(1); return; @@ -1286,7 +1384,7 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { - mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp_span_trigger_ops_arr; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; return 0; } @@ -1303,7 +1401,7 @@ const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) { - mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp_span_trigger_ops_arr; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index b9acecaf6ee2..bb7939b3f09c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -26,6 +26,9 @@ struct mlxsw_sp_span_parms { enum mlxsw_sp_span_trigger { MLXSW_SP_SPAN_TRIGGER_INGRESS, MLXSW_SP_SPAN_TRIGGER_EGRESS, + MLXSW_SP_SPAN_TRIGGER_TAIL_DROP, + MLXSW_SP_SPAN_TRIGGER_EARLY_DROP, + MLXSW_SP_SPAN_TRIGGER_ECN, }; struct mlxsw_sp_span_trigger_parms { From 2bafb216e10edf2684d3dc124be333a07523580d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 11 Jul 2020 00:55:09 +0300 Subject: [PATCH 0918/2056] mlxsw: spectrum_span: Add APIs to enable / disable global mirroring triggers While the binding of global mirroring triggers to a SPAN agent is 
global, packets are only mirrored if they belong to a port and TC on which the trigger was enabled. This allows, for example, to mirror packets that were tail-dropped on a specific netdev. Implement the operations that allow to enable / disable a global mirroring trigger on a specific port and TC. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 135 ++++++++++++++++++ .../ethernet/mellanox/mlxsw/spectrum_span.h | 4 + 2 files changed, 139 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index fa223c1351b4..6374765a112d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -58,6 +58,10 @@ struct mlxsw_sp_span_trigger_ops { bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry, enum mlxsw_sp_span_trigger trigger, struct mlxsw_sp_port *mlxsw_sp_port); + int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc); + void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc); }; static void mlxsw_sp_span_respin_work(struct work_struct *work); @@ -1134,11 +1138,29 @@ mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry * trigger_entry->local_port == mlxsw_sp_port->local_port; } +static int +mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc) +{ + /* Port trigger are enabled during binding. */ + return 0; +} + +static void +mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, u8 tc) +{ +} + static const struct mlxsw_sp_span_trigger_ops mlxsw_sp_span_trigger_port_ops = { .bind = mlxsw_sp_span_trigger_port_bind, .unbind = mlxsw_sp_span_trigger_port_unbind, .matches = mlxsw_sp_span_trigger_port_matches, + .enable = mlxsw_sp_span_trigger_port_enable, + .disable = mlxsw_sp_span_trigger_port_disable, }; static int @@ -1164,11 +1186,30 @@ mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * return false; } +static int +mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, + u8 tc) +{ + return -EOPNOTSUPP; +} + +static void +mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, + u8 tc) +{ +} + static const struct mlxsw_sp_span_trigger_ops mlxsw_sp1_span_trigger_global_ops = { .bind = mlxsw_sp1_span_trigger_global_bind, .unbind = mlxsw_sp1_span_trigger_global_unbind, .matches = mlxsw_sp1_span_trigger_global_matches, + .enable = mlxsw_sp1_span_trigger_global_enable, + .disable = mlxsw_sp1_span_trigger_global_disable, }; static const struct mlxsw_sp_span_trigger_ops * @@ -1224,11 +1265,71 @@ mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry * return trigger_entry->trigger == trigger; } +static int +__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, + u8 tc, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp; + char momte_pl[MLXSW_REG_MOMTE_LEN]; + enum mlxsw_reg_momte_type type; + int err; + + switch (trigger_entry->trigger) { + case 
MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: + type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS; + break; + case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: + type = MLXSW_REG_MOMTE_TYPE_WRED; + break; + case MLXSW_SP_SPAN_TRIGGER_ECN: + type = MLXSW_REG_MOMTE_TYPE_ECN; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + /* Query existing configuration in order to only change the state of + * the specified traffic class. + */ + mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl); + if (err) + return err; + + mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl); +} + +static int +mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, + u8 tc) +{ + return __mlxsw_sp2_span_trigger_global_enable(trigger_entry, + mlxsw_sp_port, tc, true); +} + +static void +mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry * + trigger_entry, + struct mlxsw_sp_port *mlxsw_sp_port, + u8 tc) +{ + __mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc, + false); +} + static const struct mlxsw_sp_span_trigger_ops mlxsw_sp2_span_trigger_global_ops = { .bind = mlxsw_sp2_span_trigger_global_bind, .unbind = mlxsw_sp2_span_trigger_global_unbind, .matches = mlxsw_sp2_span_trigger_global_matches, + .enable = mlxsw_sp2_span_trigger_global_enable, + .disable = mlxsw_sp2_span_trigger_global_disable, }; static const struct mlxsw_sp_span_trigger_ops * @@ -1382,6 +1483,40 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry); } +int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_span_trigger trigger, u8 tc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_span_trigger_entry *trigger_entry; + + ASSERT_RTNL(); + + trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span, + trigger, + mlxsw_sp_port); + if (WARN_ON_ONCE(!trigger_entry)) + return -EINVAL; + + return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc); +} + +void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_span_trigger trigger, u8 tc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_span_trigger_entry *trigger_entry; + + ASSERT_RTNL(); + + trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span, + trigger, + mlxsw_sp_port); + if (WARN_ON_ONCE(!trigger_entry)) + return; + + return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc); +} + static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index bb7939b3f09c..29b96b222e25 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -89,6 +89,10 @@ mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_span_trigger trigger, struct mlxsw_sp_port *mlxsw_sp_port, const struct mlxsw_sp_span_trigger_parms *parms); +int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_span_trigger trigger, u8 tc); +void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_sp_span_trigger trigger, u8 tc); extern const struct 
mlxsw_sp_span_ops mlxsw_sp1_span_ops; extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops; From 2c4950ea10a3e159550df9cdca15a63518f3affc Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:10 +0300 Subject: [PATCH 0919/2056] mlxsw: spectrum_flow: Convert a goto to a return No clean-up is performed at the target label of this goto. Convert it to a direct return. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c index 47b66f347ff1..421581a85cd6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c @@ -219,8 +219,7 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_tc_block_release); if (IS_ERR(block_cb)) { mlxsw_sp_flow_block_destroy(flow_block); - err = PTR_ERR(block_cb); - goto err_cb_register; + return PTR_ERR(block_cb); } register_block = true; } else { @@ -247,7 +246,6 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port, err_block_bind: if (!flow_block_cb_decref(block_cb)) flow_block_cb_free(block_cb); -err_cb_register: return err; } From d928f82198338abcce218fc5330bacb157f27b6b Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:11 +0300 Subject: [PATCH 0920/2056] mlxsw: spectrum_flow: Drop an unused field The field "dev" in struct mlxsw_sp_flow_block_binding is not used. Drop it. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 18c64f7b265d..ab54790d2955 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -710,7 +710,6 @@ struct mlxsw_sp_flow_block { struct mlxsw_sp_flow_block_binding { struct list_head list; - struct net_device *dev; struct mlxsw_sp_port *mlxsw_sp_port; bool ingress; }; From b50f60a0c4c92b07647a042c54278c9206c476d0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:12 +0300 Subject: [PATCH 0921/2056] mlxsw: spectrum_matchall: Publish matchall data structures A following patch introduces offloading of filters attached to blocks bound to the RED tail_drop qevent. The only classifier that mlxsw will permit in this role is matchall. mlxsw currently offloads matchall filters used with clsact qdisc. The data structures used for that offload will come handy for the qevent offload as well. Publish them in spectrum.h. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 24 +++++++++++++++++++ .../mellanox/mlxsw/spectrum_matchall.c | 23 ------------------ 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ab54790d2955..51047b1aa23a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -960,6 +960,30 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops; extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops; /* spectrum_matchall.c */ +enum mlxsw_sp_mall_action_type { + MLXSW_SP_MALL_ACTION_TYPE_MIRROR, + MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, + MLXSW_SP_MALL_ACTION_TYPE_TRAP, +}; + +struct mlxsw_sp_mall_mirror_entry { + const struct net_device *to_dev; + int span_id; +}; + +struct mlxsw_sp_mall_entry { + struct list_head list; + unsigned long cookie; + unsigned int priority; + enum mlxsw_sp_mall_action_type type; + bool ingress; + union { + struct mlxsw_sp_mall_mirror_entry mirror; + struct mlxsw_sp_port_sample sample; + }; + struct rcu_head rcu; +}; + int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block *block, struct tc_cls_matchall_offload *f); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index f1a44a8eda55..195e28ab8e65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -10,29 +10,6 @@ #include "spectrum_span.h" #include "reg.h" -enum mlxsw_sp_mall_action_type { - MLXSW_SP_MALL_ACTION_TYPE_MIRROR, - MLXSW_SP_MALL_ACTION_TYPE_SAMPLE, -}; - -struct mlxsw_sp_mall_mirror_entry { - const struct net_device *to_dev; - int span_id; -}; - -struct mlxsw_sp_mall_entry { - struct list_head list; - unsigned long cookie; - unsigned int priority; - enum mlxsw_sp_mall_action_type type; - bool ingress; - union { - struct mlxsw_sp_mall_mirror_entry mirror; - struct mlxsw_sp_port_sample sample; - }; - struct rcu_head rcu; -}; - static struct mlxsw_sp_mall_entry * mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie) { From f7a439cbf1e8c544393fd854ad7bd6df41a60e08 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:13 +0300 Subject: [PATCH 0922/2056] mlxsw: spectrum_flow: Promote binder-type dispatch to spectrum.c Two RED qevents have been introduced recently. From the point of view of a driver, qevents are simply blocks with unusual binder types. However they need to be handled by different logic than ACL-like flows. Thus rename mlxsw_sp_setup_tc_block() to mlxsw_sp_setup_tc_block_clsact() and move the binder-type dispatch from there to spectrum.c into a new function of the original name. The new dispatcher is easier to extend with new binder types. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 13 +++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 5 +++-- .../net/ethernet/mellanox/mlxsw/spectrum_flow.c | 14 +++----------- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 636dd09cbbbc..2235c4bf330d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1329,6 +1329,19 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; } +static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f) +{ + switch (f->binder_type) { + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: + return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: + return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); + default: + return -EOPNOTSUPP; + } +} + static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 51047b1aa23a..ee9a19f28b97 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -767,8 +767,9 @@ mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block) struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp, struct net *net); void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block); -int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f); +int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + bool ingress); /* spectrum_acl.c */ struct mlxsw_sp_acl_ruleset; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c index 421581a85cd6..0456cda33808 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c @@ -277,18 +277,10 @@ static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port, } } -int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, - struct flow_block_offload *f) +int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + bool ingress) { - bool ingress; - - if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - ingress = true; - else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) - ingress = false; - else - return -EOPNOTSUPP; - f->driver_block_list = &mlxsw_sp_block_cb_list; switch (f->command) { From f6668eac2206d18e0668fd495dab6a0d91a6b8d2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:14 +0300 Subject: [PATCH 0923/2056] mlxsw: spectrum_qdisc: Offload mirroring on RED qevent early_drop The RED qevents early_drop and mark can be offloaded under the following fairly strict conditions: - At most one filter is configured at the qevent block - The protocol is "any" - The classifier is matchall - The action is trap, sample, or mirror with the same conditions as with other SPAN offloads - The hw_counters type is none In this patchset, implement offload of mirror for early_drop qevent. The ECN trigger is currently not implemented in the FW and therefore the mark qevent is not supported. 
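For illustration, a hedged sketch of how a driver could check the matchall/mirror conditions listed above. The helper name and exact checks are illustrative, not the mlxsw code added by this patch; the protocol and hw_counters checks and the trap/sample variants are omitted, only the mirror case is shown.

#include <net/flow_offload.h>
#include <net/pkt_cls.h>

/* Sketch only: accept a matchall filter on a qevent block if its sole
 * action is a mirror (mirred) and remember the mirror target netdevice. */
static int foo_qevent_mall_validate(struct tc_cls_matchall_offload *f,
                                    struct net_device **to_dev)
{
        const struct flow_action_entry *act;

        if (!flow_offload_has_one_action(&f->rule->action))
                return -EOPNOTSUPP;

        act = &f->rule->action.entries[0];
        if (act->id != FLOW_ACTION_MIRRED)
                return -EOPNOTSUPP;

        *to_dev = act->dev;
        return 0;
}
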
The qevent notifications look exactly like regular block binding notifications with a binder type that identifies them as qevents. Therefore the details of processing this binding are fairly similar to the matchall offload. struct flow_block_offload.sch points at the qdisc in question. Use it to figure out if the qdisc is offloaded at all and what TC it configures. Bounce bindings on not-offloaded qdiscs. Individual bindings are kept in a list so that several qevents can share the same block and all binding points get configured as the configured filters change. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 2 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 2 + .../ethernet/mellanox/mlxsw/spectrum_qdisc.c | 472 ++++++++++++++++++ 3 files changed, 476 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 2235c4bf330d..4ac634bd3571 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1337,6 +1337,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true); case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false); + case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP: + return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ee9a19f28b97..c00811178637 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1031,6 +1031,8 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_tbf_qopt_offload *p); int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_fifo_qopt_offload *p); +int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f); /* spectrum_fid.c */ bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 670a43fe2a00..901acd87353f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -8,6 +8,7 @@ #include #include "spectrum.h" +#include "spectrum_span.h" #include "reg.h" #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1) @@ -1272,6 +1273,477 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port, } } +struct mlxsw_sp_qevent_block { + struct list_head binding_list; + struct list_head mall_entry_list; + struct mlxsw_sp *mlxsw_sp; +}; + +struct mlxsw_sp_qevent_binding { + struct list_head list; + struct mlxsw_sp_port *mlxsw_sp_port; + u32 handle; + int tclass_num; + enum mlxsw_sp_span_trigger span_trigger; +}; + +static LIST_HEAD(mlxsw_sp_qevent_block_cb_list); + +static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; + struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + int span_id; + int err; + + err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id); + if (err) + return err; + + err = 
mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true); + if (err) + goto err_analyzed_port_get; + + trigger_parms.span_id = span_id; + err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + &trigger_parms); + if (err) + goto err_agent_bind; + + err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger, + qevent_binding->tclass_num); + if (err) + goto err_trigger_enable; + + mall_entry->mirror.span_id = span_id; + return 0; + +err_trigger_enable: + mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + &trigger_parms); +err_agent_bind: + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); +err_analyzed_port_get: + mlxsw_sp_span_agent_put(mlxsw_sp, span_id); + return err; +} + +static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; + struct mlxsw_sp_span_trigger_parms trigger_parms = { + .span_id = mall_entry->mirror.span_id, + }; + + mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger, + qevent_binding->tclass_num); + mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, + &trigger_parms); + mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); + mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id); +} + +static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + switch (mall_entry->type) { + case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: + return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding); + default: + /* This should have been validated away. 
*/ + WARN_ON(1); + return -EOPNOTSUPP; + } +} + +static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + switch (mall_entry->type) { + case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: + return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding); + default: + WARN_ON(1); + return; + } +} + +static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_mall_entry *mall_entry; + int err; + + list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) { + err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry, + qevent_binding); + if (err) + goto err_entry_configure; + } + + return 0; + +err_entry_configure: + list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list) + mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry, + qevent_binding); + return err; +} + +static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_mall_entry *mall_entry; + + list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) + mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry, + qevent_binding); +} + +static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block) +{ + struct mlxsw_sp_qevent_binding *qevent_binding; + int err; + + list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) { + err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + if (err) + goto err_binding_configure; + } + + return 0; + +err_binding_configure: + list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list) + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); + return err; +} + +static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block) +{ + struct mlxsw_sp_qevent_binding *qevent_binding; + + list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); +} + +static struct mlxsw_sp_mall_entry * +mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie) +{ + struct mlxsw_sp_mall_entry *mall_entry; + + list_for_each_entry(mall_entry, &block->mall_entry_list, list) + if (mall_entry->cookie == cookie) + return mall_entry; + + return NULL; +} + +static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_qevent_block *qevent_block, + struct tc_cls_matchall_offload *f) +{ + struct mlxsw_sp_mall_entry *mall_entry; + struct flow_action_entry *act; + int err; + + /* It should not currently be possible to replace a matchall rule. So + * this must be a new rule. 
+ */ + if (!list_empty(&qevent_block->mall_entry_list)) { + NL_SET_ERR_MSG(f->common.extack, "At most one filter supported"); + return -EOPNOTSUPP; + } + if (f->rule->action.num_entries != 1) { + NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported"); + return -EOPNOTSUPP; + } + if (f->common.chain_index) { + NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported"); + return -EOPNOTSUPP; + } + if (f->common.protocol != htons(ETH_P_ALL)) { + NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported"); + return -EOPNOTSUPP; + } + + act = &f->rule->action.entries[0]; + if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) { + NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents"); + return -EOPNOTSUPP; + } + + mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL); + if (!mall_entry) + return -ENOMEM; + mall_entry->cookie = f->cookie; + + if (act->id == FLOW_ACTION_MIRRED) { + mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR; + mall_entry->mirror.to_dev = act->dev; + } else { + NL_SET_ERR_MSG(f->common.extack, "Unsupported action"); + err = -EOPNOTSUPP; + goto err_unsupported_action; + } + + list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list); + + err = mlxsw_sp_qevent_block_configure(qevent_block); + if (err) + goto err_block_configure; + + return 0; + +err_block_configure: + list_del(&mall_entry->list); +err_unsupported_action: + kfree(mall_entry); + return err; +} + +static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block, + struct tc_cls_matchall_offload *f) +{ + struct mlxsw_sp_mall_entry *mall_entry; + + mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie); + if (!mall_entry) + return; + + mlxsw_sp_qevent_block_deconfigure(qevent_block); + + list_del(&mall_entry->list); + kfree(mall_entry); +} + +static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block, + struct tc_cls_matchall_offload *f) +{ + struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp; + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_qevent_mall_destroy(qevent_block, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct mlxsw_sp_qevent_block *qevent_block = cb_priv; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data); + default: + return -EOPNOTSUPP; + } +} + +static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp, + struct net *net) +{ + struct mlxsw_sp_qevent_block *qevent_block; + + qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL); + if (!qevent_block) + return NULL; + + INIT_LIST_HEAD(&qevent_block->binding_list); + INIT_LIST_HEAD(&qevent_block->mall_entry_list); + qevent_block->mlxsw_sp = mlxsw_sp; + return qevent_block; +} + +static void +mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block) +{ + WARN_ON(!list_empty(&qevent_block->binding_list)); + WARN_ON(!list_empty(&qevent_block->mall_entry_list)); + kfree(qevent_block); +} + +static void mlxsw_sp_qevent_block_release(void *cb_priv) +{ + struct mlxsw_sp_qevent_block *qevent_block = cb_priv; + + mlxsw_sp_qevent_block_destroy(qevent_block); +} + +static struct mlxsw_sp_qevent_binding * +mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num, + enum 
mlxsw_sp_span_trigger span_trigger) +{ + struct mlxsw_sp_qevent_binding *binding; + + binding = kzalloc(sizeof(*binding), GFP_KERNEL); + if (!binding) + return ERR_PTR(-ENOMEM); + + binding->mlxsw_sp_port = mlxsw_sp_port; + binding->handle = handle; + binding->tclass_num = tclass_num; + binding->span_trigger = span_trigger; + return binding; +} + +static void +mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding) +{ + kfree(binding); +} + +static struct mlxsw_sp_qevent_binding * +mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block, + struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle, + enum mlxsw_sp_span_trigger span_trigger) +{ + struct mlxsw_sp_qevent_binding *qevent_binding; + + list_for_each_entry(qevent_binding, &block->binding_list, list) + if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port && + qevent_binding->handle == handle && + qevent_binding->span_trigger == span_trigger) + return qevent_binding; + return NULL; +} + +static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_qevent_binding *qevent_binding; + struct mlxsw_sp_qevent_block *qevent_block; + struct flow_block_cb *block_cb; + struct mlxsw_sp_qdisc *qdisc; + bool register_block = false; + int err; + + block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp); + if (!block_cb) { + qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net); + if (!qevent_block) + return -ENOMEM; + block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block, + mlxsw_sp_qevent_block_release); + if (IS_ERR(block_cb)) { + mlxsw_sp_qevent_block_destroy(qevent_block); + return PTR_ERR(block_cb); + } + register_block = true; + } else { + qevent_block = flow_block_cb_priv(block_cb); + } + flow_block_cb_incref(block_cb); + + qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle); + if (!qdisc) { + NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded"); + err = -ENOENT; + goto err_find_qdisc; + } + + if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle, + span_trigger))) { + err = -EEXIST; + goto err_binding_exists; + } + + qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle, + qdisc->tclass_num, span_trigger); + if (IS_ERR(qevent_binding)) { + err = PTR_ERR(qevent_binding); + goto err_binding_create; + } + + err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding); + if (err) + goto err_binding_configure; + + list_add(&qevent_binding->list, &qevent_block->binding_list); + + if (register_block) { + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list); + } + + return 0; + +err_binding_configure: + mlxsw_sp_qevent_binding_destroy(qevent_binding); +err_binding_create: +err_binding_exists: +err_find_qdisc: + if (!flow_block_cb_decref(block_cb)) + flow_block_cb_free(block_cb); + return err; +} + +static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_qevent_binding *qevent_binding; + struct mlxsw_sp_qevent_block *qevent_block; + struct flow_block_cb *block_cb; + + block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp); + if (!block_cb) + return; + qevent_block = 
flow_block_cb_priv(block_cb); + + qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle, + span_trigger); + if (!qevent_binding) + return; + + list_del(&qevent_binding->list); + mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding); + mlxsw_sp_qevent_binding_destroy(qevent_binding); + + if (!flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } +} + +static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f, + enum mlxsw_sp_span_trigger span_trigger) +{ + f->driver_block_list = &mlxsw_sp_qevent_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger); + case FLOW_BLOCK_UNBIND: + mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger); + return 0; + default: + return -EOPNOTSUPP; + } +} + +int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port, + struct flow_block_offload *f) +{ + return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP); +} + int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port) { struct mlxsw_sp_qdisc_state *qdisc_state; From 1add92121e39ee5e4af21562eb59bd562c8033e3 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Sat, 11 Jul 2020 00:55:15 +0300 Subject: [PATCH 0924/2056] selftests: mlxsw: RED: Test offload of mirror on RED early_drop qevent Add a selftest for offloading a mirror action attached to the block associated with RED early_drop qevent. Signed-off-by: Petr Machata Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/sch_red_core.sh | 106 +++++++++++++++++- .../drivers/net/mlxsw/sch_red_ets.sh | 11 ++ .../drivers/net/mlxsw/sch_red_root.sh | 8 ++ 3 files changed, 122 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 0d347d48c112..45042105ead7 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -121,6 +121,7 @@ h1_destroy() h2_create() { host_create $h2 2 + tc qdisc add dev $h2 clsact # Some of the tests in this suite use multicast traffic. As this traffic # enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus @@ -141,6 +142,7 @@ h2_create() h2_destroy() { ethtool -s $h2 autoneg on + tc qdisc del dev $h2 clsact host_destroy $h2 } @@ -336,6 +338,17 @@ get_qdisc_npackets() qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets } +send_packets() +{ + local vlan=$1; shift + local proto=$1; shift + local pkts=$1; shift + + $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ + -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ + -t $proto -q -c $pkts "$@" +} + # This sends traffic in an attempt to build a backlog of $size. Returns 0 on # success. After 10 failed attempts it bails out and returns 1. It dumps the # backlog size to stdout. 
@@ -364,9 +377,7 @@ build_backlog() return 1 fi - $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \ - -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \ - -t $proto -q -c $pkts "$@" + send_packets $vlan $proto $pkts "$@" done } @@ -531,3 +542,92 @@ do_mc_backlog_test() log_test "TC $((vlan - 10)): Qdisc reports MC backlog" } + +do_drop_test() +{ + local vlan=$1; shift + local limit=$1; shift + local trigger=$1; shift + local subtest=$1; shift + local fetch_counter=$1; shift + local backlog + local base + local now + local pct + + RET=0 + + start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac + + # Create a bit of a backlog and observe no mirroring due to drops. + qevent_rule_install_$subtest + base=$($fetch_counter) + + build_backlog $vlan $((2 * limit / 3)) udp >/dev/null + + busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null + check_fail $? "Spurious packets observed without buffer pressure" + + qevent_rule_uninstall_$subtest + + # Push to the queue until it's at the limit. The configured limit is + # rounded by the qdisc and then by the driver, so this is the best we + # can do to get to the real limit of the system. Do this with the rules + # uninstalled so that the inevitable drops don't get counted. + build_backlog $vlan $((3 * limit / 2)) udp >/dev/null + + qevent_rule_install_$subtest + base=$($fetch_counter) + + send_packets $vlan udp 11 + + now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter) + check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen" + + # When no extra traffic is injected, there should be no mirroring. + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null + check_fail $? "Spurious packets observed" + + # When the rule is uninstalled, there should be no mirroring. + qevent_rule_uninstall_$subtest + send_packets $vlan udp 11 + busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null + check_fail $? 
"Spurious packets observed after uninstall" + + log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd" + + stop_traffic + sleep 1 +} + +qevent_rule_install_mirror() +{ + tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ + action mirred egress mirror dev $swp2 hw_stats disabled +} + +qevent_rule_uninstall_mirror() +{ + tc filter del block 10 pref 1234 handle 102 matchall +} + +qevent_counter_fetch_mirror() +{ + tc_rule_handle_stats_get "dev $h2 ingress" 101 +} + +do_drop_mirror_test() +{ + local vlan=$1; shift + local limit=$1; shift + local qevent_name=$1; shift + + tc filter add dev $h2 ingress pref 1 handle 101 prot ip \ + flower skip_sw ip_proto udp \ + action drop + + do_drop_test "$vlan" "$limit" "$qevent_name" mirror \ + qevent_counter_fetch_mirror + + tc filter del dev $h2 ingress pref 1 handle 101 flower +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index 1c36c576613b..c8968b041bea 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -7,6 +7,7 @@ ALL_TESTS=" ecn_nodrop_test red_test mc_backlog_test + red_mirror_test " : ${QDISC:=ets} source sch_red_core.sh @@ -83,6 +84,16 @@ mc_backlog_test() uninstall_qdisc } +red_mirror_test() +{ + install_qdisc qevent early_drop block 10 + + do_drop_mirror_test 10 $BACKLOG1 early_drop + do_drop_mirror_test 11 $BACKLOG2 early_drop + + uninstall_qdisc +} + trap cleanup EXIT setup_prepare diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh index 558667ea11ec..ede9c38d3eff 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh @@ -7,6 +7,7 @@ ALL_TESTS=" ecn_nodrop_test red_test mc_backlog_test + red_mirror_test " source sch_red_core.sh @@ -57,6 +58,13 @@ mc_backlog_test() uninstall_qdisc } +red_mirror_test() +{ + install_qdisc qevent early_drop block 10 + do_drop_mirror_test 10 $BACKLOG + uninstall_qdisc +} + trap cleanup EXIT setup_prepare From ce1e2a776ffcd5ce1570bf8f077ea0c26c1d8a8d Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 13 Jul 2020 22:23:44 +0800 Subject: [PATCH 0925/2056] net: make symbol 'flush_works' static The sparse tool complains as follows: net/core/dev.c:5594:1: warning: symbol '__pcpu_scope_flush_works' was not declared. Should it be static? 'flush_works' is not used outside of dev.c, so marks it static. Fixes: 41852497a9205 ("net: batch calls to flush_all_backlogs()") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index eab4ebe3c21c..b61075828358 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5591,7 +5591,7 @@ void netif_receive_skb_list(struct list_head *head) } EXPORT_SYMBOL(netif_receive_skb_list); -DEFINE_PER_CPU(struct work_struct, flush_works); +static DEFINE_PER_CPU(struct work_struct, flush_works); /* Network device is going away, flush any packets still pending */ static void flush_backlog(struct work_struct *work) From e7fff95c8cea05d870239f6eebe7d3f1208e2a5c Mon Sep 17 00:00:00 2001 From: "Alexander A. 
Klimov" Date: Mon, 13 Jul 2020 16:30:54 +0200 Subject: [PATCH 0926/2056] isdn/capi: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/isdn/capi/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig index cc408ad9aafb..fdb43a632215 100644 --- a/drivers/isdn/capi/Kconfig +++ b/drivers/isdn/capi/Kconfig @@ -5,7 +5,7 @@ config ISDN_CAPI This provides CAPI (the Common ISDN Application Programming Interface) Version 2.0, a standard making it easy for programs to access ISDN hardware in a device independent way. (For details see - .) CAPI supports making and accepting voice + .) CAPI supports making and accepting voice and data connections, controlling call options and protocols, as well as ISDN supplementary services like call forwarding or three-party conferences (if supported by the specific hardware From c15841dd15af3ccbdc87031245ad1aee6d4362bb Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Mon, 13 Jul 2020 17:12:07 +0200 Subject: [PATCH 0927/2056] net: phy: fix mdio-mscc-miim build PHYLIB is not selected by mdio-mscc-miim but it uses mdio devres helpers. Explicitly select MDIO_DEVRES in this driver's Kconfig entry. Reported-by: kernel test robot Fixes: 1814cff26739 ("net: phy: add a Kconfig option for mdio_devres") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 7ffa8a4529a8..dd20c2c27c2f 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -185,6 +185,7 @@ config MDIO_MOXART config MDIO_MSCC_MIIM tristate "Microsemi MIIM interface support" depends on HAS_IOMEM + select MDIO_DEVRES help This driver supports the MIIM (MDIO) interface found in the network switches of the Microsemi SoCs; it is recommended to switch on From 5d037b4d3df7b77ecd22fa8e10f5000bdc42cc8b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 13 Jul 2020 18:20:14 +0300 Subject: [PATCH 0928/2056] devlink: Fix use-after-free when destroying health reporters Dereferencing the reporter after it was destroyed in order to unlock the reporters lock results in a use-after-free [1]. Fix this by storing a pointer to the lock in a local variable before destroying the reporter. 
[1] ================================================================== BUG: KASAN: use-after-free in devlink_health_reporter_destroy+0x15c/0x1b0 net/core/devlink.c:5476 Read of size 8 at addr ffff8880650fd020 by task syz-executor.1/904 CPU: 0 PID: 904 Comm: syz-executor.1 Not tainted 5.8.0-rc2+ #35 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0xf6/0x16e lib/dump_stack.c:118 print_address_description.constprop.0+0x1c/0x250 mm/kasan/report.c:383 __kasan_report mm/kasan/report.c:513 [inline] kasan_report.cold+0x1f/0x37 mm/kasan/report.c:530 devlink_health_reporter_destroy+0x15c/0x1b0 net/core/devlink.c:5476 nsim_dev_health_exit+0x8b/0xe0 drivers/net/netdevsim/health.c:317 nsim_dev_reload_destroy+0x7f/0x110 drivers/net/netdevsim/dev.c:1134 nsim_dev_reload_down+0x6e/0xd0 drivers/net/netdevsim/dev.c:712 devlink_reload+0xc6/0x3b0 net/core/devlink.c:2952 devlink_nl_cmd_reload+0x2f1/0x7c0 net/core/devlink.c:2987 genl_family_rcv_msg_doit net/netlink/genetlink.c:691 [inline] genl_family_rcv_msg net/netlink/genetlink.c:736 [inline] genl_rcv_msg+0x611/0x9d0 net/netlink/genetlink.c:753 netlink_rcv_skb+0x152/0x440 net/netlink/af_netlink.c:2469 genl_rcv+0x24/0x40 net/netlink/genetlink.c:764 netlink_unicast_kernel net/netlink/af_netlink.c:1303 [inline] netlink_unicast+0x53a/0x750 net/netlink/af_netlink.c:1329 netlink_sendmsg+0x850/0xd90 net/netlink/af_netlink.c:1918 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg+0x150/0x190 net/socket.c:672 ____sys_sendmsg+0x6d8/0x840 net/socket.c:2363 ___sys_sendmsg+0xff/0x170 net/socket.c:2417 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2450 do_syscall_64+0x56/0xa0 arch/x86/entry/common.c:359 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x4748ad Code: Bad RIP value. 
RSP: 002b:00007fd0358adc38 EFLAGS: 00000246 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 000000000056bf00 RCX: 00000000004748ad RDX: 0000000000000000 RSI: 00000000200000c0 RDI: 0000000000000003 RBP: 00000000ffffffff R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00000000004d1a4b R14: 00007fd0358ae6b4 R15: 00007fd0358add80 Allocated by task 539: save_stack+0x1b/0x40 mm/kasan/common.c:48 set_track mm/kasan/common.c:56 [inline] __kasan_kmalloc mm/kasan/common.c:494 [inline] __kasan_kmalloc.constprop.0+0xc2/0xd0 mm/kasan/common.c:467 kmalloc include/linux/slab.h:555 [inline] kzalloc include/linux/slab.h:669 [inline] __devlink_health_reporter_create+0x91/0x2f0 net/core/devlink.c:5359 devlink_health_reporter_create+0xa1/0x170 net/core/devlink.c:5431 nsim_dev_health_init+0x95/0x3a0 drivers/net/netdevsim/health.c:279 nsim_dev_probe+0xb1e/0xeb0 drivers/net/netdevsim/dev.c:1086 really_probe+0x287/0x6d0 drivers/base/dd.c:525 driver_probe_device+0xfe/0x1d0 drivers/base/dd.c:701 __device_attach_driver+0x21e/0x290 drivers/base/dd.c:807 bus_for_each_drv+0x161/0x1e0 drivers/base/bus.c:431 __device_attach+0x21a/0x360 drivers/base/dd.c:873 bus_probe_device+0x1e6/0x290 drivers/base/bus.c:491 device_add+0xaf2/0x1b00 drivers/base/core.c:2680 nsim_bus_dev_new drivers/net/netdevsim/bus.c:336 [inline] new_device_store+0x374/0x590 drivers/net/netdevsim/bus.c:215 bus_attr_store+0x75/0xa0 drivers/base/bus.c:122 sysfs_kf_write+0x113/0x170 fs/sysfs/file.c:138 kernfs_fop_write+0x25d/0x480 fs/kernfs/file.c:315 __vfs_write+0x7c/0x100 fs/read_write.c:495 vfs_write+0x265/0x5e0 fs/read_write.c:559 ksys_write+0x12d/0x250 fs/read_write.c:612 do_syscall_64+0x56/0xa0 arch/x86/entry/common.c:359 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Freed by task 904: save_stack+0x1b/0x40 mm/kasan/common.c:48 set_track mm/kasan/common.c:56 [inline] kasan_set_free_info mm/kasan/common.c:316 [inline] __kasan_slab_free+0x12c/0x170 mm/kasan/common.c:455 slab_free_hook mm/slub.c:1474 [inline] slab_free_freelist_hook mm/slub.c:1507 [inline] slab_free mm/slub.c:3072 [inline] kfree+0xe6/0x320 mm/slub.c:4063 devlink_health_reporter_free net/core/devlink.c:5449 [inline] devlink_health_reporter_put+0xb7/0xf0 net/core/devlink.c:5456 __devlink_health_reporter_destroy net/core/devlink.c:5463 [inline] devlink_health_reporter_destroy+0x11b/0x1b0 net/core/devlink.c:5475 nsim_dev_health_exit+0x8b/0xe0 drivers/net/netdevsim/health.c:317 nsim_dev_reload_destroy+0x7f/0x110 drivers/net/netdevsim/dev.c:1134 nsim_dev_reload_down+0x6e/0xd0 drivers/net/netdevsim/dev.c:712 devlink_reload+0xc6/0x3b0 net/core/devlink.c:2952 devlink_nl_cmd_reload+0x2f1/0x7c0 net/core/devlink.c:2987 genl_family_rcv_msg_doit net/netlink/genetlink.c:691 [inline] genl_family_rcv_msg net/netlink/genetlink.c:736 [inline] genl_rcv_msg+0x611/0x9d0 net/netlink/genetlink.c:753 netlink_rcv_skb+0x152/0x440 net/netlink/af_netlink.c:2469 genl_rcv+0x24/0x40 net/netlink/genetlink.c:764 netlink_unicast_kernel net/netlink/af_netlink.c:1303 [inline] netlink_unicast+0x53a/0x750 net/netlink/af_netlink.c:1329 netlink_sendmsg+0x850/0xd90 net/netlink/af_netlink.c:1918 sock_sendmsg_nosec net/socket.c:652 [inline] sock_sendmsg+0x150/0x190 net/socket.c:672 ____sys_sendmsg+0x6d8/0x840 net/socket.c:2363 ___sys_sendmsg+0xff/0x170 net/socket.c:2417 __sys_sendmsg+0xe5/0x1b0 net/socket.c:2450 do_syscall_64+0x56/0xa0 arch/x86/entry/common.c:359 entry_SYSCALL_64_after_hwframe+0x44/0xa9 The buggy address belongs to the object at ffff8880650fd000 
which belongs to the cache kmalloc-512 of size 512 The buggy address is located 32 bytes inside of 512-byte region [ffff8880650fd000, ffff8880650fd200) The buggy address belongs to the page: page:ffffea0001943f00 refcount:1 mapcount:0 mapping:0000000000000000 index:0xffff8880650ff800 head:ffffea0001943f00 order:2 compound_mapcount:0 compound_pincount:0 flags: 0x100000000010200(slab|head) raw: 0100000000010200 ffffea0001a06a08 ffffea00010ad308 ffff88806c402500 raw: ffff8880650ff800 0000000000100009 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff8880650fcf00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffff8880650fcf80: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc >ffff8880650fd000: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8880650fd080: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880650fd100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Fixes: 3c5584bf0a04 ("devlink: Rework devlink health reporter destructor") Fixes: 15c724b997a8 ("devlink: Add devlink health port reporters API") Signed-off-by: Ido Schimmel Reviewed-by: Moshe Shemesh Reviewed-by: Jiri Pirko Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- net/core/devlink.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 20a83aace642..6335e1851088 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -5471,9 +5471,11 @@ __devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) void devlink_health_reporter_destroy(struct devlink_health_reporter *reporter) { - mutex_lock(&reporter->devlink->reporters_lock); + struct mutex *lock = &reporter->devlink->reporters_lock; + + mutex_lock(lock); __devlink_health_reporter_destroy(reporter); - mutex_unlock(&reporter->devlink->reporters_lock); + mutex_unlock(lock); } EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); @@ -5485,9 +5487,11 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy); void devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter) { - mutex_lock(&reporter->devlink_port->reporters_lock); + struct mutex *lock = &reporter->devlink_port->reporters_lock; + + mutex_lock(lock); __devlink_health_reporter_destroy(reporter); - mutex_unlock(&reporter->devlink_port->reporters_lock); + mutex_unlock(lock); } EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy); From 91c724cfc0cbc049f18c04634ad56080650e93b8 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:01 +0300 Subject: [PATCH 0929/2056] net: mscc: ocelot: convert port registers to regmap At the moment, there are some minimal register differences between VSC7514 Ocelot and VSC9959 Felix. To be precise, the PCS1G registers are missing from Felix because it was integrated with an NXP PCS. But with VSC9953 Seville (not yet introduced), the register differences are more pronounced. The MAC registers are located at different offsets within the DEV_GMII target. So we need to refactor the driver to keep a regmap even for per-port registers. The callers of the ocelot_port_readl and ocelot_port_writel were kept unchanged, only the implementation is now more generic. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix.c | 13 ++-- drivers/net/dsa/ocelot/felix_vsc9959.c | 47 ++++++++++++- drivers/net/ethernet/mscc/ocelot.h | 3 +- drivers/net/ethernet/mscc/ocelot_io.c | 16 ++++- drivers/net/ethernet/mscc/ocelot_net.c | 5 +- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 53 +++++++++++++-- include/soc/mscc/ocelot.h | 42 +++++++++++- include/soc/mscc/ocelot_dev.h | 78 ---------------------- 8 files changed, 158 insertions(+), 99 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 75652ed99b24..bf0bd5c7b12c 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -524,7 +524,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; - void __iomem *port_regs; + struct regmap *target; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -541,17 +541,18 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) res.start += switch_base; res.end += switch_base; - port_regs = devm_ioremap_resource(ocelot->dev, &res); - if (IS_ERR(port_regs)) { + target = ocelot_regmap_init(ocelot, &res); + if (IS_ERR(target)) { dev_err(ocelot->dev, - "failed to map registers for port %d\n", port); + "Failed to map memory space for port %d\n", + port); kfree(port_phy_modes); - return PTR_ERR(port_regs); + return PTR_ERR(target); } ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; - ocelot_port->regs = port_regs; + ocelot_port->target = target; ocelot->ports[port] = ocelot_port; } diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 19614537b1ba..0c54d67a4039 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -329,7 +329,49 @@ static const u32 vsc9959_gcb_regmap[] = { REG(GCB_SOFT_RST, 0x000004), }; -static const u32 *vsc9959_regmap[] = { +static const u32 vsc9959_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG_RESERVED(PCS1G_CFG), + REG_RESERVED(PCS1G_MODE_CFG), + REG_RESERVED(PCS1G_SD_CFG), + REG_RESERVED(PCS1G_ANEG_CFG), + REG_RESERVED(PCS1G_ANEG_NP_CFG), + REG_RESERVED(PCS1G_LB_CFG), + REG_RESERVED(PCS1G_DBG_CFG), + REG_RESERVED(PCS1G_CDET_CFG), + REG_RESERVED(PCS1G_ANEG_STATUS), + REG_RESERVED(PCS1G_ANEG_NP_STATUS), + REG_RESERVED(PCS1G_LINK_STATUS), + REG_RESERVED(PCS1G_LINK_DOWN_CNT), + REG_RESERVED(PCS1G_STICKY), + REG_RESERVED(PCS1G_DEBUG_STATUS), + REG_RESERVED(PCS1G_LPI_CFG), + REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT), + REG_RESERVED(PCS1G_LPI_STATUS), + REG_RESERVED(PCS1G_TSTPAT_MODE_CFG), + REG_RESERVED(PCS1G_TSTPAT_STATUS), + REG_RESERVED(DEV_PCS_FX100_CFG), + REG_RESERVED(DEV_PCS_FX100_STATUS), +}; + +static const u32 *vsc9959_regmap[TARGET_MAX] = { [ANA] = vsc9959_ana_regmap, [QS] = vsc9959_qs_regmap, [QSYS] = vsc9959_qsys_regmap, @@ -338,10 +380,11 @@ static const u32 *vsc9959_regmap[] = { [S2] = vsc9959_s2_regmap, [PTP] = 
vsc9959_ptp_regmap, [GCB] = vsc9959_gcb_regmap, + [DEV_GMII] = vsc9959_dev_gmii_regmap, }; /* Addresses are relative to the PCI device's base address */ -static const struct resource vsc9959_target_io_res[] = { +static const struct resource vsc9959_target_io_res[TARGET_MAX] = { [ANA] = { .start = 0x0280000, .end = 0x028ffff, diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 394362e23c47..814b09dd2c11 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -105,8 +105,7 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); #define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) #define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) -int ocelot_probe_port(struct ocelot *ocelot, u8 port, - void __iomem *regs, +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy); void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu, diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index b229b1cb68ef..741f653bc85b 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -49,13 +49,25 @@ EXPORT_SYMBOL(__ocelot_rmw_ix); u32 ocelot_port_readl(struct ocelot_port *port, u32 reg) { - return readl(port->regs + reg); + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + u32 val; + + WARN_ON(!target); + + regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val); + return val; } EXPORT_SYMBOL(ocelot_port_readl); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) { - writel(val, port->regs + reg); + struct ocelot *ocelot = port->ocelot; + u16 target = reg >> TARGET_OFFSET; + + WARN_ON(!target); + + regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val); } EXPORT_SYMBOL(ocelot_port_writel); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 41a1b5f6df95..0668d23cdbfa 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1005,8 +1005,7 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; -int ocelot_probe_port(struct ocelot *ocelot, u8 port, - void __iomem *regs, +int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy) { struct ocelot_port_private *priv; @@ -1024,7 +1023,7 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, priv->chip_port = port; ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; - ocelot_port->regs = regs; + ocelot_port->target = target; ocelot->ports[port] = ocelot_port; dev->netdev_ops = &ocelot_port_netdev_ops; diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 43716e8dc0ac..63af145e744c 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -263,7 +263,49 @@ static const u32 ocelot_ptp_regmap[] = { REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8), }; -static const u32 *ocelot_regmap[] = { +static const u32 ocelot_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG(DEV_EVENTS, 0x8), + REG(DEV_EEE_CFG, 0xc), + REG(DEV_RX_PATH_DELAY, 0x10), + REG(DEV_TX_PATH_DELAY, 0x14), + REG(DEV_PTP_PREDICT_CFG, 0x18), + REG(DEV_MAC_ENA_CFG, 0x1c), + REG(DEV_MAC_MODE_CFG, 0x20), + 
REG(DEV_MAC_MAXLEN_CFG, 0x24), + REG(DEV_MAC_TAGS_CFG, 0x28), + REG(DEV_MAC_ADV_CHK_CFG, 0x2c), + REG(DEV_MAC_IFG_CFG, 0x30), + REG(DEV_MAC_HDX_CFG, 0x34), + REG(DEV_MAC_DBG_CFG, 0x38), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40), + REG(DEV_MAC_STICKY, 0x44), + REG(PCS1G_CFG, 0x48), + REG(PCS1G_MODE_CFG, 0x4c), + REG(PCS1G_SD_CFG, 0x50), + REG(PCS1G_ANEG_CFG, 0x54), + REG(PCS1G_ANEG_NP_CFG, 0x58), + REG(PCS1G_LB_CFG, 0x5c), + REG(PCS1G_DBG_CFG, 0x60), + REG(PCS1G_CDET_CFG, 0x64), + REG(PCS1G_ANEG_STATUS, 0x68), + REG(PCS1G_ANEG_NP_STATUS, 0x6c), + REG(PCS1G_LINK_STATUS, 0x70), + REG(PCS1G_LINK_DOWN_CNT, 0x74), + REG(PCS1G_STICKY, 0x78), + REG(PCS1G_DEBUG_STATUS, 0x7c), + REG(PCS1G_LPI_CFG, 0x80), + REG(PCS1G_LPI_WAKE_ERROR_CNT, 0x84), + REG(PCS1G_LPI_STATUS, 0x88), + REG(PCS1G_TSTPAT_MODE_CFG, 0x8c), + REG(PCS1G_TSTPAT_STATUS, 0x90), + REG(DEV_PCS_FX100_CFG, 0x94), + REG(DEV_PCS_FX100_STATUS, 0x98), +}; + +static const u32 *ocelot_regmap[TARGET_MAX] = { [ANA] = ocelot_ana_regmap, [QS] = ocelot_qs_regmap, [QSYS] = ocelot_qsys_regmap, @@ -271,6 +313,7 @@ static const u32 *ocelot_regmap[] = { [SYS] = ocelot_sys_regmap, [S2] = ocelot_s2_regmap, [PTP] = ocelot_ptp_regmap, + [DEV_GMII] = ocelot_dev_gmii_regmap, }; static const struct reg_field ocelot_regfields[] = { @@ -948,9 +991,9 @@ static int mscc_ocelot_probe(struct platform_device *pdev) struct device_node *phy_node; phy_interface_t phy_mode; struct phy_device *phy; + struct regmap *target; struct resource *res; struct phy *serdes; - void __iomem *regs; char res_name[8]; u32 port; @@ -961,8 +1004,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); - regs = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(regs)) + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) continue; phy_node = of_parse_phandle(portnp, "phy-handle", 0); @@ -974,7 +1017,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (!phy) continue; - err = ocelot_probe_port(ocelot, port, regs, phy); + err = ocelot_probe_port(ocelot, port, target, phy); if (err) { of_node_put(portnp); goto out_put_ports; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index e050f8121ba2..c2a2d0165ef1 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -126,6 +126,7 @@ enum ocelot_target { HSIO, PTP, GCB, + DEV_GMII, TARGET_MAX, }; @@ -408,6 +409,45 @@ enum ocelot_reg { PTP_CLK_CFG_ADJ_CFG, PTP_CLK_CFG_ADJ_FREQ, GCB_SOFT_RST = GCB << TARGET_OFFSET, + DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET, + DEV_PORT_MISC, + DEV_EVENTS, + DEV_EEE_CFG, + DEV_RX_PATH_DELAY, + DEV_TX_PATH_DELAY, + DEV_PTP_PREDICT_CFG, + DEV_MAC_ENA_CFG, + DEV_MAC_MODE_CFG, + DEV_MAC_MAXLEN_CFG, + DEV_MAC_TAGS_CFG, + DEV_MAC_ADV_CHK_CFG, + DEV_MAC_IFG_CFG, + DEV_MAC_HDX_CFG, + DEV_MAC_DBG_CFG, + DEV_MAC_FC_MAC_LOW_CFG, + DEV_MAC_FC_MAC_HIGH_CFG, + DEV_MAC_STICKY, + PCS1G_CFG, + PCS1G_MODE_CFG, + PCS1G_SD_CFG, + PCS1G_ANEG_CFG, + PCS1G_ANEG_NP_CFG, + PCS1G_LB_CFG, + PCS1G_DBG_CFG, + PCS1G_CDET_CFG, + PCS1G_ANEG_STATUS, + PCS1G_ANEG_NP_STATUS, + PCS1G_LINK_STATUS, + PCS1G_LINK_DOWN_CNT, + PCS1G_STICKY, + PCS1G_DEBUG_STATUS, + PCS1G_LPI_CFG, + PCS1G_LPI_WAKE_ERROR_CNT, + PCS1G_LPI_STATUS, + PCS1G_TSTPAT_MODE_CFG, + PCS1G_TSTPAT_STATUS, + DEV_PCS_FX100_CFG, + DEV_PCS_FX100_STATUS, }; enum ocelot_regfield { @@ -494,7 +534,7 @@ struct ocelot_vcap_block { struct ocelot_port { struct ocelot *ocelot; - void __iomem *regs; + struct regmap *target; bool 
vlan_aware; diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h index 7c08437061fc..0c6021f02fee 100644 --- a/include/soc/mscc/ocelot_dev.h +++ b/include/soc/mscc/ocelot_dev.h @@ -8,8 +8,6 @@ #ifndef _MSCC_OCELOT_DEV_H_ #define _MSCC_OCELOT_DEV_H_ -#define DEV_CLOCK_CFG 0x0 - #define DEV_CLOCK_CFG_MAC_TX_RST BIT(7) #define DEV_CLOCK_CFG_MAC_RX_RST BIT(6) #define DEV_CLOCK_CFG_PCS_TX_RST BIT(5) @@ -19,18 +17,12 @@ #define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0)) #define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0) -#define DEV_PORT_MISC 0x4 - #define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4) #define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3) #define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2) #define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1) #define DEV_PORT_MISC_HDX_FAST_DIS BIT(0) -#define DEV_EVENTS 0x8 - -#define DEV_EEE_CFG 0xc - #define DEV_EEE_CFG_EEE_ENA BIT(22) #define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15)) #define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15) @@ -43,33 +35,19 @@ #define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1) #define DEV_EEE_CFG_PORT_LPI BIT(0) -#define DEV_RX_PATH_DELAY 0x10 - -#define DEV_TX_PATH_DELAY 0x14 - -#define DEV_PTP_PREDICT_CFG 0x18 - #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4)) #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4) #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4) #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0)) #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0) -#define DEV_MAC_ENA_CFG 0x1c - #define DEV_MAC_ENA_CFG_RX_ENA BIT(4) #define DEV_MAC_ENA_CFG_TX_ENA BIT(0) -#define DEV_MAC_MODE_CFG 0x20 - #define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8) #define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4) #define DEV_MAC_MODE_CFG_FDX_ENA BIT(0) -#define DEV_MAC_MAXLEN_CFG 0x24 - -#define DEV_MAC_TAGS_CFG 0x28 - #define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16)) #define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16) #define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -77,12 +55,8 @@ #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) -#define DEV_MAC_ADV_CHK_CFG 0x2c - #define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0) -#define DEV_MAC_IFG_CFG 0x30 - #define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17) #define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16) #define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8)) @@ -94,8 +68,6 @@ #define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0)) #define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0) -#define DEV_MAC_HDX_CFG 0x34 - #define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26) #define DEV_MAC_HDX_CFG_OB_ENA BIT(25) #define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24) @@ -107,17 +79,9 @@ #define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0)) #define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0) -#define DEV_MAC_DBG_CFG 0x38 - #define DEV_MAC_DBG_CFG_TBI_MODE BIT(4) #define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0) -#define DEV_MAC_FC_MAC_LOW_CFG 0x3c - -#define DEV_MAC_FC_MAC_HIGH_CFG 0x40 - -#define DEV_MAC_STICKY 0x44 - #define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9) #define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8) #define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7) @@ -129,25 +93,17 @@ #define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1) #define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0) -#define PCS1G_CFG 0x48 - #define PCS1G_CFG_LINK_STATUS_TYPE BIT(4) #define PCS1G_CFG_AN_LINK_CTRL_ENA 
BIT(1) #define PCS1G_CFG_PCS_ENA BIT(0) -#define PCS1G_MODE_CFG 0x4c - #define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4) #define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0) -#define PCS1G_SD_CFG 0x50 - #define PCS1G_SD_CFG_SD_SEL BIT(8) #define PCS1G_SD_CFG_SD_POL BIT(4) #define PCS1G_SD_CFG_SD_ENA BIT(0) -#define PCS1G_ANEG_CFG 0x54 - #define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16) #define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -155,29 +111,19 @@ #define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1) #define PCS1G_ANEG_CFG_ANEG_ENA BIT(0) -#define PCS1G_ANEG_NP_CFG 0x58 - #define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16) #define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16) #define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0) -#define PCS1G_LB_CFG 0x5c - #define PCS1G_LB_CFG_RA_ENA BIT(4) #define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1) #define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0) -#define PCS1G_DBG_CFG 0x60 - #define PCS1G_DBG_CFG_UDLT BIT(0) -#define PCS1G_CDET_CFG 0x64 - #define PCS1G_CDET_CFG_CDET_ENA BIT(0) -#define PCS1G_ANEG_STATUS 0x68 - #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16)) #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16) #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16) @@ -185,10 +131,6 @@ #define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3) #define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0) -#define PCS1G_ANEG_NP_STATUS 0x6c - -#define PCS1G_LINK_STATUS 0x70 - #define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12)) #define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12) #define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12) @@ -196,17 +138,9 @@ #define PCS1G_LINK_STATUS_LINK_STATUS BIT(4) #define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0) -#define PCS1G_LINK_DOWN_CNT 0x74 - -#define PCS1G_STICKY 0x78 - #define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4) #define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0) -#define PCS1G_DEBUG_STATUS 0x7c - -#define PCS1G_LPI_CFG 0x80 - #define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20) #define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17) #define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16) @@ -215,10 +149,6 @@ #define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4) #define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0) -#define PCS1G_LPI_WAKE_ERROR_CNT 0x84 - -#define PCS1G_LPI_STATUS 0x88 - #define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16) #define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12) #define PCS1G_LPI_STATUS_RX_QUIET BIT(9) @@ -227,18 +157,12 @@ #define PCS1G_LPI_STATUS_TX_QUIET BIT(1) #define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0) -#define PCS1G_TSTPAT_MODE_CFG 0x8c - -#define PCS1G_TSTPAT_STATUS 0x90 - #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8)) #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8) #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8) #define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4) #define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0) -#define DEV_PCS_FX100_CFG 0x94 - #define DEV_PCS_FX100_CFG_SD_SEL BIT(26) #define DEV_PCS_FX100_CFG_SD_POL BIT(25) #define DEV_PCS_FX100_CFG_SD_ENA BIT(24) @@ -259,8 +183,6 @@ #define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1) #define DEV_PCS_FX100_CFG_PCS_ENA BIT(0) -#define DEV_PCS_FX100_STATUS 0x98 - #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8)) #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8) #define 
DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8) From 2789658fa319f51db43a585e076bb99a3de3c6d1 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:02 +0300 Subject: [PATCH 0930/2056] soc: mscc: ocelot: add MII registers description Add the register definitions for the MSCC MIIM MDIO controller in preparation for seville_vsc9959.c to create its accessors for the internal MDIO bus. Since we've introduced elements to ocelot_regfields that are not instantiated by felix and ocelot, we need to define the size of the regfields arrays explicitly, otherwise ocelot_regfields_init, which iterates up to REGFIELD_MAX, will fault on the undefined regfield entries (if we're lucky). Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 2 +- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 2 +- include/soc/mscc/ocelot.h | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 0c54d67a4039..b97c12a783eb 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -469,7 +469,7 @@ static const struct resource vsc9959_imdio_res = { .name = "imdio", }; -static const struct reg_field vsc9959_regfields[] = { +static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6), [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5), [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30), diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 63af145e744c..83c17c689641 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -316,7 +316,7 @@ static const u32 *ocelot_regmap[TARGET_MAX] = { [DEV_GMII] = ocelot_dev_gmii_regmap, }; -static const struct reg_field ocelot_regfields[] = { +static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11), [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10), [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27), diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index c2a2d0165ef1..348fa26a349c 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -409,6 +409,9 @@ enum ocelot_reg { PTP_CLK_CFG_ADJ_CFG, PTP_CLK_CFG_ADJ_FREQ, GCB_SOFT_RST = GCB << TARGET_OFFSET, + GCB_MIIM_MII_STATUS, + GCB_MIIM_MII_CMD, + GCB_MIIM_MII_DATA, DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET, DEV_PORT_MISC, DEV_EVENTS, @@ -496,6 +499,8 @@ enum ocelot_regfield { SYS_RESET_CFG_MEM_ENA, SYS_RESET_CFG_MEM_INIT, GCB_SOFT_RST_SWC_RST, + GCB_MIIM_MII_STATUS_PENDING, + GCB_MIIM_MII_STATUS_BUSY, REGFIELD_MAX }; From 886e1387c73d895ad0eff53353913081983570c0 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:03 +0300 Subject: [PATCH 0931/2056] net: mscc: ocelot: convert QSYS_SWITCH_PORT_MODE and SYS_PORT_MODE to regfields Currently Felix and Ocelot share the same bit layout in these per-port registers, but Seville does not. So we need reg_fields for that. Actually since these are per-port registers, we need to also specify the number of ports, and register size per port, and use the regmap API for multiple ports. 
There's a more subtle point to be made about the other 2 register fields: - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG - QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE which we are not writing any longer, for 2 reasons: - Using the previous API (ocelot_write_rix), we were only writing 1 for Felix and Ocelot, which was their hardware-default value, and which there wasn't any intention in changing. - In the case of SCH_NEXT_CFG, in fact Seville does not have this register field at all, and therefore, if we want to have common code we would be required to not write to it. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 9 ++---- drivers/net/dsa/ocelot/felix_vsc9959.c | 11 +++++++ drivers/net/ethernet/mscc/ocelot.c | 36 +++++++++------------- drivers/net/ethernet/mscc/ocelot.h | 6 ---- drivers/net/ethernet/mscc/ocelot_io.c | 2 ++ drivers/net/ethernet/mscc/ocelot_vsc7514.c | 11 +++++++ include/soc/mscc/ocelot.h | 15 +++++++++ include/soc/mscc/ocelot_qsys.h | 13 -------- include/soc/mscc/ocelot_sys.h | 13 -------- 9 files changed, 56 insertions(+), 60 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index bf0bd5c7b12c..4b255ed614e4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -249,8 +249,7 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); } static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, @@ -326,10 +325,8 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, ANA_PORT_PORT_CFG, port); /* Core: Enable port for frame transfer */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); if (felix->info->pcs_link_up) felix->info->pcs_link_up(ocelot, port, link_an_mode, interface, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index b97c12a783eb..efbfbdccb2b6 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -503,6 +503,17 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0), [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), + /* Replicated per number of ports (7), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4), + 
[SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4), }; static const struct ocelot_stat_layout vsc9959_stats_layout[] = { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index e815aad8d85e..36986fccedf4 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -389,10 +389,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, ANA_PFC_PFC_CFG, port); /* Core: Enable port for frame transfer */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* Flow control */ ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | @@ -423,8 +421,7 @@ void ocelot_port_disable(struct ocelot *ocelot, int port) struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, port); + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); } EXPORT_SYMBOL(ocelot_port_disable); @@ -1392,27 +1389,22 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, QSYS_EXT_CPU_CFG); /* Enable NPI port */ - ocelot_write_rix(ocelot, - QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, npi); + ocelot_fields_write(ocelot, npi, + QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* NPI port Injection/Extraction configuration */ - ocelot_write_rix(ocelot, - SYS_PORT_MODE_INCL_XTR_HDR(extraction) | - SYS_PORT_MODE_INCL_INJ_HDR(injection), - SYS_PORT_MODE, npi); + ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_XTR_HDR, + extraction); + ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR, + injection); } /* Enable CPU port module */ - ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE | - QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) | - QSYS_SWITCH_PORT_MODE_PORT_ENA, - QSYS_SWITCH_PORT_MODE, cpu); + ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); /* CPU port Injection/Extraction configuration */ - ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) | - SYS_PORT_MODE_INCL_INJ_HDR(injection), - SYS_PORT_MODE, cpu); + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR, + extraction); + ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR, + injection); /* Configure the CPU port to be VLAN aware */ ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) | diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 814b09dd2c11..dc29e05103a1 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -102,9 +102,6 @@ void ocelot_port_lag_leave(struct ocelot *ocelot, int port, u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); -#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) -#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) - int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, struct phy_device *phy); @@ -116,7 +113,4 @@ extern struct notifier_block ocelot_netdevice_nb; extern struct notifier_block 
ocelot_switchdev_nb; extern struct notifier_block ocelot_switchdev_blocking_nb; -#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) -#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) - #endif diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index 741f653bc85b..d22711282183 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -89,6 +89,8 @@ int ocelot_regfields_init(struct ocelot *ocelot, regfield.reg = ocelot->map[target][reg & REG_MASK]; regfield.lsb = regfields[i].lsb; regfield.msb = regfields[i].msb; + regfield.id_size = regfields[i].id_size; + regfield.id_offset = regfields[i].id_offset; ocelot->regfields[i] = devm_regmap_field_alloc(ocelot->dev, diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 83c17c689641..9c6a9d44871d 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -358,6 +358,17 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), + /* Replicated per number of ports (11), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 11, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 11, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 11, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 11, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 11, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), }; static const struct ocelot_stat_layout ocelot_stats_layout[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 348fa26a349c..19d97585345a 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -490,11 +490,21 @@ enum ocelot_regfield { ANA_TABLES_MACACCESS_B_DOM, ANA_TABLES_MACTINDX_BUCKET, ANA_TABLES_MACTINDX_M_INDEX, + QSYS_SWITCH_PORT_MODE_PORT_ENA, + QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG, + QSYS_SWITCH_PORT_MODE_YEL_RSRVD, + QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE, + QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, + QSYS_SWITCH_PORT_MODE_TX_PFC_MODE, QSYS_TIMED_FRAME_ENTRY_TFRM_VLD, QSYS_TIMED_FRAME_ENTRY_TFRM_FP, QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO, QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL, QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T, + SYS_PORT_MODE_DATA_WO_TS, + SYS_PORT_MODE_INCL_INJ_HDR, + SYS_PORT_MODE_INCL_XTR_HDR, + SYS_PORT_MODE_INCL_HDR_ERR, SYS_RESET_CFG_CORE_ENA, SYS_RESET_CFG_MEM_ENA, SYS_RESET_CFG_MEM_INIT, @@ -638,6 +648,11 @@ struct ocelot_policer { #define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) #define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) +#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define 
ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) +#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val)) +#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val)) + /* I/O */ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h index d8c63aa761be..a814bc2017d8 100644 --- a/include/soc/mscc/ocelot_qsys.h +++ b/include/soc/mscc/ocelot_qsys.h @@ -13,19 +13,6 @@ #define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1) #define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0) -#define QSYS_SWITCH_PORT_MODE_RSZ 0x4 - -#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11)) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11) -#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11) -#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10) -#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1)) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1) -#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0) - #define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5) #define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4) #define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3) diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h index 16f91e172bcb..8a95fc93fde5 100644 --- a/include/soc/mscc/ocelot_sys.h +++ b/include/soc/mscc/ocelot_sys.h @@ -12,19 +12,6 @@ #define SYS_COUNT_TX_OCTETS_RSZ 0x4 -#define SYS_PORT_MODE_RSZ 0x4 - -#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5)) -#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5) -#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5) -#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3)) -#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3) -#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3) -#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1)) -#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1) -#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1) -#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0) - #define SYS_FRONT_PORT_MODE_RSZ 0x4 #define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0) From 67c2404922c2c3f9cc0898aafaa4e3bea2bde084 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:04 +0300 Subject: [PATCH 0932/2056] net: dsa: felix: create a template for the DSA tags on xmit With this patch we try to kill 2 birds with 1 stone. First of all, some switches that use tag_ocelot.c don't have the exact same bitfield layout for the DSA tags. The destination ports field is different for Seville VSC9953 for example. So the choices are to either duplicate tag_ocelot.c into a new tag_seville.c (sub-optimal) or somehow take into account a supposed ocelot->dest_ports_offset when packing this field into the DSA injection header (again not ideal). Secondly, tag_ocelot.c already needs to memset a 128-bit area to zero and call some packing() functions of dubious performance in the fastpath. And most of the values it needs to pack are pretty much constant (BYPASS=1, SRC_PORT=CPU, DEST=port index). So it would be good if we could improve that. 
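To make that cost concrete, here is a condensed, hedged sketch of the per-frame header construction as it exists today versus the template variant proposed in the next paragraph; the demo_* helpers and DEMO_TAG_LEN are illustrative, while the bit positions and the packing() call come straight from tag_ocelot.c (OCELOT_TAG_LEN is the 16-byte, i.e. 128-bit, injection header):

#include <linux/packing.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_TAG_LEN	16	/* stands in for OCELOT_TAG_LEN */

/* today: the whole 128-bit header is zeroed and packed for every frame */
static void demo_build_tag_per_frame(u8 *injection, u64 dest, u64 src,
				     u64 qos_class)
{
	u64 bypass = 1;

	memset(injection, 0, DEMO_TAG_LEN);
	packing(injection, &bypass,    127, 127, DEMO_TAG_LEN, PACK, 0);
	packing(injection, &dest,       68,  56, DEMO_TAG_LEN, PACK, 0);
	packing(injection, &src,        46,  43, DEMO_TAG_LEN, PACK, 0);
	packing(injection, &qos_class,  19,  17, DEMO_TAG_LEN, PACK, 0);
}

/* with a per-port template: the constant fields (bypass, dest, src) were
 * packed once at probe time, so only the per-frame fields are touched here
 * (rew_op is handled the same way when PTP timestamping is requested)
 */
static void demo_build_tag_from_template(u8 *injection, const u8 *template,
					 u64 qos_class)
{
	memcpy(injection, template, DEMO_TAG_LEN);
	packing(injection, &qos_class, 19, 17, DEMO_TAG_LEN, PACK, 0);
}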
The proposed solution is to allocate a memory area per port at probe time, initialize that with the statically defined bits as per chip hardware revision, and just perform a simpler memcpy in the fastpath. Other alternatives have been analyzed, such as: - Create a separate tag_seville.c: too much code duplication for just 1 bit field difference. - Create a separate DSA_TAG_PROTO_SEVILLE under tag_ocelot.c, just like tag_brcm.c, which would have a separate .xmit function. Again, too much code duplication for just 1 bit field difference. - Allocate the template from the init function of the tag_ocelot.c module, instead of from the driver: couldn't figure out a method of accessing the correct port template corresponding to the correct tagger in the .xmit function. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 13 +++++++++++++ drivers/net/dsa/ocelot/felix.h | 1 + drivers/net/dsa/ocelot/felix_vsc9959.c | 20 ++++++++++++++++++++ include/soc/mscc/ocelot.h | 2 ++ net/dsa/tag_ocelot.c | 21 ++++++++------------- 5 files changed, 44 insertions(+), 13 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 4b255ed614e4..b9981d8c4c98 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -522,6 +522,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; struct regmap *target; + u8 *template; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), @@ -547,10 +548,22 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return PTR_ERR(target); } + template = devm_kzalloc(ocelot->dev, OCELOT_TAG_LEN, + GFP_KERNEL); + if (!template) { + dev_err(ocelot->dev, + "Failed to allocate memory for DSA tag\n"); + kfree(port_phy_modes); + return -ENOMEM; + } + ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; + ocelot_port->xmit_template = template; ocelot->ports[port] = ocelot_port; + + felix->info->xmit_template_populate(ocelot, port); } kfree(port_phy_modes); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index 00137b64132b..a85631d716b9 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -43,6 +43,7 @@ struct felix_info { enum tc_setup_type type, void *type_data); void (*port_sched_speed_set)(struct ocelot *ocelot, int port, u32 speed); + void (*xmit_template_populate)(struct ocelot *ocelot, int port); }; extern struct felix_info felix_info_vsc9959; diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index efbfbdccb2b6..d640146acc3d 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -1432,6 +1433,24 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port, } } +static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 *template = ocelot_port->xmit_template; + u64 bypass, dest, src; + + /* Set the source port as the CPU port module and not the + * NPI port + */ + src = ocelot->num_phys_ports; + dest = BIT(port); + bypass = true; + + packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); + packing(template, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0); + 
packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); +} + struct felix_info felix_info_vsc9959 = { .target_io_res = vsc9959_target_io_res, .port_io_res = vsc9959_port_io_res, @@ -1458,4 +1477,5 @@ struct felix_info felix_info_vsc9959 = { .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .xmit_template_populate = vsc9959_xmit_template_populate, }; diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 19d97585345a..6cfbace57770 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -564,6 +564,8 @@ struct ocelot_port { u8 ts_id; phy_interface_t phy_mode; + + u8 *xmit_template; }; struct ocelot { diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index b0c98ee4e13b..42f327c06dca 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -137,11 +137,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { struct dsa_port *dp = dsa_slave_to_port(netdev); - u64 bypass, dest, src, qos_class, rew_op; struct dsa_switch *ds = dp->ds; - int port = dp->index; struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port *ocelot_port; + u64 qos_class, rew_op; u8 *injection; if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) { @@ -149,19 +148,15 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, return NULL; } + ocelot_port = ocelot->ports[dp->index]; + injection = skb_push(skb, OCELOT_TAG_LEN); - memset(injection, 0, OCELOT_TAG_LEN); - - /* Set the source port as the CPU port module and not the NPI port */ - src = ocelot->num_phys_ports; - dest = BIT(port); - bypass = true; + memcpy(injection, ocelot_port->xmit_template, OCELOT_TAG_LEN); + /* Fix up the fields which are not statically determined + * in the template + */ qos_class = skb->priority; - - packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); - packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0); - packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0); if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { From e8e6e73db14273464b374d49ca7242c0994945f3 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:05 +0300 Subject: [PATCH 0933/2056] net: mscc: ocelot: split writes to pause frame enable bit and to thresholds We don't want ocelot_port_set_maxlen to enable pause frame TX, just to adjust the pause thresholds. Move the unconditional enabling of pause TX to ocelot_init_port. There is no good place to put such setting because it shouldn't be unconditional. But at the moment it is, we're not changing that. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 36986fccedf4..aca805b9c0b3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1259,6 +1259,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) { struct ocelot_port *ocelot_port = ocelot->ports[port]; int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; + int pause_start, pause_stop; int atop_wm; if (port == ocelot->npi) { @@ -1272,13 +1273,13 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG); - /* Set Pause WM hysteresis - * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ - * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ - */ - ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA | - SYS_PAUSE_CFG_PAUSE_STOP(101) | - SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port); + /* Set Pause watermark hysteresis */ + pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ; + pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ; + ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_START(pause_start), + SYS_PAUSE_CFG_PAUSE_START_M, SYS_PAUSE_CFG, port); + ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_STOP(pause_stop), + SYS_PAUSE_CFG_PAUSE_STOP_M, SYS_PAUSE_CFG, port); /* Tail dropping watermark */ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / @@ -1341,6 +1342,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port) ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG); ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); + /* Enable transmission of pause frames */ + ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA, SYS_PAUSE_CFG_PAUSE_ENA, + SYS_PAUSE_CFG, port); + /* Drop frames with multicast source address */ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, From b39648079db40874df97c5104066f8a9fc3daa0c Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:06 +0300 Subject: [PATCH 0934/2056] net: mscc: ocelot: disable flow control on NPI interface The Ocelot switches do not support flow control on Ethernet interfaces where a DSA tag must be added. If pause frames are enabled, they will be encapsulated in the DSA tag just like regular frames, and the DSA master will not recognize them. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index aca805b9c0b3..2a44305912d2 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1401,6 +1401,10 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, extraction); ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR, injection); + + /* Disable transmission of pause frames */ + ocelot_rmw_rix(ocelot, 0, SYS_PAUSE_CFG_PAUSE_ENA, + SYS_PAUSE_CFG, npi); } /* Enable CPU port module */ From 541132f0961a4f17b02974902085af964adf5966 Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:07 +0300 Subject: [PATCH 0935/2056] net: mscc: ocelot: convert SYS_PAUSE_CFG register access to regfield Seville has a different bitwise layout than Ocelot and Felix. Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 3 +++ drivers/net/ethernet/mscc/ocelot.c | 14 ++++++-------- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 3 +++ include/soc/mscc/ocelot.h | 3 +++ include/soc/mscc/ocelot_sys.h | 10 ---------- 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index d640146acc3d..fea482ad92c7 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -515,6 +515,9 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = { [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4), [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4), [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4), }; static const struct ocelot_stat_layout vsc9959_stats_layout[] = { diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2a44305912d2..4d5222fa3397 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -1276,10 +1276,10 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) /* Set Pause watermark hysteresis */ pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ; pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ; - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_START(pause_start), - SYS_PAUSE_CFG_PAUSE_START_M, SYS_PAUSE_CFG, port); - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_STOP(pause_stop), - SYS_PAUSE_CFG_PAUSE_STOP_M, SYS_PAUSE_CFG, port); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START, + pause_start); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP, + pause_stop); /* Tail dropping watermark */ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / @@ -1343,8 +1343,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port) ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG); /* Enable transmission of pause frames */ - ocelot_rmw_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA, SYS_PAUSE_CFG_PAUSE_ENA, - SYS_PAUSE_CFG, port); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); /* Drop frames with multicast source address */ ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA, @@ -1403,8 +1402,7 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, injection); /* Disable transmission of pause frames */ - ocelot_rmw_rix(ocelot, 0, SYS_PAUSE_CFG_PAUSE_ENA, - SYS_PAUSE_CFG, npi); + ocelot_fields_write(ocelot, npi, SYS_PAUSE_CFG_PAUSE_ENA, 0); } /* Enable CPU port module */ diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 9c6a9d44871d..e9cbfbed1fc6 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -369,6 +369,9 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 11, 4), [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 11, 4), [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 11, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 11, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4), }; static const 
struct ocelot_stat_layout ocelot_stats_layout[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 6cfbace57770..71bb92bcfdf7 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -511,6 +511,9 @@ enum ocelot_regfield { GCB_SOFT_RST_SWC_RST, GCB_MIIM_MII_STATUS_PENDING, GCB_MIIM_MII_STATUS_BUSY, + SYS_PAUSE_CFG_PAUSE_START, + SYS_PAUSE_CFG_PAUSE_STOP, + SYS_PAUSE_CFG_PAUSE_ENA, REGFIELD_MAX }; diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h index 8a95fc93fde5..79cf40ccdbe6 100644 --- a/include/soc/mscc/ocelot_sys.h +++ b/include/soc/mscc/ocelot_sys.h @@ -43,16 +43,6 @@ #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0)) #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0) -#define SYS_PAUSE_CFG_RSZ 0x4 - -#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10)) -#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10) -#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10) -#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1)) -#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1) -#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1) -#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0) - #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9)) #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9) #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9) From aa92d836d5c40a7e21e563a272ad177f1bfd44dd Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:08 +0300 Subject: [PATCH 0936/2056] net: mscc: ocelot: extend watermark encoding function The ocelot_wm_encode function deals with setting thresholds for pause frame start and stop. In Ocelot and Felix the register layout is the same, but for Seville, it isn't. The easiest way to accommodate Seville hardware configuration is to introduce a function pointer for setting this up. Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 13 +++++++++++++ drivers/net/ethernet/mscc/ocelot.c | 16 ++-------------- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 13 +++++++++++++ include/soc/mscc/ocelot.h | 1 + 4 files changed, 29 insertions(+), 14 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index fea482ad92c7..7e8a99455670 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1149,8 +1149,21 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, } } +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 vsc9959_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + static const struct ocelot_ops vsc9959_ops = { .reset = vsc9959_reset, + .wm_enc = vsc9959_wm_enc, }; static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 4d5222fa3397..f2d94b026d88 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -309,18 +309,6 @@ static void ocelot_vlan_init(struct ocelot *ocelot) } } -/* Watermark encode - * Bit 8: Unit; 0:1, 1:16 - * Bit 7-0: Value to be multiplied with unit - */ -static u16 ocelot_wm_enc(u16 value) -{ - if (value >= BIT(8)) - return BIT(8) | (value / 16); - - return value; -} - void ocelot_adjust_link(struct ocelot *ocelot, int port, struct phy_device *phydev) { @@ -1284,9 +1272,9 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu) /* Tail dropping watermark */ atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / OCELOT_BUFFER_CELL_SZ; - ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen), + ocelot_write_rix(ocelot, ocelot->ops->wm_enc(9 * maxlen), SYS_ATOP, port); - ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG); + ocelot_write(ocelot, ocelot->ops->wm_enc(atop_wm), SYS_ATOP_TOT_CFG); } EXPORT_SYMBOL(ocelot_port_set_maxlen); diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index e9cbfbed1fc6..0ead1ef11c6c 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -739,8 +739,21 @@ static int ocelot_reset(struct ocelot *ocelot) return 0; } +/* Watermark encode + * Bit 8: Unit; 0:1, 1:16 + * Bit 7-0: Value to be multiplied with unit + */ +static u16 ocelot_wm_enc(u16 value) +{ + if (value >= BIT(8)) + return BIT(8) | (value / 16); + + return value; +} + static const struct ocelot_ops ocelot_ops = { .reset = ocelot_reset, + .wm_enc = ocelot_wm_enc, }; static const struct vcap_field vsc7514_vcap_is2_keys[] = { diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 71bb92bcfdf7..da369b12005f 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -541,6 +541,7 @@ struct ocelot; struct ocelot_ops { int (*reset)(struct ocelot *ocelot); + u16 (*wm_enc)(u16 value); }; struct ocelot_vcap_block { From 375e131429219486e581df8bd11b0dff87f0895a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:09 +0300 Subject: [PATCH 0937/2056] net: dsa: felix: move probing to felix_vsc9959.c Felix is not actually meant to be a DSA driver only for the switch inside NXP LS1028A, but an umbrella for all Vitesse / Microsemi / Microchip switches that are register-compatible with Ocelot and that are using in DSA mode (with an NPI Ethernet port). 
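(Before continuing with this patch, a short aside on the watermark encoder factored out in the previous change: demo_wm_enc() below simply restates vsc9959_wm_enc()/ocelot_wm_enc() so the unit/value split can be shown with concrete numbers.)

#include <linux/bits.h>
#include <linux/types.h>

/* same logic as the wm_enc hooks above: bit 8 selects the unit (1 or 16),
 * bits 7-0 hold the value to be multiplied by that unit
 */
static u16 demo_wm_enc(u16 value)
{
	if (value >= BIT(8))
		return BIT(8) | (value / 16);

	return value;
}

/* demo_wm_enc(200) == 200            -> 200 cells, programmed exactly
 * demo_wm_enc(300) == BIT(8) | 18    -> 18 * 16 = 288 cells, i.e. values
 *                                       of 256 and above are rounded down
 *                                       to a multiple of 16
 */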
For the dsa_switch_ops exported by the felix driver to be generic enough to be used by other non-PCI switches, we need to move the PCI-specific probing to the low-level translation module felix_vsc9959.c. This way, other switches can have their own probing functions, as platform devices or otherwise. This patch also removes the "Felix instance table", which did not stand the test of time and is unnecessary at this point. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/felix.c | 195 +++---------------------- drivers/net/dsa/ocelot/felix.h | 15 +- drivers/net/dsa/ocelot/felix_vsc9959.c | 187 +++++++++++++++++++++++- 3 files changed, 206 insertions(+), 191 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index b9981d8c4c98..d90a7e12568e 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 NXP Semiconductors + * + * This is an umbrella module for all network switches that are + * register-compatible with Ocelot and that perform I/O to their host CPU + * through an NPI (Node Processor Interface) Ethernet port. */ #include #include @@ -185,37 +189,10 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port, struct phylink_link_state *state) { struct ocelot *ocelot = ds->priv; - struct ocelot_port *ocelot_port = ocelot->ports[port]; - __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct felix *felix = ocelot_to_felix(ocelot); - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != ocelot_port->phy_mode) { - bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); - return; - } - - phylink_set_port_modes(mask); - phylink_set(mask, Autoneg); - phylink_set(mask, Pause); - phylink_set(mask, Asym_Pause); - phylink_set(mask, 10baseT_Half); - phylink_set(mask, 10baseT_Full); - phylink_set(mask, 100baseT_Half); - phylink_set(mask, 100baseT_Full); - phylink_set(mask, 1000baseT_Half); - phylink_set(mask, 1000baseT_Full); - - if (state->interface == PHY_INTERFACE_MODE_INTERNAL || - state->interface == PHY_INTERFACE_MODE_2500BASEX || - state->interface == PHY_INTERFACE_MODE_USXGMII) { - phylink_set(mask, 2500baseT_Full); - phylink_set(mask, 2500baseX_Full); - } - - bitmap_and(supported, supported, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); - bitmap_and(state->advertising, state->advertising, mask, - __ETHTOOL_LINK_MODE_MASK_NBITS); + if (felix->info->phylink_validate) + felix->info->phylink_validate(ocelot, port, supported, state); } static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port, @@ -456,7 +433,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) { struct ocelot *ocelot = &felix->ocelot; phy_interface_t *port_phy_modes; - resource_size_t switch_base; struct resource res; int port, i, err; @@ -487,9 +463,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) return err; } - switch_base = pci_resource_start(felix->pdev, - felix->info->switch_pci_bar); - for (i = 0; i < TARGET_MAX; i++) { struct regmap *target; @@ -498,8 +471,8 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports) memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); res.flags = IORESOURCE_MEM; - res.start += switch_base; - res.end += switch_base; + res.start += felix->switch_base; + res.end += felix->switch_base; target = ocelot_regmap_init(ocelot, &res); if (IS_ERR(target)) { @@ -536,8 +509,8 @@ static int 
felix_init_structs(struct felix *felix, int num_phys_ports) memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); res.flags = IORESOURCE_MEM; - res.start += switch_base; - res.end += switch_base; + res.start += felix->switch_base; + res.end += felix->switch_base; target = ocelot_regmap_init(ocelot, &res); if (IS_ERR(target)) { @@ -802,7 +775,7 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port, return -EOPNOTSUPP; } -static const struct dsa_switch_ops felix_switch_ops = { +const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, @@ -845,149 +818,17 @@ static const struct dsa_switch_ops felix_switch_ops = { .port_setup_tc = felix_port_setup_tc, }; -static struct felix_info *felix_instance_tbl[] = { - [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959, -}; - -static irqreturn_t felix_irq_handler(int irq, void *data) +static int __init felix_init(void) { - struct ocelot *ocelot = (struct ocelot *)data; - - /* The INTB interrupt is used for both PTP TX timestamp interrupt - * and preemption status change interrupt on each port. - * - * - Get txtstamp if have - * - TODO: handle preemption. Without handling it, driver may get - * interrupt storm. - */ - - ocelot_get_txtstamp(ocelot); - - return IRQ_HANDLED; + return pci_register_driver(&felix_vsc9959_pci_driver); } +module_init(felix_init); -static int felix_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static void __exit felix_exit(void) { - enum felix_instance instance = id->driver_data; - struct dsa_switch *ds; - struct ocelot *ocelot; - struct felix *felix; - int err; - - if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { - dev_info(&pdev->dev, "device is disabled, skipping\n"); - return -ENODEV; - } - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - goto err_pci_enable; - } - - /* set up for high or low dma */ - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (err) { - err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "DMA configuration failed: 0x%x\n", err); - goto err_dma; - } - } - - felix = kzalloc(sizeof(struct felix), GFP_KERNEL); - if (!felix) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate driver memory\n"); - goto err_alloc_felix; - } - - pci_set_drvdata(pdev, felix); - ocelot = &felix->ocelot; - ocelot->dev = &pdev->dev; - felix->pdev = pdev; - felix->info = felix_instance_tbl[instance]; - - pci_set_master(pdev); - - err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, - &felix_irq_handler, IRQF_ONESHOT, - "felix-intb", ocelot); - if (err) { - dev_err(&pdev->dev, "Failed to request irq\n"); - goto err_alloc_irq; - } - - ocelot->ptp = 1; - - ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); - if (!ds) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); - goto err_alloc_ds; - } - - ds->dev = &pdev->dev; - ds->num_ports = felix->info->num_ports; - ds->num_tx_queues = felix->info->num_tx_queues; - ds->ops = &felix_switch_ops; - ds->priv = ocelot; - felix->ds = ds; - - err = dsa_register_switch(ds); - if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); - goto err_register_ds; - } - - return 0; - -err_register_ds: - kfree(ds); -err_alloc_ds: -err_alloc_irq: -err_alloc_felix: - kfree(felix); -err_dma: - pci_disable_device(pdev); -err_pci_enable: - return err; + 
pci_unregister_driver(&felix_vsc9959_pci_driver); } - -static void felix_pci_remove(struct pci_dev *pdev) -{ - struct felix *felix; - - felix = pci_get_drvdata(pdev); - - dsa_unregister_switch(felix->ds); - - kfree(felix->ds); - kfree(felix); - - pci_disable_device(pdev); -} - -static struct pci_device_id felix_ids[] = { - { - /* NXP LS1028A */ - PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0), - .driver_data = FELIX_INSTANCE_VSC9959, - }, - { 0, } -}; -MODULE_DEVICE_TABLE(pci, felix_ids); - -static struct pci_driver felix_pci_driver = { - .name = KBUILD_MODNAME, - .id_table = felix_ids, - .probe = felix_pci_probe, - .remove = felix_pci_remove, -}; - -module_pci_driver(felix_pci_driver); +module_exit(felix_exit); MODULE_DESCRIPTION("Felix Switch driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index a85631d716b9..b858b7f79090 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -37,6 +37,9 @@ struct felix_info { int speed, int duplex); void (*pcs_link_state)(struct ocelot *ocelot, int port, struct phylink_link_state *state); + void (*phylink_validate)(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state); int (*prevalidate_phy_mode)(struct ocelot *ocelot, int port, phy_interface_t phy_mode); int (*port_setup_tc)(struct dsa_switch *ds, int port, @@ -46,20 +49,18 @@ struct felix_info { void (*xmit_template_populate)(struct ocelot *ocelot, int port); }; -extern struct felix_info felix_info_vsc9959; - -enum felix_instance { - FELIX_INSTANCE_VSC9959 = 0, -}; +extern const struct dsa_switch_ops felix_switch_ops; +extern struct pci_driver felix_vsc9959_pci_driver; /* DSA glue / front-end for struct ocelot */ struct felix { struct dsa_switch *ds; - struct pci_dev *pdev; - struct felix_info *info; + const struct felix_info *info; struct ocelot ocelot; struct mii_bus *imdio; struct phy_device **pcs; + resource_size_t switch_base; + resource_size_t imdio_base; }; #endif diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 7e8a99455670..cdfc806d5179 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1128,6 +1128,43 @@ static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port, vsc9959_pcs_link_state_resolve(pcs, state); } +static void vsc9959_phylink_validate(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != ocelot_port->phy_mode) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + + if (state->interface == PHY_INTERFACE_MODE_INTERNAL || + state->interface == PHY_INTERFACE_MODE_2500BASEX || + state->interface == PHY_INTERFACE_MODE_USXGMII) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + 
__ETHTOOL_LINK_MODE_MASK_NBITS); +} + static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port, phy_interface_t phy_mode) { @@ -1171,7 +1208,6 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) struct felix *felix = ocelot_to_felix(ocelot); struct enetc_mdio_priv *mdio_priv; struct device *dev = ocelot->dev; - resource_size_t imdio_base; void __iomem *imdio_regs; struct resource res; struct enetc_hw *hw; @@ -1187,13 +1223,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return -ENOMEM; } - imdio_base = pci_resource_start(felix->pdev, - felix->info->imdio_pci_bar); - memcpy(&res, felix->info->imdio_res, sizeof(res)); res.flags = IORESOURCE_MEM; - res.start += imdio_base; - res.end += imdio_base; + res.start += felix->imdio_base; + res.end += felix->imdio_base; imdio_regs = devm_ioremap_resource(dev, &res); if (IS_ERR(imdio_regs)) { @@ -1467,7 +1500,7 @@ static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port) packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); } -struct felix_info felix_info_vsc9959 = { +static const struct felix_info felix_info_vsc9959 = { .target_io_res = vsc9959_target_io_res, .port_io_res = vsc9959_port_io_res, .imdio_res = &vsc9959_imdio_res, @@ -1490,8 +1523,148 @@ struct felix_info felix_info_vsc9959 = { .pcs_config = vsc9959_pcs_config, .pcs_link_up = vsc9959_pcs_link_up, .pcs_link_state = vsc9959_pcs_link_state, + .phylink_validate = vsc9959_phylink_validate, .prevalidate_phy_mode = vsc9959_prevalidate_phy_mode, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, .xmit_template_populate = vsc9959_xmit_template_populate, }; + +static irqreturn_t felix_irq_handler(int irq, void *data) +{ + struct ocelot *ocelot = (struct ocelot *)data; + + /* The INTB interrupt is used for both PTP TX timestamp interrupt + * and preemption status change interrupt on each port. + * + * - Get txtstamp if have + * - TODO: handle preemption. Without handling it, driver may get + * interrupt storm. 
+ */ + + ocelot_get_txtstamp(ocelot); + + return IRQ_HANDLED; +} + +static int felix_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct dsa_switch *ds; + struct ocelot *ocelot; + struct felix *felix; + int err; + + if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { + dev_info(&pdev->dev, "device is disabled, skipping\n"); + return -ENODEV; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "device enable failed\n"); + goto err_pci_enable; + } + + /* set up for high or low dma */ + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "DMA configuration failed: 0x%x\n", err); + goto err_dma; + } + } + + felix = kzalloc(sizeof(struct felix), GFP_KERNEL); + if (!felix) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate driver memory\n"); + goto err_alloc_felix; + } + + pci_set_drvdata(pdev, felix); + ocelot = &felix->ocelot; + ocelot->dev = &pdev->dev; + felix->info = &felix_info_vsc9959; + felix->switch_base = pci_resource_start(pdev, + felix->info->switch_pci_bar); + felix->imdio_base = pci_resource_start(pdev, + felix->info->imdio_pci_bar); + + pci_set_master(pdev); + + err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, + &felix_irq_handler, IRQF_ONESHOT, + "felix-intb", ocelot); + if (err) { + dev_err(&pdev->dev, "Failed to request irq\n"); + goto err_alloc_irq; + } + + ocelot->ptp = 1; + + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); + if (!ds) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); + goto err_alloc_ds; + } + + ds->dev = &pdev->dev; + ds->num_ports = felix->info->num_ports; + ds->num_tx_queues = felix->info->num_tx_queues; + ds->ops = &felix_switch_ops; + ds->priv = ocelot; + felix->ds = ds; + + err = dsa_register_switch(ds); + if (err) { + dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + goto err_register_ds; + } + + return 0; + +err_register_ds: + kfree(ds); +err_alloc_ds: +err_alloc_irq: +err_alloc_felix: + kfree(felix); +err_dma: + pci_disable_device(pdev); +err_pci_enable: + return err; +} + +static void felix_pci_remove(struct pci_dev *pdev) +{ + struct felix *felix; + + felix = pci_get_drvdata(pdev); + + dsa_unregister_switch(felix->ds); + + kfree(felix->ds); + kfree(felix); + + pci_disable_device(pdev); +} + +static struct pci_device_id felix_ids[] = { + { + /* NXP LS1028A */ + PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0), + }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, felix_ids); + +struct pci_driver felix_vsc9959_pci_driver = { + .name = "mscc_felix", + .id_table = felix_ids, + .probe = felix_pci_probe, + .remove = felix_pci_remove, +}; From 84705fc165526e8e55d208b2b10a48cc720a106a Mon Sep 17 00:00:00 2001 From: Maxim Kochetkov Date: Mon, 13 Jul 2020 19:57:10 +0300 Subject: [PATCH 0938/2056] net: dsa: felix: introduce support for Seville VSC9953 switch This is another switch from Vitesse / Microsemi / Microchip, that has 10 ports (8 external, 2 internal) and is integrated into the Freescale / NXP T1040 PowerPC SoC. It is very similar to Felix from NXP LS1028A, except that this is a platform device and Felix is a PCI device, and it doesn't support IEEE 1588 and TSN. Like Felix, this driver configures its own PCS on the internal MDIO bus using a phy_device abstraction for it (yes, it will be refactored to use a raw mdio_device, like other phylink drivers do, but let's keep it like that for now). 
But unlike Felix, the MDIO bus and the PCS are not from the same vendor. The PCS is the same QorIQ/Layerscape PCS as found in Felix/ENETC/DPAA*, but the internal MDIO bus that is used to access it is actually an instantiation of drivers/net/phy/mdio-mscc-miim.c. But it would be difficult to reuse that driver (it doesn't even use regmap, and it's less than 200 lines of code), so we hand-roll here some internal MDIO bus accessors within seville_vsc9953.c, which serves the purpose of driving the PCS absolutely fine. Also, same as Felix, the PCS doesn't support dynamic reconfiguration of SerDes protocol, so we need to do pre-validation of PHY mode from device tree and not let phylink change it. Signed-off-by: Maxim Kochetkov Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/ocelot/Kconfig | 12 +- drivers/net/dsa/ocelot/Makefile | 3 +- drivers/net/dsa/ocelot/felix.c | 14 +- drivers/net/dsa/ocelot/felix.h | 12 + drivers/net/dsa/ocelot/felix_vsc9959.c | 20 +- drivers/net/dsa/ocelot/seville_vsc9953.c | 1104 ++++++++++++++++++++++ 6 files changed, 1149 insertions(+), 16 deletions(-) create mode 100644 drivers/net/dsa/ocelot/seville_vsc9953.c diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 3d3c2a6fb0c0..5b23bf6ba848 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -10,7 +10,11 @@ config NET_DSA_MSCC_FELIX select NET_DSA_TAG_OCELOT select FSL_ENETC_MDIO help - This driver supports the VSC9959 network switch, which is a member of - the Vitesse / Microsemi / Microchip Ocelot family of switching cores. - It is embedded as a PCIe function of the NXP LS1028A ENETC integrated - endpoint. + This driver supports network switches from the the Vitesse / + Microsemi / Microchip Ocelot family of switching cores that are + connected to their host CPU via Ethernet. + The following switches are supported: + - VSC9959 (Felix): embedded as a PCIe function of the NXP LS1028A + ENETC integrated endpoint. + - VSC9953 (Seville): embedded as a platform device on the + NXP T1040 SoC. 
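The hand-rolled internal MDIO accessors mentioned in the commit message above live past the end of this excerpt, so here is a hedged sketch of what a clause-22 read through the GCB MIIM block boils down to; the GCB_MIIM_MII_* registers, the GCB_MIIM_MII_STATUS_BUSY regfield and the MSCC_MIIM_* bit macros are the ones added by this series, while demo_gcb_miim_read() and its unbounded poll loop are simplifications rather than the driver's actual code:

#include <soc/mscc/ocelot.h>
/* MSCC_MIIM_CMD_*, MSCC_MIIM_DATA_ERROR: local defines from seville_vsc9953.c */

static int demo_gcb_miim_read(struct ocelot *ocelot, int phy_id, int regnum)
{
	u32 val, busy;
	int err;

	/* issue a valid clause-22 READ command for (phy_id, regnum) */
	ocelot_write(ocelot,
		     MSCC_MIIM_CMD_VLD |
		     (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
		     (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
		     MSCC_MIIM_CMD_OPR_READ,
		     GCB_MIIM_MII_CMD);

	/* wait for the MIIM state machine to go idle (a real implementation
	 * would bound this wait)
	 */
	do {
		err = ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY,
					&busy);
		if (err)
			return err;
	} while (busy);

	val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
	if (val & MSCC_MIIM_DATA_ERROR)
		return -EIO;

	return val & 0xffff;
}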
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile index 37ad403e0b2a..ec57a5a12330 100644 --- a/drivers/net/dsa/ocelot/Makefile +++ b/drivers/net/dsa/ocelot/Makefile @@ -3,4 +3,5 @@ obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o mscc_felix-objs := \ felix.o \ - felix_vsc9959.o + felix_vsc9959.o \ + seville_vsc9953.o diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index d90a7e12568e..c69d9592a2b7 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -820,13 +821,24 @@ const struct dsa_switch_ops felix_switch_ops = { static int __init felix_init(void) { - return pci_register_driver(&felix_vsc9959_pci_driver); + int err; + + err = pci_register_driver(&felix_vsc9959_pci_driver); + if (err) + return err; + + err = platform_driver_register(&seville_vsc9953_driver); + if (err) + return err; + + return 0; } module_init(felix_init); static void __exit felix_exit(void) { pci_unregister_driver(&felix_vsc9959_pci_driver); + platform_driver_unregister(&seville_vsc9953_driver); } module_exit(felix_exit); diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index b858b7f79090..98f14621ac23 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -51,6 +51,7 @@ struct felix_info { extern const struct dsa_switch_ops felix_switch_ops; extern struct pci_driver felix_vsc9959_pci_driver; +extern struct platform_driver seville_vsc9953_driver; /* DSA glue / front-end for struct ocelot */ struct felix { @@ -63,4 +64,15 @@ struct felix { resource_size_t imdio_base; }; +void vsc9959_pcs_link_state(struct ocelot *ocelot, int port, + struct phylink_link_state *state); +void vsc9959_pcs_config(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + const struct phylink_link_state *state); +void vsc9959_pcs_link_up(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex); +void vsc9959_mdio_bus_free(struct ocelot *ocelot); + #endif diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index cdfc806d5179..1bdf74fd8be4 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -852,9 +852,9 @@ static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs, USXGMII_ADVERTISE_FDX); } -static void vsc9959_pcs_config(struct ocelot *ocelot, int port, - unsigned int link_an_mode, - const struct phylink_link_state *state) +void vsc9959_pcs_config(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + const struct phylink_link_state *state) { struct felix *felix = ocelot_to_felix(ocelot); struct phy_device *pcs = felix->pcs[port]; @@ -965,10 +965,10 @@ static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs, phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE); } -static void vsc9959_pcs_link_up(struct ocelot *ocelot, int port, - unsigned int link_an_mode, - phy_interface_t interface, - int speed, int duplex) +void vsc9959_pcs_link_up(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex) { struct felix *felix = ocelot_to_felix(ocelot); struct phy_device *pcs = felix->pcs[port]; @@ -1096,8 +1096,8 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs, pcs->duplex = DUPLEX_HALF; } -static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port, - struct phylink_link_state *state) 
+void vsc9959_pcs_link_state(struct ocelot *ocelot, int port, + struct phylink_link_state *state) { struct felix *felix = ocelot_to_felix(ocelot); struct phy_device *pcs = felix->pcs[port]; @@ -1286,7 +1286,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot) return 0; } -static void vsc9959_mdio_bus_free(struct ocelot *ocelot) +void vsc9959_mdio_bus_free(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); int port; diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c new file mode 100644 index 000000000000..625b1891d955 --- /dev/null +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -0,0 +1,1104 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Distributed Switch Architecture VSC9953 driver + * Copyright (C) 2020, Maxim Kochetkov + */ +#include +#include +#include +#include +#include +#include +#include +#include "felix.h" + +#define VSC9953_VCAP_IS2_CNT 1024 +#define VSC9953_VCAP_IS2_ENTRY_WIDTH 376 +#define VSC9953_VCAP_PORT_CNT 10 + +#define MSCC_MIIM_REG_STATUS 0x0 +#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3) +#define MSCC_MIIM_REG_CMD 0x8 +#define MSCC_MIIM_CMD_OPR_WRITE BIT(1) +#define MSCC_MIIM_CMD_OPR_READ BIT(2) +#define MSCC_MIIM_CMD_WRDATA_SHIFT 4 +#define MSCC_MIIM_CMD_REGAD_SHIFT 20 +#define MSCC_MIIM_CMD_PHYAD_SHIFT 25 +#define MSCC_MIIM_CMD_VLD BIT(31) +#define MSCC_MIIM_REG_DATA 0xC +#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) + +#define MSCC_PHY_REG_PHY_CFG 0x0 +#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) +#define PHY_CFG_PHY_COMMON_RESET BIT(4) +#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8)) +#define MSCC_PHY_REG_PHY_STATUS 0x4 + +static const u32 vsc9953_ana_regmap[] = { + REG(ANA_ADVLEARN, 0x00b500), + REG(ANA_VLANMASK, 0x00b504), + REG_RESERVED(ANA_PORT_B_DOMAIN), + REG(ANA_ANAGEFIL, 0x00b50c), + REG(ANA_ANEVENTS, 0x00b510), + REG(ANA_STORMLIMIT_BURST, 0x00b514), + REG(ANA_STORMLIMIT_CFG, 0x00b518), + REG(ANA_ISOLATED_PORTS, 0x00b528), + REG(ANA_COMMUNITY_PORTS, 0x00b52c), + REG(ANA_AUTOAGE, 0x00b530), + REG(ANA_MACTOPTIONS, 0x00b534), + REG(ANA_LEARNDISC, 0x00b538), + REG(ANA_AGENCTRL, 0x00b53c), + REG(ANA_MIRRORPORTS, 0x00b540), + REG(ANA_EMIRRORPORTS, 0x00b544), + REG(ANA_FLOODING, 0x00b548), + REG(ANA_FLOODING_IPMC, 0x00b54c), + REG(ANA_SFLOW_CFG, 0x00b550), + REG(ANA_PORT_MODE, 0x00b57c), + REG_RESERVED(ANA_CUT_THRU_CFG), + REG(ANA_PGID_PGID, 0x00b600), + REG(ANA_TABLES_ANMOVED, 0x00b4ac), + REG(ANA_TABLES_MACHDATA, 0x00b4b0), + REG(ANA_TABLES_MACLDATA, 0x00b4b4), + REG_RESERVED(ANA_TABLES_STREAMDATA), + REG(ANA_TABLES_MACACCESS, 0x00b4b8), + REG(ANA_TABLES_MACTINDX, 0x00b4bc), + REG(ANA_TABLES_VLANACCESS, 0x00b4c0), + REG(ANA_TABLES_VLANTIDX, 0x00b4c4), + REG_RESERVED(ANA_TABLES_ISDXACCESS), + REG_RESERVED(ANA_TABLES_ISDXTIDX), + REG(ANA_TABLES_ENTRYLIM, 0x00b480), + REG_RESERVED(ANA_TABLES_PTP_ID_HIGH), + REG_RESERVED(ANA_TABLES_PTP_ID_LOW), + REG_RESERVED(ANA_TABLES_STREAMACCESS), + REG_RESERVED(ANA_TABLES_STREAMTIDX), + REG_RESERVED(ANA_TABLES_SEQ_HISTORY), + REG_RESERVED(ANA_TABLES_SEQ_MASK), + REG_RESERVED(ANA_TABLES_SFID_MASK), + REG_RESERVED(ANA_TABLES_SFIDACCESS), + REG_RESERVED(ANA_TABLES_SFIDTIDX), + REG_RESERVED(ANA_MSTI_STATE), + REG_RESERVED(ANA_OAM_UPM_LM_CNT), + REG_RESERVED(ANA_SG_ACCESS_CTRL), + REG_RESERVED(ANA_SG_CONFIG_REG_1), + REG_RESERVED(ANA_SG_CONFIG_REG_2), + REG_RESERVED(ANA_SG_CONFIG_REG_3), + REG_RESERVED(ANA_SG_CONFIG_REG_4), + REG_RESERVED(ANA_SG_CONFIG_REG_5), + REG_RESERVED(ANA_SG_GCL_GS_CONFIG), + 
REG_RESERVED(ANA_SG_GCL_TI_CONFIG), + REG_RESERVED(ANA_SG_STATUS_REG_1), + REG_RESERVED(ANA_SG_STATUS_REG_2), + REG_RESERVED(ANA_SG_STATUS_REG_3), + REG(ANA_PORT_VLAN_CFG, 0x000000), + REG(ANA_PORT_DROP_CFG, 0x000004), + REG(ANA_PORT_QOS_CFG, 0x000008), + REG(ANA_PORT_VCAP_CFG, 0x00000c), + REG(ANA_PORT_VCAP_S1_KEY_CFG, 0x000010), + REG(ANA_PORT_VCAP_S2_CFG, 0x00001c), + REG(ANA_PORT_PCP_DEI_MAP, 0x000020), + REG(ANA_PORT_CPU_FWD_CFG, 0x000060), + REG(ANA_PORT_CPU_FWD_BPDU_CFG, 0x000064), + REG(ANA_PORT_CPU_FWD_GARP_CFG, 0x000068), + REG(ANA_PORT_CPU_FWD_CCM_CFG, 0x00006c), + REG(ANA_PORT_PORT_CFG, 0x000070), + REG(ANA_PORT_POL_CFG, 0x000074), + REG_RESERVED(ANA_PORT_PTP_CFG), + REG_RESERVED(ANA_PORT_PTP_DLY1_CFG), + REG_RESERVED(ANA_PORT_PTP_DLY2_CFG), + REG_RESERVED(ANA_PORT_SFID_CFG), + REG(ANA_PFC_PFC_CFG, 0x00c000), + REG_RESERVED(ANA_PFC_PFC_TIMER), + REG_RESERVED(ANA_IPT_OAM_MEP_CFG), + REG_RESERVED(ANA_IPT_IPT), + REG_RESERVED(ANA_PPT_PPT), + REG_RESERVED(ANA_FID_MAP_FID_MAP), + REG(ANA_AGGR_CFG, 0x00c600), + REG(ANA_CPUQ_CFG, 0x00c604), + REG_RESERVED(ANA_CPUQ_CFG2), + REG(ANA_CPUQ_8021_CFG, 0x00c60c), + REG(ANA_DSCP_CFG, 0x00c64c), + REG(ANA_DSCP_REWR_CFG, 0x00c74c), + REG(ANA_VCAP_RNG_TYPE_CFG, 0x00c78c), + REG(ANA_VCAP_RNG_VAL_CFG, 0x00c7ac), + REG_RESERVED(ANA_VRAP_CFG), + REG_RESERVED(ANA_VRAP_HDR_DATA), + REG_RESERVED(ANA_VRAP_HDR_MASK), + REG(ANA_DISCARD_CFG, 0x00c7d8), + REG(ANA_FID_CFG, 0x00c7dc), + REG(ANA_POL_PIR_CFG, 0x00a000), + REG(ANA_POL_CIR_CFG, 0x00a004), + REG(ANA_POL_MODE_CFG, 0x00a008), + REG(ANA_POL_PIR_STATE, 0x00a00c), + REG(ANA_POL_CIR_STATE, 0x00a010), + REG_RESERVED(ANA_POL_STATE), + REG(ANA_POL_FLOWC, 0x00c280), + REG(ANA_POL_HYST, 0x00c2ec), + REG_RESERVED(ANA_POL_MISC_CFG), +}; + +static const u32 vsc9953_qs_regmap[] = { + REG(QS_XTR_GRP_CFG, 0x000000), + REG(QS_XTR_RD, 0x000008), + REG(QS_XTR_FRM_PRUNING, 0x000010), + REG(QS_XTR_FLUSH, 0x000018), + REG(QS_XTR_DATA_PRESENT, 0x00001c), + REG(QS_XTR_CFG, 0x000020), + REG(QS_INJ_GRP_CFG, 0x000024), + REG(QS_INJ_WR, 0x00002c), + REG(QS_INJ_CTRL, 0x000034), + REG(QS_INJ_STATUS, 0x00003c), + REG(QS_INJ_ERR, 0x000040), + REG_RESERVED(QS_INH_DBG), +}; + +static const u32 vsc9953_s2_regmap[] = { + REG(S2_CORE_UPDATE_CTRL, 0x000000), + REG(S2_CORE_MV_CFG, 0x000004), + REG(S2_CACHE_ENTRY_DAT, 0x000008), + REG(S2_CACHE_MASK_DAT, 0x000108), + REG(S2_CACHE_ACTION_DAT, 0x000208), + REG(S2_CACHE_CNT_DAT, 0x000308), + REG(S2_CACHE_TG_DAT, 0x000388), +}; + +static const u32 vsc9953_qsys_regmap[] = { + REG(QSYS_PORT_MODE, 0x003600), + REG(QSYS_SWITCH_PORT_MODE, 0x003630), + REG(QSYS_STAT_CNT_CFG, 0x00365c), + REG(QSYS_EEE_CFG, 0x003660), + REG(QSYS_EEE_THRES, 0x003688), + REG(QSYS_IGR_NO_SHARING, 0x00368c), + REG(QSYS_EGR_NO_SHARING, 0x003690), + REG(QSYS_SW_STATUS, 0x003694), + REG(QSYS_EXT_CPU_CFG, 0x0036c0), + REG_RESERVED(QSYS_PAD_CFG), + REG(QSYS_CPU_GROUP_MAP, 0x0036c8), + REG_RESERVED(QSYS_QMAP), + REG_RESERVED(QSYS_ISDX_SGRP), + REG_RESERVED(QSYS_TIMED_FRAME_ENTRY), + REG_RESERVED(QSYS_TFRM_MISC), + REG_RESERVED(QSYS_TFRM_PORT_DLY), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_1), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_2), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_3), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_4), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_5), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_6), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_7), + REG_RESERVED(QSYS_TFRM_TIMER_CFG_8), + REG(QSYS_RED_PROFILE, 0x003724), + REG(QSYS_RES_QOS_MODE, 0x003764), + REG(QSYS_RES_CFG, 0x004000), + REG(QSYS_RES_STAT, 0x004004), + REG(QSYS_EGR_DROP_MODE, 0x003768), + 
REG(QSYS_EQ_CTRL, 0x00376c), + REG_RESERVED(QSYS_EVENTS_CORE), + REG_RESERVED(QSYS_QMAXSDU_CFG_0), + REG_RESERVED(QSYS_QMAXSDU_CFG_1), + REG_RESERVED(QSYS_QMAXSDU_CFG_2), + REG_RESERVED(QSYS_QMAXSDU_CFG_3), + REG_RESERVED(QSYS_QMAXSDU_CFG_4), + REG_RESERVED(QSYS_QMAXSDU_CFG_5), + REG_RESERVED(QSYS_QMAXSDU_CFG_6), + REG_RESERVED(QSYS_QMAXSDU_CFG_7), + REG_RESERVED(QSYS_PREEMPTION_CFG), + REG(QSYS_CIR_CFG, 0x000000), + REG_RESERVED(QSYS_EIR_CFG), + REG(QSYS_SE_CFG, 0x000008), + REG(QSYS_SE_DWRR_CFG, 0x00000c), + REG_RESERVED(QSYS_SE_CONNECT), + REG_RESERVED(QSYS_SE_DLB_SENSE), + REG(QSYS_CIR_STATE, 0x000044), + REG_RESERVED(QSYS_EIR_STATE), + REG_RESERVED(QSYS_SE_STATE), + REG(QSYS_HSCH_MISC_CFG, 0x003774), + REG_RESERVED(QSYS_TAG_CONFIG), + REG_RESERVED(QSYS_TAS_PARAM_CFG_CTRL), + REG_RESERVED(QSYS_PORT_MAX_SDU), + REG_RESERVED(QSYS_PARAM_CFG_REG_1), + REG_RESERVED(QSYS_PARAM_CFG_REG_2), + REG_RESERVED(QSYS_PARAM_CFG_REG_3), + REG_RESERVED(QSYS_PARAM_CFG_REG_4), + REG_RESERVED(QSYS_PARAM_CFG_REG_5), + REG_RESERVED(QSYS_GCL_CFG_REG_1), + REG_RESERVED(QSYS_GCL_CFG_REG_2), + REG_RESERVED(QSYS_PARAM_STATUS_REG_1), + REG_RESERVED(QSYS_PARAM_STATUS_REG_2), + REG_RESERVED(QSYS_PARAM_STATUS_REG_3), + REG_RESERVED(QSYS_PARAM_STATUS_REG_4), + REG_RESERVED(QSYS_PARAM_STATUS_REG_5), + REG_RESERVED(QSYS_PARAM_STATUS_REG_6), + REG_RESERVED(QSYS_PARAM_STATUS_REG_7), + REG_RESERVED(QSYS_PARAM_STATUS_REG_8), + REG_RESERVED(QSYS_PARAM_STATUS_REG_9), + REG_RESERVED(QSYS_GCL_STATUS_REG_1), + REG_RESERVED(QSYS_GCL_STATUS_REG_2), +}; + +static const u32 vsc9953_rew_regmap[] = { + REG(REW_PORT_VLAN_CFG, 0x000000), + REG(REW_TAG_CFG, 0x000004), + REG(REW_PORT_CFG, 0x000008), + REG(REW_DSCP_CFG, 0x00000c), + REG(REW_PCP_DEI_QOS_MAP_CFG, 0x000010), + REG_RESERVED(REW_PTP_CFG), + REG_RESERVED(REW_PTP_DLY1_CFG), + REG_RESERVED(REW_RED_TAG_CFG), + REG(REW_DSCP_REMAP_DP1_CFG, 0x000610), + REG(REW_DSCP_REMAP_CFG, 0x000710), + REG_RESERVED(REW_STAT_CFG), + REG_RESERVED(REW_REW_STICKY), + REG_RESERVED(REW_PPT), +}; + +static const u32 vsc9953_sys_regmap[] = { + REG(SYS_COUNT_RX_OCTETS, 0x000000), + REG(SYS_COUNT_RX_MULTICAST, 0x000008), + REG(SYS_COUNT_RX_SHORTS, 0x000010), + REG(SYS_COUNT_RX_FRAGMENTS, 0x000014), + REG(SYS_COUNT_RX_JABBERS, 0x000018), + REG(SYS_COUNT_RX_64, 0x000024), + REG(SYS_COUNT_RX_65_127, 0x000028), + REG(SYS_COUNT_RX_128_255, 0x00002c), + REG(SYS_COUNT_RX_256_1023, 0x000030), + REG(SYS_COUNT_RX_1024_1526, 0x000034), + REG(SYS_COUNT_RX_1527_MAX, 0x000038), + REG(SYS_COUNT_RX_LONGS, 0x000048), + REG(SYS_COUNT_TX_OCTETS, 0x000100), + REG(SYS_COUNT_TX_COLLISION, 0x000110), + REG(SYS_COUNT_TX_DROPS, 0x000114), + REG(SYS_COUNT_TX_64, 0x00011c), + REG(SYS_COUNT_TX_65_127, 0x000120), + REG(SYS_COUNT_TX_128_511, 0x000124), + REG(SYS_COUNT_TX_512_1023, 0x000128), + REG(SYS_COUNT_TX_1024_1526, 0x00012c), + REG(SYS_COUNT_TX_1527_MAX, 0x000130), + REG(SYS_COUNT_TX_AGING, 0x000178), + REG(SYS_RESET_CFG, 0x000318), + REG_RESERVED(SYS_SR_ETYPE_CFG), + REG(SYS_VLAN_ETYPE_CFG, 0x000320), + REG(SYS_PORT_MODE, 0x000324), + REG(SYS_FRONT_PORT_MODE, 0x000354), + REG(SYS_FRM_AGING, 0x00037c), + REG(SYS_STAT_CFG, 0x000380), + REG_RESERVED(SYS_SW_STATUS), + REG_RESERVED(SYS_MISC_CFG), + REG_RESERVED(SYS_REW_MAC_HIGH_CFG), + REG_RESERVED(SYS_REW_MAC_LOW_CFG), + REG_RESERVED(SYS_TIMESTAMP_OFFSET), + REG(SYS_PAUSE_CFG, 0x00044c), + REG(SYS_PAUSE_TOT_CFG, 0x000478), + REG(SYS_ATOP, 0x00047c), + REG(SYS_ATOP_TOT_CFG, 0x0004a8), + REG(SYS_MAC_FC_CFG, 0x0004ac), + REG(SYS_MMGT, 0x0004d4), + REG_RESERVED(SYS_MMGT_FAST), + 
REG_RESERVED(SYS_EVENTS_DIF), + REG_RESERVED(SYS_EVENTS_CORE), + REG_RESERVED(SYS_CNT), + REG_RESERVED(SYS_PTP_STATUS), + REG_RESERVED(SYS_PTP_TXSTAMP), + REG_RESERVED(SYS_PTP_NXT), + REG_RESERVED(SYS_PTP_CFG), + REG_RESERVED(SYS_RAM_INIT), + REG_RESERVED(SYS_CM_ADDR), + REG_RESERVED(SYS_CM_DATA_WR), + REG_RESERVED(SYS_CM_DATA_RD), + REG_RESERVED(SYS_CM_OP), + REG_RESERVED(SYS_CM_DATA), +}; + +static const u32 vsc9953_gcb_regmap[] = { + REG(GCB_SOFT_RST, 0x000008), + REG(GCB_MIIM_MII_STATUS, 0x0000ac), + REG(GCB_MIIM_MII_CMD, 0x0000b4), + REG(GCB_MIIM_MII_DATA, 0x0000b8), +}; + +static const u32 vsc9953_dev_gmii_regmap[] = { + REG(DEV_CLOCK_CFG, 0x0), + REG(DEV_PORT_MISC, 0x4), + REG_RESERVED(DEV_EVENTS), + REG(DEV_EEE_CFG, 0xc), + REG_RESERVED(DEV_RX_PATH_DELAY), + REG_RESERVED(DEV_TX_PATH_DELAY), + REG_RESERVED(DEV_PTP_PREDICT_CFG), + REG(DEV_MAC_ENA_CFG, 0x10), + REG(DEV_MAC_MODE_CFG, 0x14), + REG(DEV_MAC_MAXLEN_CFG, 0x18), + REG(DEV_MAC_TAGS_CFG, 0x1c), + REG(DEV_MAC_ADV_CHK_CFG, 0x20), + REG(DEV_MAC_IFG_CFG, 0x24), + REG(DEV_MAC_HDX_CFG, 0x28), + REG_RESERVED(DEV_MAC_DBG_CFG), + REG(DEV_MAC_FC_MAC_LOW_CFG, 0x30), + REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x34), + REG(DEV_MAC_STICKY, 0x38), + REG_RESERVED(PCS1G_CFG), + REG_RESERVED(PCS1G_MODE_CFG), + REG_RESERVED(PCS1G_SD_CFG), + REG_RESERVED(PCS1G_ANEG_CFG), + REG_RESERVED(PCS1G_ANEG_NP_CFG), + REG_RESERVED(PCS1G_LB_CFG), + REG_RESERVED(PCS1G_DBG_CFG), + REG_RESERVED(PCS1G_CDET_CFG), + REG_RESERVED(PCS1G_ANEG_STATUS), + REG_RESERVED(PCS1G_ANEG_NP_STATUS), + REG_RESERVED(PCS1G_LINK_STATUS), + REG_RESERVED(PCS1G_LINK_DOWN_CNT), + REG_RESERVED(PCS1G_STICKY), + REG_RESERVED(PCS1G_DEBUG_STATUS), + REG_RESERVED(PCS1G_LPI_CFG), + REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT), + REG_RESERVED(PCS1G_LPI_STATUS), + REG_RESERVED(PCS1G_TSTPAT_MODE_CFG), + REG_RESERVED(PCS1G_TSTPAT_STATUS), + REG_RESERVED(DEV_PCS_FX100_CFG), + REG_RESERVED(DEV_PCS_FX100_STATUS), +}; + +static const u32 *vsc9953_regmap[TARGET_MAX] = { + [ANA] = vsc9953_ana_regmap, + [QS] = vsc9953_qs_regmap, + [QSYS] = vsc9953_qsys_regmap, + [REW] = vsc9953_rew_regmap, + [SYS] = vsc9953_sys_regmap, + [S2] = vsc9953_s2_regmap, + [GCB] = vsc9953_gcb_regmap, + [DEV_GMII] = vsc9953_dev_gmii_regmap, +}; + +/* Addresses are relative to the device's base address */ +static const struct resource vsc9953_target_io_res[TARGET_MAX] = { + [ANA] = { + .start = 0x0280000, + .end = 0x028ffff, + .name = "ana", + }, + [QS] = { + .start = 0x0080000, + .end = 0x00800ff, + .name = "qs", + }, + [QSYS] = { + .start = 0x0200000, + .end = 0x021ffff, + .name = "qsys", + }, + [REW] = { + .start = 0x0030000, + .end = 0x003ffff, + .name = "rew", + }, + [SYS] = { + .start = 0x0010000, + .end = 0x001ffff, + .name = "sys", + }, + [S2] = { + .start = 0x0060000, + .end = 0x00603ff, + .name = "s2", + }, + [PTP] = { + .start = 0x0090000, + .end = 0x00900cb, + .name = "ptp", + }, + [GCB] = { + .start = 0x0070000, + .end = 0x00701ff, + .name = "devcpu_gcb", + }, +}; + +static const struct resource vsc9953_port_io_res[] = { + { + .start = 0x0100000, + .end = 0x010ffff, + .name = "port0", + }, + { + .start = 0x0110000, + .end = 0x011ffff, + .name = "port1", + }, + { + .start = 0x0120000, + .end = 0x012ffff, + .name = "port2", + }, + { + .start = 0x0130000, + .end = 0x013ffff, + .name = "port3", + }, + { + .start = 0x0140000, + .end = 0x014ffff, + .name = "port4", + }, + { + .start = 0x0150000, + .end = 0x015ffff, + .name = "port5", + }, + { + .start = 0x0160000, + .end = 0x016ffff, + .name = "port6", + }, + { + .start = 0x0170000, + .end 
= 0x017ffff, + .name = "port7", + }, + { + .start = 0x0180000, + .end = 0x018ffff, + .name = "port8", + }, + { + .start = 0x0190000, + .end = 0x019ffff, + .name = "port9", + }, +}; + +static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = { + [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 10, 10), + [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 9), + [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24), + [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22), + [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21), + [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20), + [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19), + [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18), + [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17), + [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16), + [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15), + [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13), + [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12), + [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11), + [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10), + [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9), + [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8), + [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7), + [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6), + [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5), + [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4), + [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3), + [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2), + [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1), + [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0), + [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16), + [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12), + [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10), + [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 7, 7), + [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 6, 6), + [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 5, 5), + [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0), + [GCB_MIIM_MII_STATUS_PENDING] = REG_FIELD(GCB_MIIM_MII_STATUS, 2, 2), + [GCB_MIIM_MII_STATUS_BUSY] = REG_FIELD(GCB_MIIM_MII_STATUS, 3, 3), + /* Replicated per number of ports (11), register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 13, 13, 11, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 4, 5, 11, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 2, 3, 11, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 11, 20, 11, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 10, 11, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4), +}; + +static const struct ocelot_stat_layout vsc9953_stats_layout[] = { + { .offset = 0x00, .name = "rx_octets", }, + { 
.offset = 0x01, .name = "rx_unicast", }, + { .offset = 0x02, .name = "rx_multicast", }, + { .offset = 0x03, .name = "rx_broadcast", }, + { .offset = 0x04, .name = "rx_shorts", }, + { .offset = 0x05, .name = "rx_fragments", }, + { .offset = 0x06, .name = "rx_jabbers", }, + { .offset = 0x07, .name = "rx_crc_align_errs", }, + { .offset = 0x08, .name = "rx_sym_errs", }, + { .offset = 0x09, .name = "rx_frames_below_65_octets", }, + { .offset = 0x0A, .name = "rx_frames_65_to_127_octets", }, + { .offset = 0x0B, .name = "rx_frames_128_to_255_octets", }, + { .offset = 0x0C, .name = "rx_frames_256_to_511_octets", }, + { .offset = 0x0D, .name = "rx_frames_512_to_1023_octets", }, + { .offset = 0x0E, .name = "rx_frames_1024_to_1526_octets", }, + { .offset = 0x0F, .name = "rx_frames_over_1526_octets", }, + { .offset = 0x10, .name = "rx_pause", }, + { .offset = 0x11, .name = "rx_control", }, + { .offset = 0x12, .name = "rx_longs", }, + { .offset = 0x13, .name = "rx_classified_drops", }, + { .offset = 0x14, .name = "rx_red_prio_0", }, + { .offset = 0x15, .name = "rx_red_prio_1", }, + { .offset = 0x16, .name = "rx_red_prio_2", }, + { .offset = 0x17, .name = "rx_red_prio_3", }, + { .offset = 0x18, .name = "rx_red_prio_4", }, + { .offset = 0x19, .name = "rx_red_prio_5", }, + { .offset = 0x1A, .name = "rx_red_prio_6", }, + { .offset = 0x1B, .name = "rx_red_prio_7", }, + { .offset = 0x1C, .name = "rx_yellow_prio_0", }, + { .offset = 0x1D, .name = "rx_yellow_prio_1", }, + { .offset = 0x1E, .name = "rx_yellow_prio_2", }, + { .offset = 0x1F, .name = "rx_yellow_prio_3", }, + { .offset = 0x20, .name = "rx_yellow_prio_4", }, + { .offset = 0x21, .name = "rx_yellow_prio_5", }, + { .offset = 0x22, .name = "rx_yellow_prio_6", }, + { .offset = 0x23, .name = "rx_yellow_prio_7", }, + { .offset = 0x24, .name = "rx_green_prio_0", }, + { .offset = 0x25, .name = "rx_green_prio_1", }, + { .offset = 0x26, .name = "rx_green_prio_2", }, + { .offset = 0x27, .name = "rx_green_prio_3", }, + { .offset = 0x28, .name = "rx_green_prio_4", }, + { .offset = 0x29, .name = "rx_green_prio_5", }, + { .offset = 0x2A, .name = "rx_green_prio_6", }, + { .offset = 0x2B, .name = "rx_green_prio_7", }, + { .offset = 0x40, .name = "tx_octets", }, + { .offset = 0x41, .name = "tx_unicast", }, + { .offset = 0x42, .name = "tx_multicast", }, + { .offset = 0x43, .name = "tx_broadcast", }, + { .offset = 0x44, .name = "tx_collision", }, + { .offset = 0x45, .name = "tx_drops", }, + { .offset = 0x46, .name = "tx_pause", }, + { .offset = 0x47, .name = "tx_frames_below_65_octets", }, + { .offset = 0x48, .name = "tx_frames_65_to_127_octets", }, + { .offset = 0x49, .name = "tx_frames_128_255_octets", }, + { .offset = 0x4A, .name = "tx_frames_256_511_octets", }, + { .offset = 0x4B, .name = "tx_frames_512_1023_octets", }, + { .offset = 0x4C, .name = "tx_frames_1024_1526_octets", }, + { .offset = 0x4D, .name = "tx_frames_over_1526_octets", }, + { .offset = 0x4E, .name = "tx_yellow_prio_0", }, + { .offset = 0x4F, .name = "tx_yellow_prio_1", }, + { .offset = 0x50, .name = "tx_yellow_prio_2", }, + { .offset = 0x51, .name = "tx_yellow_prio_3", }, + { .offset = 0x52, .name = "tx_yellow_prio_4", }, + { .offset = 0x53, .name = "tx_yellow_prio_5", }, + { .offset = 0x54, .name = "tx_yellow_prio_6", }, + { .offset = 0x55, .name = "tx_yellow_prio_7", }, + { .offset = 0x56, .name = "tx_green_prio_0", }, + { .offset = 0x57, .name = "tx_green_prio_1", }, + { .offset = 0x58, .name = "tx_green_prio_2", }, + { .offset = 0x59, .name = "tx_green_prio_3", }, + { .offset = 0x5A, .name = 
"tx_green_prio_4", }, + { .offset = 0x5B, .name = "tx_green_prio_5", }, + { .offset = 0x5C, .name = "tx_green_prio_6", }, + { .offset = 0x5D, .name = "tx_green_prio_7", }, + { .offset = 0x5E, .name = "tx_aged", }, + { .offset = 0x80, .name = "drop_local", }, + { .offset = 0x81, .name = "drop_tail", }, + { .offset = 0x82, .name = "drop_yellow_prio_0", }, + { .offset = 0x83, .name = "drop_yellow_prio_1", }, + { .offset = 0x84, .name = "drop_yellow_prio_2", }, + { .offset = 0x85, .name = "drop_yellow_prio_3", }, + { .offset = 0x86, .name = "drop_yellow_prio_4", }, + { .offset = 0x87, .name = "drop_yellow_prio_5", }, + { .offset = 0x88, .name = "drop_yellow_prio_6", }, + { .offset = 0x89, .name = "drop_yellow_prio_7", }, + { .offset = 0x8A, .name = "drop_green_prio_0", }, + { .offset = 0x8B, .name = "drop_green_prio_1", }, + { .offset = 0x8C, .name = "drop_green_prio_2", }, + { .offset = 0x8D, .name = "drop_green_prio_3", }, + { .offset = 0x8E, .name = "drop_green_prio_4", }, + { .offset = 0x8F, .name = "drop_green_prio_5", }, + { .offset = 0x90, .name = "drop_green_prio_6", }, + { .offset = 0x91, .name = "drop_green_prio_7", }, +}; + +static struct vcap_field vsc9953_vcap_is2_keys[] = { + /* Common: 41 bits */ + [VCAP_IS2_TYPE] = { 0, 4}, + [VCAP_IS2_HK_FIRST] = { 4, 1}, + [VCAP_IS2_HK_PAG] = { 5, 8}, + [VCAP_IS2_HK_IGR_PORT_MASK] = { 13, 11}, + [VCAP_IS2_HK_RSV2] = { 24, 1}, + [VCAP_IS2_HK_HOST_MATCH] = { 25, 1}, + [VCAP_IS2_HK_L2_MC] = { 26, 1}, + [VCAP_IS2_HK_L2_BC] = { 27, 1}, + [VCAP_IS2_HK_VLAN_TAGGED] = { 28, 1}, + [VCAP_IS2_HK_VID] = { 29, 12}, + [VCAP_IS2_HK_DEI] = { 41, 1}, + [VCAP_IS2_HK_PCP] = { 42, 3}, + /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */ + [VCAP_IS2_HK_L2_DMAC] = { 45, 48}, + [VCAP_IS2_HK_L2_SMAC] = { 93, 48}, + /* MAC_ETYPE (TYPE=000) */ + [VCAP_IS2_HK_MAC_ETYPE_ETYPE] = {141, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0] = {157, 16}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1] = {173, 8}, + [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2] = {181, 3}, + /* MAC_LLC (TYPE=001) */ + [VCAP_IS2_HK_MAC_LLC_L2_LLC] = {141, 40}, + /* MAC_SNAP (TYPE=010) */ + [VCAP_IS2_HK_MAC_SNAP_L2_SNAP] = {141, 40}, + /* MAC_ARP (TYPE=011) */ + [VCAP_IS2_HK_MAC_ARP_SMAC] = { 45, 48}, + [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK] = { 93, 1}, + [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK] = { 94, 1}, + [VCAP_IS2_HK_MAC_ARP_LEN_OK] = { 95, 1}, + [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH] = { 96, 1}, + [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH] = { 97, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN] = { 98, 1}, + [VCAP_IS2_HK_MAC_ARP_OPCODE] = { 99, 2}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP] = {101, 32}, + [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP] = {133, 32}, + [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP] = {165, 1}, + /* IP4_TCP_UDP / IP4_OTHER common */ + [VCAP_IS2_HK_IP4] = { 45, 1}, + [VCAP_IS2_HK_L3_FRAGMENT] = { 46, 1}, + [VCAP_IS2_HK_L3_FRAG_OFS_GT0] = { 47, 1}, + [VCAP_IS2_HK_L3_OPTIONS] = { 48, 1}, + [VCAP_IS2_HK_IP4_L3_TTL_GT0] = { 49, 1}, + [VCAP_IS2_HK_L3_TOS] = { 50, 8}, + [VCAP_IS2_HK_L3_IP4_DIP] = { 58, 32}, + [VCAP_IS2_HK_L3_IP4_SIP] = { 90, 32}, + [VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1}, + /* IP4_TCP_UDP (TYPE=100) */ + [VCAP_IS2_HK_TCP] = {123, 1}, + [VCAP_IS2_HK_L4_SPORT] = {124, 16}, + [VCAP_IS2_HK_L4_DPORT] = {140, 16}, + [VCAP_IS2_HK_L4_RNG] = {156, 8}, + [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1}, + [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1}, + [VCAP_IS2_HK_L4_URG] = {166, 1}, + [VCAP_IS2_HK_L4_ACK] = {167, 1}, + [VCAP_IS2_HK_L4_PSH] = {168, 1}, + [VCAP_IS2_HK_L4_RST] = {169, 1}, + [VCAP_IS2_HK_L4_SYN] = {170, 1}, + [VCAP_IS2_HK_L4_FIN] = {171, 1}, + /* 
IP4_OTHER (TYPE=101) */ + [VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8}, + [VCAP_IS2_HK_L3_PAYLOAD] = {131, 56}, + /* IP6_STD (TYPE=110) */ + [VCAP_IS2_HK_IP6_L3_TTL_GT0] = { 45, 1}, + [VCAP_IS2_HK_L3_IP6_SIP] = { 46, 128}, + [VCAP_IS2_HK_IP6_L3_PROTO] = {174, 8}, +}; + +static struct vcap_field vsc9953_vcap_is2_actions[] = { + [VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1}, + [VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1}, + [VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3}, + [VCAP_IS2_ACT_MASK_MODE] = { 5, 2}, + [VCAP_IS2_ACT_MIRROR_ENA] = { 7, 1}, + [VCAP_IS2_ACT_LRN_DIS] = { 8, 1}, + [VCAP_IS2_ACT_POLICE_ENA] = { 9, 1}, + [VCAP_IS2_ACT_POLICE_IDX] = { 10, 8}, + [VCAP_IS2_ACT_POLICE_VCAP_ONLY] = { 21, 1}, + [VCAP_IS2_ACT_PORT_MASK] = { 22, 10}, + [VCAP_IS2_ACT_ACL_ID] = { 44, 6}, + [VCAP_IS2_ACT_HIT_CNT] = { 50, 32}, +}; + +static const struct vcap_props vsc9953_vcap_props[] = { + [VCAP_IS2] = { + .tg_width = 2, + .sw_count = 4, + .entry_count = VSC9953_VCAP_IS2_CNT, + .entry_width = VSC9953_VCAP_IS2_ENTRY_WIDTH, + .action_count = VSC9953_VCAP_IS2_CNT + + VSC9953_VCAP_PORT_CNT + 2, + .action_width = 101, + .action_type_width = 1, + .action_table = { + [IS2_ACTION_TYPE_NORMAL] = { + .width = 44, + .count = 2 + }, + [IS2_ACTION_TYPE_SMAC_SIP] = { + .width = 6, + .count = 4 + }, + }, + .counter_words = 4, + .counter_width = 32, + }, +}; + +#define VSC9953_INIT_TIMEOUT 50000 +#define VSC9953_GCB_RST_SLEEP 100 +#define VSC9953_SYS_RAMINIT_SLEEP 80 +#define VCS9953_MII_TIMEOUT 10000 + +static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val); + + return val; +} + +static int vsc9953_sys_ram_init_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, SYS_RESET_CFG_MEM_INIT, &val); + + return val; +} + +static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val); + + return val; +} + +static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot) +{ + int val; + + ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val); + + return val; +} + +static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum, + u16 value) +{ + struct ocelot *ocelot = bus->priv; + int err, cmd, val; + + /* Wait while MIIM controller becomes idle */ + err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, + val, !val, 10, VCS9953_MII_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "MDIO write: pending timeout\n"); + goto out; + } + + cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | + (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | + MSCC_MIIM_CMD_OPR_WRITE; + + ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); + +out: + return err; +} + +static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct ocelot *ocelot = bus->priv; + int err, cmd, val; + + /* Wait until MIIM controller becomes idle */ + err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot, + val, !val, 10, VCS9953_MII_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "MDIO read: pending timeout\n"); + goto out; + } + + /* Write the MIIM COMMAND register */ + cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | + (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ; + + ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD); + + /* Wait while read operation via the MIIM controller is in progress */ + err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot, + val, !val, 10, VCS9953_MII_TIMEOUT); + if 
(err) { + dev_err(ocelot->dev, "MDIO read: busy timeout\n"); + goto out; + } + + val = ocelot_read(ocelot, GCB_MIIM_MII_DATA); + + err = val & 0xFFFF; +out: + return err; +} + +static int vsc9953_reset(struct ocelot *ocelot) +{ + int val, err; + + /* soft-reset the switch core */ + ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1); + + err = readx_poll_timeout(vsc9953_gcb_soft_rst_status, ocelot, val, !val, + VSC9953_GCB_RST_SLEEP, VSC9953_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch core reset\n"); + return err; + } + + /* initialize switch mem ~40us */ + ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1); + ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1); + + err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val, + VSC9953_SYS_RAMINIT_SLEEP, + VSC9953_INIT_TIMEOUT); + if (err) { + dev_err(ocelot->dev, "timeout: switch sram init\n"); + return err; + } + + /* enable switch core */ + ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1); + ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1); + + return 0; +} + +static void vsc9953_phylink_validate(struct ocelot *ocelot, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != ocelot_port->phy_mode) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + phylink_set(mask, Autoneg); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 1000baseT_Full); + + if (state->interface == PHY_INTERFACE_MODE_INTERNAL) { + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port, + phy_interface_t phy_mode) +{ + switch (phy_mode) { + case PHY_INTERFACE_MODE_INTERNAL: + if (port != 8 && port != 9) + return -ENOTSUPP; + return 0; + case PHY_INTERFACE_MODE_SGMII: + case PHY_INTERFACE_MODE_QSGMII: + /* Not supported on internal to-CPU ports */ + if (port == 8 || port == 9) + return -ENOTSUPP; + return 0; + default: + return -ENOTSUPP; + } +} + +/* Watermark encode + * Bit 9: Unit; 0:1, 1:16 + * Bit 8-0: Value to be multiplied with unit + */ +static u16 vsc9953_wm_enc(u16 value) +{ + if (value >= BIT(9)) + return BIT(9) | (value / 16); + + return value; +} + +static const struct ocelot_ops vsc9953_ops = { + .reset = vsc9953_reset, + .wm_enc = vsc9953_wm_enc, +}; + +static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot) +{ + struct felix *felix = ocelot_to_felix(ocelot); + struct device *dev = ocelot->dev; + struct mii_bus *bus; + int port; + int rc; + + felix->pcs = devm_kcalloc(dev, felix->info->num_ports, + sizeof(struct phy_device *), + GFP_KERNEL); + if (!felix->pcs) { + dev_err(dev, "failed to allocate array for PCS PHYs\n"); + return -ENOMEM; + } + + bus = devm_mdiobus_alloc(dev); + if (!bus) + return -ENOMEM; + + bus->name = "VSC9953 internal MDIO bus"; + bus->read = vsc9953_mdio_read; + bus->write = vsc9953_mdio_write; + bus->parent = dev; + bus->priv = ocelot; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", 
dev_name(dev)); + + /* Needed in order to initialize the bus mutex lock */ + rc = mdiobus_register(bus); + if (rc < 0) { + dev_err(dev, "failed to register MDIO bus\n"); + return rc; + } + + felix->imdio = bus; + + for (port = 0; port < felix->info->num_ports; port++) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct phy_device *pcs; + int addr = port + 4; + + if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL) + continue; + + pcs = get_phy_device(felix->imdio, addr, false); + if (IS_ERR(pcs)) + continue; + + pcs->interface = ocelot_port->phy_mode; + felix->pcs[port] = pcs; + + dev_info(dev, "Found PCS at internal MDIO address %d\n", addr); + } + + return 0; +} + +static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + u8 *template = ocelot_port->xmit_template; + u64 bypass, dest, src; + + /* Set the source port as the CPU port module and not the + * NPI port + */ + src = ocelot->num_phys_ports; + dest = BIT(port); + bypass = true; + + packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0); + packing(template, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0); + packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0); +} + +static const struct felix_info seville_info_vsc9953 = { + .target_io_res = vsc9953_target_io_res, + .port_io_res = vsc9953_port_io_res, + .regfields = vsc9953_regfields, + .map = vsc9953_regmap, + .ops = &vsc9953_ops, + .stats_layout = vsc9953_stats_layout, + .num_stats = ARRAY_SIZE(vsc9953_stats_layout), + .vcap_is2_keys = vsc9953_vcap_is2_keys, + .vcap_is2_actions = vsc9953_vcap_is2_actions, + .vcap = vsc9953_vcap_props, + .shared_queue_sz = 128 * 1024, + .num_mact_rows = 2048, + .num_ports = 10, + .mdio_bus_alloc = vsc9953_mdio_bus_alloc, + .mdio_bus_free = vsc9959_mdio_bus_free, + .pcs_config = vsc9959_pcs_config, + .pcs_link_up = vsc9959_pcs_link_up, + .pcs_link_state = vsc9959_pcs_link_state, + .phylink_validate = vsc9953_phylink_validate, + .prevalidate_phy_mode = vsc9953_prevalidate_phy_mode, + .xmit_template_populate = vsc9953_xmit_template_populate, +}; + +static int seville_probe(struct platform_device *pdev) +{ + struct dsa_switch *ds; + struct ocelot *ocelot; + struct resource *res; + struct felix *felix; + int err; + + felix = kzalloc(sizeof(struct felix), GFP_KERNEL); + if (!felix) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate driver memory\n"); + goto err_alloc_felix; + } + + platform_set_drvdata(pdev, felix); + + ocelot = &felix->ocelot; + ocelot->dev = &pdev->dev; + felix->info = &seville_info_vsc9953; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + felix->switch_base = res->start; + + ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); + if (!ds) { + err = -ENOMEM; + dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); + goto err_alloc_ds; + } + + ds->dev = &pdev->dev; + ds->num_ports = felix->info->num_ports; + ds->ops = &felix_switch_ops; + ds->priv = ocelot; + felix->ds = ds; + + err = dsa_register_switch(ds); + if (err) { + dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); + goto err_register_ds; + } + + return 0; + +err_register_ds: + kfree(ds); +err_alloc_ds: +err_alloc_felix: + kfree(felix); + return err; +} + +static int seville_remove(struct platform_device *pdev) +{ + struct felix *felix; + + felix = platform_get_drvdata(pdev); + + dsa_unregister_switch(felix->ds); + + kfree(felix->ds); + kfree(felix); + + return 0; +} + +static const struct of_device_id seville_of_match[] = { + { .compatible = 
"mscc,vsc9953-switch" }, + { }, +}; +MODULE_DEVICE_TABLE(of, seville_of_match); + +struct platform_driver seville_vsc9953_driver = { + .probe = seville_probe, + .remove = seville_remove, + .driver = { + .name = "mscc_seville", + .of_match_table = of_match_ptr(seville_of_match), + }, +}; From 77710929da1359cfca1f214ffd5ad8f32f8d72ed Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 19:57:11 +0300 Subject: [PATCH 0939/2056] docs: devicetree: add bindings for Seville DSA switch inside Felix driver There are no non-standard bindings being used. However Felix is a PCI device and Seville is a platform device. So give an example of device tree for this switch and document its compatible string. Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/ocelot.txt | 105 +++++++++++++++++- 1 file changed, 101 insertions(+), 4 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/ocelot.txt b/Documentation/devicetree/bindings/net/dsa/ocelot.txt index 66a129fea705..7a271d070b72 100644 --- a/Documentation/devicetree/bindings/net/dsa/ocelot.txt +++ b/Documentation/devicetree/bindings/net/dsa/ocelot.txt @@ -4,10 +4,15 @@ Microchip Ocelot switch driver family Felix ----- -The VSC9959 core is currently the only switch supported by the driver, and is -found in the NXP LS1028A. It is a PCI device, part of the larger ENETC root -complex. As a result, the ethernet-switch node is a sub-node of the PCIe root -complex node and its "reg" property conforms to the parent node bindings: +Currently the switches supported by the felix driver are: + +- VSC9959 (Felix) +- VSC9953 (Seville) + +The VSC9959 switch is found in the NXP LS1028A. It is a PCI device, part of the +larger ENETC root complex. As a result, the ethernet-switch node is a sub-node +of the PCIe root complex node and its "reg" property conforms to the parent +node bindings: * reg: Specifies PCIe Device Number and Function Number of the endpoint device, in this case for the Ethernet L2Switch it is PF5 (of device 0, bus 0). @@ -114,3 +119,95 @@ Example: }; }; }; + +The VSC9953 switch is found inside NXP T1040. It is a platform device with the +following required properties: + +- compatible: + Must be "mscc,vsc9953-switch". 
+ +Supported PHY interface types (appropriate SerDes protocol setting changes are +needed in the RCW binary): + +* phy_mode = "internal": on ports 8 and 9 +* phy_mode = "sgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7 +* phy_mode = "qsgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7 + +Example: + +&soc { + ethernet-switch@800000 { + #address-cells = <0x1>; + #size-cells = <0x0>; + compatible = "mscc,vsc9953-switch"; + little-endian; + reg = <0x800000 0x290000>; + + ports { + #address-cells = <0x1>; + #size-cells = <0x0>; + + port@0 { + reg = <0x0>; + label = "swp0"; + }; + + port@1 { + reg = <0x1>; + label = "swp1"; + }; + + port@2 { + reg = <0x2>; + label = "swp2"; + }; + + port@3 { + reg = <0x3>; + label = "swp3"; + }; + + port@4 { + reg = <0x4>; + label = "swp4"; + }; + + port@5 { + reg = <0x5>; + label = "swp5"; + }; + + port@6 { + reg = <0x6>; + label = "swp6"; + }; + + port@7 { + reg = <0x7>; + label = "swp7"; + }; + + port@8 { + reg = <0x8>; + phy-mode = "internal"; + ethernet = <&enet0>; + + fixed-link { + speed = <2500>; + full-duplex; + }; + }; + + port@9 { + reg = <0x9>; + phy-mode = "internal"; + status = "disabled"; + + fixed-link { + speed = <2500>; + full-duplex; + }; + }; + }; + }; +}; From a7d02782357b5d2f07d1d00b522da2613f649b53 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 19:14:29 +0200 Subject: [PATCH 0940/2056] net: ethernet: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/net/ethernet/cortina/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cortina/Kconfig b/drivers/net/ethernet/cortina/Kconfig index ac8cb5744a87..aaf9e294b70b 100644 --- a/drivers/net/ethernet/cortina/Kconfig +++ b/drivers/net/ethernet/cortina/Kconfig @@ -7,7 +7,7 @@ config NET_VENDOR_CORTINA help If you have a network (Ethernet) card belonging to this class, say Y and read the Ethernet-HOWTO, available from - . + . if NET_VENDOR_CORTINA From d788a0b512f5f97d00286e9c1438517e40d19703 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 19:24:39 +0200 Subject: [PATCH 0941/2056] net: jme: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/jme.c | 2 +- drivers/net/ethernet/jme.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index c97c74164c73..ddc757680089 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -3,7 +3,7 @@ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver * * Copyright 2008 JMicron Technology Corporation - * http://www.jmicron.com/ + * https://www.jmicron.com/ * Copyright (c) 2009 - 2010 Guo-Fu Tseng * * Author: Guo-Fu Tseng diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h index 2bba5ce20289..a2c3b00d939d 100644 --- a/drivers/net/ethernet/jme.h +++ b/drivers/net/ethernet/jme.h @@ -3,7 +3,7 @@ * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver * * Copyright 2008 JMicron Technology Corporation - * http://www.jmicron.com/ + * https://www.jmicron.com/ * Copyright (c) 2009 - 2010 Guo-Fu Tseng * * Author: Guo-Fu Tseng From 350d1931423c6bc683058b8d497a118576754fbc Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 19:34:47 +0200 Subject: [PATCH 0942/2056] net: wan: cosa: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/net/wan/cosa.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 5d6532ad6b78..f8aed0696d77 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -12,7 +12,7 @@ * HARDWARE INFO * * Both cards are developed at the Institute of Computer Science, - * Masaryk University (http://www.ics.muni.cz/). The hardware is + * Masaryk University (https://www.ics.muni.cz/). The hardware is * developed by Jiri Novotny . More information * and the photo of both cards is available at * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares @@ -35,7 +35,7 @@ * * SOFTWARE INFO * - * The homepage of the Linux driver is at http://www.fi.muni.cz/~kas/cosa/. + * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/. * The CVS tree of Linux driver can be viewed there, as well as the * firmware binaries and user-space utilities for downloading the firmware * into the card and setting up the card. From 428f09c2b757689edf778ae039ceb8a88475084d Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 13 Jul 2020 21:55:03 +0200 Subject: [PATCH 0943/2056] amd8111e: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'amd8111e_init_ring()', GFP_ATOMIC must be used because a spin_lock is hold. One of the call chains is: amd8111e_open ** spin_lock_irq(&lp->lock); --> amd8111e_restart --> amd8111e_init_ring ** spin_unlock_irq(&lp->lock); The rest of the patch is produced by coccinelle with a few adjustments to please checkpatch.pl. 
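The reason the flag matters: GFP_KERNEL allocations may sleep, and sleeping
while a spinlock is held is not allowed. A rough sketch of the constraint
(function and variable names are made up; only the locking/allocation pattern
mirrors the call chain above):

	/* Sketch only: an allocation made in atomic context must not sleep,
	 * hence GFP_ATOMIC rather than GFP_KERNEL.
	 */
	static void *alloc_ring_under_lock(struct pci_dev *pdev,
					   spinlock_t *lock, size_t size,
					   dma_addr_t *handle)
	{
		void *buf;

		spin_lock_irq(lock);	/* sleeping forbidden from here ... */
		buf = dma_alloc_coherent(&pdev->dev, size, handle, GFP_ATOMIC);
		spin_unlock_irq(lock);	/* ... until the lock is dropped */

		return buf;
	}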
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 81 ++++++++++++++++------------- 1 file changed, 44 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 5d389a984394..b6c43b58ed3d 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -226,7 +226,9 @@ static int amd8111e_free_skbs(struct net_device *dev) /* Freeing transmit skbs */ for(i = 0; i < NUM_TX_BUFFERS; i++){ if(lp->tx_skbuff[i]){ - pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE); + dma_unmap_single(&lp->pci_dev->dev, + lp->tx_dma_addr[i], + lp->tx_skbuff[i]->len, DMA_TO_DEVICE); dev_kfree_skb (lp->tx_skbuff[i]); lp->tx_skbuff[i] = NULL; lp->tx_dma_addr[i] = 0; @@ -236,8 +238,9 @@ static int amd8111e_free_skbs(struct net_device *dev) for (i = 0; i < NUM_RX_BUFFERS; i++){ rx_skbuff = lp->rx_skbuff[i]; if(rx_skbuff != NULL){ - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i], - lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE); + dma_unmap_single(&lp->pci_dev->dev, + lp->rx_dma_addr[i], + lp->rx_buff_len - 2, DMA_FROM_DEVICE); dev_kfree_skb(lp->rx_skbuff[i]); lp->rx_skbuff[i] = NULL; lp->rx_dma_addr[i] = 0; @@ -287,20 +290,20 @@ static int amd8111e_init_ring(struct net_device *dev) amd8111e_free_skbs(dev); else{ - /* allocate the tx and rx descriptors */ - if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, - &lp->tx_ring_dma_addr)) == NULL) - + /* allocate the tx and rx descriptors */ + lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, + &lp->tx_ring_dma_addr, GFP_ATOMIC); + if (!lp->tx_ring) goto err_no_mem; - if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, - &lp->rx_ring_dma_addr)) == NULL) - + lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, + &lp->rx_ring_dma_addr, GFP_ATOMIC); + if (!lp->rx_ring) goto err_free_tx_ring; - } + /* Set new receive buff size */ amd8111e_set_rx_buff_len(dev); @@ -318,8 +321,10 @@ static int amd8111e_init_ring(struct net_device *dev) } /* Initilaizing receive descriptors */ for (i = 0; i < NUM_RX_BUFFERS; i++) { - lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, - lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev, + lp->rx_skbuff[i]->data, + lp->rx_buff_len - 2, + DMA_FROM_DEVICE); lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]); lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2); @@ -338,15 +343,15 @@ static int amd8111e_init_ring(struct net_device *dev) err_free_rx_ring: - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring, - lp->rx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, + lp->rx_ring, lp->rx_ring_dma_addr); err_free_tx_ring: - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring, - lp->tx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, + lp->tx_ring, lp->tx_ring_dma_addr); err_no_mem: return -ENOMEM; @@ -612,16 +617,16 @@ static void amd8111e_free_ring(struct amd8111e_priv *lp) { /* Free transmit and receive descriptor rings */ if(lp->rx_ring){ - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR, - lp->rx_ring, lp->rx_ring_dma_addr); + 
dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR, + lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring = NULL; } if(lp->tx_ring){ - pci_free_consistent(lp->pci_dev, - sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR, - lp->tx_ring, lp->tx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR, + lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring = NULL; } @@ -649,9 +654,10 @@ static int amd8111e_tx(struct net_device *dev) /* We must free the original skb */ if (lp->tx_skbuff[tx_index]) { - pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], - lp->tx_skbuff[tx_index]->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&lp->pci_dev->dev, + lp->tx_dma_addr[tx_index], + lp->tx_skbuff[tx_index]->len, + DMA_TO_DEVICE); dev_consume_skb_irq(lp->tx_skbuff[tx_index]); lp->tx_skbuff[tx_index] = NULL; lp->tx_dma_addr[tx_index] = 0; @@ -737,14 +743,14 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget) skb_reserve(new_skb, 2); skb = lp->rx_skbuff[rx_index]; - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); + dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index], + lp->rx_buff_len - 2, DMA_FROM_DEVICE); skb_put(skb, pkt_len); lp->rx_skbuff[rx_index] = new_skb; - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, + lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev, new_skb->data, - lp->rx_buff_len-2, - PCI_DMA_FROMDEVICE); + lp->rx_buff_len - 2, + DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, dev); @@ -1270,7 +1276,8 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb, } #endif lp->tx_dma_addr[tx_index] = - pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); + dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); lp->tx_ring[tx_index].buff_phy_addr = cpu_to_le32(lp->tx_dma_addr[tx_index]); @@ -1773,7 +1780,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, } /* Initialize DMA */ - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) { dev_err(&pdev->dev, "DMA not supported\n"); err = -ENODEV; goto err_free_reg; From da6e8ace5623290490a9466f5aba9e40c925ce1b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 13 Jul 2020 22:18:45 +0200 Subject: [PATCH 0944/2056] pcnet32: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'pcnet32_realloc_tx_ring()' and 'pcnet32_realloc_rx_ring()', GFP_ATOMIC must be used because a spin_lock is hold. The call chain is: pcnet32_set_ringparam ** spin_lock_irqsave(&lp->lock, flags); --> pcnet32_realloc_tx_ring --> pcnet32_realloc_rx_ring ** spin_unlock_irqrestore(&lp->lock, flags); When memory is in 'pcnet32_probe1()' and 'pcnet32_alloc_ring()', GFP_KERNEL can be used. While at it, update a few comments and pr_err messages to be more in line with the new function names. 
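For reference, the wrapper being removed is roughly shaped like this (quoted
from memory of include/linux/pci-dma-compat.h, so treat it as approximate
rather than verbatim); it always forced GFP_ATOMIC, which is why converted
call sites now have to pick the GFP flag explicitly:

	static inline void *
	pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
			     dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
					  size, dma_handle, GFP_ATOMIC);
	}

Callers that can sleep, such as probe-time allocations, may therefore relax
to GFP_KERNEL, while callers running under a lock must keep GFP_ATOMIC.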
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 181 +++++++++++++---------------- 1 file changed, 84 insertions(+), 97 deletions(-) diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index f47140391f67..187b0b9a6e1d 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -250,7 +250,7 @@ struct pcnet32_access { /* * The first field of pcnet32_private is read by the ethernet device - * so the structure should be allocated using pci_alloc_consistent(). + * so the structure should be allocated using dma_alloc_coherent(). */ struct pcnet32_private { struct pcnet32_init_block *init_block; @@ -258,7 +258,7 @@ struct pcnet32_private { struct pcnet32_rx_head *rx_ring; struct pcnet32_tx_head *tx_ring; dma_addr_t init_dma_addr;/* DMA address of beginning of the init block, - returned by pci_alloc_consistent */ + returned by dma_alloc_coherent */ struct pci_dev *pci_dev; const char *name; /* The saved address of a sent-in-place packet/buffer, for skfree(). 
*/ @@ -485,9 +485,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, pcnet32_purge_tx_ring(dev); new_tx_ring = - pci_zalloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * entries, - &new_ring_dma_addr); + dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_tx_head) * entries, + &new_ring_dma_addr, GFP_ATOMIC); if (new_tx_ring == NULL) return; @@ -501,9 +501,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, kfree(lp->tx_skbuff); kfree(lp->tx_dma_addr); - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, - lp->tx_ring, lp->tx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring_size = entries; lp->tx_mod_mask = lp->tx_ring_size - 1; @@ -517,10 +517,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev, free_new_lists: kfree(new_dma_addr_list); free_new_tx_ring: - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * entries, - new_tx_ring, - new_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_tx_head) * entries, + new_tx_ring, new_ring_dma_addr); } /* @@ -545,9 +544,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, unsigned int entries = BIT(size); new_rx_ring = - pci_zalloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * entries, - &new_ring_dma_addr); + dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_rx_head) * entries, + &new_ring_dma_addr, GFP_ATOMIC); if (new_rx_ring == NULL) return; @@ -580,10 +579,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, skb_reserve(rx_skbuff, NET_IP_ALIGN); new_dma_addr_list[new] = - pci_map_single(lp->pci_dev, rx_skbuff->data, - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(lp->pci_dev, - new_dma_addr_list[new])) { + dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, + PKT_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) { netif_err(lp, drv, dev, "%s dma mapping failed\n", __func__); dev_kfree_skb(new_skb_list[new]); @@ -596,22 +594,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, /* and free any unneeded buffers */ for (; new < lp->rx_ring_size; new++) { if (lp->rx_skbuff[new]) { - if (!pci_dma_mapping_error(lp->pci_dev, - lp->rx_dma_addr[new])) - pci_unmap_single(lp->pci_dev, + if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new])) + dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[new], PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(lp->rx_skbuff[new]); } } kfree(lp->rx_skbuff); kfree(lp->rx_dma_addr); - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - lp->rx_ring_size, lp->rx_ring, - lp->rx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, + lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring_size = entries; lp->rx_mod_mask = lp->rx_ring_size - 1; @@ -625,12 +621,11 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev, free_all_new: while (--new >= lp->rx_ring_size) { if (new_skb_list[new]) { - if (!pci_dma_mapping_error(lp->pci_dev, - new_dma_addr_list[new])) - pci_unmap_single(lp->pci_dev, + if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) + dma_unmap_single(&lp->pci_dev->dev, new_dma_addr_list[new], PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(new_skb_list[new]); } } @@ -638,10 +633,9 @@ static void 
pcnet32_realloc_rx_ring(struct net_device *dev, free_new_lists: kfree(new_dma_addr_list); free_new_rx_ring: - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * entries, - new_rx_ring, - new_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_rx_head) * entries, + new_rx_ring, new_ring_dma_addr); } static void pcnet32_purge_rx_ring(struct net_device *dev) @@ -654,12 +648,11 @@ static void pcnet32_purge_rx_ring(struct net_device *dev) lp->rx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->rx_skbuff[i]) { - if (!pci_dma_mapping_error(lp->pci_dev, - lp->rx_dma_addr[i])) - pci_unmap_single(lp->pci_dev, + if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) + dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[i], PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(lp->rx_skbuff[i]); } lp->rx_skbuff[i] = NULL; @@ -1036,9 +1029,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) *packet++ = i; lp->tx_dma_addr[x] = - pci_map_single(lp->pci_dev, skb->data, skb->len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) { + dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) { netif_printk(lp, hw, KERN_DEBUG, dev, "DMA mapping error at line: %d!\n", __LINE__); @@ -1226,21 +1219,21 @@ static void pcnet32_rx_entry(struct net_device *dev, */ if (newskb) { skb_reserve(newskb, NET_IP_ALIGN); - new_dma_addr = pci_map_single(lp->pci_dev, + new_dma_addr = dma_map_single(&lp->pci_dev->dev, newskb->data, PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) { netif_err(lp, rx_err, dev, "DMA mapping error.\n"); dev_kfree_skb(newskb); skb = NULL; } else { skb = lp->rx_skbuff[entry]; - pci_unmap_single(lp->pci_dev, + dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[entry], PKT_BUF_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb_put(skb, pkt_len); lp->rx_skbuff[entry] = newskb; lp->rx_dma_addr[entry] = new_dma_addr; @@ -1259,17 +1252,15 @@ static void pcnet32_rx_entry(struct net_device *dev, if (!rx_in_place) { skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pkt_len); /* Make room */ - pci_dma_sync_single_for_cpu(lp->pci_dev, - lp->rx_dma_addr[entry], - pkt_len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&lp->pci_dev->dev, + lp->rx_dma_addr[entry], pkt_len, + DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, (unsigned char *)(lp->rx_skbuff[entry]->data), pkt_len); - pci_dma_sync_single_for_device(lp->pci_dev, - lp->rx_dma_addr[entry], - pkt_len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&lp->pci_dev->dev, + lp->rx_dma_addr[entry], pkt_len, + DMA_FROM_DEVICE); } dev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, dev); @@ -1358,10 +1349,10 @@ static int pcnet32_tx(struct net_device *dev) /* We must free the original skb */ if (lp->tx_skbuff[entry]) { - pci_unmap_single(lp->pci_dev, + dma_unmap_single(&lp->pci_dev->dev, lp->tx_dma_addr[entry], - lp->tx_skbuff[entry]-> - len, PCI_DMA_TODEVICE); + lp->tx_skbuff[entry]->len, + DMA_TO_DEVICE); dev_kfree_skb_any(lp->tx_skbuff[entry]); lp->tx_skbuff[entry] = NULL; lp->tx_dma_addr[entry] = 0; @@ -1551,7 +1542,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_dev; } - err = pci_set_dma_mask(pdev, 
PCNET32_DMA_MASK); + err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK); if (err) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); @@ -1834,12 +1825,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) dev->base_addr = ioaddr; lp = netdev_priv(dev); - /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ - lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block), - &lp->init_dma_addr); + /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */ + lp->init_block = dma_alloc_coherent(&pdev->dev, + sizeof(*lp->init_block), + &lp->init_dma_addr, GFP_KERNEL); if (!lp->init_block) { if (pcnet32_debug & NETIF_MSG_PROBE) - pr_err("Consistent memory allocation failed\n"); + pr_err("Coherent memory allocation failed\n"); ret = -ENOMEM; goto err_free_netdev; } @@ -1998,8 +1990,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) err_free_ring: pcnet32_free_ring(dev); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); err_free_netdev: free_netdev(dev); err_release_region: @@ -2012,21 +2004,19 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name) { struct pcnet32_private *lp = netdev_priv(dev); - lp->tx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - lp->tx_ring_size, - &lp->tx_ring_dma_addr); + lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + &lp->tx_ring_dma_addr, GFP_KERNEL); if (lp->tx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); return -ENOMEM; } - lp->rx_ring = pci_alloc_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - lp->rx_ring_size, - &lp->rx_ring_dma_addr); + lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, + &lp->rx_ring_dma_addr, GFP_KERNEL); if (lp->rx_ring == NULL) { - netif_err(lp, drv, dev, "Consistent memory allocation failed\n"); + netif_err(lp, drv, dev, "Coherent memory allocation failed\n"); return -ENOMEM; } @@ -2070,18 +2060,16 @@ static void pcnet32_free_ring(struct net_device *dev) lp->rx_dma_addr = NULL; if (lp->tx_ring) { - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_tx_head) * - lp->tx_ring_size, lp->tx_ring, - lp->tx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, + lp->tx_ring, lp->tx_ring_dma_addr); lp->tx_ring = NULL; } if (lp->rx_ring) { - pci_free_consistent(lp->pci_dev, - sizeof(struct pcnet32_rx_head) * - lp->rx_ring_size, lp->rx_ring, - lp->rx_ring_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, + sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, + lp->rx_ring, lp->rx_ring_dma_addr); lp->rx_ring = NULL; } } @@ -2342,12 +2330,11 @@ static void pcnet32_purge_tx_ring(struct net_device *dev) lp->tx_ring[i].status = 0; /* CPU owns buffer */ wmb(); /* Make sure adapter sees owner change */ if (lp->tx_skbuff[i]) { - if (!pci_dma_mapping_error(lp->pci_dev, - lp->tx_dma_addr[i])) - pci_unmap_single(lp->pci_dev, + if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i])) + dma_unmap_single(&lp->pci_dev->dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, - PCI_DMA_TODEVICE); + 
DMA_TO_DEVICE); dev_kfree_skb_any(lp->tx_skbuff[i]); } lp->tx_skbuff[i] = NULL; @@ -2382,10 +2369,9 @@ static int pcnet32_init_ring(struct net_device *dev) rmb(); if (lp->rx_dma_addr[i] == 0) { lp->rx_dma_addr[i] = - pci_map_single(lp->pci_dev, rx_skbuff->data, - PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(lp->pci_dev, - lp->rx_dma_addr[i])) { + dma_map_single(&lp->pci_dev->dev, rx_skbuff->data, + PKT_BUF_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) { /* there is not much we can do at this point */ netif_err(lp, drv, dev, "%s pci dma mapping error\n", @@ -2523,8 +2509,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb, lp->tx_ring[entry].misc = 0x00000000; lp->tx_dma_addr[entry] = - pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) { + dma_map_single(&lp->pci_dev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) { dev_kfree_skb_any(skb); dev->stats.tx_dropped++; goto drop_packet; @@ -2947,8 +2934,8 @@ static void pcnet32_remove_one(struct pci_dev *pdev) unregister_netdev(dev); pcnet32_free_ring(dev); release_region(dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); free_netdev(dev); pci_disable_device(pdev); } @@ -3030,8 +3017,8 @@ static void __exit pcnet32_cleanup_module(void) unregister_netdev(pcnet32_dev); pcnet32_free_ring(pcnet32_dev); release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); - pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block), - lp->init_block, lp->init_dma_addr); + dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block), + lp->init_block, lp->init_dma_addr); free_netdev(pcnet32_dev); pcnet32_dev = next_dev; } From bfe4c40333065639cf0f364b8ea9e93270781699 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Mon, 13 Jul 2020 22:48:12 +0200 Subject: [PATCH 0945/2056] wan: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: David S. Miller --- drivers/net/wan/farsync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 7916efce7188..0196b7fd130d 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -124,7 +124,7 @@ module_param_array(fst_excluded_list, int, NULL, 0); /* The Am186CH/CC processors support a SmartDMA mode using circular pools * of buffer descriptors. The structure is almost identical to that used * in the LANCE Ethernet controllers. 
Details available as PDF from the - * AMD web site: http://www.amd.com/products/epd/processors/\ + * AMD web site: https://www.amd.com/products/epd/processors/\ * 2.16bitcont/3.am186cxfa/a21914/21914.pdf */ struct txdesc { /* Transmit descriptor */ From f7d40ee7eff8fde2b0701b80c2aa3ba354f8f520 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 13 Jul 2020 20:55:48 -0700 Subject: [PATCH 0946/2056] selftests/bpf: Fix merge conflict resolution Remove double definition of structs. Fixes: 71930d61025e ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/progs/bpf_iter_netlink.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index b9c2756c5c97..7de98a68599a 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -7,20 +7,6 @@ char _license[] SEC("license") = "GPL"; -#define sk_rmem_alloc sk_backlog.rmem_alloc -#define sk_refcnt __sk_common.skc_refcnt - -struct bpf_iter_meta { - struct seq_file *seq; - __u64 session_id; - __u64 seq_num; -} __attribute__((preserve_access_index)); - -struct bpf_iter__netlink { - struct bpf_iter_meta *meta; - struct netlink_sock *sk; -} __attribute__((preserve_access_index)); - static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket) { return &container_of(socket, struct socket_alloc, socket)->vfs_inode; From 698bae2e6ea139cd67debf7e8eb064056783b5bf Mon Sep 17 00:00:00 2001 From: Raveendran Somu Date: Thu, 4 Jun 2020 02:18:31 -0500 Subject: [PATCH 0947/2056] brcmfmac: To fix kernel crash on out of boundary access To truncate the additional bytes, if extra bytes have been received. Current code only have a warning and proceed without handling it. But in one of the crash reported by DVT, these causes the crash intermittently. So the processing is limit to the skb->len. Signed-off-by: Raveendran Somu Signed-off-by: Chi-hsien Lin Signed-off-by: Wright Feng Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-2-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 09701262330d..531fe9be4025 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1843,6 +1843,9 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb) WARN_ON(siglen > skb->len); + if (siglen > skb->len) + siglen = skb->len; + if (!siglen) return; /* if flow control disabled, skip to packet data and leave */ From 1eb4e9f629981941f3e03c65eee2ee7178a62d4e Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 4 Jun 2020 02:18:32 -0500 Subject: [PATCH 0948/2056] brcmfmac: fix invalid permanent MAC address in wiphy When host driver retrieves mac addresses from dongle, driver copies memory from drvr->mac to perm_addr. But at the moment, drvr->mac is all zero array which causes permanent MAC address in wiphy is all zero as well. To fix this, we set drvr->mac before setting perm_addr. 
Signed-off-by: Wright Feng Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-3-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index dec25e415619..e3758bd86acf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -209,8 +209,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err); goto done; } - memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac)); + memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); bus = ifp->drvr->bus_if; ri = &ifp->drvr->revinfo; From eccbf46b15bb3e35d004148f7c3a8fa8e9b26c1e Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 4 Jun 2020 02:18:33 -0500 Subject: [PATCH 0949/2056] brcmfmac: keep SDIO watchdog running when console_interval is non-zero brcmfmac host driver makes SDIO bus sleep and stops SDIO watchdog if no pending event or data. As a result, host driver does not poll firmware console buffer before buffer overflow, which leads to missing firmware logs. We should not stop SDIO watchdog if console_interval is non-zero in debug build. Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-4-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 310d8075f5d7..bc02168ebb53 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3699,7 +3699,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) if (bus->idlecount > bus->idletime) { brcmf_dbg(SDIO, "idle\n"); sdio_claim_host(bus->sdiodev->func1); - brcmf_sdio_wd_timer(bus, false); +#ifdef DEBUG + if (!BRCMF_FWCON_ON() || + bus->console_interval == 0) +#endif + brcmf_sdio_wd_timer(bus, false); bus->idlecount = 0; brcmf_sdio_bus_sleep(bus, true, false); sdio_release_host(bus->sdiodev->func1); From ec3428bb891512fb766fb266b3127aebaa2edc36 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 4 Jun 2020 02:18:34 -0500 Subject: [PATCH 0950/2056] brcmfmac: reduce maximum station interface from 2 to 1 in RSDB mode The firmware state machines are not fully suitable for concurrent station interface support, it may hit unexpected error if we have 2 different SSIDs and the roaming scenarios concurrently. To avoid the bad user-experience if this is not fully validated, we dis-allow user to create two concurrent station interfaces. 
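The limits being tightened here are the plain ieee80211_iface_limit / ieee80211_iface_combination tables that brcmf_setup_ifmodes() hands to cfg80211. As a rough sketch of how such a table expresses "#STA <= 1, #AP <= 2, channels = 2" (the array names and totals below are illustrative only, not the driver's real tables, and the real code also accounts for P2P-device, P2P-client/GO and monitor interfaces):

/* sketch only; assumes <net/cfg80211.h> */
static const struct ieee80211_iface_limit sta_ap_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 2, .types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination sta_ap_combo = {
	.limits			= sta_ap_limits,
	.n_limits		= ARRAY_SIZE(sta_ap_limits),
	.max_interfaces		= 3,	/* at most three interfaces in total */
	.num_different_channels	= 2,	/* RSDB: two channels concurrently */
};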
Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-5-wright.feng@cypress.com --- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index a757abd7a599..8c1801fb59e7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6801,7 +6801,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { * #AP <= 4, matching BI, channels = 1, 4 total * * no p2p and rsdb: - * #STA <= 2, #AP <= 2, channels = 2, 4 total + * #STA <= 1, #AP <= 2, channels = 2, 4 total * * p2p, no mchan, and mbss: * @@ -6816,7 +6816,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { * #AP <= 4, matching BI, channels = 1, 4 total * * p2p, rsdb, and no mbss: - * #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2, + * #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2, * channels = 2, 4 total */ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) @@ -6857,7 +6857,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) goto err; combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan)); - c0_limits[i].max = 1 + rsdb; + c0_limits[i].max = 1; c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION); if (mon_flag) { c0_limits[i].max = 1; @@ -6873,7 +6873,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (p2p && rsdb) { c0_limits[i].max = 2; c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); - combo[c].max_interfaces = 5; + combo[c].max_interfaces = 4; } else if (p2p) { combo[c].max_interfaces = i; } else if (rsdb) { From fa3266541b13f390eb35bdbc38ff4a03368be004 Mon Sep 17 00:00:00 2001 From: Prasanna Kerekoppa Date: Thu, 4 Jun 2020 02:18:35 -0500 Subject: [PATCH 0951/2056] brcmfmac: To fix Bss Info flag definition Bug Bss info flag definition need to be fixed from 0x2 to 0x4 This flag is for rssi info received on channel. All Firmware branches defined as 0x4 and this is bug in brcmfmac. 
Signed-off-by: Prasanna Kerekoppa Signed-off-by: Chi-hsien Lin Signed-off-by: Wright Feng Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604071835.3842-6-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index de0ef1b545c4..2e31cc10c195 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -19,7 +19,7 @@ #define BRCMF_ARP_OL_PEER_AUTO_REPLY 0x00000008 #define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */ -#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0002 +#define BRCMF_BSS_RSSI_ON_CHANNEL 0x0004 #define BRCMF_STA_BRCM 0x00000001 /* Running a Broadcom driver */ #define BRCMF_STA_WME 0x00000002 /* WMM association */ From 55a48c7ec75ad8a1f4e39d271e2b5aee21150f65 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 13 Jul 2020 15:42:36 +0800 Subject: [PATCH 0952/2056] ip_vti: not register vti_ipip_handler twice An xfrm_tunnel object is linked into the list when registering, so vti_ipip_handler can not be registered twice, otherwise its next pointer will be overwritten on the second time. So this patch is to define a new xfrm_tunnel object to register for AF_INET6. Fixes: e6ce64570f24 ("ip_vti: support IPIP6 tunnel processing") Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index c0b97b8f6fbd..3e5d54517145 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -484,6 +484,13 @@ static struct xfrm_tunnel vti_ipip_handler __read_mostly = { .err_handler = vti4_err, .priority = 0, }; + +static struct xfrm_tunnel vti_ipip6_handler __read_mostly = { + .handler = vti_rcv_tunnel, + .cb_handler = vti_rcv_cb, + .err_handler = vti4_err, + .priority = 0, +}; #endif static int __net_init vti_init_net(struct net *net) @@ -660,7 +667,7 @@ static int __init vti_init(void) if (err < 0) goto xfrm_tunnel_ipip_failed; #if IS_ENABLED(CONFIG_IPV6) - err = xfrm4_tunnel_register(&vti_ipip_handler, AF_INET6); + err = xfrm4_tunnel_register(&vti_ipip6_handler, AF_INET6); if (err < 0) goto xfrm_tunnel_ipip6_failed; #endif @@ -676,7 +683,7 @@ static int __init vti_init(void) rtnl_link_failed: #if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) #if IS_ENABLED(CONFIG_IPV6) - xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6); + xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6); xfrm_tunnel_ipip6_failed: #endif xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); @@ -699,7 +706,7 @@ static void __exit vti_fini(void) rtnl_link_unregister(&vti_link_ops); #if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) #if IS_ENABLED(CONFIG_IPV6) - xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET6); + xfrm4_tunnel_deregister(&vti_ipip6_handler, AF_INET6); #endif xfrm4_tunnel_deregister(&vti_ipip_handler, AF_INET); #endif From a8757147905ea5adee78c7a813fc080a4124f248 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 13 Jul 2020 15:42:37 +0800 Subject: [PATCH 0953/2056] ip6_vti: not register vti_ipv6_handler twice An xfrm6_tunnel object is linked into the list when registering, so vti_ipv6_handler can not be registered twice, otherwise its next pointer will be overwritten on the second time. 
So this patch is to define a new xfrm6_tunnel object to register for AF_INET. Fixes: 2ab110cbb0c0 ("ip6_vti: support IP6IP tunnel processing") Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index dfa93bc857d2..18ec4ab45be7 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1236,6 +1236,13 @@ static struct xfrm6_tunnel vti_ipv6_handler __read_mostly = { .err_handler = vti6_err, .priority = 0, }; + +static struct xfrm6_tunnel vti_ip6ip_handler __read_mostly = { + .handler = vti6_rcv_tunnel, + .cb_handler = vti6_rcv_cb, + .err_handler = vti6_err, + .priority = 0, +}; #endif /** @@ -1268,7 +1275,7 @@ static int __init vti6_tunnel_init(void) err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET6); if (err < 0) goto vti_tunnel_ipv6_failed; - err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET); + err = xfrm6_tunnel_register(&vti_ip6ip_handler, AF_INET); if (err < 0) goto vti_tunnel_ip6ip_failed; #endif @@ -1282,7 +1289,7 @@ static int __init vti6_tunnel_init(void) rtnl_link_failed: #if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) - err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET); + err = xfrm6_tunnel_deregister(&vti_ip6ip_handler, AF_INET); vti_tunnel_ip6ip_failed: err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); vti_tunnel_ipv6_failed: @@ -1306,7 +1313,7 @@ static void __exit vti6_tunnel_cleanup(void) { rtnl_link_unregister(&vti6_link_ops); #if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) - xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET); + xfrm6_tunnel_deregister(&vti_ip6ip_handler, AF_INET); xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); #endif xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP); From 8b404f46dd6ab845d0fa794679329d4089281ccb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 13 Jul 2020 15:42:38 +0800 Subject: [PATCH 0954/2056] xfrm: interface: not xfrmi_ipv6/ipip_handler twice As we did in the last 2 patches for vti(6), this patch is to define a new xfrm_tunnel object 'xfrmi_ipip6_handler' to register for AF_INET6, and a new xfrm6_tunnel object 'xfrmi_ip6ip_handler' to register for AF_INET. 
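The root cause in all three of these patches is the same: each handler struct carries its own next pointer, so one instance cannot sit on two registration lists, and the second xfrm*_tunnel_register() call simply overwrites the linkage set up by the first. A minimal standalone illustration of that failure mode (plain userspace C with made-up names, not the kernel's xfrm code):

#include <stdio.h>

struct handler {
	const char *name;
	struct handler *next;	/* embedded linkage, as in the xfrm handlers */
};

static struct handler *list_v4, *list_v6;

static void register_on(struct handler **head, struct handler *h)
{
	h->next = *head;	/* a second registration overwrites this */
	*head = h;
}

int main(void)
{
	static struct handler a = { .name = "a" };
	static struct handler shared = { .name = "shared" };

	register_on(&list_v4, &a);
	register_on(&list_v4, &shared);	/* v4 list is now: shared -> a */
	register_on(&list_v6, &shared);	/* shared->next is redirected to the v6 list */

	for (struct handler *h = list_v4; h; h = h->next)
		printf("v4 sees: %s\n", h->name);	/* only "shared"; "a" is lost */
	return 0;
}

Registering a second, separate object per address family, as these patches do, keeps the two lists independent.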
Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 96496fdfe3ce..63a52b4b6ea9 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -830,6 +830,13 @@ static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = { .err_handler = xfrmi6_err, .priority = -1, }; + +static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = { + .handler = xfrmi6_rcv_tunnel, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi6_err, + .priority = -1, +}; #endif static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = { @@ -868,6 +875,13 @@ static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = { .err_handler = xfrmi4_err, .priority = -1, }; + +static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = { + .handler = xfrmi4_rcv_tunnel, + .cb_handler = xfrmi_rcv_cb, + .err_handler = xfrmi4_err, + .priority = -1, +}; #endif static int __init xfrmi4_init(void) @@ -887,7 +901,7 @@ static int __init xfrmi4_init(void) err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET); if (err < 0) goto xfrm_tunnel_ipip_failed; - err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET6); + err = xfrm4_tunnel_register(&xfrmi_ipip6_handler, AF_INET6); if (err < 0) goto xfrm_tunnel_ipip6_failed; #endif @@ -911,7 +925,7 @@ static int __init xfrmi4_init(void) static void xfrmi4_fini(void) { #if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) - xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET6); + xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6); xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET); #endif xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); @@ -936,7 +950,7 @@ static int __init xfrmi6_init(void) err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6); if (err < 0) goto xfrm_tunnel_ipv6_failed; - err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET); + err = xfrm6_tunnel_register(&xfrmi_ip6ip_handler, AF_INET); if (err < 0) goto xfrm_tunnel_ip6ip_failed; #endif @@ -960,7 +974,7 @@ static int __init xfrmi6_init(void) static void xfrmi6_fini(void) { #if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) - xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET); + xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET); xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6); #endif xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); From 683608bde0308b1a2143e4fce579c744e18ae229 Mon Sep 17 00:00:00 2001 From: Raveendran Somu Date: Wed, 10 Jun 2020 10:21:01 -0500 Subject: [PATCH 0955/2056] brcmfmac: allow credit borrowing for all access categories Current credit borrowing allows only the access category BE to borrow the credits. This change is to fix the credit borrowing logic, to make borrowing available for all access categories and also to borrow only from the lower categories. This fixes WFA 802.11n certs 5.2.27 failures. 
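To put illustrative numbers on the new borrowing rule (the credit counts are made up; only BRCMF_BORROW_RATIO = 3 comes from the patch): if init_fifo_credit[AC_BE] is 12, a normal borrower may take credits from AC_BE only while AC_BE still holds more than 12 / 3 = 4 of them, so every lender keeps roughly a third of its initial allocation in reserve; when brcmf_fws_dequeue_worker() calls the helper with borrow_all set, that reserve is waived and the lender can be drained to zero. Because the worker passes fifo - 1 as the highest allowed lender, borrowing with the usual BK < BE < VI < VO ordering only ever flows upward: AC_VI may borrow from AC_BK and AC_BE, but never from AC_VO.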
Signed-off-by: Raveendran Somu Signed-off-by: Jia-Shyr Chuang Signed-off-by: Chung-Hsien Hsu Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-2-chi-hsien.lin@cypress.com --- .../broadcom/brcm80211/brcmfmac/fwsignal.c | 61 +++++++++++-------- 1 file changed, 37 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 531fe9be4025..91edde188a9d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -383,6 +383,7 @@ struct brcmf_fws_mac_descriptor { }; #define BRCMF_FWS_HANGER_MAXITEMS 3072 +#define BRCMF_BORROW_RATIO 3 /** * enum brcmf_fws_hanger_item_state - state of hanger item. @@ -479,7 +480,8 @@ struct brcmf_fws_info { u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT]; int fifo_credit[BRCMF_FWS_FIFO_COUNT]; int init_fifo_credit[BRCMF_FWS_FIFO_COUNT]; - int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1]; + int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1] + [BRCMF_FWS_FIFO_AC_VO + 1]; int deq_node_pos[BRCMF_FWS_FIFO_COUNT]; u32 fifo_credit_map; u32 fifo_delay_map; @@ -1185,13 +1187,11 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, if (!credits) return; - fws->fifo_credit_map |= 1 << fifo; - - if ((fifo == BRCMF_FWS_FIFO_AC_BE) && - (fws->credits_borrowed[0])) { + if (fifo > BRCMF_FWS_FIFO_AC_BK && + fifo <= BRCMF_FWS_FIFO_AC_VO) { for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; lender_ac--) { - borrowed = &fws->credits_borrowed[lender_ac]; + borrowed = &fws->credits_borrowed[fifo][lender_ac]; if (*borrowed) { fws->fifo_credit_map |= (1 << lender_ac); fifo_credit = &fws->fifo_credit[lender_ac]; @@ -1208,7 +1208,11 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, } } - fws->fifo_credit[fifo] += credits; + if (credits) { + fws->fifo_credit[fifo] += credits; + fws->fifo_credit_map |= 1 << fifo; + } + if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo]) fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo]; @@ -2008,27 +2012,31 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, } } -static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) +static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws, + int highest_lender_ac, int borrower_ac, + bool borrow_all) { - int lender_ac; + int lender_ac, borrow_limit = 0; - if (time_after(fws->borrow_defer_timestamp, jiffies)) { - fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); - return -ENAVAIL; - } + for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) { - for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { - if (fws->fifo_credit[lender_ac] > 0) { - fws->credits_borrowed[lender_ac]++; + if (!borrow_all) + borrow_limit = + fws->init_fifo_credit[lender_ac] / BRCMF_BORROW_RATIO; + else + borrow_limit = 0; + + if (fws->fifo_credit[lender_ac] > borrow_limit) { + fws->credits_borrowed[borrower_ac][lender_ac]++; fws->fifo_credit[lender_ac]--; if (fws->fifo_credit[lender_ac] == 0) fws->fifo_credit_map &= ~(1 << lender_ac); - fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE); + fws->fifo_credit_map |= (1 << borrower_ac); brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac); return 0; } } - fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE); + fws->fifo_credit_map &= ~(1 << borrower_ac); return -ENAVAIL; } @@ -2219,9 +2227,10 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) } continue; } - 
while ((fws->fifo_credit[fifo] > 0) || + + while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && - (fifo == BRCMF_FWS_FIFO_BCMC))) { + (fifo == BRCMF_FWS_FIFO_BCMC))) { skb = brcmf_fws_deq(fws, fifo); if (!skb) break; @@ -2231,10 +2240,14 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) if (fws->bus_flow_blocked) break; } - if ((fifo == BRCMF_FWS_FIFO_AC_BE) && - (fws->fifo_credit[fifo] <= 0) && - (!fws->bus_flow_blocked)) { - while (brcmf_fws_borrow_credit(fws) == 0) { + + if (fifo >= BRCMF_FWS_FIFO_AC_BE && + fifo <= BRCMF_FWS_FIFO_AC_VO && + fws->fifo_credit[fifo] == 0 && + !fws->bus_flow_blocked) { + while (brcmf_fws_borrow_credit(fws, + fifo - 1, fifo, + true) == 0) { skb = brcmf_fws_deq(fws, fifo); if (!skb) { brcmf_fws_return_credits(fws, fifo, 1); From fc4aa125974d4401084eb0f4affdb92e9b75f1df Mon Sep 17 00:00:00 2001 From: Jia-Shyr Chuang Date: Wed, 10 Jun 2020 10:21:02 -0500 Subject: [PATCH 0956/2056] brcmfmac: increase message buffer size for control packets In wifi firmware, max length of IOCTL/IOVAR buffer size is 8192. Increase the message buffer max size same as wifi firmware for control packets so return buffers can come back. Signed-off-by: Soontak Lee Signed-off-by: Jia-Shyr Chuang Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-3-chi-hsien.lin@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 8bb4f1fa790e..f1a20db8daab 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -54,6 +54,7 @@ #define BRCMF_IOCTL_REQ_PKTID 0xFFFE #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048 +#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE 8192 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8 @@ -1028,7 +1029,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf, rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr; memset(rx_bufpost, 0, sizeof(*rx_bufpost)); - skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE); + skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE); if (skb == NULL) { bphy_err(drvr, "Failed to alloc SKB\n"); From b41c232d33666191a1db11befc0f040fcbe664e9 Mon Sep 17 00:00:00 2001 From: Amar Shankar Date: Wed, 10 Jun 2020 10:21:03 -0500 Subject: [PATCH 0957/2056] brcmfmac: reserve 2 credits for host tx control path It is observed that sometimes when sdiod is low in tx credits in low rssi scenarios, the data path consumes all sdiod rx all credits and there is no sdiod rx credit available for control path causing host and card to go out of sync resulting in link loss between host and card. So in order to prevent it some credits are reserved for control path. Note that TXCTL_CREDITS can't be larger than the firmware default credit update threshold 2; otherwise there will be a deadlock for both side waiting for each other. 
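To make the reservation concrete: with TXCTL_CREDITS = 2, a window of tx_max - tx_seq == 2 fails the tightened data_ok() check (2 is not greater than 2) but still passes the new txctl_ok() check (2 is non-zero), so data frames wait while a pending control frame can still be sent; a window of 3 or more satisfies both checks, and a window of 0 blocks both paths. These are just the boundary cases implied by the two checks, not measured numbers.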
Signed-off-by: Amar Shankar Signed-off-by: Jia-Shyr Chuang Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-4-chi-hsien.lin@cypress.com --- .../broadcom/brcm80211/brcmfmac/sdio.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index bc02168ebb53..1e66535e23f5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -648,6 +648,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012) }; +#define TXCTL_CREDITS 2 + static void pkt_align(struct sk_buff *p, int len, int align) { uint datalign; @@ -661,8 +663,16 @@ static void pkt_align(struct sk_buff *p, int len, int align) /* To check if there's window offered */ static bool data_ok(struct brcmf_sdio *bus) { - return (u8)(bus->tx_max - bus->tx_seq) != 0 && - ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0; + /* Reserve TXCTL_CREDITS credits for txctl */ + return (bus->tx_max - bus->tx_seq) > TXCTL_CREDITS && + ((bus->tx_max - bus->tx_seq) & 0x80) == 0; +} + +/* To check if there's window offered */ +static bool txctl_ok(struct brcmf_sdio *bus) +{ + return (bus->tx_max - bus->tx_seq) != 0 && + ((bus->tx_max - bus->tx_seq) & 0x80) == 0; } static int @@ -2668,7 +2678,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) brcmf_sdio_clrintr(bus); if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) && - data_ok(bus)) { + txctl_ok(bus)) { sdio_claim_host(bus->sdiodev->func1); if (bus->ctrl_frame_stat) { err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, @@ -2676,6 +2686,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus) bus->ctrl_frame_err = err; wmb(); bus->ctrl_frame_stat = false; + if (err) + brcmf_err("sdio ctrlframe tx failed err=%d\n", + err); } sdio_release_host(bus->sdiodev->func1); brcmf_sdio_wait_event_wakeup(bus); From d843246ee7614828ac5ff1d58b21b1fce853044e Mon Sep 17 00:00:00 2001 From: Chung-Hsien Hsu Date: Wed, 10 Jun 2020 10:21:04 -0500 Subject: [PATCH 0958/2056] brcmfmac: update tx status flags to sync with firmware There is a mismatch of tx status flag values between host and firmware. It makes the host mistake the flags and have incorrect behavior of credit returns. So update the flags to sync with the firmware ones. Signed-off-by: Chung-Hsien Hsu Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-5-chi-hsien.lin@cypress.com --- .../wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 91edde188a9d..2758e704ad54 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -323,6 +323,10 @@ struct brcmf_skbuff_cb { * firmware suppress the packet as device is already in PS mode. * @BRCMF_FWS_TXSTATUS_FW_TOSSED: * firmware tossed the packet. + * @BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK: + * firmware tossed the packet after retries. + * @BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED: + * firmware wrongly reported suppressed previously, now fixing to acked. * @BRCMF_FWS_TXSTATUS_HOST_TOSSED: * host tossed the packet. 
*/ @@ -331,6 +335,8 @@ enum brcmf_fws_txstatus { BRCMF_FWS_TXSTATUS_CORE_SUPPRESS, BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS, BRCMF_FWS_TXSTATUS_FW_TOSSED, + BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK, + BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED, BRCMF_FWS_TXSTATUS_HOST_TOSSED }; @@ -1455,6 +1461,10 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, remove_from_hanger = false; } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) fws->stats.txs_tossed += compcnt; + else if (flags == BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK) + fws->stats.txs_discard += compcnt; + else if (flags == BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED) + fws->stats.txs_discard += compcnt; else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) fws->stats.txs_host_tossed += compcnt; else From bbf7ae3dcb42b720943a2589cf4d0061ab3ac129 Mon Sep 17 00:00:00 2001 From: Double Lo Date: Wed, 10 Jun 2020 10:21:05 -0500 Subject: [PATCH 0959/2056] brcmfmac: fix throughput zero stalls on PM 1 mode due to credit map This patch move the credit map setting to right place to avoid brcmf_fws_return_credits() return without setting the credit map. It fix the thoughput zero stalls issue in softAP mode when STA using PM 1 mode. Signed-off-by: Double Lo Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-6-chi-hsien.lin@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 2758e704ad54..4fefa7c0b892 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1193,6 +1193,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, if (!credits) return; + fws->fifo_credit_map |= 1 << fifo; + if (fifo > BRCMF_FWS_FIFO_AC_BK && fifo <= BRCMF_FWS_FIFO_AC_VO) { for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0; @@ -1216,7 +1218,6 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, if (credits) { fws->fifo_credit[fifo] += credits; - fws->fifo_credit_map |= 1 << fifo; } if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo]) From ad96bc27032c27cf3d77b26fa71f317c67b7f940 Mon Sep 17 00:00:00 2001 From: Joseph Chuang Date: Wed, 10 Jun 2020 10:21:06 -0500 Subject: [PATCH 0960/2056] brcmfmac: initialize the requested dwell time Commit 4905432b28b7 ("brcmfmac: Fix P2P Group Formation failure via Go-neg method") did not initialize requested_dwell properly, resulting in an always-false dwell time overflow check. Fix it by setting the correct requested_dwell value. 
Fixes: 4905432b28b7 ("brcmfmac: Fix P2P Group Formation failure via Go-neg method") Reported-by: kernel test robot Signed-off-by: Joseph Chuang Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200610152106.175257-7-chi-hsien.lin@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 8c1801fb59e7..0c93a2f5ab78 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5129,7 +5129,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, &freq); chan_nr = ieee80211_frequency_to_channel(freq); af_params->channel = cpu_to_le32(chan_nr); - + af_params->dwell_time = cpu_to_le32(params->wait); memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN], le16_to_cpu(action_frame->len)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index d2795dc17c46..debd887e159e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1700,7 +1700,7 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg, return err; } -static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell, +static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell, unsigned long dwell_jiffies) { if ((requested_dwell & CUSTOM_RETRY_MASK) && @@ -1738,8 +1738,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, unsigned long dwell_jiffies = 0; bool dwell_overflow = false; - s32 requested_dwell = af_params->dwell_time; - + u32 requested_dwell = le32_to_cpu(af_params->dwell_time); action_frame = &af_params->action_frame; action_frame_len = le16_to_cpu(action_frame->len); From d9429d03b6bcf32430e96295febdd41ee39cc0e8 Mon Sep 17 00:00:00 2001 From: Soontak Lee Date: Thu, 18 Jun 2020 11:07:37 -0500 Subject: [PATCH 0961/2056] brcmfmac: Fix for unable to return to visible SSID Unable to change back to visiable SSID because there is no disable hidden ssid routine. Signed-off-by: Soontak Lee Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200618160739.21457-2-chi-hsien.lin@cypress.com --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 0c93a2f5ab78..c698a559951d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4835,12 +4835,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, goto exit; } - if (settings->hidden_ssid) { - err = brcmf_fil_iovar_int_set(ifp, "closednet", 1); - if (err) { - bphy_err(drvr, "closednet error (%d)\n", err); - goto exit; - } + err = brcmf_fil_iovar_int_set(ifp, "closednet", + settings->hidden_ssid); + if (err) { + bphy_err(drvr, "%s closednet error (%d)\n", + settings->hidden_ssid ? 
+ "enabled" : "disabled", + err); + goto exit; } brcmf_dbg(TRACE, "AP mode configuration complete\n"); From 1b050d9711f9ce3657d00cca48d767d1ac622b6d Mon Sep 17 00:00:00 2001 From: Soontak Lee Date: Thu, 18 Jun 2020 11:07:38 -0500 Subject: [PATCH 0962/2056] brcmfmac: Fix for wrong disconnection event source information Current brcmf_link_down() always call cfg80211_disconnected() with locally_generated=1, which is not always the case. Add event source argument on link down handler and set locally_generated based on the real trigger. Signed-off-by: Soontak Lee Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200618160739.21457-3-chi-hsien.lin@cypress.com --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index c698a559951d..e7b227714167 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1387,7 +1387,8 @@ static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data, return err; } -static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) +static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason, + bool locally_generated) { struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy); struct brcmf_pub *drvr = cfg->pub; @@ -1409,7 +1410,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason) if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) || (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)) cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0, - true, GFP_KERNEL); + locally_generated, GFP_KERNEL); } clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state); clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status); @@ -1588,7 +1589,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev) return 0; } - brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING); + brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING, true); brcmf_net_setcarrier(ifp, false); brcmf_dbg(TRACE, "Exit\n"); @@ -3907,7 +3908,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy, * disassociate from AP to save power while system is * in suspended state */ - brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED); + brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED, true); /* Make sure WPA_Supplicant receives all the event * generated due to DISASSOC call to the fw to keep * the state fw and WPA_Supplicant state consistent @@ -6029,7 +6030,11 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, if (!brcmf_is_ibssmode(ifp->vif)) { brcmf_bss_connect_done(cfg, ndev, e, false); brcmf_link_down(ifp->vif, - brcmf_map_fw_linkdown_reason(e)); + brcmf_map_fw_linkdown_reason(e), + e->event_code & + (BRCMF_E_DEAUTH_IND | + BRCMF_E_DISASSOC_IND) + ? 
false : true); brcmf_init_prof(ndev_to_prof(ndev)); if (ndev != cfg_to_ndev(cfg)) complete(&cfg->vif_disabled); @@ -7182,7 +7187,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp) * from AP to save power */ if (check_vif_up(ifp->vif)) { - brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED); + brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED, true); /* Make sure WPA_Supplicant receives all the event generated due to DISASSOC call to the fw to keep From 0d9de08da52a6cbda290d125d96a0015e172186f Mon Sep 17 00:00:00 2001 From: Able Liao Date: Thu, 18 Jun 2020 11:07:39 -0500 Subject: [PATCH 0963/2056] brcmfmac: do not disconnect for disassoc frame from unconnected AP Ignore FW event if the event's BSSID is different form the BSSID of the currently connected AP. Check interface state is connected or not, if state is not connected that can ignore link down event. Signed-off-by: Able Liao Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200618160739.21457-4-chi-hsien.lin@cypress.com --- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index e7b227714167..5d99771c3f64 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6027,7 +6027,12 @@ brcmf_notify_connect_status(struct brcmf_if *ifp, brcmf_net_setcarrier(ifp, true); } else if (brcmf_is_linkdown(e)) { brcmf_dbg(CONN, "Linkdown\n"); - if (!brcmf_is_ibssmode(ifp->vif)) { + if (!brcmf_is_ibssmode(ifp->vif) && + test_bit(BRCMF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { + if (memcmp(profile->bssid, e->addr, ETH_ALEN)) + return err; + brcmf_bss_connect_done(cfg, ndev, e, false); brcmf_link_down(ifp->vif, brcmf_map_fw_linkdown_reason(e), From 7836102a750a92ddec19b7cd997a76f05c0a16ea Mon Sep 17 00:00:00 2001 From: Chi-Hsien Lin Date: Mon, 22 Jun 2020 09:48:51 -0500 Subject: [PATCH 0964/2056] brcmfmac: reset SDIO bus on a firmware crash commit 4684997d9eea ("brcmfmac: reset PCIe bus on a firmware crash") adds a reset function to recover firmware trap for PCIe bus. This commit adds an implementation for SDIO bus. 
Upon SDIO firmware trap, do below: - Remove the device - Reset hardware - Probe the device again Signed-off-by: Chi-Hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200622144851.165248-1-chi-hsien.lin@cypress.com --- .../broadcom/brcm80211/brcmfmac/bcmsdh.c | 4 +-- .../broadcom/brcm80211/brcmfmac/sdio.c | 34 ++++++++++++++++++- .../broadcom/brcm80211/brcmfmac/sdio.h | 3 ++ 3 files changed, 38 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 46346cb3bc84..1a7ab49295aa 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -863,7 +863,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev) } #endif /* CONFIG_PM_SLEEP */ -static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) +int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev) { sdiodev->state = BRCMF_SDIOD_DOWN; if (sdiodev->bus) { @@ -898,7 +898,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host) host->caps |= MMC_CAP_NONREMOVABLE; } -static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) +int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) { int ret = 0; unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 1e66535e23f5..e8712ad3ac45 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -4126,6 +4127,36 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name) return 0; } +static int brcmf_sdio_bus_reset(struct device *dev) +{ + int ret = 0; + struct brcmf_bus *bus_if = dev_get_drvdata(dev); + struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; + + brcmf_dbg(SDIO, "Enter\n"); + + /* start by unregistering irqs */ + brcmf_sdiod_intr_unregister(sdiodev); + + brcmf_sdiod_remove(sdiodev); + + /* reset the adapter */ + sdio_claim_host(sdiodev->func1); + mmc_hw_reset(sdiodev->func1->card->host); + sdio_release_host(sdiodev->func1); + + brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN); + + ret = brcmf_sdiod_probe(sdiodev); + if (ret) { + brcmf_err("Failed to probe after sdio device reset: ret %d\n", + ret); + brcmf_sdiod_remove(sdiodev); + } + + return ret; +} + static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .stop = brcmf_sdio_bus_stop, .preinit = brcmf_sdio_bus_preinit, @@ -4137,7 +4168,8 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = { .get_ramsize = brcmf_sdio_bus_get_ramsize, .get_memdump = brcmf_sdio_bus_get_memdump, .get_fwname = brcmf_sdio_get_fwname, - .debugfs_create = brcmf_sdio_debugfs_create + .debugfs_create = brcmf_sdio_debugfs_create, + .reset = brcmf_sdio_bus_reset }; #define BRCMF_SDIO_FW_CODE 0 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 163fd664780a..12108927fb50 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -367,6 +367,9 @@ static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev) } #endif /* CONFIG_PM_SLEEP */ +int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev); +int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev); + struct 
brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev); void brcmf_sdio_remove(struct brcmf_sdio *bus); void brcmf_sdio_isr(struct brcmf_sdio *bus); From fcdd7a875def793c38d7369633af3eba6c7cf089 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Wed, 24 Jun 2020 04:16:07 -0500 Subject: [PATCH 0965/2056] brcmfmac: set state of hanger slot to FREE when flushing PSQ When USB or SDIO device got abnormal bus disconnection, host driver tried to clean up the skbs in PSQ and TXQ (The skb's pointer in hanger slot linked to PSQ and TSQ), so we should set the state of skb hanger slot to BRCMF_FWS_HANGER_ITEM_STATE_FREE before freeing skb. In brcmf_fws_bus_txq_cleanup it already sets BRCMF_FWS_HANGER_ITEM_STATE_FREE before freeing skb, therefore we add the same thing in brcmf_fws_psq_flush to avoid following warning message. [ 1580.012880] ------------ [ cut here ]------------ [ 1580.017550] WARNING: CPU: 3 PID: 3065 at drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c:49 brcmu_pkt_buf_free_skb+0x21/0x30 [brcmutil] [ 1580.184017] Call Trace: [ 1580.186514] brcmf_fws_cleanup+0x14e/0x190 [brcmfmac] [ 1580.191594] brcmf_fws_del_interface+0x70/0x90 [brcmfmac] [ 1580.197029] brcmf_proto_bcdc_del_if+0xe/0x10 [brcmfmac] [ 1580.202418] brcmf_remove_interface+0x69/0x190 [brcmfmac] [ 1580.207888] brcmf_detach+0x90/0xe0 [brcmfmac] [ 1580.212385] brcmf_usb_disconnect+0x76/0xb0 [brcmfmac] [ 1580.217557] usb_unbind_interface+0x72/0x260 [ 1580.221857] device_release_driver_internal+0x141/0x200 [ 1580.227152] device_release_driver+0x12/0x20 [ 1580.231460] bus_remove_device+0xfd/0x170 [ 1580.235504] device_del+0x1d9/0x300 [ 1580.239041] usb_disable_device+0x9e/0x270 [ 1580.243160] usb_disconnect+0x94/0x270 [ 1580.246980] hub_event+0x76d/0x13b0 [ 1580.250499] process_one_work+0x144/0x360 [ 1580.254564] worker_thread+0x4d/0x3c0 [ 1580.258247] kthread+0x109/0x140 [ 1580.261515] ? rescuer_thread+0x340/0x340 [ 1580.265543] ? kthread_park+0x60/0x60 [ 1580.269237] ? 
SyS_exit_group+0x14/0x20 [ 1580.273118] ret_from_fork+0x25/0x30 [ 1580.300446] ------------ [ cut here ]------------ Acked-by: Arend van Spriel Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200624091608.25154-2-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 4fefa7c0b892..2df6811c066e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -629,6 +629,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h, static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, int ifidx) { + struct brcmf_fws_hanger_item *hi; bool (*matchfn)(struct sk_buff *, void *) = NULL; struct sk_buff *skb; int prec; @@ -640,6 +641,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q, skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx); while (skb) { hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); + hi = &fws->hanger.items[hslot]; + WARN_ON(skb != hi->pkt); + hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE; brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, true); brcmu_pkt_buf_free_skb(skb); From 2fa8085fc6daa255c591e754f66ba98035b797c3 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Wed, 24 Jun 2020 04:16:08 -0500 Subject: [PATCH 0966/2056] brcmfmac: set pacing shift before transmitting skb to bus Linux 3.6 introduces TSQ which has a per socket threshold for TCP Tx packet to reduce latency. In flow control mode, host driver enqueues skb in hanger and TCP doesn't push new skb frees until host frees the skb when receiving fwstatus event. So set pacing shift 8 to send them as a single large aggregate frame to the bus layer. 43455 TX TCP throughput in different FC modes on Linux 5.4.18 sk_pacing_shift : Throughput (fcmode=0) 10: 245 Mbps 9: 245 Mbps 8: 246 Mbps 7: 246 Mbps sk_pacing_shift : Throughput (fcmode=1) 10: 182 Mbps 9: 197 Mbps 8: 206 Mbps 7: 207 Mbps sk_pacing_shift : Throughput (fcmode=2) 10: 180 Mbps 9: 197 Mbps 8: 206 Mbps 7: 207 Mbps Signed-off-by: Wright Feng Signed-off-by: Chi-hsien Lin Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200624091608.25154-3-wright.feng@cypress.com --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index c88655acc78c..f89010a81ffb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -352,6 +352,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, if ((skb->priority == 0) || (skb->priority > 7)) skb->priority = cfg80211_classify8021d(skb, NULL); + /* set pacing shift for packet aggregation */ + sk_pacing_shift_update(skb->sk, 8); + ret = brcmf_proto_tx_queue_data(drvr, ifp->ifidx, skb); if (ret < 0) brcmf_txfinalize(ifp, skb, false); From 29e354ebeeecaee979e6fe22cd6272682d7552c9 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Wed, 1 Jul 2020 13:22:00 +0200 Subject: [PATCH 0967/2056] brcmfmac: Transform compatible string for FW loading The driver relies on the compatible string from DT to determine which FW configuration file it should load. 
The DTS spec allows for '/' as part of the compatible string. We change this to '-' so that we will still be able to load the config file, even when the compatible has a '/'. This fixes explicitly the firmware loading for "solidrun,cubox-i/q". Signed-off-by: Matthias Brugger Reviewed-by: Hans deGoede Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200701112201.6449-1-matthias.bgg@kernel.org --- .../wireless/broadcom/brcm80211/brcmfmac/of.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index b886b56a5e5a..a7554265f95f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -17,7 +17,6 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, { struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; struct device_node *root, *np = dev->of_node; - struct property *prop; int irq; u32 irqf; u32 val; @@ -25,8 +24,22 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, /* Set board-type to the first string of the machine compatible prop */ root = of_find_node_by_path("/"); if (root) { - prop = of_find_property(root, "compatible", NULL); - settings->board_type = of_prop_next_string(prop, NULL); + int i, len; + char *board_type; + const char *tmp; + + of_property_read_string_index(root, "compatible", 0, &tmp); + + /* get rid of '/' in the compatible string to be able to find the FW */ + len = strlen(tmp) + 1; + board_type = devm_kzalloc(dev, len, GFP_KERNEL); + strscpy(board_type, tmp, len); + for (i = 0; i < board_type[i]; i++) { + if (board_type[i] == '/') + board_type[i] = '-'; + } + settings->board_type = board_type; + of_node_put(root); } From b424808115cb0141da851082d84ede20caa2215c Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 11 Jul 2020 23:01:50 +0200 Subject: [PATCH 0968/2056] brcm80211: brcmsmac: Move LEDs to GPIO descriptors The code in the BRCM80211 BRCMSMAC driver is using the legacy GPIO API to to a complex check of the validity of the base of the GPIO chip and whether it is present at all and then adding an offset to the base of the chip. Use the existing function to obtain a GPIO line internally from a GPIO chip so we can use the offset directly and modernize the code to use GPIO descriptors instead of integers from the global GPIO numberspace. 
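A compressed before/after sketch of the consumer pattern this change moves to (function and variable names here are illustrative, and the fragments assume the usual <linux/gpio.h>, <linux/gpio/driver.h>, <linux/gpio/consumer.h> and <linux/err.h> headers; in the driver the chip comes from bcma and the hardware offset from SPROM):

/* legacy: global number = chip base + hw offset, polarity tracked by hand */
static int radio_led_on_legacy(struct gpio_chip *chip, unsigned int hwnum,
			       bool active_low)
{
	unsigned int gpio = chip->base + hwnum;
	int err = gpio_request_one(gpio, active_low ? GPIOF_OUT_INIT_HIGH
						    : GPIOF_OUT_INIT_LOW,
				   "radio on");
	if (err)
		return err;
	gpio_set_value(gpio, active_low ? 0 : 1);
	return 0;
}

/* descriptor: polarity lives in the lookup flags, callers just say "asserted" */
static int radio_led_on_desc(struct gpio_chip *chip, unsigned int hwnum,
			     bool active_low)
{
	struct gpio_desc *d;

	d = gpiochip_request_own_desc(chip, hwnum, "radio on",
				      active_low ? GPIO_ACTIVE_LOW
						 : GPIO_ACTIVE_HIGH,
				      GPIOD_OUT_LOW);
	if (IS_ERR(d))
		return PTR_ERR(d);
	gpiod_set_value(d, 1);	/* logical "on"; the core applies the polarity */
	return 0;
}

This is why the driver no longer needs to keep its own active_low flag or invert the value before every set call.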
Cc: Wright Feng Cc: Frank Kao Cc: Kalle Valo Signed-off-by: Linus Walleij Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200711210150.4943-1-linus.walleij@linaro.org --- .../broadcom/brcm80211/brcmsmac/led.c | 62 ++++++++----------- .../broadcom/brcm80211/brcmsmac/led.h | 6 +- 2 files changed, 29 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c index c0a5449ed72c..c1b9ac692d26 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 #include #include -#include +#include +#include +#include #include "mac80211_if.h" #include "pub.h" @@ -19,16 +21,13 @@ static void brcms_radio_led_ctrl(struct brcms_info *wl, bool state) { - if (wl->radio_led.gpio == -1) + if (!wl->radio_led.gpiod) return; - if (wl->radio_led.active_low) - state = !state; - if (state) - gpio_set_value(wl->radio_led.gpio, 1); + gpiod_set_value(wl->radio_led.gpiod, 1); else - gpio_set_value(wl->radio_led.gpio, 0); + gpiod_set_value(wl->radio_led.gpiod, 0); } @@ -45,8 +44,8 @@ void brcms_led_unregister(struct brcms_info *wl) { if (wl->led_dev.dev) led_classdev_unregister(&wl->led_dev); - if (wl->radio_led.gpio != -1) - gpio_free(wl->radio_led.gpio); + if (wl->radio_led.gpiod) + gpiochip_free_own_desc(wl->radio_led.gpiod); } int brcms_led_register(struct brcms_info *wl) @@ -61,12 +60,8 @@ int brcms_led_register(struct brcms_info *wl) &sprom->gpio1, &sprom->gpio2, &sprom->gpio3 }; - unsigned gpio = -1; - bool active_low = false; - - /* none by default */ - radio_led->gpio = -1; - radio_led->active_low = false; + int hwnum = -1; + enum gpio_lookup_flags lflags = GPIO_ACTIVE_HIGH; if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base)) return -ENODEV; @@ -75,30 +70,26 @@ int brcms_led_register(struct brcms_info *wl) for (i = 0; i < BRCMS_LED_NO; i++) { u8 led = *leds[i]; if ((led & BRCMS_LED_BEH_MASK) == BRCMS_LED_RADIO) { - gpio = bcma_gpio->base + i; + hwnum = i; if (led & BRCMS_LED_AL_MASK) - active_low = true; + lflags = GPIO_ACTIVE_LOW; break; } } - if (gpio == -1 || !gpio_is_valid(gpio)) + /* No LED, bail out */ + if (hwnum == -1) return -ENODEV; - /* request and configure LED gpio */ - err = gpio_request_one(gpio, - active_low ? 
GPIOF_OUT_INIT_HIGH - : GPIOF_OUT_INIT_LOW, - "radio on"); - if (err) { - wiphy_err(wl->wiphy, "requesting led gpio %d failed (err: %d)\n", - gpio, err); - return err; - } - err = gpio_direction_output(gpio, 1); - if (err) { - wiphy_err(wl->wiphy, "cannot set led gpio %d to output (err: %d)\n", - gpio, err); + /* Try to obtain this LED GPIO line */ + radio_led->gpiod = gpiochip_request_own_desc(bcma_gpio, hwnum, + "radio on", lflags, + GPIOD_OUT_LOW); + + if (IS_ERR(radio_led->gpiod)) { + err = PTR_ERR(radio_led->gpiod); + wiphy_err(wl->wiphy, "requesting led GPIO failed (err: %d)\n", + err); return err; } @@ -117,11 +108,8 @@ int brcms_led_register(struct brcms_info *wl) return err; } - wiphy_info(wl->wiphy, "registered radio enabled led device: %s gpio: %d\n", - wl->radio_led.name, - gpio); - radio_led->gpio = gpio; - radio_led->active_low = active_low; + wiphy_info(wl->wiphy, "registered radio enabled led device: %s\n", + wl->radio_led.name); return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h index 17a0b1f5dbcf..d65f5c268fd7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h @@ -16,10 +16,12 @@ #ifndef _BRCM_LED_H_ #define _BRCM_LED_H_ + +struct gpio_desc; + struct brcms_led { char name[32]; - unsigned gpio; - bool active_low; + struct gpio_desc *gpiod; }; #ifdef CONFIG_BCMA_DRIVER_GPIO From 079ef53673f2e3b3ee1728800311f20f28eed4f7 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 14 Jul 2020 12:25:33 +0200 Subject: [PATCH 0969/2056] bpf: Fix build for disabled CONFIG_DEBUG_INFO_BTF option Stephen reported following linker warnings on powerpc build: ld: warning: orphan section `.BTF_ids' from `kernel/trace/bpf_trace.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/bpf/btf.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/bpf/stackmap.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `net/core/filter.o' being placed in section `.BTF_ids' ld: warning: orphan section `.BTF_ids' from `kernel/trace/bpf_trace.o' being placed in section `.BTF_ids' It's because we generated .BTF_ids section even when CONFIG_DEBUG_INFO_BTF is not enabled. Fixing this by generating empty btf_id arrays for this case. 
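As a usage illustration of why an empty array is a sufficient stub (the list name and helper below are made up, not a real call site): with CONFIG_DEBUG_INFO_BTF=y the macros emit entries in the .BTF_ids section that resolve_btfids later patches with the real BTF type ids, and with the option disabled they now fall back to a plain zeroed u32 array, so kernel code that indexes the list keeps compiling and linking either way.

#include <linux/btf_ids.h>

BTF_ID_LIST(demo_btf_ids)
BTF_ID(struct, sk_buff)
BTF_ID(struct, task_struct)

static bool demo_id_matches(u32 id)
{
	/* with CONFIG_DEBUG_INFO_BTF=n these entries simply stay 0 */
	return id == demo_btf_ids[0] || id == demo_btf_ids[1];
}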
Reported-by: Stephen Rothwell Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/bpf/20200714102534.299280-1-jolsa@kernel.org --- include/linux/btf_ids.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index fe019774f8a7..b3c73db9587c 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -3,6 +3,8 @@ #ifndef _LINUX_BTF_IDS_H #define _LINUX_BTF_IDS_H +#ifdef CONFIG_DEBUG_INFO_BTF + #include /* for __PASTE */ /* @@ -83,5 +85,12 @@ asm( \ ".zero 4 \n" \ ".popsection; \n"); +#else + +#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID(prefix, name) +#define BTF_ID_UNUSED + +#endif /* CONFIG_DEBUG_INFO_BTF */ #endif From 11bb2f7a45909f4f64afe471875672ae1b84a380 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 14 Jul 2020 12:25:34 +0200 Subject: [PATCH 0970/2056] bpf: Fix cross build for CONFIG_DEBUG_INFO_BTF option Stephen and 0-DAY CI Kernel Test Service reported broken cross build for arm (arm-linux-gnueabi-gcc (GCC) 9.3.0), with following output: /tmp/ccMS5uth.s: Assembler messages: /tmp/ccMS5uth.s:69: Error: unrecognized symbol type "" /tmp/ccMS5uth.s:82: Error: unrecognized symbol type "" Having '@object' for .type diretive is wrong because '@' is comment character for some architectures. Using STT_OBJECT instead that should work everywhere. Also using HOST* variables to build resolve_btfids so it's properly build in crossbuilds (stolen from objtool's Makefile). Reported-by: kernel test robot Reported-by: Stephen Rothwell Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Tested-by: Geert Uytterhoeven Link: https://lore.kernel.org/bpf/20200714102534.299280-2-jolsa@kernel.org --- include/linux/btf_ids.h | 2 +- tools/bpf/resolve_btfids/Makefile | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index b3c73db9587c..1cdb56950ffe 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -23,7 +23,7 @@ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ ".local " #symbol " ; \n" \ -".type " #symbol ", @object; \n" \ +".type " #symbol ", STT_OBJECT; \n" \ ".size " #symbol ", 4; \n" \ #symbol ": \n" \ ".zero 4 \n" \ diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile index 948378ca73d4..a88cd4426398 100644 --- a/tools/bpf/resolve_btfids/Makefile +++ b/tools/bpf/resolve_btfids/Makefile @@ -16,6 +16,20 @@ else MAKEFLAGS=--no-print-directory endif +# always use the host compiler +ifneq ($(LLVM),) +HOSTAR ?= llvm-ar +HOSTCC ?= clang +HOSTLD ?= ld.lld +else +HOSTAR ?= ar +HOSTCC ?= gcc +HOSTLD ?= ld +endif +AR = $(HOSTAR) +CC = $(HOSTCC) +LD = $(HOSTLD) + OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/ LIBBPF_SRC := $(srctree)/tools/lib/bpf/ From 4f5479e2ee1fc75b7c98e1e6da0dada41778618d Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 10 Jul 2020 05:18:29 +0000 Subject: [PATCH 0971/2056] MAINTAINERS: net: wilc1000: Update entry As Adham's email address is bouncing, remove him from wilc1000 entry and add Claudiu as a new co-maintainer. Claudiu follows wilc1000 driver development for a long time and contributed to it already. 
Signed-off-by: Nicolas Ferre Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710051826.3267-2-ajay.kathat@microchip.com --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 7dae51e32254..099e672ec6e6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -11363,8 +11363,8 @@ S: Supported F: drivers/usb/gadget/udc/atmel_usba_udc.* MICROCHIP WILC1000 WIFI DRIVER -M: Adham Abozaeid M: Ajay Singh +M: Claudiu Beznea L: linux-wireless@vger.kernel.org S: Supported F: drivers/net/wireless/microchip/wilc1000/ From 1b3dd7743041c88d11409c05a7d294f63ef53e44 Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 10 Jul 2020 05:18:30 +0000 Subject: [PATCH 0972/2056] wilc1000: use strlcpy to avoid 'stringop-truncation' warning Make use 'strlcpy' instead of 'strncpy' to overcome 'stringop-truncation' compiler warning. Reported-by: kernel test robot Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710051826.3267-3-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/mon.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c index 60331417bd98..358ac8601333 100644 --- a/drivers/net/wireless/microchip/wilc1000/mon.c +++ b/drivers/net/wireless/microchip/wilc1000/mon.c @@ -229,8 +229,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, return NULL; wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP; - strncpy(wl->monitor_dev->name, name, IFNAMSIZ); - wl->monitor_dev->name[IFNAMSIZ - 1] = 0; + strlcpy(wl->monitor_dev->name, name, IFNAMSIZ); wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops; wl->monitor_dev->needs_free_netdev = true; From 9bdcbdafc659d44962291a3a6b8b01511f92a0a1 Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 10 Jul 2020 05:18:30 +0000 Subject: [PATCH 0973/2056] wilc1000: fix compiler warning for 'wowlan_support' unused variable Avoid below reported warning found when 'CONFIG_PM' config is undefined. 'warning: unused variable 'wowlan_support' [-Wunused-const-variable]' Reported-by: kernel test robot Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710051826.3267-4-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/cfg80211.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index b6065a0d660f..c1ac1d84790f 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -46,9 +46,11 @@ static const struct ieee80211_txrx_stypes } }; +#ifdef CONFIG_PM static const struct wiphy_wowlan_support wowlan_support = { .flags = WIPHY_WOWLAN_ANY }; +#endif struct wilc_p2p_mgmt_data { int size; From 0b3dd675ed595c0cbeb7b5a6cca21a5e64e7123c Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 10 Jul 2020 05:18:31 +0000 Subject: [PATCH 0974/2056] wilc1000: use unified single wilc1000 FW binary Modify WILC1000 binary filename to use single unified wilc1000 FW. A single wilc1000 binary is used for different wilc1000 revisions. 
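To make the effect of the rename easier to see, here is a condensed before/after of the firmware selection path, paraphrased from the diff that follows (declarations and error handling omitted):

/* before: two build-time names, picked at runtime by chip revision */
chip_id = wilc_get_chipid(wilc, false);
firmware = (chip_id < 0x1003a0) ? FIRMWARE_1002 : FIRMWARE_1003;
request_firmware(&wilc_firmware, firmware, wilc->dev);

/* after: one unified image for all wilc1000 revisions */
request_firmware(&wilc_fw, FIRMWARE_WILC1000, wilc->dev);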
Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710051826.3267-5-ajay.kathat@microchip.com --- .../net/wireless/microchip/wilc1000/Makefile | 3 +-- .../net/wireless/microchip/wilc1000/netdev.c | 18 +++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/Makefile b/drivers/net/wireless/microchip/wilc1000/Makefile index a3305a0a888a..7d1ed5f40cbb 100644 --- a/drivers/net/wireless/microchip/wilc1000/Makefile +++ b/drivers/net/wireless/microchip/wilc1000/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_WILC1000) += wilc1000.o -ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \ - -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\" +ccflags-y += -DFIRMWARE_WILC1000=\"atmel/wilc1000_wifi_firmware.bin\" wilc1000-objs := cfg80211.o netdev.o mon.o \ hif.o wlan_cfg.o wlan.o diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index fda0ab97b02c..1005526ae896 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -176,23 +176,19 @@ static int wilc_wlan_get_firmware(struct net_device *dev) struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; int chip_id; - const struct firmware *wilc_firmware; - char *firmware; + const struct firmware *wilc_fw; chip_id = wilc_get_chipid(wilc, false); - if (chip_id < 0x1003a0) - firmware = FIRMWARE_1002; - else - firmware = FIRMWARE_1003; + netdev_info(dev, "ChipID [%x] loading firmware [%s]\n", chip_id, + FIRMWARE_WILC1000); - netdev_info(dev, "loading firmware %s\n", firmware); - - if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) { - netdev_err(dev, "%s - firmware not available\n", firmware); + if (request_firmware(&wilc_fw, FIRMWARE_WILC1000, wilc->dev) != 0) { + netdev_err(dev, "%s - firmware not available\n", + FIRMWARE_WILC1000); return -EINVAL; } - wilc->firmware = wilc_firmware; + wilc->firmware = wilc_fw; return 0; } From b52b331a8978ab5414fc68bb4a73e690f080549e Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 10 Jul 2020 05:18:31 +0000 Subject: [PATCH 0975/2056] wilc1000: use API version number info along with firmware filename Added version number info along with firmware name so driver can pick the correct revision of FW file. Moved FW filename macro as part of driver code & added MODULE_FIRMWARE to specify FW needed by module. 
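The versioned filename relies on a two-step macro indirection; a short illustration of how it expands (the actual macro definitions appear in the diff below):

/* WILC1000_FW(WILC1000_API_VER)
 *   -> __WILC1000_FW(1)                      (the argument is expanded first)
 *   -> WILC1000_FW_PREFIX "1" ".bin"         (#api stringizes the value)
 *   -> "atmel/wilc1000_wifi_firmware-1.bin"  (adjacent literals concatenate)
 *
 * Without the extra __WILC1000_FW() level, #api would stringize the macro
 * name itself and yield "atmel/wilc1000_wifi_firmware-WILC1000_API_VER.bin".
 */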
Signed-off-by: Ajay Singh Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710051826.3267-6-ajay.kathat@microchip.com --- .../net/wireless/microchip/wilc1000/Makefile | 2 -- .../net/wireless/microchip/wilc1000/netdev.c | 17 ++++++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/Makefile b/drivers/net/wireless/microchip/wilc1000/Makefile index 7d1ed5f40cbb..c3c9e34c1eaa 100644 --- a/drivers/net/wireless/microchip/wilc1000/Makefile +++ b/drivers/net/wireless/microchip/wilc1000/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_WILC1000) += wilc1000.o -ccflags-y += -DFIRMWARE_WILC1000=\"atmel/wilc1000_wifi_firmware.bin\" - wilc1000-objs := cfg80211.o netdev.o mon.o \ hif.o wlan_cfg.o wlan.o diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 1005526ae896..198354670fe1 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -15,6 +15,13 @@ #define WILC_MULTICAST_TABLE_SIZE 8 +/* latest API version supported */ +#define WILC1000_API_VER 1 + +#define WILC1000_FW_PREFIX "atmel/wilc1000_wifi_firmware-" +#define __WILC1000_FW(api) WILC1000_FW_PREFIX #api ".bin" +#define WILC1000_FW(api) __WILC1000_FW(api) + static irqreturn_t isr_uh_routine(int irq, void *user_data) { struct net_device *dev = user_data; @@ -177,15 +184,18 @@ static int wilc_wlan_get_firmware(struct net_device *dev) struct wilc *wilc = vif->wilc; int chip_id; const struct firmware *wilc_fw; + int ret; chip_id = wilc_get_chipid(wilc, false); netdev_info(dev, "ChipID [%x] loading firmware [%s]\n", chip_id, - FIRMWARE_WILC1000); + WILC1000_FW(WILC1000_API_VER)); - if (request_firmware(&wilc_fw, FIRMWARE_WILC1000, wilc->dev) != 0) { + ret = request_firmware(&wilc_fw, WILC1000_FW(WILC1000_API_VER), + wilc->dev); + if (ret != 0) { netdev_err(dev, "%s - firmware not available\n", - FIRMWARE_WILC1000); + WILC1000_FW(WILC1000_API_VER)); return -EINVAL; } wilc->firmware = wilc_fw; @@ -925,3 +935,4 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, } MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER)); From cce0e08301fe43dc3fe983d5f098393d15f803f0 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Mon, 29 Jun 2020 12:40:09 +0200 Subject: [PATCH 0976/2056] wilc1000: let wilc_mac_xmit() return NETDEV_TX_OK The method ndo_start_xmit() is defined as returning an 'netdev_tx_t', which is a typedef for an enum type defining 'NETDEV_TX_OK' but this driver returns '0' instead of 'NETDEV_TX_OK'. Fix this by returning 'NETDEV_TX_OK' instead of '0'. 
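For reference, a minimal sketch of the ndo_start_xmit() return contract being fixed here; the function name is hypothetical and the body is only an illustration, not code from this driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	/* hand the skb to the device's transmit path here */
	return NETDEV_TX_OK;	/* the named constant, never a bare 0 */
}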
Signed-off-by: Luc Van Oostenryck Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629104009.84077-1-luc.vanoostenryck@gmail.com --- drivers/net/wireless/microchip/wilc1000/netdev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 198354670fe1..20615c7ec168 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -684,14 +684,14 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) if (skb->dev != ndev) { netdev_err(ndev, "Packet not destined to this device\n"); - return 0; + return NETDEV_TX_OK; } tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC); if (!tx_data) { dev_kfree_skb(skb); netif_wake_queue(ndev); - return 0; + return NETDEV_TX_OK; } tx_data->buff = skb->data; @@ -716,7 +716,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) srcu_read_unlock(&wilc->srcu, srcu_idx); } - return 0; + return NETDEV_TX_OK; } static int wilc_mac_close(struct net_device *ndev) From a4fa458950b40d3849946daa32466392811a3716 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 14 Jul 2020 12:31:45 -0700 Subject: [PATCH 0977/2056] bpfilter: Initialize pos variable Make sure 'pos' is initialized to zero before calling kernel_write(). Fixes: d2ba09c17a06 ("net: add skeleton of bpfilter kernel module") Signed-off-by: Alexei Starovoitov --- net/bpfilter/bpfilter_kern.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 54f82c341e20..2c31e82cb953 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -48,6 +48,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, req.len = optlen; if (!bpfilter_ops.info.tgid) goto out; + pos = 0; n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), &pos); if (n != sizeof(req)) { From 9326e0f85bfaf0578d40f5357f8143ec857469f5 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 1 Jul 2020 18:26:44 +0900 Subject: [PATCH 0978/2056] bpfilter: Allow to build bpfilter_umh as a module without static library Originally, bpfilter_umh was linked with -static only when CONFIG_BPFILTER_UMH=y. Commit 8a2cc0505cc4 ("bpfilter: use 'userprogs' syntax to build bpfilter_umh") silently, accidentally dropped the CONFIG_BPFILTER_UMH=y test in the Makefile. Revive it in order to link it dynamically when CONFIG_BPFILTER_UMH=m. Since commit b1183b6dca3e ("bpfilter: check if $(CC) can link static libc in Kconfig"), the compiler must be capable of static linking to enable CONFIG_BPFILTER_UMH, but it requires more than needed. To loosen the compiler requirement, I changed the dependency as follows: depends on CC_CAN_LINK depends on m || CC_CAN_LINK_STATIC If CONFIG_CC_CAN_LINK_STATIC in unset, CONFIG_BPFILTER_UMH is restricted to 'm' or 'n'. In theory, CONFIG_CC_CAN_LINK is not required for CONFIG_BPFILTER_UMH=y, but I did not come up with a good way to describe it. 
Fixes: 8a2cc0505cc4 ("bpfilter: use 'userprogs' syntax to build bpfilter_umh") Reported-by: Michal Kubecek Signed-off-by: Masahiro Yamada Signed-off-by: Alexei Starovoitov Tested-by: Michal Kubecek Link: https://lore.kernel.org/bpf/20200701092644.762234-1-masahiroy@kernel.org --- net/bpfilter/Kconfig | 10 ++++++---- net/bpfilter/Makefile | 2 ++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig index 84015ef3ee27..73d0b12789f1 100644 --- a/net/bpfilter/Kconfig +++ b/net/bpfilter/Kconfig @@ -9,12 +9,14 @@ menuconfig BPFILTER if BPFILTER config BPFILTER_UMH tristate "bpfilter kernel module with user mode helper" - depends on CC_CAN_LINK_STATIC + depends on CC_CAN_LINK + depends on m || CC_CAN_LINK_STATIC default m help This builds bpfilter kernel module with embedded user mode helper - Note: your toolchain must support building static binaries, since - rootfs isn't mounted at the time when __init functions are called - and do_execv won't be able to find the elf interpreter. + Note: To compile this as built-in, your toolchain must support + building static binaries, since rootfs isn't mounted at the time + when __init functions are called and do_execv won't be able to find + the elf interpreter. endif diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile index f23b53294fba..cdac82b8c53a 100644 --- a/net/bpfilter/Makefile +++ b/net/bpfilter/Makefile @@ -7,10 +7,12 @@ userprogs := bpfilter_umh bpfilter_umh-objs := main.o userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi +ifeq ($(CONFIG_BPFILTER_UMH), y) # builtin bpfilter_umh should be linked with -static # since rootfs isn't mounted at the time of __init # function is called and do_execv won't find elf interpreter userldflags += -static +endif $(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh From cf7c52748f64606f5f9111e7cbdb2ffb281a60af Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:47 +0200 Subject: [PATCH 0979/2056] switchdev: mrp: Extend switchdev API for MRP Interconnect Extend switchdev API to add support for MRP interconnect. The HW is notified in the following cases: SWITCHDEV_OBJ_ID_IN_ROLE_MRP: This is used when the interconnect role of the node changes. The supported roles are MIM and MIC. SWITCHDEV_OBJ_ID_IN_STATE_MRP: This is used when the interconnect ring changes it states to open or closed. SWITCHDEV_OBJ_ID_IN_TEST_MRP: This is used to start/stop sending MRP_InTest frames on all MRP ports. This is called only on nodes that have the interconnect role MIM. Signed-off-by: Horatiu Vultur Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index b8c059b4e06d..ff2246914301 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -76,6 +76,10 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_RING_TEST_MRP, SWITCHDEV_OBJ_ID_RING_ROLE_MRP, SWITCHDEV_OBJ_ID_RING_STATE_MRP, + SWITCHDEV_OBJ_ID_IN_TEST_MRP, + SWITCHDEV_OBJ_ID_IN_ROLE_MRP, + SWITCHDEV_OBJ_ID_IN_STATE_MRP, + #endif }; @@ -155,6 +159,40 @@ struct switchdev_obj_ring_state_mrp { #define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \ container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj) +/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */ +struct switchdev_obj_in_test_mrp { + struct switchdev_obj obj; + /* The value is in us and a value of 0 represents to stop */ + u32 interval; + u32 in_id; + u32 period; + u8 max_miss; +}; + +#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_test_mrp, obj) + +/* SWICHDEV_OBJ_ID_IN_ROLE_MRP */ +struct switchdev_obj_in_role_mrp { + struct switchdev_obj obj; + struct net_device *i_port; + u32 ring_id; + u16 in_id; + u8 in_role; +}; + +#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_role_mrp, obj) + +struct switchdev_obj_in_state_mrp { + struct switchdev_obj obj; + u32 in_id; + u8 in_state; +}; + +#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_in_state_mrp, obj) + #endif typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); From 2801758391ba6b0c20e253b956355e1b15ad85a2 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:48 +0200 Subject: [PATCH 0980/2056] bridge: uapi: mrp: Extend MRP attributes for MRP interconnect Extend the existing MRP netlink attributes to allow to configure MRP Interconnect: IFLA_BRIDGE_MRP_IN_ROLE - the parameter type is br_mrp_in_role which contains the interconnect id, the ring id, the interconnect role(MIM or MIC) and the port ifindex that represents the interconnect port. IFLA_BRIDGE_MRP_IN_STATE - the parameter type is br_mrp_in_state which contains the interconnect id and the interconnect state. IFLA_BRIDGE_MRP_IN_TEST - the parameter type is br_mrp_start_in_test which contains the interconnect id, the interval at which to send MRP_InTest frames, how many test frames can be missed before declaring the interconnect ring open and the period which represents for how long to send MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 53 +++++++++++++++++++++++++++++++++ include/uapi/linux/mrp_bridge.h | 38 +++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c114c1c2bd53..d840a3e37a37 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -167,6 +167,9 @@ enum { IFLA_BRIDGE_MRP_RING_ROLE, IFLA_BRIDGE_MRP_START_TEST, IFLA_BRIDGE_MRP_INFO, + IFLA_BRIDGE_MRP_IN_ROLE, + IFLA_BRIDGE_MRP_IN_STATE, + IFLA_BRIDGE_MRP_START_IN_TEST, __IFLA_BRIDGE_MRP_MAX, }; @@ -245,6 +248,37 @@ enum { #define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1) +enum { + IFLA_BRIDGE_MRP_IN_STATE_UNSPEC, + IFLA_BRIDGE_MRP_IN_STATE_IN_ID, + IFLA_BRIDGE_MRP_IN_STATE_STATE, + __IFLA_BRIDGE_MRP_IN_STATE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC, + IFLA_BRIDGE_MRP_IN_ROLE_RING_ID, + IFLA_BRIDGE_MRP_IN_ROLE_IN_ID, + IFLA_BRIDGE_MRP_IN_ROLE_ROLE, + IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX, + __IFLA_BRIDGE_MRP_IN_ROLE_MAX, +}; + +#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC, + IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID, + IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD, + __IFLA_BRIDGE_MRP_START_IN_TEST_MAX, +}; + +#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1) + struct br_mrp_instance { __u32 ring_id; __u32 p_ifindex; @@ -270,6 +304,25 @@ struct br_mrp_start_test { __u32 monitor; }; +struct br_mrp_in_state { + __u32 in_state; + __u16 in_id; +}; + +struct br_mrp_in_role { + __u32 ring_id; + __u32 in_role; + __u32 i_ifindex; + __u16 in_id; +}; + +struct br_mrp_start_in_test { + __u32 interval; + __u32 max_miss; + __u32 period; + __u16 in_id; +}; + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h index bee366540212..6aeb13ef0b1e 100644 --- a/include/uapi/linux/mrp_bridge.h +++ b/include/uapi/linux/mrp_bridge.h @@ -21,11 +21,22 @@ enum br_mrp_ring_role_type { BR_MRP_RING_ROLE_MRA, }; +enum br_mrp_in_role_type { + BR_MRP_IN_ROLE_DISABLED, + BR_MRP_IN_ROLE_MIC, + BR_MRP_IN_ROLE_MIM, +}; + enum br_mrp_ring_state_type { BR_MRP_RING_STATE_OPEN, BR_MRP_RING_STATE_CLOSED, }; +enum br_mrp_in_state_type { + BR_MRP_IN_STATE_OPEN, + BR_MRP_IN_STATE_CLOSED, +}; + enum br_mrp_port_state_type { BR_MRP_PORT_STATE_DISABLED, BR_MRP_PORT_STATE_BLOCKED, @@ -36,6 +47,7 @@ enum br_mrp_port_state_type { enum br_mrp_port_role_type { BR_MRP_PORT_ROLE_PRIMARY, BR_MRP_PORT_ROLE_SECONDARY, + BR_MRP_PORT_ROLE_INTER, }; enum br_mrp_tlv_header_type { @@ -45,6 +57,10 @@ enum br_mrp_tlv_header_type { BR_MRP_TLV_HEADER_RING_TOPO = 0x3, BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4, BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5, + BR_MRP_TLV_HEADER_IN_TEST = 0x6, + BR_MRP_TLV_HEADER_IN_TOPO = 0x7, + BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8, + BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9, BR_MRP_TLV_HEADER_OPTION = 0x7f, }; @@ -118,4 +134,26 @@ struct br_mrp_oui_hdr { __u8 oui[MRP_OUI_LENGTH]; }; +struct br_mrp_in_test_hdr { + __be16 id; + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 state; + __be16 transitions; + __be32 timestamp; +}; + +struct br_mrp_in_topo_hdr { + __u8 sa[ETH_ALEN]; + __be16 id; + __be16 interval; +}; + +struct br_mrp_in_link_hdr { + __u8 sa[ETH_ALEN]; + __be16 port_role; + 
__be16 id; + __be16 interval; +}; + #endif From 43364ef1a12a4236b7956b076649ddd080764cd1 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:49 +0200 Subject: [PATCH 0981/2056] bridge: mrp: Extend bridge interface This patch adds a new flag(BR_MRP_LOST_IN_CONT) to the net bridge ports. This bit will be set when the port lost the continuity of MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- include/linux/if_bridge.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b3a8d3054af0..6479a38e52fa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -49,6 +49,7 @@ struct br_ip_list { #define BR_ISOLATED BIT(16) #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) +#define BR_MRP_LOST_IN_CONT BIT(19) #define BR_DEFAULT_AGEING_TIME (300 * HZ) From 78c1b4fb0e3ed6907955abf3fc8eea74704fe072 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:50 +0200 Subject: [PATCH 0982/2056] bridge: mrp: Extend br_mrp for MRP interconnect This patch extends the 'struct br_mrp' to contain information regarding the MRP interconnect. It contains the following: - the interconnect port 'i_port', which is NULL if the node doesn't have a interconnect role - the interconnect id, which is similar with the ring id, but this field is also part of the MRP_InTest frames. - the interconnect role, which can be MIM or MIC. - the interconnect state, which can be open or closed. - the interconnect delayed_work for sending MRP_InTest frames and check for lost of continuity. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_private_mrp.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index 315eb37d89f0..8841ba847fb2 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -12,8 +12,10 @@ struct br_mrp { struct net_bridge_port __rcu *p_port; struct net_bridge_port __rcu *s_port; + struct net_bridge_port __rcu *i_port; u32 ring_id; + u16 in_id; u16 prio; enum br_mrp_ring_role_type ring_role; @@ -21,6 +23,11 @@ struct br_mrp { enum br_mrp_ring_state_type ring_state; u32 ring_transitions; + enum br_mrp_in_role_type in_role; + u8 in_role_offloaded; + enum br_mrp_in_state_type in_state; + u32 in_transitions; + struct delayed_work test_work; u32 test_interval; unsigned long test_end; @@ -28,6 +35,12 @@ struct br_mrp { u32 test_max_miss; bool test_monitor; + struct delayed_work in_test_work; + u32 in_test_interval; + unsigned long in_test_end; + u32 in_test_count_miss; + u32 in_test_max_miss; + u32 seq_id; struct rcu_head rcu; From 4cc625c63a9274f28f05d5be39a2cbeb48cbfed5 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:51 +0200 Subject: [PATCH 0983/2056] bridge: mrp: Rename br_mrp_port_open to br_mrp_ring_port_open This patch renames the function br_mrp_port_open to br_mrp_ring_port_open. In this way is more clear that a ring port lost the continuity because there will be also a br_mrp_in_port_open. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_mrp.c | 6 +++--- net/bridge/br_mrp_netlink.c | 2 +- net/bridge/br_private_mrp.h | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c index 90592af9db61..fe7cf1446b58 100644 --- a/net/bridge/br_mrp.c +++ b/net/bridge/br_mrp.c @@ -213,7 +213,7 @@ static void br_mrp_test_work_expired(struct work_struct *work) } if (notify_open && !mrp->ring_role_offloaded) - br_mrp_port_open(p->dev, true); + br_mrp_ring_port_open(p->dev, true); } p = rcu_dereference(mrp->s_port); @@ -229,7 +229,7 @@ static void br_mrp_test_work_expired(struct work_struct *work) } if (notify_open && !mrp->ring_role_offloaded) - br_mrp_port_open(p->dev, true); + br_mrp_ring_port_open(p->dev, true); } out: @@ -537,7 +537,7 @@ static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port, * not closed */ if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED) - br_mrp_port_open(port->dev, false); + br_mrp_ring_port_open(port->dev, false); } /* Determin if the test hdr has a better priority than the node */ diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c index c4f5c356811f..acce300c0cc2 100644 --- a/net/bridge/br_mrp_netlink.c +++ b/net/bridge/br_mrp_netlink.c @@ -368,7 +368,7 @@ int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br) return -EMSGSIZE; } -int br_mrp_port_open(struct net_device *dev, u8 loc) +int br_mrp_ring_port_open(struct net_device *dev, u8 loc) { struct net_bridge_port *p; int err = 0; diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index 8841ba847fb2..e93c8f9d4df5 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -74,6 +74,6 @@ int br_mrp_port_switchdev_set_role(struct net_bridge_port *p, enum br_mrp_port_role_type role); /* br_mrp_netlink.c */ -int br_mrp_port_open(struct net_device *dev, u8 loc); +int br_mrp_ring_port_open(struct net_device *dev, u8 loc); #endif /* _BR_PRIVATE_MRP_H */ From 4139d4b51a462135d6368e80a9314b57e57279f2 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:52 +0200 Subject: [PATCH 0984/2056] bridge: mrp: Add br_mrp_in_port_open function This function notifies the userspace when the node lost the continuity of MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_mrp_netlink.c | 22 ++++++++++++++++++++++ net/bridge/br_private_mrp.h | 1 + 2 files changed, 23 insertions(+) diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c index acce300c0cc2..4bf7aaeb2915 100644 --- a/net/bridge/br_mrp_netlink.c +++ b/net/bridge/br_mrp_netlink.c @@ -389,3 +389,25 @@ int br_mrp_ring_port_open(struct net_device *dev, u8 loc) out: return err; } + +int br_mrp_in_port_open(struct net_device *dev, u8 loc) +{ + struct net_bridge_port *p; + int err = 0; + + p = br_port_get_rcu(dev); + if (!p) { + err = -EINVAL; + goto out; + } + + if (loc) + p->flags |= BR_MRP_LOST_IN_CONT; + else + p->flags &= ~BR_MRP_LOST_IN_CONT; + + br_ifinfo_notify(RTM_NEWLINK, NULL, p); + +out: + return err; +} diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index e93c8f9d4df5..23da2f956ad0 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -75,5 +75,6 @@ int br_mrp_port_switchdev_set_role(struct net_bridge_port *p, /* br_mrp_netlink.c */ int br_mrp_ring_port_open(struct net_device *dev, u8 loc); +int br_mrp_in_port_open(struct net_device *dev, u8 loc); #endif /* _BR_PRIVATE_MRP_H */ From f23f0db3607582636b475eaeb74d32e924f11c41 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:53 +0200 Subject: [PATCH 0985/2056] bridge: switchdev: mrp: Extend MRP API for switchdev for MRP Interconnect Implement the MRP API for interconnect switchdev. Similar with the other br_mrp_switchdev function, these function will just eventually call the switchdev functions: switchdev_port_obj_add/del. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_mrp_switchdev.c | 62 +++++++++++++++++++++++++++++++++++ net/bridge/br_private_mrp.h | 7 ++++ 2 files changed, 69 insertions(+) diff --git a/net/bridge/br_mrp_switchdev.c b/net/bridge/br_mrp_switchdev.c index 0da68a0da4b5..ed547e03ace1 100644 --- a/net/bridge/br_mrp_switchdev.c +++ b/net/bridge/br_mrp_switchdev.c @@ -107,6 +107,68 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br, return 0; } +int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp, + u16 in_id, u32 ring_id, + enum br_mrp_in_role_type role) +{ + struct switchdev_obj_in_role_mrp mrp_role = { + .obj.orig_dev = br->dev, + .obj.id = SWITCHDEV_OBJ_ID_IN_ROLE_MRP, + .in_role = role, + .in_id = mrp->in_id, + .ring_id = mrp->ring_id, + .i_port = rtnl_dereference(mrp->i_port)->dev, + }; + int err; + + if (role == BR_MRP_IN_ROLE_DISABLED) + err = switchdev_port_obj_del(br->dev, &mrp_role.obj); + else + err = switchdev_port_obj_add(br->dev, &mrp_role.obj, NULL); + + return err; +} + +int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp, + enum br_mrp_in_state_type state) +{ + struct switchdev_obj_in_state_mrp mrp_state = { + .obj.orig_dev = br->dev, + .obj.id = SWITCHDEV_OBJ_ID_IN_STATE_MRP, + .in_state = state, + .in_id = mrp->in_id, + }; + int err; + + err = switchdev_port_obj_add(br->dev, &mrp_state.obj, NULL); + + if (err && err != -EOPNOTSUPP) + return err; + + return 0; +} + +int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp, + u32 interval, u8 max_miss, u32 period) +{ + struct switchdev_obj_in_test_mrp test = { + .obj.orig_dev = br->dev, + .obj.id = SWITCHDEV_OBJ_ID_IN_TEST_MRP, + .interval = interval, + .max_miss = max_miss, + .in_id = mrp->in_id, + .period = period, + }; + int err; + + if (interval == 0) + err = switchdev_port_obj_del(br->dev, &test.obj); + else + 
err = switchdev_port_obj_add(br->dev, &test.obj, NULL); + + return err; +} + int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, enum br_mrp_port_state_type state) { diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index 23da2f956ad0..0d554ef88db8 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -72,6 +72,13 @@ int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, enum br_mrp_port_state_type state); int br_mrp_port_switchdev_set_role(struct net_bridge_port *p, enum br_mrp_port_role_type role); +int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp, + u16 in_id, u32 ring_id, + enum br_mrp_in_role_type role); +int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp, + enum br_mrp_in_state_type state); +int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp, + u32 interval, u8 max_miss, u32 period); /* br_mrp_netlink.c */ int br_mrp_ring_port_open(struct net_device *dev, u8 loc); From 537ed5676d4648abc8ef75b5c04d773d961aee2f Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:54 +0200 Subject: [PATCH 0986/2056] bridge: mrp: Implement the MRP Interconnect API Thie patch adds support for MRP Interconnect. Similar with the MRP ring, if the HW can't generate MRP_InTest frames, then the SW will try to generate them. And if also the SW fails to generate the frames then an error is return to userspace. The forwarding/termination of MRP_In frames is happening in the kernel and is done by MRP instances. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_mrp.c | 570 ++++++++++++++++++++++++++++++++++-- net/bridge/br_private_mrp.h | 4 + 2 files changed, 543 insertions(+), 31 deletions(-) diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c index fe7cf1446b58..b36689e6e7cb 100644 --- a/net/bridge/br_mrp.c +++ b/net/bridge/br_mrp.c @@ -4,6 +4,27 @@ #include "br_private_mrp.h" static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 }; +static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 }; + +static bool br_mrp_is_ring_port(struct net_bridge_port *p_port, + struct net_bridge_port *s_port, + struct net_bridge_port *port) +{ + if (port == p_port || + port == s_port) + return true; + + return false; +} + +static bool br_mrp_is_in_port(struct net_bridge_port *i_port, + struct net_bridge_port *port) +{ + if (port == i_port) + return true; + + return false; +} static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br, u32 ifindex) @@ -37,6 +58,22 @@ static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id) return res; } +static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id) +{ + struct br_mrp *res = NULL; + struct br_mrp *mrp; + + list_for_each_entry_rcu(mrp, &br->mrp_list, list, + lockdep_rtnl_is_held()) { + if (mrp->in_id == in_id) { + res = mrp; + break; + } + } + + return res; +} + static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) { struct br_mrp *mrp; @@ -52,6 +89,10 @@ static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex) p = rtnl_dereference(mrp->s_port); if (p && p->dev->ifindex == ifindex) return false; + + p = rtnl_dereference(mrp->i_port); + if (p && p->dev->ifindex == ifindex) + return false; } return true; @@ -66,7 +107,8 @@ static struct br_mrp *br_mrp_find_port(struct net_bridge *br, list_for_each_entry_rcu(mrp, &br->mrp_list, list, lockdep_rtnl_is_held()) { if 
(rcu_access_pointer(mrp->p_port) == p || - rcu_access_pointer(mrp->s_port) == p) { + rcu_access_pointer(mrp->s_port) == p || + rcu_access_pointer(mrp->i_port) == p) { res = mrp; break; } @@ -160,6 +202,36 @@ static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp, return skb; } +static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp, + struct net_bridge_port *p, + enum br_mrp_port_role_type port_role) +{ + struct br_mrp_in_test_hdr *hdr = NULL; + struct sk_buff *skb = NULL; + + if (!p) + return NULL; + + skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac); + if (!skb) + return NULL; + + br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr)); + hdr = skb_put(skb, sizeof(*hdr)); + + hdr->id = cpu_to_be16(mrp->in_id); + ether_addr_copy(hdr->sa, p->br->dev->dev_addr); + hdr->port_role = cpu_to_be16(port_role); + hdr->state = cpu_to_be16(mrp->in_state); + hdr->transitions = cpu_to_be16(mrp->in_transitions); + hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies)); + + br_mrp_skb_common(skb, mrp); + br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0); + + return skb; +} + /* This function is continuously called in the following cases: * - when node role is MRM, in this case test_monitor is always set to false * because it needs to notify the userspace that the ring is open and needs to @@ -239,6 +311,83 @@ static void br_mrp_test_work_expired(struct work_struct *work) usecs_to_jiffies(mrp->test_interval)); } +/* This function is continuously called when the node has the interconnect role + * MIM. It would generate interconnect test frames and will send them on all 3 + * ports. But will also check if it stop receiving interconnect test frames. + */ +static void br_mrp_in_test_work_expired(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work); + struct net_bridge_port *p; + bool notify_open = false; + struct sk_buff *skb; + + if (time_before_eq(mrp->in_test_end, jiffies)) + return; + + if (mrp->in_test_count_miss < mrp->in_test_max_miss) { + mrp->in_test_count_miss++; + } else { + /* Notify that the interconnect ring is open only if the + * interconnect ring state is closed, otherwise it would + * continue to notify at every interval. + */ + if (mrp->in_state == BR_MRP_IN_STATE_CLOSED) + notify_open = true; + } + + rcu_read_lock(); + + p = rcu_dereference(mrp->p_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_PRIMARY); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + + p = rcu_dereference(mrp->s_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_SECONDARY); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + + p = rcu_dereference(mrp->i_port); + if (p) { + skb = br_mrp_alloc_in_test_skb(mrp, p, + BR_MRP_PORT_ROLE_INTER); + if (!skb) + goto out; + + skb_reset_network_header(skb); + dev_queue_xmit(skb); + + if (notify_open && !mrp->in_role_offloaded) + br_mrp_in_port_open(p->dev, true); + } + +out: + rcu_read_unlock(); + + queue_delayed_work(system_wq, &mrp->in_test_work, + usecs_to_jiffies(mrp->in_test_interval)); +} + /* Deletes the MRP instance. 
* note: called under rtnl_lock */ @@ -251,6 +400,10 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) cancel_delayed_work_sync(&mrp->test_work); br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0); + /* Stop sending MRP_InTest frames if has an interconnect role */ + cancel_delayed_work_sync(&mrp->in_test_work); + br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); + br_mrp_switchdev_del(br, mrp); /* Reset the ports */ @@ -278,6 +431,18 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp) rcu_assign_pointer(mrp->s_port, NULL); } + p = rtnl_dereference(mrp->i_port); + if (p) { + spin_lock_bh(&br->lock); + state = netif_running(br->dev) ? + BR_STATE_FORWARDING : BR_STATE_DISABLED; + p->state = state; + p->flags &= ~BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + br_mrp_port_switchdev_set_state(p, state); + rcu_assign_pointer(mrp->i_port, NULL); + } + list_del_rcu(&mrp->list); kfree_rcu(mrp, rcu); } @@ -329,6 +494,7 @@ int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance) rcu_assign_pointer(mrp->s_port, p); INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired); + INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired); list_add_tail_rcu(&mrp->list, &br->mrp_list); err = br_mrp_switchdev_add(br, mrp); @@ -511,6 +677,180 @@ int br_mrp_start_test(struct net_bridge *br, return 0; } +/* Set in state, int state can be only Open or Closed + * note: already called with rtnl_lock + */ +int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state) +{ + struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id); + + if (!mrp) + return -EINVAL; + + if (mrp->in_state == BR_MRP_IN_STATE_CLOSED && + state->in_state != BR_MRP_IN_STATE_CLOSED) + mrp->in_transitions++; + + mrp->in_state = state->in_state; + + br_mrp_switchdev_set_in_state(br, mrp, state->in_state); + + return 0; +} + +/* Set in role, in role can be only MIM(Media Interconnection Manager) or + * MIC(Media Interconnection Client). + * note: already called with rtnl_lock + */ +int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role) +{ + struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id); + struct net_bridge_port *p; + int err; + + if (!mrp) + return -EINVAL; + + if (!br_mrp_get_port(br, role->i_ifindex)) + return -EINVAL; + + if (role->in_role == BR_MRP_IN_ROLE_DISABLED) { + u8 state; + + /* It is not allowed to disable a port that doesn't exist */ + p = rtnl_dereference(mrp->i_port); + if (!p) + return -EINVAL; + + /* Stop the generating MRP_InTest frames */ + cancel_delayed_work_sync(&mrp->in_test_work); + br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0); + + /* Remove the port */ + spin_lock_bh(&br->lock); + state = netif_running(br->dev) ? + BR_STATE_FORWARDING : BR_STATE_DISABLED; + p->state = state; + p->flags &= ~BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + br_mrp_port_switchdev_set_state(p, state); + rcu_assign_pointer(mrp->i_port, NULL); + + mrp->in_role = role->in_role; + mrp->in_id = 0; + + return 0; + } + + /* It is not possible to have the same port part of multiple rings */ + if (!br_mrp_unique_ifindex(br, role->i_ifindex)) + return -EINVAL; + + /* It is not allowed to set a different interconnect port if the mrp + * instance has already one. 
First it needs to be disabled and after + * that set the new port + */ + if (rcu_access_pointer(mrp->i_port)) + return -EINVAL; + + p = br_mrp_get_port(br, role->i_ifindex); + spin_lock_bh(&br->lock); + p->state = BR_STATE_FORWARDING; + p->flags |= BR_MRP_AWARE; + spin_unlock_bh(&br->lock); + rcu_assign_pointer(mrp->i_port, p); + + mrp->in_role = role->in_role; + mrp->in_id = role->in_id; + + /* If there is an error just bailed out */ + err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id, + role->ring_id, role->in_role); + if (err && err != -EOPNOTSUPP) + return err; + + /* Now detect if the HW actually applied the role or not. If the HW + * applied the role it means that the SW will not to do those operations + * anymore. For example if the role is MIM then the HW will notify the + * SW when interconnect ring is open, but if the is not pushed to the HW + * the SW will need to detect when the interconnect ring is open. + */ + mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1; + + return 0; +} + +/* Start to generate MRP_InTest frames, the frames are generated by + * HW and if it fails, they are generated by the SW. + * note: already called with rtnl_lock + */ +int br_mrp_start_in_test(struct net_bridge *br, + struct br_mrp_start_in_test *in_test) +{ + struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id); + + if (!mrp) + return -EINVAL; + + if (mrp->in_role != BR_MRP_IN_ROLE_MIM) + return -EINVAL; + + /* Try to push it to the HW and if it fails then continue with SW + * implementation and if that also fails then return error. + */ + if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval, + in_test->max_miss, in_test->period)) + return 0; + + mrp->in_test_interval = in_test->interval; + mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period); + mrp->in_test_max_miss = in_test->max_miss; + mrp->in_test_count_miss = 0; + queue_delayed_work(system_wq, &mrp->in_test_work, + usecs_to_jiffies(in_test->interval)); + + return 0; +} + +/* Determin if the frame type is a ring frame */ +static bool br_mrp_ring_frame(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST || + hdr->type == BR_MRP_TLV_HEADER_RING_TOPO || + hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN || + hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP || + hdr->type == BR_MRP_TLV_HEADER_OPTION) + return true; + + return false; +} + +/* Determin if the frame type is an interconnect frame */ +static bool br_mrp_in_frame(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST || + hdr->type == BR_MRP_TLV_HEADER_IN_TOPO || + hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN || + hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP) + return true; + + return false; +} + /* Process only MRP Test frame. All the other MRP frames are processed by * userspace application * note: already called with rcu_read_lock @@ -591,17 +931,92 @@ static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br, mrp->test_count_miss = 0; } -/* This will just forward the frame to the other mrp ring port(MRC role) or will - * not do anything. +/* Process only MRP InTest frame. 
All the other MRP frames are processed by + * userspace application + * note: already called with rcu_read_lock + */ +static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port, + struct sk_buff *skb) +{ + const struct br_mrp_in_test_hdr *in_hdr; + struct br_mrp_in_test_hdr _in_hdr; + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + /* Each MRP header starts with a version field which is 16 bits. + * Therefore skip the version and get directly the TLV header. + */ + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return false; + + /* The check for InTest frame type was already done */ + in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr), + sizeof(_in_hdr), &_in_hdr); + if (!in_hdr) + return false; + + /* It needs to process only it's own InTest frames. */ + if (mrp->in_id != ntohs(in_hdr->id)) + return false; + + mrp->in_test_count_miss = 0; + + /* Notify the userspace that the ring is closed only when the ring is + * not closed + */ + if (mrp->in_state != BR_MRP_IN_STATE_CLOSED) + br_mrp_in_port_open(port->dev, false); + + return true; +} + +/* Get the MRP frame type + * note: already called with rcu_read_lock + */ +static u8 br_mrp_get_frame_type(struct sk_buff *skb) +{ + const struct br_mrp_tlv_hdr *hdr; + struct br_mrp_tlv_hdr _hdr; + + /* Each MRP header starts with a version field which is 16 bits. + * Therefore skip the version and get directly the TLV header. + */ + hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr); + if (!hdr) + return 0xff; + + return hdr->type; +} + +static bool br_mrp_mrm_behaviour(struct br_mrp *mrp) +{ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRM || + (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor)) + return true; + + return false; +} + +static bool br_mrp_mrc_behaviour(struct br_mrp *mrp) +{ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRC || + (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor)) + return true; + + return false; +} + +/* This will just forward the frame to the other mrp ring ports, depending on + * the frame type, ring role and interconnect role * note: already called with rcu_read_lock */ static int br_mrp_rcv(struct net_bridge_port *p, struct sk_buff *skb, struct net_device *dev) { - struct net_device *s_dev, *p_dev, *d_dev; - struct net_bridge_port *p_port, *s_port; + struct net_bridge_port *p_port, *s_port, *i_port = NULL; + struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL; struct net_bridge *br; - struct sk_buff *nskb; struct br_mrp *mrp; /* If port is disabled don't accept any frames */ @@ -616,46 +1031,139 @@ static int br_mrp_rcv(struct net_bridge_port *p, p_port = rcu_dereference(mrp->p_port); if (!p_port) return 0; + p_dst = p_port; s_port = rcu_dereference(mrp->s_port); if (!s_port) return 0; + s_dst = s_port; - /* If the role is MRM then don't forward the frames */ - if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { - br_mrp_mrm_process(mrp, p, skb); - return 1; - } - - /* If the role is MRA then don't forward the frames if it behaves as - * MRM node + /* If the frame is a ring frame then it is not required to check the + * interconnect role and ports to process or forward the frame */ - if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { - if (!mrp->test_monitor) { + if (br_mrp_ring_frame(skb)) { + /* If the role is MRM then don't forward the frames */ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) { br_mrp_mrm_process(mrp, p, skb); - return 1; + goto no_forward; } - br_mrp_mra_process(mrp, br, p, skb); + /* 
If the role is MRA then don't forward the frames if it + * behaves as MRM node + */ + if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) { + if (!mrp->test_monitor) { + br_mrp_mrm_process(mrp, p, skb); + goto no_forward; + } + + br_mrp_mra_process(mrp, br, p, skb); + } + + goto forward; } - /* Clone the frame and forward it on the other MRP port */ - nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - return 0; + if (br_mrp_in_frame(skb)) { + u8 in_type = br_mrp_get_frame_type(skb); - p_dev = p_port->dev; - s_dev = s_port->dev; + i_port = rcu_dereference(mrp->i_port); + i_dst = i_port; - if (p_dev == dev) - d_dev = s_dev; - else - d_dev = p_dev; + /* If the ring port is in block state it should not forward + * In_Test frames + */ + if (br_mrp_is_ring_port(p_port, s_port, p) && + p->state == BR_STATE_BLOCKING && + in_type == BR_MRP_TLV_HEADER_IN_TEST) + goto no_forward; - nskb->dev = d_dev; - skb_push(nskb, ETH_HLEN); - dev_queue_xmit(nskb); + /* Nodes that behaves as MRM needs to stop forwarding the + * frames in case the ring is closed, otherwise will be a loop. + * In this case the frame is no forward between the ring ports. + */ + if (br_mrp_mrm_behaviour(mrp) && + br_mrp_is_ring_port(p_port, s_port, p) && + (s_port->state != BR_STATE_FORWARDING || + p_port->state != BR_STATE_FORWARDING)) { + p_dst = NULL; + s_dst = NULL; + } + /* A node that behaves as MRC and doesn't have a interconnect + * role then it should forward all frames between the ring ports + * because it doesn't have an interconnect port + */ + if (br_mrp_mrc_behaviour(mrp) && + mrp->in_role == BR_MRP_IN_ROLE_DISABLED) + goto forward; + + if (mrp->in_role == BR_MRP_IN_ROLE_MIM) { + if (in_type == BR_MRP_TLV_HEADER_IN_TEST) { + /* MIM should not forward it's own InTest + * frames + */ + if (br_mrp_mim_process(mrp, p, skb)) { + goto no_forward; + } else { + if (br_mrp_is_ring_port(p_port, s_port, + p)) + i_dst = NULL; + + if (br_mrp_is_in_port(i_port, p)) + goto no_forward; + } + } else { + /* MIM should forward IntLinkChange and + * IntTopoChange between ring ports but MIM + * should not forward IntLinkChange and + * IntTopoChange if the frame was received at + * the interconnect port + */ + if (br_mrp_is_ring_port(p_port, s_port, p)) + i_dst = NULL; + + if (br_mrp_is_in_port(i_port, p)) + goto no_forward; + } + } + + if (mrp->in_role == BR_MRP_IN_ROLE_MIC) { + /* MIC should forward InTest frames on all ports + * regardless of the received port + */ + if (in_type == BR_MRP_TLV_HEADER_IN_TEST) + goto forward; + + /* MIC should forward IntLinkChange frames only if they + * are received on ring ports to all the ports + */ + if (br_mrp_is_ring_port(p_port, s_port, p) && + (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP || + in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN)) + goto forward; + + /* Should forward the InTopo frames only between the + * ring ports + */ + if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) { + i_dst = NULL; + goto forward; + } + + /* In all the other cases don't forward the frames */ + goto no_forward; + } + } + +forward: + if (p_dst) + br_forward(p_dst, skb, true, false); + if (s_dst) + br_forward(s_dst, skb, true, false); + if (i_dst) + br_forward(i_dst, skb, true, false); + +no_forward: return 1; } diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h index 0d554ef88db8..af0e9eff6549 100644 --- a/net/bridge/br_private_mrp.h +++ b/net/bridge/br_private_mrp.h @@ -57,6 +57,10 @@ int br_mrp_set_ring_state(struct net_bridge *br, struct br_mrp_ring_state *state); int br_mrp_set_ring_role(struct net_bridge *br, struct 
br_mrp_ring_role *role); int br_mrp_start_test(struct net_bridge *br, struct br_mrp_start_test *test); +int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state); +int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role); +int br_mrp_start_in_test(struct net_bridge *br, + struct br_mrp_start_in_test *test); /* br_mrp_switchdev.c */ int br_mrp_switchdev_add(struct net_bridge *br, struct br_mrp *mrp); From 7ab1748e4ce607ba1b4133bfe7c2be0022339d87 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:55 +0200 Subject: [PATCH 0987/2056] bridge: mrp: Extend MRP netlink interface for configuring MRP interconnect This patch extends the existing MRP netlink interface with the following attributes: IFLA_BRIDGE_MRP_IN_ROLE, IFLA_BRIDGE_MRP_IN_STATE and IFLA_BRIDGE_MRP_START_IN_TEST. These attributes are similar with their ring attributes but they apply to the interconnect port. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_mrp_netlink.c | 140 ++++++++++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c index 4bf7aaeb2915..a006e0771e8d 100644 --- a/net/bridge/br_mrp_netlink.c +++ b/net/bridge/br_mrp_netlink.c @@ -14,6 +14,9 @@ static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = { [IFLA_BRIDGE_MRP_RING_STATE] = { .type = NLA_NESTED }, [IFLA_BRIDGE_MRP_RING_ROLE] = { .type = NLA_NESTED }, [IFLA_BRIDGE_MRP_START_TEST] = { .type = NLA_NESTED }, + [IFLA_BRIDGE_MRP_IN_ROLE] = { .type = NLA_NESTED }, + [IFLA_BRIDGE_MRP_IN_STATE] = { .type = NLA_NESTED }, + [IFLA_BRIDGE_MRP_START_IN_TEST] = { .type = NLA_NESTED }, }; static const struct nla_policy @@ -235,6 +238,121 @@ static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr, return br_mrp_start_test(br, &test); } +static const struct nla_policy +br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = { + [IFLA_BRIDGE_MRP_IN_STATE_UNSPEC] = { .type = NLA_REJECT }, + [IFLA_BRIDGE_MRP_IN_STATE_IN_ID] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_IN_STATE_STATE] = { .type = NLA_U32 }, +}; + +static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1]; + struct br_mrp_in_state state; + int err; + + err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr, + br_mrp_in_state_policy, extack); + if (err) + return err; + + if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] || + !tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) { + NL_SET_ERR_MSG_MOD(extack, + "Missing attribute: IN_ID or STATE"); + return -EINVAL; + } + + memset(&state, 0x0, sizeof(state)); + + state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]); + state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]); + + return br_mrp_set_in_state(br, &state); +} + +static const struct nla_policy +br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = { + [IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC] = { .type = NLA_REJECT }, + [IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] = { .type = NLA_U16 }, + [IFLA_BRIDGE_MRP_IN_ROLE_ROLE] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] = { .type = NLA_U32 }, +}; + +static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1]; + struct br_mrp_in_role role; + int err; + + err = 
nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr, + br_mrp_in_role_policy, extack); + if (err) + return err; + + if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] || + !tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] || + !tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] || + !tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) { + NL_SET_ERR_MSG_MOD(extack, + "Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX"); + return -EINVAL; + } + + memset(&role, 0x0, sizeof(role)); + + role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]); + role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]); + role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]); + role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]); + + return br_mrp_set_in_role(br, &role); +} + +static const struct nla_policy +br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = { + [IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC] = { .type = NLA_REJECT }, + [IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] = { .type = NLA_U32 }, + [IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD] = { .type = NLA_U32 }, +}; + +static int br_mrp_start_in_test_parse(struct net_bridge *br, + struct nlattr *attr, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1]; + struct br_mrp_start_in_test test; + int err; + + err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr, + br_mrp_start_in_test_policy, extack); + if (err) + return err; + + if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] || + !tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] || + !tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] || + !tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) { + NL_SET_ERR_MSG_MOD(extack, + "Missing attribute: RING_ID or INTERVAL or MAX_MISS or PERIOD"); + return -EINVAL; + } + + memset(&test, 0x0, sizeof(test)); + + test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]); + test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]); + test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]); + test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]); + + return br_mrp_start_in_test(br, &test); +} + int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p, struct nlattr *attr, int cmd, struct netlink_ext_ack *extack) { @@ -301,6 +419,28 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p, return err; } + if (tb[IFLA_BRIDGE_MRP_IN_STATE]) { + err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE], + extack); + if (err) + return err; + } + + if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) { + err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE], + extack); + if (err) + return err; + } + + if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) { + err = br_mrp_start_in_test_parse(br, + tb[IFLA_BRIDGE_MRP_START_IN_TEST], + extack); + if (err) + return err; + } + return 0; } From 559139cb0405d38816e5e725adee9000db993235 Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:56 +0200 Subject: [PATCH 0988/2056] bridge: uapi: mrp: Extend MRP_INFO attributes for interconnect status Extend the existing MRP_INFO to return status of MRP interconnect. In case there is no MRP interconnect on the node then the role will be disabled so the other attributes can be ignored. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index d840a3e37a37..c1227aecd38f 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -243,6 +243,11 @@ enum { IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR, + IFLA_BRIDGE_MRP_INFO_I_IFINDEX, + IFLA_BRIDGE_MRP_INFO_IN_STATE, + IFLA_BRIDGE_MRP_INFO_IN_ROLE, + IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL, + IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS, __IFLA_BRIDGE_MRP_INFO_MAX, }; From 4fc4871fc2dc91098948e038ac9af7a0d1e3166a Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:57 +0200 Subject: [PATCH 0989/2056] bridge: mrp: Extend br_mrp_fill_info This patch extends the function br_mrp_fill_info to return also the status for the interconnect ring. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- net/bridge/br_mrp_netlink.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/net/bridge/br_mrp_netlink.c b/net/bridge/br_mrp_netlink.c index a006e0771e8d..2a2fdf3500c5 100644 --- a/net/bridge/br_mrp_netlink.c +++ b/net/bridge/br_mrp_netlink.c @@ -474,6 +474,11 @@ int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br) p->dev->ifindex)) goto nla_put_failure; + p = rcu_dereference(mrp->i_port); + if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX, + p->dev->ifindex)) + goto nla_put_failure; + if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO, mrp->prio)) goto nla_put_failure; @@ -493,6 +498,19 @@ int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br) mrp->test_monitor)) goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE, + mrp->in_state)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE, + mrp->in_role)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL, + mrp->in_test_interval)) + goto nla_put_failure; + if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS, + mrp->in_test_max_miss)) + goto nla_put_failure; + nla_nest_end(skb, tb); } nla_nest_end(skb, mrp_tb); From ffb3adba64801f70c472303c9e386eb5eaec193d Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Tue, 14 Jul 2020 09:34:58 +0200 Subject: [PATCH 0990/2056] net: bridge: Add port attribute IFLA_BRPORT_MRP_IN_OPEN This patch adds a new port attribute, IFLA_BRPORT_MRP_IN_OPEN, which allows to notify the userspace when the node lost the contiuity of MRP_InTest frames. Signed-off-by: Horatiu Vultur Acked-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_link.h | 1 + net/bridge/br_netlink.c | 3 +++ tools/include/uapi/linux/if_link.h | 1 + 3 files changed, 5 insertions(+) diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index cc185a007ade..26842ffd0501 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -344,6 +344,7 @@ enum { IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index c532fa65c983..147d52596e17 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -152,6 +152,7 @@ static inline size_t br_port_info_size(void) #endif + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */ + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_RING_OPEN */ + + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MRP_IN_OPEN */ + 0; } @@ -216,6 +217,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, !!(p->flags & BR_NEIGH_SUPPRESS)) || nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags & BR_MRP_LOST_CONT)) || + nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN, + !!(p->flags & BR_MRP_LOST_IN_CONT)) || nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED))) return -EMSGSIZE; diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index cafedbbfefbe..781e482dc499 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -344,6 +344,7 @@ enum { IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, IFLA_BRPORT_MRP_RING_OPEN, + IFLA_BRPORT_MRP_IN_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) From a3b7b493882b140777956218d5f745d773f2f5ce Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 14 Jul 2020 09:52:24 +0200 Subject: [PATCH 0991/2056] lan743x: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'lan743x_tx_ring_cleanup()' and 'lan743x_rx_ring_init()', GFP_KERNEL can be used because this flag is already used to allocate some memory in these functions. While at it, remove a useless (void *) casting in the first hunk in so that the code is more consistent. 
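To make the mechanical rule set below concrete, the coherent-allocation part of the conversion looks roughly like this (a minimal sketch around hypothetical ring-buffer helpers, not code taken from lan743x; GFP_KERNEL is assumed to be valid because the callers run in process context, as the commit message notes):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Minimal sketch, not taken from lan743x: a hypothetical coherent ring
 * buffer.  The old wrapper calls are kept in comments for comparison.
 */
static void *ring_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* old: return pci_zalloc_consistent(pdev, size, dma); */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void ring_free(struct pci_dev *pdev, size_t size, void *cpu_addr,
		      dma_addr_t dma)
{
	/* old: pci_free_consistent(pdev, size, cpu_addr, dma); */
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma);
}

The streaming map/unmap calls convert the same way: the struct pci_dev argument becomes its embedded &pdev->dev, and the PCI_DMA_* direction constants become their DMA_* counterparts, as the script below spells out.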
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 46 +++++++++---------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 2373e72d2d29..f6479384dc4f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1742,10 +1742,9 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight) static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) { if (tx->head_cpu_ptr) { - pci_free_consistent(tx->adapter->pdev, - sizeof(*tx->head_cpu_ptr), - (void *)(tx->head_cpu_ptr), - tx->head_dma_ptr); + dma_free_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr, + tx->head_dma_ptr); tx->head_cpu_ptr = NULL; tx->head_dma_ptr = 0; } @@ -1753,10 +1752,9 @@ static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) tx->buffer_info = NULL; if (tx->ring_cpu_ptr) { - pci_free_consistent(tx->adapter->pdev, - tx->ring_allocation_size, - tx->ring_cpu_ptr, - tx->ring_dma_ptr); + dma_free_coherent(&tx->adapter->pdev->dev, + tx->ring_allocation_size, tx->ring_cpu_ptr, + tx->ring_dma_ptr); tx->ring_allocation_size = 0; tx->ring_cpu_ptr = NULL; tx->ring_dma_ptr = 0; @@ -1780,8 +1778,8 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx) sizeof(struct lan743x_tx_descriptor), PAGE_SIZE); dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, - ring_allocation_size, &dma_ptr); + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -1798,8 +1796,9 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx) } tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev, - sizeof(*tx->head_cpu_ptr), &dma_ptr); + cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev, + sizeof(*tx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -2281,10 +2280,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) } if (rx->head_cpu_ptr) { - pci_free_consistent(rx->adapter->pdev, - sizeof(*rx->head_cpu_ptr), - rx->head_cpu_ptr, - rx->head_dma_ptr); + dma_free_coherent(&rx->adapter->pdev->dev, + sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr, + rx->head_dma_ptr); rx->head_cpu_ptr = NULL; rx->head_dma_ptr = 0; } @@ -2293,10 +2291,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) rx->buffer_info = NULL; if (rx->ring_cpu_ptr) { - pci_free_consistent(rx->adapter->pdev, - rx->ring_allocation_size, - rx->ring_cpu_ptr, - rx->ring_dma_ptr); + dma_free_coherent(&rx->adapter->pdev->dev, + rx->ring_allocation_size, rx->ring_cpu_ptr, + rx->ring_dma_ptr); rx->ring_allocation_size = 0; rx->ring_cpu_ptr = NULL; rx->ring_dma_ptr = 0; @@ -2327,8 +2324,8 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) sizeof(struct lan743x_rx_descriptor), PAGE_SIZE); dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, - ring_allocation_size, &dma_ptr); + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + ring_allocation_size, &dma_ptr, GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; @@ -2345,8 +2342,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx) } rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; dma_ptr = 0; - cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev, - sizeof(*rx->head_cpu_ptr), &dma_ptr); + cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev, + 
sizeof(*rx->head_cpu_ptr), &dma_ptr, + GFP_KERNEL); if (!cpu_ptr) { ret = -ENOMEM; goto cleanup; From fb059b26bcc3f9ba7999535121716dfc3479bda2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 14 Jul 2020 11:06:20 +0200 Subject: [PATCH 0992/2056] net: neterion: s2io: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GPF_ with a correct flag. It has been compile tested. When memory is allocated in 'init_shared_mem()' GFP_KERNEL can be used because this flag is already used to allocate some memory in this function. While at it, update some debug message to match the new function names. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/s2io.c | 191 +++++++++++++-------------- 1 file changed, 90 insertions(+), 101 deletions(-) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 67e62603fe3b..a0980f81591e 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -640,11 +640,11 @@ static int init_shared_mem(struct s2io_nic *nic) int k = 0; dma_addr_t tmp_p; void *tmp_v; - tmp_v = pci_alloc_consistent(nic->pdev, - PAGE_SIZE, &tmp_p); + tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE, + &tmp_p, GFP_KERNEL); if (!tmp_v) { DBG_PRINT(INFO_DBG, - "pci_alloc_consistent failed for TxDL\n"); + "dma_alloc_coherent failed for TxDL\n"); return -ENOMEM; } /* If we got a zero DMA address(can happen on @@ -658,11 +658,12 @@ static int init_shared_mem(struct s2io_nic *nic) "%s: Zero DMA address for TxDL. " "Virtual address %p\n", dev->name, tmp_v); - tmp_v = pci_alloc_consistent(nic->pdev, - PAGE_SIZE, &tmp_p); + tmp_v = dma_alloc_coherent(&nic->pdev->dev, + PAGE_SIZE, &tmp_p, + GFP_KERNEL); if (!tmp_v) { DBG_PRINT(INFO_DBG, - "pci_alloc_consistent failed for TxDL\n"); + "dma_alloc_coherent failed for TxDL\n"); return -ENOMEM; } mem_allocated += PAGE_SIZE; @@ -734,8 +735,8 @@ static int init_shared_mem(struct s2io_nic *nic) rx_blocks = &ring->rx_blocks[j]; size = SIZE_OF_BLOCK; /* size is always page size */ - tmp_v_addr = pci_alloc_consistent(nic->pdev, size, - &tmp_p_addr); + tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size, + &tmp_p_addr, GFP_KERNEL); if (tmp_v_addr == NULL) { /* * In case of failure, free_shared_mem() @@ -835,8 +836,8 @@ static int init_shared_mem(struct s2io_nic *nic) /* Allocation and initialization of Statistics block */ size = sizeof(struct stat_block); mac_control->stats_mem = - pci_alloc_consistent(nic->pdev, size, - &mac_control->stats_mem_phy); + dma_alloc_coherent(&nic->pdev->dev, size, + &mac_control->stats_mem_phy, GFP_KERNEL); if (!mac_control->stats_mem) { /* @@ -906,18 +907,18 @@ static void free_shared_mem(struct s2io_nic *nic) fli = &fifo->list_info[mem_blks]; if (!fli->list_virt_addr) break; - pci_free_consistent(nic->pdev, PAGE_SIZE, - fli->list_virt_addr, - fli->list_phy_addr); + dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, + fli->list_virt_addr, + fli->list_phy_addr); swstats->mem_freed += PAGE_SIZE; } /* If we got a zero DMA address during allocation, * free the page now */ if (mac_control->zerodma_virt_addr) { - pci_free_consistent(nic->pdev, PAGE_SIZE, - mac_control->zerodma_virt_addr, - (dma_addr_t)0); + dma_free_coherent(&nic->pdev->dev, PAGE_SIZE, + mac_control->zerodma_virt_addr, + (dma_addr_t)0); DBG_PRINT(INIT_DBG, "%s: Freeing TxDL with zero DMA address. 
" "Virtual address %p\n", @@ -939,8 +940,8 @@ static void free_shared_mem(struct s2io_nic *nic) tmp_p_addr = ring->rx_blocks[j].block_dma_addr; if (tmp_v_addr == NULL) break; - pci_free_consistent(nic->pdev, size, - tmp_v_addr, tmp_p_addr); + dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr, + tmp_p_addr); swstats->mem_freed += size; kfree(ring->rx_blocks[j].rxds); swstats->mem_freed += sizeof(struct rxd_info) * @@ -993,10 +994,9 @@ static void free_shared_mem(struct s2io_nic *nic) if (mac_control->stats_mem) { swstats->mem_freed += mac_control->stats_mem_sz; - pci_free_consistent(nic->pdev, - mac_control->stats_mem_sz, - mac_control->stats_mem, - mac_control->stats_mem_phy); + dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz, + mac_control->stats_mem, + mac_control->stats_mem_phy); } } @@ -2316,8 +2316,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, txds = txdlp; if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) { - pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, - sizeof(u64), PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, + (dma_addr_t)txds->Buffer_Pointer, + sizeof(u64), DMA_TO_DEVICE); txds++; } @@ -2326,8 +2327,8 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); return NULL; } - pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, + skb_headlen(skb), DMA_TO_DEVICE); frg_cnt = skb_shinfo(skb)->nr_frags; if (frg_cnt) { txds++; @@ -2335,9 +2336,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; if (!txds->Buffer_Pointer) break; - pci_unmap_page(nic->pdev, + dma_unmap_page(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer, - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); } } memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); @@ -2521,11 +2522,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, memset(rxdp, 0, sizeof(struct RxD1)); skb_reserve(skb, NET_IP_ALIGN); rxdp1->Buffer0_ptr = - pci_map_single(ring->pdev, skb->data, + dma_map_single(&ring->pdev->dev, skb->data, size - NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp1->Buffer0_ptr)) + DMA_FROM_DEVICE); + if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr)) goto pci_map_failed; rxdp->Control_2 = @@ -2557,17 +2557,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, if (from_card_up) { rxdp3->Buffer0_ptr = - pci_map_single(ring->pdev, ba->ba_0, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer0_ptr)) + dma_map_single(&ring->pdev->dev, + ba->ba_0, BUF0_LEN, + DMA_FROM_DEVICE); + if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr)) goto pci_map_failed; } else - pci_dma_sync_single_for_device(ring->pdev, - (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&ring->pdev->dev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, + DMA_FROM_DEVICE); rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); if (ring->rxd_mode == RXD_MODE_3B) { @@ -2577,29 +2576,28 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring, * Buffer2 will have L3/L4 header plus * L4 payload */ - rxdp3->Buffer2_ptr = pci_map_single(ring->pdev, + rxdp3->Buffer2_ptr = 
dma_map_single(&ring->pdev->dev, skb->data, ring->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer2_ptr)) + if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr)) goto pci_map_failed; if (from_card_up) { rxdp3->Buffer1_ptr = - pci_map_single(ring->pdev, + dma_map_single(&ring->pdev->dev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(nic->pdev, - rxdp3->Buffer1_ptr)) { - pci_unmap_single(ring->pdev, + if (dma_mapping_error(&nic->pdev->dev, + rxdp3->Buffer1_ptr)) { + dma_unmap_single(&ring->pdev->dev, (dma_addr_t)(unsigned long) skb->data, ring->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto pci_map_failed; } } @@ -2668,27 +2666,24 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) continue; if (sp->rxd_mode == RXD_MODE_1) { rxdp1 = (struct RxD1 *)rxdp; - pci_unmap_single(sp->pdev, + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp1->Buffer0_ptr, dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + HEADER_802_2_SIZE + HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); memset(rxdp, 0, sizeof(struct RxD1)); } else if (sp->rxd_mode == RXD_MODE_3B) { rxdp3 = (struct RxD3 *)rxdp; - pci_unmap_single(sp->pdev, + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer1_ptr, - BUF1_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF1_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); + dev->mtu + 4, DMA_FROM_DEVICE); memset(rxdp, 0, sizeof(struct RxD3)); } swstats->mem_freed += skb->truesize; @@ -2919,23 +2914,21 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget) } if (ring_data->rxd_mode == RXD_MODE_1) { rxdp1 = (struct RxD1 *)rxdp; - pci_unmap_single(ring_data->pdev, (dma_addr_t) - rxdp1->Buffer0_ptr, + dma_unmap_single(&ring_data->pdev->dev, + (dma_addr_t)rxdp1->Buffer0_ptr, ring_data->mtu + HEADER_ETHERNET_II_802_3_SIZE + HEADER_802_2_SIZE + HEADER_SNAP_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); } else if (ring_data->rxd_mode == RXD_MODE_3B) { rxdp3 = (struct RxD3 *)rxdp; - pci_dma_sync_single_for_cpu(ring_data->pdev, - (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, - PCI_DMA_FROMDEVICE); - pci_unmap_single(ring_data->pdev, + dma_sync_single_for_cpu(&ring_data->pdev->dev, + (dma_addr_t)rxdp3->Buffer0_ptr, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&ring_data->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, - ring_data->mtu + 4, - PCI_DMA_FROMDEVICE); + ring_data->mtu + 4, DMA_FROM_DEVICE); } prefetch(skb->data); rx_osm_handler(ring_data, rxdp); @@ -4117,9 +4110,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb_headlen(skb); - txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, - frg_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) + txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data, + frg_len, DMA_TO_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer)) goto pci_map_failed; txdp->Host_Control = (unsigned long)skb; @@ -6772,10 +6765,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, * Host Control is NULL */ rxdp1->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, (*skb)->data, + dma_map_single(&sp->pdev->dev, (*skb)->data, size - 
NET_IP_ALIGN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr)) + DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr)) goto memalloc_failed; rxdp->Host_Control = (unsigned long) (*skb); } @@ -6798,37 +6791,34 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp, } stats->mem_allocated += (*skb)->truesize; rxdp3->Buffer2_ptr = *temp2 = - pci_map_single(sp->pdev, (*skb)->data, - dev->mtu + 4, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr)) + dma_map_single(&sp->pdev->dev, (*skb)->data, + dev->mtu + 4, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr)) goto memalloc_failed; rxdp3->Buffer0_ptr = *temp0 = - pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, - rxdp3->Buffer0_ptr)) { - pci_unmap_single(sp->pdev, + dma_map_single(&sp->pdev->dev, ba->ba_0, + BUF0_LEN, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) { + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto memalloc_failed; } rxdp->Host_Control = (unsigned long) (*skb); /* Buffer-1 will be dummy buffer not used */ rxdp3->Buffer1_ptr = *temp1 = - pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(sp->pdev, - rxdp3->Buffer1_ptr)) { - pci_unmap_single(sp->pdev, + dma_map_single(&sp->pdev->dev, ba->ba_1, + BUF1_LEN, DMA_FROM_DEVICE); + if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) { + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer0_ptr, - BUF0_LEN, PCI_DMA_FROMDEVICE); - pci_unmap_single(sp->pdev, + BUF0_LEN, DMA_FROM_DEVICE); + dma_unmap_single(&sp->pdev->dev, (dma_addr_t)rxdp3->Buffer2_ptr, dev->mtu + 4, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); goto memalloc_failed; } } @@ -7675,17 +7665,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) return ret; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__); dma_flag = true; - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { DBG_PRINT(ERR_DBG, - "Unable to obtain 64bit DMA " - "for consistent allocations\n"); + "Unable to obtain 64bit DMA for coherent allocations\n"); pci_disable_device(pdev); return -ENOMEM; } - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__); } else { pci_disable_device(pdev); From 8331bbe9eab7515b98adf99b39d85ef34057b6f2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 14 Jul 2020 11:14:12 +0200 Subject: [PATCH 0993/2056] net: neterion: vxge: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below. No GFP_ flag needs to be corrected. It has been compile tested. 
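The streaming mappings that dominate this driver follow the same shape; a minimal sketch with a hypothetical receive buffer (illustrative names, not vxge code):

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Minimal sketch, not taken from vxge: map a hypothetical receive buffer
 * for device access and check the mapping.  The old wrapper calls are
 * kept in the comment for comparison.
 */
static int rx_buf_map(struct pci_dev *pdev, void *data, size_t len,
		      dma_addr_t *dma)
{
	/* old: *dma = pci_map_single(pdev, data, len, PCI_DMA_FROMDEVICE);
	 *      if (pci_dma_mapping_error(pdev, *dma)) return -EIO;
	 */
	*dma = dma_map_single(&pdev->dev, data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -EIO;
	return 0;
}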
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- .../net/ethernet/neterion/vxge/vxge-config.c | 42 ++++++------ .../net/ethernet/neterion/vxge/vxge-main.c | 64 +++++++++---------- 2 files changed, 52 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c index 51cd57ab3d95..4f1f90f5e178 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c @@ -1102,10 +1102,10 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool) hldev = blockpool->hldev; list_for_each_safe(p, n, &blockpool->free_block_list) { - pci_unmap_single(hldev->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&hldev->pdev->dev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + DMA_BIDIRECTIONAL); vxge_os_dma_free(hldev->pdev, ((struct __vxge_hw_blockpool_entry *)p)->memblock, @@ -1178,10 +1178,10 @@ __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev, goto blockpool_create_exit; } - dma_addr = pci_map_single(hldev->pdev, memblock, - VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(hldev->pdev, - dma_addr))) { + dma_addr = dma_map_single(&hldev->pdev->dev, memblock, + VXGE_HW_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) { vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); __vxge_hw_blockpool_destroy(blockpool); status = VXGE_HW_ERR_OUT_OF_MEMORY; @@ -2264,10 +2264,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh, goto exit; } - dma_addr = pci_map_single(devh->pdev, block_addr, length, - PCI_DMA_BIDIRECTIONAL); + dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length, + DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) { vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); blockpool->req_out--; goto exit; @@ -2359,11 +2359,10 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size, if (!memblock) goto exit; - dma_object->addr = pci_map_single(devh->pdev, memblock, size, - PCI_DMA_BIDIRECTIONAL); + dma_object->addr = dma_map_single(&devh->pdev->dev, memblock, + size, DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(devh->pdev, - dma_object->addr))) { + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) { vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); memblock = NULL; @@ -2410,11 +2409,10 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool) if (blockpool->pool_size < blockpool->pool_max) break; - pci_unmap_single( - (blockpool->hldev)->pdev, - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, - ((struct __vxge_hw_blockpool_entry *)p)->length, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&(blockpool->hldev)->pdev->dev, + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, + ((struct __vxge_hw_blockpool_entry *)p)->length, + DMA_BIDIRECTIONAL); vxge_os_dma_free( (blockpool->hldev)->pdev, @@ -2445,8 +2443,8 @@ static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh, blockpool = &devh->block_pool; if (size != blockpool->block_size) { - pci_unmap_single(devh->pdev, dma_object->addr, size, - PCI_DMA_BIDIRECTIONAL); + dma_unmap_single(&devh->pdev->dev, dma_object->addr, size, + DMA_BIDIRECTIONAL); 
vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); } else { diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5de85b9e9e35..b0faa737b817 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -241,10 +241,10 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring) rx_priv = vxge_hw_ring_rxd_private_get(dtrh); rx_priv->skb_data = rx_priv->skb->data; - dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, - rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data, + rx_priv->data_size, DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { + if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) { ring->stats.pci_map_fail++; return -EIO; } @@ -323,8 +323,8 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan, static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, struct vxge_rx_priv *rx_priv) { - pci_dma_sync_single_for_device(ring->pdev, - rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma, + rx_priv->data_size, DMA_FROM_DEVICE); vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); vxge_hw_ring_rxd_pre_post(ring->handle, dtr); @@ -425,8 +425,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, if (!vxge_rx_map(dtr, ring)) { skb_put(skb, pkt_length); - pci_unmap_single(ring->pdev, data_dma, - data_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&ring->pdev->dev, + data_dma, data_size, + DMA_FROM_DEVICE); vxge_hw_ring_rxd_pre_post(ringh, dtr); vxge_post(&dtr_cnt, &first_dtr, dtr, @@ -458,9 +459,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr, skb_reserve(skb_up, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); - pci_dma_sync_single_for_cpu(ring->pdev, - data_dma, data_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&ring->pdev->dev, + data_dma, data_size, + DMA_FROM_DEVICE); vxge_debug_mem(VXGE_TRACE, "%s: %s:%d skb_up = %p", @@ -585,13 +586,13 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr, } /* for unfragmented skb */ - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_headlen(skb), DMA_TO_DEVICE); for (j = 0; j < frg_cnt; j++) { - pci_unmap_page(fifo->pdev, - txd_priv->dma_buffers[i++], - skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&fifo->pdev->dev, + txd_priv->dma_buffers[i++], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -897,10 +898,10 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) first_frg_len = skb_headlen(skb); - dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, - PCI_DMA_TODEVICE); + dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data, + first_frg_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { + if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) { vxge_hw_fifo_txdl_free(fifo_hw, dtr); fifo->stats.pci_map_fail++; goto _exit0; @@ -977,12 +978,12 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev) j = 0; frag = &skb_shinfo(skb)->frags[0]; - pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++], + skb_headlen(skb), DMA_TO_DEVICE); for (; j < i; j++) { - 
pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], - skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -1012,8 +1013,8 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata) if (state != VXGE_HW_RXD_STATE_POSTED) return; - pci_unmap_single(ring->pdev, rx_priv->data_dma, - rx_priv->data_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma, + rx_priv->data_size, DMA_FROM_DEVICE); dev_kfree_skb(rx_priv->skb); rx_priv->skb_data = NULL; @@ -1048,12 +1049,12 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata) frag = &skb_shinfo(skb)->frags[0]; /* for unfragmented skb */ - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_headlen(skb), DMA_TO_DEVICE); for (j = 0; j < frg_cnt; j++) { - pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], - skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++], + skb_frag_size(frag), DMA_TO_DEVICE); frag += 1; } @@ -4387,21 +4388,20 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) goto _exit0; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 64bit DMA", __func__); high_dma = 1; - if (pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(64))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { vxge_debug_init(VXGE_ERR, "%s : unable to obtain 64bit DMA for " "consistent allocations", __func__); ret = -ENOMEM; goto _exit1; } - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { vxge_debug_ll_config(VXGE_TRACE, "%s : using 32bit DMA", __func__); } else { From ef8d57e6b7f2b26e9def159749fc3d0d3df4ccfd Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Tue, 14 Jul 2020 17:20:54 +0300 Subject: [PATCH 0994/2056] mlxsw: reg: Add session_id and pid to MPAT register Allow setting session_id and pid as part of port analyzer configurations. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 76f61bef03f8..e460d9d05d81 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -8662,6 +8662,13 @@ MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN); */ MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4); +/* reg_mpat_session_id + * Mirror Session ID. + * Used for MIRROR_SESSION trap. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, session_id, 0x00, 24, 4); + /* reg_mpat_system_port * A unique port identifier for the final destination of the packet. * Access: RW @@ -8719,6 +8726,18 @@ enum mlxsw_reg_mpat_span_type { */ MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4); +/* reg_mpat_pide + * Policer enable. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, pide, 0x0C, 15, 1); + +/* reg_mpat_pid + * Policer ID. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, mpat, pid, 0x0C, 0, 14); + /* Remote SPAN - Ethernet VLAN * - - - - - - - - - - - - - - */ From 95c68833fa1db2a05496f8c2bc0171a14210b047 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Tue, 14 Jul 2020 17:20:55 +0300 Subject: [PATCH 0995/2056] mlxsw: reg: add mirroring_pid_base to MOGCR register Allow setting mirroring_pid_base using MOGCR register. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index e460d9d05d81..6af44aee501d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -9521,6 +9521,14 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1); */ MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1); +/* reg_mogcr_mirroring_pid_base + * Base policer id for mirroring policers. + * Must have an even value (e.g. 1000, not 1001). + * Reserved when SwitchX/-2, Switch-IB/2, Spectrum-1 and Quantum. + * Access: RW + */ +MLXSW_ITEM32(reg, mogcr, mirroring_pid_base, 0x0C, 0, 14); + /* MPAGR - Monitoring Port Analyzer Global Register * ------------------------------------------------ * This register is used for global port analyzer configurations. From 34e4ace56f106f6fc5050b8e4c6041bca78a431c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:20:56 +0300 Subject: [PATCH 0996/2056] mlxsw: spectrum_span: Add per-ASIC SPAN agent operations The various SPAN agent types differ in their mirror targets (i.e., physical port netdev vs. VLAN netdev) and the encapsulation headers that they need to encapsulate the mirrored packets with. The Spectrum-2 and Spectrum-3 ASICs support a SPAN agent type that is able to mirror towards the CPU, whereas the Spectrum-1 ASIC does not. Prepare for the addition of this new SPAN agent type by splitting the SPAN agent operations to be per-ASIC. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 31 ++++++++++++++++--- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 6374765a112d..6a257eb0df49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -22,6 +22,8 @@ struct mlxsw_sp_span { struct work_struct work; struct mlxsw_sp *mlxsw_sp; const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr; + const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr; + size_t span_entry_ops_arr_size; struct list_head analyzed_ports_list; struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ struct list_head trigger_entries_list; @@ -626,7 +628,19 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = { }; static const -struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = { +struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = { + &mlxsw_sp_span_entry_ops_phys, +#if IS_ENABLED(CONFIG_NET_IPGRE) + &mlxsw_sp_span_entry_ops_gretap4, +#endif +#if IS_ENABLED(CONFIG_IPV6_GRE) + &mlxsw_sp_span_entry_ops_gretap6, +#endif + &mlxsw_sp_span_entry_ops_vlan, +}; + +static const +struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = { &mlxsw_sp_span_entry_ops_phys, #if IS_ENABLED(CONFIG_NET_IPGRE) &mlxsw_sp_span_entry_ops_gretap4, @@ -894,11 +908,12 @@ static const struct mlxsw_sp_span_entry_ops * mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev) { + struct mlxsw_sp_span *span = mlxsw_sp->span; size_t i; - for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i) - if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev)) - return mlxsw_sp_span_entry_types[i]; + for (i = 0; i < span->span_entry_ops_arr_size; ++i) + if (span->span_entry_ops_arr[i]->can_handle(to_dev)) + return span->span_entry_ops_arr[i]; return NULL; } @@ -1519,7 +1534,11 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { + size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr; + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; return 0; } @@ -1536,7 +1555,11 @@ const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) { + size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr); + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; + mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr; + mlxsw_sp->span->span_entry_ops_arr_size = arr_size; return 0; } From f4a626e2ca09aeffd30509349759fb30379fe83f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:20:57 +0300 Subject: [PATCH 0997/2056] mlxsw: spectrum_span: Add driver private info to parms_set() callback The parms_set() callback is supposed to fill in the parameters for the SPAN agent, such as the destination port and encapsulation info, if any. When mirroring to the CPU port we cannot resolve the destination port (the CPU port) without access to the driver private info. Pass the driver private info to parms_set() callback so that it could be used later on to resolve the CPU port. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 19 ++++++++++++------- .../ethernet/mellanox/mlxsw/spectrum_span.h | 3 ++- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 6a257eb0df49..40289afdaaa8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -129,7 +129,8 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) } static int -mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { sparmsp->dest_port = netdev_priv(to_dev); @@ -405,7 +406,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, } static int -mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); @@ -506,7 +508,8 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev, } static int -mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev); @@ -580,7 +583,8 @@ mlxsw_sp_span_vlan_can_handle(const struct net_device *dev) } static int -mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { struct net_device *real_dev; @@ -652,7 +656,8 @@ struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = { }; static int -mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev, +mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { return mlxsw_sp_span_entry_unoffloadable(sparmsp); @@ -935,7 +940,7 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work) if (!refcount_read(&curr->ref_count)) continue; - err = curr->ops->parms_set(curr->to_dev, &sparms); + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms); if (err) continue; @@ -971,7 +976,7 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, } memset(&sparms, 0, sizeof(sparms)); - err = ops->parms_set(to_dev, &sparms); + err = ops->parms_set(mlxsw_sp, to_dev, &sparms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 29b96b222e25..c21d8dfd371b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -52,7 +52,8 @@ struct mlxsw_sp_span_entry { struct mlxsw_sp_span_entry_ops { bool (*can_handle)(const struct net_device *to_dev); - int (*parms_set)(const struct net_device *to_dev, + int (*parms_set)(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp); int (*configure)(struct mlxsw_sp_span_entry *span_entry, struct mlxsw_sp_span_parms sparms); From 6edc8beab443ec23d10e489007b4e94d9a05846d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:20:58 +0300 Subject: [PATCH 0998/2056] mlxsw: spectrum_span: Do not dereference destination 
netdev Currently, the destination netdev to which we mirror must be a valid netdev. However, this is going to change with the introduction of mirroring towards the CPU port, as the CPU port does not have a backing netdev. Avoid dereferencing the destination netdev when it is not clear if it is valid or not. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 40289afdaaa8..0ef9505d336f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -692,16 +692,15 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp, goto set_parms; if (sparms.dest_port->mlxsw_sp != mlxsw_sp) { - netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance", - sparms.dest_port->dev->name); + dev_err(mlxsw_sp->bus_info->dev, + "Cannot mirror to a port which belongs to a different mlxsw instance\n"); sparms.dest_port = NULL; goto set_parms; } err = span_entry->ops->configure(span_entry, sparms); if (err) { - netdev_err(span_entry->to_dev, "Failed to offload mirror to %s", - sparms.dest_port->dev->name); + dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n"); sparms.dest_port = NULL; goto set_parms; } From fa8c08b8fcbd4a4bd201297d27a9ccbd2eb4b1f1 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:20:59 +0300 Subject: [PATCH 0999/2056] mlxsw: spectrum_span: Add support for mirroring towards CPU port The Spectrum-2 and Spectrum-3 ASICs are able to mirror packets towards the CPU. These packets are then trapped like any other packet, but with a special packet trap and additional metadata such as why the packet was mirrored. The ability to mirror packets towards the CPU will be utilized by a subsequent patch set that will mirror packets that were dropped by the ASIC for various buffer-related reasons, such as tail-drop and early-drop. Add mirroring towards the CPU as a new SPAN agent type and re-use the functions that mirror to a physical port where possible. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_span.c | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 0ef9505d336f..0336edb29cc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -128,6 +128,38 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->span); } +static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev) +{ + return !dev; +} + +static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + return -EOPNOTSUPP; +} + +static int +mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + return -EOPNOTSUPP; +} + +static void +mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = { + .can_handle = mlxsw_sp1_span_cpu_can_handle, + .parms_set = mlxsw_sp1_span_entry_cpu_parms, + .configure = mlxsw_sp1_span_entry_cpu_configure, + .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure, +}; + static int mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, @@ -633,6 +665,7 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = { static const struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = { + &mlxsw_sp1_span_entry_ops_cpu, &mlxsw_sp_span_entry_ops_phys, #if IS_ENABLED(CONFIG_NET_IPGRE) &mlxsw_sp_span_entry_ops_gretap4, @@ -643,8 +676,49 @@ struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = { &mlxsw_sp_span_entry_ops_vlan, }; +static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev) +{ + return !dev; +} + +static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + struct mlxsw_sp_span_parms *sparmsp) +{ + sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT]; + return 0; +} + +static int +mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry, + struct mlxsw_sp_span_parms sparms) +{ + /* Mirroring to the CPU port is like mirroring to any other physical + * port. Its local port is used instead of that of the physical port. + */ + return mlxsw_sp_span_entry_phys_configure(span_entry, sparms); +} + +static void +mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) +{ + enum mlxsw_reg_mpat_span_type span_type; + + span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH; + mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type); +} + +static const +struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = { + .can_handle = mlxsw_sp2_span_cpu_can_handle, + .parms_set = mlxsw_sp2_span_entry_cpu_parms, + .configure = mlxsw_sp2_span_entry_cpu_configure, + .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure, +}; + static const struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = { + &mlxsw_sp2_span_entry_ops_cpu, &mlxsw_sp_span_entry_ops_phys, #if IS_ENABLED(CONFIG_NET_IPGRE) &mlxsw_sp_span_entry_ops_gretap4, @@ -1540,6 +1614,13 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp) { size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr); + /* Must be first to avoid NULL pointer dereference by subsequent + * can_handle() callbacks. 
+ */ + if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] != + &mlxsw_sp1_span_entry_ops_cpu)) + return -EINVAL; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr; mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr; mlxsw_sp->span->span_entry_ops_arr_size = arr_size; @@ -1561,6 +1642,13 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) { size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr); + /* Must be first to avoid NULL pointer dereference by subsequent + * can_handle() callbacks. + */ + if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] != + &mlxsw_sp2_span_entry_ops_cpu)) + return -EINVAL; + mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr; mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr; mlxsw_sp->span->span_entry_ops_arr_size = arr_size; From a120ecc3c5d8da86aca29f142f92a1547578a7bb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:00 +0300 Subject: [PATCH 1000/2056] mlxsw: spectrum_span: Allow passing parameters to SPAN agents Currently, the only parameter of a SPAN agent is the netdev which the SPAN agent should mirror to. The next patch will add the ability to request a SPAN agent that mirrors to a specific netdev and has a specific policer ID bound to it. This is required when mirroring packets to the CPU port. Therefore, encapsulate the sole parameter to mlxsw_sp_span_agent_get() in a structure, so that it could later be extended with policer information. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 4 +++- drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 5 ++++- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 5 +++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 8 ++++++-- 5 files changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 73d56012654b..0e32123097d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -136,11 +136,13 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id) { + struct mlxsw_sp_span_agent_parms agent_parms; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = priv; int err; - err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id); + agent_parms.to_dev = out_dev; + err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index 195e28ab8e65..ab4ec44b566a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -27,6 +27,7 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_mall_entry *mall_entry) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_span_agent_parms agent_parms; struct mlxsw_sp_span_trigger_parms parms; enum mlxsw_sp_span_trigger trigger; int err; @@ -36,8 +37,9 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, - 
&mall_entry->mirror.span_id); + agent_parms.to_dev = mall_entry->mirror.to_dev; + err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id, + &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index 901acd87353f..a5ce1eec5418 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -1295,10 +1295,13 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = {}; + struct mlxsw_sp_span_agent_parms agent_parms = { + .to_dev = mall_entry->mirror.to_dev, + }; int span_id; int err; - err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev, &span_id); + err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 0336edb29cc3..48eb197e649d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -1032,9 +1032,10 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp) mlxsw_core_schedule_work(&mlxsw_sp->span->work); } -int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, - const struct net_device *to_dev, int *p_span_id) +int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, + const struct mlxsw_sp_span_agent_parms *parms) { + const struct net_device *to_dev = parms->to_dev; const struct mlxsw_sp_span_entry_ops *ops; struct mlxsw_sp_span_entry *span_entry; struct mlxsw_sp_span_parms sparms; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index c21d8dfd371b..25f73561a9fe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -35,6 +35,10 @@ struct mlxsw_sp_span_trigger_parms { int span_id; }; +struct mlxsw_sp_span_agent_parms { + const struct net_device *to_dev; +}; + struct mlxsw_sp_span_entry_ops; struct mlxsw_sp_span_ops { @@ -74,8 +78,8 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu); void mlxsw_sp_span_speed_update_work(struct work_struct *work); -int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, - const struct net_device *to_dev, int *p_span_id); +int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, + const struct mlxsw_sp_span_agent_parms *parms); void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id); int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress); From 4039504e6a0c1abdbbabb4693e5e251c55009f21 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:01 +0300 Subject: [PATCH 1001/2056] mlxsw: spectrum_span: Allow setting policer on a SPAN agent When mirroring packets to the CPU port the mirrored packets are trapped to the CPU. However, unlike other traps, it is not possible to set a policer on the associated trap group. Instead, the policer needs to be set on the SPAN agent. Moreover, the policer ID must be within a specified range: From a configurable (even) base ID to this base plus the maximum number of SPAN agents. 
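As a rough illustration of the range rule just described (this sketch is not part of the patch; the driver's own mlxsw_sp_span_policer_id_base_set() further down is the authoritative implementation, and the helper names below are invented for the example), the even-base and bounds constraint could be expressed in standalone C as:

/*
 * Illustrative sketch only: the policer-ID base programmed into the
 * device must be even, and every policer bound to a SPAN agent must
 * fall inside [base, base + max_span_agents). Helper names are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t policer_id_base_from_first_policer(uint16_t policer_id)
{
	/* The base must be even; round an odd first policer ID down. */
	return policer_id & ~1;
}

static bool policer_id_in_span_range(uint16_t policer_id,
				     uint16_t policer_id_base,
				     uint16_t max_span_agents)
{
	return policer_id >= policer_id_base &&
	       policer_id < policer_id_base + max_span_agents;
}

int main(void)
{
	uint16_t base = policer_id_base_from_first_policer(7);	/* -> 6 */

	printf("base=%d, 7 in range: %d, 40 in range: %d\n", base,
	       policer_id_in_span_range(7, base, 32),
	       policer_id_in_span_range(40, base, 32));
	return 0;
}

In the patch itself the chosen base is reference-counted (policer_id_base_ref_count), so the first SPAN agent that carries a policer establishes the base and subsequent agents only need to pass the bounds check.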
While the immediate use case is to set the policer on a SPAN agent that mirrors to the CPU port, a policer can be set on any SPAN agent. Therefore, the operation is implemented for all SPAN agent types. Extend the SPAN agent request API to allow passing the desired policer ID that should be bound to the SPAN agent. Return an error for Spectrum-1, as it does not support policer setting on a SPAN agent. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../mlxsw/spectrum_acl_flex_actions.c | 2 +- .../mellanox/mlxsw/spectrum_matchall.c | 2 +- .../ethernet/mellanox/mlxsw/spectrum_span.c | 107 +++++++++++++++++- .../ethernet/mellanox/mlxsw/spectrum_span.h | 6 + 4 files changed, 114 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 0e32123097d8..18444f675100 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -136,7 +136,7 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, const struct net_device *out_dev, bool ingress, int *p_span_id) { - struct mlxsw_sp_span_agent_parms agent_parms; + struct mlxsw_sp_span_agent_parms agent_parms = {}; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = priv; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c index ab4ec44b566a..f30599ad6019 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c @@ -27,7 +27,7 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_mall_entry *mall_entry) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_span_agent_parms agent_parms; + struct mlxsw_sp_span_agent_parms agent_parms = {}; struct mlxsw_sp_span_trigger_parms parms; enum mlxsw_sp_span_trigger trigger; int err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 48eb197e649d..323eaf979aea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -27,6 +27,8 @@ struct mlxsw_sp_span { struct list_head analyzed_ports_list; struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */ struct list_head trigger_entries_list; + u16 policer_id_base; + refcount_t policer_id_base_ref_count; atomic_t active_entries_count; int entries_count; struct mlxsw_sp_span_entry entries[]; @@ -88,6 +90,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL); if (!span) return -ENOMEM; + refcount_set(&span->policer_id_base_ref_count, 0); span->entries_count = entries_count; atomic_set(&span->active_entries_count, 0); mutex_init(&span->analyzed_ports_lock); @@ -182,6 +185,8 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. 
*/ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); } @@ -478,6 +483,8 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, @@ -580,6 +587,8 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry, /* Create a new port analayzer entry for local_port. */ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl, MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER, @@ -643,6 +652,8 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry, mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true, MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH); + mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable); + mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id); mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); @@ -790,6 +801,45 @@ mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry) span_entry->ops->deconfigure(span_entry); } +static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span, + u16 policer_id) +{ + struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp; + u16 policer_id_base; + int err; + + /* Policers set on SPAN agents must be in the range of + * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the + * base is set and the new policer is not within the range, then we + * must error out. + */ + if (refcount_read(&span->policer_id_base_ref_count)) { + if (policer_id < span->policer_id_base || + policer_id >= span->policer_id_base + span->entries_count) + return -EINVAL; + + refcount_inc(&span->policer_id_base_ref_count); + return 0; + } + + /* Base must be even. */ + policer_id_base = policer_id % 2 == 0 ? 
policer_id : policer_id - 1; + err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp, + policer_id_base); + if (err) + return err; + + span->policer_id_base = policer_id_base; + refcount_set(&span->policer_id_base_ref_count, 1); + + return 0; +} + +static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span) +{ + refcount_dec(&span->policer_id_base_ref_count); +} + static struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, @@ -809,6 +859,15 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp, if (!span_entry) return NULL; + if (sparms.policer_enable) { + int err; + + err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span, + sparms.policer_id); + if (err) + return NULL; + } + atomic_inc(&mlxsw_sp->span->active_entries_count); span_entry->ops = ops; refcount_set(&span_entry->ref_count, 1); @@ -823,6 +882,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_span_entry_deconfigure(span_entry); atomic_dec(&mlxsw_sp->span->active_entries_count); + if (span_entry->parms.policer_enable) + mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span); } struct mlxsw_sp_span_entry * @@ -861,6 +922,24 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id) return NULL; } +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp, + const struct net_device *to_dev, + const struct mlxsw_sp_span_parms *sparms) +{ + int i; + + for (i = 0; i < mlxsw_sp->span->entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i]; + + if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev && + curr->parms.policer_enable == sparms->policer_enable && + curr->parms.policer_id == sparms->policer_id) + return curr; + } + return NULL; +} + static struct mlxsw_sp_span_entry * mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, @@ -869,7 +948,8 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_span_entry *span_entry; - span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev); + span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev, + &sparms); if (span_entry) { /* Already exists, just take a reference */ refcount_inc(&span_entry->ref_count); @@ -1054,6 +1134,8 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id, if (err) return err; + sparms.policer_id = parms->policer_id; + sparms.policer_enable = parms->policer_enable; span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms); if (!span_entry) return -ENOBUFS; @@ -1634,9 +1716,16 @@ static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed) return mtu * 5 / 2; } +static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base) +{ + return -EOPNOTSUPP; +} + const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = { .init = mlxsw_sp1_span_init, .buffsize_get = mlxsw_sp1_span_buffsize_get, + .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set, }; static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp) @@ -1672,9 +1761,24 @@ static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed) return __mlxsw_sp_span_buffsize_get(mtu, speed, factor); } +static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base) +{ + char mogcr_pl[MLXSW_REG_MOGCR_LEN]; + int err; + + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); + if (err) + return err; + + mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base); + 
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl); +} + const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = { .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp2_span_buffsize_get, + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, }; static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) @@ -1687,4 +1791,5 @@ static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed) const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = { .init = mlxsw_sp2_span_init, .buffsize_get = mlxsw_sp3_span_buffsize_get, + .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index 25f73561a9fe..1c746dd3b1bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -21,6 +21,8 @@ struct mlxsw_sp_span_parms { union mlxsw_sp_l3addr daddr; union mlxsw_sp_l3addr saddr; u16 vid; + u16 policer_id; + bool policer_enable; }; enum mlxsw_sp_span_trigger { @@ -37,6 +39,8 @@ struct mlxsw_sp_span_trigger_parms { struct mlxsw_sp_span_agent_parms { const struct net_device *to_dev; + u16 policer_id; + bool policer_enable; }; struct mlxsw_sp_span_entry_ops; @@ -44,6 +48,8 @@ struct mlxsw_sp_span_entry_ops; struct mlxsw_sp_span_ops { int (*init)(struct mlxsw_sp *mlxsw_sp); u32 (*buffsize_get)(int mtu, u32 speed); + int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp, + u16 policer_id_base); }; struct mlxsw_sp_span_entry { From 47e4b1620e804cf3b45523b0023400df330ea25c Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Tue, 14 Jul 2020 17:21:02 +0300 Subject: [PATCH 1002/2056] mlxsw: reg: Increase trap identifier to 10 bits The trap identifier was increased to 10 bits in new versions of the Programmer's Reference Manual (PRM). Increase it accordingly in the Host PacKet Trap (HPKT) register and in the Completion Queue Element (CQE). This is significant for subsequent patches that will introduce trap identifiers which utilize the extended range. Signed-off-by: Amit Cohen Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/trap.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 32c7cabfb261..697593e44f8a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -176,7 +176,7 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14); /* pci_cqe_trap_id * Trap ID that captured the packet. */ -MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9); +MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10); /* pci_cqe_crc * Length include CRC. Indicates the length field includes diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 6af44aee501d..408003520602 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5778,7 +5778,7 @@ MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6); * Note: A trap ID can only be associated with a single trap group. The device * will associate the trap ID with the last trap group configured. 
*/ -MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9); +MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 10); enum { MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT, diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 28e60697d14e..8cbb9cf5b57b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -108,7 +108,7 @@ enum { MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3, MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4, - MLXSW_TRAP_ID_MAX = 0x1FF + MLXSW_TRAP_ID_MAX = 0x3FF, }; enum mlxsw_event_trap_id { From 0cc32c5b5ca84cd3ad3bbb1441f713d171b1efcf Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:03 +0300 Subject: [PATCH 1003/2056] mlxsw: trap: Add trap identifiers for mirrored packets Packets that are mirrored to the CPU port are trapped with one of eight trap identifiers. Add them. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/trap.h | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 8cbb9cf5b57b..33909887d0ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -107,6 +107,14 @@ enum { MLXSW_TRAP_ID_ACL2 = 0x1C2, MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3, MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4, + MLXSW_TRAP_ID_MIRROR_SESSION0 = 0x220, + MLXSW_TRAP_ID_MIRROR_SESSION1 = 0x221, + MLXSW_TRAP_ID_MIRROR_SESSION2 = 0x222, + MLXSW_TRAP_ID_MIRROR_SESSION3 = 0x223, + MLXSW_TRAP_ID_MIRROR_SESSION4 = 0x224, + MLXSW_TRAP_ID_MIRROR_SESSION5 = 0x225, + MLXSW_TRAP_ID_MIRROR_SESSION6 = 0x226, + MLXSW_TRAP_ID_MIRROR_SESSION7 = 0x227, MLXSW_TRAP_ID_MAX = 0x3FF, }; From a76423a144a8a109cffbd7311c3278f8a856b25f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:04 +0300 Subject: [PATCH 1004/2056] mlxsw: pci: Add mirror reason field to CQEv2 The Completion Queue Element version 2 (CQEv2) includes a field called 'mirror_reason' which indicates why the packet was mirrored to the CPU. Add the field so that it can be used by a later patch. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 697593e44f8a..a2c1fbd3e0d1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -213,6 +213,11 @@ mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12); */ MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20); +/* pci_cqe_mirror_reason + * Mirror reason. + */ +MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8); + /* pci_cqe_owner * Ownership bit. */ From eacc86ec510b9ad64d6dddf3a9924c29d1c4f57b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:05 +0300 Subject: [PATCH 1005/2056] mlxsw: pci: Retrieve mirror reason from CQE during receive In case the mirror reason is valid, retrieve it into the Rx information so that it could be used during listener lookup in a later patch. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 + drivers/net/ethernet/mellanox/mlxsw/pci.c | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 7d6b0a232789..c736b8673791 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -176,6 +176,7 @@ struct mlxsw_rx_info { u16 lag_id; } u; u8 lag_port_index; + u8 mirror_reason; int trap_id; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c04ec1a92826..1c64b03ff48e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -547,9 +547,9 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; + struct mlxsw_rx_info rx_info = {}; char *wqe; struct sk_buff *skb; - struct mlxsw_rx_info rx_info; u16 byte_count; int err; @@ -582,6 +582,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe); mlxsw_skb_cb(skb)->cookie_index = cookie_index; + } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 && + rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 && + mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) { + rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe); } byte_count = mlxsw_pci_cqe_byte_count_get(cqe); From 6a8c101e0795d4fe892f281a4e08b2a05cbb9e20 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 14 Jul 2020 17:21:06 +0300 Subject: [PATCH 1006/2056] mlxsw: core: Use mirror reason during Rx listener lookup The Rx listener abstraction allows the switch driver (e.g., mlxsw_spectrum) to register a function that is called when a packet is received (trapped) for a specific reason. Up until now, the Rx listener lookup was solely based on the trap identifier. However, when a packet is mirrored to the CPU the trap identifier merely indicates that the packet was mirrored, but not why it was mirrored. This makes it impossible for the switch driver to register different Rx listeners for different mirror reasons. Solve this by allowing the switch driver to register a Rx listener with a mirror reason and by extending the Rx listener lookup to take the mirror reason into account. Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/core.h | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 8b3791d73c99..1363168b3c82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1524,7 +1524,8 @@ static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a, { return (rxl_a->func == rxl_b->func && rxl_a->local_port == rxl_b->local_port && - rxl_a->trap_id == rxl_b->trap_id); + rxl_a->trap_id == rxl_b->trap_id && + rxl_a->mirror_reason == rxl_b->mirror_reason); } static struct mlxsw_rx_listener_item * @@ -2044,7 +2045,8 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, rxl = &rxl_item->rxl; if ((rxl->local_port == MLXSW_PORT_DONT_CARE || rxl->local_port == local_port) && - rxl->trap_id == rx_info->trap_id) { + rxl->trap_id == rx_info->trap_id && + rxl->mirror_reason == rx_info->mirror_reason) { if (rxl_item->enabled) found = true; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c736b8673791..c1c1e039323a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -61,6 +61,7 @@ void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core, struct mlxsw_rx_listener { void (*func)(struct sk_buff *skb, u8 local_port, void *priv); u8 local_port; + u8 mirror_reason; u16 trap_id; }; From 101e314d7ff385a842c9fb0ddfe074024f4803f2 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:22:56 +0200 Subject: [PATCH 1007/2056] s390/qeth: reject unsupported link type earlier Rather than delaying the decision until netdev setup, immediately reject a device when we discover that it has an unsupported link type (ie. Token Ring). Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 38 ++++++++++++++++++++++--------- drivers/s390/net/qeth_l3_main.c | 6 ----- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 88e998de2d03..c0f551602a5a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2436,6 +2436,17 @@ static int qeth_cm_setup(struct qeth_card *card) return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); } +static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) +{ + if (link_type == QETH_LINK_TYPE_LANE_TR || + link_type == QETH_LINK_TYPE_HSTR) { + dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); + return false; + } + + return true; +} + static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) { struct net_device *dev = card->dev; @@ -2495,8 +2506,8 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, { __u16 mtu, framesize; __u16 len; - __u8 link_type; struct qeth_cmd_buffer *iob; + u8 link_type = 0; QETH_CARD_TEXT(card, 2, "ulpenacb"); @@ -2516,9 +2527,11 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { memcpy(&link_type, QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); - card->info.link_type = link_type; - } else - card->info.link_type = 0; + if (!qeth_is_supported_link_type(card, link_type)) + return -EPROTONOSUPPORT; + } + + card->info.link_type = link_type; QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); return 0; } @@ -3100,7 +3113,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, enum qeth_prot_versions prot, unsigned int data_length) { - enum qeth_link_types link_type = card->info.link_type; struct qeth_cmd_buffer *iob; struct qeth_ipacmd_hdr *hdr; @@ -3116,7 +3128,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, hdr->command = cmd_code; hdr->initiator = IPA_CMD_INITIATOR_HOST; /* hdr->seqno is set by qeth_send_control_data() */ - hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1; + hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; hdr->rel_adapter_no = (u8) card->dev->dev_port; hdr->prim_version_no = IS_LAYER2(card) ? 
2 : 1; hdr->param_count = 1; @@ -3199,18 +3211,22 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_query_cmds_supp *query_cmd; QETH_CARD_TEXT(card, 3, "quyadpcb"); if (qeth_setadpparms_inspect_rc(cmd)) return -EIO; - if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { - card->info.link_type = - cmd->data.setadapterparms.data.query_cmds_supp.lan_type; + query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; + if (query_cmd->lan_type & 0x7f) { + if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) + return -EPROTONOSUPPORT; + + card->info.link_type = query_cmd->lan_type; QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); } - card->options.adp.supported = - cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; + + card->options.adp.supported = query_cmd->supported_cmds; return 0; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 1e50aa0297a3..d4ce653ff111 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1919,12 +1919,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card) return rc; if (IS_OSD(card) || IS_OSX(card)) { - if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || - (card->info.link_type == QETH_LINK_TYPE_HSTR)) { - pr_info("qeth_l3: ignoring TR device\n"); - return -ENODEV; - } - card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; /*IPv6 address autoconfiguration stuff*/ From dbdd04d311afb9d64b0722e8a8a00b359625e8de Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:22:57 +0200 Subject: [PATCH 1008/2056] s390/qeth: fine-tune errno when cmds are cancelled If we cancel all pending cmds (eg. when tearing down the device), don't blame it on an IO error. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c0f551602a5a..7d51be6665cc 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -969,7 +969,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) spin_lock_irqsave(&card->lock, flags); list_for_each_entry(iob, &card->cmd_waiter_list, list) - qeth_notify_cmd(iob, -EIO); + qeth_notify_cmd(iob, -ECANCELED); spin_unlock_irqrestore(&card->lock, flags); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); From 08e95cae648ef8db998d48c38c4774831da5f869 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:22:58 +0200 Subject: [PATCH 1009/2056] s390/qeth: only init the isolation mode when necessary A newly initialized device defaults to ISOLATION_MODE_NONE, don't bother with programming this a second time. Then remove the OSD/OSX check, it's already done in the sysfs path whenever the user actually changes the configuration. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 7d51be6665cc..514795c5eaad 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4663,8 +4663,7 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) QETH_CARD_TEXT(card, 4, "setactlo"); - if ((IS_OSD(card) || IS_OSX(card)) && - qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + if (qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, fallback); if (rc) { @@ -5347,9 +5346,11 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) card->info.hwtrap = 0; - rc = qeth_set_access_ctrl_online(card, 0); - if (rc) - goto out; + if (card->options.isolation != ISOLATION_MODE_NONE) { + rc = qeth_set_access_ctrl_online(card, 0); + if (rc) + goto out; + } rc = qeth_init_qdio_queues(card); if (rc) { From 62f0379c6c9b169b99a89c4677a32e45d7778bc4 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:22:59 +0200 Subject: [PATCH 1010/2056] s390/qeth: don't clear the configured isolation mode When qeth_set_access_ctrl_online() is called during the device's initialization and discovers that isolation mode isn't supported, don't clear the user's currently configured mode. They intentionally choose to operate the device in this specific mode, and degrading the isolation is not an option. Only adjust the configuration when called via sysfs (ie. fallback = 1), and here follow the common pattern and restore it from prev_isolation. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 514795c5eaad..782a5128ac04 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4663,19 +4663,19 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) QETH_CARD_TEXT(card, 4, "setactlo"); - if (qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { - rc = qeth_setadpparms_set_access_ctrl(card, - card->options.isolation, fallback); - if (rc) { - QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", - rc, CARD_DEVID(card)); - rc = -EOPNOTSUPP; - } - } else if (card->options.isolation != ISOLATION_MODE_NONE) { - card->options.isolation = ISOLATION_MODE_NONE; - + if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + return -EOPNOTSUPP; + } + + rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, + fallback); + if (rc) { + QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", + rc, CARD_DEVID(card)); rc = -EOPNOTSUPP; } return rc; From 65878fd95261d3a831e085e2c6f1e8a6d7979b14 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:00 +0200 Subject: [PATCH 1011/2056] s390/qeth: clean up error handling for isolation mode cmds As the cmd IO path has learned to propagate errnos back to its callers, let them deal with errors instead of trying to restore their previous configuration from within the IO error path. Also translate the HW error to a meaningful errno, instead of returning -EIO for all cases (and don't map this to -EOPNOTSUPP later on...). While at it, add a READ_ONCE() / WRITE_ONCE() pair to ensure that the data path always sees a valid isolation mode during reconfiguration. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 5 +- drivers/s390/net/qeth_core_main.c | 85 ++++++++++--------------------- drivers/s390/net/qeth_core_sys.c | 18 +++---- 3 files changed, 39 insertions(+), 69 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 51ea56b73a97..c77a87105ea8 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -721,7 +721,6 @@ struct qeth_card_options { struct qeth_vnicc_info vnicc; /* VNICC options */ enum qeth_discipline_id layer; enum qeth_ipa_isolation_modes isolation; - enum qeth_ipa_isolation_modes prev_isolation; int sniffer; enum qeth_cq cq; char hsuid[9]; @@ -1071,6 +1070,9 @@ int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); int qeth_query_card_info(struct qeth_card *card, struct carrier_info *carrier_info); +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode); + unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, @@ -1078,7 +1080,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, int elements_needed); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); void qeth_trace_features(struct qeth_card *); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 782a5128ac04..bd6489d87ede 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4557,7 +4557,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; - int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); @@ -4571,70 +4570,54 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, cmd->data.setadapterparms.hdr.return_code); switch (qeth_setadpparms_inspect_rc(cmd)) { case SET_ACCESS_CTRL_RC_SUCCESS: - if (card->options.isolation == ISOLATION_MODE_NONE) { + if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) dev_info(&card->gdev->dev, "QDIO data connection isolation is deactivated\n"); - } else { + else dev_info(&card->gdev->dev, "QDIO data connection isolation is activated\n"); - } - break; + return 0; case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", CARD_DEVID(card)); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return 0; case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", CARD_DEVID(card)); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return 0; case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: dev_err(&card->gdev->dev, "Adapter does not " "support QDIO data connection isolation\n"); - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: dev_err(&card->gdev->dev, "Adapter is dedicated. 
" "QDIO data connection isolation not supported\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: dev_err(&card->gdev->dev, "TSO does not permit QDIO data connection isolation\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EPERM; case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: dev_err(&card->gdev->dev, "The adjacent switch port does not " "support reflective relay mode\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EOPNOTSUPP; case SET_ACCESS_CTRL_RC_REFLREL_FAILED: dev_err(&card->gdev->dev, "The reflective relay mode cannot be " "enabled at the adjacent switch port"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EREMOTEIO; case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: dev_warn(&card->gdev->dev, "Turning off reflective relay mode " "at the adjacent switch failed\n"); - break; + /* benign error while disabling ISOLATION_MODE_FWD */ + return 0; default: - /* this should never happen */ - if (fallback) - card->options.isolation = card->options.prev_isolation; - break; + return -EIO; } - return (cmd->hdr.return_code) ? -EIO : 0; } -static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, - enum qeth_ipa_isolation_modes isolation, int fallback) +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode) { int rc; struct qeth_cmd_buffer *iob; @@ -4643,41 +4626,28 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, QETH_CARD_TEXT(card, 4, "setacctl"); + if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + dev_err(&card->gdev->dev, + "Adapter does not support QDIO data connection isolation\n"); + return -EOPNOTSUPP; + } + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, SETADP_DATA_SIZEOF(set_access_ctrl)); if (!iob) return -ENOMEM; cmd = __ipa_cmd(iob); access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; - access_ctrl_req->subcmd_code = isolation; + access_ctrl_req->subcmd_code = mode; rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, - &fallback); - QETH_CARD_TEXT_(card, 2, "rc=%d", rc); - return rc; -} - -int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) -{ - int rc = 0; - - QETH_CARD_TEXT(card, 4, "setactlo"); - - if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { - dev_err(&card->gdev->dev, "Adapter does not " - "support QDIO data connection isolation\n"); - if (fallback) - card->options.isolation = card->options.prev_isolation; - return -EOPNOTSUPP; - } - - rc = qeth_setadpparms_set_access_ctrl(card, card->options.isolation, - fallback); + NULL); if (rc) { + QETH_CARD_TEXT_(card, 2, "rc=%d", rc); QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", rc, CARD_DEVID(card)); - rc = -EOPNOTSUPP; } + return rc; } @@ -5347,7 +5317,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok) card->info.hwtrap = 0; if (card->options.isolation != ISOLATION_MODE_NONE) { - rc = qeth_set_access_ctrl_online(card, 0); + rc = qeth_setadpparms_set_access_ctrl(card, + card->options.isolation); if (rc) goto out; } @@ -6858,7 +6829,7 @@ netdev_features_t qeth_features_check(struct sk_buff *skb, /* Traffic with local next-hop is not eligible for some offloads: */ if (skb->ip_summed == CHECKSUM_PARTIAL && - card->options.isolation != ISOLATION_MODE_FWD) { + 
READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { netdev_features_t restricted = 0; if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index c901c942fed7..8def82336f53 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -448,19 +448,17 @@ static ssize_t qeth_dev_isolation_store(struct device *dev, rc = -EINVAL; goto out; } - rc = count; - /* defer IP assist if device is offline (until discipline->set_online)*/ - card->options.prev_isolation = card->options.isolation; - card->options.isolation = isolation; - if (qeth_card_hw_is_reachable(card)) { - int ipa_rc = qeth_set_access_ctrl_online(card, 1); - if (ipa_rc != 0) - rc = ipa_rc; - } + if (qeth_card_hw_is_reachable(card)) + rc = qeth_setadpparms_set_access_ctrl(card, isolation); + + if (!rc) + WRITE_ONCE(card->options.isolation, isolation); + out: mutex_unlock(&card->conf_mutex); - return rc; + + return rc ? rc : count; } static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, From 2ccd31f8c28fb8e99eb4a70d2d06f5d61ee2c6ab Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:01 +0200 Subject: [PATCH 1012/2056] s390/qeth: use u64_to_user_ptr() in the OAT code Use the correct helper for casting to a user pointer. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bd6489d87ede..31fa9ffc7cf4 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4907,10 +4907,8 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); if (!rc) { - if (is_compat_task()) - tmp = compat_ptr(oat_data.ptr); - else - tmp = (void __user *)(unsigned long)oat_data.ptr; + tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : + u64_to_user_ptr(oat_data.ptr); if (copy_to_user(tmp, priv.buffer, priv.response_len)) { From f7ec2d2a920edf6a5ffba21ce57181429cfc68b0 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:02 +0200 Subject: [PATCH 1013/2056] s390/qeth: clean up a magic number in the OAT callback Use the correct struct member instead of hardcoding its offset. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 31fa9ffc7cf4..c5ff7edb4ba1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4839,7 +4839,6 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_qoat_priv *priv; - char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); @@ -4848,13 +4847,12 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; - resdata = (char *)data + 28; if (resdatalen > (priv->buffer_len - priv->response_len)) return -ENOSPC; - memcpy((priv->buffer + priv->response_len), resdata, - resdatalen); + memcpy(priv->buffer + priv->response_len, + &cmd->data.setadapterparms.hdr, resdatalen); priv->response_len += resdatalen; if (cmd->data.setadapterparms.hdr.seq_no < From 4b2eee35336c18ae6ddae25f4438af0c892c835f Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:03 +0200 Subject: [PATCH 1014/2056] s390/qeth: cleanup OAT code While initially just trying to fix up the indentation, condense a few lines and get rid of a goto label. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 40 ++++++++++--------------------- 1 file changed, 12 insertions(+), 28 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c5ff7edb4ba1..053a25e34e4b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4835,17 +4835,17 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) } static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) + struct qeth_reply *reply, + unsigned long data) { struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; - struct qeth_qoat_priv *priv; + struct qeth_qoat_priv *priv = reply->param; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); if (qeth_setadpparms_inspect_rc(cmd)) return -EIO; - priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; if (resdatalen > (priv->buffer_len - priv->response_len)) @@ -4873,24 +4873,17 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) QETH_CARD_TEXT(card, 3, "qoatcmd"); - if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { - rc = -EOPNOTSUPP; - goto out; - } + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) + return -EOPNOTSUPP; - if (copy_from_user(&oat_data, udata, - sizeof(struct qeth_query_oat_data))) { - rc = -EFAULT; - goto out; - } + if (copy_from_user(&oat_data, udata, sizeof(oat_data))) + return -EFAULT; priv.buffer_len = oat_data.buffer_len; priv.response_len = 0; priv.buffer = vzalloc(oat_data.buffer_len); - if (!priv.buffer) { - rc = -ENOMEM; - goto out; - } + if (!priv.buffer) + return -ENOMEM; iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, SETADP_DATA_SIZEOF(query_oat)); @@ -4902,28 +4895,19 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) oat_req = &cmd->data.setadapterparms.data.query_oat; oat_req->subcmd_code = oat_data.command; - rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, - &priv); + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); if 
(!rc) { tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : u64_to_user_ptr(oat_data.ptr); - - if (copy_to_user(tmp, priv.buffer, - priv.response_len)) { - rc = -EFAULT; - goto out_free; - } - oat_data.response_len = priv.response_len; - if (copy_to_user(udata, &oat_data, - sizeof(struct qeth_query_oat_data))) + if (copy_to_user(tmp, priv.buffer, priv.response_len) || + copy_to_user(udata, &oat_data, sizeof(oat_data))) rc = -EFAULT; } out_free: vfree(priv.buffer); -out: return rc; } From 0973292f579a91ebd3b2866a0eed27d26717acdf Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:04 +0200 Subject: [PATCH 1015/2056] s390/qeth: unify RX-mode hashtables To keep track of the addresses programmed from an RX modeset, we have two separate hashtables (L2: mac_htable, L3: ip_mc_htable). These are never used at the same time, so unify them into a single rx_mode_addrs hashtable. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 3 +-- drivers/s390/net/qeth_core_main.c | 1 + drivers/s390/net/qeth_l2_main.c | 9 ++++----- drivers/s390/net/qeth_l3_main.c | 13 ++++++------- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index c77a87105ea8..3d54c8bbfa86 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -803,14 +803,13 @@ struct qeth_card { struct workqueue_struct *event_wq; struct workqueue_struct *cmd_wq; wait_queue_head_t wait_q; - DECLARE_HASHTABLE(mac_htable, 4); DECLARE_HASHTABLE(ip_htable, 4); DECLARE_HASHTABLE(local_addrs4, 4); DECLARE_HASHTABLE(local_addrs6, 4); spinlock_t local_addrs4_lock; spinlock_t local_addrs6_lock; struct mutex ip_lock; - DECLARE_HASHTABLE(ip_mc_htable, 4); + DECLARE_HASHTABLE(rx_mode_addrs, 4); struct work_struct rx_mode_work; struct work_struct kernel_thread_starter; spinlock_t thread_mask_lock; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 053a25e34e4b..01a280b5e8d2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1647,6 +1647,7 @@ static void qeth_setup_card(struct qeth_card *card) qeth_init_qdio_info(card); INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); + hash_init(card->rx_mode_addrs); hash_init(card->local_addrs4); hash_init(card->local_addrs6); spin_lock_init(&card->local_addrs4_lock); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2d3bca3c0141..ef7a2db7a724 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -156,7 +156,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) struct hlist_node *tmp; int i; - hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { hash_del(&mac->hnode); kfree(mac); } @@ -438,7 +438,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); struct qeth_mac *mac; - hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { + hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) { if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) { mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; return; @@ -452,7 +452,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) ether_addr_copy(mac->mac_addr, ha->addr); mac->disp_flag = 
QETH_DISP_ADDR_ADD; - hash_add(card->mac_htable, &mac->hnode, mac_hash); + hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash); } static void qeth_l2_rx_mode_work(struct work_struct *work) @@ -475,7 +475,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work) qeth_l2_add_mac(card, ha); netif_addr_unlock_bh(dev); - hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { switch (mac->disp_flag) { case QETH_DISP_ADDR_DELETE: qeth_l2_remove_mac(card, mac->mac_addr); @@ -601,7 +601,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev) return rc; } - hash_init(card->mac_htable); INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work); return 0; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d4ce653ff111..15a12487ff7a 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -58,7 +58,7 @@ static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, struct qeth_ipaddr *addr; if (query->is_multicast) { - hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key) if (qeth_l3_addr_match_ip(addr, query)) return addr; } else { @@ -239,7 +239,7 @@ static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) struct hlist_node *tmp; int i; - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { hash_del(&addr->hnode); kfree(addr); } @@ -1093,7 +1093,7 @@ static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) if (!ipm) continue; - hash_add(card->ip_mc_htable, &ipm->hnode, + hash_add(card->rx_mode_addrs, &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); } @@ -1124,8 +1124,8 @@ static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) if (!ipm) continue; - hash_add(card->ip_mc_htable, - &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + hash_add(card->rx_mode_addrs, &ipm->hnode, + qeth_l3_ipaddr_hash(ipm)); } read_unlock_bh(&in6_dev->lock); @@ -1219,7 +1219,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work) vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card); rtnl_unlock(); - hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { switch (addr->disp_flag) { case QETH_DISP_ADDR_DELETE: rc = qeth_l3_deregister_addr_entry(card, addr); @@ -1998,7 +1998,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev) } } - hash_init(card->ip_mc_htable); INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); return 0; } From 94ae20f7356b418ba0fb451f9567e40d51e915b7 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 14 Jul 2020 16:23:05 +0200 Subject: [PATCH 1016/2056] s390/qeth: constify the MPC initialization data We're not modifying these data blobs, so mark them as constant. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 2 +- drivers/s390/net/qeth_core_mpc.c | 16 ++++++++-------- drivers/s390/net/qeth_core_mpc.h | 17 ++++++++--------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 01a280b5e8d2..8a76022fceda 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -2026,7 +2026,7 @@ static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, } static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, - void *data, + const void *data, unsigned int data_length) { struct qeth_cmd_buffer *iob; diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c index e3f4866c158e..68c2588b9dcc 100644 --- a/drivers/s390/net/qeth_core_mpc.c +++ b/drivers/s390/net/qeth_core_mpc.c @@ -10,7 +10,7 @@ #include #include "qeth_core_mpc.h" -unsigned char IDX_ACTIVATE_READ[] = { +const unsigned char IDX_ACTIVATE_READ[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, @@ -18,7 +18,7 @@ unsigned char IDX_ACTIVATE_READ[] = { 0x00, 0x00 }; -unsigned char IDX_ACTIVATE_WRITE[] = { +const unsigned char IDX_ACTIVATE_WRITE[] = { 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, @@ -26,7 +26,7 @@ unsigned char IDX_ACTIVATE_WRITE[] = { 0x00, 0x00 }; -unsigned char CM_ENABLE[] = { +const unsigned char CM_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, 0x10, 0x00, 0x00, 0x01, @@ -45,7 +45,7 @@ unsigned char CM_ENABLE[] = { 0xff, 0xff, 0xff }; -unsigned char CM_SETUP[] = { +const unsigned char CM_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, 0x10, 0x00, 0x00, 0x01, @@ -65,7 +65,7 @@ unsigned char CM_SETUP[] = { 0x04, 0x06, 0xc8, 0x00 }; -unsigned char ULP_ENABLE[] = { +const unsigned char ULP_ENABLE[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b, 0x10, 0x00, 0x00, 0x01, @@ -85,7 +85,7 @@ unsigned char ULP_ENABLE[] = { 0xf1, 0x00, 0x00 }; -unsigned char ULP_SETUP[] = { +const unsigned char ULP_SETUP[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c, 0x10, 0x00, 0x00, 0x01, @@ -107,7 +107,7 @@ unsigned char ULP_SETUP[] = { 0x00, 0x00, 0x00, 0x00 }; -unsigned char DM_ACT[] = { +const unsigned char DM_ACT[] = { 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55, 0x10, 0x00, 0x00, 0x01, @@ -123,7 +123,7 @@ unsigned char DM_ACT[] = { 0x05, 0x40, 0x01, 0x01, 0x00 }; -unsigned char IPA_PDU_HEADER[] = { +const unsigned char IPA_PDU_HEADER[] = { 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 9d6f39d8f9ab..b459def0fb26 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -13,13 +13,13 @@ #include #include +extern const unsigned char IPA_PDU_HEADER[]; #define IPA_PDU_HEADER_SIZE 0x40 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e) #define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26) #define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29) #define 
QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a) -extern unsigned char IPA_PDU_HEADER[]; #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_SEQ_NO_LENGTH 4 @@ -858,7 +858,7 @@ extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); /* END OF IP Assist related definitions */ /*****************************************************************************/ -extern unsigned char CM_ENABLE[]; +extern const unsigned char CM_ENABLE[]; #define CM_ENABLE_SIZE 0x63 #define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c) #define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) @@ -868,7 +868,7 @@ extern unsigned char CM_ENABLE[]; (PDU_ENCAPSULATION(buffer) + 0x13) -extern unsigned char CM_SETUP[]; +extern const unsigned char CM_SETUP[]; #define CM_SETUP_SIZE 0x64 #define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -877,7 +877,7 @@ extern unsigned char CM_SETUP[]; #define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ (PDU_ENCAPSULATION(buffer) + 0x1a) -extern unsigned char ULP_ENABLE[]; +extern const unsigned char ULP_ENABLE[]; #define ULP_ENABLE_SIZE 0x6b #define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61) #define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c) @@ -898,7 +898,7 @@ extern unsigned char ULP_ENABLE[]; #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50) #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19) -extern unsigned char ULP_SETUP[]; +extern const unsigned char ULP_SETUP[]; #define ULP_SETUP_SIZE 0x6c #define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -910,7 +910,7 @@ extern unsigned char ULP_SETUP[]; (PDU_ENCAPSULATION(buffer) + 0x1a) -extern unsigned char DM_ACT[]; +extern const unsigned char DM_ACT[]; #define DM_ACT_SIZE 0x55 #define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c) #define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51) @@ -921,9 +921,8 @@ extern unsigned char DM_ACT[]; #define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c) #define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20) -extern unsigned char IDX_ACTIVATE_READ[]; -extern unsigned char IDX_ACTIVATE_WRITE[]; - +extern const unsigned char IDX_ACTIVATE_READ[]; +extern const unsigned char IDX_ACTIVATE_WRITE[]; #define IDX_ACTIVATE_SIZE 0x22 #define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b) #define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c) From b3ba9ae8dcd2f0a95f02131b9382cea980bae96f Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 14 Jul 2020 17:45:03 +0200 Subject: [PATCH 1017/2056] net: phy: realtek: add support for RTL8125B-internal PHY Realtek assigned a new PHY ID for the RTL8125B-internal PHY. It's however compatible with the RTL8125A-internal PHY. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/phy/realtek.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c7229d022a27..95dbe5e8e1d8 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -637,6 +637,18 @@ static struct phy_driver realtek_drvs[] = { .write_page = rtl821x_write_page, .read_mmd = rtl8125_read_mmd, .write_mmd = rtl8125_write_mmd, + }, { + PHY_ID_MATCH_EXACT(0x001cc840), + .name = "RTL8125B 2.5Gbps internal", + .get_features = rtl8125_get_features, + .config_aneg = rtl8125_config_aneg, + .read_status = rtl8125_read_status, + .suspend = genphy_suspend, + .resume = rtlgen_resume, + .read_page = rtl821x_read_page, + .write_page = rtl821x_write_page, + .read_mmd = rtl8125_read_mmd, + .write_mmd = rtl8125_write_mmd, }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", From 0439297be95111cf9ef5ece2091af16d140ce2ef Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 14 Jul 2020 17:46:03 +0200 Subject: [PATCH 1018/2056] r8169: add support for RTL8125B Add support for RTL8125B rev.b. In my tests 2.5Gbps worked well w/o firmware, however for a stable link at 1Gbps firmware revision 0.0.2 is needed. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.h | 1 + drivers/net/ethernet/realtek/r8169_main.c | 110 ++++++++++++++---- .../net/ethernet/realtek/r8169_phy_config.c | 53 +++++++++ 3 files changed, 141 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h index 422a8e5a8d3b..7be86ef5a584 100644 --- a/drivers/net/ethernet/realtek/r8169.h +++ b/drivers/net/ethernet/realtek/r8169.h @@ -65,6 +65,7 @@ enum mac_version { RTL_GIGA_MAC_VER_52, RTL_GIGA_MAC_VER_60, RTL_GIGA_MAC_VER_61, + RTL_GIGA_MAC_VER_63, RTL_GIGA_MAC_NONE }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 745e1739e9c8..975f5c10ba47 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -56,6 +56,7 @@ #define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" #define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" #define FIRMWARE_8125A_3 "rtl_nic/rtl8125a-3.fw" +#define FIRMWARE_8125B_2 "rtl_nic/rtl8125b-2.fw" /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ @@ -146,6 +147,8 @@ static const struct { [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3}, [RTL_GIGA_MAC_VER_60] = {"RTL8125A" }, [RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3}, + /* reserve 62 for CFG_METHOD_4 in the vendor driver */ + [RTL_GIGA_MAC_VER_63] = {"RTL8125B", FIRMWARE_8125B_2}, }; static const struct pci_device_id rtl8169_pci_tbl[] = { @@ -335,6 +338,7 @@ enum rtl8125_registers { IntrStatus_8125 = 0x3c, TxPoll_8125 = 0x90, MAC0_BKP = 0x19e0, + EEE_TXIDLE_TIMER_8125 = 0x6048, }; #define RX_VLAN_INNER_8125 BIT(22) @@ -655,6 +659,7 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3); MODULE_FIRMWARE(FIRMWARE_8107E_1); MODULE_FIRMWARE(FIRMWARE_8107E_2); MODULE_FIRMWARE(FIRMWARE_8125A_3); +MODULE_FIRMWARE(FIRMWARE_8125B_2); static inline struct device *tp_to_dev(struct rtl8169_private *tp) { @@ -966,7 +971,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val) case RTL_GIGA_MAC_VER_31: r8168dp_2_mdio_write(tp, location, val); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... 
RTL_GIGA_MAC_VER_63: r8168g_mdio_write(tp, location, val); break; default: @@ -983,7 +988,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location) case RTL_GIGA_MAC_VER_28: case RTL_GIGA_MAC_VER_31: return r8168dp_2_mdio_read(tp, location); - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: return r8168g_mdio_read(tp, location); default: return r8169_mdio_read(tp, location); @@ -1389,7 +1394,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) break; case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_37: - case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63: options = RTL_R8(tp, Config2) & ~PME_SIGNAL; if (wolopts) options |= PME_SIGNAL; @@ -1935,7 +1940,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) u16 val; enum mac_version ver; } mac_info[] = { - /* 8125 family. */ + /* 8125B family. */ + { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 }, + + /* 8125A family. */ { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 }, { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 }, @@ -2073,6 +2081,17 @@ static void rtl8125a_config_eee_mac(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1)); } +static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp) +{ + RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20); +} + +static void rtl8125b_config_eee_mac(struct rtl8169_private *tp) +{ + rtl8125_set_eee_txidle_timer(tp); + r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0)); +} + static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) { const u16 w[] = { @@ -2174,7 +2193,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_32: case RTL_GIGA_MAC_VER_33: case RTL_GIGA_MAC_VER_34: - case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63: RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); break; @@ -2208,11 +2227,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - case RTL_GIGA_MAC_VER_52: - case RTL_GIGA_MAC_VER_60: - case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80); break; case RTL_GIGA_MAC_VER_40: @@ -2244,11 +2259,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_46: case RTL_GIGA_MAC_VER_47: case RTL_GIGA_MAC_VER_48: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - case RTL_GIGA_MAC_VER_52: - case RTL_GIGA_MAC_VER_60: - case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63: RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0); break; case RTL_GIGA_MAC_VER_40: @@ -2279,7 +2290,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52: RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); break; - case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_60 ... 
RTL_GIGA_MAC_VER_63: RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST); break; default: @@ -2442,6 +2453,12 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond) return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY; } +DECLARE_RTL_COND(rtl_rxtx_empty_cond_2) +{ + /* IntrMitigate has new functionality on RTL8125 */ + return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103; +} + static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) { switch (tp->mac_version) { @@ -2452,6 +2469,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); break; + case RTL_GIGA_MAC_VER_63: + RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42); + rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42); + break; default: break; } @@ -3523,18 +3545,27 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) /* disable new tx descriptor format */ r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000); - r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); - r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200); + else + r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400); + + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000); + else + r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020); + r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c); r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033); r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040); r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030); r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); - r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067); + r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); - r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0); + r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001); udelay(1); @@ -3545,7 +3576,10 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10); - rtl8125a_config_eee_mac(tp); + if (tp->mac_version == RTL_GIGA_MAC_VER_63) + rtl8125b_config_eee_mac(tp); + else + rtl8125a_config_eee_mac(tp); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); udelay(10); @@ -3617,6 +3651,26 @@ static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) rtl_hw_start_8125_common(tp); } +static void rtl_hw_start_8125b(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8125b[] = { + { 0x0b, 0xffff, 0xa908 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x4b, 0xffff, 0xa908 }, + { 0x5e, 0xffff, 0x20eb }, + { 0x22, 0x0030, 0x0020 }, + { 0x62, 0x0030, 0x0020 }, + }; + + rtl_set_def_aspm_entry_latency(tp); + rtl_hw_aspm_clkreq_enable(tp, false); + + rtl_ephy_init(tp, e_info_8125b); + rtl_hw_start_8125_common(tp); + + rtl_hw_aspm_clkreq_enable(tp, true); +} + static void rtl_hw_config(struct rtl8169_private *tp) { static const rtl_generic_fct hw_configs[] = { @@ -3667,6 +3721,7 @@ static void rtl_hw_config(struct rtl8169_private *tp) [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117, [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1, [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2, + [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b, }; if 
(hw_configs[tp->mac_version]) @@ -3753,6 +3808,15 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) netdev_update_features(dev); rtl_jumbo_config(tp); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + rtl8125_set_eee_txidle_timer(tp); + break; + default: + break; + } + return 0; } @@ -3899,7 +3963,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down) RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq); rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); break; - case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63: rtl_enable_rxdvgate(tp); fsleep(2000); break; @@ -5075,7 +5139,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp) case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48: rtl_hw_init_8168g(tp); break; - case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: rtl_hw_init_8125(tp); break; default: diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c index bc8bf48bdbb0..913d030d73eb 100644 --- a/drivers/net/ethernet/realtek/r8169_phy_config.c +++ b/drivers/net/ethernet/realtek/r8169_phy_config.c @@ -97,6 +97,14 @@ static void rtl8125a_config_eee_phy(struct phy_device *phydev) phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); } +static void rtl8125b_config_eee_phy(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000); + phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000); + phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000); + phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000); +} + static void rtl8169s_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1147,6 +1155,11 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp, rtl_writephy_batch(phydev, phy_reg_init); } +static void rtl8125_legacy_force_mode(struct phy_device *phydev) +{ + phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0); +} + static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev) { @@ -1250,6 +1263,45 @@ static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp, rtl8125a_config_eee_phy(phydev); } +static void rtl8125b_hw_phy_config(struct rtl8169_private *tp, + struct phy_device *phydev) +{ + r8169_apply_firmware(tp); + + phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800); + phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090); + phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001); + + phy_write(phydev, 0x1f, 0x0b87); + phy_write(phydev, 0x16, 0x80f5); + phy_write(phydev, 0x17, 0x760e); + phy_write(phydev, 0x16, 0x8107); + phy_write(phydev, 0x17, 0x360e); + phy_write(phydev, 0x16, 0x8551); + phy_modify(phydev, 0x17, 0xff00, 0x0800); + phy_write(phydev, 0x1f, 0x0000); + + phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000); + phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300); + + r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417); + r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417); + + phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040); + 
phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000); + + rtl8125_legacy_force_mode(phydev); + rtl8125b_config_eee_phy(phydev); +} + void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, enum mac_version ver) { @@ -1308,6 +1360,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev, [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config, [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config, [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config, + [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config, }; if (phy_configs[ver]) From 81adcd65b685fb9ac6d16d89f1ed889f22b5777b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 14 Jul 2020 20:35:01 +0200 Subject: [PATCH 1019/2056] ksz884x: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'ksz_alloc_desc()', GFP_KERNEL can be used because a few lines below, GFP_KERNEL is also used in the 'ksz_alloc_soft_desc()' calls. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ksz884x.c | 68 ++++++++++++--------------- 1 file changed, 30 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 2ce7304d3753..bb646b65cc95 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4390,9 +4390,9 @@ static int ksz_alloc_desc(struct dev_info *adapter) DESC_ALIGNMENT; adapter->desc_pool.alloc_virt = - pci_zalloc_consistent(adapter->pdev, - adapter->desc_pool.alloc_size, - &adapter->desc_pool.dma_addr); + dma_alloc_coherent(&adapter->pdev->dev, + adapter->desc_pool.alloc_size, + &adapter->desc_pool.dma_addr, GFP_KERNEL); if (adapter->desc_pool.alloc_virt == NULL) { adapter->desc_pool.alloc_size = 0; return 1; @@ -4431,7 +4431,8 @@ static int ksz_alloc_desc(struct dev_info *adapter) static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf, int direction) { - pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction); + dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len, + direction); dev_kfree_skb(dma_buf->skb); dma_buf->skb = NULL; dma_buf->dma = 0; @@ -4456,16 +4457,15 @@ static void ksz_init_rx_buffers(struct dev_info *adapter) dma_buf = DMA_BUFFER(desc); if (dma_buf->skb && dma_buf->len != adapter->mtu) - free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE); + free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE); dma_buf->len = adapter->mtu; if (!dma_buf->skb) dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC); if (dma_buf->skb && !dma_buf->dma) - dma_buf->dma = pci_map_single( - adapter->pdev, - skb_tail_pointer(dma_buf->skb), - dma_buf->len, - PCI_DMA_FROMDEVICE); + dma_buf->dma = dma_map_single(&adapter->pdev->dev, + skb_tail_pointer(dma_buf->skb), + dma_buf->len, + DMA_FROM_DEVICE); /* Set descriptor. */ set_rx_buf(desc, dma_buf->dma); @@ -4543,11 +4543,10 @@ static void ksz_free_desc(struct dev_info *adapter) /* Free memory. */ if (adapter->desc_pool.alloc_virt) - pci_free_consistent( - adapter->pdev, - adapter->desc_pool.alloc_size, - adapter->desc_pool.alloc_virt, - adapter->desc_pool.dma_addr); + dma_free_coherent(&adapter->pdev->dev, + adapter->desc_pool.alloc_size, + adapter->desc_pool.alloc_virt, + adapter->desc_pool.dma_addr); /* Reset resource pool. */ adapter->desc_pool.alloc_size = 0; @@ -4590,12 +4589,10 @@ static void ksz_free_buffers(struct dev_info *adapter, static void ksz_free_mem(struct dev_info *adapter) { /* Free transmit buffers. */ - ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, - PCI_DMA_TODEVICE); + ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE); /* Free receive buffers. */ - ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, - PCI_DMA_FROMDEVICE); + ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE); /* Free descriptors. 
*/ ksz_free_desc(adapter); @@ -4657,9 +4654,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) dma_buf->len = skb_headlen(skb); - dma_buf->dma = pci_map_single( - hw_priv->pdev, skb->data, dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, + dma_buf->len, DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); @@ -4676,11 +4672,10 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) dma_buf = DMA_BUFFER(desc); dma_buf->len = skb_frag_size(this_frag); - dma_buf->dma = pci_map_single( - hw_priv->pdev, - skb_frag_address(this_frag), - dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, + skb_frag_address(this_frag), + dma_buf->len, + DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); @@ -4700,9 +4695,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev) } else { dma_buf->len = len; - dma_buf->dma = pci_map_single( - hw_priv->pdev, skb->data, dma_buf->len, - PCI_DMA_TODEVICE); + dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data, + dma_buf->len, DMA_TO_DEVICE); set_tx_buf(desc, dma_buf->dma); set_tx_len(desc, dma_buf->len); } @@ -4756,9 +4750,8 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal) } dma_buf = DMA_BUFFER(desc); - pci_unmap_single( - hw_priv->pdev, dma_buf->dma, dma_buf->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, + dma_buf->len, DMA_TO_DEVICE); /* This descriptor contains the last buffer in the packet. */ if (dma_buf->skb) { @@ -4991,9 +4984,8 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw, packet_len = status.rx.frame_len - 4; dma_buf = DMA_BUFFER(desc); - pci_dma_sync_single_for_cpu( - hw_priv->pdev, dma_buf->dma, packet_len + 4, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma, + packet_len + 4, DMA_FROM_DEVICE); do { /* skb->data != skb->head */ @@ -6935,8 +6927,8 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) result = -ENODEV; - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || - pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) || + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) return result; reg_base = pci_resource_start(pdev, 0); From 641ca08547f83bd265477150a66cf2378bc98ed7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:19 -0700 Subject: [PATCH 1020/2056] nfp: convert to new udp_tunnel_nic infra NFP conversion is pretty straightforward. We want to be able to sleep, and only get callbacks when the device is open. NFP did not ask for port replay when ports were removed, now new infra will provide this feature for free. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 5 - .../ethernet/netronome/nfp/nfp_net_common.c | 126 +++++------------- 2 files changed, 34 insertions(+), 97 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index ff4438478ea9..df5b748be068 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -575,8 +575,6 @@ struct nfp_net_dp { * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter - * @vxlan_ports: VXLAN ports for RX inner csum offload communicated to HW - * @vxlan_usecnt: IPv4/IPv6 VXLAN port use counts * @qcp_cfg: Pointer to QCP queue used for configuration notification * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues @@ -661,9 +659,6 @@ struct nfp_net { u32 tx_coalesce_usecs; u32 tx_coalesce_max_frames; - __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS]; - u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS]; - u8 __iomem *qcp_cfg; u8 __iomem *tx_bar; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 83ff18140bfe..44608873d3d9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2867,15 +2867,6 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) for (r = 0; r < nn->dp.num_rx_rings; r++) nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); - /* Since reconfiguration requests while NFP is down are ignored we - * have to wipe the entire VXLAN configuration and reinitialize it. - */ - if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) { - memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); - memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - udp_tunnel_get_rx_info(nn->dp.netdev); - } - return 0; } @@ -3566,87 +3557,6 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len) return 0; } -/** - * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW - * @nn: NFP Net device to reconfigure - * @idx: Index into the port table where new port should be written - * @port: UDP port to configure (pass zero to remove VXLAN port) - */ -static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) -{ - int i; - - nn->vxlan_ports[idx] = port; - - if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN)) - return; - - BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); - for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) - nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port), - be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | - be16_to_cpu(nn->vxlan_ports[i])); - - nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN); -} - -/** - * nfp_net_find_vxlan_idx() - find table entry of the port or a free one - * @nn: NFP Network structure - * @port: UDP port to look for - * - * Return: if the port is already in the table -- it's position; - * if the port is not in the table -- free position to use; - * if the table is full -- -ENOSPC. 
- */ -static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) -{ - int i, free_idx = -ENOSPC; - - for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) { - if (nn->vxlan_ports[i] == port) - return i; - if (!nn->vxlan_usecnt[i]) - free_idx = i; - } - - return free_idx; -} - -static void nfp_net_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct nfp_net *nn = netdev_priv(netdev); - int idx; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - idx = nfp_net_find_vxlan_idx(nn, ti->port); - if (idx == -ENOSPC) - return; - - if (!nn->vxlan_usecnt[idx]++) - nfp_net_set_vxlan_port(nn, idx, ti->port); -} - -static void nfp_net_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct nfp_net *nn = netdev_priv(netdev); - int idx; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - idx = nfp_net_find_vxlan_idx(nn, ti->port); - if (idx == -ENOSPC || !nn->vxlan_usecnt[idx]) - return; - - if (!--nn->vxlan_usecnt[idx]) - nfp_net_set_vxlan_port(nn, idx, 0); -} - static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf) { struct bpf_prog *prog = bpf->prog; @@ -3757,12 +3667,43 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, .ndo_get_phys_port_name = nfp_net_get_phys_port_name, - .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, - .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_bpf = nfp_net_xdp, .ndo_get_devlink_port = nfp_devlink_get_devlink_port, }; +static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; + + BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1); + for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) { + struct udp_tunnel_info ti0, ti1; + + udp_tunnel_nic_get_port(netdev, table, i, &ti0); + udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1); + + nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port), + be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port)); + } + + return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN); +} + +static const struct udp_tunnel_nic_info nfp_udp_tunnels = { + .sync_table = nfp_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { + .n_entries = NFP_NET_N_VXLAN_PORTS, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, + }, + }, +}; + /** * nfp_net_info() - Print general info about the NIC * @nn: NFP Net device to reconfigure @@ -4010,6 +3951,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn) if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->udp_tunnel_nic_info = &nfp_udp_tunnels; nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; } if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { From 8f0545d232ca4eb1449584c68725d02f03a1d1a9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:20 -0700 Subject: [PATCH 1021/2056] be2net: convert to new udp_tunnel_nic infra Convert be2net to new udp_tunnel_nic infra. NIC only takes one VxLAN port. Remove the port tracking using a list. The warning in be_work_del_vxlan_port() looked suspicious - like the driver expected ports to be removed in order of addition. be2net unregisters ports when going down and re-registers them (for skyhawk) when coming up, but it never checks if the device is up in the add_port / del_port callbacks. 
Make it use UDP_TUNNEL_NIC_INFO_OPEN_ONLY. Sadly this driver calls its own open/close functions directly so the udp_tunnel_nic_reset_ntf() workaround is needed. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 5 - drivers/net/ethernet/emulex/benet/be_main.c | 198 ++++---------------- 2 files changed, 38 insertions(+), 165 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 6e9022083004..8689d4a51fe5 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -654,8 +654,6 @@ struct be_adapter { u8 hba_port_num; u16 pvid; __be16 vxlan_port; /* offloaded vxlan port num */ - int vxlan_port_count; /* active vxlan port count */ - struct list_head vxlan_port_list; /* vxlan port list */ struct phy_info phy; u8 wol_cap; bool wol_en; @@ -679,9 +677,6 @@ struct be_adapter { struct be_cmd_work { struct work_struct work; struct be_adapter *adapter; - union { - __be16 vxlan_port; - } info; }; #define be_physfn(adapter) (!adapter->virtfn) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index e26f59336cfd..676e437d78f6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3829,8 +3829,8 @@ static int be_open(struct net_device *netdev) be_link_status_update(adapter, link_status); netif_tx_start_all_queues(netdev); - if (skyhawk_chip(adapter)) - udp_tunnel_get_rx_info(netdev); + + udp_tunnel_nic_reset_ntf(netdev); return 0; err: @@ -3967,18 +3967,23 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -static int be_enable_vxlan_offloads(struct be_adapter *adapter) +/* VxLAN offload Notes: + * + * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't + * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload + * is expected to work across all types of IP tunnels once exported. Skyhawk + * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN + * offloads in hw_enc_features only when a VxLAN port is added. If other (non + * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for + * those other tunnels are unexported on the fly through ndo_features_check(). 
+ */ +static int be_vxlan_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - struct net_device *netdev = adapter->netdev; + struct be_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->pdev->dev; - struct be_vxlan_port *vxlan_port; - __be16 port; int status; - vxlan_port = list_first_entry(&adapter->vxlan_port_list, - struct be_vxlan_port, list); - port = vxlan_port->port; - status = be_cmd_manage_iface(adapter, adapter->if_handle, OP_CONVERT_NORMAL_TO_TUNNEL); if (status) { @@ -3987,25 +3992,26 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter) } adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS; - status = be_cmd_set_vxlan_port(adapter, port); + status = be_cmd_set_vxlan_port(adapter, ti->port); if (status) { dev_warn(dev, "Failed to add VxLAN port\n"); return status; } - adapter->vxlan_port = port; + adapter->vxlan_port = ti->port; netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL; dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n", - be16_to_cpu(port)); + be16_to_cpu(ti->port)); return 0; } -static void be_disable_vxlan_offloads(struct be_adapter *adapter) +static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - struct net_device *netdev = adapter->netdev; + struct be_adapter *adapter = netdev_priv(netdev); if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) be_cmd_manage_iface(adapter, adapter->if_handle, @@ -4018,8 +4024,19 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) adapter->vxlan_port = 0; netdev->hw_enc_features = 0; + return 0; } +static const struct udp_tunnel_nic_info be_udp_tunnels = { + .set_port = be_vxlan_set_port, + .unset_port = be_vxlan_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, struct be_resources *vft_res) { @@ -4135,7 +4152,7 @@ static int be_clear(struct be_adapter *adapter) &vft_res); } - be_disable_vxlan_offloads(adapter); + be_vxlan_unset_port(adapter->netdev, 0, 0, NULL); be_if_destroy(adapter); @@ -5053,147 +5070,6 @@ static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter, return work; } -/* VxLAN offload Notes: - * - * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't - * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload - * is expected to work across all types of IP tunnels once exported. Skyhawk - * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN - * offloads in hw_enc_features only when a VxLAN port is added. If other (non - * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for - * those other tunnels are unexported on the fly through ndo_features_check(). - * - * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack - * adds more than one port, disable offloads and re-enable them again when - * there's only one port left. We maintain a list of ports for this purpose. 
- */ -static void be_work_add_vxlan_port(struct work_struct *work) -{ - struct be_cmd_work *cmd_work = - container_of(work, struct be_cmd_work, work); - struct be_adapter *adapter = cmd_work->adapter; - struct device *dev = &adapter->pdev->dev; - __be16 port = cmd_work->info.vxlan_port; - struct be_vxlan_port *vxlan_port; - int status; - - /* Bump up the alias count if it is an existing port */ - list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) { - if (vxlan_port->port == port) { - vxlan_port->port_aliases++; - goto done; - } - } - - /* Add a new port to our list. We don't need a lock here since port - * add/delete are done only in the context of a single-threaded work - * queue (be_wq). - */ - vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL); - if (!vxlan_port) - goto done; - - vxlan_port->port = port; - INIT_LIST_HEAD(&vxlan_port->list); - list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list); - adapter->vxlan_port_count++; - - if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) { - dev_info(dev, - "Only one UDP port supported for VxLAN offloads\n"); - dev_info(dev, "Disabling VxLAN offloads\n"); - goto err; - } - - if (adapter->vxlan_port_count > 1) - goto done; - - status = be_enable_vxlan_offloads(adapter); - if (!status) - goto done; - -err: - be_disable_vxlan_offloads(adapter); -done: - kfree(cmd_work); - return; -} - -static void be_work_del_vxlan_port(struct work_struct *work) -{ - struct be_cmd_work *cmd_work = - container_of(work, struct be_cmd_work, work); - struct be_adapter *adapter = cmd_work->adapter; - __be16 port = cmd_work->info.vxlan_port; - struct be_vxlan_port *vxlan_port; - - /* Nothing to be done if a port alias is being deleted */ - list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) { - if (vxlan_port->port == port) { - if (vxlan_port->port_aliases) { - vxlan_port->port_aliases--; - goto done; - } - break; - } - } - - /* No port aliases left; delete the port from the list */ - list_del(&vxlan_port->list); - adapter->vxlan_port_count--; - - /* Disable VxLAN offload if this is the offloaded port */ - if (adapter->vxlan_port == vxlan_port->port) { - WARN_ON(adapter->vxlan_port_count); - be_disable_vxlan_offloads(adapter); - dev_info(&adapter->pdev->dev, - "Disabled VxLAN offloads for UDP port %d\n", - be16_to_cpu(port)); - goto out; - } - - /* If only 1 port is left, re-enable VxLAN offload */ - if (adapter->vxlan_port_count == 1) - be_enable_vxlan_offloads(adapter); - -out: - kfree(vxlan_port); -done: - kfree(cmd_work); -} - -static void be_cfg_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti, - void (*func)(struct work_struct *)) -{ - struct be_adapter *adapter = netdev_priv(netdev); - struct be_cmd_work *cmd_work; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) - return; - - cmd_work = be_alloc_work(adapter, func); - if (cmd_work) { - cmd_work->info.vxlan_port = ti->port; - queue_work(be_wq, &cmd_work->work); - } -} - -static void be_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port); -} - -static void be_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port); -} - static netdev_features_t be_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -5309,8 +5185,8 @@ static const struct net_device_ops be_netdev_ops = { #endif .ndo_bridge_setlink = 
be_ndo_bridge_setlink, .ndo_bridge_getlink = be_ndo_bridge_getlink, - .ndo_udp_tunnel_add = be_add_vxlan_port, - .ndo_udp_tunnel_del = be_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = be_features_check, .ndo_get_phys_port_id = be_get_phys_port_id, }; @@ -5342,6 +5218,9 @@ static void be_netdev_init(struct net_device *netdev) netdev->ethtool_ops = &be_ethtool_ops; + if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter)) + netdev->udp_tunnel_nic_info = &be_udp_tunnels; + /* MTU range: 256 - 9000 */ netdev->min_mtu = BE_MIN_MTU; netdev->max_mtu = BE_MAX_MTU; @@ -5819,7 +5698,6 @@ static int be_drv_init(struct be_adapter *adapter) /* Must be a power of 2 or else MODULO will BUG_ON */ adapter->be_get_temp_freq = 64; - INIT_LIST_HEAD(&adapter->vxlan_port_list); return 0; free_rx_filter: From b5c5f8d06292e2dd9961c4b88fde9ae0ea458d97 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:21 -0700 Subject: [PATCH 1022/2056] xgbe: switch to more generic VxLAN detection Instead of looping though the list of ports just check if the geometry of the packet is correct for VxLAN. HW most likely doesn't care about the exact port, anyway, since only first port is actually offloaded, and this way we won't have to maintain the port list at all. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 27 +++++++----------------- 1 file changed, 8 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a87264f95f1a..dfeddf5fbf78 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1773,13 +1773,8 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) return 0; } -static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb) +static bool xgbe_is_vxlan(struct sk_buff *skb) { - struct xgbe_vxlan_data *vdata; - - if (pdata->vxlan_force_disable) - return false; - if (!skb->encapsulation) return false; @@ -1801,19 +1796,13 @@ static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb) return false; } - /* See if we have the UDP port in our list */ - list_for_each_entry(vdata, &pdata->vxlan_ports, list) { - if ((skb->protocol == htons(ETH_P_IP)) && - (vdata->sa_family == AF_INET) && - (vdata->port == udp_hdr(skb)->dest)) - return true; - else if ((skb->protocol == htons(ETH_P_IPV6)) && - (vdata->sa_family == AF_INET6) && - (vdata->port == udp_hdr(skb)->dest)) - return true; - } + if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + (skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr))) + return false; - return false; + return true; } static int xgbe_is_tso(struct sk_buff *skb) @@ -1864,7 +1853,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE, 1); - if (xgbe_is_vxlan(pdata, skb)) + if (xgbe_is_vxlan(skb)) XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN, 1); From 4df587ab87b0b09f4873964f71020cf5c12ea878 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:22 -0700 Subject: [PATCH 1023/2056] xgbe: convert to new udp_tunnel_nic infra Make use of the new udp_tunnel_nic infra. 
Don't clear the features when VxLAN port is not present to make all drivers behave the same. Driver will now (until we address the problem in the core) leave the RX UDP tunnel feature always on, since this is what most drivers do. Remove the list of VxLAN ports, just program the one core told us to. The driver seem to want to clear the VxLAN ports on close but it doesn't seem to flush the port list properly so it'd get wrong use counts after close/open. Again since it calls its own open handler we need the reset notification hack. v2: - fix kbuild warning Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 252 +++------------------- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 12 +- drivers/net/ethernet/amd/xgbe/xgbe.h | 13 +- 3 files changed, 32 insertions(+), 245 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index dfeddf5fbf78..43294a148f8a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -904,114 +904,40 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) } } -static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata) +static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - struct net_device *netdev = pdata->netdev; + struct xgbe_prv_data *pdata = netdev_priv(netdev); - if (!pdata->vxlan_offloads_set) - return; + pdata->vxlan_port = be16_to_cpu(ti->port); + pdata->hw_if.enable_vxlan(pdata); - netdev_info(netdev, "disabling VXLAN offloads\n"); - - netdev->hw_enc_features &= ~(NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_GRO | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM); - - netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM); - - pdata->vxlan_offloads_set = 0; + return 0; } -static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata) +static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) { - if (!pdata->vxlan_port_set) - return; + struct xgbe_prv_data *pdata = netdev_priv(netdev); pdata->hw_if.disable_vxlan(pdata); - - pdata->vxlan_port_set = 0; pdata->vxlan_port = 0; + + return 0; } -static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata) +static const struct udp_tunnel_nic_info xgbe_udp_tunnels = { + .set_port = xgbe_vxlan_set_port, + .unset_port = xgbe_vxlan_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + +const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void) { - xgbe_disable_vxlan_offloads(pdata); - - xgbe_disable_vxlan_hw(pdata); -} - -static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata) -{ - struct net_device *netdev = pdata->netdev; - - if (pdata->vxlan_offloads_set) - return; - - netdev_info(netdev, "enabling VXLAN offloads\n"); - - netdev->hw_enc_features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_GRO | - pdata->vxlan_features; - - netdev->features |= pdata->vxlan_features; - - pdata->vxlan_offloads_set = 1; -} - -static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata) -{ - struct xgbe_vxlan_data *vdata; - - if (pdata->vxlan_port_set) - return; - - if (list_empty(&pdata->vxlan_ports)) - return; - - vdata = 
list_first_entry(&pdata->vxlan_ports, - struct xgbe_vxlan_data, list); - - pdata->vxlan_port_set = 1; - pdata->vxlan_port = be16_to_cpu(vdata->port); - - pdata->hw_if.enable_vxlan(pdata); -} - -static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata) -{ - /* VXLAN acceleration desired? */ - if (!pdata->vxlan_features) - return; - - /* VXLAN acceleration possible? */ - if (pdata->vxlan_force_disable) - return; - - xgbe_enable_vxlan_hw(pdata); - - xgbe_enable_vxlan_offloads(pdata); -} - -static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata) -{ - xgbe_disable_vxlan_hw(pdata); - - if (pdata->vxlan_features) - xgbe_enable_vxlan_offloads(pdata); - - pdata->vxlan_force_disable = 0; + return &xgbe_udp_tunnels; } static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) @@ -1406,7 +1332,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata) hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); - udp_tunnel_get_rx_info(netdev); + udp_tunnel_nic_reset_ntf(netdev); netif_tx_start_all_queues(netdev); @@ -1447,7 +1373,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) xgbe_stop_timers(pdata); flush_workqueue(pdata->dev_workqueue); - xgbe_reset_vxlan_accel(pdata); + xgbe_vxlan_unset_port(netdev, 0, 0, NULL); hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); @@ -2260,23 +2186,12 @@ static netdev_features_t xgbe_fix_features(struct net_device *netdev, netdev_features_t features) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - netdev_features_t vxlan_base, vxlan_mask; + netdev_features_t vxlan_base; vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT; - vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM; - pdata->vxlan_features = features & vxlan_mask; - - /* Only fix VXLAN-related features */ - if (!pdata->vxlan_features) - return features; - - /* If VXLAN isn't supported then clear any features: - * This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets - * automatically set if ndo_udp_tunnel_add is set. 
- */ if (!pdata->hw_feat.vxn) - return features & ~vxlan_mask; + return features; /* VXLAN CSUM requires VXLAN base */ if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) && @@ -2307,15 +2222,6 @@ static netdev_features_t xgbe_fix_features(struct net_device *netdev, } } - pdata->vxlan_features = features & vxlan_mask; - - /* Adjust UDP Tunnel based on current state */ - if (pdata->vxlan_force_disable) { - netdev_notice(netdev, - "VXLAN acceleration disabled, turning off udp tunnel features\n"); - features &= ~vxlan_mask; - } - return features; } @@ -2325,14 +2231,12 @@ static int xgbe_set_features(struct net_device *netdev, struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; - netdev_features_t udp_tunnel; int ret = 0; rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; - udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL; if ((features & NETIF_F_RXHASH) && !rxhash) ret = hw_if->enable_rss(pdata); @@ -2356,11 +2260,6 @@ static int xgbe_set_features(struct net_device *netdev, else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) hw_if->disable_rx_vlan_filtering(pdata); - if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel) - xgbe_enable_vxlan_accel(pdata); - else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel) - xgbe_disable_vxlan_accel(pdata); - pdata->netdev_features = features; DBGPR("<--xgbe_set_features\n"); @@ -2368,101 +2267,6 @@ static int xgbe_set_features(struct net_device *netdev, return 0; } -static void xgbe_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_vxlan_data *vdata; - - if (!pdata->hw_feat.vxn) - return; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - pdata->vxlan_port_count++; - - netif_dbg(pdata, drv, netdev, - "adding VXLAN tunnel, family=%hx/port=%hx\n", - ti->sa_family, be16_to_cpu(ti->port)); - - if (pdata->vxlan_force_disable) - return; - - vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC); - if (!vdata) { - /* Can no longer properly track VXLAN ports */ - pdata->vxlan_force_disable = 1; - netif_dbg(pdata, drv, netdev, - "internal error, disabling VXLAN accelerations\n"); - - xgbe_disable_vxlan_accel(pdata); - - return; - } - vdata->sa_family = ti->sa_family; - vdata->port = ti->port; - - list_add_tail(&vdata->list, &pdata->vxlan_ports); - - /* First port added? 
*/ - if (pdata->vxlan_port_count == 1) { - xgbe_enable_vxlan_accel(pdata); - - return; - } -} - -static void xgbe_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_vxlan_data *vdata; - - if (!pdata->hw_feat.vxn) - return; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - netif_dbg(pdata, drv, netdev, - "deleting VXLAN tunnel, family=%hx/port=%hx\n", - ti->sa_family, be16_to_cpu(ti->port)); - - /* Don't need safe version since loop terminates with deletion */ - list_for_each_entry(vdata, &pdata->vxlan_ports, list) { - if (vdata->sa_family != ti->sa_family) - continue; - - if (vdata->port != ti->port) - continue; - - list_del(&vdata->list); - kfree(vdata); - - break; - } - - pdata->vxlan_port_count--; - if (!pdata->vxlan_port_count) { - xgbe_reset_vxlan_accel(pdata); - - return; - } - - if (pdata->vxlan_force_disable) - return; - - /* See if VXLAN tunnel id needs to be changed */ - vdata = list_first_entry(&pdata->vxlan_ports, - struct xgbe_vxlan_data, list); - if (pdata->vxlan_port == be16_to_cpu(vdata->port)) - return; - - pdata->vxlan_port = be16_to_cpu(vdata->port); - pdata->hw_if.set_vxlan_id(pdata); -} - static netdev_features_t xgbe_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) @@ -2492,8 +2296,8 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_setup_tc = xgbe_setup_tc, .ndo_fix_features = xgbe_fix_features, .ndo_set_features = xgbe_set_features, - .ndo_udp_tunnel_add = xgbe_udp_tunnel_add, - .ndo_udp_tunnel_del = xgbe_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = xgbe_features_check, }; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 2a70714a791d..a218dc6f2edd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -192,7 +192,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev) mutex_init(&pdata->i2c_mutex); init_completion(&pdata->i2c_complete); init_completion(&pdata->mdio_complete); - INIT_LIST_HEAD(&pdata->vxlan_ports); pdata->msg_enable = netif_msg_init(debug, default_msg_level); @@ -366,17 +365,12 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_RX_UDP_TUNNEL_PORT; + NETIF_F_GSO_UDP_TUNNEL_CSUM; netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_RX_UDP_TUNNEL_PORT; + NETIF_F_GSO_UDP_TUNNEL_CSUM; - pdata->vxlan_offloads_set = 1; - pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | - NETIF_F_RX_UDP_TUNNEL_PORT; + netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info(); } netdev->vlan_features |= NETIF_F_SG | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 5897e46faca5..ba8321ec1ee7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -1014,12 +1014,6 @@ struct xgbe_version_data { unsigned int an_cdr_workaround; }; -struct xgbe_vxlan_data { - struct list_head list; - sa_family_t sa_family; - __be16 port; -}; - struct xgbe_prv_data { struct net_device *netdev; struct pci_dev *pcidev; @@ -1172,13 +1166,7 @@ struct xgbe_prv_data { u32 rss_options; /* VXLAN settings */ - unsigned int vxlan_port_set; - unsigned int vxlan_offloads_set; - unsigned int 
vxlan_force_disable; - unsigned int vxlan_port_count; - struct list_head vxlan_ports; u16 vxlan_port; - netdev_features_t vxlan_features; /* Netdev related settings */ unsigned char mac_addr[ETH_ALEN]; @@ -1321,6 +1309,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *); void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *); const struct net_device_ops *xgbe_get_netdev_ops(void); const struct ethtool_ops *xgbe_get_ethtool_ops(void); +const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void); #ifdef CONFIG_AMD_XGBE_DCB const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void); From 085c5c42e3654eb24c0f6fc6dc703f76fff1e6ce Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:23 -0700 Subject: [PATCH 1024/2056] bnx2x: convert to new udp_tunnel_nic infra Fairly straightforward conversion - no need to keep track of the use count, and replay when ports get removed, also callbacks can just sleep. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 8 +- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 8 +- .../net/ethernet/broadcom/bnx2x/bnx2x_main.c | 136 ++++-------------- 3 files changed, 29 insertions(+), 123 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index dee61d96680e..d04994840b87 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1287,7 +1287,6 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, - BNX2X_SP_RTNL_CHANGE_UDP_PORT, BNX2X_SP_RTNL_UPDATE_SVID, }; @@ -1343,11 +1342,6 @@ enum bnx2x_udp_port_type { BNX2X_UDP_PORT_MAX, }; -struct bnx2x_udp_tunnel { - u16 dst_port; - u8 count; -}; - struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1855,7 +1849,7 @@ struct bnx2x { bool accept_any_vlan; /* Vxlan/Geneve related information */ - struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; + u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; }; /* Tx queues may be less or equal to Rx queues */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a9817cd283fe..7e4c93be4451 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -960,12 +960,12 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { - port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; start_params->vxlan_dst_port = port; } - if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { - port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; start_params->geneve_dst_port = port; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index fd957212bc1b..7f24d2689fdd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10152,7 +10152,6 @@ static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params 
*switch_update_params; struct bnx2x_func_state_params func_params = {NULL}; - struct bnx2x_udp_tunnel *udp_tunnel; u16 vxlan_port = 0, geneve_port = 0; int rc; @@ -10169,15 +10168,13 @@ static int bnx2x_udp_port_update(struct bnx2x *bp) __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes); - if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { - udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; - geneve_port = udp_tunnel->dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) { + geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; switch_update_params->geneve_dst_port = geneve_port; } - if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { - udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; - vxlan_port = udp_tunnel->dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) { + vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; switch_update_params->vxlan_dst_port = vxlan_port; } @@ -10197,93 +10194,26 @@ static int bnx2x_udp_port_update(struct bnx2x *bp) return rc; } -static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port, - enum bnx2x_udp_port_type type) -{ - struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; - - if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp)) - return; - - if (udp_port->count && udp_port->dst_port == port) { - udp_port->count++; - return; - } - - if (udp_port->count) { - DP(BNX2X_MSG_SP, - "UDP tunnel [%d] - destination port limit reached\n", - type); - return; - } - - udp_port->dst_port = port; - udp_port->count = 1; - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); -} - -static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port, - enum bnx2x_udp_port_type type) -{ - struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; - - if (!IS_PF(bp) || CHIP_IS_E1x(bp)) - return; - - if (!udp_port->count || udp_port->dst_port != port) { - DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n", - type); - return; - } - - /* Remove reference, and make certain it's no longer in use */ - udp_port->count--; - if (udp_port->count) - return; - udp_port->dst_port = 0; - - if (netif_running(bp->dev)) - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); - else - DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", - type, port); -} - -static void bnx2x_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table) { struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(ti->port); + struct udp_tunnel_info ti; - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); - break; - default: - break; - } + udp_tunnel_nic_get_port(netdev, table, 0, &ti); + bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port); + + return bnx2x_udp_port_update(bp); } -static void bnx2x_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); - break; - default: - break; - } -} +static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = { + .sync_table = bnx2x_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + 
UDP_TUNNEL_NIC_INFO_OPEN_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; static int bnx2x_close(struct net_device *dev); @@ -10407,24 +10337,6 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state)) bnx2x_handle_update_svid_cmd(bp); - if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, - &bp->sp_rtnl_state)) { - if (bnx2x_udp_port_update(bp)) { - /* On error, forget configuration */ - memset(bp->udp_tunnel_ports, 0, - sizeof(struct bnx2x_udp_tunnel) * - BNX2X_UDP_PORT_MAX); - } else { - /* Since we don't store additional port information, - * if no ports are configured for any feature ask for - * information about currently configured ports. - */ - if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count && - !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) - udp_tunnel_get_rx_info(bp->dev); - } - } - /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) */ @@ -12620,9 +12532,6 @@ static int bnx2x_open(struct net_device *dev) if (rc) return rc; - if (IS_PF(bp)) - udp_tunnel_get_rx_info(dev); - return 0; } @@ -13162,8 +13071,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, - .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add, - .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) @@ -13358,6 +13267,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (IS_PF(bp)) + dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels; } dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | From ad166a8ec26521a903d5a4c4b3e8d99150cc8a70 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:24 -0700 Subject: [PATCH 1025/2056] cxgb4: convert to new udp_tunnel_nic infra Convert to new infra, this driver is very simple. The check of adapter->rawf_cnt in cxgb_udp_tunnel_unset_port() is kept from the old port deletion function but it's dodgy since nothing ever updates that member once its set during init. Also .set_port callback always adds the raw mac filter.. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 - .../net/ethernet/chelsio/cxgb4/cxgb4_main.c | 108 +++++------------- 2 files changed, 31 insertions(+), 79 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ff53c78307c5..c59e9ccc2f18 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1109,9 +1109,7 @@ struct adapter { int msg_enable; __be16 vxlan_port; - u8 vxlan_port_cnt; __be16 geneve_port; - u8 geneve_port_cnt; struct adapter_params params; struct cxgb4_virt_res vres; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 0991631f3a91..de078a5bf23e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3732,129 +3732,71 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -static void cxgb_del_udp_tunnel(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int cxgb_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; int ret = 0, i; - if (chip_ver < CHELSIO_T6) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: - if (!adapter->vxlan_port_cnt || - adapter->vxlan_port != ti->port) - return; /* Invalid VxLAN destination port */ - - adapter->vxlan_port_cnt--; - if (adapter->vxlan_port_cnt) - return; - adapter->vxlan_port = 0; t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0); break; case UDP_TUNNEL_TYPE_GENEVE: - if (!adapter->geneve_port_cnt || - adapter->geneve_port != ti->port) - return; /* Invalid GENEVE destination port */ - - adapter->geneve_port_cnt--; - if (adapter->geneve_port_cnt) - return; - adapter->geneve_port = 0; t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0); break; default: - return; + return -EINVAL; } /* Matchall mac entries can be deleted only after all tunnel ports * are brought down or removed. */ if (!adapter->rawf_cnt) - return; + return 0; for_each_port(adapter, i) { pi = adap2pinfo(adapter, i); ret = t4_free_raw_mac_filt(adapter, pi->viid, match_all_mac, match_all_mac, - adapter->rawf_start + - pi->port_id, + adapter->rawf_start + pi->port_id, 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to free mac filter entry, for port %d\n", i); - return; + return ret; } } + + return 0; } -static void cxgb_add_udp_tunnel(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int cxgb_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct port_info *pi = netdev_priv(netdev); struct adapter *adapter = pi->adapter; - unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; int i, ret; - if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt) - return; - switch (ti->type) { case UDP_TUNNEL_TYPE_VXLAN: - /* Callback for adding vxlan port can be called with the same - * port for both IPv4 and IPv6. We should not disable the - * offloading when the same port for both protocols is added - * and later one of them is removed. 
- */ - if (adapter->vxlan_port_cnt && - adapter->vxlan_port == ti->port) { - adapter->vxlan_port_cnt++; - return; - } - - /* We will support only one VxLAN port */ - if (adapter->vxlan_port_cnt) { - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", - be16_to_cpu(adapter->vxlan_port), - be16_to_cpu(ti->port)); - return; - } - adapter->vxlan_port = ti->port; - adapter->vxlan_port_cnt = 1; - t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F); break; case UDP_TUNNEL_TYPE_GENEVE: - if (adapter->geneve_port_cnt && - adapter->geneve_port == ti->port) { - adapter->geneve_port_cnt++; - return; - } - - /* We will support only one GENEVE port */ - if (adapter->geneve_port_cnt) { - netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n", - be16_to_cpu(adapter->geneve_port), - be16_to_cpu(ti->port)); - return; - } - adapter->geneve_port = ti->port; - adapter->geneve_port_cnt = 1; - t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F); break; default: - return; + return -EINVAL; } /* Create a 'match all' mac filter entry for inner mac, @@ -3869,18 +3811,27 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev, ret = t4_alloc_raw_mac_filt(adapter, pi->viid, match_all_mac, match_all_mac, - adapter->rawf_start + - pi->port_id, + adapter->rawf_start + pi->port_id, 1, pi->port_id, false); if (ret < 0) { netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n", be16_to_cpu(ti->port)); - cxgb_del_udp_tunnel(netdev, ti); - return; + return ret; } } + + return 0; } +static const struct udp_tunnel_nic_info cxgb_udp_tunnels = { + .set_port = cxgb_udp_tunnel_set_port, + .unset_port = cxgb_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + static netdev_features_t cxgb_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -3930,8 +3881,8 @@ static const struct net_device_ops cxgb4_netdev_ops = { #endif /* CONFIG_CHELSIO_T4_FCOE */ .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, .ndo_setup_tc = cxgb_setup_tc, - .ndo_udp_tunnel_add = cxgb_add_udp_tunnel, - .ndo_udp_tunnel_del = cxgb_del_udp_tunnel, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = cxgb_features_check, .ndo_fix_features = cxgb_fix_features, }; @@ -6761,6 +6712,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_HW_TLS_RECORD; + + if (adapter->rawf_cnt) + netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels; } if (highdma) From fc9a7def5d3d9869722a02ecd2daaf02d7241348 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:25 -0700 Subject: [PATCH 1026/2056] enic: convert to new udp_tunnel_nic infra Convert to new infra, now the refcounting will be correct, and driver gets port replay of other ports when offloaded port gets removed. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 105 ++++++++------------ 1 file changed, 39 insertions(+), 66 deletions(-) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index cd5fe4f6b54c..6bc7e7ba38c3 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -176,50 +176,18 @@ static void enic_unset_affinity_hint(struct enic *enic) irq_set_affinity_hint(enic->msix_entry[i].vector, NULL); } -static void enic_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int enic_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct enic *enic = netdev_priv(netdev); - __be16 port = ti->port; int err; spin_lock_bh(&enic->devcmd_lock); - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) { - netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported"); - goto error; - } - - switch (ti->sa_family) { - case AF_INET6: - if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) { - netdev_info(netdev, "vxlan: only IPv4 offload supported"); - goto error; - } - /* Fall through */ - case AF_INET: - break; - default: - goto error; - } - - if (enic->vxlan.vxlan_udp_port_number) { - if (ntohs(port) == enic->vxlan.vxlan_udp_port_number) - netdev_warn(netdev, "vxlan: udp port already offloaded"); - else - netdev_info(netdev, "vxlan: offload supported for only one UDP port"); - - goto error; - } - if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) && - !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) { - netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter"); - goto error; - } - err = vnic_dev_overlay_offload_cfg(enic->vdev, OVERLAY_CFG_VXLAN_PORT_UPDATE, - ntohs(port)); + ntohs(ti->port)); if (err) goto error; @@ -228,52 +196,50 @@ static void enic_udp_tunnel_add(struct net_device *netdev, if (err) goto error; - enic->vxlan.vxlan_udp_port_number = ntohs(port); - - netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ", - (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family); - - goto unlock; - + enic->vxlan.vxlan_udp_port_number = ntohs(ti->port); error: - netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d", - ntohs(port), ti->sa_family, ti->type); -unlock: spin_unlock_bh(&enic->devcmd_lock); + + return err; } -static void enic_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int enic_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { struct enic *enic = netdev_priv(netdev); int err; spin_lock_bh(&enic->devcmd_lock); - if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) || - ti->type != UDP_TUNNEL_TYPE_VXLAN) { - netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded", - ntohs(ti->port), ti->sa_family, ti->type); - goto unlock; - } - err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN, OVERLAY_OFFLOAD_DISABLE); - if (err) { - netdev_err(netdev, "vxlan: del offload udp port: %d failed", - ntohs(ti->port)); + if (err) goto unlock; - } enic->vxlan.vxlan_udp_port_number = 0; - netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n", - ntohs(ti->port), ti->sa_family); - unlock: spin_unlock_bh(&enic->devcmd_lock); + + return err; } +static const struct udp_tunnel_nic_info enic_udp_tunnels = { + .set_port = enic_udp_tunnel_set_port, + .unset_port = 
enic_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}, enic_udp_tunnels_v4 = { + .set_port = enic_udp_tunnel_set_port, + .unset_port = enic_udp_tunnel_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_IPV4_ONLY, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + static netdev_features_t enic_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) @@ -2526,8 +2492,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif - .ndo_udp_tunnel_add = enic_udp_tunnel_add, - .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = enic_features_check, }; @@ -2552,8 +2518,8 @@ static const struct net_device_ops enic_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = enic_rx_flow_steer, #endif - .ndo_udp_tunnel_add = enic_udp_tunnel_add, - .ndo_udp_tunnel_del = enic_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = enic_features_check, }; @@ -2963,6 +2929,13 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) patch_level = fls(patch_level); patch_level = patch_level ? patch_level - 1 : 0; enic->vxlan.patch_level = patch_level; + + if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 || + enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) { + netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4; + if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6) + netdev->udp_tunnel_nic_info = &enic_udp_tunnels; + } } netdev->features |= netdev->hw_features; From 3fcd2ba10fb8ea35c119167543a01fcb04ae9966 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:26 -0700 Subject: [PATCH 1027/2056] liquidio: convert to new udp_tunnel_nic infra This driver is just a super thin FW interface, but Derek let us know the table has 1024 entries. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../net/ethernet/cavium/liquidio/lio_main.c | 59 +++++++++++-------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 19689d72bc4e..e73bc211779a 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2670,6 +2670,35 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, return ret; } +static int liquidio_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, + unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + +static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { + .set_port = liquidio_udp_tunnel_set_port, + .unset_port = liquidio_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + /** \brief Net device fix features * @param netdev pointer to network device * @param request features requested @@ -2758,30 +2787,6 @@ static int liquidio_set_features(struct net_device *netdev, return 0; } -static void liquidio_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - liquidio_vxlan_port_command(netdev, - OCTNET_CMD_VXLAN_PORT_CONFIG, - htons(ti->port), - OCTNET_CMD_VXLAN_PORT_ADD); -} - -static void liquidio_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - liquidio_vxlan_port_command(netdev, - OCTNET_CMD_VXLAN_PORT_CONFIG, - htons(ti->port), - OCTNET_CMD_VXLAN_PORT_DEL); -} - static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac, bool is_admin_assigned) { @@ -3208,8 +3213,8 @@ static const struct net_device_ops lionetdevops = { .ndo_do_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, - .ndo_udp_tunnel_add = liquidio_add_vxlan_port, - .ndo_udp_tunnel_del = liquidio_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_set_vf_mac = liquidio_set_vf_mac, .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, @@ -3564,6 +3569,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->hw_enc_features = (lio->enc_dev_capability & ~NETIF_F_LRO); + netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; + lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; netdev->vlan_features = lio->dev_capability; From 6a8c1a75e551748ebebc4998c238a9d63ec5484d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:27 -0700 Subject: [PATCH 1028/2056] liquidio_vf: convert to new udp_tunnel_nic infra Carbon copy of the previous change. This driver is just a super thin FW interface, but Derek let us know the table has 1024 entries. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- .../ethernet/cavium/liquidio/lio_vf_main.c | 59 +++++++++++-------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index bbd9bfa4a989..90ef21086f27 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1767,6 +1767,35 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, return ret; } +static int liquidio_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, + unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + +static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { + .set_port = liquidio_udp_tunnel_set_port, + .unset_port = liquidio_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + /** \brief Net device fix features * @param netdev pointer to network device * @param request features requested @@ -1835,30 +1864,6 @@ static int liquidio_set_features(struct net_device *netdev, return 0; } -static void liquidio_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - liquidio_vxlan_port_command(netdev, - OCTNET_CMD_VXLAN_PORT_CONFIG, - htons(ti->port), - OCTNET_CMD_VXLAN_PORT_ADD); -} - -static void liquidio_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - liquidio_vxlan_port_command(netdev, - OCTNET_CMD_VXLAN_PORT_CONFIG, - htons(ti->port), - OCTNET_CMD_VXLAN_PORT_DEL); -} - static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, @@ -1873,8 +1878,8 @@ static const struct net_device_ops lionetdevops = { .ndo_do_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, - .ndo_udp_tunnel_add = liquidio_add_vxlan_port, - .ndo_udp_tunnel_del = liquidio_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, }; static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) @@ -2095,6 +2100,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->hw_enc_features = (lio->enc_dev_capability & ~NETIF_F_LRO); + netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; + netdev->vlan_features = lio->dev_capability; /* Add any unchangeable hw features */ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | From f7529b4ba3c98470b0e367ba447ad0da84dc308c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:28 -0700 Subject: [PATCH 1029/2056] fm10k: convert to new udp_tunnel_nic infra Straightforward conversion to new infra. Driver restores info after close/open cycle by calling its internal restore function so just use that, no need for udp_tunnel_nic_reset_ntf() here. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/fm10k/fm10k.h | 10 +- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 9 +- .../net/ethernet/intel/fm10k/fm10k_netdev.c | 164 +++--------------- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 4 - 4 files changed, 28 insertions(+), 159 deletions(-) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index f9be10a04dd6..6119a4108838 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -221,12 +221,6 @@ struct fm10k_iov_data { struct fm10k_vf_info vf_info[]; }; -struct fm10k_udp_port { - struct list_head list; - sa_family_t sa_family; - __be16 port; -}; - enum fm10k_macvlan_request_type { FM10K_UC_MAC_REQUEST, FM10K_MC_MAC_REQUEST, @@ -370,8 +364,8 @@ struct fm10k_intfc { u32 rssrk[FM10K_RSSRK_SIZE]; /* UDP encapsulation port tracking information */ - struct list_head vxlan_port; - struct list_head geneve_port; + __be16 vxlan_port; + __be16 geneve_port; /* MAC/VLAN update queue */ struct list_head macvlan_requests; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 34f1f5350f68..d88dd41a9442 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -635,15 +635,8 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector, static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb) { struct fm10k_intfc *interface = netdev_priv(skb->dev); - struct fm10k_udp_port *vxlan_port; - /* we can only offload a vxlan if we recognize it as such */ - vxlan_port = list_first_entry_or_null(&interface->vxlan_port, - struct fm10k_udp_port, list); - - if (!vxlan_port) - return NULL; - if (vxlan_port->port != udp_hdr(skb)->dest) + if (interface->vxlan_port != udp_hdr(skb)->dest) return NULL; /* return offset of udp_hdr plus 8 bytes for VXLAN header */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 1450a9f98c5a..5c19ff452558 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -366,39 +366,6 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface) } } -/** - * fm10k_free_udp_port_info - * @interface: board private structure - * - * This function frees both geneve_port and vxlan_port structures - **/ -static void fm10k_free_udp_port_info(struct fm10k_intfc *interface) -{ - struct fm10k_udp_port *port; - - /* flush all entries from vxlan list */ - port = list_first_entry_or_null(&interface->vxlan_port, - struct fm10k_udp_port, list); - while (port) { - list_del(&port->list); - kfree(port); - port = list_first_entry_or_null(&interface->vxlan_port, - struct fm10k_udp_port, - list); - } - - /* flush all entries from geneve list */ - port = list_first_entry_or_null(&interface->geneve_port, - struct fm10k_udp_port, list); - while (port) { - list_del(&port->list); - kfree(port); - port = list_first_entry_or_null(&interface->vxlan_port, - struct fm10k_udp_port, - list); - } -} - /** * fm10k_restore_udp_port_info * @interface: board private structure @@ -408,131 +375,52 @@ static void fm10k_free_udp_port_info(struct fm10k_intfc *interface) static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface) { struct fm10k_hw *hw = &interface->hw; - struct fm10k_udp_port *port; /* only the PF supports configuring tunnels */ if (hw->mac.type != fm10k_mac_pf) return; - port = 
list_first_entry_or_null(&interface->vxlan_port, - struct fm10k_udp_port, list); - /* restore tunnel configuration register */ fm10k_write_reg(hw, FM10K_TUNNEL_CFG, - (port ? ntohs(port->port) : 0) | + ntohs(interface->vxlan_port) | (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT)); - port = list_first_entry_or_null(&interface->geneve_port, - struct fm10k_udp_port, list); - /* restore Geneve tunnel configuration register */ fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE, - (port ? ntohs(port->port) : 0)); -} - -static struct fm10k_udp_port * -fm10k_remove_tunnel_port(struct list_head *ports, - struct udp_tunnel_info *ti) -{ - struct fm10k_udp_port *port; - - list_for_each_entry(port, ports, list) { - if ((port->port == ti->port) && - (port->sa_family == ti->sa_family)) { - list_del(&port->list); - return port; - } - } - - return NULL; -} - -static void fm10k_insert_tunnel_port(struct list_head *ports, - struct udp_tunnel_info *ti) -{ - struct fm10k_udp_port *port; - - /* remove existing port entry from the list so that the newest items - * are always at the tail of the list. - */ - port = fm10k_remove_tunnel_port(ports, ti); - if (!port) { - port = kmalloc(sizeof(*port), GFP_ATOMIC); - if (!port) - return; - port->port = ti->port; - port->sa_family = ti->sa_family; - } - - list_add_tail(&port->list, ports); + ntohs(interface->geneve_port)); } /** - * fm10k_udp_tunnel_add + * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change * @dev: network interface device structure - * @ti: Tunnel endpoint information + * @table: Tunnel table (according to tables of @fm10k_udp_tunnels) * - * This function is called when a new UDP tunnel port has been added. + * This function is called when a new UDP tunnel port is added or deleted. * Due to hardware restrictions, only one port per type can be offloaded at - * once. + * once. Core will send to the driver a port of its choice. **/ -static void fm10k_udp_tunnel_add(struct net_device *dev, - struct udp_tunnel_info *ti) +static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table) { struct fm10k_intfc *interface = netdev_priv(dev); + struct udp_tunnel_info ti; - /* only the PF supports configuring tunnels */ - if (interface->hw.mac.type != fm10k_mac_pf) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - fm10k_insert_tunnel_port(&interface->vxlan_port, ti); - break; - case UDP_TUNNEL_TYPE_GENEVE: - fm10k_insert_tunnel_port(&interface->geneve_port, ti); - break; - default: - return; - } + udp_tunnel_nic_get_port(dev, table, 0, &ti); + if (!table) + interface->vxlan_port = ti.port; + else + interface->geneve_port = ti.port; fm10k_restore_udp_port_info(interface); + return 0; } -/** - * fm10k_udp_tunnel_del - * @dev: network interface device structure - * @ti: Tunnel end point information - * - * This function is called when a new UDP tunnel port is deleted. The freed - * port will be removed from the list, then we reprogram the offloaded port - * based on the head of the list. 
- **/ -static void fm10k_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) -{ - struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_udp_port *port = NULL; - - if (interface->hw.mac.type != fm10k_mac_pf) - return; - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti); - break; - case UDP_TUNNEL_TYPE_GENEVE: - port = fm10k_remove_tunnel_port(&interface->geneve_port, ti); - break; - default: - return; - } - - /* if we did remove a port we need to free its memory */ - kfree(port); - - fm10k_restore_udp_port_info(interface); -} +static const struct udp_tunnel_nic_info fm10k_udp_tunnels = { + .sync_table = fm10k_udp_tunnel_sync, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; /** * fm10k_open - Called when a network interface is made active @@ -580,8 +468,6 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; - udp_tunnel_get_rx_info(netdev); - fm10k_up(interface); return 0; @@ -615,8 +501,6 @@ int fm10k_close(struct net_device *netdev) fm10k_qv_free_irq(interface); - fm10k_free_udp_port_info(interface); - fm10k_free_all_tx_resources(interface); fm10k_free_all_rx_resources(interface); @@ -1647,8 +1531,8 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, .ndo_get_vf_config = fm10k_ndo_get_vf_config, .ndo_get_vf_stats = fm10k_ndo_get_vf_stats, - .ndo_udp_tunnel_add = fm10k_udp_tunnel_add, - .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, .ndo_features_check = fm10k_features_check, @@ -1695,6 +1579,8 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) NETIF_F_SG; dev->features |= NETIF_F_GSO_UDP_TUNNEL; + + dev->udp_tunnel_nic_info = &fm10k_udp_tunnels; } /* all features defined to this point should be changeable */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index d122d0087191..140212bfe08b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2066,10 +2066,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, interface->tx_itr = FM10K_TX_ITR_DEFAULT; interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT; - /* initialize udp port lists */ - INIT_LIST_HEAD(&interface->vxlan_port); - INIT_LIST_HEAD(&interface->geneve_port); - /* Initialize the MAC/VLAN queue */ INIT_LIST_HEAD(&interface->macvlan_requests); From 8cd160a29415f1789d473b1dc07fcc9d02a02b87 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:29 -0700 Subject: [PATCH 1030/2056] qede: convert to new udp_tunnel_nic infra Covert to new infra. Looks like this driver was not doing ref counting, and sleeping in the callback. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 1 + .../net/ethernet/qlogic/qede/qede_filter.c | 144 ++++++------------ drivers/net/ethernet/qlogic/qede/qede_main.c | 18 ++- 3 files changed, 59 insertions(+), 104 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 591dd4051d06..8adda5dc9e88 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -543,6 +543,7 @@ void qede_set_dcbnl_ops(struct net_device *ndev); void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); +void qede_set_udp_tunnels(struct qede_dev *edev); void qede_reload(struct qede_dev *edev, struct qede_reload_args *args, bool is_locked); int qede_change_mtu(struct net_device *dev, int new_mtu); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index d8100434e340..b7d0b6ccebd3 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -953,115 +953,67 @@ int qede_set_features(struct net_device *dev, netdev_features_t features) return 0; } -void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) +static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table) { struct qede_dev *edev = netdev_priv(dev); struct qed_tunn_params tunn_params; - u16 t_port = ntohs(ti->port); + struct udp_tunnel_info ti; + u16 *save_port; int rc; memset(&tunn_params, 0, sizeof(tunn_params)); - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (!edev->dev_info.common.vxlan_enable) - return; - - if (edev->vxlan_dst_port) - return; - + udp_tunnel_nic_get_port(dev, table, 0, &ti); + if (ti.type == UDP_TUNNEL_TYPE_VXLAN) { tunn_params.update_vxlan_port = 1; - tunn_params.vxlan_port = t_port; - - __qede_lock(edev); - rc = edev->ops->tunn_config(edev->cdev, &tunn_params); - __qede_unlock(edev); - - if (!rc) { - edev->vxlan_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", - t_port); - } else { - DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n", - t_port); - } - - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (!edev->dev_info.common.geneve_enable) - return; - - if (edev->geneve_dst_port) - return; - + tunn_params.vxlan_port = ntohs(ti.port); + save_port = &edev->vxlan_dst_port; + } else { tunn_params.update_geneve_port = 1; - tunn_params.geneve_port = t_port; - - __qede_lock(edev); - rc = edev->ops->tunn_config(edev->cdev, &tunn_params); - __qede_unlock(edev); - - if (!rc) { - edev->geneve_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, - "Added geneve port=%d\n", t_port); - } else { - DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n", - t_port); - } - - break; - default: - return; + tunn_params.geneve_port = ntohs(ti.port); + save_port = &edev->geneve_dst_port; } + + __qede_lock(edev); + rc = edev->ops->tunn_config(edev->cdev, &tunn_params); + __qede_unlock(edev); + if (rc) + return rc; + + *save_port = ntohs(ti.port); + return 0; } -void qede_udp_tunnel_del(struct net_device *dev, - struct udp_tunnel_info *ti) +static const struct udp_tunnel_nic_info qede_udp_tunnels_both = { + .sync_table = qede_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}, qede_udp_tunnels_vxlan = { + .sync_table = qede_udp_tunnel_sync, + .flags = 
UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}, qede_udp_tunnels_geneve = { + .sync_table = qede_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, }, + }, +}; + +void qede_set_udp_tunnels(struct qede_dev *edev) { - struct qede_dev *edev = netdev_priv(dev); - struct qed_tunn_params tunn_params; - u16 t_port = ntohs(ti->port); - - memset(&tunn_params, 0, sizeof(tunn_params)); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - if (t_port != edev->vxlan_dst_port) - return; - - tunn_params.update_vxlan_port = 1; - tunn_params.vxlan_port = 0; - - __qede_lock(edev); - edev->ops->tunn_config(edev->cdev, &tunn_params); - __qede_unlock(edev); - - edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", - t_port); - - break; - case UDP_TUNNEL_TYPE_GENEVE: - if (t_port != edev->geneve_dst_port) - return; - - tunn_params.update_geneve_port = 1; - tunn_params.geneve_port = 0; - - __qede_lock(edev); - edev->ops->tunn_config(edev->cdev, &tunn_params); - __qede_unlock(edev); - - edev->geneve_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", - t_port); - break; - default: - return; - } + if (edev->dev_info.common.vxlan_enable && + edev->dev_info.common.geneve_enable) + edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both; + else if (edev->dev_info.common.vxlan_enable) + edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan; + else if (edev->dev_info.common.geneve_enable) + edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve; } static void qede_xdp_reload_func(struct qede_dev *edev, diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 8cd27f8f1b3a..a653dd0e5c22 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -663,8 +663,8 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif - .ndo_udp_tunnel_add = qede_udp_tunnel_add, - .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = qede_features_check, .ndo_bpf = qede_xdp, #ifdef CONFIG_RFS_ACCEL @@ -687,8 +687,8 @@ static const struct net_device_ops qede_netdev_vf_ops = { .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, - .ndo_udp_tunnel_add = qede_udp_tunnel_add, - .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = qede_features_check, }; @@ -706,8 +706,8 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_fix_features = qede_fix_features, .ndo_set_features = qede_set_features, .ndo_get_stats64 = qede_get_stats64, - .ndo_udp_tunnel_add = qede_udp_tunnel_add, - .ndo_udp_tunnel_del = qede_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = qede_features_check, .ndo_bpf = qede_xdp, }; @@ -822,6 +822,8 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_GSO_UDP_TUNNEL_CSUM); ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM); + + qede_set_udp_tunnels(edev); } if (edev->dev_info.common.gre_enable) { @@ -2421,7 
+2423,7 @@ static int qede_open(struct net_device *ndev) if (rc) return rc; - udp_tunnel_get_rx_info(ndev); + udp_tunnel_nic_reset_ntf(ndev); edev->ops->common->update_drv_state(edev->cdev, true); @@ -2523,7 +2525,7 @@ static void qede_recovery_handler(struct qede_dev *edev) goto err; qede_config_rx_mode(edev->ndev); - udp_tunnel_get_rx_info(edev->ndev); + udp_tunnel_nic_reset_ntf(edev->ndev); } edev->state = curr_state; From 78c6bc2bdf122db07dc04a64e5eda26bcc0c1e15 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 14 Jul 2020 12:18:30 -0700 Subject: [PATCH 1031/2056] qlcnic: convert to new udp_tunnel_nic infra Straightforward conversion to new infra, 1 VxLAN port, handler may sleep. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 7 +- .../ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 31 ++------- .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 64 +++++++------------ 3 files changed, 32 insertions(+), 70 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index d838774af5a6..d67f8265724a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -536,8 +536,6 @@ struct qlcnic_hardware_context { u8 extend_lb_time; u8 phys_port_id[ETH_ALEN]; u8 lb_mode; - u8 vxlan_port_count; - u16 vxlan_port; struct device *hwmon_dev; u32 post_mode; bool run_post; @@ -1026,9 +1024,6 @@ struct qlcnic_ipaddr { #define QLCNIC_HAS_PHYS_PORT_ID 0x40000 #define QLCNIC_TSS_RSS 0x80000 -#define QLCNIC_ADD_VXLAN_PORT 0x100000 -#define QLCNIC_DEL_VXLAN_PORT 0x200000 - #define QLCNIC_VLAN_FILTERING 0x800000 #define QLCNIC_IS_MSI_FAMILY(adapter) \ @@ -1700,6 +1695,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *); int qlcnic_set_default_offload_settings(struct qlcnic_adapter *); int qlcnic_reset_npar_config(struct qlcnic_adapter *); int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); +int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port); +int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index cda5b0a9e948..0e2f2fb6c3a9 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1028,9 +1028,8 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, #define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1 #define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0 -static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter) +int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port) { - u16 port = adapter->ahw->vxlan_port; struct qlcnic_cmd_args cmd; int ret = 0; @@ -1057,10 +1056,8 @@ static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter) return ret; } -static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, - bool state) +int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port) { - u16 vxlan_port = adapter->ahw->vxlan_port; struct qlcnic_cmd_args cmd; int ret = 0; @@ -1071,18 +1068,18 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, if (ret) return ret; - cmd.req.arg[1] = state ? 
QLCNIC_ENABLE_INGRESS_ENCAP_PARSING : - QLCNIC_DISABLE_INGRESS_ENCAP_PARSING; + cmd.req.arg[1] = port ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING : + QLCNIC_DISABLE_INGRESS_ENCAP_PARSING; ret = qlcnic_issue_cmd(adapter, &cmd); if (ret) netdev_err(adapter->netdev, "Failed to %s VXLAN parsing for port %d\n", - state ? "enable" : "disable", vxlan_port); + port ? "enable" : "disable", port); else netdev_info(adapter->netdev, "%s VXLAN parsing for port %d\n", - state ? "Enabled" : "Disabled", vxlan_port); + port ? "Enabled" : "Disabled", port); qlcnic_free_mbx_args(&cmd); @@ -1093,22 +1090,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); - - if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) { - if (qlcnic_set_vxlan_port(adapter)) - return; - - if (qlcnic_set_vxlan_parsing(adapter, true)) - return; - - adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT; - } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) { - if (qlcnic_set_vxlan_parsing(adapter, false)) - return; - - adapter->ahw->vxlan_port = 0; - adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT; - } } /** diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index e52af092a793..173c7300cdf7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -471,48 +471,29 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev, return 0; } -static void qlcnic_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table) { - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_hardware_context *ahw = adapter->ahw; + struct qlcnic_adapter *adapter = netdev_priv(dev); + struct udp_tunnel_info ti; + int err; - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - /* Adapter supports only one VXLAN port. 
Use very first port - * for enabling offload - */ - if (!qlcnic_encap_rx_offload(adapter)) - return; - if (!ahw->vxlan_port_count) { - ahw->vxlan_port_count = 1; - ahw->vxlan_port = ntohs(ti->port); - adapter->flags |= QLCNIC_ADD_VXLAN_PORT; - return; + udp_tunnel_nic_get_port(dev, table, 0, &ti); + if (ti.port) { + err = qlcnic_set_vxlan_port(adapter, ntohs(ti.port)); + if (err) + return err; } - if (ahw->vxlan_port == ntohs(ti->port)) - ahw->vxlan_port_count++; + return qlcnic_set_vxlan_parsing(adapter, ntohs(ti.port)); } -static void qlcnic_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - struct qlcnic_adapter *adapter = netdev_priv(netdev); - struct qlcnic_hardware_context *ahw = adapter->ahw; - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || - (ahw->vxlan_port != ntohs(ti->port))) - return; - - ahw->vxlan_port_count--; - if (!ahw->vxlan_port_count) - adapter->flags |= QLCNIC_DEL_VXLAN_PORT; -} +static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = { + .sync_table = qlcnic_udp_tunnel_sync, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; static netdev_features_t qlcnic_features_check(struct sk_buff *skb, struct net_device *dev, @@ -540,8 +521,8 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_fdb_del = qlcnic_fdb_del, .ndo_fdb_dump = qlcnic_fdb_dump, .ndo_get_phys_port_id = qlcnic_get_phys_port_id, - .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, - .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = qlcnic_features_check, #ifdef CONFIG_QLCNIC_SRIOV .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac, @@ -2017,7 +1998,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter) qlcnic_create_sysfs_entries(adapter); if (qlcnic_encap_rx_offload(adapter)) - udp_tunnel_get_rx_info(netdev); + udp_tunnel_nic_reset_ntf(netdev); adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; return 0; @@ -2335,9 +2316,12 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, NETIF_F_TSO6; } - if (qlcnic_encap_rx_offload(adapter)) + if (qlcnic_encap_rx_offload(adapter)) { netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->udp_tunnel_nic_info = &qlcnic_udp_tunnels; + } + netdev->hw_features = netdev->features; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; From 769a29ce2af45481c1971be5a96250696fb4acf4 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:19 +0800 Subject: [PATCH 1032/2056] rtw88: 8821c: add basic functions RTL8821CE chipsets are 802.11ac dual-band WiFi + BT combo chips. This patch adds the basic functions such as parameter tables, chip information, power on flow. 
Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-2-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.h | 1 + drivers/net/wireless/realtek/rtw88/reg.h | 4 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 651 ++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 188 + .../wireless/realtek/rtw88/rtw8821c_table.c | 6611 +++++++++++++++++ .../wireless/realtek/rtw88/rtw8821c_table.h | 15 + .../net/wireless/realtek/rtw88/rtw8821ce.c | 30 + .../net/wireless/realtek/rtw88/rtw8821ce.h | 14 + 8 files changed, 7514 insertions(+) create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821c.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821c.h create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821c_table.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821c_table.h create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821ce.c create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8821ce.h diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 0841f5fa4bf2..8f6e10acd65f 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -183,6 +183,7 @@ enum rtw_chip_type { RTW_CHIP_TYPE_8822B, RTW_CHIP_TYPE_8822C, RTW_CHIP_TYPE_8723D, + RTW_CHIP_TYPE_8821C, }; enum rtw_tx_queue_type { diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 5a3e9cc7c400..f1275757f6b4 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -43,6 +43,8 @@ #define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR) #define BITS_PLL 0xf0 +#define REG_AFE_XTAL_CTRL 0x24 +#define REG_AFE_PLL_CTRL 0x28 #define REG_AFE_CTRL3 0x2c #define BIT_MASK_XTAL 0x00FFF000 #define BIT_XTAL_GMP_BIT4 BIT(28) @@ -476,6 +478,8 @@ #define REG_RFE_CTRL_E 0x0974 #define REG_2ND_CCA_CTRL 0x0976 +#define REG_CCK0_FAREPORT 0xa2c + #define REG_DIS_DPD 0x0a70 #define DIS_DPD_MASK GENMASK(9, 0) #define DIS_DPD_RATE6M BIT(0) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c new file mode 100644 index 000000000000..9989eab95256 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "coex.h" +#include "fw.h" +#include "tx.h" +#include "rx.h" +#include "phy.h" +#include "rtw8821c.h" +#include "rtw8821c_table.h" +#include "mac.h" +#include "reg.h" +#include "debug.h" +#include "bf.h" + +static void rtw8821ce_efuse_parsing(struct rtw_efuse *efuse, + struct rtw8821c_efuse *map) +{ + ether_addr_copy(efuse->addr, map->e.mac_addr); +} + +static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw8821c_efuse *map; + int i; + + map = (struct rtw8821c_efuse *)log_map; + + efuse->rfe_option = map->rfe_option; + efuse->rf_board_option = map->rf_board_option; + efuse->crystal_cap = map->xtal_k; + efuse->pa_type_2g = map->pa_type; + efuse->pa_type_5g = map->pa_type; + efuse->lna_type_2g = map->lna_type_2g[0]; + efuse->lna_type_5g = map->lna_type_5g[0]; + efuse->channel_plan = map->channel_plan; + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + efuse->bt_setting = 
map->rf_bt_setting; + efuse->regd = map->rf_board_option & 0x7; + efuse->thermal_meter[0] = map->thermal_meter; + efuse->thermal_meter_k = map->thermal_meter; + efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g; + efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g; + + for (i = 0; i < 4; i++) + efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; + + switch (rtw_hci_type(rtwdev)) { + case RTW_HCI_TYPE_PCIE: + rtw8821ce_efuse_parsing(efuse, map); + break; + default: + /* unsupported now */ + return -ENOTSUPP; + } + + return 0; +} + +static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) +{ + u8 crystal_cap, val; + + /* power on BB/RF domain */ + val = rtw_read8(rtwdev, REG_SYS_FUNC_EN); + val |= BIT_FEN_PCIEA; + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val); + + /* toggle BB reset */ + val |= BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST; + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val); + val &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST); + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val); + val |= BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST; + rtw_write8(rtwdev, REG_SYS_FUNC_EN, val); + + rtw_write8(rtwdev, REG_RF_CTRL, + BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB); + usleep_range(10, 11); + rtw_write8(rtwdev, REG_WLRF1 + 3, + BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB); + usleep_range(10, 11); + + /* pre init before header files config */ + rtw_write32_clr(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + + rtw_phy_load_tables(rtwdev); + + crystal_cap = rtwdev->efuse.crystal_cap & 0x3F; + rtw_write32_mask(rtwdev, REG_AFE_XTAL_CTRL, 0x7e000000, crystal_cap); + rtw_write32_mask(rtwdev, REG_AFE_PLL_CTRL, 0x7e, crystal_cap); + rtw_write32_mask(rtwdev, REG_CCK0_FAREPORT, BIT(18) | BIT(22), 0); + + /* post init after header files config */ + rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + + rtw_phy_init(rtwdev); +} + +static int rtw8821c_mac_init(struct rtw_dev *rtwdev) +{ + u32 value32; + u16 pre_txcnt; + + /* protocol configuration */ + rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME); + rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1); + pre_txcnt = WLAN_PRE_TXCNT_TIME_TH | BIT_EN_PRECNT; + rtw_write8(rtwdev, REG_PRECNT_CTRL, (u8)(pre_txcnt & 0xFF)); + rtw_write8(rtwdev, REG_PRECNT_CTRL + 1, (u8)(pre_txcnt >> 8)); + value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) | + (WLAN_MAX_AGG_PKT_LIMIT << 16) | + (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24); + rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32); + rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2, + WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8); + rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH); + rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH); + rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH); + rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH); + rtw_write8_set(rtwdev, REG_INIRTS_RATE_SEL, BIT(5)); + + /* EDCA configuration */ + rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0); + rtw_write16(rtwdev, REG_TXPAUSE, 0); + rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME); + rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME); + rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG); + rtw_write16(rtwdev, REG_EDCA_VO_PARAM + 2, WLAN_VO_TXOP_LIMIT); + rtw_write16(rtwdev, REG_EDCA_VI_PARAM + 2, WLAN_VI_TXOP_LIMIT); + rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG); + rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG); + + /* Set beacon cotnrol - enable TSF and other related functions */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* Set send beacon 
related registers */ + rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME); + rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT); + rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME); + rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8); + + /* WMAC configuration */ + rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0); + rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2); + rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG); + rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512); + rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2); + rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1); + rtw_write8(rtwdev, REG_ACKTO_CCK, 0x40); + rtw_write8_set(rtwdev, REG_WMAC_TRXPTCL_CTL_H, BIT(1)); + rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, BIT(6)); + rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2); + rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1); + + return 0; +} + +static void rtw8821c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) +{ + u8 ldo_pwr; + + ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3); + ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7); + rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr); +} + +static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = { + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x004A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0}, + {0x0300, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0301, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8821c[] = { + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0001, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), BIT(1)}, + {0x0075, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0}, + {0x10C3, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 
RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(0), 0}, + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), BIT(3)}, + {0x0074, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0x0022, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0062, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), + (BIT(7) | BIT(6) | BIT(5))}, + {0x0061, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), 0}, + {0x007C, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8821c[] = { + {0x0093, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x001F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0049, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0006, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0002, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x10C3, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), BIT(1)}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0x0020, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3), 0}, + {0x0000, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), BIT(5)}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8821c[] = { + {0x0007, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, 0xFF, 0x20}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), BIT(2)}, + {0x004A, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(5), 0}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), 0}, + {0x004F, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(0), 0}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0046, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(6), BIT(6)}, + {0x0067, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(2), 0}, + {0x0046, + 
RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7), BIT(7)}, + {0x0062, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(4), BIT(4)}, + {0x0081, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0}, + {0x0005, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, BIT(0), BIT(0)}, + {0x0086, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_POLLING, BIT(1), 0}, + {0x0090, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_PCI_MSK, + RTW_PWR_ADDR_MAC, + RTW_PWR_CMD_WRITE, BIT(1), 0}, + {0x0044, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, 0xFF, 0}, + {0x0040, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, 0xFF, 0x90}, + {0x0041, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, 0xFF, 0x00}, + {0x0042, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_SDIO_MSK, + RTW_PWR_ADDR_SDIO, + RTW_PWR_CMD_WRITE, 0xFF, 0x04}, + {0xFFFF, + RTW_PWR_CUT_ALL_MSK, + RTW_PWR_INTF_ALL_MSK, + 0, + RTW_PWR_CMD_END, 0, 0}, +}; + +static const struct rtw_pwr_seq_cmd *card_enable_flow_8821c[] = { + trans_carddis_to_cardemu_8821c, + trans_cardemu_to_act_8821c, + NULL +}; + +static const struct rtw_pwr_seq_cmd *card_disable_flow_8821c[] = { + trans_act_to_cardemu_8821c, + trans_cardemu_to_carddis_8821c, + NULL +}; + +static const struct rtw_intf_phy_para usb2_param_8821c[] = { + {0xFFFF, 0x00, + RTW_IP_SEL_PHY, + RTW_INTF_PHY_CUT_ALL, + RTW_INTF_PHY_PLATFORM_ALL}, +}; + +static const struct rtw_intf_phy_para usb3_param_8821c[] = { + {0xFFFF, 0x0000, + RTW_IP_SEL_PHY, + RTW_INTF_PHY_CUT_ALL, + RTW_INTF_PHY_PLATFORM_ALL}, +}; + +static const struct rtw_intf_phy_para pcie_gen1_param_8821c[] = { + {0x0009, 0x6380, + RTW_IP_SEL_PHY, + RTW_INTF_PHY_CUT_ALL, + RTW_INTF_PHY_PLATFORM_ALL}, + {0xFFFF, 0x0000, + RTW_IP_SEL_PHY, + RTW_INTF_PHY_CUT_ALL, + RTW_INTF_PHY_PLATFORM_ALL}, +}; + +static const struct rtw_intf_phy_para pcie_gen2_param_8821c[] = { + {0xFFFF, 0x0000, + RTW_IP_SEL_PHY, + RTW_INTF_PHY_CUT_ALL, + RTW_INTF_PHY_PLATFORM_ALL}, +}; + +static const struct rtw_intf_phy_para_table phy_para_table_8821c = { + .usb2_para = usb2_param_8821c, + .usb3_para = usb3_param_8821c, + .gen1_para = pcie_gen1_param_8821c, + .gen2_para = pcie_gen2_param_8821c, + .n_usb2_para = ARRAY_SIZE(usb2_param_8821c), + .n_usb3_para = ARRAY_SIZE(usb2_param_8821c), + .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8821c), + .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8821c), +}; + +static const struct rtw_rfe_def rtw8821c_rfe_defs[] = { + [0] = RTW_DEF_RFE(8821c, 0, 0), +}; + +static const struct rtw_ltecoex_addr rtw8821c_ltecoex_addr = { + .ctrl = LTECOEX_ACCESS_CTRL, + .wdata = LTECOEX_WRITE_DATA, + .rdata = LTECOEX_READ_DATA, +}; + +static struct rtw_page_table page_table_8821c[] = { + /* not sure what [0] stands for */ + {16, 16, 16, 14, 1}, + {16, 16, 16, 14, 1}, + {16, 16, 0, 0, 1}, + {16, 16, 16, 0, 1}, + {16, 16, 16, 14, 1}, +}; + +static struct rtw_rqpn rqpn_table_8821c[] = { + /* not sure what [0] stands for */ + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + 
{RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH}, + {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL, + RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW, + RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH}, +}; + +static struct rtw_prioq_addrs prioq_addrs_8821c = { + .prio[RTW_DMA_MAPPING_EXTRA] = { + .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2, + }, + .prio[RTW_DMA_MAPPING_LOW] = { + .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2, + }, + .prio[RTW_DMA_MAPPING_NORMAL] = { + .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2, + }, + .prio[RTW_DMA_MAPPING_HIGH] = { + .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2, + }, + .wsize = true, +}; + +static struct rtw_chip_ops rtw8821c_ops = { + .phy_set_param = rtw8821c_phy_set_param, + .read_efuse = rtw8821c_read_efuse, + .mac_init = rtw8821c_mac_init, + .read_rf = rtw_phy_read_rf, + .write_rf = rtw_phy_write_rf_reg_sipi, + .set_antenna = NULL, + .cfg_ldo25 = rtw8821c_cfg_ldo25, +}; + +struct rtw_chip_info rtw8821c_hw_spec = { + .ops = &rtw8821c_ops, + .id = RTW_CHIP_TYPE_8821C, + .fw_name = "rtw88/rtw8821c_fw.bin", + .wlan_cpu = RTW_WCPU_11AC, + .tx_pkt_desc_sz = 48, + .tx_buf_desc_sz = 16, + .rx_pkt_desc_sz = 24, + .rx_buf_desc_sz = 8, + .phy_efuse_size = 512, + .log_efuse_size = 512, + .ptct_efuse_size = 96, + .txff_size = 65536, + .rxff_size = 16384, + .txgi_factor = 1, + .is_pwr_by_rate_dec = true, + .max_power_index = 0x3f, + .csi_buf_pg_num = 0, + .band = RTW_BAND_2G | RTW_BAND_5G, + .page_size = 128, + .ht_supported = true, + .vht_supported = true, + .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), + .sys_func_en = 0xD8, + .pwr_on_seq = card_enable_flow_8821c, + .pwr_off_seq = card_disable_flow_8821c, + .page_table = page_table_8821c, + .rqpn_table = rqpn_table_8821c, + .prioq_addrs = &prioq_addrs_8821c, + .intf_table = &phy_para_table_8821c, + .rf_base_addr = {0x2800, 0x2c00}, + .rf_sipi_addr = {0xc90, 0xe90}, + .ltecoex_addr = &rtw8821c_ltecoex_addr, + .mac_tbl = &rtw8821c_mac_tbl, + .agc_tbl = &rtw8821c_agc_tbl, + .bb_tbl = &rtw8821c_bb_tbl, + .rf_tbl = {&rtw8821c_rf_a_tbl}, + .rfe_defs = rtw8821c_rfe_defs, + .rfe_defs_size = ARRAY_SIZE(rtw8821c_rfe_defs), + .rx_ldpc = false, +}; +EXPORT_SYMBOL(rtw8821c_hw_spec); + +MODULE_FIRMWARE("rtw88/rtw8821c_fw.bin"); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821c driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h new file mode 100644 index 000000000000..1c357e2b099b --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -0,0 +1,188 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW8821C_H__ +#define __RTW8821C_H__ + +#include <asm/byteorder.h> + +#define RCR_VHT_ACK BIT(26) + +struct rtw8821ce_efuse { + u8 mac_addr[ETH_ALEN]; /* 0xd0 */ + u8 vender_id[2]; + u8 device_id[2]; + u8 sub_vender_id[2]; + u8 sub_device_id[2]; + u8 pmc[2]; + u8 exp_device_cap[2]; + u8 msi_cap; + u8 ltr_cap; /* 0xe3 */ + u8 exp_link_control[2]; + u8 link_cap[4]; + u8 link_control[2]; + u8 
serial_number[8]; + u8 res0:2; /* 0xf4 */ + u8 ltr_en:1; + u8 res1:2; + u8 obff:2; + u8 res2:3; + u8 obff_cap:2; + u8 res3:4; + u8 res4[3]; + u8 class_code[3]; + u8 pci_pm_L1_2_supp:1; + u8 pci_pm_L1_1_supp:1; + u8 aspm_pm_L1_2_supp:1; + u8 aspm_pm_L1_1_supp:1; + u8 L1_pm_substates_supp:1; + u8 res5:3; + u8 port_common_mode_restore_time; + u8 port_t_power_on_scale:2; + u8 res6:1; + u8 port_t_power_on_value:5; + u8 res7; +}; + +struct rtw8821c_efuse { + __le16 rtl_id; + u8 res0[0x0e]; + + /* power index for four RF paths */ + struct rtw_txpwr_idx txpwr_idx_table[4]; + + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; /* 0xbc */ + u8 lna_type_2g[2]; /* 0xbd */ + u8 lna_type_5g[2]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; /* 0xc9 */ + u8 rfe_option; + u8 country_code[2]; + u8 res[3]; + union { + struct rtw8821ce_efuse e; + }; +}; + +static inline void +_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) +{ + /* 0xC00-0xCFF and 0xE00-0xEFF have the same layout */ + rtw_write32_mask(rtwdev, addr, mask, data); + rtw_write32_mask(rtwdev, addr + 0x200, mask, data); +} + +#define rtw_write32s_mask(rtwdev, addr, mask, data) \ + do { \ + BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \ + \ + _rtw_write32s_mask(rtwdev, addr, mask, data); \ + } while (0) + +#define BIT_FEN_PCIEA BIT(6) +#define WLAN_SLOT_TIME 0x09 +#define WLAN_PIFS_TIME 0x19 +#define WLAN_SIFS_CCK_CONT_TX 0xA +#define WLAN_SIFS_OFDM_CONT_TX 0xE +#define WLAN_SIFS_CCK_TRX 0x10 +#define WLAN_SIFS_OFDM_TRX 0x10 +#define WLAN_VO_TXOP_LIMIT 0x186 +#define WLAN_VI_TXOP_LIMIT 0x3BC +#define WLAN_RDG_NAV 0x05 +#define WLAN_TXOP_NAV 0x1B +#define WLAN_CCK_RX_TSF 0x30 +#define WLAN_OFDM_RX_TSF 0x30 +#define WLAN_TBTT_PROHIBIT 0x04 +#define WLAN_TBTT_HOLD_TIME 0x064 +#define WLAN_DRV_EARLY_INT 0x04 +#define WLAN_BCN_DMA_TIME 0x02 + +#define WLAN_RX_FILTER0 0x0FFFFFFF +#define WLAN_RX_FILTER2 0xFFFF +#define WLAN_RCR_CFG 0xE400220E +#define WLAN_RXPKT_MAX_SZ 12288 +#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9) + +#define WLAN_AMPDU_MAX_TIME 0x70 +#define WLAN_RTS_LEN_TH 0xFF +#define WLAN_RTS_TX_TIME_TH 0x08 +#define WLAN_MAX_AGG_PKT_LIMIT 0x20 +#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20 +#define FAST_EDCA_VO_TH 0x06 +#define FAST_EDCA_VI_TH 0x06 +#define FAST_EDCA_BE_TH 0x06 +#define FAST_EDCA_BK_TH 0x06 +#define WLAN_BAR_RETRY_LIMIT 0x01 +#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08 + +#define WLAN_TX_FUNC_CFG1 0x30 +#define WLAN_TX_FUNC_CFG2 0x30 +#define WLAN_MAC_OPT_NORM_FUNC1 0x98 +#define WLAN_MAC_OPT_LB_FUNC1 0x80 +#define WLAN_MAC_OPT_FUNC2 0x30810041 + +#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \ + (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \ + (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \ + (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX)) + +#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\ + (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP)) + +#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16)) +#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8) +#define WLAN_PRE_TXCNT_TIME_TH 0x1E4 + +#define REG_INIRTS_RATE_SEL 0x0480 +#define REG_HTSTFWT 0x800 +#define REG_RXPSEL 0x808 +#define BIT_RX_PSEL_RST (BIT(28) | BIT(29)) +#define REG_TXPSEL 0x80c +#define REG_RXCCAMSK 0x814 +#define REG_CCASEL 0x82c +#define REG_PDMFTH 0x830 +#define REG_CCA2ND 0x838 
+#define REG_L1WT 0x83c +#define REG_L1PKWT 0x840 +#define REG_MRC 0x850 +#define REG_CLKTRK 0x860 +#define REG_ADCCLK 0x8ac +#define REG_ADC160 0x8c4 +#define REG_ADC40 0x8c8 +#define REG_CDDTXP 0x93c +#define REG_TXPSEL1 0x940 +#define REG_ACBB0 0x948 +#define REG_ACBBRXFIR 0x94c +#define REG_ACGG2TBL 0x958 +#define REG_RXSB 0xa00 +#define REG_ADCINI 0xa04 +#define REG_TXSF2 0xa24 +#define REG_TXSF6 0xa28 +#define REG_RXDESC 0xa2c +#define REG_ENTXCCK 0xa80 +#define REG_AGCTR_A 0xc08 +#define REG_TXDFIR 0xc20 +#define REG_RXIGI_A 0xc50 +#define REG_TRSW 0xca0 +#define REG_RFESEL0 0xcb0 +#define REG_RFESEL8 0xcb4 +#define REG_RFECTL 0xcb8 +#define REG_RFEINV 0xcbc +#define REG_AGCTR_B 0xe08 +#define REG_RXIGI_B 0xe50 +#define REG_ANTWT 0x1904 +#define REG_IQKFAILMSK 0x1bf0 + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c new file mode 100644 index 000000000000..970f903f7dc7 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c @@ -0,0 +1,6611 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include "main.h" +#include "phy.h" +#include "rtw8821c_table.h" + +static const u32 rtw8821c_mac[] = { + 0x010, 0x00000043, + 0x025, 0x0000001D, + 0x026, 0x000000CE, + 0x04F, 0x00000001, + 0x029, 0x000000F9, + 0x420, 0x00000080, + 0x421, 0x0000000F, + 0x428, 0x0000000A, + 0x429, 0x00000010, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000007, + 0x437, 0x00000008, + 0x43C, 0x00000004, + 0x43D, 0x00000005, + 0x43E, 0x00000007, + 0x43F, 0x00000008, + 0x440, 0x0000005D, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000010, + 0x445, 0x000000F0, + 0x446, 0x00000001, + 0x447, 0x000000FE, + 0x448, 0x00000000, + 0x449, 0x00000000, + 0x44A, 0x00000000, + 0x44B, 0x00000040, + 0x44C, 0x00000010, + 0x44D, 0x000000F0, + 0x44E, 0x0000003F, + 0x44F, 0x00000000, + 0x450, 0x00000000, + 0x451, 0x00000000, + 0x452, 0x00000000, + 0x453, 0x00000040, + 0x455, 0x00000070, + 0x45E, 0x00000004, + 0x49C, 0x00000010, + 0x49D, 0x000000F0, + 0x49E, 0x00000000, + 0x49F, 0x00000006, + 0x4A0, 0x000000E0, + 0x4A1, 0x00000003, + 0x4A2, 0x00000000, + 0x4A3, 0x00000040, + 0x4A4, 0x00000015, + 0x4A5, 0x000000F0, + 0x4A6, 0x00000000, + 0x4A7, 0x00000006, + 0x4A8, 0x000000E0, + 0x4A9, 0x00000000, + 0x4AA, 0x00000000, + 0x4AB, 0x00000000, + 0x7DA, 0x00000008, + 0x1448, 0x00000006, + 0x144A, 0x00000006, + 0x144C, 0x00000006, + 0x144E, 0x00000006, + 0x4C8, 0x000000FF, + 0x4C9, 0x00000008, + 0x4CC, 0x000000FF, + 0x4CD, 0x000000FF, + 0x4CE, 0x00000001, + 0x4CF, 0x00000008, + 0x500, 0x00000026, + 0x501, 0x000000A2, + 0x502, 0x0000002F, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000A3, + 0x506, 0x0000005E, + 0x507, 0x00000000, + 0x508, 0x0000002B, + 0x509, 0x000000A4, + 0x50A, 0x0000005E, + 0x50B, 0x00000000, + 0x50C, 0x0000004F, + 0x50D, 0x000000A4, + 0x50E, 0x00000000, + 0x50F, 0x00000000, + 0x512, 0x0000001C, + 0x514, 0x0000000A, + 0x516, 0x0000000A, + 0x521, 0x0000002F, + 0x525, 0x0000004F, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55C, 0x00000050, + 0x55D, 0x000000FF, + 0x577, 0x0000000B, + 0x578, 0x00000014, + 0x579, 0x00000014, + 0x57A, 0x00000014, + 0x5BE, 0x00000064, + 0x605, 0x00000030, + 0x608, 0x0000000E, + 0x609, 0x00000022, + 0x60C, 0x00000018, + 0x6A0, 0x000000FF, + 0x6A1, 0x000000FF, + 0x6A2, 0x000000FF, + 0x6A3, 0x000000FF, + 0x6A4, 0x000000FF, + 0x6A5, 
0x000000FF, + 0x6DE, 0x00000084, + 0x620, 0x000000FF, + 0x621, 0x000000FF, + 0x622, 0x000000FF, + 0x623, 0x000000FF, + 0x624, 0x000000FF, + 0x625, 0x000000FF, + 0x626, 0x000000FF, + 0x627, 0x000000FF, + 0x638, 0x00000050, + 0x63C, 0x0000000A, + 0x63D, 0x0000000A, + 0x63E, 0x0000000E, + 0x63F, 0x0000000E, + 0x640, 0x00000040, + 0x642, 0x00000040, + 0x643, 0x00000000, + 0x652, 0x000000C8, + 0x66E, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70A, 0x00000065, + 0x70B, 0x00000087, + 0x718, 0x00000040, + 0x7D4, 0x00000098, + +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821c_mac, rtw_phy_cfg_mac); + +static const u32 rtw8821c_agc[] = { + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000003, + 0x81C, 0xFA020003, + 0x81C, 0xF9040003, + 0x81C, 0xF8060003, + 0x81C, 0xF7080003, + 0x81C, 0xF60A0003, + 0x81C, 0xF50C0003, + 0x81C, 0xF40E0003, + 0x81C, 0xF3100003, + 0x81C, 0xF2120003, + 0x81C, 0xF1140003, + 0x81C, 0xF0160003, + 0x81C, 0xEF180003, + 0x81C, 0xEE1A0003, + 0x81C, 0xED1C0003, + 0x81C, 0xEC1E0003, + 0x81C, 0xEB200003, + 0x81C, 0xEA220003, + 0x81C, 0xE9240003, + 0x81C, 0xE8260003, + 0x81C, 0xE7280003, + 0x81C, 0xE62A0003, + 0x81C, 0xE52C0003, + 0x81C, 0xE42E0003, + 0x81C, 0xE3300003, + 0x81C, 0xE2320003, + 0x81C, 0xE1340003, + 0x81C, 0xC4360003, + 0x81C, 0xC3380003, + 0x81C, 0xC23A0003, + 0x81C, 0xC13C0003, + 0x81C, 0x883E0003, + 0x81C, 0x87400003, + 0x81C, 0x86420003, + 0x81C, 0x85440003, + 0x81C, 0x84460003, + 0x81C, 0x83480003, + 0x81C, 0x824A0003, + 0x81C, 0x814C0003, + 0x81C, 0x804E0003, + 0x81C, 0x64500003, + 0x81C, 0x63520003, + 0x81C, 0x62540003, + 0x81C, 0x61560003, + 0x81C, 0x60580003, + 0x81C, 0x475A0003, + 0x81C, 0x465C0003, + 0x81C, 0x455E0003, + 0x81C, 0x44600003, + 0x81C, 0x43620003, + 0x81C, 0x42640003, + 0x81C, 0x41660003, + 0x81C, 0x40680003, + 0x81C, 0x236A0003, + 0x81C, 0x226C0003, + 0x81C, 0x056E0003, + 0x81C, 0x04700003, + 0x81C, 0x03720003, + 0x81C, 0x02740003, + 0x81C, 0x01760003, + 0x81C, 0x01780003, + 0x81C, 0x017A0003, + 0x81C, 0x017C0003, + 0x81C, 0x017E0003, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000003, + 0x81C, 0xFA020003, + 0x81C, 0xF9040003, + 0x81C, 0xF8060003, + 0x81C, 0xF7080003, + 0x81C, 0xF60A0003, + 0x81C, 0xF50C0003, + 0x81C, 0xF40E0003, + 0x81C, 0xF3100003, + 0x81C, 0xF2120003, + 0x81C, 0xF1140003, + 0x81C, 0xF0160003, + 0x81C, 0xEF180003, + 0x81C, 0xEE1A0003, + 0x81C, 0xED1C0003, + 0x81C, 0xEC1E0003, + 0x81C, 0xEB200003, + 0x81C, 0xEA220003, + 0x81C, 0xE9240003, + 0x81C, 0xE8260003, + 0x81C, 0xE7280003, + 0x81C, 0xE62A0003, + 0x81C, 0xE52C0003, + 0x81C, 0xE42E0003, + 0x81C, 0xE3300003, + 0x81C, 0xE2320003, + 0x81C, 0xE1340003, + 0x81C, 0xC4360003, + 0x81C, 0xC3380003, + 0x81C, 0xC23A0003, + 0x81C, 0xC13C0003, + 0x81C, 0x883E0003, + 0x81C, 0x87400003, + 0x81C, 0x86420003, + 0x81C, 0x85440003, + 0x81C, 0x84460003, + 0x81C, 0x83480003, + 0x81C, 0x824A0003, + 0x81C, 0x814C0003, + 0x81C, 0x804E0003, + 0x81C, 0x64500003, + 0x81C, 0x63520003, + 0x81C, 0x62540003, + 0x81C, 0x61560003, + 0x81C, 0x60580003, + 0x81C, 0x475A0003, + 0x81C, 0x465C0003, + 0x81C, 0x455E0003, + 0x81C, 0x44600003, + 0x81C, 0x43620003, + 0x81C, 0x42640003, + 0x81C, 0x41660003, + 0x81C, 0x40680003, + 0x81C, 0x236A0003, + 0x81C, 0x226C0003, + 0x81C, 0x056E0003, + 0x81C, 0x04700003, + 0x81C, 0x03720003, + 0x81C, 0x02740003, + 0x81C, 0x01760003, + 0x81C, 0x01780003, + 0x81C, 0x017A0003, + 0x81C, 0x017C0003, + 0x81C, 0x017E0003, + 0xA0000000, 0x00000000, + 0x81C, 
0xFB000003, + 0x81C, 0xFA020003, + 0x81C, 0xF9040003, + 0x81C, 0xF8060003, + 0x81C, 0xF7080003, + 0x81C, 0xF60A0003, + 0x81C, 0xF50C0003, + 0x81C, 0xF40E0003, + 0x81C, 0xF3100003, + 0x81C, 0xF2120003, + 0x81C, 0xF1140003, + 0x81C, 0xF0160003, + 0x81C, 0xEF180003, + 0x81C, 0xEE1A0003, + 0x81C, 0xED1C0003, + 0x81C, 0xEC1E0003, + 0x81C, 0xEB200003, + 0x81C, 0xEA220003, + 0x81C, 0xE9240003, + 0x81C, 0xE8260003, + 0x81C, 0xE7280003, + 0x81C, 0xE62A0003, + 0x81C, 0xCA2C0003, + 0x81C, 0xC92E0003, + 0x81C, 0xC8300003, + 0x81C, 0xC7320003, + 0x81C, 0xC6340003, + 0x81C, 0xC5360003, + 0x81C, 0xC4380003, + 0x81C, 0xC33A0003, + 0x81C, 0xC23C0003, + 0x81C, 0xC13E0003, + 0x81C, 0x88400003, + 0x81C, 0x87420003, + 0x81C, 0x86440003, + 0x81C, 0x85460003, + 0x81C, 0x84480003, + 0x81C, 0x834A0003, + 0x81C, 0x674C0003, + 0x81C, 0x664E0003, + 0x81C, 0x65500003, + 0x81C, 0x64520003, + 0x81C, 0x63540003, + 0x81C, 0x62560003, + 0x81C, 0x61580003, + 0x81C, 0x455A0003, + 0x81C, 0x445C0003, + 0x81C, 0x435E0003, + 0x81C, 0x42600003, + 0x81C, 0x41620003, + 0x81C, 0x25640003, + 0x81C, 0x24660003, + 0x81C, 0x23680003, + 0x81C, 0x226A0003, + 0x81C, 0x216C0003, + 0x81C, 0x016E0003, + 0x81C, 0x01700003, + 0x81C, 0x01720003, + 0x81C, 0x01740003, + 0x81C, 0x01760003, + 0x81C, 0x01780003, + 0x81C, 0x017A0003, + 0x81C, 0x017C0003, + 0x81C, 0x017E0003, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFD000103, + 0x81C, 0xFC020103, + 0x81C, 0xFB040103, + 0x81C, 0xFA060103, + 0x81C, 0xF9080103, + 0x81C, 0xF80A0103, + 0x81C, 0xF70C0103, + 0x81C, 0xF60E0103, + 0x81C, 0xF5100103, + 0x81C, 0xF4120103, + 0x81C, 0xF3140103, + 0x81C, 0xF2160103, + 0x81C, 0xF1180103, + 0x81C, 0xF01A0103, + 0x81C, 0xEF1C0103, + 0x81C, 0xEE1E0103, + 0x81C, 0xED200103, + 0x81C, 0xEC220103, + 0x81C, 0xEB240103, + 0x81C, 0xEA260103, + 0x81C, 0xE9280103, + 0x81C, 0xE82A0103, + 0x81C, 0xE72C0103, + 0x81C, 0xE62E0103, + 0x81C, 0xE5300103, + 0x81C, 0xE4320103, + 0x81C, 0xE3340103, + 0x81C, 0xE2360103, + 0x81C, 0xE1380103, + 0x81C, 0xE03A0103, + 0x81C, 0xC33C0103, + 0x81C, 0xC23E0103, + 0x81C, 0xC1400103, + 0x81C, 0xC0420103, + 0x81C, 0xA3440103, + 0x81C, 0xA2460103, + 0x81C, 0xA1480103, + 0x81C, 0xA04A0103, + 0x81C, 0x824C0103, + 0x81C, 0x814E0103, + 0x81C, 0x80500103, + 0x81C, 0x62520103, + 0x81C, 0x61540103, + 0x81C, 0x60560103, + 0x81C, 0x24580103, + 0x81C, 0x235A0103, + 0x81C, 0x225C0103, + 0x81C, 0x215E0103, + 0x81C, 0x20600103, + 0x81C, 0x03620103, + 0x81C, 0x02640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF6000103, + 0x81C, 0xF5020103, + 0x81C, 0xF4040103, + 0x81C, 0xF3060103, + 0x81C, 0xF2080103, + 0x81C, 0xF10A0103, + 0x81C, 0xF00C0103, + 0x81C, 0xEF0E0103, + 0x81C, 0xEE100103, + 0x81C, 0xED120103, + 0x81C, 0xEC140103, + 0x81C, 0xCE160103, + 0x81C, 0xEA180103, + 0x81C, 0xE91A0103, + 0x81C, 0xE81C0103, + 0x81C, 0xE71E0103, + 0x81C, 0xE6200103, + 0x81C, 0xE5220103, + 0x81C, 0xE4240103, + 0x81C, 0xE3260103, + 0x81C, 0xE2280103, + 0x81C, 0xE12A0103, + 0x81C, 0xC32C0103, + 0x81C, 0xA62E0103, + 0x81C, 0xC1300103, + 0x81C, 0xA4320103, + 0x81C, 0xA3340103, + 0x81C, 0xA2360103, + 0x81C, 0xA1380103, + 0x81C, 0x833A0103, + 0x81C, 0x823C0103, + 0x81C, 0x813E0103, + 0x81C, 0x63400103, + 0x81C, 0x62420103, + 0x81C, 0x61440103, + 0x81C, 
0x60460103, + 0x81C, 0x25480103, + 0x81C, 0x244A0103, + 0x81C, 0x234C0103, + 0x81C, 0x064E0103, + 0x81C, 0x21500103, + 0x81C, 0x04520103, + 0x81C, 0x03540103, + 0x81C, 0x02560103, + 0x81C, 0x01580103, + 0x81C, 0x005A0103, + 0x81C, 0x005C0103, + 0x81C, 0x005E0103, + 0x81C, 0x00600103, + 0x81C, 0x00620103, + 0x81C, 0x00640103, + 0x81C, 0x00660103, + 0x81C, 0x00680103, + 0x81C, 0x006A0103, + 0x81C, 0x006C0103, + 0x81C, 0x006E0103, + 0x81C, 0x00700103, + 0x81C, 0x00720103, + 0x81C, 0x00740103, + 0x81C, 0x00760103, + 0x81C, 0x00780103, + 0x81C, 0x007A0103, + 0x81C, 0x007C0103, + 0x81C, 0x007E0103, + 0xA0000000, 0x00000000, + 0x81C, 0xFD000103, + 0x81C, 0xFC020103, + 0x81C, 0xFB040103, + 0x81C, 0xFA060103, + 0x81C, 0xF9080103, + 0x81C, 0xF80A0103, + 0x81C, 0xF70C0103, + 0x81C, 0xF60E0103, + 0x81C, 0xF5100103, + 0x81C, 0xF4120103, + 0x81C, 0xF3140103, + 0x81C, 0xF2160103, + 0x81C, 0xF1180103, + 0x81C, 0xF01A0103, + 0x81C, 0xEF1C0103, + 0x81C, 0xEE1E0103, + 0x81C, 0xED200103, + 0x81C, 0xEC220103, + 0x81C, 0xEB240103, + 0x81C, 0xEA260103, + 0x81C, 0xE9280103, + 0x81C, 0xE82A0103, + 0x81C, 0xE72C0103, + 0x81C, 0xE62E0103, + 0x81C, 0xE5300103, + 0x81C, 0xE4320103, + 0x81C, 0xE3340103, + 0x81C, 0xE2360103, + 0x81C, 0xE1380103, + 0x81C, 0xE03A0103, + 0x81C, 0xA83C0103, + 0x81C, 0xA73E0103, + 0x81C, 0xA6400103, + 0x81C, 0xA5420103, + 0x81C, 0xA4440103, + 0x81C, 0xA3460103, + 0x81C, 0xA2480103, + 0x81C, 0xA14A0103, + 0x81C, 0x834C0103, + 0x81C, 0x824E0103, + 0x81C, 0x81500103, + 0x81C, 0x63520103, + 0x81C, 0x62540103, + 0x81C, 0x61560103, + 0x81C, 0x25580103, + 0x81C, 0x245A0103, + 0x81C, 0x235C0103, + 0x81C, 0x225E0103, + 0x81C, 0x04600103, + 0x81C, 0x03620103, + 0x81C, 0x02640103, + 0x81C, 0x01660103, + 0x81C, 0x01680103, + 0x81C, 0x016A0103, + 0x81C, 0x016C0103, + 0x81C, 0x016E0103, + 0x81C, 0x01700103, + 0x81C, 0x01720103, + 0x81C, 0x01740103, + 0x81C, 0x01760103, + 0x81C, 0x01780103, + 0x81C, 0x017A0103, + 0x81C, 0x017C0103, + 0x81C, 0x017E0103, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000203, + 0x81C, 0xFA020203, + 0x81C, 0xF9040203, + 0x81C, 0xF8060203, + 0x81C, 0xF7080203, + 0x81C, 0xF60A0203, + 0x81C, 0xF50C0203, + 0x81C, 0xF40E0203, + 0x81C, 0xF3100203, + 0x81C, 0xF2120203, + 0x81C, 0xF1140203, + 0x81C, 0xF0160203, + 0x81C, 0xEF180203, + 0x81C, 0xEE1A0203, + 0x81C, 0xED1C0203, + 0x81C, 0xEC1E0203, + 0x81C, 0xEB200203, + 0x81C, 0xEA220203, + 0x81C, 0xE9240203, + 0x81C, 0xE8260203, + 0x81C, 0xE7280203, + 0x81C, 0xE62A0203, + 0x81C, 0xE52C0203, + 0x81C, 0xE42E0203, + 0x81C, 0xE3300203, + 0x81C, 0xE2320203, + 0x81C, 0xE1340203, + 0x81C, 0xC5360203, + 0x81C, 0xC4380203, + 0x81C, 0xC33A0203, + 0x81C, 0xC23C0203, + 0x81C, 0xC13E0203, + 0x81C, 0xA4400203, + 0x81C, 0xA3420203, + 0x81C, 0xA2440203, + 0x81C, 0xA1460203, + 0x81C, 0xA0480203, + 0x81C, 0x834A0203, + 0x81C, 0x824C0203, + 0x81C, 0x814E0203, + 0x81C, 0x63500203, + 0x81C, 0x62520203, + 0x81C, 0x61540203, + 0x81C, 0x60560203, + 0x81C, 0x23580203, + 0x81C, 0x225A0203, + 0x81C, 0x215C0203, + 0x81C, 0x205E0203, + 0x81C, 0x04600203, + 0x81C, 0x03620203, + 0x81C, 0x02640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF6000203, + 0x81C, 0xF5020203, + 0x81C, 0xF4040203, + 0x81C, 0xF3060203, + 0x81C, 0xF2080203, + 0x81C, 
0xF10A0203, + 0x81C, 0xF00C0203, + 0x81C, 0xEF0E0203, + 0x81C, 0xEE100203, + 0x81C, 0xED120203, + 0x81C, 0xEC140203, + 0x81C, 0xEB160203, + 0x81C, 0xEA180203, + 0x81C, 0xE91A0203, + 0x81C, 0xE81C0203, + 0x81C, 0xE71E0203, + 0x81C, 0xE6200203, + 0x81C, 0xE5220203, + 0x81C, 0xE4240203, + 0x81C, 0xE3260203, + 0x81C, 0xE2280203, + 0x81C, 0xE12A0203, + 0x81C, 0xE02C0203, + 0x81C, 0xC22E0203, + 0x81C, 0xC1300203, + 0x81C, 0xC0320203, + 0x81C, 0xA3340203, + 0x81C, 0xA2360203, + 0x81C, 0xA1380203, + 0x81C, 0xA03A0203, + 0x81C, 0x833C0203, + 0x81C, 0x823E0203, + 0x81C, 0x81400203, + 0x81C, 0x80420203, + 0x81C, 0x62440203, + 0x81C, 0x61460203, + 0x81C, 0x42480203, + 0x81C, 0x414A0203, + 0x81C, 0x234C0203, + 0x81C, 0x224E0203, + 0x81C, 0x21500203, + 0x81C, 0x20520203, + 0x81C, 0x03540203, + 0x81C, 0x02560203, + 0x81C, 0x01580203, + 0x81C, 0x005A0203, + 0x81C, 0x005C0203, + 0x81C, 0x005E0203, + 0x81C, 0x00600203, + 0x81C, 0x00620203, + 0x81C, 0x00640203, + 0x81C, 0x00660203, + 0x81C, 0x00680203, + 0x81C, 0x006A0203, + 0x81C, 0x006C0203, + 0x81C, 0x006E0203, + 0x81C, 0x00700203, + 0x81C, 0x00720203, + 0x81C, 0x00740203, + 0x81C, 0x00760203, + 0x81C, 0x00780203, + 0x81C, 0x007A0203, + 0x81C, 0x007C0203, + 0x81C, 0x007E0203, + 0xA0000000, 0x00000000, + 0x81C, 0xFC000203, + 0x81C, 0xFB020203, + 0x81C, 0xFA040203, + 0x81C, 0xF9060203, + 0x81C, 0xF8080203, + 0x81C, 0xF70A0203, + 0x81C, 0xF60C0203, + 0x81C, 0xF50E0203, + 0x81C, 0xF4100203, + 0x81C, 0xF3120203, + 0x81C, 0xF2140203, + 0x81C, 0xF1160203, + 0x81C, 0xF0180203, + 0x81C, 0xEF1A0203, + 0x81C, 0xEE1C0203, + 0x81C, 0xED1E0203, + 0x81C, 0xEC200203, + 0x81C, 0xEB220203, + 0x81C, 0xEA240203, + 0x81C, 0xE9260203, + 0x81C, 0xE8280203, + 0x81C, 0xE72A0203, + 0x81C, 0xE62C0203, + 0x81C, 0xE52E0203, + 0x81C, 0xE4300203, + 0x81C, 0xE3320203, + 0x81C, 0xE2340203, + 0x81C, 0xE1360203, + 0x81C, 0xC5380203, + 0x81C, 0xC43A0203, + 0x81C, 0xC33C0203, + 0x81C, 0xC23E0203, + 0x81C, 0xA6400203, + 0x81C, 0xA5420203, + 0x81C, 0xA4440203, + 0x81C, 0xA3460203, + 0x81C, 0xA2480203, + 0x81C, 0x844A0203, + 0x81C, 0x834C0203, + 0x81C, 0x824E0203, + 0x81C, 0x64500203, + 0x81C, 0x63520203, + 0x81C, 0x62540203, + 0x81C, 0x61560203, + 0x81C, 0x60580203, + 0x81C, 0x235A0203, + 0x81C, 0x225C0203, + 0x81C, 0x215E0203, + 0x81C, 0x04600203, + 0x81C, 0x03620203, + 0x81C, 0x02640203, + 0x81C, 0x01660203, + 0x81C, 0x01680203, + 0x81C, 0x016A0203, + 0x81C, 0x016C0203, + 0x81C, 0x016E0203, + 0x81C, 0x01700203, + 0x81C, 0x01720203, + 0x81C, 0x01740203, + 0x81C, 0x01760203, + 0x81C, 0x01780203, + 0x81C, 0x017A0203, + 0x81C, 0x017C0203, + 0x81C, 0x017E0203, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFB000303, + 0x81C, 0xFA020303, + 0x81C, 0xF9040303, + 0x81C, 0xF8060303, + 0x81C, 0xF7080303, + 0x81C, 0xF60A0303, + 0x81C, 0xF50C0303, + 0x81C, 0xF40E0303, + 0x81C, 0xF3100303, + 0x81C, 0xF2120303, + 0x81C, 0xF1140303, + 0x81C, 0xF0160303, + 0x81C, 0xEF180303, + 0x81C, 0xEE1A0303, + 0x81C, 0xED1C0303, + 0x81C, 0xEC1E0303, + 0x81C, 0xEB200303, + 0x81C, 0xEA220303, + 0x81C, 0xE9240303, + 0x81C, 0xE8260303, + 0x81C, 0xE7280303, + 0x81C, 0xE62A0303, + 0x81C, 0xE52C0303, + 0x81C, 0xE42E0303, + 0x81C, 0xE3300303, + 0x81C, 0xE2320303, + 0x81C, 0xE1340303, + 0x81C, 0xC4360303, + 0x81C, 0xC3380303, + 0x81C, 0xC23A0303, + 0x81C, 0xC13C0303, + 0x81C, 0xA53E0303, + 0x81C, 0xA4400303, + 0x81C, 0xA3420303, + 0x81C, 0xA2440303, + 0x81C, 0xA1460303, + 0x81C, 0x83480303, + 0x81C, 0x824A0303, + 0x81C, 0x814C0303, + 0x81C, 0x644E0303, + 0x81C, 0x63500303, + 0x81C, 
0x62520303, + 0x81C, 0x61540303, + 0x81C, 0x60560303, + 0x81C, 0x23580303, + 0x81C, 0x225A0303, + 0x81C, 0x215C0303, + 0x81C, 0x045E0303, + 0x81C, 0x03600303, + 0x81C, 0x02620303, + 0x81C, 0x01640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xF5000303, + 0x81C, 0xF4020303, + 0x81C, 0xF3040303, + 0x81C, 0xF2060303, + 0x81C, 0xF1080303, + 0x81C, 0xF00A0303, + 0x81C, 0xEF0C0303, + 0x81C, 0xEE0E0303, + 0x81C, 0xED100303, + 0x81C, 0xEC120303, + 0x81C, 0xEB140303, + 0x81C, 0xEA160303, + 0x81C, 0xE9180303, + 0x81C, 0xE81A0303, + 0x81C, 0xE71C0303, + 0x81C, 0xE61E0303, + 0x81C, 0xE5200303, + 0x81C, 0xE4220303, + 0x81C, 0xE3240303, + 0x81C, 0xE2260303, + 0x81C, 0xE1280303, + 0x81C, 0xE02A0303, + 0x81C, 0xA72C0303, + 0x81C, 0xA62E0303, + 0x81C, 0xA5300303, + 0x81C, 0xA4320303, + 0x81C, 0xA3340303, + 0x81C, 0xA2360303, + 0x81C, 0xA1380303, + 0x81C, 0xA03A0303, + 0x81C, 0x823C0303, + 0x81C, 0x643E0303, + 0x81C, 0x63400303, + 0x81C, 0x62420303, + 0x81C, 0x61440303, + 0x81C, 0x60460303, + 0x81C, 0x24480303, + 0x81C, 0x234A0303, + 0x81C, 0x224C0303, + 0x81C, 0x054E0303, + 0x81C, 0x04500303, + 0x81C, 0x03520303, + 0x81C, 0x02540303, + 0x81C, 0x01560303, + 0x81C, 0x00580303, + 0x81C, 0x005A0303, + 0x81C, 0x005C0303, + 0x81C, 0x005E0303, + 0x81C, 0x00600303, + 0x81C, 0x00620303, + 0x81C, 0x00640303, + 0x81C, 0x00660303, + 0x81C, 0x00680303, + 0x81C, 0x006A0303, + 0x81C, 0x006C0303, + 0x81C, 0x006E0303, + 0x81C, 0x00700303, + 0x81C, 0x00720303, + 0x81C, 0x00740303, + 0x81C, 0x00760303, + 0x81C, 0x00780303, + 0x81C, 0x007A0303, + 0x81C, 0x007C0303, + 0x81C, 0x007E0303, + 0xA0000000, 0x00000000, + 0x81C, 0xFC000303, + 0x81C, 0xFB020303, + 0x81C, 0xFA040303, + 0x81C, 0xF9060303, + 0x81C, 0xF8080303, + 0x81C, 0xF70A0303, + 0x81C, 0xF60C0303, + 0x81C, 0xF50E0303, + 0x81C, 0xF4100303, + 0x81C, 0xF3120303, + 0x81C, 0xF2140303, + 0x81C, 0xF1160303, + 0x81C, 0xF0180303, + 0x81C, 0xEF1A0303, + 0x81C, 0xEE1C0303, + 0x81C, 0xED1E0303, + 0x81C, 0xEC200303, + 0x81C, 0xEB220303, + 0x81C, 0xEA240303, + 0x81C, 0xE9260303, + 0x81C, 0xE8280303, + 0x81C, 0xE72A0303, + 0x81C, 0xE62C0303, + 0x81C, 0xE52E0303, + 0x81C, 0xE4300303, + 0x81C, 0xE3320303, + 0x81C, 0xE2340303, + 0x81C, 0xE1360303, + 0x81C, 0xC4380303, + 0x81C, 0xC33A0303, + 0x81C, 0xC23C0303, + 0x81C, 0xC13E0303, + 0x81C, 0xA5400303, + 0x81C, 0xA4420303, + 0x81C, 0xA3440303, + 0x81C, 0xA2460303, + 0x81C, 0xA1480303, + 0x81C, 0x834A0303, + 0x81C, 0x824C0303, + 0x81C, 0x814E0303, + 0x81C, 0x64500303, + 0x81C, 0x63520303, + 0x81C, 0x62540303, + 0x81C, 0x61560303, + 0x81C, 0x24580303, + 0x81C, 0x235A0303, + 0x81C, 0x225C0303, + 0x81C, 0x215E0303, + 0x81C, 0x04600303, + 0x81C, 0x03620303, + 0x81C, 0x02640303, + 0x81C, 0x01660303, + 0x81C, 0x01680303, + 0x81C, 0x016A0303, + 0x81C, 0x016C0303, + 0x81C, 0x016E0303, + 0x81C, 0x01700303, + 0x81C, 0x01720303, + 0x81C, 0x01740303, + 0x81C, 0x01760303, + 0x81C, 0x01780303, + 0x81C, 0x017A0303, + 0x81C, 0x017C0303, + 0x81C, 0x017E0303, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000803, + 0x81C, 0xFB020803, + 0x81C, 0xFA040803, + 0x81C, 0xF9060803, + 0x81C, 0xF8080803, + 0x81C, 0xF70A0803, + 0x81C, 0xF60C0803, + 0x81C, 0xF50E0803, + 0x81C, 0xF4100803, + 0x81C, 0xF3120803, + 0x81C, 0xF2140803, + 0x81C, 
0xF1160803, + 0x81C, 0xF0180803, + 0x81C, 0xEF1A0803, + 0x81C, 0xEE1C0803, + 0x81C, 0xED1E0803, + 0x81C, 0xB5200803, + 0x81C, 0xB4220803, + 0x81C, 0xB3240803, + 0x81C, 0xB2260803, + 0x81C, 0xB1280803, + 0x81C, 0xB02A0803, + 0x81C, 0xAF2C0803, + 0x81C, 0xAE2E0803, + 0x81C, 0xAD300803, + 0x81C, 0xAC320803, + 0x81C, 0xAB340803, + 0x81C, 0xAA360803, + 0x81C, 0xA9380803, + 0x81C, 0xA83A0803, + 0x81C, 0xA73C0803, + 0x81C, 0xA63E0803, + 0x81C, 0x88400803, + 0x81C, 0x87420803, + 0x81C, 0x86440803, + 0x81C, 0x85460803, + 0x81C, 0x84480803, + 0x81C, 0x834A0803, + 0x81C, 0x674C0803, + 0x81C, 0x664E0803, + 0x81C, 0x65500803, + 0x81C, 0x64520803, + 0x81C, 0x63540803, + 0x81C, 0x62560803, + 0x81C, 0x61580803, + 0x81C, 0x455A0803, + 0x81C, 0x445C0803, + 0x81C, 0x435E0803, + 0x81C, 0x42600803, + 0x81C, 0x41620803, + 0x81C, 0x25640803, + 0x81C, 0x24660803, + 0x81C, 0x23680803, + 0x81C, 0x226A0803, + 0x81C, 0x216C0803, + 0x81C, 0x016E0803, + 0x81C, 0x01700803, + 0x81C, 0x01720803, + 0x81C, 0x01740803, + 0x81C, 0x01760803, + 0x81C, 0x01780803, + 0x81C, 0x017A0803, + 0x81C, 0x017C0803, + 0x81C, 0x017E0803, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFC000803, + 0x81C, 0xFB020803, + 0x81C, 0xFA040803, + 0x81C, 0xF9060803, + 0x81C, 0xF8080803, + 0x81C, 0xF70A0803, + 0x81C, 0xF60C0803, + 0x81C, 0xF50E0803, + 0x81C, 0xF4100803, + 0x81C, 0xF3120803, + 0x81C, 0xF2140803, + 0x81C, 0xF1160803, + 0x81C, 0xF0180803, + 0x81C, 0xEF1A0803, + 0x81C, 0xEE1C0803, + 0x81C, 0xED1E0803, + 0x81C, 0xB5200803, + 0x81C, 0xB4220803, + 0x81C, 0xB3240803, + 0x81C, 0xB2260803, + 0x81C, 0xB1280803, + 0x81C, 0xB02A0803, + 0x81C, 0xAF2C0803, + 0x81C, 0xAE2E0803, + 0x81C, 0xAD300803, + 0x81C, 0xAC320803, + 0x81C, 0xAB340803, + 0x81C, 0xAA360803, + 0x81C, 0xA9380803, + 0x81C, 0xA83A0803, + 0x81C, 0xA73C0803, + 0x81C, 0xA63E0803, + 0x81C, 0x88400803, + 0x81C, 0x87420803, + 0x81C, 0x86440803, + 0x81C, 0x85460803, + 0x81C, 0x84480803, + 0x81C, 0x834A0803, + 0x81C, 0x674C0803, + 0x81C, 0x664E0803, + 0x81C, 0x65500803, + 0x81C, 0x64520803, + 0x81C, 0x63540803, + 0x81C, 0x62560803, + 0x81C, 0x61580803, + 0x81C, 0x455A0803, + 0x81C, 0x445C0803, + 0x81C, 0x435E0803, + 0x81C, 0x42600803, + 0x81C, 0x41620803, + 0x81C, 0x25640803, + 0x81C, 0x24660803, + 0x81C, 0x23680803, + 0x81C, 0x226A0803, + 0x81C, 0x216C0803, + 0x81C, 0x016E0803, + 0x81C, 0x01700803, + 0x81C, 0x01720803, + 0x81C, 0x01740803, + 0x81C, 0x01760803, + 0x81C, 0x01780803, + 0x81C, 0x017A0803, + 0x81C, 0x017C0803, + 0x81C, 0x017E0803, + 0xA0000000, 0x00000000, + 0x81C, 0xFC000803, + 0x81C, 0xFB020803, + 0x81C, 0xFA040803, + 0x81C, 0xF9060803, + 0x81C, 0xF8080803, + 0x81C, 0xF70A0803, + 0x81C, 0xF60C0803, + 0x81C, 0xF50E0803, + 0x81C, 0xF4100803, + 0x81C, 0xF3120803, + 0x81C, 0xF2140803, + 0x81C, 0xF1160803, + 0x81C, 0xF0180803, + 0x81C, 0xEF1A0803, + 0x81C, 0xEE1C0803, + 0x81C, 0xED1E0803, + 0x81C, 0xB5200803, + 0x81C, 0xB4220803, + 0x81C, 0xB3240803, + 0x81C, 0xB2260803, + 0x81C, 0xB1280803, + 0x81C, 0xB02A0803, + 0x81C, 0xAF2C0803, + 0x81C, 0xAE2E0803, + 0x81C, 0xAD300803, + 0x81C, 0xAC320803, + 0x81C, 0xAB340803, + 0x81C, 0xAA360803, + 0x81C, 0xA9380803, + 0x81C, 0xA83A0803, + 0x81C, 0xA73C0803, + 0x81C, 0xA63E0803, + 0x81C, 0x88400803, + 0x81C, 0x87420803, + 0x81C, 0x86440803, + 0x81C, 0x85460803, + 0x81C, 0x84480803, + 0x81C, 0x834A0803, + 0x81C, 0x674C0803, + 0x81C, 0x664E0803, + 0x81C, 0x65500803, + 0x81C, 0x64520803, + 0x81C, 0x63540803, + 0x81C, 0x62560803, + 0x81C, 0x61580803, + 0x81C, 0x455A0803, + 0x81C, 0x445C0803, + 0x81C, 0x435E0803, + 0x81C, 0x42600803, + 
0x81C, 0x41620803, + 0x81C, 0x25640803, + 0x81C, 0x24660803, + 0x81C, 0x23680803, + 0x81C, 0x226A0803, + 0x81C, 0x216C0803, + 0x81C, 0x016E0803, + 0x81C, 0x01700803, + 0x81C, 0x01720803, + 0x81C, 0x01740803, + 0x81C, 0x01760803, + 0x81C, 0x01780803, + 0x81C, 0x017A0803, + 0x81C, 0x017C0803, + 0x81C, 0x017E0803, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000913, + 0x81C, 0xFE020913, + 0x81C, 0xFD040913, + 0x81C, 0xFC060913, + 0x81C, 0xFB080913, + 0x81C, 0xFA0A0913, + 0x81C, 0xF90C0913, + 0x81C, 0xF80E0913, + 0x81C, 0xF7100913, + 0x81C, 0xF6120913, + 0x81C, 0xF5140913, + 0x81C, 0xF4160913, + 0x81C, 0xF3180913, + 0x81C, 0xF21A0913, + 0x81C, 0xF11C0913, + 0x81C, 0x941E0913, + 0x81C, 0x93200913, + 0x81C, 0x92220913, + 0x81C, 0x91240913, + 0x81C, 0x90260913, + 0x81C, 0x8F280913, + 0x81C, 0x8E2A0913, + 0x81C, 0x8D2C0913, + 0x81C, 0x8C2E0913, + 0x81C, 0x8B300913, + 0x81C, 0x8A320913, + 0x81C, 0x89340913, + 0x81C, 0x88360913, + 0x81C, 0x87380913, + 0x81C, 0x863A0913, + 0x81C, 0x853C0913, + 0x81C, 0x843E0913, + 0x81C, 0x83400913, + 0x81C, 0x82420913, + 0x81C, 0x81440913, + 0x81C, 0x07460913, + 0x81C, 0x06480913, + 0x81C, 0x054A0913, + 0x81C, 0x044C0913, + 0x81C, 0x034E0913, + 0x81C, 0x02500913, + 0x81C, 0x01520913, + 0x81C, 0x88540903, + 0x81C, 0x87560903, + 0x81C, 0x86580903, + 0x81C, 0x855A0903, + 0x81C, 0x845C0903, + 0x81C, 0x835E0903, + 0x81C, 0x82600903, + 0x81C, 0x81620903, + 0x81C, 0x07640903, + 0x81C, 0x06660903, + 0x81C, 0x05680903, + 0x81C, 0x046A0903, + 0x81C, 0x036C0903, + 0x81C, 0x026E0903, + 0x81C, 0x01700903, + 0x81C, 0x01720903, + 0x81C, 0x01740903, + 0x81C, 0x01760903, + 0x81C, 0x01780903, + 0x81C, 0x017A0903, + 0x81C, 0x017C0903, + 0x81C, 0x017E0903, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0x81C, 0xFF000913, + 0x81C, 0xFE020913, + 0x81C, 0xFD040913, + 0x81C, 0xFC060913, + 0x81C, 0xFB080913, + 0x81C, 0xFA0A0913, + 0x81C, 0xF90C0913, + 0x81C, 0xF80E0913, + 0x81C, 0xF7100913, + 0x81C, 0xF6120913, + 0x81C, 0xF5140913, + 0x81C, 0xF4160913, + 0x81C, 0xF3180913, + 0x81C, 0xF21A0913, + 0x81C, 0xF11C0913, + 0x81C, 0x941E0913, + 0x81C, 0x93200913, + 0x81C, 0x92220913, + 0x81C, 0x91240913, + 0x81C, 0x90260913, + 0x81C, 0x8F280913, + 0x81C, 0x8E2A0913, + 0x81C, 0x8D2C0913, + 0x81C, 0x8C2E0913, + 0x81C, 0x8B300913, + 0x81C, 0x8A320913, + 0x81C, 0x89340913, + 0x81C, 0x88360913, + 0x81C, 0x87380913, + 0x81C, 0x863A0913, + 0x81C, 0x853C0913, + 0x81C, 0x843E0913, + 0x81C, 0x83400913, + 0x81C, 0x82420913, + 0x81C, 0x81440913, + 0x81C, 0x07460913, + 0x81C, 0x06480913, + 0x81C, 0x054A0913, + 0x81C, 0x044C0913, + 0x81C, 0x034E0913, + 0x81C, 0x02500913, + 0x81C, 0x01520913, + 0x81C, 0x88540903, + 0x81C, 0x87560903, + 0x81C, 0x86580903, + 0x81C, 0x855A0903, + 0x81C, 0x845C0903, + 0x81C, 0x835E0903, + 0x81C, 0x82600903, + 0x81C, 0x81620903, + 0x81C, 0x07640903, + 0x81C, 0x06660903, + 0x81C, 0x05680903, + 0x81C, 0x046A0903, + 0x81C, 0x036C0903, + 0x81C, 0x026E0903, + 0x81C, 0x01700903, + 0x81C, 0x01720903, + 0x81C, 0x01740903, + 0x81C, 0x01760903, + 0x81C, 0x01780903, + 0x81C, 0x017A0903, + 0x81C, 0x017C0903, + 0x81C, 0x017E0903, + 0xA0000000, 0x00000000, + 0x81C, 0xFF000913, + 0x81C, 0xFE020913, + 0x81C, 0xFD040913, + 0x81C, 0xFC060913, + 0x81C, 0xFB080913, + 0x81C, 0xFA0A0913, + 0x81C, 0xF90C0913, + 0x81C, 0xF80E0913, + 0x81C, 0xF7100913, + 0x81C, 0xF6120913, + 0x81C, 0xF5140913, + 0x81C, 0xF4160913, + 0x81C, 0xF3180913, + 0x81C, 0xF21A0913, + 0x81C, 0xF11C0913, + 0x81C, 0x941E0913, + 0x81C, 0x93200913, + 0x81C, 0x92220913, + 0x81C, 0x91240913, + 
0x81C, 0x90260913, + 0x81C, 0x8F280913, + 0x81C, 0x8E2A0913, + 0x81C, 0x8D2C0913, + 0x81C, 0x8C2E0913, + 0x81C, 0x8B300913, + 0x81C, 0x8A320913, + 0x81C, 0x89340913, + 0x81C, 0x88360913, + 0x81C, 0x87380913, + 0x81C, 0x863A0913, + 0x81C, 0x853C0913, + 0x81C, 0x843E0913, + 0x81C, 0x83400913, + 0x81C, 0x82420913, + 0x81C, 0x81440913, + 0x81C, 0x07460913, + 0x81C, 0x06480913, + 0x81C, 0x054A0913, + 0x81C, 0x044C0913, + 0x81C, 0x034E0913, + 0x81C, 0x02500913, + 0x81C, 0x01520913, + 0x81C, 0x88540903, + 0x81C, 0x87560903, + 0x81C, 0x86580903, + 0x81C, 0x855A0903, + 0x81C, 0x845C0903, + 0x81C, 0x835E0903, + 0x81C, 0x82600903, + 0x81C, 0x81620903, + 0x81C, 0x07640903, + 0x81C, 0x06660903, + 0x81C, 0x05680903, + 0x81C, 0x046A0903, + 0x81C, 0x036C0903, + 0x81C, 0x026E0903, + 0x81C, 0x01700903, + 0x81C, 0x01720903, + 0x81C, 0x01740903, + 0x81C, 0x01760903, + 0x81C, 0x01780903, + 0x81C, 0x017A0903, + 0x81C, 0x017C0903, + 0x81C, 0x017E0903, + 0xB0000000, 0x00000000, + 0x80001004, 0x00000000, 0x40000000, 0x00000000, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0x90001005, 0x00000000, 0x40000000, 0x00000000, + 0xC50, 0x00000022, + 0xC50, 0x00000022, + 0xA0000000, 0x00000000, + 0xC50, 0x00000022, + 0xC50, 0x00000020, + 0xB0000000, 0x00000000, + +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821c_agc, rtw_phy_cfg_agc); + +static const u32 rtw8821c_bb[] = { + 0x800, 0x9020D010, + 0x804, 0x80018180, + 0x808, 0x04028211, + 0x80C, 0x13D10011, + 0x810, 0x21104255, + 0x814, 0x020C3D10, + 0x818, 0x84A10385, + 0x81C, 0x1E1E081F, + 0x820, 0x0001AAAA, + 0x824, 0x00030FE0, + 0x828, 0x0000CCCC, + 0x82C, 0x75CB7010, + 0x830, 0x79A0EAAA, + 0x834, 0x072E698A, + 0x838, 0x87766461, + 0x83C, 0x9194B2B6, + 0x840, 0x171740E0, + 0x844, 0x4D3D7CDB, + 0x848, 0x4AD0408B, + 0x84C, 0x6AFBF7A5, + 0x850, 0x28A74706, + 0x854, 0x0001520C, + 0x858, 0x4060C000, + 0x85C, 0x74010160, + 0x860, 0x68A7C321, + 0x864, 0x79F27432, + 0x868, 0x8CA7A314, + 0x86C, 0x558C2878, + 0x870, 0x55555555, + 0x874, 0x27612C2E, + 0x878, 0xC0003152, + 0x87C, 0x5C8FC000, + 0x880, 0x00000000, + 0x884, 0x00000000, + 0x888, 0x00000000, + 0x88C, 0x00000000, + 0x890, 0x00000000, + 0x894, 0x00000000, + 0x898, 0x00000000, + 0x89C, 0x00000000, + 0x8A0, 0x00000013, + 0x8A4, 0x7F7F7F7F, + 0x8A8, 0x2202033E, + 0x8AC, 0xF00F000A, + 0x8B0, 0x00000600, + 0x8B4, 0x000FC080, + 0x8B8, 0xEC0057FF, + 0x8BC, 0x2CB520A3, + 0x8C0, 0xFFE04020, + 0x8C4, 0x47C00000, + 0x8C8, 0x00025165, + 0x8CC, 0x08188492, + 0x8D0, 0x0000B800, + 0x8D4, 0x860308A0, + 0x8D8, 0x290B5612, + 0x8DC, 0x00000000, + 0x8E0, 0x32D16777, + 0x8E4, 0x49092925, + 0x8E8, 0xFFFFC42C, + 0x8EC, 0x99999999, + 0x8F0, 0x00009999, + 0x8F4, 0x00D80FA1, + 0x8F8, 0x400000C0, + 0x8FC, 0x00000130, + 0x900, 0x00C00000, + 0x904, 0x0FFF0FFF, + 0x908, 0x00000000, + 0x90C, 0x13000000, + 0x910, 0x0000FC00, + 0x914, 0xC6380000, + 0x918, 0x1C1028C0, + 0x91C, 0x64B11A1C, + 0x920, 0xE0767233, + 0x924, 0x855A2500, + 0x928, 0x4AB0E4E4, + 0x92C, 0xFFFEB200, + 0x930, 0xFFFFFFFE, + 0x934, 0x001FFFFF, + 0x938, 0x00008480, + 0x93C, 0xE41C0642, + 0x940, 0x0E470430, + 0x944, 0x00000000, + 0x948, 0xAC000000, + 0x94C, 0x10000083, + 0x950, 0xB2010080, + 0x954, 0x86510080, + 0x958, 0x00000181, + 0x95C, 0x04248000, + 0x960, 0x00000000, + 0x964, 0x00000000, + 0x968, 0x00000000, + 0x96C, 0x00000000, + 0x970, 0x00001FFF, + 0x974, 0x04000FFF, + 0x978, 0x00000000, + 0x97C, 0x00000000, + 0x980, 0x00000000, + 0x984, 0x00000000, + 0x988, 0x00000000, + 0x98C, 0x23440000, + 0x990, 0x27100000, + 0x994, 0xFFFF0100, + 0x998, 0xFFFFFF5C, + 0x99C, 0xFFFFFFFF, + 0x9A0, 0x000000FF, 
+ 0x9A4, 0x80000088, + 0x9A8, 0x0C2F0000, + 0x9AC, 0x01560000, + 0x9B0, 0x70000000, + 0x9B4, 0x00000000, + 0x9B8, 0x00000000, + 0x9BC, 0x00000000, + 0x9C0, 0x00000000, + 0x9C4, 0x00000000, + 0x9C8, 0x00000000, + 0x9CC, 0x00000000, + 0x9D0, 0x00000000, + 0x9D4, 0x00000000, + 0x9D8, 0x00000000, + 0x9DC, 0x00000000, + 0x9E0, 0x00000000, + 0x9E4, 0x02000402, + 0x9E8, 0x000022D4, + 0x9EC, 0x00000000, + 0x9F0, 0x00000000, + 0x9F4, 0x00000000, + 0x9F8, 0x00000000, + 0x9FC, 0xEFFFF7FF, + 0xA00, 0x00D040C8, + 0xA04, 0x80FF800C, + 0xA08, 0x9C838300, + 0xA0C, 0x297E000F, + 0xA10, 0x9500BB78, + 0xA14, 0x1114D028, + 0xA18, 0x00881117, + 0xA1C, 0x89140F00, + 0xA20, 0xE82C0000, + 0xA24, 0x64B80C1C, + 0xA28, 0x00008810, + 0xA2C, 0x00D20000, + 0xA70, 0x101FBF00, + 0xA74, 0x00000107, + 0xA78, 0x00008900, + 0xA7C, 0x225B0606, + 0xA80, 0x21807532, + 0xA84, 0x80120000, + 0xA88, 0x048C0000, + 0xA8C, 0x12345678, + 0xA90, 0xABCDEF00, + 0xA94, 0x001B1B89, + 0xA98, 0x00000000, + 0xA9C, 0x3F000000, + 0xAA0, 0x00000000, + 0xAA4, 0x00080000, + 0xAA8, 0xEACF0004, + 0xAAC, 0x01235667, + 0xAB0, 0x00000000, + 0xB00, 0xE1000440, + 0xB04, 0x00800000, + 0xB08, 0xFF02030B, + 0xB0C, 0x01EAA406, + 0xB10, 0x00030690, + 0xB14, 0x006000FA, + 0xB18, 0x00000002, + 0xB1C, 0x00000002, + 0xB20, 0x4B00001F, + 0xB24, 0x4E8E3E40, + 0xB28, 0x03020100, + 0xB2C, 0x07060504, + 0xB30, 0x0B0A0908, + 0xB34, 0x0F0E0D0C, + 0xB38, 0x13121110, + 0xB3C, 0x0000003A, + 0xB40, 0x00000000, + 0xB44, 0x80000000, + 0xB48, 0x3F0000FA, + 0xB4C, 0x88C80020, + 0xB50, 0x00000000, + 0xB54, 0x00004241, + 0xB58, 0xE0008208, + 0xB5C, 0x41EFFFF9, + 0xB60, 0x00000000, + 0xB64, 0x00200063, + 0xB68, 0x0000003A, + 0xB6C, 0x00000102, + 0xB70, 0x4E6D1870, + 0xB74, 0x03020100, + 0xB78, 0x07060504, + 0xB7C, 0x0B0A0908, + 0xB80, 0x0F0E0D0C, + 0xB84, 0x13121110, + 0xB88, 0x00000000, + 0xB8C, 0x00000000, + 0xC00, 0x00000007, + 0xC04, 0x03050020, + 0xC08, 0x60403231, + 0xC0C, 0x00012345, + 0xC10, 0x00000100, + 0xC14, 0x01000000, + 0xC18, 0x00000000, + 0xC1C, 0x40040053, + 0xC20, 0x400503A3, + 0xC24, 0x00000000, + 0xC28, 0x00000000, + 0xC2C, 0x00000000, + 0xC30, 0x00000000, + 0xC34, 0x00000000, + 0xC38, 0x00000000, + 0xC3C, 0x00000000, + 0xC40, 0x00000000, + 0xC44, 0x00000000, + 0xC48, 0x00000000, + 0xC4C, 0x00000000, + 0xC50, 0x00000020, + 0xC54, 0x00000000, + 0xC58, 0xD8020402, + 0xC5C, 0xDE000120, + 0xC68, 0x0000003F, + 0xC6C, 0x0000122A, + 0xC70, 0x00000000, + 0xC74, 0x00000000, + 0xC78, 0x00000000, + 0xC7C, 0x00000000, + 0xC80, 0x00000000, + 0xC84, 0x00000000, + 0xC88, 0x00000000, + 0xC8C, 0x07000000, + 0xC94, 0x01000100, + 0xC98, 0x201C8000, + 0xC9C, 0x00000000, + 0xCA0, 0x0000A555, + 0xCA4, 0x08040201, + 0xCA8, 0x80402010, + 0xCAC, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0xCB0, 0x77777717, + 0xCB4, 0x00000073, + 0xA0000000, 0x00000000, + 0xCB0, 0x77775747, + 0xCB4, 0x10000077, + 0xB0000000, 0x00000000, + 0xCB8, 0x00000000, + 0xCBC, 0x00000000, + 0xCC0, 0x00000000, + 0xCC4, 0x00000000, + 0xCC8, 0x00000000, + 0xCCC, 0x00000000, + 0xCD0, 0x00000000, + 0xCD4, 0x00000000, + 0xCD8, 0x00000000, + 0xCDC, 0x00000000, + 0xCE0, 0x00000000, + 0xCE4, 0x00000000, + 0xCE8, 0x00000000, + 0xCEC, 0x00000000, + 0xE00, 0x00000007, + 0xE04, 0x00000020, + 0xE08, 0x60403231, + 0xE0C, 0x00012345, + 0xE10, 0x00000100, + 0xE14, 0x01000000, + 0xE18, 0x00000000, + 0xE1C, 0x40040053, + 0xE20, 0x00020103, + 0xE24, 0x00000000, + 0xE28, 0x00000000, + 0xE2C, 0x00000000, + 0xE30, 0x00000000, + 0xE34, 0x00000000, + 0xE38, 0x00000000, + 0xE3C, 0x00000000, + 0xE40, 0x00000000, + 0xE44, 
0x00000000, + 0xE48, 0x00000000, + 0xE4C, 0x00000000, + 0xE50, 0x00000020, + 0xE54, 0x00000000, + 0xE58, 0xD8020402, + 0xE5C, 0xDE000120, + 0xE68, 0x59799979, + 0xE6C, 0x0000122A, + 0xE70, 0x99795979, + 0xE74, 0x99795979, + 0xE78, 0x99799979, + 0xE7C, 0x99791979, + 0xE80, 0x19791979, + 0xE84, 0x19791979, + 0xE88, 0x00000000, + 0xE8C, 0x07000000, + 0xE94, 0x01000100, + 0xE98, 0x201C8000, + 0xE9C, 0x00000000, + 0xEA0, 0x0000A555, + 0xEA4, 0x08040201, + 0xEA8, 0x80402010, + 0xEAC, 0x00000000, + 0xEB0, 0x98543210, + 0xEB4, 0x000000BA, + 0xEB8, 0x00000000, + 0xEBC, 0x00000000, + 0xEC0, 0x00000000, + 0xEC4, 0x00000000, + 0xEC8, 0x00000000, + 0xECC, 0x00000000, + 0xED0, 0x00000000, + 0xED4, 0x00000000, + 0xED8, 0x00000000, + 0xEDC, 0x00000000, + 0xEE0, 0x00000000, + 0xEE4, 0x00000000, + 0xEE8, 0x00000000, + 0xEEC, 0x00000000, + 0x1900, 0x00000000, + 0x1904, 0x00238000, + 0x1908, 0x00000000, + 0x190C, 0x00000000, + 0x1910, 0x00001800, + 0x1914, 0x00000000, + 0x1918, 0x00000000, + 0x191C, 0x00000000, + 0x1920, 0x00000000, + 0x1924, 0x00000000, + 0x1928, 0x00000000, + 0x192C, 0x00000000, + 0x1930, 0x00000000, + 0x1934, 0x00000000, + 0x1938, 0x00000000, + 0x193C, 0x00000000, + 0x1940, 0x00000000, + 0x1944, 0x00000000, + 0x1948, 0x00000000, + 0x194C, 0x00000000, + 0x1950, 0x00000000, + 0x1954, 0x00000000, + 0x1958, 0x00000000, + 0x195C, 0x00000000, + 0x1960, 0x00000000, + 0x1964, 0x00000000, + 0x1968, 0x00000000, + 0x196C, 0x00000000, + 0x1970, 0x00000000, + 0x1974, 0x00000000, + 0x1978, 0x00000000, + 0x197C, 0x00000000, + 0x1980, 0x00000000, + 0x1984, 0x03000000, + 0x1988, 0x21401E88, + 0x198C, 0x00004000, + 0x1990, 0x00000000, + 0x1994, 0x00000000, + 0x1998, 0x00000053, + 0x199C, 0x00000000, + 0x19A0, 0x00000000, + 0x19A4, 0x00000000, + 0x19A8, 0x010A0000, + 0x19AC, 0x0E47E47F, + 0x19B0, 0x00008000, + 0x19B4, 0x0E47E47F, + 0x19B8, 0x00000000, + 0x19BC, 0x00000000, + 0x19C0, 0x00000000, + 0x19C4, 0x00000000, + 0x19C8, 0x00000000, + 0x19CC, 0x00000000, + 0x19D0, 0x00000000, + 0x19D4, 0x77777777, + 0x19D8, 0x00000777, + 0x19DC, 0x133E0F37, + 0x19E0, 0x00000000, + 0x19E4, 0x00000000, + 0x19E8, 0x00000000, + 0x19EC, 0x00000000, + 0x19F0, 0x00000000, + 0x19F4, 0x00000000, + 0x19F8, 0x01A00000, + 0x19FC, 0x00000000, + 0x1C00, 0x00000100, + 0x1C04, 0x01000000, + 0x1C08, 0x00000100, + 0x1C0C, 0x01000000, + 0x1C10, 0x00000100, + 0x1C14, 0x01000000, + 0x1C18, 0x00000100, + 0x1C1C, 0x01000000, + 0x1C20, 0x00000100, + 0x1C24, 0x01000000, + 0x1C28, 0x00000100, + 0x1C2C, 0x01000000, + 0x1C30, 0x00000100, + 0x1C34, 0x01000000, + 0x1C38, 0x00000000, + 0x1C3C, 0x00008000, + 0x1C40, 0x000C0100, + 0x1C44, 0x000000F3, + 0x1C48, 0x1A8249A8, + 0x1C4C, 0x1461C826, + 0x1C50, 0x0001469E, + 0x1C54, 0x58D158D1, + 0x1C58, 0x04490088, + 0x1C5C, 0x04004400, + 0x1C60, 0x00000000, + 0x1C64, 0x04004400, + 0x1C68, 0x0B7B7B75, + 0x1C6C, 0x01000000, + 0x1C70, 0x00A08145, + 0x1C74, 0x2080E0E0, + 0x1C78, 0x00000000, + 0x1C7C, 0x00000010, + 0x1C80, 0x00000100, + 0x1C84, 0x01000000, + 0x1C88, 0x00000100, + 0x1C8C, 0x01000000, + 0x1C90, 0x00000100, + 0x1C94, 0x01000000, + 0x1C98, 0x00000100, + 0x1C9C, 0x01000000, + 0x1CA0, 0x00000100, + 0x1CA4, 0x01000000, + 0x1CA8, 0x00000100, + 0x1CAC, 0x01000000, + 0x1CB0, 0x00000100, + 0x1CB4, 0x01000000, + 0x1CB8, 0x00000000, + 0x1CBC, 0x00000000, + 0x1CC0, 0x201B0100, + 0x1CC4, 0x00308000, + 0x1CC8, 0x5B74B6E9, + 0x1CCC, 0x01000000, + 0x1CD0, 0x00000400, + 0x1CD4, 0x01000000, + 0x1CD8, 0x01B8ADEB, + 0x1CDC, 0x01000000, + 0x1CE0, 0x00030003, + 0x1CE4, 0x4E4A0306, + 0x1CE8, 0x00000100, + 0x1CEC, 
0x01000000, + 0x1CF0, 0x00000100, + 0x1CF4, 0x01000000, + 0x1CF8, 0x01B8ADEB, + 0x1CFC, 0x00000000, + 0xC60, 0x700B8040, + 0xC60, 0x700B8040, + 0xC60, 0x70146040, + 0xC60, 0x70246040, + 0xC60, 0x70346040, + 0xC60, 0x70446040, + 0xC60, 0x705B2040, + 0xC60, 0x70646040, + 0xC60, 0x707B8040, + 0xC60, 0x708B8040, + 0xC60, 0x709B8040, + 0xC60, 0x70AB8040, + 0xC60, 0x70BB6040, + 0xC60, 0x70C06040, + 0xC60, 0x70D06040, + 0xC60, 0x70EF6040, + 0xC60, 0x70F06040, + 0xE60, 0x700B8040, + 0xE60, 0x700B8040, + 0xE60, 0x70146040, + 0xE60, 0x70246040, + 0xE60, 0x70346040, + 0xE60, 0x70446040, + 0xE60, 0x705B2040, + 0xE60, 0x70646040, + 0xE60, 0x707B8040, + 0xE60, 0x708B8040, + 0xE60, 0x709B8040, + 0xE60, 0x70AB8040, + 0xE60, 0x70BB6040, + 0xE60, 0x70C06040, + 0xE60, 0x70D06040, + 0xE60, 0x70EF6040, + 0xE60, 0x70F06040, + 0xC64, 0x00800000, + 0xC64, 0x08800001, + 0xC64, 0x00800002, + 0xC64, 0x00800003, + 0xC64, 0x00800004, + 0xC64, 0x00800005, + 0xC64, 0x00800006, + 0xC64, 0x08800007, + 0xC64, 0x00004000, + 0xE64, 0x00800000, + 0xE64, 0x08800001, + 0xE64, 0x00800002, + 0xE64, 0x00800003, + 0xE64, 0x00800004, + 0xE64, 0x00800005, + 0xE64, 0x00800006, + 0xE64, 0x08800007, + 0xE64, 0x00004000, + 0x1B00, 0xF8000008, + 0x1B00, 0xF80A7008, + 0x1B00, 0xF8015008, + 0x1B00, 0xF8000008, + 0x1B04, 0xE24629D2, + 0x1B08, 0x00000080, + 0x1B0C, 0x00000000, + 0x1B10, 0x00011C00, + 0x1B14, 0x00000000, + 0x1B18, 0x00292903, + 0x1B1C, 0xA2193C32, + 0x1B20, 0x01840008, + 0x1B24, 0x01860008, + 0x1B28, 0x80060300, + 0x1B2C, 0x00000003, + 0x1B30, 0x20000000, + 0x1B34, 0x00000800, + 0x1B3C, 0x20000000, + 0x1BC0, 0x01000000, + 0x1BCC, 0x00000000, + 0x1B90, 0x0001E018, + 0x1B94, 0xF76D9F84, + 0x1BC8, 0x000C44AA, + 0x1BCC, 0x11978200, + 0x1B8C, 0x00002000, + 0x1B9C, 0x5B554F48, + 0x1BA0, 0x6F6B6661, + 0x1BA4, 0x817D7874, + 0x1BA8, 0x908C8884, + 0x1BAC, 0x9D9A9793, + 0x1BB0, 0xAAA7A4A1, + 0x1BB4, 0xB6B3B0AD, + 0x1B40, 0x02CE03E8, + 0x1B44, 0x01FD024C, + 0x1B48, 0x01A101C9, + 0x1B4C, 0x016A0183, + 0x1B50, 0x01430153, + 0x1B54, 0x01280134, + 0x1B58, 0x0112011C, + 0x1B5C, 0x01000107, + 0x1B60, 0x00F200F9, + 0x1B64, 0x00E500EB, + 0x1B68, 0x00DA00E0, + 0x1B6C, 0x00D200D6, + 0x1B70, 0x00C900CD, + 0x1B74, 0x00C200C5, + 0x1B78, 0x00BB00BE, + 0x1B7C, 0x00B500B8, + 0x1BDC, 0x40CAFFE1, + 0x1BDC, 0x4080A1E3, + 0x1BDC, 0x405165E5, + 0x1BDC, 0x403340E7, + 0x1BDC, 0x402028E9, + 0x1BDC, 0x401419EB, + 0x1BDC, 0x400D10ED, + 0x1BDC, 0x40080AEF, + 0x1BDC, 0x400506F1, + 0x1BDC, 0x400304F3, + 0x1BDC, 0x400203F5, + 0x1BDC, 0x400102F7, + 0x1BDC, 0x400101F9, + 0x1BDC, 0x400101FB, + 0x1BDC, 0x400101FD, + 0x1BDC, 0x400101FF, + 0x1BDC, 0x40CAFF81, + 0x1BDC, 0x4080A183, + 0x1BDC, 0x40516585, + 0x1BDC, 0x40334087, + 0x1BDC, 0x40202889, + 0x1BDC, 0x4014198B, + 0x1BDC, 0x400D108D, + 0x1BDC, 0x40080A8F, + 0x1BDC, 0x40050691, + 0x1BDC, 0x40030493, + 0x1BDC, 0x40020395, + 0x1BDC, 0x40010297, + 0x1BDC, 0x40010199, + 0x1BDC, 0x4001019B, + 0x1BDC, 0x4001019D, + 0x1BDC, 0x4001019F, + 0x1BDC, 0x00000000, + 0x1BDC, 0xD0000001, + 0x1BDC, 0xD0000003, + 0x1BDC, 0xD0000005, + 0x1BDC, 0xD0000007, + 0x1BDC, 0xD0000009, + 0x1BDC, 0xD000000B, + 0x1BDC, 0xD000000D, + 0x1BDC, 0xD000000F, + 0x1BDC, 0xD0000011, + 0x1BDC, 0xD0000013, + 0x1BDC, 0xD0000015, + 0x1BDC, 0xD0000017, + 0x1BDC, 0xD0000019, + 0x1BDC, 0xD000001B, + 0x1BDC, 0xD000001D, + 0x1BDC, 0xD000001F, + 0x1BDC, 0xD0000021, + 0x1BDC, 0xD0000023, + 0x1BDC, 0xD0000025, + 0x1BDC, 0xD0000027, + 0x1BDC, 0xD0000029, + 0x1BDC, 0xD000002B, + 0x1BDC, 0xD000002D, + 0x1BDC, 0xD000002F, + 0x1BDC, 0xD0000031, + 0x1BDC, 0xD0000033, + 
0x1BDC, 0xD0000035, + 0x1BDC, 0xD0000037, + 0x1BDC, 0xD0000039, + 0x1BDC, 0xD000003B, + 0x1BDC, 0xD000003D, + 0x1BDC, 0xD000003F, + 0x1BDC, 0xD0000041, + 0x1BDC, 0xD0000043, + 0x1BDC, 0xD0000045, + 0x1BDC, 0xD0000047, + 0x1BDC, 0xD0000049, + 0x1BDC, 0xD000004B, + 0x1BDC, 0xD000004D, + 0x1BDC, 0xD000004F, + 0x1BDC, 0xD0000051, + 0x1BDC, 0xD0000053, + 0x1BDC, 0xD0000055, + 0x1BDC, 0xD0000057, + 0x1BDC, 0xD0000059, + 0x1BDC, 0xD000005B, + 0x1BDC, 0xD000005D, + 0x1BDC, 0xD000005F, + 0x1BDC, 0xD0000061, + 0x1BDC, 0xD0000063, + 0x1BDC, 0xD0000065, + 0x1BDC, 0xD0000067, + 0x1BDC, 0xD0000069, + 0x1BDC, 0xD000006B, + 0x1BDC, 0xD000006D, + 0x1BDC, 0xD000006F, + 0x1BDC, 0xD0000071, + 0x1BDC, 0xD0000073, + 0x1BDC, 0xD0000075, + 0x1BDC, 0xD0000077, + 0x1BDC, 0xD0000079, + 0x1BDC, 0xD000007B, + 0x1BDC, 0xD000007D, + 0x1BDC, 0xD000007F, + 0x1BDC, 0x90000081, + 0x1BDC, 0x90000083, + 0x1BDC, 0x90000085, + 0x1BDC, 0x90000087, + 0x1BDC, 0x90000089, + 0x1BDC, 0x9000008B, + 0x1BDC, 0x9000008D, + 0x1BDC, 0x9000008F, + 0x1BDC, 0x90000091, + 0x1BDC, 0x90000093, + 0x1BDC, 0x90000095, + 0x1BDC, 0x90000097, + 0x1BDC, 0x90000099, + 0x1BDC, 0x9000009B, + 0x1BDC, 0x9000009D, + 0x1BDC, 0x9000009F, + 0x1BDC, 0x900000A1, + 0x1BDC, 0x900000A3, + 0x1BDC, 0x900000A5, + 0x1BDC, 0x900000A7, + 0x1BDC, 0x900000A9, + 0x1BDC, 0x900000AB, + 0x1BDC, 0x900000AD, + 0x1BDC, 0x900000AF, + 0x1BDC, 0x900000B1, + 0x1BDC, 0x900000B3, + 0x1BDC, 0x900000B5, + 0x1BDC, 0x900000B7, + 0x1BDC, 0x900000B9, + 0x1BDC, 0x900000BB, + 0x1BDC, 0x900000BD, + 0x1BDC, 0x900000BF, + 0x1BDC, 0x900000C1, + 0x1BDC, 0x900000C3, + 0x1BDC, 0x900000C5, + 0x1BDC, 0x900000C7, + 0x1BDC, 0x900000C9, + 0x1BDC, 0x900000CB, + 0x1BDC, 0x900000CD, + 0x1BDC, 0x900000CF, + 0x1BDC, 0x900000D1, + 0x1BDC, 0x900000D3, + 0x1BDC, 0x900000D5, + 0x1BDC, 0x900000D7, + 0x1BDC, 0x900000D9, + 0x1BDC, 0x900000DB, + 0x1BDC, 0x900000DD, + 0x1BDC, 0x900000DF, + 0x1BDC, 0x900000E1, + 0x1BDC, 0x900000E3, + 0x1BDC, 0x900000E5, + 0x1BDC, 0x900000E7, + 0x1BDC, 0x900000E9, + 0x1BDC, 0x900000EB, + 0x1BDC, 0x900000ED, + 0x1BDC, 0x900000EF, + 0x1BDC, 0x900000F1, + 0x1BDC, 0x900000F3, + 0x1BDC, 0x900000F5, + 0x1BDC, 0x900000F7, + 0x1BDC, 0x900000F9, + 0x1BDC, 0x900000FB, + 0x1BDC, 0x900000FD, + 0x1BDC, 0x900000FF, + 0x1BDC, 0x00000000, + 0x1B00, 0xF8000000, + 0x1B80, 0x00000007, + 0x1B80, 0x090A0005, + 0x1B80, 0x090A0007, + 0x1B80, 0x0FFE0015, + 0x1B80, 0x0FFE0017, + 0x1B80, 0x00220025, + 0x1B80, 0x00220027, + 0x1B80, 0x00040035, + 0x1B80, 0x00040037, + 0x1B80, 0x05C00045, + 0x1B80, 0x05C00047, + 0x1B80, 0x00070055, + 0x1B80, 0x00070057, + 0x1B80, 0x64000065, + 0x1B80, 0x64000067, + 0x1B80, 0x00020075, + 0x1B80, 0x00020077, + 0x1B80, 0x00080085, + 0x1B80, 0x00080087, + 0x1B80, 0x80000095, + 0x1B80, 0x80000097, + 0x1B80, 0x090800A5, + 0x1B80, 0x090800A7, + 0x1B80, 0x0F0200B5, + 0x1B80, 0x0F0200B7, + 0x1B80, 0x002200C5, + 0x1B80, 0x002200C7, + 0x1B80, 0x000400D5, + 0x1B80, 0x000400D7, + 0x1B80, 0x05C000E5, + 0x1B80, 0x05C000E7, + 0x1B80, 0x000700F5, + 0x1B80, 0x000700F7, + 0x1B80, 0x64020105, + 0x1B80, 0x64020107, + 0x1B80, 0x00020115, + 0x1B80, 0x00020117, + 0x1B80, 0x00040125, + 0x1B80, 0x00040127, + 0x1B80, 0x4A000135, + 0x1B80, 0x4A000137, + 0x1B80, 0x4B040145, + 0x1B80, 0x4B040147, + 0x1B80, 0x85030155, + 0x1B80, 0x85030157, + 0x1B80, 0x40090165, + 0x1B80, 0x40090167, + 0x1B80, 0xE02A0175, + 0x1B80, 0xE02A0177, + 0x1B80, 0x4B050185, + 0x1B80, 0x4B050187, + 0x1B80, 0x86030195, + 0x1B80, 0x86030197, + 0x1B80, 0x400B01A5, + 0x1B80, 0x400B01A7, + 0x1B80, 0xE02A01B5, + 0x1B80, 0xE02A01B7, + 0x1B80, 
0x4B0001C5, + 0x1B80, 0x4B0001C7, + 0x1B80, 0x000701D5, + 0x1B80, 0x000701D7, + 0x1B80, 0x4C0001E5, + 0x1B80, 0x4C0001E7, + 0x1B80, 0x000401F5, + 0x1B80, 0x000401F7, + 0x1B80, 0x4D040205, + 0x1B80, 0x4D040207, + 0x1B80, 0x2EE00215, + 0x1B80, 0x2EE00217, + 0x1B80, 0x00000225, + 0x1B80, 0x00000227, + 0x1B80, 0x2EF00235, + 0x1B80, 0x2EF00237, + 0x1B80, 0x00000245, + 0x1B80, 0x00000247, + 0x1B80, 0x20810255, + 0x1B80, 0x20810257, + 0x1B80, 0x23450265, + 0x1B80, 0x23450267, + 0x1B80, 0x4D000275, + 0x1B80, 0x4D000277, + 0x1B80, 0x00040285, + 0x1B80, 0x00040287, + 0x1B80, 0x30000295, + 0x1B80, 0x30000297, + 0x1B80, 0xE1D602A5, + 0x1B80, 0xE1D602A7, + 0x1B80, 0xF01102B5, + 0x1B80, 0xF01102B7, + 0x1B80, 0xF11102C5, + 0x1B80, 0xF11102C7, + 0x1B80, 0xF21102D5, + 0x1B80, 0xF21102D7, + 0x1B80, 0xF31102E5, + 0x1B80, 0xF31102E7, + 0x1B80, 0xF41102F5, + 0x1B80, 0xF41102F7, + 0x1B80, 0xF5110305, + 0x1B80, 0xF5110307, + 0x1B80, 0xF6110315, + 0x1B80, 0xF6110317, + 0x1B80, 0xF7110325, + 0x1B80, 0xF7110327, + 0x1B80, 0xF8110335, + 0x1B80, 0xF8110337, + 0x1B80, 0xF9110345, + 0x1B80, 0xF9110347, + 0x1B80, 0xFA110355, + 0x1B80, 0xFA110357, + 0x1B80, 0xFB110365, + 0x1B80, 0xFB110367, + 0x1B80, 0xFC110375, + 0x1B80, 0xFC110377, + 0x1B80, 0xFD110385, + 0x1B80, 0xFD110387, + 0x1B80, 0xFE110395, + 0x1B80, 0xFE110397, + 0x1B80, 0xFF1103A5, + 0x1B80, 0xFF1103A7, + 0x1B80, 0x000103B5, + 0x1B80, 0x000103B7, + 0x1B80, 0x305503C5, + 0x1B80, 0x305503C7, + 0x1B80, 0x306D03D5, + 0x1B80, 0x306D03D7, + 0x1B80, 0x30B803E5, + 0x1B80, 0x30B803E7, + 0x1B80, 0x30BB03F5, + 0x1B80, 0x30BB03F7, + 0x1B80, 0x306F0405, + 0x1B80, 0x306F0407, + 0x1B80, 0x307A0415, + 0x1B80, 0x307A0417, + 0x1B80, 0x30850425, + 0x1B80, 0x30850427, + 0x1B80, 0x30C50435, + 0x1B80, 0x30C50437, + 0x1B80, 0x30BF0445, + 0x1B80, 0x30BF0447, + 0x1B80, 0x30D30455, + 0x1B80, 0x30D30457, + 0x1B80, 0x30DE0465, + 0x1B80, 0x30DE0467, + 0x1B80, 0x30E90475, + 0x1B80, 0x30E90477, + 0x1B80, 0x304C0485, + 0x1B80, 0x304C0487, + 0x1B80, 0x31180495, + 0x1B80, 0x31180497, + 0x1B80, 0x312904A5, + 0x1B80, 0x312904A7, + 0x1B80, 0x313E04B5, + 0x1B80, 0x313E04B7, + 0x1B80, 0x4D0404C5, + 0x1B80, 0x4D0404C7, + 0x1B80, 0x2EE004D5, + 0x1B80, 0x2EE004D7, + 0x1B80, 0x000004E5, + 0x1B80, 0x000004E7, + 0x1B80, 0x2EF004F5, + 0x1B80, 0x2EF004F7, + 0x1B80, 0x00000505, + 0x1B80, 0x00000507, + 0x1B80, 0x20810515, + 0x1B80, 0x20810517, + 0x1B80, 0xA3B50525, + 0x1B80, 0xA3B50527, + 0x1B80, 0x4D000535, + 0x1B80, 0x4D000537, + 0x1B80, 0x30000545, + 0x1B80, 0x30000547, + 0x1B80, 0xE1690555, + 0x1B80, 0xE1690557, + 0x1B80, 0x4D040565, + 0x1B80, 0x4D040567, + 0x1B80, 0x20800575, + 0x1B80, 0x20800577, + 0x1B80, 0x00000585, + 0x1B80, 0x00000587, + 0x1B80, 0x4D000595, + 0x1B80, 0x4D000597, + 0x1B80, 0x550705A5, + 0x1B80, 0x550705A7, + 0x1B80, 0xE16105B5, + 0x1B80, 0xE16105B7, + 0x1B80, 0xE16105C5, + 0x1B80, 0xE16105C7, + 0x1B80, 0x4D0405D5, + 0x1B80, 0x4D0405D7, + 0x1B80, 0x208805E5, + 0x1B80, 0x208805E7, + 0x1B80, 0x020005F5, + 0x1B80, 0x020005F7, + 0x1B80, 0x4D000605, + 0x1B80, 0x4D000607, + 0x1B80, 0x550F0615, + 0x1B80, 0x550F0617, + 0x1B80, 0xE1610625, + 0x1B80, 0xE1610627, + 0x1B80, 0x4F020635, + 0x1B80, 0x4F020637, + 0x1B80, 0x4E000645, + 0x1B80, 0x4E000647, + 0x1B80, 0x53020655, + 0x1B80, 0x53020657, + 0x1B80, 0x52010665, + 0x1B80, 0x52010667, + 0x1B80, 0xE1650675, + 0x1B80, 0xE1650677, + 0x1B80, 0x4D080685, + 0x1B80, 0x4D080687, + 0x1B80, 0x57100695, + 0x1B80, 0x57100697, + 0x1B80, 0x570006A5, + 0x1B80, 0x570006A7, + 0x1B80, 0x4D0006B5, + 0x1B80, 0x4D0006B7, + 0x1B80, 0x000106C5, + 0x1B80, 0x000106C7, 
+ 0x1B80, 0xE16906D5, + 0x1B80, 0xE16906D7, + 0x1B80, 0x000106E5, + 0x1B80, 0x000106E7, + 0x1B80, 0x308F06F5, + 0x1B80, 0x308F06F7, + 0x1B80, 0x00230705, + 0x1B80, 0x00230707, + 0x1B80, 0xE1C90715, + 0x1B80, 0xE1C90717, + 0x1B80, 0x00020725, + 0x1B80, 0x00020727, + 0x1B80, 0x54E90735, + 0x1B80, 0x54E90737, + 0x1B80, 0x0BA60745, + 0x1B80, 0x0BA60747, + 0x1B80, 0x00230755, + 0x1B80, 0x00230757, + 0x1B80, 0xE1C90765, + 0x1B80, 0xE1C90767, + 0x1B80, 0x00020775, + 0x1B80, 0x00020777, + 0x1B80, 0x4D300785, + 0x1B80, 0x4D300787, + 0x1B80, 0x30A80795, + 0x1B80, 0x30A80797, + 0x1B80, 0x308B07A5, + 0x1B80, 0x308B07A7, + 0x1B80, 0x002207B5, + 0x1B80, 0x002207B7, + 0x1B80, 0xE1C907C5, + 0x1B80, 0xE1C907C7, + 0x1B80, 0x000207D5, + 0x1B80, 0x000207D7, + 0x1B80, 0x54E807E5, + 0x1B80, 0x54E807E7, + 0x1B80, 0x0BA607F5, + 0x1B80, 0x0BA607F7, + 0x1B80, 0x00220805, + 0x1B80, 0x00220807, + 0x1B80, 0xE1C90815, + 0x1B80, 0xE1C90817, + 0x1B80, 0x00020825, + 0x1B80, 0x00020827, + 0x1B80, 0x4D300835, + 0x1B80, 0x4D300837, + 0x1B80, 0x30A80845, + 0x1B80, 0x30A80847, + 0x1B80, 0x63F10855, + 0x1B80, 0x63F10857, + 0x1B80, 0xE1690865, + 0x1B80, 0xE1690867, + 0x1B80, 0xE1C90875, + 0x1B80, 0xE1C90877, + 0x1B80, 0x63F40885, + 0x1B80, 0x63F40887, + 0x1B80, 0xE1690895, + 0x1B80, 0xE1690897, + 0x1B80, 0xE1C908A5, + 0x1B80, 0xE1C908A7, + 0x1B80, 0x0BA808B5, + 0x1B80, 0x0BA808B7, + 0x1B80, 0x63F808C5, + 0x1B80, 0x63F808C7, + 0x1B80, 0xE16908D5, + 0x1B80, 0xE16908D7, + 0x1B80, 0xE1C908E5, + 0x1B80, 0xE1C908E7, + 0x1B80, 0x0BA908F5, + 0x1B80, 0x0BA908F7, + 0x1B80, 0x63FC0905, + 0x1B80, 0x63FC0907, + 0x1B80, 0xE1690915, + 0x1B80, 0xE1690917, + 0x1B80, 0xE1C90925, + 0x1B80, 0xE1C90927, + 0x1B80, 0x63FF0935, + 0x1B80, 0x63FF0937, + 0x1B80, 0xE1690945, + 0x1B80, 0xE1690947, + 0x1B80, 0xE1C90955, + 0x1B80, 0xE1C90957, + 0x1B80, 0x63000965, + 0x1B80, 0x63000967, + 0x1B80, 0xE1690975, + 0x1B80, 0xE1690977, + 0x1B80, 0xE1C90985, + 0x1B80, 0xE1C90987, + 0x1B80, 0x63030995, + 0x1B80, 0x63030997, + 0x1B80, 0xE16909A5, + 0x1B80, 0xE16909A7, + 0x1B80, 0xE1C909B5, + 0x1B80, 0xE1C909B7, + 0x1B80, 0xF4D409C5, + 0x1B80, 0xF4D409C7, + 0x1B80, 0x630709D5, + 0x1B80, 0x630709D7, + 0x1B80, 0xE16909E5, + 0x1B80, 0xE16909E7, + 0x1B80, 0xE1C909F5, + 0x1B80, 0xE1C909F7, + 0x1B80, 0xF5DB0A05, + 0x1B80, 0xF5DB0A07, + 0x1B80, 0x630B0A15, + 0x1B80, 0x630B0A17, + 0x1B80, 0xE1690A25, + 0x1B80, 0xE1690A27, + 0x1B80, 0xE1C90A35, + 0x1B80, 0xE1C90A37, + 0x1B80, 0x630E0A45, + 0x1B80, 0x630E0A47, + 0x1B80, 0xE1690A55, + 0x1B80, 0xE1690A57, + 0x1B80, 0xE1C90A65, + 0x1B80, 0xE1C90A67, + 0x1B80, 0x4D300A75, + 0x1B80, 0x4D300A77, + 0x1B80, 0x55010A85, + 0x1B80, 0x55010A87, + 0x1B80, 0x57040A95, + 0x1B80, 0x57040A97, + 0x1B80, 0x57000AA5, + 0x1B80, 0x57000AA7, + 0x1B80, 0x96000AB5, + 0x1B80, 0x96000AB7, + 0x1B80, 0x57080AC5, + 0x1B80, 0x57080AC7, + 0x1B80, 0x57000AD5, + 0x1B80, 0x57000AD7, + 0x1B80, 0x95000AE5, + 0x1B80, 0x95000AE7, + 0x1B80, 0x4D000AF5, + 0x1B80, 0x4D000AF7, + 0x1B80, 0x6C070B05, + 0x1B80, 0x6C070B07, + 0x1B80, 0x7B200B15, + 0x1B80, 0x7B200B17, + 0x1B80, 0x7A000B25, + 0x1B80, 0x7A000B27, + 0x1B80, 0x79000B35, + 0x1B80, 0x79000B37, + 0x1B80, 0x7F200B45, + 0x1B80, 0x7F200B47, + 0x1B80, 0x7E000B55, + 0x1B80, 0x7E000B57, + 0x1B80, 0x7D000B65, + 0x1B80, 0x7D000B67, + 0x1B80, 0x00010B75, + 0x1B80, 0x00010B77, + 0x1B80, 0x62850B85, + 0x1B80, 0x62850B87, + 0x1B80, 0xE1690B95, + 0x1B80, 0xE1690B97, + 0x1B80, 0x00010BA5, + 0x1B80, 0x00010BA7, + 0x1B80, 0x5C320BB5, + 0x1B80, 0x5C320BB7, + 0x1B80, 0xE1C50BC5, + 0x1B80, 0xE1C50BC7, + 0x1B80, 0xE1950BD5, + 0x1B80, 
0xE1950BD7, + 0x1B80, 0x00010BE5, + 0x1B80, 0x00010BE7, + 0x1B80, 0x5C320BF5, + 0x1B80, 0x5C320BF7, + 0x1B80, 0x63F40C05, + 0x1B80, 0x63F40C07, + 0x1B80, 0x62850C15, + 0x1B80, 0x62850C17, + 0x1B80, 0x0BB00C25, + 0x1B80, 0x0BB00C27, + 0x1B80, 0xE1690C35, + 0x1B80, 0xE1690C37, + 0x1B80, 0xE1C90C45, + 0x1B80, 0xE1C90C47, + 0x1B80, 0x5C320C55, + 0x1B80, 0x5C320C57, + 0x1B80, 0x63FC0C65, + 0x1B80, 0x63FC0C67, + 0x1B80, 0x62850C75, + 0x1B80, 0x62850C77, + 0x1B80, 0x0BB10C85, + 0x1B80, 0x0BB10C87, + 0x1B80, 0xE1690C95, + 0x1B80, 0xE1690C97, + 0x1B80, 0xE1C90CA5, + 0x1B80, 0xE1C90CA7, + 0x1B80, 0x63030CB5, + 0x1B80, 0x63030CB7, + 0x1B80, 0xE1690CC5, + 0x1B80, 0xE1690CC7, + 0x1B80, 0xE1C90CD5, + 0x1B80, 0xE1C90CD7, + 0x1B80, 0xF7040CE5, + 0x1B80, 0xF7040CE7, + 0x1B80, 0x630B0CF5, + 0x1B80, 0x630B0CF7, + 0x1B80, 0xE1690D05, + 0x1B80, 0xE1690D07, + 0x1B80, 0xE1C90D15, + 0x1B80, 0xE1C90D17, + 0x1B80, 0x00010D25, + 0x1B80, 0x00010D27, + 0x1B80, 0x30F70D35, + 0x1B80, 0x30F70D37, + 0x1B80, 0x00230D45, + 0x1B80, 0x00230D47, + 0x1B80, 0xE1CE0D55, + 0x1B80, 0xE1CE0D57, + 0x1B80, 0x00020D65, + 0x1B80, 0x00020D67, + 0x1B80, 0x54E90D75, + 0x1B80, 0x54E90D77, + 0x1B80, 0x0BA60D85, + 0x1B80, 0x0BA60D87, + 0x1B80, 0x00230D95, + 0x1B80, 0x00230D97, + 0x1B80, 0xE1CE0DA5, + 0x1B80, 0xE1CE0DA7, + 0x1B80, 0x00020DB5, + 0x1B80, 0x00020DB7, + 0x1B80, 0x4D100DC5, + 0x1B80, 0x4D100DC7, + 0x1B80, 0x30A80DD5, + 0x1B80, 0x30A80DD7, + 0x1B80, 0x30F10DE5, + 0x1B80, 0x30F10DE7, + 0x1B80, 0x00220DF5, + 0x1B80, 0x00220DF7, + 0x1B80, 0xE1CE0E05, + 0x1B80, 0xE1CE0E07, + 0x1B80, 0x00020E15, + 0x1B80, 0x00020E17, + 0x1B80, 0x54E80E25, + 0x1B80, 0x54E80E27, + 0x1B80, 0x0BA60E35, + 0x1B80, 0x0BA60E37, + 0x1B80, 0x00220E45, + 0x1B80, 0x00220E47, + 0x1B80, 0xE1CE0E55, + 0x1B80, 0xE1CE0E57, + 0x1B80, 0x00020E65, + 0x1B80, 0x00020E67, + 0x1B80, 0x4D100E75, + 0x1B80, 0x4D100E77, + 0x1B80, 0x30A80E85, + 0x1B80, 0x30A80E87, + 0x1B80, 0x5C320E95, + 0x1B80, 0x5C320E97, + 0x1B80, 0x54F00EA5, + 0x1B80, 0x54F00EA7, + 0x1B80, 0x67F10EB5, + 0x1B80, 0x67F10EB7, + 0x1B80, 0xE1950EC5, + 0x1B80, 0xE1950EC7, + 0x1B80, 0xE1CE0ED5, + 0x1B80, 0xE1CE0ED7, + 0x1B80, 0x67F40EE5, + 0x1B80, 0x67F40EE7, + 0x1B80, 0xE1950EF5, + 0x1B80, 0xE1950EF7, + 0x1B80, 0xE1CE0F05, + 0x1B80, 0xE1CE0F07, + 0x1B80, 0x5C320F15, + 0x1B80, 0x5C320F17, + 0x1B80, 0x54F10F25, + 0x1B80, 0x54F10F27, + 0x1B80, 0x0BA80F35, + 0x1B80, 0x0BA80F37, + 0x1B80, 0x67F80F45, + 0x1B80, 0x67F80F47, + 0x1B80, 0xE1950F55, + 0x1B80, 0xE1950F57, + 0x1B80, 0xE1CE0F65, + 0x1B80, 0xE1CE0F67, + 0x1B80, 0x5C320F75, + 0x1B80, 0x5C320F77, + 0x1B80, 0x54F10F85, + 0x1B80, 0x54F10F87, + 0x1B80, 0x0BA90F95, + 0x1B80, 0x0BA90F97, + 0x1B80, 0x67FC0FA5, + 0x1B80, 0x67FC0FA7, + 0x1B80, 0xE1950FB5, + 0x1B80, 0xE1950FB7, + 0x1B80, 0xE1CE0FC5, + 0x1B80, 0xE1CE0FC7, + 0x1B80, 0x67FF0FD5, + 0x1B80, 0x67FF0FD7, + 0x1B80, 0xE1950FE5, + 0x1B80, 0xE1950FE7, + 0x1B80, 0xE1CE0FF5, + 0x1B80, 0xE1CE0FF7, + 0x1B80, 0x5C321005, + 0x1B80, 0x5C321007, + 0x1B80, 0x54F21015, + 0x1B80, 0x54F21017, + 0x1B80, 0x67001025, + 0x1B80, 0x67001027, + 0x1B80, 0xE1951035, + 0x1B80, 0xE1951037, + 0x1B80, 0xE1CE1045, + 0x1B80, 0xE1CE1047, + 0x1B80, 0x67031055, + 0x1B80, 0x67031057, + 0x1B80, 0xE1951065, + 0x1B80, 0xE1951067, + 0x1B80, 0xE1CE1075, + 0x1B80, 0xE1CE1077, + 0x1B80, 0xF9CC1085, + 0x1B80, 0xF9CC1087, + 0x1B80, 0x67071095, + 0x1B80, 0x67071097, + 0x1B80, 0xE19510A5, + 0x1B80, 0xE19510A7, + 0x1B80, 0xE1CE10B5, + 0x1B80, 0xE1CE10B7, + 0x1B80, 0xFAD310C5, + 0x1B80, 0xFAD310C7, + 0x1B80, 0x5C3210D5, + 0x1B80, 0x5C3210D7, + 0x1B80, 0x54F310E5, 
+ 0x1B80, 0x54F310E7, + 0x1B80, 0x670B10F5, + 0x1B80, 0x670B10F7, + 0x1B80, 0xE1951105, + 0x1B80, 0xE1951107, + 0x1B80, 0xE1CE1115, + 0x1B80, 0xE1CE1117, + 0x1B80, 0x670E1125, + 0x1B80, 0x670E1127, + 0x1B80, 0xE1951135, + 0x1B80, 0xE1951137, + 0x1B80, 0xE1CE1145, + 0x1B80, 0xE1CE1147, + 0x1B80, 0x4D101155, + 0x1B80, 0x4D101157, + 0x1B80, 0x30A81165, + 0x1B80, 0x30A81167, + 0x1B80, 0x00011175, + 0x1B80, 0x00011177, + 0x1B80, 0x6C001185, + 0x1B80, 0x6C001187, + 0x1B80, 0x00061195, + 0x1B80, 0x00061197, + 0x1B80, 0x530011A5, + 0x1B80, 0x530011A7, + 0x1B80, 0x57F711B5, + 0x1B80, 0x57F711B7, + 0x1B80, 0x582111C5, + 0x1B80, 0x582111C7, + 0x1B80, 0x592E11D5, + 0x1B80, 0x592E11D7, + 0x1B80, 0x5A3811E5, + 0x1B80, 0x5A3811E7, + 0x1B80, 0x5B4111F5, + 0x1B80, 0x5B4111F7, + 0x1B80, 0x00071205, + 0x1B80, 0x00071207, + 0x1B80, 0x5C001215, + 0x1B80, 0x5C001217, + 0x1B80, 0x4B001225, + 0x1B80, 0x4B001227, + 0x1B80, 0x4E8F1235, + 0x1B80, 0x4E8F1237, + 0x1B80, 0x4F151245, + 0x1B80, 0x4F151247, + 0x1B80, 0x00041255, + 0x1B80, 0x00041257, + 0x1B80, 0xE1B31265, + 0x1B80, 0xE1B31267, + 0x1B80, 0xAB001275, + 0x1B80, 0xAB001277, + 0x1B80, 0x00011285, + 0x1B80, 0x00011287, + 0x1B80, 0x6C001295, + 0x1B80, 0x6C001297, + 0x1B80, 0x000612A5, + 0x1B80, 0x000612A7, + 0x1B80, 0x530012B5, + 0x1B80, 0x530012B7, + 0x1B80, 0x57F712C5, + 0x1B80, 0x57F712C7, + 0x1B80, 0x582112D5, + 0x1B80, 0x582112D7, + 0x1B80, 0x592E12E5, + 0x1B80, 0x592E12E7, + 0x1B80, 0x5A3812F5, + 0x1B80, 0x5A3812F7, + 0x1B80, 0x5B411305, + 0x1B80, 0x5B411307, + 0x1B80, 0x00071315, + 0x1B80, 0x00071317, + 0x1B80, 0x5C001325, + 0x1B80, 0x5C001327, + 0x1B80, 0x4B401335, + 0x1B80, 0x4B401337, + 0x1B80, 0x4E971345, + 0x1B80, 0x4E971347, + 0x1B80, 0x4F111355, + 0x1B80, 0x4F111357, + 0x1B80, 0x00041365, + 0x1B80, 0x00041367, + 0x1B80, 0xE1B31375, + 0x1B80, 0xE1B31377, + 0x1B80, 0xAB001385, + 0x1B80, 0xAB001387, + 0x1B80, 0x8B001395, + 0x1B80, 0x8B001397, + 0x1B80, 0xAB0013A5, + 0x1B80, 0xAB0013A7, + 0x1B80, 0x8A1913B5, + 0x1B80, 0x8A1913B7, + 0x1B80, 0x301D13C5, + 0x1B80, 0x301D13C7, + 0x1B80, 0x000113D5, + 0x1B80, 0x000113D7, + 0x1B80, 0x6C0113E5, + 0x1B80, 0x6C0113E7, + 0x1B80, 0x000613F5, + 0x1B80, 0x000613F7, + 0x1B80, 0x53011405, + 0x1B80, 0x53011407, + 0x1B80, 0x57F71415, + 0x1B80, 0x57F71417, + 0x1B80, 0x58211425, + 0x1B80, 0x58211427, + 0x1B80, 0x592E1435, + 0x1B80, 0x592E1437, + 0x1B80, 0x5A381445, + 0x1B80, 0x5A381447, + 0x1B80, 0x5B411455, + 0x1B80, 0x5B411457, + 0x1B80, 0x00071465, + 0x1B80, 0x00071467, + 0x1B80, 0x5C001475, + 0x1B80, 0x5C001477, + 0x1B80, 0x4B001485, + 0x1B80, 0x4B001487, + 0x1B80, 0x4E871495, + 0x1B80, 0x4E871497, + 0x1B80, 0x4F1114A5, + 0x1B80, 0x4F1114A7, + 0x1B80, 0x000414B5, + 0x1B80, 0x000414B7, + 0x1B80, 0xE1B314C5, + 0x1B80, 0xE1B314C7, + 0x1B80, 0xAB0014D5, + 0x1B80, 0xAB0014D7, + 0x1B80, 0x000614E5, + 0x1B80, 0x000614E7, + 0x1B80, 0x577714F5, + 0x1B80, 0x577714F7, + 0x1B80, 0x00071505, + 0x1B80, 0x00071507, + 0x1B80, 0x4E861515, + 0x1B80, 0x4E861517, + 0x1B80, 0x00041525, + 0x1B80, 0x00041527, + 0x1B80, 0x00011535, + 0x1B80, 0x00011537, + 0x1B80, 0x00011545, + 0x1B80, 0x00011547, + 0x1B80, 0x7B241555, + 0x1B80, 0x7B241557, + 0x1B80, 0x7A401565, + 0x1B80, 0x7A401567, + 0x1B80, 0x79001575, + 0x1B80, 0x79001577, + 0x1B80, 0x55031585, + 0x1B80, 0x55031587, + 0x1B80, 0x31611595, + 0x1B80, 0x31611597, + 0x1B80, 0x7B1C15A5, + 0x1B80, 0x7B1C15A7, + 0x1B80, 0x7A4015B5, + 0x1B80, 0x7A4015B7, + 0x1B80, 0x550B15C5, + 0x1B80, 0x550B15C7, + 0x1B80, 0x316115D5, + 0x1B80, 0x316115D7, + 0x1B80, 0x7B2015E5, + 0x1B80, 0x7B2015E7, + 0x1B80, 
0x7A0015F5, + 0x1B80, 0x7A0015F7, + 0x1B80, 0x55131605, + 0x1B80, 0x55131607, + 0x1B80, 0x74011615, + 0x1B80, 0x74011617, + 0x1B80, 0x74001625, + 0x1B80, 0x74001627, + 0x1B80, 0x8E001635, + 0x1B80, 0x8E001637, + 0x1B80, 0x00011645, + 0x1B80, 0x00011647, + 0x1B80, 0x57021655, + 0x1B80, 0x57021657, + 0x1B80, 0x57001665, + 0x1B80, 0x57001667, + 0x1B80, 0x97001675, + 0x1B80, 0x97001677, + 0x1B80, 0x00011685, + 0x1B80, 0x00011687, + 0x1B80, 0x4F781695, + 0x1B80, 0x4F781697, + 0x1B80, 0x538816A5, + 0x1B80, 0x538816A7, + 0x1B80, 0xE17516B5, + 0x1B80, 0xE17516B7, + 0x1B80, 0x548016C5, + 0x1B80, 0x548016C7, + 0x1B80, 0x540016D5, + 0x1B80, 0x540016D7, + 0x1B80, 0x548116E5, + 0x1B80, 0x548116E7, + 0x1B80, 0x540016F5, + 0x1B80, 0x540016F7, + 0x1B80, 0x54821705, + 0x1B80, 0x54821707, + 0x1B80, 0x54001715, + 0x1B80, 0x54001717, + 0x1B80, 0xE1801725, + 0x1B80, 0xE1801727, + 0x1B80, 0xBF1D1735, + 0x1B80, 0xBF1D1737, + 0x1B80, 0x301D1745, + 0x1B80, 0x301D1747, + 0x1B80, 0xE1551755, + 0x1B80, 0xE1551757, + 0x1B80, 0xE15A1765, + 0x1B80, 0xE15A1767, + 0x1B80, 0xE15E1775, + 0x1B80, 0xE15E1777, + 0x1B80, 0xE1651785, + 0x1B80, 0xE1651787, + 0x1B80, 0xE1C51795, + 0x1B80, 0xE1C51797, + 0x1B80, 0x551317A5, + 0x1B80, 0x551317A7, + 0x1B80, 0xE16117B5, + 0x1B80, 0xE16117B7, + 0x1B80, 0x551517C5, + 0x1B80, 0x551517C7, + 0x1B80, 0xE16517D5, + 0x1B80, 0xE16517D7, + 0x1B80, 0xE1C517E5, + 0x1B80, 0xE1C517E7, + 0x1B80, 0x000117F5, + 0x1B80, 0x000117F7, + 0x1B80, 0x54BF1805, + 0x1B80, 0x54BF1807, + 0x1B80, 0x54C01815, + 0x1B80, 0x54C01817, + 0x1B80, 0x54A31825, + 0x1B80, 0x54A31827, + 0x1B80, 0x54C11835, + 0x1B80, 0x54C11837, + 0x1B80, 0x54A41845, + 0x1B80, 0x54A41847, + 0x1B80, 0x4C181855, + 0x1B80, 0x4C181857, + 0x1B80, 0xBF071865, + 0x1B80, 0xBF071867, + 0x1B80, 0x54C21875, + 0x1B80, 0x54C21877, + 0x1B80, 0x54A41885, + 0x1B80, 0x54A41887, + 0x1B80, 0xBF041895, + 0x1B80, 0xBF041897, + 0x1B80, 0x54C118A5, + 0x1B80, 0x54C118A7, + 0x1B80, 0x54A318B5, + 0x1B80, 0x54A318B7, + 0x1B80, 0xBF0118C5, + 0x1B80, 0xBF0118C7, + 0x1B80, 0xE1D318D5, + 0x1B80, 0xE1D318D7, + 0x1B80, 0x54DF18E5, + 0x1B80, 0x54DF18E7, + 0x1B80, 0x000118F5, + 0x1B80, 0x000118F7, + 0x1B80, 0x54BF1905, + 0x1B80, 0x54BF1907, + 0x1B80, 0x54E51915, + 0x1B80, 0x54E51917, + 0x1B80, 0x050A1925, + 0x1B80, 0x050A1927, + 0x1B80, 0x54DF1935, + 0x1B80, 0x54DF1937, + 0x1B80, 0x00011945, + 0x1B80, 0x00011947, + 0x1B80, 0x7F201955, + 0x1B80, 0x7F201957, + 0x1B80, 0x7E001965, + 0x1B80, 0x7E001967, + 0x1B80, 0x7D001975, + 0x1B80, 0x7D001977, + 0x1B80, 0x55011985, + 0x1B80, 0x55011987, + 0x1B80, 0x5C311995, + 0x1B80, 0x5C311997, + 0x1B80, 0xE16119A5, + 0x1B80, 0xE16119A7, + 0x1B80, 0xE16519B5, + 0x1B80, 0xE16519B7, + 0x1B80, 0x548019C5, + 0x1B80, 0x548019C7, + 0x1B80, 0x540019D5, + 0x1B80, 0x540019D7, + 0x1B80, 0x548119E5, + 0x1B80, 0x548119E7, + 0x1B80, 0x540019F5, + 0x1B80, 0x540019F7, + 0x1B80, 0x54821A05, + 0x1B80, 0x54821A07, + 0x1B80, 0x54001A15, + 0x1B80, 0x54001A17, + 0x1B80, 0xE1801A25, + 0x1B80, 0xE1801A27, + 0x1B80, 0xBFED1A35, + 0x1B80, 0xBFED1A37, + 0x1B80, 0x301D1A45, + 0x1B80, 0x301D1A47, + 0x1B80, 0x00231A55, + 0x1B80, 0x00231A57, + 0x1B80, 0x7B201A65, + 0x1B80, 0x7B201A67, + 0x1B80, 0x7A001A75, + 0x1B80, 0x7A001A77, + 0x1B80, 0x79001A85, + 0x1B80, 0x79001A87, + 0x1B80, 0xE1C91A95, + 0x1B80, 0xE1C91A97, + 0x1B80, 0x00021AA5, + 0x1B80, 0x00021AA7, + 0x1B80, 0x00011AB5, + 0x1B80, 0x00011AB7, + 0x1B80, 0x00221AC5, + 0x1B80, 0x00221AC7, + 0x1B80, 0x7B201AD5, + 0x1B80, 0x7B201AD7, + 0x1B80, 0x7A001AE5, + 0x1B80, 0x7A001AE7, + 0x1B80, 0x79001AF5, + 0x1B80, 0x79001AF7, 
+ 0x1B80, 0xE1C91B05, + 0x1B80, 0xE1C91B07, + 0x1B80, 0x00021B15, + 0x1B80, 0x00021B17, + 0x1B80, 0x00011B25, + 0x1B80, 0x00011B27, + 0x1B80, 0x74021B35, + 0x1B80, 0x74021B37, + 0x1B80, 0x003F1B45, + 0x1B80, 0x003F1B47, + 0x1B80, 0x74001B55, + 0x1B80, 0x74001B57, + 0x1B80, 0x00021B65, + 0x1B80, 0x00021B67, + 0x1B80, 0x00011B75, + 0x1B80, 0x00011B77, + 0x1B80, 0x4D041B85, + 0x1B80, 0x4D041B87, + 0x1B80, 0x2EF81B95, + 0x1B80, 0x2EF81B97, + 0x1B80, 0x00001BA5, + 0x1B80, 0x00001BA7, + 0x1B80, 0x23301BB5, + 0x1B80, 0x23301BB7, + 0x1B80, 0x00241BC5, + 0x1B80, 0x00241BC7, + 0x1B80, 0x23E01BD5, + 0x1B80, 0x23E01BD7, + 0x1B80, 0x003F1BE5, + 0x1B80, 0x003F1BE7, + 0x1B80, 0x23FC1BF5, + 0x1B80, 0x23FC1BF7, + 0x1B80, 0xBFCE1C05, + 0x1B80, 0xBFCE1C07, + 0x1B80, 0x2EF01C15, + 0x1B80, 0x2EF01C17, + 0x1B80, 0x00001C25, + 0x1B80, 0x00001C27, + 0x1B80, 0x4D001C35, + 0x1B80, 0x4D001C37, + 0x1B80, 0x00011C45, + 0x1B80, 0x00011C47, + 0x1B80, 0x549F1C55, + 0x1B80, 0x549F1C57, + 0x1B80, 0x54FF1C65, + 0x1B80, 0x54FF1C67, + 0x1B80, 0x54001C75, + 0x1B80, 0x54001C77, + 0x1B80, 0x00011C85, + 0x1B80, 0x00011C87, + 0x1B80, 0x5C311C95, + 0x1B80, 0x5C311C97, + 0x1B80, 0x07141CA5, + 0x1B80, 0x07141CA7, + 0x1B80, 0x54001CB5, + 0x1B80, 0x54001CB7, + 0x1B80, 0x5C321CC5, + 0x1B80, 0x5C321CC7, + 0x1B80, 0x00011CD5, + 0x1B80, 0x00011CD7, + 0x1B80, 0x5C321CE5, + 0x1B80, 0x5C321CE7, + 0x1B80, 0x07141CF5, + 0x1B80, 0x07141CF7, + 0x1B80, 0x54001D05, + 0x1B80, 0x54001D07, + 0x1B80, 0x5C311D15, + 0x1B80, 0x5C311D17, + 0x1B80, 0x00011D25, + 0x1B80, 0x00011D27, + 0x1B80, 0x4C981D35, + 0x1B80, 0x4C981D37, + 0x1B80, 0x4C181D45, + 0x1B80, 0x4C181D47, + 0x1B80, 0x00011D55, + 0x1B80, 0x00011D57, + 0x1B80, 0x5C321D65, + 0x1B80, 0x5C321D67, + 0x1B80, 0x62841D75, + 0x1B80, 0x62841D77, + 0x1B80, 0x66861D85, + 0x1B80, 0x66861D87, + 0x1B80, 0x6C031D95, + 0x1B80, 0x6C031D97, + 0x1B80, 0x7B201DA5, + 0x1B80, 0x7B201DA7, + 0x1B80, 0x7A001DB5, + 0x1B80, 0x7A001DB7, + 0x1B80, 0x79001DC5, + 0x1B80, 0x79001DC7, + 0x1B80, 0x7F201DD5, + 0x1B80, 0x7F201DD7, + 0x1B80, 0x7E001DE5, + 0x1B80, 0x7E001DE7, + 0x1B80, 0x7D001DF5, + 0x1B80, 0x7D001DF7, + 0x1B80, 0x09011E05, + 0x1B80, 0x09011E07, + 0x1B80, 0x0C011E15, + 0x1B80, 0x0C011E17, + 0x1B80, 0x0BA61E25, + 0x1B80, 0x0BA61E27, + 0x1B80, 0x00011E35, + 0x1B80, 0x00011E37, + 0x1B80, 0x00000006, + 0x1B80, 0x00000002, + +}; + +RTW_DECL_TABLE_PHY_COND(rtw8821c_bb, rtw_phy_cfg_bb); + +static const struct rtw_phy_pg_cfg_pair rtw8821c_bb_pg_type0[] = { + { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, }, + { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363636, }, + { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, }, + { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363636, }, + { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, }, + { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363636, }, + { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, }, + { 0, 0, 0, 0x00000c44, 0xffffffff, 0x22222224, }, + { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, }, + { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, }, + { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343434, }, + { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343434, }, + { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, }, + { 1, 0, 0, 0x00000c44, 0xffffffff, 0x20202022, }, +}; + +RTW_DECL_TABLE_BB_PG(rtw8821c_bb_pg_type0); + +static const u32 rtw8821c_rf_a[] = { + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x000, 0x00010000, + 0x018, 0x00010D24, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x000, 0x00010000, + 0x018, 0x00010D24, + 0x90000400, 0x00000000, 
0x40000000, 0x00000000, + 0x000, 0x00010000, + 0x018, 0x00010D24, + 0xA0000000, 0x00000000, + 0x000, 0x00010000, + 0x018, 0x00010D24, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000002, + 0x03E, 0x0000003F, + 0x03F, 0x000C0F4E, + 0x033, 0x00000001, + 0x03E, 0x00000034, + 0x03F, 0x0004080E, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000002, + 0x03E, 0x0000003F, + 0x03F, 0x000C0F4E, + 0x033, 0x00000001, + 0x03E, 0x00000034, + 0x03F, 0x0004080E, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000002, + 0x03E, 0x0000003F, + 0x03F, 0x000C0F4E, + 0x033, 0x00000001, + 0x03E, 0x00000034, + 0x03F, 0x0004080E, + 0xA0000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000002, + 0x03E, 0x0000003F, + 0x03F, 0x000C0F4E, + 0x033, 0x00000001, + 0x03E, 0x00000034, + 0x03F, 0x0004080E, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x000005DF, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0x0B1, 0x0007DBE4, + 0x0B2, 0x000225D1, + 0x0B3, 0x000FC760, + 0x0B4, 0x00099DD0, + 0x0B5, 0x000400FC, + 0x0B6, 0x000187F0, + 0x0B7, 0x00030018, + 0x0B8, 0x00080800, + 0x0B9, 0x00000000, + 0x0BA, 0x00008000, + 0x0BB, 0x00000004, + 0x0BC, 0x00040000, + 0x0BD, 0x00000000, + 0x0BE, 0x00000000, + 0x0BF, 0x00000000, + 0x0C0, 0x00000000, + 0x0C1, 0x00000000, + 0x0C2, 0x00000000, + 0x0C3, 0x00000000, + 0x0C4, 0x00002402, + 0x0C5, 0x00000009, + 0x0C6, 0x00040299, + 0x0C7, 0x00055555, + 0x0C8, 0x0000C16C, + 0x0C9, 0x0001C140, + 0x0CA, 0x00000000, + 0x0CB, 0x00000000, + 0x0CC, 0x00000000, + 0x0CD, 0x00000000, + 0x0CE, 0x00090C00, + 0x0CF, 0x0006D200, + 0x0DF, 0x00000009, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0x0B1, 0x0007DBE4, + 0x0B2, 0x000225D1, + 0x0B3, 0x000FC760, + 0x0B4, 0x00099DD0, + 0x0B5, 0x000400FC, + 0x0B6, 0x000187F0, + 0x0B7, 0x00030018, + 0x0B8, 0x00080800, + 0x0B9, 0x00000000, + 0x0BA, 0x00008000, + 0x0BB, 0x00000004, + 0x0BC, 0x00040000, + 0x0BD, 0x00000000, + 0x0BE, 0x00000000, + 0x0BF, 0x00000000, + 0x0C0, 0x00000000, + 0x0C1, 0x00000000, + 0x0C2, 0x00000000, + 0x0C3, 0x00000000, + 0x0C4, 0x00002402, + 0x0C5, 0x00000009, + 0x0C6, 0x00040299, + 0x0C7, 0x00055555, + 0x0C8, 0x0000C16C, + 0x0C9, 0x0001C140, + 0x0CA, 0x00000000, + 0x0CB, 0x00000000, + 0x0CC, 0x00000000, + 0x0CD, 0x00000000, + 0x0CE, 0x00090C00, + 0x0CF, 0x0006D200, + 0x0DF, 0x00000009, + 0x90000400, 0x00000000, 0x40000000, 
0x00000000, + 0x0B0, 0x000FF0F8, + 0x0B1, 0x0007DBE4, + 0x0B2, 0x000225D1, + 0x0B3, 0x000FC760, + 0x0B4, 0x00099DD0, + 0x0B5, 0x000400FC, + 0x0B6, 0x000187F0, + 0x0B7, 0x00030018, + 0x0B8, 0x00080800, + 0x0B9, 0x00000000, + 0x0BA, 0x00008000, + 0x0BB, 0x00000004, + 0x0BC, 0x00040000, + 0x0BD, 0x00000000, + 0x0BE, 0x00000000, + 0x0BF, 0x00000000, + 0x0C0, 0x00000000, + 0x0C1, 0x00000000, + 0x0C2, 0x00000000, + 0x0C3, 0x00000000, + 0x0C4, 0x00002402, + 0x0C5, 0x00000009, + 0x0C6, 0x00040299, + 0x0C7, 0x00055555, + 0x0C8, 0x0000C16C, + 0x0C9, 0x0001C140, + 0x0CA, 0x00000000, + 0x0CB, 0x00000000, + 0x0CC, 0x00000000, + 0x0CD, 0x00000000, + 0x0CE, 0x00090C00, + 0x0CF, 0x0006D200, + 0x0DF, 0x00000009, + 0xA0000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0x0B1, 0x0007DBE4, + 0x0B2, 0x000225D1, + 0x0B3, 0x000FC760, + 0x0B4, 0x00099DD0, + 0x0B5, 0x000400FC, + 0x0B6, 0x000187F0, + 0x0B7, 0x00030018, + 0x0B8, 0x00080800, + 0x0B9, 0x00000000, + 0x0BA, 0x00008000, + 0x0BB, 0x00000004, + 0x0BC, 0x00040000, + 0x0BD, 0x00000000, + 0x0BE, 0x00000000, + 0x0BF, 0x00000000, + 0x0C0, 0x00000000, + 0x0C1, 0x00000000, + 0x0C2, 0x00000000, + 0x0C3, 0x00000000, + 0x0C4, 0x00002402, + 0x0C5, 0x00000009, + 0x0C6, 0x00040299, + 0x0C7, 0x00055555, + 0x0C8, 0x0000C16C, + 0x0C9, 0x0001C140, + 0x0CA, 0x00000000, + 0x0CB, 0x00000000, + 0x0CC, 0x00000000, + 0x0CD, 0x00000000, + 0x0CE, 0x00090C00, + 0x0CF, 0x0006D200, + 0x0DF, 0x00000009, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00010000, + 0x033, 0x00000058, + 0x03F, 0x0000001C, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00010000, + 0x033, 0x00000058, + 0x03F, 0x0000001C, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00010000, + 0x033, 0x00000058, + 0x03F, 0x0000001C, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00010000, + 0x033, 0x00000058, + 0x03F, 0x0000001C, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010524, + 0x081, 0x0000FCC1, + 0x089, 0x00000004, + 0x08A, 0x0008A186, + 0x08B, 0x0006FFFC, + 0x08C, 0x000312C7, + 0x08D, 0x00020888, + 0x08E, 0x00064140, + 0x08F, 0x000A8010, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010524, + 0x081, 0x0000FCC1, + 0x089, 0x00000004, + 0x08A, 0x0008A186, + 0x08B, 0x0006FFFC, + 0x08C, 0x000312C7, + 0x08D, 0x00020888, + 0x08E, 0x00064140, + 0x08F, 0x000A8010, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010524, + 0x081, 0x0000FCC1, + 0x089, 0x00000004, + 0x08A, 0x0008A186, + 0x08B, 0x0007060C, + 0x08C, 0x000312C7, + 0x08D, 0x00020888, + 0x08E, 0x00064140, + 0x08F, 0x000A8010, + 0xA0000000, 0x00000000, + 0x018, 0x00010524, + 0x081, 0x0000FCC1, + 0x089, 0x00000004, + 0x08A, 0x0008A186, + 0x08B, 0x0007060C, + 0x08C, 0x000312C7, + 0x08D, 0x00020888, + 0x08E, 0x00064140, + 0x08F, 0x000A8010, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000020, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000020, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0DD, 0x00000020, + 0xA0000000, 0x00000000, + 0x0DD, 0x00000020, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00020000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00020000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 
0x40000000, 0x00000000, + 0x033, 0x00000007, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000006, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000005, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000004, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000003, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000002, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000001, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000000, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000007, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000006, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000005, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000004, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000003, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000002, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000001, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000000, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000007, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000006, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000005, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000004, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000003, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000002, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000001, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000000, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0xA0000000, 0x00000000, + 0x033, 0x00000007, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000006, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000005, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000004, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000003, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000002, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000001, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000000, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000000F, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x0000000E, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x0000000D, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x0000000C, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x0000000B, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x0000000A, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000009, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000008, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000000F, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x0000000E, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x0000000D, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x0000000C, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x0000000B, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x0000000A, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000009, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000008, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x0000000F, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x0000000E, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x0000000D, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 
0x0000000C, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x0000000B, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x0000000A, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000009, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000008, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0xA0000000, 0x00000000, + 0x033, 0x0000000F, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x0000000E, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x0000000D, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x0000000C, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x0000000B, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x0000000A, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000009, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000008, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000017, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000016, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000015, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000014, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000013, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000012, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000011, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000010, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000017, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000016, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000015, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000014, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000013, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000012, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000011, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000010, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000017, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000016, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000015, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000014, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000013, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000012, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000011, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000010, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x033, 0x00000017, + 0x03E, 0x00038000, + 0x03F, 0x000C3186, + 0x033, 0x00000016, + 0x03E, 0x00038080, + 0x03F, 0x000C3186, + 0x033, 0x00000015, + 0x03E, 0x000380C8, + 0x03F, 0x000C3186, + 0x033, 0x00000014, + 0x03E, 0x00038190, + 0x03F, 0x000C3186, + 0x033, 0x00000013, + 0x03E, 0x00038998, + 0x03F, 0x000C3186, + 0x033, 0x00000012, + 0x03E, 0x00039840, + 0x03F, 0x000C3186, + 0x033, 0x00000011, + 0x03E, 0x000398C4, + 0x03F, 0x000C3186, + 0x033, 0x00000010, + 0x03E, 0x00039930, + 0x03F, 0x000C3186, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x0000000F, + 0x033, 0x00000001, + 0x03F, 0x0000000A, + 0x033, 0x00000002, + 0x03F, 0x00000005, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x0000000F, + 0x033, 0x00000001, + 0x03F, 0x0000000A, + 0x033, 
0x00000002, + 0x03F, 0x00000005, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x0000000F, + 0x033, 0x00000001, + 0x03F, 0x0000000A, + 0x033, 0x00000002, + 0x03F, 0x00000005, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x0000000F, + 0x033, 0x00000001, + 0x03F, 0x0000000A, + 0x033, 0x00000002, + 0x03F, 0x00000005, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00000401, + 0x084, 0x00001209, + 0x086, 0x000001A0, + 0x087, 0x000E8180, + 0x088, 0x00006020, + 0x0DF, 0x00008009, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00000401, + 0x084, 0x00001209, + 0x086, 0x000001A0, + 0x087, 0x000E8180, + 0x088, 0x00006020, + 0x0DF, 0x00008009, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00000401, + 0x084, 0x00001209, + 0x086, 0x000001A0, + 0x087, 0x000E8180, + 0x088, 0x00006020, + 0x0DF, 0x00008009, + 0xA0000000, 0x00000000, + 0x018, 0x00000401, + 0x084, 0x00001209, + 0x086, 0x000001A0, + 0x087, 0x000E8180, + 0x088, 0x00006020, + 0x0DF, 0x00008009, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00008000, + 0x033, 0x0000000F, + 0x03F, 0x0000003C, + 0x033, 0x0000000E, + 0x03F, 0x00000038, + 0x033, 0x0000000D, + 0x03F, 0x00000030, + 0x033, 0x0000000C, + 0x03F, 0x00000028, + 0x033, 0x0000000B, + 0x03F, 0x00000020, + 0x033, 0x0000000A, + 0x03F, 0x00000018, + 0x033, 0x00000009, + 0x03F, 0x00000010, + 0x033, 0x00000008, + 0x03F, 0x00000008, + 0x033, 0x00000007, + 0x03F, 0x0000003C, + 0x033, 0x00000006, + 0x03F, 0x00000038, + 0x033, 0x00000005, + 0x03F, 0x00000030, + 0x033, 0x00000004, + 0x03F, 0x00000028, + 0x033, 0x00000003, + 0x03F, 0x00000020, + 0x033, 0x00000002, + 0x03F, 0x00000018, + 0x033, 0x00000001, + 0x03F, 0x00000010, + 0x033, 0x00000000, + 0x03F, 0x00000008, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00008000, + 0x033, 0x0000000F, + 0x03F, 0x0000003C, + 0x033, 0x0000000E, + 0x03F, 0x00000038, + 0x033, 0x0000000D, + 0x03F, 0x00000030, + 0x033, 0x0000000C, + 0x03F, 0x00000028, + 0x033, 0x0000000B, + 0x03F, 0x00000020, + 0x033, 0x0000000A, + 0x03F, 0x00000018, + 0x033, 0x00000009, + 0x03F, 0x00000010, + 0x033, 0x00000008, + 0x03F, 0x00000008, + 0x033, 0x00000007, + 0x03F, 0x0000003C, + 0x033, 0x00000006, + 0x03F, 0x00000038, + 0x033, 0x00000005, + 0x03F, 0x00000030, + 0x033, 0x00000004, + 0x03F, 0x00000028, + 0x033, 0x00000003, + 0x03F, 0x00000020, + 0x033, 0x00000002, + 0x03F, 0x00000018, + 0x033, 0x00000001, + 0x03F, 0x00000010, + 0x033, 0x00000000, + 0x03F, 0x00000008, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00008000, + 0x033, 0x0000000F, + 0x03F, 0x0000003C, + 0x033, 0x0000000E, + 0x03F, 0x00000038, + 0x033, 0x0000000D, + 0x03F, 0x00000030, + 0x033, 0x0000000C, + 0x03F, 0x00000028, + 0x033, 0x0000000B, + 0x03F, 0x00000020, + 0x033, 0x0000000A, + 0x03F, 0x00000018, + 0x033, 0x00000009, + 0x03F, 0x00000010, + 0x033, 0x00000008, + 0x03F, 0x00000008, + 0x033, 0x00000007, + 0x03F, 0x0000003C, + 0x033, 0x00000006, + 0x03F, 0x00000038, + 0x033, 0x00000005, + 0x03F, 0x00000030, + 0x033, 0x00000004, + 0x03F, 0x00000028, + 0x033, 0x00000003, + 0x03F, 0x00000020, + 0x033, 0x00000002, + 0x03F, 0x00000018, + 0x033, 0x00000001, + 0x03F, 0x00000010, + 0x033, 0x00000000, + 0x03F, 0x00000008, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00008000, + 0x033, 
0x0000000F, + 0x03F, 0x0000003C, + 0x033, 0x0000000E, + 0x03F, 0x00000038, + 0x033, 0x0000000D, + 0x03F, 0x00000030, + 0x033, 0x0000000C, + 0x03F, 0x00000028, + 0x033, 0x0000000B, + 0x03F, 0x00000020, + 0x033, 0x0000000A, + 0x03F, 0x00000018, + 0x033, 0x00000009, + 0x03F, 0x00000010, + 0x033, 0x00000008, + 0x03F, 0x00000008, + 0x033, 0x00000007, + 0x03F, 0x0000003C, + 0x033, 0x00000006, + 0x03F, 0x00000038, + 0x033, 0x00000005, + 0x03F, 0x00000030, + 0x033, 0x00000004, + 0x03F, 0x00000028, + 0x033, 0x00000003, + 0x03F, 0x00000020, + 0x033, 0x00000002, + 0x03F, 0x00000018, + 0x033, 0x00000001, + 0x03F, 0x00000010, + 0x033, 0x00000000, + 0x03F, 0x00000008, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000002, + 0x033, 0x0000001E, + 0x03F, 0x00000000, + 0x033, 0x0000001C, + 0x03F, 0x00000000, + 0x033, 0x0000000E, + 0x03F, 0x00000000, + 0x033, 0x0000000C, + 0x03F, 0x00000000, + 0x033, 0x0000000A, + 0x03F, 0x00000002, + 0x033, 0x00000008, + 0x03F, 0x00000000, + 0x033, 0x00000036, + 0x03F, 0x00000000, + 0x033, 0x00000037, + 0x03F, 0x00000000, + 0x033, 0x00000034, + 0x03F, 0x00000000, + 0x033, 0x00000026, + 0x03F, 0x00000006, + 0x033, 0x00000027, + 0x03F, 0x00000006, + 0x033, 0x00000024, + 0x03F, 0x00000006, + 0x033, 0x00000022, + 0x03F, 0x00000006, + 0x033, 0x00000020, + 0x03F, 0x00000006, + 0x033, 0x00000006, + 0x03F, 0x00000000, + 0x033, 0x00000007, + 0x03F, 0x00000006, + 0x033, 0x00000004, + 0x03F, 0x00000006, + 0x033, 0x00000002, + 0x03F, 0x00000006, + 0x033, 0x00000000, + 0x03F, 0x00000006, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000002, + 0x033, 0x0000001E, + 0x03F, 0x00000000, + 0x033, 0x0000001C, + 0x03F, 0x00000000, + 0x033, 0x0000000E, + 0x03F, 0x00000000, + 0x033, 0x0000000C, + 0x03F, 0x00000000, + 0x033, 0x0000000A, + 0x03F, 0x00000002, + 0x033, 0x00000008, + 0x03F, 0x00000000, + 0x033, 0x00000036, + 0x03F, 0x00000000, + 0x033, 0x00000037, + 0x03F, 0x00000000, + 0x033, 0x00000034, + 0x03F, 0x00000000, + 0x033, 0x00000026, + 0x03F, 0x00000006, + 0x033, 0x00000027, + 0x03F, 0x00000006, + 0x033, 0x00000024, + 0x03F, 0x00000006, + 0x033, 0x00000022, + 0x03F, 0x00000006, + 0x033, 0x00000020, + 0x03F, 0x00000006, + 0x033, 0x00000006, + 0x03F, 0x00000000, + 0x033, 0x00000007, + 0x03F, 0x00000006, + 0x033, 0x00000004, + 0x03F, 0x00000006, + 0x033, 0x00000002, + 0x03F, 0x00000006, + 0x033, 0x00000000, + 0x03F, 0x00000006, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00000002, + 0x033, 0x0000001E, + 0x03F, 0x00000000, + 0x033, 0x0000001C, + 0x03F, 0x00000000, + 0x033, 0x0000000E, + 0x03F, 0x00000000, + 0x033, 0x0000000C, + 0x03F, 0x00000000, + 0x033, 0x0000000A, + 0x03F, 0x00000002, + 0x033, 0x00000008, + 0x03F, 0x00000000, + 0x033, 0x00000036, + 0x03F, 0x00000000, + 0x033, 0x00000037, + 0x03F, 0x00000000, + 0x033, 0x00000034, + 0x03F, 0x00000000, + 0x033, 0x00000026, + 0x03F, 0x00000006, + 0x033, 0x00000027, + 0x03F, 0x00000006, + 0x033, 0x00000024, + 0x03F, 0x00000006, + 0x033, 0x00000022, + 0x03F, 0x00000006, + 0x033, 0x00000020, + 0x03F, 0x00000006, + 0x033, 0x00000006, + 0x03F, 0x00000000, + 0x033, 0x00000007, + 0x03F, 0x00000006, + 0x033, 0x00000004, + 0x03F, 0x00000006, + 0x033, 0x00000002, + 0x03F, 0x00000006, + 0x033, 0x00000000, + 0x03F, 0x00000006, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00000002, + 0x033, 0x0000001E, + 0x03F, 0x00000000, + 0x033, 0x0000001C, + 0x03F, 0x00000000, + 0x033, 0x0000000E, + 0x03F, 0x00000000, + 
0x033, 0x0000000C, + 0x03F, 0x00000000, + 0x033, 0x0000000A, + 0x03F, 0x00000002, + 0x033, 0x00000008, + 0x03F, 0x00000000, + 0x033, 0x00000036, + 0x03F, 0x00000000, + 0x033, 0x00000037, + 0x03F, 0x00000000, + 0x033, 0x00000034, + 0x03F, 0x00000000, + 0x033, 0x00000026, + 0x03F, 0x00000006, + 0x033, 0x00000027, + 0x03F, 0x00000006, + 0x033, 0x00000024, + 0x03F, 0x00000006, + 0x033, 0x00000022, + 0x03F, 0x00000006, + 0x033, 0x00000020, + 0x03F, 0x00000006, + 0x033, 0x00000006, + 0x03F, 0x00000000, + 0x033, 0x00000007, + 0x03F, 0x00000006, + 0x033, 0x00000004, + 0x03F, 0x00000006, + 0x033, 0x00000002, + 0x03F, 0x00000006, + 0x033, 0x00000000, + 0x03F, 0x00000006, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0A0, 0x000F0005, + 0x0A1, 0x0006C000, + 0x0A2, 0x0000161B, + 0x0A3, 0x000B9CBD, + 0x0AF, 0x00070000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0A0, 0x000F0005, + 0x0A1, 0x0006C000, + 0x0A2, 0x0000161B, + 0x0A3, 0x000B9CBD, + 0x0AF, 0x00070000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0A0, 0x000F0005, + 0x0A1, 0x0006C000, + 0x0A2, 0x0000161B, + 0x0A3, 0x000B9CBD, + 0x0AF, 0x00070000, + 0xA0000000, 0x00000000, + 0x0A0, 0x000F0005, + 0x0A1, 0x0006C000, + 0x0A2, 0x0000161B, + 0x0A3, 0x000B9CBD, + 0x0AF, 0x00070000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0DE, 0x00000200, + 0x0EE, 0x00000100, + 0x033, 0x00000007, + 0x03F, 0x00000043, + 0x033, 0x00000006, + 0x03F, 0x0000007A, + 0x033, 0x00000005, + 0x03F, 0x00000041, + 0x033, 0x00000004, + 0x03F, 0x00000079, + 0x033, 0x00000003, + 0x03F, 0x00000043, + 0x033, 0x00000002, + 0x03F, 0x0000007A, + 0x033, 0x00000001, + 0x03F, 0x00000041, + 0x033, 0x00000000, + 0x03F, 0x00000079, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0DE, 0x00000200, + 0x0EE, 0x00000100, + 0x033, 0x00000007, + 0x03F, 0x00000043, + 0x033, 0x00000006, + 0x03F, 0x0000007A, + 0x033, 0x00000005, + 0x03F, 0x00000041, + 0x033, 0x00000004, + 0x03F, 0x00000079, + 0x033, 0x00000003, + 0x03F, 0x00000043, + 0x033, 0x00000002, + 0x03F, 0x0000007A, + 0x033, 0x00000001, + 0x03F, 0x00000041, + 0x033, 0x00000000, + 0x03F, 0x00000079, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0DE, 0x00000200, + 0x0EE, 0x00000100, + 0x033, 0x00000007, + 0x03F, 0x00000043, + 0x033, 0x00000006, + 0x03F, 0x0000007A, + 0x033, 0x00000005, + 0x03F, 0x00000041, + 0x033, 0x00000004, + 0x03F, 0x00000079, + 0x033, 0x00000003, + 0x03F, 0x00000043, + 0x033, 0x00000002, + 0x03F, 0x0000007A, + 0x033, 0x00000001, + 0x03F, 0x00000041, + 0x033, 0x00000000, + 0x03F, 0x00000079, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0DE, 0x00000200, + 0x0EE, 0x00000100, + 0x033, 0x00000007, + 0x03F, 0x00000043, + 0x033, 0x00000006, + 0x03F, 0x0000007A, + 0x033, 0x00000005, + 0x03F, 0x00000041, + 0x033, 0x00000004, + 0x03F, 0x00000079, + 0x033, 0x00000003, + 0x03F, 0x00000043, + 0x033, 0x00000002, + 0x03F, 0x0000007A, + 0x033, 0x00000001, + 0x03F, 0x00000041, + 0x033, 0x00000000, + 0x03F, 0x00000079, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0B8, 0x00080A00, + 0x0B0, 0x000FF0FA, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0B8, 0x00080A00, + 0x0B0, 0x000FF0FA, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0B8, 0x00080A00, + 0x0B0, 0x000FF0FA, + 0xA0000000, 0x00000000, + 0x0B8, 0x00080A00, + 0x0B0, 0x000FF0FA, + 0xB0000000, 0x00000000, + 0xFFE, 0x00000000, + 0x80001005, 0x00000000, 
0x40000000, 0x00000000, + 0x0CA, 0x00080000, + 0x0C9, 0x0001C141, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0CA, 0x00080000, + 0x0C9, 0x0001C141, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0CA, 0x00080000, + 0x0C9, 0x0001C141, + 0xA0000000, 0x00000000, + 0x0CA, 0x00080000, + 0x0C9, 0x0001C141, + 0xB0000000, 0x00000000, + 0xFFE, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0xA0000000, 0x00000000, + 0x0B0, 0x000FF0F8, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00018D24, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00018D24, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00018D24, + 0xA0000000, 0x00000000, + 0x018, 0x00018D24, + 0xB0000000, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0xFFE, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010D24, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010D24, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x018, 0x00010D24, + 0xA0000000, 0x00000000, + 0x018, 0x00010D24, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x01B, 0x00003A40, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x01B, 0x00003A40, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x01B, 0x00003A40, + 0xA0000000, 0x00000000, + 0x01B, 0x00003A40, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x0004D3A3, + 0x062, 0x0000D303, + 0x063, 0x00000002, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x0004D3A3, + 0x062, 0x0000D303, + 0x063, 0x00000002, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x061, 0x0004D3A1, + 0x062, 0x0000D3A3, + 0x063, 0x00000002, + 0xA0000000, 0x00000000, + 0x061, 0x0004D3A1, + 0x062, 0x0000D3A3, + 0x063, 0x00000002, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000200, + 0x030, 0x00000000, + 0x03F, 0x00033303, + 0x030, 0x00001000, + 0x03F, 0x00033303, + 0x030, 0x00002000, + 0x03F, 0x00033303, + 0x030, 0x00003000, + 0x03F, 0x00033303, + 0x030, 0x00004000, + 0x03F, 0x00033303, + 0x030, 0x00005000, + 0x03F, 0x00033303, + 0x030, 0x00006000, + 0x03F, 0x00033303, + 0x030, 0x00007000, + 0x03F, 0x00033303, + 0x030, 0x00008000, + 0x03F, 0x00033303, + 0x030, 0x00009000, + 0x03F, 0x00033303, + 0x030, 0x0000A000, + 0x03F, 0x00033303, + 0x030, 0x0000B000, + 0x03F, 0x00033303, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000200, + 0x030, 0x00000000, + 0x03F, 0x000333A3, + 0x030, 0x00001000, + 0x03F, 0x000333A3, + 0x030, 0x00002000, + 0x03F, 0x000333A3, + 0x030, 0x00003000, + 0x03F, 0x000333A3, + 0x030, 0x00004000, + 0x03F, 0x000313A3, + 0x030, 0x00005000, + 0x03F, 0x000313A3, + 0x030, 0x00006000, + 0x03F, 0x000313A3, + 0x030, 0x00007000, + 0x03F, 0x000313A3, + 0x030, 0x00008000, + 0x03F, 0x000333A3, + 0x030, 0x00009000, + 0x03F, 0x000333A3, + 0x030, 0x0000A000, + 0x03F, 0x000333A3, + 0x030, 0x0000B000, + 0x03F, 0x000333A3, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000200, + 0x030, 0x00000000, + 0x03F, 0x000335A3, + 0x030, 0x00001000, + 0x03F, 0x000335A3, + 0x030, 0x00002000, + 0x03F, 0x000335A3, + 0x030, 0x00003000, + 0x03F, 0x000335A3, + 0x030, 0x00004000, + 0x03F, 
0x000335A3, + 0x030, 0x00005000, + 0x03F, 0x000335A3, + 0x030, 0x00006000, + 0x03F, 0x000335A3, + 0x030, 0x00007000, + 0x03F, 0x000335A3, + 0x030, 0x00008000, + 0x03F, 0x000335A3, + 0x030, 0x00009000, + 0x03F, 0x000335A3, + 0x030, 0x0000A000, + 0x03F, 0x000335A3, + 0x030, 0x0000B000, + 0x03F, 0x000335A3, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000200, + 0x030, 0x00000000, + 0x03F, 0x000335A3, + 0x030, 0x00001000, + 0x03F, 0x000335A3, + 0x030, 0x00002000, + 0x03F, 0x000335A3, + 0x030, 0x00003000, + 0x03F, 0x000335A3, + 0x030, 0x00004000, + 0x03F, 0x000335A3, + 0x030, 0x00005000, + 0x03F, 0x000335A3, + 0x030, 0x00006000, + 0x03F, 0x000335A3, + 0x030, 0x00007000, + 0x03F, 0x000335A3, + 0x030, 0x00008000, + 0x03F, 0x000335A3, + 0x030, 0x00009000, + 0x03F, 0x000335A3, + 0x030, 0x0000A000, + 0x03F, 0x000335A3, + 0x030, 0x0000B000, + 0x03F, 0x000335A3, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000080, + 0x033, 0x00000000, + 0x03F, 0x00033303, + 0x033, 0x00000001, + 0x03F, 0x00033303, + 0x033, 0x00000002, + 0x03F, 0x00033303, + 0x033, 0x00000003, + 0x03F, 0x00033303, + 0x033, 0x00000004, + 0x03F, 0x00033303, + 0x033, 0x00000005, + 0x03F, 0x00033303, + 0x033, 0x00000006, + 0x03F, 0x00033303, + 0x033, 0x00000007, + 0x03F, 0x00033303, + 0x033, 0x00000008, + 0x03F, 0x00033303, + 0x033, 0x00000009, + 0x03F, 0x00033303, + 0x033, 0x0000000A, + 0x03F, 0x00033303, + 0x033, 0x0000000B, + 0x03F, 0x00033303, + 0x033, 0x0000000C, + 0x03F, 0x00033303, + 0x033, 0x0000000D, + 0x03F, 0x00033303, + 0x033, 0x0000000E, + 0x03F, 0x00033303, + 0x033, 0x0000000F, + 0x03F, 0x00033303, + 0x033, 0x00000010, + 0x03F, 0x00033303, + 0x033, 0x00000011, + 0x03F, 0x00033303, + 0x033, 0x00000012, + 0x03F, 0x00033303, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000080, + 0x033, 0x00000000, + 0x03F, 0x000333A3, + 0x033, 0x00000001, + 0x03F, 0x000333A3, + 0x033, 0x00000002, + 0x03F, 0x000333A3, + 0x033, 0x00000003, + 0x03F, 0x000333A3, + 0x033, 0x00000004, + 0x03F, 0x000333A3, + 0x033, 0x00000005, + 0x03F, 0x000333A3, + 0x033, 0x00000006, + 0x03F, 0x000333A3, + 0x033, 0x00000007, + 0x03F, 0x000333A3, + 0x033, 0x00000008, + 0x03F, 0x000313A3, + 0x033, 0x00000009, + 0x03F, 0x000313A3, + 0x033, 0x0000000A, + 0x03F, 0x000313A3, + 0x033, 0x0000000B, + 0x03F, 0x000313A3, + 0x033, 0x0000000C, + 0x03F, 0x000313A3, + 0x033, 0x0000000D, + 0x03F, 0x000333A3, + 0x033, 0x0000000E, + 0x03F, 0x000333A3, + 0x033, 0x0000000F, + 0x03F, 0x000333A3, + 0x033, 0x00000010, + 0x03F, 0x000333A3, + 0x033, 0x00000011, + 0x03F, 0x000333A3, + 0x033, 0x00000012, + 0x03F, 0x000333A3, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000080, + 0x033, 0x00000000, + 0x03F, 0x000335A3, + 0x033, 0x00000001, + 0x03F, 0x000335A3, + 0x033, 0x00000002, + 0x03F, 0x000335A3, + 0x033, 0x00000003, + 0x03F, 0x000335A3, + 0x033, 0x00000004, + 0x03F, 0x000335A3, + 0x033, 0x00000005, + 0x03F, 0x000335A3, + 0x033, 0x00000006, + 0x03F, 0x000335A3, + 0x033, 0x00000007, + 0x03F, 0x000335A3, + 0x033, 0x00000008, + 0x03F, 0x000335A3, + 0x033, 0x00000009, + 0x03F, 0x000335A3, + 0x033, 0x0000000A, + 0x03F, 0x000335A3, + 0x033, 0x0000000B, + 0x03F, 0x000335A3, + 0x033, 0x0000000C, + 0x03F, 0x000335A3, + 0x033, 0x0000000D, + 0x03F, 0x000335A3, + 0x033, 0x0000000E, + 0x03F, 0x000335A3, + 0x033, 0x0000000F, + 0x03F, 0x000335A3, + 0x033, 0x00000010, + 0x03F, 0x000335A3, + 0x033, 0x00000011, + 0x03F, 0x000335A3, + 0x033, 0x00000012, + 
0x03F, 0x000335A3, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000080, + 0x033, 0x00000000, + 0x03F, 0x000335A3, + 0x033, 0x00000001, + 0x03F, 0x000335A3, + 0x033, 0x00000002, + 0x03F, 0x000335A3, + 0x033, 0x00000003, + 0x03F, 0x000335A3, + 0x033, 0x00000004, + 0x03F, 0x000335A3, + 0x033, 0x00000005, + 0x03F, 0x000335A3, + 0x033, 0x00000006, + 0x03F, 0x000335A3, + 0x033, 0x00000007, + 0x03F, 0x000335A3, + 0x033, 0x00000008, + 0x03F, 0x000335A3, + 0x033, 0x00000009, + 0x03F, 0x000335A3, + 0x033, 0x0000000A, + 0x03F, 0x000335A3, + 0x033, 0x0000000B, + 0x03F, 0x000335A3, + 0x033, 0x0000000C, + 0x03F, 0x000335A3, + 0x033, 0x0000000D, + 0x03F, 0x000335A3, + 0x033, 0x0000000E, + 0x03F, 0x000335A3, + 0x033, 0x0000000F, + 0x03F, 0x000335A3, + 0x033, 0x00000010, + 0x03F, 0x000335A3, + 0x033, 0x00000011, + 0x03F, 0x000335A3, + 0x033, 0x00000012, + 0x03F, 0x000335A3, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000040, + 0x030, 0x00000644, + 0x030, 0x00001135, + 0x030, 0x00002133, + 0x030, 0x00004000, + 0x030, 0x00005000, + 0x030, 0x00006000, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000040, + 0x030, 0x00000644, + 0x030, 0x00001412, + 0x030, 0x00002202, + 0x030, 0x00004000, + 0x030, 0x00005000, + 0x030, 0x00006000, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000040, + 0x030, 0x00000640, + 0x030, 0x00001512, + 0x030, 0x00002202, + 0x030, 0x00004000, + 0x030, 0x00005000, + 0x030, 0x00006000, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000040, + 0x030, 0x00000640, + 0x030, 0x00001512, + 0x030, 0x00002202, + 0x030, 0x00004000, + 0x030, 0x00005000, + 0x030, 0x00006000, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000800, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000001, + 0x033, 0x00000021, + 0x03F, 0x00000004, + 0x033, 0x00000022, + 0x03F, 0x00000007, + 0x033, 0x00000023, + 0x03F, 0x00000024, + 0x033, 0x00000024, + 0x03F, 0x00000027, + 0x033, 0x00000025, + 0x03F, 0x0000002A, + 0x033, 0x00000026, + 0x03F, 0x0000002D, + 0x033, 0x00000027, + 0x03F, 0x00000030, + 0x033, 0x00000028, + 0x03F, 0x00000033, + 0x033, 0x00000029, + 0x03F, 0x00000036, + 0x033, 0x0000002A, + 0x03F, 0x00000039, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000E42, + 0x033, 0x00000021, + 0x03F, 0x00000E45, + 0x033, 0x00000022, + 0x03F, 0x00000E65, + 0x033, 0x00000023, + 0x03F, 0x00000E68, + 0x033, 0x00000024, + 0x03F, 0x00000EE4, + 0x033, 0x00000025, + 0x03F, 0x00000EE7, + 0x033, 0x00000026, + 0x03F, 0x00000EEA, + 0x033, 0x00000027, + 0x03F, 0x00000EED, + 0x033, 0x00000028, + 0x03F, 0x00000EF0, + 0x033, 0x00000029, + 0x03F, 0x00000EF3, + 0x033, 0x0000002A, + 0x03F, 0x00000EF6, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000E42, + 0x033, 0x00000021, + 0x03F, 0x00000E45, + 0x033, 0x00000022, + 0x03F, 0x00000E48, + 0x033, 0x00000023, + 0x03F, 0x00000E68, + 0x033, 0x00000024, + 0x03F, 0x00000E6B, + 0x033, 0x00000025, + 0x03F, 0x00000EAA, + 0x033, 0x00000026, + 0x03F, 0x00000EEA, + 0x033, 0x00000027, + 0x03F, 0x00000EED, + 0x033, 0x00000028, + 0x03F, 0x00000EF0, + 0x033, 
0x00000029, + 0x03F, 0x00000EF3, + 0x033, 0x0000002A, + 0x03F, 0x00000EF6, + 0xA0000000, 0x00000000, + 0x033, 0x00000020, + 0x03F, 0x00000E42, + 0x033, 0x00000021, + 0x03F, 0x00000E45, + 0x033, 0x00000022, + 0x03F, 0x00000E65, + 0x033, 0x00000023, + 0x03F, 0x00000E68, + 0x033, 0x00000024, + 0x03F, 0x00000EE4, + 0x033, 0x00000025, + 0x03F, 0x00000EE7, + 0x033, 0x00000026, + 0x03F, 0x00000EEA, + 0x033, 0x00000027, + 0x03F, 0x00000EED, + 0x033, 0x00000028, + 0x03F, 0x00000EF0, + 0x033, 0x00000029, + 0x03F, 0x00000EF3, + 0x033, 0x0000002A, + 0x03F, 0x00000EF6, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000001, + 0x033, 0x00000061, + 0x03F, 0x00000004, + 0x033, 0x00000062, + 0x03F, 0x00000007, + 0x033, 0x00000063, + 0x03F, 0x00000024, + 0x033, 0x00000064, + 0x03F, 0x00000027, + 0x033, 0x00000065, + 0x03F, 0x0000002A, + 0x033, 0x00000066, + 0x03F, 0x0000002D, + 0x033, 0x00000067, + 0x03F, 0x00000030, + 0x033, 0x00000068, + 0x03F, 0x00000033, + 0x033, 0x00000069, + 0x03F, 0x00000036, + 0x033, 0x0000006A, + 0x03F, 0x00000039, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000E42, + 0x033, 0x00000061, + 0x03F, 0x00000E45, + 0x033, 0x00000062, + 0x03F, 0x00000E65, + 0x033, 0x00000063, + 0x03F, 0x00000E68, + 0x033, 0x00000064, + 0x03F, 0x00000EE5, + 0x033, 0x00000065, + 0x03F, 0x00000EE8, + 0x033, 0x00000066, + 0x03F, 0x00000EEB, + 0x033, 0x00000067, + 0x03F, 0x00000EEE, + 0x033, 0x00000068, + 0x03F, 0x00000EF1, + 0x033, 0x00000069, + 0x03F, 0x00000EF4, + 0x033, 0x0000006A, + 0x03F, 0x00000EF7, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000E09, + 0x033, 0x00000061, + 0x03F, 0x00000E43, + 0x033, 0x00000062, + 0x03F, 0x00000E46, + 0x033, 0x00000063, + 0x03F, 0x00000E49, + 0x033, 0x00000064, + 0x03F, 0x00000E88, + 0x033, 0x00000065, + 0x03F, 0x00000E8B, + 0x033, 0x00000066, + 0x03F, 0x00000ECB, + 0x033, 0x00000067, + 0x03F, 0x00000ECE, + 0x033, 0x00000068, + 0x03F, 0x00000EF0, + 0x033, 0x00000069, + 0x03F, 0x00000EF3, + 0x033, 0x0000006A, + 0x03F, 0x00000EF6, + 0xA0000000, 0x00000000, + 0x033, 0x00000060, + 0x03F, 0x00000E42, + 0x033, 0x00000061, + 0x03F, 0x00000E45, + 0x033, 0x00000062, + 0x03F, 0x00000E65, + 0x033, 0x00000063, + 0x03F, 0x00000E68, + 0x033, 0x00000064, + 0x03F, 0x00000EE5, + 0x033, 0x00000065, + 0x03F, 0x00000EE8, + 0x033, 0x00000066, + 0x03F, 0x00000EEB, + 0x033, 0x00000067, + 0x03F, 0x00000EEE, + 0x033, 0x00000068, + 0x03F, 0x00000EF1, + 0x033, 0x00000069, + 0x03F, 0x00000EF4, + 0x033, 0x0000006A, + 0x03F, 0x00000EF7, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x000000A0, + 0x03F, 0x00000001, + 0x033, 0x000000A1, + 0x03F, 0x00000004, + 0x033, 0x000000A2, + 0x03F, 0x00000007, + 0x033, 0x000000A3, + 0x03F, 0x00000025, + 0x033, 0x000000A4, + 0x03F, 0x00000028, + 0x033, 0x000000A5, + 0x03F, 0x0000002B, + 0x033, 0x000000A6, + 0x03F, 0x0000002E, + 0x033, 0x000000A7, + 0x03F, 0x00000031, + 0x033, 0x000000A8, + 0x03F, 0x00000034, + 0x033, 0x000000A9, + 0x03F, 0x00000037, + 0x033, 0x000000AA, + 0x03F, 0x0000003A, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x000000A0, + 0x03F, 0x00000E09, + 0x033, 0x000000A1, + 0x03F, 0x00000E43, + 0x033, 0x000000A2, + 0x03F, 0x00000E64, + 0x033, 0x000000A3, + 0x03F, 0x00000E67, + 0x033, 0x000000A4, + 0x03F, 0x00000EE4, + 0x033, 0x000000A5, + 0x03F, 0x00000EE7, + 0x033, 0x000000A6, + 0x03F, 0x00000EEA, + 0x033, 0x000000A7, + 0x03F, 
0x00000EED, + 0x033, 0x000000A8, + 0x03F, 0x00000EF0, + 0x033, 0x000000A9, + 0x03F, 0x00000EF3, + 0x033, 0x000000AA, + 0x03F, 0x00000EF6, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x033, 0x000000A0, + 0x03F, 0x00000E08, + 0x033, 0x000000A1, + 0x03F, 0x00000E42, + 0x033, 0x000000A2, + 0x03F, 0x00000E45, + 0x033, 0x000000A3, + 0x03F, 0x00000E48, + 0x033, 0x000000A4, + 0x03F, 0x00000EA5, + 0x033, 0x000000A5, + 0x03F, 0x00000EA8, + 0x033, 0x000000A6, + 0x03F, 0x00000ECA, + 0x033, 0x000000A7, + 0x03F, 0x00000ECD, + 0x033, 0x000000A8, + 0x03F, 0x00000EEF, + 0x033, 0x000000A9, + 0x03F, 0x00000EF2, + 0x033, 0x000000AA, + 0x03F, 0x00000EF5, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x033, 0x000000A0, + 0x03F, 0x00000E09, + 0x033, 0x000000A1, + 0x03F, 0x00000E43, + 0x033, 0x000000A2, + 0x03F, 0x00000E64, + 0x033, 0x000000A3, + 0x03F, 0x00000E67, + 0x033, 0x000000A4, + 0x03F, 0x00000EE4, + 0x033, 0x000000A5, + 0x03F, 0x00000EE7, + 0x033, 0x000000A6, + 0x03F, 0x00000EEA, + 0x033, 0x000000A7, + 0x03F, 0x00000EED, + 0x033, 0x000000A8, + 0x03F, 0x00000EF0, + 0x033, 0x000000A9, + 0x03F, 0x00000EF3, + 0x033, 0x000000AA, + 0x03F, 0x00000EF6, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x0006AC00, + 0x033, 0x00000001, + 0x03F, 0x00060C00, + 0x033, 0x00000002, + 0x03F, 0x0006AC00, + 0x033, 0x00000003, + 0x03F, 0x00086A00, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x0006AC00, + 0x033, 0x00000001, + 0x03F, 0x00060C00, + 0x033, 0x00000002, + 0x03F, 0x0006AC00, + 0x033, 0x00000003, + 0x03F, 0x00086A00, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x0006AC00, + 0x033, 0x00000001, + 0x03F, 0x00060C00, + 0x033, 0x00000002, + 0x03F, 0x0006AC00, + 0x033, 0x00000003, + 0x03F, 0x00086A00, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000400, + 0x033, 0x00000000, + 0x03F, 0x0006AC00, + 0x033, 0x00000001, + 0x03F, 0x00060C00, + 0x033, 0x00000002, + 0x03F, 0x0006AC00, + 0x033, 0x00000003, + 0x03F, 0x00086A00, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000100, + 0x033, 0x00000000, + 0x03F, 0x00000040, + 0x033, 0x00000001, + 0x03F, 0x00000040, + 0x033, 0x00000002, + 0x03F, 0x00000040, + 0x033, 0x00000003, + 0x03F, 0x00000040, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000100, + 0x033, 0x00000000, + 0x03F, 0x00000040, + 0x033, 0x00000001, + 0x03F, 0x00000040, + 0x033, 0x00000002, + 0x03F, 0x00000040, + 0x033, 0x00000003, + 0x03F, 0x00000040, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000100, + 0x033, 0x00000000, + 0x03F, 0x00000040, + 0x033, 0x00000001, + 0x03F, 0x00000040, + 0x033, 0x00000002, + 0x03F, 0x00000040, + 0x033, 0x00000003, + 0x03F, 0x00000040, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000100, + 0x033, 0x00000000, + 0x03F, 0x00000040, + 0x033, 0x00000001, + 0x03F, 0x00000040, + 0x033, 0x00000002, + 0x03F, 0x00000040, + 0x033, 0x00000003, + 0x03F, 0x00000040, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00040000, + 0x033, 0x00000000, + 0x03F, 0x00086A40, + 0x033, 0x00000001, + 0x03F, 0x00086A40, + 0x033, 0x00000002, + 0x03F, 0x00086A40, + 0x033, 0x00000003, + 0x03F, 0x00086A40, + 
0x033, 0x00000004, + 0x03F, 0x00086A40, + 0x033, 0x00000005, + 0x03F, 0x00086A40, + 0x033, 0x00000006, + 0x03F, 0x00084A40, + 0x033, 0x00000007, + 0x03F, 0x00084A40, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00040000, + 0x033, 0x00000000, + 0x03F, 0x00086A40, + 0x033, 0x00000001, + 0x03F, 0x00086A40, + 0x033, 0x00000002, + 0x03F, 0x00086A40, + 0x033, 0x00000003, + 0x03F, 0x00086A40, + 0x033, 0x00000004, + 0x03F, 0x00086A40, + 0x033, 0x00000005, + 0x03F, 0x00086A40, + 0x033, 0x00000006, + 0x03F, 0x00084A40, + 0x033, 0x00000007, + 0x03F, 0x00084A40, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00040000, + 0x033, 0x00000000, + 0x03F, 0x00086A40, + 0x033, 0x00000001, + 0x03F, 0x00086A40, + 0x033, 0x00000002, + 0x03F, 0x00086A40, + 0x033, 0x00000003, + 0x03F, 0x00086A40, + 0x033, 0x00000004, + 0x03F, 0x00086A40, + 0x033, 0x00000005, + 0x03F, 0x00086A40, + 0x033, 0x00000006, + 0x03F, 0x00084A40, + 0x033, 0x00000007, + 0x03F, 0x00084A40, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00040000, + 0x033, 0x00000000, + 0x03F, 0x00086A40, + 0x033, 0x00000001, + 0x03F, 0x00086A40, + 0x033, 0x00000002, + 0x03F, 0x00086A40, + 0x033, 0x00000003, + 0x03F, 0x00086A40, + 0x033, 0x00000004, + 0x03F, 0x00086A40, + 0x033, 0x00000005, + 0x03F, 0x00086A40, + 0x033, 0x00000006, + 0x03F, 0x00084A40, + 0x033, 0x00000007, + 0x03F, 0x00084A40, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x000801A8, + 0x052, 0x000972E3, + 0x053, 0x00008069, + 0x054, 0x00030032, + 0x055, 0x00082003, + 0x056, 0x00051CCB, + 0x057, 0x0000CFC2, + 0x058, 0x00000010, + 0x059, 0x00030000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x000801A8, + 0x052, 0x000972E3, + 0x053, 0x00008069, + 0x054, 0x00030032, + 0x055, 0x00082003, + 0x056, 0x00051CCB, + 0x057, 0x0000CFC2, + 0x058, 0x00000010, + 0x059, 0x00030000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x000801A8, + 0x052, 0x000972E3, + 0x053, 0x00008069, + 0x054, 0x00030032, + 0x055, 0x00082003, + 0x056, 0x00051CCB, + 0x057, 0x0000CFC2, + 0x058, 0x00000010, + 0x059, 0x00030000, + 0xA0000000, 0x00000000, + 0x051, 0x000801A8, + 0x052, 0x000972E3, + 0x053, 0x00008069, + 0x054, 0x00030032, + 0x055, 0x00082003, + 0x056, 0x00051CCB, + 0x057, 0x0000CFC2, + 0x058, 0x00000010, + 0x059, 0x00030000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0x033, 0x00000000, + 0x03F, 0x00051429, + 0x033, 0x00000001, + 0x03F, 0x00051449, + 0x033, 0x00000002, + 0x03F, 0x0005144C, + 0x033, 0x00000003, + 0x03F, 0x00051C66, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051CE8, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0x033, 0x00000000, + 0x03F, 0x00051429, + 0x033, 0x00000001, + 0x03F, 0x00051449, + 0x033, 0x00000002, + 0x03F, 0x0005144C, + 0x033, 0x00000003, + 0x03F, 0x00051C66, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051CE8, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EF, 0x00000000, + 0x90000400, 
0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000800, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C67, + 0x033, 0x00000005, + 0x03F, 0x00051C6A, + 0x033, 0x00000006, + 0x03F, 0x00051C8B, + 0x033, 0x00000007, + 0x03F, 0x00051CE9, + 0x033, 0x00000008, + 0x03F, 0x00051CEC, + 0x033, 0x00000009, + 0x03F, 0x00051CEF, + 0x033, 0x0000000A, + 0x03F, 0x00051CF2, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000800, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C67, + 0x033, 0x00000005, + 0x03F, 0x00051C6A, + 0x033, 0x00000006, + 0x03F, 0x00051C8B, + 0x033, 0x00000007, + 0x03F, 0x00051CE9, + 0x033, 0x00000008, + 0x03F, 0x00051CEC, + 0x033, 0x00000009, + 0x03F, 0x00051CEF, + 0x033, 0x0000000A, + 0x03F, 0x00051CF2, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00086E00, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00086E00, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00086E00, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00004000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00086E00, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x00000000, + 0x033, 0x00000001, + 0x03F, 0x00000000, + 0x033, 0x00000002, + 0x03F, 0x00000000, + 0x033, 0x00000003, + 0x03F, 0x00000000, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x00000000, + 0x033, 0x00000001, + 0x03F, 0x00000000, + 0x033, 0x00000002, + 0x03F, 0x00000000, + 0x033, 0x00000003, + 0x03F, 0x00000000, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x00000000, + 0x033, 0x00000001, + 0x03F, 0x00000000, + 0x033, 0x00000002, + 0x03F, 0x00000000, + 0x033, 0x00000003, + 0x03F, 0x00000000, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00002000, + 0x033, 0x00000000, + 0x03F, 0x00000000, + 0x033, 0x00000001, + 0x03F, 0x00000000, + 0x033, 0x00000002, + 0x03F, 0x00000000, + 0x033, 0x00000003, + 0x03F, 0x00000000, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00080000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00048400, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x033, 0x00000004, + 0x03F, 0x00048400, + 0x033, 0x00000005, + 0x03F, 0x00048400, + 0x033, 0x00000006, + 0x03F, 0x00048400, + 0x033, 
0x00000007, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00080000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00048400, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x033, 0x00000004, + 0x03F, 0x00048400, + 0x033, 0x00000005, + 0x03F, 0x00048400, + 0x033, 0x00000006, + 0x03F, 0x00048400, + 0x033, 0x00000007, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00080000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00048400, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x033, 0x00000004, + 0x03F, 0x00048400, + 0x033, 0x00000005, + 0x03F, 0x00048400, + 0x033, 0x00000006, + 0x03F, 0x00048400, + 0x033, 0x00000007, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00080000, + 0x033, 0x00000000, + 0x03F, 0x00048400, + 0x033, 0x00000001, + 0x03F, 0x00048400, + 0x033, 0x00000002, + 0x03F, 0x00048400, + 0x033, 0x00000003, + 0x03F, 0x00048400, + 0x033, 0x00000004, + 0x03F, 0x00048400, + 0x033, 0x00000005, + 0x03F, 0x00048400, + 0x033, 0x00000006, + 0x03F, 0x00048400, + 0x033, 0x00000007, + 0x03F, 0x00048400, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x070, 0x00008000, + 0x075, 0x000027DA, + 0x076, 0x00006997, + 0x077, 0x00070418, + 0x078, 0x000BB000, + 0x07D, 0x00007600, + 0x07F, 0x00000000, + 0x06A, 0x000F4C00, + 0x065, 0x00082030, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x070, 0x00008000, + 0x075, 0x000027DA, + 0x076, 0x00006997, + 0x077, 0x00070418, + 0x078, 0x000BB000, + 0x07D, 0x00007600, + 0x07F, 0x00000000, + 0x06A, 0x000F4C00, + 0x065, 0x00082030, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x070, 0x00008000, + 0x075, 0x000027DA, + 0x076, 0x00006997, + 0x077, 0x00070418, + 0x078, 0x000BB000, + 0x07D, 0x00007600, + 0x07F, 0x00000000, + 0x06A, 0x000F4C00, + 0x065, 0x00082030, + 0xA0000000, 0x00000000, + 0x070, 0x00008000, + 0x075, 0x000027DA, + 0x076, 0x00006997, + 0x077, 0x00070418, + 0x078, 0x000BB000, + 0x07D, 0x00007600, + 0x07F, 0x00000000, + 0x06A, 0x000F4C00, + 0x065, 0x00082030, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00008000, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051C8D, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00008000, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051C8D, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00008000, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 
0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051C8D, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00008000, + 0x033, 0x00000000, + 0x03F, 0x00051427, + 0x033, 0x00000001, + 0x03F, 0x00051446, + 0x033, 0x00000002, + 0x03F, 0x00051449, + 0x033, 0x00000003, + 0x03F, 0x0005144C, + 0x033, 0x00000004, + 0x03F, 0x00051C69, + 0x033, 0x00000005, + 0x03F, 0x00051C6C, + 0x033, 0x00000006, + 0x03F, 0x00051C8D, + 0x033, 0x00000007, + 0x03F, 0x00051CEB, + 0x033, 0x00000008, + 0x03F, 0x00051CEE, + 0x033, 0x00000009, + 0x03F, 0x00051CF1, + 0x033, 0x0000000A, + 0x03F, 0x00051CF4, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000010, + 0x033, 0x00000000, + 0x008, 0x0009C060, + 0x033, 0x00000001, + 0x008, 0x0009C060, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000010, + 0x033, 0x00000000, + 0x008, 0x0009C060, + 0x033, 0x00000001, + 0x008, 0x0009C060, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00000010, + 0x033, 0x00000000, + 0x008, 0x0009C060, + 0x033, 0x00000001, + 0x008, 0x0009C060, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00000010, + 0x033, 0x00000000, + 0x008, 0x0009C060, + 0x033, 0x00000001, + 0x008, 0x0009C060, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000024, + 0x03E, 0x0000003F, + 0x03F, 0x00060FDE, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000024, + 0x03E, 0x0000003F, + 0x03F, 0x00060FDE, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000024, + 0x03E, 0x0000003F, + 0x03F, 0x00060FDE, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000024, + 0x03E, 0x0000003F, + 0x03F, 0x00060FDE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000025, + 0x03E, 0x00000037, + 0x03F, 0x0007EFCE, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000025, + 0x03E, 0x00000037, + 0x03F, 0x0007EFCE, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000025, + 0x03E, 0x00000037, + 0x03F, 0x0007EFCE, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000025, + 0x03E, 0x00000037, + 0x03F, 0x0007EFCE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000026, + 0x03E, 0x00000037, + 0x03F, 0x0005EFCE, + 0x0EF, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000026, + 0x03E, 0x00000037, + 0x03F, 0x0005EFCE, + 0x0EF, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000026, + 0x03E, 0x00000037, + 0x03F, 0x0005EFCE, + 0x0EF, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EF, 0x00080000, + 0x033, 0x00000026, + 0x03E, 0x00000037, + 0x03F, 0x0005EFCE, + 0x0EF, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 
0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000004, + 0x03F, 0x00001EC1, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000004, + 0x03F, 0x00001EC1, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000004, + 0x03F, 0x00001EC1, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000004, + 0x03F, 0x00001EC1, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000005, + 0x03F, 0x00001ECF, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000005, + 0x03F, 0x00001ECF, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000005, + 0x03F, 0x00001ECF, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000005, + 0x03F, 0x00001ECF, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + 0x80001005, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000006, + 0x03F, 0x00001F9D, + 0x0EE, 0x00000000, + 0x90001004, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000006, + 0x03F, 0x00001F9D, + 0x0EE, 0x00000000, + 0x90000400, 0x00000000, 0x40000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000006, + 0x03F, 0x00001F9D, + 0x0EE, 0x00000000, + 0xA0000000, 0x00000000, + 0x0EE, 0x00001000, + 0x033, 0x00000006, + 0x03F, 0x00001F9D, + 0x0EE, 0x00000000, + 0xB0000000, 0x00000000, + +}; + +RTW_DECL_TABLE_RF_RADIO(rtw8821c_rf_a, A); + +static const struct rtw_txpwr_lmt_cfg_pair rtw8821c_txpwr_lmt_type0[] = { + { 0, 0, 0, 0, 1, 30, }, + { 2, 0, 0, 0, 1, 30, }, + { 0, 0, 0, 0, 2, 32, }, + { 2, 0, 0, 0, 2, 30, }, + { 0, 0, 0, 0, 3, 32, }, + { 2, 0, 0, 0, 3, 30, }, + { 0, 0, 0, 0, 4, 32, }, + { 2, 0, 0, 0, 4, 30, }, + { 0, 0, 0, 0, 5, 32, }, + { 2, 0, 0, 0, 5, 30, }, + { 0, 0, 0, 0, 6, 32, }, + { 2, 0, 0, 0, 6, 30, }, + { 0, 0, 0, 0, 7, 32, }, + { 2, 0, 0, 0, 7, 30, }, + { 0, 0, 0, 0, 8, 32, }, + { 2, 0, 0, 0, 8, 30, }, + { 0, 0, 0, 0, 9, 32, }, + { 2, 0, 0, 0, 9, 30, }, + { 0, 0, 0, 0, 10, 32, }, + { 2, 0, 0, 0, 10, 30, }, + { 0, 0, 0, 0, 11, 32, }, + { 2, 0, 0, 0, 11, 30, }, + { 0, 0, 0, 0, 12, 24, }, + { 2, 0, 0, 0, 12, 30, }, + { 0, 0, 0, 0, 13, 16, }, + { 2, 0, 0, 0, 13, 30, }, + { 0, 0, 0, 0, 14, 63, }, + { 2, 0, 0, 0, 14, 63, }, + { 0, 0, 0, 1, 1, 30, }, + { 2, 0, 0, 1, 1, 30, }, + { 0, 0, 0, 1, 2, 32, }, + { 2, 0, 0, 1, 2, 30, }, + { 0, 0, 0, 1, 3, 34, }, + { 2, 0, 0, 1, 3, 30, }, + { 0, 0, 0, 1, 4, 34, }, + { 2, 0, 0, 1, 4, 30, }, + { 0, 0, 0, 1, 5, 34, }, + { 2, 0, 0, 1, 5, 30, }, + { 0, 0, 0, 1, 6, 34, }, + { 2, 0, 0, 1, 6, 30, }, + { 0, 0, 0, 1, 7, 34, }, + { 2, 0, 0, 1, 7, 30, }, + { 0, 0, 0, 1, 8, 34, }, + { 2, 0, 0, 1, 8, 30, }, + { 0, 0, 0, 1, 9, 34, }, + { 2, 0, 0, 1, 9, 30, }, + { 0, 0, 0, 1, 10, 32, }, + { 2, 0, 0, 1, 10, 30, }, + { 0, 0, 0, 1, 11, 30, }, + { 2, 0, 0, 1, 11, 30, }, + { 0, 0, 0, 1, 12, 28, }, + { 2, 0, 0, 1, 12, 30, }, + { 0, 0, 0, 1, 13, 16, }, + { 2, 0, 0, 1, 13, 30, }, + { 0, 0, 0, 1, 14, 63, }, + { 2, 0, 0, 1, 14, 63, }, + { 0, 0, 0, 2, 1, 26, }, + { 2, 0, 0, 2, 1, 30, }, + { 0, 0, 0, 2, 2, 30, }, + { 2, 0, 0, 2, 2, 30, }, + { 0, 0, 0, 2, 3, 32, }, + { 2, 0, 0, 2, 3, 30, }, + { 0, 0, 0, 2, 4, 34, }, + { 2, 0, 0, 2, 4, 30, }, + { 0, 0, 0, 2, 5, 34, }, + { 2, 0, 0, 2, 5, 30, }, + { 0, 0, 0, 2, 6, 34, }, + { 2, 0, 0, 2, 6, 30, }, + { 0, 0, 0, 2, 7, 34, }, + { 2, 0, 
0, 2, 7, 30, }, + { 0, 0, 0, 2, 8, 34, }, + { 2, 0, 0, 2, 8, 30, }, + { 0, 0, 0, 2, 9, 32, }, + { 2, 0, 0, 2, 9, 30, }, + { 0, 0, 0, 2, 10, 30, }, + { 2, 0, 0, 2, 10, 30, }, + { 0, 0, 0, 2, 11, 28, }, + { 2, 0, 0, 2, 11, 30, }, + { 0, 0, 0, 2, 12, 26, }, + { 2, 0, 0, 2, 12, 30, }, + { 0, 0, 0, 2, 13, 12, }, + { 2, 0, 0, 2, 13, 30, }, + { 0, 0, 0, 2, 14, 63, }, + { 2, 0, 0, 2, 14, 63, }, + { 0, 0, 1, 2, 1, 63, }, + { 2, 0, 1, 2, 1, 63, }, + { 0, 0, 1, 2, 2, 63, }, + { 2, 0, 1, 2, 2, 63, }, + { 0, 0, 1, 2, 3, 26, }, + { 2, 0, 1, 2, 3, 30, }, + { 0, 0, 1, 2, 4, 26, }, + { 2, 0, 1, 2, 4, 30, }, + { 0, 0, 1, 2, 5, 30, }, + { 2, 0, 1, 2, 5, 30, }, + { 0, 0, 1, 2, 6, 30, }, + { 2, 0, 1, 2, 6, 30, }, + { 0, 0, 1, 2, 7, 30, }, + { 2, 0, 1, 2, 7, 30, }, + { 0, 0, 1, 2, 8, 26, }, + { 2, 0, 1, 2, 8, 30, }, + { 0, 0, 1, 2, 9, 26, }, + { 2, 0, 1, 2, 9, 30, }, + { 0, 0, 1, 2, 10, 28, }, + { 2, 0, 1, 2, 10, 30, }, + { 0, 0, 1, 2, 11, 20, }, + { 2, 0, 1, 2, 11, 30, }, + { 0, 0, 1, 2, 12, 63, }, + { 2, 0, 1, 2, 12, 63, }, + { 0, 0, 1, 2, 13, 63, }, + { 2, 0, 1, 2, 13, 63, }, + { 0, 0, 1, 2, 14, 63, }, + { 2, 0, 1, 2, 14, 63, }, + { 0, 1, 0, 1, 36, 31, }, + { 2, 1, 0, 1, 36, 32, }, + { 0, 1, 0, 1, 40, 33, }, + { 2, 1, 0, 1, 40, 32, }, + { 0, 1, 0, 1, 44, 33, }, + { 2, 1, 0, 1, 44, 32, }, + { 0, 1, 0, 1, 48, 31, }, + { 2, 1, 0, 1, 48, 32, }, + { 0, 1, 0, 1, 52, 33, }, + { 2, 1, 0, 1, 52, 32, }, + { 0, 1, 0, 1, 56, 33, }, + { 2, 1, 0, 1, 56, 32, }, + { 0, 1, 0, 1, 60, 33, }, + { 2, 1, 0, 1, 60, 32, }, + { 0, 1, 0, 1, 64, 30, }, + { 2, 1, 0, 1, 64, 32, }, + { 0, 1, 0, 1, 100, 30, }, + { 2, 1, 0, 1, 100, 32, }, + { 0, 1, 0, 1, 104, 33, }, + { 2, 1, 0, 1, 104, 32, }, + { 0, 1, 0, 1, 108, 33, }, + { 2, 1, 0, 1, 108, 32, }, + { 0, 1, 0, 1, 112, 33, }, + { 2, 1, 0, 1, 112, 32, }, + { 0, 1, 0, 1, 116, 33, }, + { 2, 1, 0, 1, 116, 32, }, + { 0, 1, 0, 1, 120, 33, }, + { 2, 1, 0, 1, 120, 32, }, + { 0, 1, 0, 1, 124, 33, }, + { 2, 1, 0, 1, 124, 32, }, + { 0, 1, 0, 1, 128, 33, }, + { 2, 1, 0, 1, 128, 32, }, + { 0, 1, 0, 1, 132, 33, }, + { 2, 1, 0, 1, 132, 32, }, + { 0, 1, 0, 1, 136, 33, }, + { 2, 1, 0, 1, 136, 32, }, + { 0, 1, 0, 1, 140, 31, }, + { 2, 1, 0, 1, 140, 32, }, + { 0, 1, 0, 1, 144, 30, }, + { 2, 1, 0, 1, 144, 63, }, + { 0, 1, 0, 1, 149, 33, }, + { 2, 1, 0, 1, 149, 63, }, + { 0, 1, 0, 1, 153, 33, }, + { 2, 1, 0, 1, 153, 63, }, + { 0, 1, 0, 1, 157, 33, }, + { 2, 1, 0, 1, 157, 63, }, + { 0, 1, 0, 1, 161, 33, }, + { 2, 1, 0, 1, 161, 63, }, + { 0, 1, 0, 1, 165, 33, }, + { 2, 1, 0, 1, 165, 63, }, + { 0, 1, 0, 2, 36, 30, }, + { 2, 1, 0, 2, 36, 32, }, + { 0, 1, 0, 2, 40, 33, }, + { 2, 1, 0, 2, 40, 32, }, + { 0, 1, 0, 2, 44, 33, }, + { 2, 1, 0, 2, 44, 32, }, + { 0, 1, 0, 2, 48, 33, }, + { 2, 1, 0, 2, 48, 32, }, + { 0, 1, 0, 2, 52, 33, }, + { 2, 1, 0, 2, 52, 32, }, + { 0, 1, 0, 2, 56, 33, }, + { 2, 1, 0, 2, 56, 32, }, + { 0, 1, 0, 2, 60, 33, }, + { 2, 1, 0, 2, 60, 32, }, + { 0, 1, 0, 2, 64, 30, }, + { 2, 1, 0, 2, 64, 32, }, + { 0, 1, 0, 2, 100, 30, }, + { 2, 1, 0, 2, 100, 32, }, + { 0, 1, 0, 2, 104, 33, }, + { 2, 1, 0, 2, 104, 32, }, + { 0, 1, 0, 2, 108, 33, }, + { 2, 1, 0, 2, 108, 32, }, + { 0, 1, 0, 2, 112, 33, }, + { 2, 1, 0, 2, 112, 32, }, + { 0, 1, 0, 2, 116, 33, }, + { 2, 1, 0, 2, 116, 32, }, + { 0, 1, 0, 2, 120, 33, }, + { 2, 1, 0, 2, 120, 32, }, + { 0, 1, 0, 2, 124, 33, }, + { 2, 1, 0, 2, 124, 32, }, + { 0, 1, 0, 2, 128, 33, }, + { 2, 1, 0, 2, 128, 32, }, + { 0, 1, 0, 2, 132, 33, }, + { 2, 1, 0, 2, 132, 32, }, + { 0, 1, 0, 2, 136, 33, }, + { 2, 1, 0, 2, 136, 32, }, + { 0, 1, 0, 2, 140, 29, }, + { 2, 1, 0, 2, 140, 
32, }, + { 0, 1, 0, 2, 144, 27, }, + { 2, 1, 0, 2, 144, 63, }, + { 0, 1, 0, 2, 149, 33, }, + { 2, 1, 0, 2, 149, 63, }, + { 0, 1, 0, 2, 153, 33, }, + { 2, 1, 0, 2, 153, 63, }, + { 0, 1, 0, 2, 157, 33, }, + { 2, 1, 0, 2, 157, 63, }, + { 0, 1, 0, 2, 161, 33, }, + { 2, 1, 0, 2, 161, 63, }, + { 0, 1, 0, 2, 165, 33, }, + { 2, 1, 0, 2, 165, 63, }, + { 0, 1, 1, 2, 38, 22, }, + { 2, 1, 1, 2, 38, 32, }, + { 0, 1, 1, 2, 46, 32, }, + { 2, 1, 1, 2, 46, 32, }, + { 0, 1, 1, 2, 54, 32, }, + { 2, 1, 1, 2, 54, 32, }, + { 0, 1, 1, 2, 62, 23, }, + { 2, 1, 1, 2, 62, 32, }, + { 0, 1, 1, 2, 102, 21, }, + { 2, 1, 1, 2, 102, 32, }, + { 0, 1, 1, 2, 110, 32, }, + { 2, 1, 1, 2, 110, 32, }, + { 0, 1, 1, 2, 118, 32, }, + { 2, 1, 1, 2, 118, 32, }, + { 0, 1, 1, 2, 126, 32, }, + { 2, 1, 1, 2, 126, 32, }, + { 0, 1, 1, 2, 134, 32, }, + { 2, 1, 1, 2, 134, 32, }, + { 0, 1, 1, 2, 142, 29, }, + { 2, 1, 1, 2, 142, 63, }, + { 0, 1, 1, 2, 151, 32, }, + { 2, 1, 1, 2, 151, 63, }, + { 0, 1, 1, 2, 159, 32, }, + { 2, 1, 1, 2, 159, 63, }, + { 0, 1, 2, 4, 42, 19, }, + { 2, 1, 2, 4, 42, 32, }, + { 0, 1, 2, 4, 58, 22, }, + { 2, 1, 2, 4, 58, 32, }, + { 0, 1, 2, 4, 106, 18, }, + { 2, 1, 2, 4, 106, 32, }, + { 0, 1, 2, 4, 122, 32, }, + { 2, 1, 2, 4, 122, 32, }, + { 0, 1, 2, 4, 138, 28, }, + { 2, 1, 2, 4, 138, 63, }, + { 0, 1, 2, 4, 155, 32, }, + { 2, 1, 2, 4, 155, 63, }, + { 1, 0, 0, 0, 1, 34, }, + { 3, 0, 0, 0, 1, 30, }, + { 4, 0, 0, 0, 1, 34, }, + { 5, 0, 0, 0, 1, 30, }, + { 6, 0, 0, 0, 1, 30, }, + { 7, 0, 0, 0, 1, 30, }, + { 1, 0, 0, 0, 2, 34, }, + { 3, 0, 0, 0, 2, 32, }, + { 4, 0, 0, 0, 2, 34, }, + { 5, 0, 0, 0, 2, 30, }, + { 6, 0, 0, 0, 2, 32, }, + { 7, 0, 0, 0, 2, 30, }, + { 1, 0, 0, 0, 3, 34, }, + { 3, 0, 0, 0, 3, 32, }, + { 4, 0, 0, 0, 3, 34, }, + { 5, 0, 0, 0, 3, 30, }, + { 6, 0, 0, 0, 3, 32, }, + { 7, 0, 0, 0, 3, 30, }, + { 1, 0, 0, 0, 4, 34, }, + { 3, 0, 0, 0, 4, 32, }, + { 4, 0, 0, 0, 4, 34, }, + { 5, 0, 0, 0, 4, 30, }, + { 6, 0, 0, 0, 4, 32, }, + { 7, 0, 0, 0, 4, 30, }, + { 1, 0, 0, 0, 5, 34, }, + { 3, 0, 0, 0, 5, 32, }, + { 4, 0, 0, 0, 5, 34, }, + { 5, 0, 0, 0, 5, 30, }, + { 6, 0, 0, 0, 5, 32, }, + { 7, 0, 0, 0, 5, 30, }, + { 1, 0, 0, 0, 6, 34, }, + { 3, 0, 0, 0, 6, 32, }, + { 4, 0, 0, 0, 6, 34, }, + { 5, 0, 0, 0, 6, 30, }, + { 6, 0, 0, 0, 6, 32, }, + { 7, 0, 0, 0, 6, 30, }, + { 1, 0, 0, 0, 7, 34, }, + { 3, 0, 0, 0, 7, 32, }, + { 4, 0, 0, 0, 7, 34, }, + { 5, 0, 0, 0, 7, 30, }, + { 6, 0, 0, 0, 7, 32, }, + { 7, 0, 0, 0, 7, 30, }, + { 1, 0, 0, 0, 8, 34, }, + { 3, 0, 0, 0, 8, 32, }, + { 4, 0, 0, 0, 8, 34, }, + { 5, 0, 0, 0, 8, 30, }, + { 6, 0, 0, 0, 8, 32, }, + { 7, 0, 0, 0, 8, 30, }, + { 1, 0, 0, 0, 9, 34, }, + { 3, 0, 0, 0, 9, 32, }, + { 4, 0, 0, 0, 9, 34, }, + { 5, 0, 0, 0, 9, 30, }, + { 6, 0, 0, 0, 9, 32, }, + { 7, 0, 0, 0, 9, 30, }, + { 1, 0, 0, 0, 10, 34, }, + { 3, 0, 0, 0, 10, 32, }, + { 4, 0, 0, 0, 10, 34, }, + { 5, 0, 0, 0, 10, 30, }, + { 6, 0, 0, 0, 10, 32, }, + { 7, 0, 0, 0, 10, 30, }, + { 1, 0, 0, 0, 11, 34, }, + { 3, 0, 0, 0, 11, 32, }, + { 4, 0, 0, 0, 11, 34, }, + { 5, 0, 0, 0, 11, 30, }, + { 6, 0, 0, 0, 11, 32, }, + { 7, 0, 0, 0, 11, 30, }, + { 1, 0, 0, 0, 12, 34, }, + { 3, 0, 0, 0, 12, 24, }, + { 4, 0, 0, 0, 12, 34, }, + { 5, 0, 0, 0, 12, 30, }, + { 6, 0, 0, 0, 12, 24, }, + { 7, 0, 0, 0, 12, 30, }, + { 1, 0, 0, 0, 13, 34, }, + { 3, 0, 0, 0, 13, 16, }, + { 4, 0, 0, 0, 13, 34, }, + { 5, 0, 0, 0, 13, 30, }, + { 6, 0, 0, 0, 13, 16, }, + { 7, 0, 0, 0, 13, 30, }, + { 1, 0, 0, 0, 14, 34, }, + { 3, 0, 0, 0, 14, 63, }, + { 4, 0, 0, 0, 14, 63, }, + { 5, 0, 0, 0, 14, 63, }, + { 6, 0, 0, 0, 14, 63, }, + { 7, 0, 0, 0, 14, 63, }, + 
{ 1, 0, 0, 1, 1, 34, }, + { 3, 0, 0, 1, 1, 30, }, + { 4, 0, 0, 1, 1, 32, }, + { 5, 0, 0, 1, 1, 30, }, + { 6, 0, 0, 1, 1, 30, }, + { 7, 0, 0, 1, 1, 30, }, + { 1, 0, 0, 1, 2, 34, }, + { 3, 0, 0, 1, 2, 32, }, + { 4, 0, 0, 1, 2, 34, }, + { 5, 0, 0, 1, 2, 30, }, + { 6, 0, 0, 1, 2, 32, }, + { 7, 0, 0, 1, 2, 30, }, + { 1, 0, 0, 1, 3, 34, }, + { 3, 0, 0, 1, 3, 34, }, + { 4, 0, 0, 1, 3, 34, }, + { 5, 0, 0, 1, 3, 30, }, + { 6, 0, 0, 1, 3, 34, }, + { 7, 0, 0, 1, 3, 30, }, + { 1, 0, 0, 1, 4, 34, }, + { 3, 0, 0, 1, 4, 34, }, + { 4, 0, 0, 1, 4, 34, }, + { 5, 0, 0, 1, 4, 30, }, + { 6, 0, 0, 1, 4, 34, }, + { 7, 0, 0, 1, 4, 30, }, + { 1, 0, 0, 1, 5, 34, }, + { 3, 0, 0, 1, 5, 34, }, + { 4, 0, 0, 1, 5, 34, }, + { 5, 0, 0, 1, 5, 30, }, + { 6, 0, 0, 1, 5, 34, }, + { 7, 0, 0, 1, 5, 30, }, + { 1, 0, 0, 1, 6, 34, }, + { 3, 0, 0, 1, 6, 34, }, + { 4, 0, 0, 1, 6, 34, }, + { 5, 0, 0, 1, 6, 30, }, + { 6, 0, 0, 1, 6, 34, }, + { 7, 0, 0, 1, 6, 30, }, + { 1, 0, 0, 1, 7, 34, }, + { 3, 0, 0, 1, 7, 34, }, + { 4, 0, 0, 1, 7, 34, }, + { 5, 0, 0, 1, 7, 30, }, + { 6, 0, 0, 1, 7, 34, }, + { 7, 0, 0, 1, 7, 30, }, + { 1, 0, 0, 1, 8, 34, }, + { 3, 0, 0, 1, 8, 34, }, + { 4, 0, 0, 1, 8, 34, }, + { 5, 0, 0, 1, 8, 30, }, + { 6, 0, 0, 1, 8, 34, }, + { 7, 0, 0, 1, 8, 30, }, + { 1, 0, 0, 1, 9, 34, }, + { 3, 0, 0, 1, 9, 34, }, + { 4, 0, 0, 1, 9, 34, }, + { 5, 0, 0, 1, 9, 30, }, + { 6, 0, 0, 1, 9, 34, }, + { 7, 0, 0, 1, 9, 30, }, + { 1, 0, 0, 1, 10, 34, }, + { 3, 0, 0, 1, 10, 32, }, + { 4, 0, 0, 1, 10, 34, }, + { 5, 0, 0, 1, 10, 30, }, + { 6, 0, 0, 1, 10, 32, }, + { 7, 0, 0, 1, 10, 30, }, + { 1, 0, 0, 1, 11, 34, }, + { 3, 0, 0, 1, 11, 30, }, + { 4, 0, 0, 1, 11, 34, }, + { 5, 0, 0, 1, 11, 30, }, + { 6, 0, 0, 1, 11, 30, }, + { 7, 0, 0, 1, 11, 30, }, + { 1, 0, 0, 1, 12, 34, }, + { 3, 0, 0, 1, 12, 28, }, + { 4, 0, 0, 1, 12, 34, }, + { 5, 0, 0, 1, 12, 30, }, + { 6, 0, 0, 1, 12, 28, }, + { 7, 0, 0, 1, 12, 30, }, + { 1, 0, 0, 1, 13, 34, }, + { 3, 0, 0, 1, 13, 16, }, + { 4, 0, 0, 1, 13, 32, }, + { 5, 0, 0, 1, 13, 30, }, + { 6, 0, 0, 1, 13, 16, }, + { 7, 0, 0, 1, 13, 30, }, + { 1, 0, 0, 1, 14, 63, }, + { 3, 0, 0, 1, 14, 63, }, + { 4, 0, 0, 1, 14, 63, }, + { 5, 0, 0, 1, 14, 63, }, + { 6, 0, 0, 1, 14, 63, }, + { 7, 0, 0, 1, 14, 63, }, + { 1, 0, 0, 2, 1, 34, }, + { 3, 0, 0, 2, 1, 26, }, + { 4, 0, 0, 2, 1, 32, }, + { 5, 0, 0, 2, 1, 30, }, + { 6, 0, 0, 2, 1, 26, }, + { 7, 0, 0, 2, 1, 30, }, + { 1, 0, 0, 2, 2, 34, }, + { 3, 0, 0, 2, 2, 30, }, + { 4, 0, 0, 2, 2, 34, }, + { 5, 0, 0, 2, 2, 30, }, + { 6, 0, 0, 2, 2, 30, }, + { 7, 0, 0, 2, 2, 30, }, + { 1, 0, 0, 2, 3, 34, }, + { 3, 0, 0, 2, 3, 32, }, + { 4, 0, 0, 2, 3, 34, }, + { 5, 0, 0, 2, 3, 30, }, + { 6, 0, 0, 2, 3, 32, }, + { 7, 0, 0, 2, 3, 30, }, + { 1, 0, 0, 2, 4, 34, }, + { 3, 0, 0, 2, 4, 34, }, + { 4, 0, 0, 2, 4, 34, }, + { 5, 0, 0, 2, 4, 30, }, + { 6, 0, 0, 2, 4, 34, }, + { 7, 0, 0, 2, 4, 30, }, + { 1, 0, 0, 2, 5, 34, }, + { 3, 0, 0, 2, 5, 34, }, + { 4, 0, 0, 2, 5, 34, }, + { 5, 0, 0, 2, 5, 30, }, + { 6, 0, 0, 2, 5, 34, }, + { 7, 0, 0, 2, 5, 30, }, + { 1, 0, 0, 2, 6, 34, }, + { 3, 0, 0, 2, 6, 34, }, + { 4, 0, 0, 2, 6, 34, }, + { 5, 0, 0, 2, 6, 30, }, + { 6, 0, 0, 2, 6, 34, }, + { 7, 0, 0, 2, 6, 30, }, + { 1, 0, 0, 2, 7, 34, }, + { 3, 0, 0, 2, 7, 34, }, + { 4, 0, 0, 2, 7, 34, }, + { 5, 0, 0, 2, 7, 30, }, + { 6, 0, 0, 2, 7, 34, }, + { 7, 0, 0, 2, 7, 30, }, + { 1, 0, 0, 2, 8, 34, }, + { 3, 0, 0, 2, 8, 34, }, + { 4, 0, 0, 2, 8, 34, }, + { 5, 0, 0, 2, 8, 30, }, + { 6, 0, 0, 2, 8, 34, }, + { 7, 0, 0, 2, 8, 30, }, + { 1, 0, 0, 2, 9, 34, }, + { 3, 0, 0, 2, 9, 32, }, + { 4, 0, 0, 2, 9, 34, }, + { 5, 0, 0, 2, 
9, 30, }, + { 6, 0, 0, 2, 9, 32, }, + { 7, 0, 0, 2, 9, 30, }, + { 1, 0, 0, 2, 10, 34, }, + { 3, 0, 0, 2, 10, 30, }, + { 4, 0, 0, 2, 10, 34, }, + { 5, 0, 0, 2, 10, 30, }, + { 6, 0, 0, 2, 10, 30, }, + { 7, 0, 0, 2, 10, 30, }, + { 1, 0, 0, 2, 11, 34, }, + { 3, 0, 0, 2, 11, 28, }, + { 4, 0, 0, 2, 11, 34, }, + { 5, 0, 0, 2, 11, 30, }, + { 6, 0, 0, 2, 11, 28, }, + { 7, 0, 0, 2, 11, 30, }, + { 1, 0, 0, 2, 12, 34, }, + { 3, 0, 0, 2, 12, 26, }, + { 4, 0, 0, 2, 12, 34, }, + { 5, 0, 0, 2, 12, 30, }, + { 6, 0, 0, 2, 12, 26, }, + { 7, 0, 0, 2, 12, 30, }, + { 1, 0, 0, 2, 13, 34, }, + { 3, 0, 0, 2, 13, 12, }, + { 4, 0, 0, 2, 13, 32, }, + { 5, 0, 0, 2, 13, 30, }, + { 6, 0, 0, 2, 13, 12, }, + { 7, 0, 0, 2, 13, 30, }, + { 1, 0, 0, 2, 14, 63, }, + { 3, 0, 0, 2, 14, 63, }, + { 4, 0, 0, 2, 14, 63, }, + { 5, 0, 0, 2, 14, 63, }, + { 6, 0, 0, 2, 14, 63, }, + { 7, 0, 0, 2, 14, 63, }, + { 1, 0, 1, 2, 1, 63, }, + { 3, 0, 1, 2, 1, 63, }, + { 4, 0, 1, 2, 1, 63, }, + { 5, 0, 1, 2, 1, 63, }, + { 6, 0, 1, 2, 1, 63, }, + { 7, 0, 1, 2, 1, 63, }, + { 1, 0, 1, 2, 2, 63, }, + { 3, 0, 1, 2, 2, 63, }, + { 4, 0, 1, 2, 2, 63, }, + { 5, 0, 1, 2, 2, 63, }, + { 6, 0, 1, 2, 2, 63, }, + { 7, 0, 1, 2, 2, 63, }, + { 1, 0, 1, 2, 3, 30, }, + { 3, 0, 1, 2, 3, 26, }, + { 4, 0, 1, 2, 3, 30, }, + { 5, 0, 1, 2, 3, 30, }, + { 6, 0, 1, 2, 3, 26, }, + { 7, 0, 1, 2, 3, 30, }, + { 1, 0, 1, 2, 4, 30, }, + { 3, 0, 1, 2, 4, 26, }, + { 4, 0, 1, 2, 4, 30, }, + { 5, 0, 1, 2, 4, 30, }, + { 6, 0, 1, 2, 4, 26, }, + { 7, 0, 1, 2, 4, 30, }, + { 1, 0, 1, 2, 5, 30, }, + { 3, 0, 1, 2, 5, 30, }, + { 4, 0, 1, 2, 5, 30, }, + { 5, 0, 1, 2, 5, 30, }, + { 6, 0, 1, 2, 5, 30, }, + { 7, 0, 1, 2, 5, 30, }, + { 1, 0, 1, 2, 6, 30, }, + { 3, 0, 1, 2, 6, 30, }, + { 4, 0, 1, 2, 6, 30, }, + { 5, 0, 1, 2, 6, 30, }, + { 6, 0, 1, 2, 6, 30, }, + { 7, 0, 1, 2, 6, 30, }, + { 1, 0, 1, 2, 7, 30, }, + { 3, 0, 1, 2, 7, 30, }, + { 4, 0, 1, 2, 7, 30, }, + { 5, 0, 1, 2, 7, 30, }, + { 6, 0, 1, 2, 7, 30, }, + { 7, 0, 1, 2, 7, 30, }, + { 1, 0, 1, 2, 8, 30, }, + { 3, 0, 1, 2, 8, 26, }, + { 4, 0, 1, 2, 8, 30, }, + { 5, 0, 1, 2, 8, 30, }, + { 6, 0, 1, 2, 8, 26, }, + { 7, 0, 1, 2, 8, 30, }, + { 1, 0, 1, 2, 9, 30, }, + { 3, 0, 1, 2, 9, 26, }, + { 4, 0, 1, 2, 9, 30, }, + { 5, 0, 1, 2, 9, 30, }, + { 6, 0, 1, 2, 9, 26, }, + { 7, 0, 1, 2, 9, 30, }, + { 1, 0, 1, 2, 10, 30, }, + { 3, 0, 1, 2, 10, 28, }, + { 4, 0, 1, 2, 10, 30, }, + { 5, 0, 1, 2, 10, 30, }, + { 6, 0, 1, 2, 10, 28, }, + { 7, 0, 1, 2, 10, 30, }, + { 1, 0, 1, 2, 11, 30, }, + { 3, 0, 1, 2, 11, 20, }, + { 4, 0, 1, 2, 11, 30, }, + { 5, 0, 1, 2, 11, 30, }, + { 6, 0, 1, 2, 11, 20, }, + { 7, 0, 1, 2, 11, 30, }, + { 1, 0, 1, 2, 12, 63, }, + { 3, 0, 1, 2, 12, 63, }, + { 4, 0, 1, 2, 12, 63, }, + { 5, 0, 1, 2, 12, 63, }, + { 6, 0, 1, 2, 12, 63, }, + { 7, 0, 1, 2, 12, 63, }, + { 1, 0, 1, 2, 13, 63, }, + { 3, 0, 1, 2, 13, 63, }, + { 4, 0, 1, 2, 13, 63, }, + { 5, 0, 1, 2, 13, 63, }, + { 6, 0, 1, 2, 13, 63, }, + { 7, 0, 1, 2, 13, 63, }, + { 1, 0, 1, 2, 14, 63, }, + { 3, 0, 1, 2, 14, 63, }, + { 4, 0, 1, 2, 14, 63, }, + { 5, 0, 1, 2, 14, 63, }, + { 6, 0, 1, 2, 14, 63, }, + { 7, 0, 1, 2, 14, 63, }, + { 1, 1, 0, 1, 36, 33, }, + { 3, 1, 0, 1, 36, 31, }, + { 4, 1, 0, 1, 36, 29, }, + { 5, 1, 0, 1, 36, 32, }, + { 6, 1, 0, 1, 36, 29, }, + { 7, 1, 0, 1, 36, 27, }, + { 1, 1, 0, 1, 40, 33, }, + { 3, 1, 0, 1, 40, 31, }, + { 4, 1, 0, 1, 40, 28, }, + { 5, 1, 0, 1, 40, 32, }, + { 6, 1, 0, 1, 40, 29, }, + { 7, 1, 0, 1, 40, 27, }, + { 1, 1, 0, 1, 44, 33, }, + { 3, 1, 0, 1, 44, 31, }, + { 4, 1, 0, 1, 44, 28, }, + { 5, 1, 0, 1, 44, 32, }, + { 6, 1, 0, 1, 44, 30, }, + { 7, 1, 
0, 1, 44, 27, }, + { 1, 1, 0, 1, 48, 33, }, + { 3, 1, 0, 1, 48, 31, }, + { 4, 1, 0, 1, 48, 27, }, + { 5, 1, 0, 1, 48, 32, }, + { 6, 1, 0, 1, 48, 30, }, + { 7, 1, 0, 1, 48, 27, }, + { 1, 1, 0, 1, 52, 33, }, + { 3, 1, 0, 1, 52, 32, }, + { 4, 1, 0, 1, 52, 16, }, + { 5, 1, 0, 1, 52, 32, }, + { 6, 1, 0, 1, 52, 30, }, + { 7, 1, 0, 1, 52, 27, }, + { 1, 1, 0, 1, 56, 33, }, + { 3, 1, 0, 1, 56, 32, }, + { 4, 1, 0, 1, 56, 33, }, + { 5, 1, 0, 1, 56, 32, }, + { 6, 1, 0, 1, 56, 30, }, + { 7, 1, 0, 1, 56, 27, }, + { 1, 1, 0, 1, 60, 33, }, + { 3, 1, 0, 1, 60, 32, }, + { 4, 1, 0, 1, 60, 33, }, + { 5, 1, 0, 1, 60, 32, }, + { 6, 1, 0, 1, 60, 30, }, + { 7, 1, 0, 1, 60, 27, }, + { 1, 1, 0, 1, 64, 33, }, + { 3, 1, 0, 1, 64, 30, }, + { 4, 1, 0, 1, 64, 33, }, + { 5, 1, 0, 1, 64, 32, }, + { 6, 1, 0, 1, 64, 29, }, + { 7, 1, 0, 1, 64, 27, }, + { 1, 1, 0, 1, 100, 33, }, + { 3, 1, 0, 1, 100, 30, }, + { 4, 1, 0, 1, 100, 33, }, + { 5, 1, 0, 1, 100, 32, }, + { 6, 1, 0, 1, 100, 30, }, + { 7, 1, 0, 1, 100, 27, }, + { 1, 1, 0, 1, 104, 33, }, + { 3, 1, 0, 1, 104, 33, }, + { 4, 1, 0, 1, 104, 33, }, + { 5, 1, 0, 1, 104, 32, }, + { 6, 1, 0, 1, 104, 30, }, + { 7, 1, 0, 1, 104, 27, }, + { 1, 1, 0, 1, 108, 33, }, + { 3, 1, 0, 1, 108, 33, }, + { 4, 1, 0, 1, 108, 33, }, + { 5, 1, 0, 1, 108, 32, }, + { 6, 1, 0, 1, 108, 30, }, + { 7, 1, 0, 1, 108, 27, }, + { 1, 1, 0, 1, 112, 33, }, + { 3, 1, 0, 1, 112, 33, }, + { 4, 1, 0, 1, 112, 33, }, + { 5, 1, 0, 1, 112, 32, }, + { 6, 1, 0, 1, 112, 30, }, + { 7, 1, 0, 1, 112, 27, }, + { 1, 1, 0, 1, 116, 33, }, + { 3, 1, 0, 1, 116, 33, }, + { 4, 1, 0, 1, 116, 33, }, + { 5, 1, 0, 1, 116, 32, }, + { 6, 1, 0, 1, 116, 30, }, + { 7, 1, 0, 1, 116, 27, }, + { 1, 1, 0, 1, 120, 33, }, + { 3, 1, 0, 1, 120, 63, }, + { 4, 1, 0, 1, 120, 33, }, + { 5, 1, 0, 1, 120, 63, }, + { 6, 1, 0, 1, 120, 30, }, + { 7, 1, 0, 1, 120, 27, }, + { 1, 1, 0, 1, 124, 33, }, + { 3, 1, 0, 1, 124, 63, }, + { 4, 1, 0, 1, 124, 33, }, + { 5, 1, 0, 1, 124, 63, }, + { 6, 1, 0, 1, 124, 30, }, + { 7, 1, 0, 1, 124, 27, }, + { 1, 1, 0, 1, 128, 33, }, + { 3, 1, 0, 1, 128, 63, }, + { 4, 1, 0, 1, 128, 63, }, + { 5, 1, 0, 1, 128, 63, }, + { 6, 1, 0, 1, 128, 30, }, + { 7, 1, 0, 1, 128, 27, }, + { 1, 1, 0, 1, 132, 33, }, + { 3, 1, 0, 1, 132, 33, }, + { 4, 1, 0, 1, 132, 63, }, + { 5, 1, 0, 1, 132, 32, }, + { 6, 1, 0, 1, 132, 30, }, + { 7, 1, 0, 1, 132, 27, }, + { 1, 1, 0, 1, 136, 33, }, + { 3, 1, 0, 1, 136, 33, }, + { 4, 1, 0, 1, 136, 63, }, + { 5, 1, 0, 1, 136, 32, }, + { 6, 1, 0, 1, 136, 30, }, + { 7, 1, 0, 1, 136, 63, }, + { 1, 1, 0, 1, 140, 33, }, + { 3, 1, 0, 1, 140, 31, }, + { 4, 1, 0, 1, 140, 63, }, + { 5, 1, 0, 1, 140, 32, }, + { 6, 1, 0, 1, 140, 30, }, + { 7, 1, 0, 1, 140, 63, }, + { 1, 1, 0, 1, 144, 63, }, + { 3, 1, 0, 1, 144, 30, }, + { 4, 1, 0, 1, 144, 63, }, + { 5, 1, 0, 1, 144, 63, }, + { 6, 1, 0, 1, 144, 30, }, + { 7, 1, 0, 1, 144, 63, }, + { 1, 1, 0, 1, 149, 63, }, + { 3, 1, 0, 1, 149, 30, }, + { 4, 1, 0, 1, 149, 33, }, + { 5, 1, 0, 1, 149, 33, }, + { 6, 1, 0, 1, 149, 30, }, + { 7, 1, 0, 1, 149, 27, }, + { 1, 1, 0, 1, 153, 63, }, + { 3, 1, 0, 1, 153, 33, }, + { 4, 1, 0, 1, 153, 33, }, + { 5, 1, 0, 1, 153, 33, }, + { 6, 1, 0, 1, 153, 30, }, + { 7, 1, 0, 1, 153, 27, }, + { 1, 1, 0, 1, 157, 63, }, + { 3, 1, 0, 1, 157, 33, }, + { 4, 1, 0, 1, 157, 33, }, + { 5, 1, 0, 1, 157, 33, }, + { 6, 1, 0, 1, 157, 30, }, + { 7, 1, 0, 1, 157, 27, }, + { 1, 1, 0, 1, 161, 63, }, + { 3, 1, 0, 1, 161, 33, }, + { 4, 1, 0, 1, 161, 31, }, + { 5, 1, 0, 1, 161, 33, }, + { 6, 1, 0, 1, 161, 30, }, + { 7, 1, 0, 1, 161, 27, }, + { 1, 1, 0, 1, 165, 63, }, + { 3, 1, 
0, 1, 165, 33, }, + { 4, 1, 0, 1, 165, 63, }, + { 5, 1, 0, 1, 165, 33, }, + { 6, 1, 0, 1, 165, 30, }, + { 7, 1, 0, 1, 165, 27, }, + { 1, 1, 0, 2, 36, 33, }, + { 3, 1, 0, 2, 36, 30, }, + { 4, 1, 0, 2, 36, 27, }, + { 5, 1, 0, 2, 36, 32, }, + { 6, 1, 0, 2, 36, 30, }, + { 7, 1, 0, 2, 36, 27, }, + { 1, 1, 0, 2, 40, 33, }, + { 3, 1, 0, 2, 40, 31, }, + { 4, 1, 0, 2, 40, 29, }, + { 5, 1, 0, 2, 40, 32, }, + { 6, 1, 0, 2, 40, 30, }, + { 7, 1, 0, 2, 40, 27, }, + { 1, 1, 0, 2, 44, 33, }, + { 3, 1, 0, 2, 44, 31, }, + { 4, 1, 0, 2, 44, 29, }, + { 5, 1, 0, 2, 44, 32, }, + { 6, 1, 0, 2, 44, 30, }, + { 7, 1, 0, 2, 44, 27, }, + { 1, 1, 0, 2, 48, 33, }, + { 3, 1, 0, 2, 48, 31, }, + { 4, 1, 0, 2, 48, 26, }, + { 5, 1, 0, 2, 48, 32, }, + { 6, 1, 0, 2, 48, 30, }, + { 7, 1, 0, 2, 48, 27, }, + { 1, 1, 0, 2, 52, 33, }, + { 3, 1, 0, 2, 52, 32, }, + { 4, 1, 0, 2, 52, 7, }, + { 5, 1, 0, 2, 52, 32, }, + { 6, 1, 0, 2, 52, 30, }, + { 7, 1, 0, 2, 52, 27, }, + { 1, 1, 0, 2, 56, 33, }, + { 3, 1, 0, 2, 56, 32, }, + { 4, 1, 0, 2, 56, 33, }, + { 5, 1, 0, 2, 56, 32, }, + { 6, 1, 0, 2, 56, 30, }, + { 7, 1, 0, 2, 56, 27, }, + { 1, 1, 0, 2, 60, 33, }, + { 3, 1, 0, 2, 60, 32, }, + { 4, 1, 0, 2, 60, 33, }, + { 5, 1, 0, 2, 60, 32, }, + { 6, 1, 0, 2, 60, 30, }, + { 7, 1, 0, 2, 60, 27, }, + { 1, 1, 0, 2, 64, 33, }, + { 3, 1, 0, 2, 64, 30, }, + { 4, 1, 0, 2, 64, 33, }, + { 5, 1, 0, 2, 64, 32, }, + { 6, 1, 0, 2, 64, 30, }, + { 7, 1, 0, 2, 64, 27, }, + { 1, 1, 0, 2, 100, 33, }, + { 3, 1, 0, 2, 100, 30, }, + { 4, 1, 0, 2, 100, 33, }, + { 5, 1, 0, 2, 100, 32, }, + { 6, 1, 0, 2, 100, 30, }, + { 7, 1, 0, 2, 100, 27, }, + { 1, 1, 0, 2, 104, 33, }, + { 3, 1, 0, 2, 104, 33, }, + { 4, 1, 0, 2, 104, 33, }, + { 5, 1, 0, 2, 104, 32, }, + { 6, 1, 0, 2, 104, 30, }, + { 7, 1, 0, 2, 104, 27, }, + { 1, 1, 0, 2, 108, 33, }, + { 3, 1, 0, 2, 108, 33, }, + { 4, 1, 0, 2, 108, 33, }, + { 5, 1, 0, 2, 108, 32, }, + { 6, 1, 0, 2, 108, 30, }, + { 7, 1, 0, 2, 108, 27, }, + { 1, 1, 0, 2, 112, 33, }, + { 3, 1, 0, 2, 112, 33, }, + { 4, 1, 0, 2, 112, 33, }, + { 5, 1, 0, 2, 112, 32, }, + { 6, 1, 0, 2, 112, 30, }, + { 7, 1, 0, 2, 112, 27, }, + { 1, 1, 0, 2, 116, 33, }, + { 3, 1, 0, 2, 116, 33, }, + { 4, 1, 0, 2, 116, 33, }, + { 5, 1, 0, 2, 116, 32, }, + { 6, 1, 0, 2, 116, 30, }, + { 7, 1, 0, 2, 116, 27, }, + { 1, 1, 0, 2, 120, 33, }, + { 3, 1, 0, 2, 120, 63, }, + { 4, 1, 0, 2, 120, 33, }, + { 5, 1, 0, 2, 120, 63, }, + { 6, 1, 0, 2, 120, 30, }, + { 7, 1, 0, 2, 120, 27, }, + { 1, 1, 0, 2, 124, 33, }, + { 3, 1, 0, 2, 124, 63, }, + { 4, 1, 0, 2, 124, 33, }, + { 5, 1, 0, 2, 124, 63, }, + { 6, 1, 0, 2, 124, 30, }, + { 7, 1, 0, 2, 124, 27, }, + { 1, 1, 0, 2, 128, 33, }, + { 3, 1, 0, 2, 128, 63, }, + { 4, 1, 0, 2, 128, 63, }, + { 5, 1, 0, 2, 128, 63, }, + { 6, 1, 0, 2, 128, 30, }, + { 7, 1, 0, 2, 128, 27, }, + { 1, 1, 0, 2, 132, 33, }, + { 3, 1, 0, 2, 132, 33, }, + { 4, 1, 0, 2, 132, 63, }, + { 5, 1, 0, 2, 132, 32, }, + { 6, 1, 0, 2, 132, 30, }, + { 7, 1, 0, 2, 132, 27, }, + { 1, 1, 0, 2, 136, 33, }, + { 3, 1, 0, 2, 136, 33, }, + { 4, 1, 0, 2, 136, 63, }, + { 5, 1, 0, 2, 136, 32, }, + { 6, 1, 0, 2, 136, 30, }, + { 7, 1, 0, 2, 136, 63, }, + { 1, 1, 0, 2, 140, 33, }, + { 3, 1, 0, 2, 140, 29, }, + { 4, 1, 0, 2, 140, 63, }, + { 5, 1, 0, 2, 140, 32, }, + { 6, 1, 0, 2, 140, 30, }, + { 7, 1, 0, 2, 140, 63, }, + { 1, 1, 0, 2, 144, 63, }, + { 3, 1, 0, 2, 144, 27, }, + { 4, 1, 0, 2, 144, 63, }, + { 5, 1, 0, 2, 144, 63, }, + { 6, 1, 0, 2, 144, 30, }, + { 7, 1, 0, 2, 144, 63, }, + { 1, 1, 0, 2, 149, 63, }, + { 3, 1, 0, 2, 149, 33, }, + { 4, 1, 0, 2, 149, 33, }, + { 5, 1, 0, 2, 149, 33, }, + 
{ 6, 1, 0, 2, 149, 30, }, + { 7, 1, 0, 2, 149, 27, }, + { 1, 1, 0, 2, 153, 63, }, + { 3, 1, 0, 2, 153, 33, }, + { 4, 1, 0, 2, 153, 33, }, + { 5, 1, 0, 2, 153, 33, }, + { 6, 1, 0, 2, 153, 30, }, + { 7, 1, 0, 2, 153, 27, }, + { 1, 1, 0, 2, 157, 63, }, + { 3, 1, 0, 2, 157, 33, }, + { 4, 1, 0, 2, 157, 33, }, + { 5, 1, 0, 2, 157, 33, }, + { 6, 1, 0, 2, 157, 30, }, + { 7, 1, 0, 2, 157, 27, }, + { 1, 1, 0, 2, 161, 63, }, + { 3, 1, 0, 2, 161, 33, }, + { 4, 1, 0, 2, 161, 31, }, + { 5, 1, 0, 2, 161, 33, }, + { 6, 1, 0, 2, 161, 30, }, + { 7, 1, 0, 2, 161, 27, }, + { 1, 1, 0, 2, 165, 63, }, + { 3, 1, 0, 2, 165, 33, }, + { 4, 1, 0, 2, 165, 63, }, + { 5, 1, 0, 2, 165, 33, }, + { 6, 1, 0, 2, 165, 30, }, + { 7, 1, 0, 2, 165, 27, }, + { 1, 1, 1, 2, 38, 32, }, + { 3, 1, 1, 2, 38, 22, }, + { 4, 1, 1, 2, 38, 26, }, + { 5, 1, 1, 2, 38, 32, }, + { 6, 1, 1, 2, 38, 22, }, + { 7, 1, 1, 2, 38, 27, }, + { 1, 1, 1, 2, 46, 32, }, + { 3, 1, 1, 2, 46, 32, }, + { 4, 1, 1, 2, 46, 28, }, + { 5, 1, 1, 2, 46, 32, }, + { 6, 1, 1, 2, 46, 30, }, + { 7, 1, 1, 2, 46, 27, }, + { 1, 1, 1, 2, 54, 32, }, + { 3, 1, 1, 2, 54, 32, }, + { 4, 1, 1, 2, 54, 22, }, + { 5, 1, 1, 2, 54, 32, }, + { 6, 1, 1, 2, 54, 30, }, + { 7, 1, 1, 2, 54, 27, }, + { 1, 1, 1, 2, 62, 32, }, + { 3, 1, 1, 2, 62, 23, }, + { 4, 1, 1, 2, 62, 31, }, + { 5, 1, 1, 2, 62, 32, }, + { 6, 1, 1, 2, 62, 23, }, + { 7, 1, 1, 2, 62, 27, }, + { 1, 1, 1, 2, 102, 32, }, + { 3, 1, 1, 2, 102, 21, }, + { 4, 1, 1, 2, 102, 31, }, + { 5, 1, 1, 2, 102, 32, }, + { 6, 1, 1, 2, 102, 30, }, + { 7, 1, 1, 2, 102, 27, }, + { 1, 1, 1, 2, 110, 32, }, + { 3, 1, 1, 2, 110, 32, }, + { 4, 1, 1, 2, 110, 32, }, + { 5, 1, 1, 2, 110, 32, }, + { 6, 1, 1, 2, 110, 30, }, + { 7, 1, 1, 2, 110, 27, }, + { 1, 1, 1, 2, 118, 32, }, + { 3, 1, 1, 2, 118, 63, }, + { 4, 1, 1, 2, 118, 32, }, + { 5, 1, 1, 2, 118, 63, }, + { 6, 1, 1, 2, 118, 30, }, + { 7, 1, 1, 2, 118, 27, }, + { 1, 1, 1, 2, 126, 32, }, + { 3, 1, 1, 2, 126, 63, }, + { 4, 1, 1, 2, 126, 63, }, + { 5, 1, 1, 2, 126, 63, }, + { 6, 1, 1, 2, 126, 30, }, + { 7, 1, 1, 2, 126, 27, }, + { 1, 1, 1, 2, 134, 32, }, + { 3, 1, 1, 2, 134, 32, }, + { 4, 1, 1, 2, 134, 63, }, + { 5, 1, 1, 2, 134, 32, }, + { 6, 1, 1, 2, 134, 30, }, + { 7, 1, 1, 2, 134, 63, }, + { 1, 1, 1, 2, 142, 63, }, + { 3, 1, 1, 2, 142, 29, }, + { 4, 1, 1, 2, 142, 63, }, + { 5, 1, 1, 2, 142, 63, }, + { 6, 1, 1, 2, 142, 30, }, + { 7, 1, 1, 2, 142, 63, }, + { 1, 1, 1, 2, 151, 63, }, + { 3, 1, 1, 2, 151, 32, }, + { 4, 1, 1, 2, 151, 27, }, + { 5, 1, 1, 2, 151, 32, }, + { 6, 1, 1, 2, 151, 30, }, + { 7, 1, 1, 2, 151, 27, }, + { 1, 1, 1, 2, 159, 63, }, + { 3, 1, 1, 2, 159, 32, }, + { 4, 1, 1, 2, 159, 26, }, + { 5, 1, 1, 2, 159, 32, }, + { 6, 1, 1, 2, 159, 30, }, + { 7, 1, 1, 2, 159, 27, }, + { 1, 1, 2, 4, 42, 28, }, + { 3, 1, 2, 4, 42, 19, }, + { 4, 1, 2, 4, 42, 25, }, + { 5, 1, 2, 4, 42, 32, }, + { 6, 1, 2, 4, 42, 19, }, + { 7, 1, 2, 4, 42, 27, }, + { 1, 1, 2, 4, 58, 28, }, + { 3, 1, 2, 4, 58, 22, }, + { 4, 1, 2, 4, 58, 28, }, + { 5, 1, 2, 4, 58, 32, }, + { 6, 1, 2, 4, 58, 22, }, + { 7, 1, 2, 4, 58, 27, }, + { 1, 1, 2, 4, 106, 32, }, + { 3, 1, 2, 4, 106, 18, }, + { 4, 1, 2, 4, 106, 30, }, + { 5, 1, 2, 4, 106, 32, }, + { 6, 1, 2, 4, 106, 30, }, + { 7, 1, 2, 4, 106, 27, }, + { 1, 1, 2, 4, 122, 32, }, + { 3, 1, 2, 4, 122, 63, }, + { 4, 1, 2, 4, 122, 26, }, + { 5, 1, 2, 4, 122, 63, }, + { 6, 1, 2, 4, 122, 30, }, + { 7, 1, 2, 4, 122, 27, }, + { 1, 1, 2, 4, 138, 63, }, + { 3, 1, 2, 4, 138, 28, }, + { 4, 1, 2, 4, 138, 63, }, + { 5, 1, 2, 4, 138, 63, }, + { 6, 1, 2, 4, 138, 30, }, + { 7, 1, 2, 4, 138, 63, }, + { 1, 
1, 2, 4, 155, 63, }, + { 3, 1, 2, 4, 155, 32, }, + { 4, 1, 2, 4, 155, 27, }, + { 5, 1, 2, 4, 155, 32, }, + { 6, 1, 2, 4, 155, 30, }, + { 7, 1, 2, 4, 155, 27, }, +}; + +RTW_DECL_TABLE_TXPWR_LMT(rtw8821c_txpwr_lmt_type0); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h new file mode 100644 index 000000000000..5ea8b4fc7fba --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW8821C_TABLE_H__ +#define __RTW8821C_TABLE_H__ + +extern const struct rtw_table rtw8821c_mac_tbl; +extern const struct rtw_table rtw8821c_agc_tbl; +extern const struct rtw_table rtw8821c_bb_tbl; +extern const struct rtw_table rtw8821c_bb_pg_type0_tbl; +extern const struct rtw_table rtw8821c_rf_a_tbl; +extern const struct rtw_table rtw8821c_txpwr_lmt_type0_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c new file mode 100644 index 000000000000..616fdcfd62c9 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include "rtw8821ce.h" + +static const struct pci_device_id rtw_8821ce_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC821), + .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec + }, + {} +}; +MODULE_DEVICE_TABLE(pci, rtw_8821ce_id_table); + +static struct pci_driver rtw_8821ce_driver = { + .name = "rtw_8821ce", + .id_table = rtw_8821ce_id_table, + .probe = rtw_pci_probe, + .remove = rtw_pci_remove, + .driver.pm = &rtw_pm_ops, + .shutdown = rtw_pci_shutdown, +}; +module_pci_driver(rtw_8821ce_driver); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821ce driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h new file mode 100644 index 000000000000..8d3eb77a876b --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2018-2019 Realtek Corporation + */ + +#ifndef __RTW_8821CE_H_ +#define __RTW_8821CE_H_ + +extern const struct dev_pm_ops rtw_pm_ops; +extern struct rtw_chip_info rtw8821c_hw_spec; +int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); +void rtw_pci_remove(struct pci_dev *pdev); +void rtw_pci_shutdown(struct pci_dev *pdev); + +#endif From ad5f411b7f37fcc9d1fcf4fe0b433c2f5691cb0c Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:20 +0800 Subject: [PATCH 1033/2056] rtw88: 8821c: add set tx power index To configure the transmit power of 8821c, implement the transmit power index setting callback function for 8821c. It is very similar to the callback function of 8822b.
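As orientation for the diff that follows: the new callback packs one-byte per-rate power indexes into 32-bit TXAGC words; (rate & 0x3) selects the byte lane, and a complete dword is flushed to offset_txagc[path] + (rate & 0xfc) (0x1d00 for path A, 0x1d80 for path B) every fourth rate or at the end of a rate section. The standalone sketch below only illustrates that byte-lane packing under those assumptions; pack_txagc_word() is a hypothetical helper, not rtw88 API.

#include <stdint.h>

/* Sketch of the byte-lane packing used by the callback in the diff below. */
static uint32_t pack_txagc_word(const uint8_t pwr_idx[4])
{
	uint32_t word = 0;
	int lane;

	for (lane = 0; lane < 4; lane++)	/* lane plays the role of (rate & 0x3) */
		word |= (uint32_t)pwr_idx[lane] << (lane * 8);

	return word;	/* the driver writes this dword via rtw_write32() */
}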
Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-3-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 9989eab95256..d5ce4ce568dd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -169,6 +169,44 @@ static void rtw8821c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr); } +static void +rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +{ + struct rtw_hal *hal = &rtwdev->hal; + static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; + static u32 phy_pwr_idx; + u8 rate, rate_idx, pwr_index, shift; + int j; + + for (j = 0; j < rtw_rate_size[rs]; j++) { + rate = rtw_rate_section[rs][j]; + pwr_index = hal->tx_pwr_tbl[path][rate]; + shift = rate & 0x3; + phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + if (shift == 0x3 || rate == DESC_RATEVHT1SS_MCS9) { + rate_idx = rate & 0xfc; + rtw_write32(rtwdev, offset_txagc[path] + rate_idx, + phy_pwr_idx); + phy_pwr_idx = 0; + } + } +} + +static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev) +{ + struct rtw_hal *hal = &rtwdev->hal; + int rs, path; + + for (path = 0; path < hal->rf_path_num; path++) { + for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) { + if (rs == RTW_RATE_SECTION_HT_2S || + rs == RTW_RATE_SECTION_VHT_2S) + continue; + rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs); + } + } +} + static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -598,6 +636,7 @@ static struct rtw_chip_ops rtw8821c_ops = { .read_rf = rtw_phy_read_rf, .write_rf = rtw_phy_write_rf_reg_sipi, .set_antenna = NULL, + .set_tx_power_index = rtw8821c_set_tx_power_index, .cfg_ldo25 = rtw8821c_cfg_ldo25, }; From 6cf2086fd099bcc81b872175b60a7ba2228e407c Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:21 +0800 Subject: [PATCH 1034/2056] rtw88: 8821c: add dig related settings To improve the user experience in the field, we need DIG to adjust the RX initial gain depending on field conditions. Define the related register addresses for 8821c.
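The diff that follows only describes where the 8821c initial gain (IGI) field lives — register 0xc50 with bit mask 0x7f, plus a floor of 0x1c (dig_min) — so the common rtw88 DIG code can tune it; no new algorithm is added here. The standalone sketch below is a hedged illustration of how such an { .addr, .mask } pair is used to update one field without touching the rest of the register; set_masked_field() is a hypothetical helper, not rtw88 API.

#include <stdint.h>

struct hw_reg { uint32_t addr; uint32_t mask; };	/* same shape as rtw8821c_dig[0] below */

/* Replace only the masked bits of the current register value with 'igi'. */
static uint32_t set_masked_field(uint32_t cur, uint32_t mask, uint32_t igi)
{
	uint32_t shift = (uint32_t)__builtin_ctz(mask);	/* position of the mask's lowest set bit */

	return (cur & ~mask) | ((igi << shift) & mask);
}

With { .addr = 0xc50, .mask = 0x7f }, only bits [6:0] of register 0xc50 change, and dig_min (0x1c) gives the common code a lower bound for the value it programs.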
Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-4-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index d5ce4ce568dd..dfce8f6cb5fc 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -579,6 +579,10 @@ static const struct rtw_rfe_def rtw8821c_rfe_defs[] = { [0] = RTW_DEF_RFE(8821c, 0, 0), }; +static struct rtw_hw_reg rtw8821c_dig[] = { + [0] = { .addr = 0xc50, .mask = 0x7f }, +}; + static const struct rtw_ltecoex_addr rtw8821c_ltecoex_addr = { .ctrl = LTECOEX_ACCESS_CTRL, .wdata = LTECOEX_WRITE_DATA, @@ -660,6 +664,7 @@ struct rtw_chip_info rtw8821c_hw_spec = { .csi_buf_pg_num = 0, .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = 128, + .dig_min = 0x1c, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), @@ -670,6 +675,7 @@ struct rtw_chip_info rtw8821c_hw_spec = { .rqpn_table = rqpn_table_8821c, .prioq_addrs = &prioq_addrs_8821c, .intf_table = &phy_para_table_8821c, + .dig = rtw8821c_dig, .rf_base_addr = {0x2800, 0x2c00}, .rf_sipi_addr = {0xc90, 0xe90}, .ltecoex_addr = &rtw8821c_ltecoex_addr, From 58eb40c921a2b08ad3a2dff82ee7d965aadbcfee Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:22 +0800 Subject: [PATCH 1035/2056] rtw88: 8821c: add set channel support 8821c is capable of 2.4G and 5G. Implement rtw_chip_ops::set_channel() to set 2G and 5G channels. This includes MAC, BB and RF related settings. Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-5-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.c | 4 + drivers/net/wireless/realtek/rtw88/main.h | 5 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 205 ++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 15 ++ 4 files changed, 229 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 0eefafc51c62..d5677f94170f 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1326,6 +1326,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev) efuse->share_ant = true; if (efuse->regd == 0xff) efuse->regd = 0; + if (efuse->tx_bb_swing_setting_2g == 0xff) + efuse->tx_bb_swing_setting_2g = 0; + if (efuse->tx_bb_swing_setting_5g == 0xff) + efuse->tx_bb_swing_setting_5g = 0; efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20; efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 
1 : 0; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 8f6e10acd65f..82b6accf4744 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1148,6 +1148,9 @@ struct rtw_chip_info { const struct wiphy_wowlan_support *wowlan_stub; const u8 max_sched_scan_ssids; + /* for 8821c set channel */ + u32 ch_param[3]; + /* coex paras */ u32 coex_para_ver; u8 bt_desired_ver; @@ -1527,6 +1530,8 @@ struct rtw_efuse { u8 apa_type; bool ext_pa_2g; bool ext_pa_5g; + u8 tx_bb_swing_setting_2g; + u8 tx_bb_swing_setting_5g; bool btcoex; /* bt share antenna with wifi */ diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index dfce8f6cb5fc..4c27d28a9f9d 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -97,6 +97,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) /* post init after header files config */ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); + rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD); + rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD); + rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); rtw_phy_init(rtwdev); } @@ -169,6 +172,207 @@ static void rtw8821c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable) rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr); } +static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw) +{ + u32 rf_reg18; + + rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK); + + rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK | + RF18_BW_MASK); + + rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G); + rf_reg18 |= (channel & RF18_CHANNEL_MASK); + + if (channel >= 100 && channel <= 140) + rf_reg18 |= RF18_RFSI_GE; + else if (channel > 140) + rf_reg18 |= RF18_RFSI_GT; + + switch (bw) { + case RTW_CHANNEL_WIDTH_5: + case RTW_CHANNEL_WIDTH_10: + case RTW_CHANNEL_WIDTH_20: + default: + rf_reg18 |= RF18_BW_20M; + break; + case RTW_CHANNEL_WIDTH_40: + rf_reg18 |= RF18_BW_40M; + break; + case RTW_CHANNEL_WIDTH_80: + rf_reg18 |= RF18_BW_80M; + break; + } + + if (channel <= 14) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1); + rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf); + } else { + rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x0); + } + + rtw_write_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK, rf_reg18); + + rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 0); + rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 1); +} + +static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw) +{ + if (bw == RTW_CHANNEL_WIDTH_40) { + /* RX DFIR for BW40 */ + rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2); + rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2); + rtw_write32_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0); + rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x0); + } else if (bw == RTW_CHANNEL_WIDTH_80) { + /* RX DFIR for BW80 */ + rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2); + rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x1); + rtw_write32_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0); + rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x1); + } else { + /* RX DFIR for BW20, BW10 and BW5 */ + rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2); + rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2); + rtw_write32_mask(rtwdev, REG_TXDFIR, 
BIT(31), 0x1); + rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x0); + } +} + +static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_ch_idx) +{ + u32 val32; + + if (channel <= 14) { + rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1); + rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0); + rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0); + rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15); + + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x0); + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a); + if (channel == 14) { + rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c); + rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000); + rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667); + } else { + rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, + rtwdev->chip->ch_param[0]); + rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, + rtwdev->chip->ch_param[1] & MASKLWORD); + rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, + rtwdev->chip->ch_param[2]); + } + } else if (channel > 35) { + rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1); + rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1); + rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0); + rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15); + + if (channel >= 36 && channel <= 64) + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x1); + else if (channel >= 100 && channel <= 144) + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x2); + else if (channel >= 149) + rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x3); + + if (channel >= 36 && channel <= 48) + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494); + else if (channel >= 52 && channel <= 64) + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453); + else if (channel >= 100 && channel <= 116) + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452); + else if (channel >= 118 && channel <= 177) + rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412); + } + + switch (bw) { + case RTW_CHANNEL_WIDTH_20: + default: + val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD); + val32 &= 0xffcffc00; + val32 |= 0x10010000; + rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32); + + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1); + break; + case RTW_CHANNEL_WIDTH_40: + if (primary_ch_idx == 1) + rtw_write32_set(rtwdev, REG_RXSB, BIT(4)); + else + rtw_write32_clr(rtwdev, REG_RXSB, BIT(4)); + + val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD); + val32 &= 0xff3ff300; + val32 |= 0x20020000 | ((primary_ch_idx & 0xf) << 2) | + RTW_CHANNEL_WIDTH_40; + rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32); + + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1); + break; + case RTW_CHANNEL_WIDTH_80: + val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD); + val32 &= 0xfcffcf00; + val32 |= 0x40040000 | ((primary_ch_idx & 0xf) << 2) | + RTW_CHANNEL_WIDTH_80; + rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32); + + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1); + break; + case RTW_CHANNEL_WIDTH_5: + val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD); + val32 &= 0xefcefc00; + val32 |= 0x200240; + rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32); + + rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0); + rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1); + break; + case RTW_CHANNEL_WIDTH_10: + val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD); + val32 &= 0xefcefc00; + val32 |= 0x300380; + rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32); + + rtw_write32_mask(rtwdev, REG_ADC160, 
BIT(30), 0x0); + rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1); + break; + } +} + +static u32 rtw8821c_get_bb_swing(struct rtw_dev *rtwdev, u8 channel) +{ + struct rtw_efuse efuse = rtwdev->efuse; + u8 tx_bb_swing; + u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6}; + + tx_bb_swing = channel <= 14 ? efuse.tx_bb_swing_setting_2g : + efuse.tx_bb_swing_setting_5g; + if (tx_bb_swing > 9) + tx_bb_swing = 0; + + return swing2setting[(tx_bb_swing / 3)]; +} + +static void rtw8821c_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 channel, + u8 bw, u8 primary_ch_idx) +{ + rtw_write32_mask(rtwdev, REG_TXSCALE_A, GENMASK(31, 21), + rtw8821c_get_bb_swing(rtwdev, channel)); +} + +static void rtw8821c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, + u8 primary_chan_idx) +{ + rtw8821c_set_channel_bb(rtwdev, channel, bw, primary_chan_idx); + rtw8821c_set_channel_bb_swing(rtwdev, channel, bw, primary_chan_idx); + rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx); + rtw8821c_set_channel_rf(rtwdev, channel, bw); + rtw8821c_set_channel_rxdfir(rtwdev, bw); +} + static void rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) { @@ -636,6 +840,7 @@ static struct rtw_prioq_addrs prioq_addrs_8821c = { static struct rtw_chip_ops rtw8821c_ops = { .phy_set_param = rtw8821c_phy_set_param, .read_efuse = rtw8821c_read_efuse, + .set_channel = rtw8821c_set_channel, .mac_init = rtw8821c_mac_init, .read_rf = rtw_phy_read_rf, .write_rf = rtw_phy_write_rf_reg_sipi, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 1c357e2b099b..2d33f6e50cea 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -161,6 +161,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_ADCCLK 0x8ac #define REG_ADC160 0x8c4 #define REG_ADC40 0x8c8 +#define REG_CHFIR 0x8f0 #define REG_CDDTXP 0x93c #define REG_TXPSEL1 0x940 #define REG_ACBB0 0x948 @@ -172,7 +173,9 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_TXSF6 0xa28 #define REG_RXDESC 0xa2c #define REG_ENTXCCK 0xa80 +#define REG_TXFILTER 0xaac #define REG_AGCTR_A 0xc08 +#define REG_TXSCALE_A 0xc1c #define REG_TXDFIR 0xc20 #define REG_RXIGI_A 0xc50 #define REG_TRSW 0xca0 @@ -185,4 +188,16 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_ANTWT 0x1904 #define REG_IQKFAILMSK 0x1bf0 +#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8)) +#define RF18_BAND_2G (0) +#define RF18_BAND_5G (BIT(16) | BIT(8)) +#define RF18_CHANNEL_MASK (MASKBYTE0) +#define RF18_RFSI_MASK (BIT(18) | BIT(17)) +#define RF18_RFSI_GE (BIT(17)) +#define RF18_RFSI_GT (BIT(18)) +#define RF18_BW_MASK (BIT(11) | BIT(10)) +#define RF18_BW_20M (BIT(11) | BIT(10)) +#define RF18_BW_40M (BIT(11)) +#define RF18_BW_80M (BIT(10)) + #endif From d19040618a22bd9acb698be71cd39f05f699bd62 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:23 +0800 Subject: [PATCH 1036/2056] rtw88: 8821c: add query rx desc support Some RX packets contain also information about environment status. Implement rtw_chip_ops::query_rx_desc() for 8821c. Parse the RX descriptor which describes the current condition of the received packet. 
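As a concrete illustration of the conversion implemented below: for frames reported in PHY status page 1, the raw PWDB byte is offset by -110 to obtain dBm, so a PWDB reading of 60 corresponds to 60 - 110 = -50 dBm, and the result is clamped against the -120 dBm floor before it is reported as signal_power (page 0, used for CCK frames, applies a -100 offset instead).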
Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-6-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 101 ++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 16 +++ 2 files changed, 117 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 4c27d28a9f9d..bf1a2c92f2a5 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -373,6 +373,106 @@ static void rtw8821c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, rtw8821c_set_channel_rxdfir(rtwdev, bw); } +static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + s8 min_rx_power = -120; + u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status); + + pkt_stat->rx_power[RF_PATH_A] = pwdb - 100; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + pkt_stat->bw = RTW_CHANNEL_WIDTH_20; + pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A], + min_rx_power); +} + +static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + u8 rxsc, bw; + s8 min_rx_power = -120; + + if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0) + rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status); + else + rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status); + + if (rxsc >= 1 && rxsc <= 8) + bw = RTW_CHANNEL_WIDTH_20; + else if (rxsc >= 9 && rxsc <= 12) + bw = RTW_CHANNEL_WIDTH_40; + else if (rxsc >= 13) + bw = RTW_CHANNEL_WIDTH_80; + else + bw = GET_PHY_STAT_P1_RF_MODE(phy_status); + + pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110; + pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1); + pkt_stat->bw = bw; + pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A], + min_rx_power); +} + +static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status, + struct rtw_rx_pkt_stat *pkt_stat) +{ + u8 page; + + page = *phy_status & 0xf; + + switch (page) { + case 0: + query_phy_status_page0(rtwdev, phy_status, pkt_stat); + break; + case 1: + query_phy_status_page1(rtwdev, phy_status, pkt_stat); + break; + default: + rtw_warn(rtwdev, "unused phy status page (%d)\n", page); + return; + } +} + +static void rtw8821c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, + struct rtw_rx_pkt_stat *pkt_stat, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_hdr *hdr; + u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz; + u8 *phy_status = NULL; + + memset(pkt_stat, 0, sizeof(*pkt_stat)); + + pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc); + pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc); + pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc); + pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc); + pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc); + pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc); + pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc); + pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc); + pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc); + pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc); + pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc); + pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc); + + /* drv_info_sz is in unit of 8-bytes */ + pkt_stat->drv_info_sz *= 8; + + /* c2h cmd pkt's rx/phy status is not interested */ + if (pkt_stat->is_c2h) + return; + + hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift + + 
pkt_stat->drv_info_sz); + if (pkt_stat->phy_status) { + phy_status = rx_desc + desc_sz + pkt_stat->shift; + query_phy_status(rtwdev, phy_status, pkt_stat); + } + + rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status); +} + static void rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) { @@ -840,6 +940,7 @@ static struct rtw_prioq_addrs prioq_addrs_8821c = { static struct rtw_chip_ops rtw8821c_ops = { .phy_set_param = rtw8821c_phy_set_param, .read_efuse = rtw8821c_read_efuse, + .query_rx_desc = rtw8821c_query_rx_desc, .set_channel = rtw8821c_set_channel, .mac_init = rtw8821c_mac_init, .read_rf = rtw_phy_read_rf, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 2d33f6e50cea..741f78829c17 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -145,6 +145,22 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8) #define WLAN_PRE_TXCNT_TIME_TH 0x1E4 +/* phy status page0 */ +#define GET_PHY_STAT_P0_PWDB(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8)) + +/* phy status page1 */ +#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8)) +#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16)) +#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28)) +#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8)) +#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12)) + #define REG_INIRTS_RATE_SEL 0x0480 #define REG_HTSTFWT 0x800 #define REG_RXPSEL 0x808 From 960361238b867aa169ffec93d965ea329081cc86 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:24 +0800 Subject: [PATCH 1037/2056] rtw88: 8821c: add false alarm statistics False alarm statistics can be used to adjust the RX gain. This helps the driver to adapt to different circumstances. Implement rtw_chip_ops::false_alarm_statistics() for 8821c. 
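For example, the per-rate CRC counters read below pack the OK count in the low 16 bits and the error count in the high 16 bits of a single 32-bit register, so a raw readout of 0x002000af splits into FIELD_GET(GENMASK(15, 0), crc32_cnt) = 0xaf = 175 frames received OK and FIELD_GET(GENMASK(31, 16), crc32_cnt) = 0x20 = 32 CRC errors.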
Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-7-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 53 +++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 10 ++++ 2 files changed, 63 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index bf1a2c92f2a5..ffcb427468d7 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -511,6 +511,58 @@ static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev) } } +static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u32 cck_enable; + u32 cck_fa_cnt; + u32 ofdm_fa_cnt; + u32 crc32_cnt; + u32 cca32_cnt; + + cck_enable = rtw_read32(rtwdev, REG_RXPSEL) & BIT(28); + cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK); + ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM); + + dm_info->cck_fa_cnt = cck_fa_cnt; + dm_info->ofdm_fa_cnt = ofdm_fa_cnt; + if (cck_enable) + dm_info->total_fa_cnt += cck_fa_cnt; + dm_info->total_fa_cnt = ofdm_fa_cnt; + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK); + dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); + dm_info->cck_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM); + dm_info->ofdm_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); + dm_info->ofdm_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT); + dm_info->ht_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); + dm_info->ht_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt); + + crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT); + dm_info->vht_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt); + dm_info->vht_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt); + + cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM); + dm_info->ofdm_cca_cnt = FIELD_GET(GENMASK(31, 16), cca32_cnt); + dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt; + if (cck_enable) { + cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK); + dm_info->cck_cca_cnt = FIELD_GET(GENMASK(15, 0), cca32_cnt); + dm_info->total_cca_cnt += dm_info->cck_cca_cnt; + } + + rtw_write32_set(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_FAS, BIT(17)); + rtw_write32_clr(rtwdev, REG_RXDESC, BIT(15)); + rtw_write32_set(rtwdev, REG_RXDESC, BIT(15)); + rtw_write32_set(rtwdev, REG_CNTRST, BIT(0)); + rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0)); +} + static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -948,6 +1000,7 @@ static struct rtw_chip_ops rtw8821c_ops = { .set_antenna = NULL, .set_tx_power_index = rtw8821c_set_tx_power_index, .cfg_ldo25 = rtw8821c_cfg_ldo25, + .false_alarm_statistics = rtw8821c_false_alarm_statistics, }; struct rtw_chip_info rtw8821c_hw_spec = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 741f78829c17..3b7d12bf7728 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -183,13 +183,16 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_ACBB0 0x948 #define REG_ACBBRXFIR 0x94c #define REG_ACGG2TBL 0x958 +#define REG_FAS 0x9a4 #define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 +#define REG_FA_CCK 0xa5c #define REG_RXDESC 0xa2c 
#define REG_ENTXCCK 0xa80 #define REG_TXFILTER 0xaac +#define REG_CNTRST 0xb58 #define REG_AGCTR_A 0xc08 #define REG_TXSCALE_A 0xc1c #define REG_TXDFIR 0xc20 @@ -201,6 +204,13 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_RFEINV 0xcbc #define REG_AGCTR_B 0xe08 #define REG_RXIGI_B 0xe50 +#define REG_CRC_CCK 0xf04 +#define REG_CRC_OFDM 0xf14 +#define REG_CRC_HT 0xf10 +#define REG_CRC_VHT 0xf0c +#define REG_CCA_OFDM 0xf08 +#define REG_FA_OFDM 0xf48 +#define REG_CCA_CCK 0xfcc #define REG_ANTWT 0x1904 #define REG_IQKFAILMSK 0x1bf0 From 1a94d93e648f6829a1dc30cb1ee4e1f92c7386e0 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Tue, 16 Jun 2020 17:16:25 +0800 Subject: [PATCH 1038/2056] rtw88: 8821c: add phy calibration In order to get a better TX EVM, do calibration after association. The calibration needed for 8821c is the IQK, which is done in the firmware. Implement the rtw_chip_ops::phy_calibration() to trigger firmware to calibrate. Reviewed-by: Sebastian Andrzej Siewior Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200616091625.26489-8-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index ffcb427468d7..4bd4164d23ef 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -563,6 +563,39 @@ static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev) rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0)); } +static void rtw8821c_do_iqk(struct rtw_dev *rtwdev) +{ + static int do_iqk_cnt; + struct rtw_iqk_para para = {.clear = 0, .segment_iqk = 0}; + u32 rf_reg, iqk_fail_mask; + int counter; + bool reload; + + if (rtw_is_assoc(rtwdev)) + para.segment_iqk = 1; + + rtw_fw_do_iqk(rtwdev, ¶); + + for (counter = 0; counter < 300; counter++) { + rf_reg = rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK); + if (rf_reg == 0xabcde) + break; + msleep(20); + } + rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0); + + reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16)); + iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0)); + rtw_dbg(rtwdev, RTW_DBG_PHY, + "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n", + counter, reload, ++do_iqk_cnt, iqk_fail_mask); +} + +static void rtw8821c_phy_calibration(struct rtw_dev *rtwdev) +{ + rtw8821c_do_iqk(rtwdev); +} + static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -1001,6 +1034,7 @@ static struct rtw_chip_ops rtw8821c_ops = { .set_tx_power_index = rtw8821c_set_tx_power_index, .cfg_ldo25 = rtw8821c_cfg_ldo25, .false_alarm_statistics = rtw8821c_false_alarm_statistics, + .phy_calibration = rtw8821c_phy_calibration, }; struct rtw_chip_info rtw8821c_hw_spec = { From 11fcb119a758e1e03ec77e20b386f4b93ae06601 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Wed, 3 Jun 2020 17:42:14 +0800 Subject: [PATCH 1039/2056] rtw88: 8821c: add cck pd settings CCK PD can reduce the number of false alarm of the CCK rates. It dynamically adjusts the power threshold and CS ratio. The values are compared to the values of the previous level, if the level is changed, set new values of power threshold and CS ratio. Implement rtw_chip_ops::cck_pd_set() for 8821c. 
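As a worked example of the register updates below (the initial CS ratio value is illustrative): if the CS ratio read back at init is 0x10, then moving to level 2 programs the power threshold field to pd[2] = 13 and the CS ratio field to 0x10 + 2 * 2 = 0x14, while the min_rssi > 60 special case forces level 4 with a power threshold of 0x1d.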
Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603094218.19942-2-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.h | 1 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 25 +++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 3 +++ 3 files changed, 29 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 82b6accf4744..605a70e311fc 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1483,6 +1483,7 @@ struct rtw_dm_info { /* [bandwidth 0:20M/1:40M][number of path] */ u8 cck_pd_lv[2][RTW_RF_PATH_MAX]; u32 cck_fa_avg; + u8 cck_pd_default; /* save the last rx phy status for debug */ s8 rx_snr[RTW_RF_PATH_MAX]; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 4bd4164d23ef..904eb494ccad 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -102,6 +102,7 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); rtw_phy_init(rtwdev); + rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; } static int rtw8821c_mac_init(struct rtw_dev *rtwdev) @@ -596,6 +597,29 @@ static void rtw8821c_phy_calibration(struct rtw_dev *rtwdev) rtw8821c_do_iqk(rtwdev); } +static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 pd[CCK_PD_LV_MAX] = {3, 7, 13, 13, 13}; + + if (dm_info->min_rssi > 60) { + new_lvl = 4; + pd[4] = 0x1d; + goto set_cck_pd; + } + + if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl) + return; + + dm_info->cck_fa_avg = CCK_FA_AVG_RESET; + +set_cck_pd: + dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl; + rtw_write32_mask(rtwdev, REG_PWRTH, 0x3f0000, pd[new_lvl]); + rtw_write32_mask(rtwdev, REG_PWRTH2, 0x1f0000, + dm_info->cck_pd_default + new_lvl * 2); +} + static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = { {0x0086, RTW_PWR_CUT_ALL_MSK, @@ -1035,6 +1059,7 @@ static struct rtw_chip_ops rtw8821c_ops = { .cfg_ldo25 = rtw8821c_cfg_ldo25, .false_alarm_statistics = rtw8821c_false_alarm_statistics, .phy_calibration = rtw8821c_phy_calibration, + .cck_pd_set = rtw8821c_phy_cck_pd_set, }; struct rtw_chip_info rtw8821c_hw_spec = { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 3b7d12bf7728..bc66b91cd961 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -186,11 +186,14 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_FAS 0x9a4 #define REG_RXSB 0xa00 #define REG_ADCINI 0xa04 +#define REG_PWRTH 0xa08 #define REG_TXSF2 0xa24 #define REG_TXSF6 0xa28 #define REG_FA_CCK 0xa5c #define REG_RXDESC 0xa2c #define REG_ENTXCCK 0xa80 +#define REG_PWRTH2 0xaa8 +#define REG_CSRATIO 0xaaa #define REG_TXFILTER 0xaac #define REG_CNTRST 0xb58 #define REG_AGCTR_A 0xc08 From 3a4312828ce13e1645fd3af76e4314a2623b3361 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Wed, 3 Jun 2020 17:42:15 +0800 Subject: [PATCH 1040/2056] rtw88: 8821c: add power tracking The TX power requires to be adjusted based on the thermal value. 
The actual power will decrease if the thermal value raised, and will increase if the thermal value lowered. Driver comapres the thermal value, as moving averages. If it changes over a limit, driver will modify the TX power index to compensate. Implement rtw_chip_ops::pwr_track() for 8821c. Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603094218.19942-3-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.h | 1 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 291 ++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 1 + 3 files changed, 293 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 605a70e311fc..776ec0590aba 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1466,6 +1466,7 @@ struct rtw_dm_info { u8 thermal_avg[RTW_RF_PATH_MAX]; u8 thermal_meter_k; s8 delta_power_index[RTW_RF_PATH_MAX]; + s8 delta_power_index_last[RTW_RF_PATH_MAX]; u8 default_ofdm_index; bool pwr_trk_triggered; bool pwr_trk_init_trigger; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 904eb494ccad..aa2457046ad1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -61,6 +61,46 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map) return 0; } +static const u32 rtw8821c_txscale_tbl[] = { + 0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8, + 0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180, + 0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab, + 0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe +}; + +static const u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev) +{ + u8 i = 0; + u32 swing, table_value; + + swing = rtw_read32_mask(rtwdev, REG_TXSCALE_A, 0xffe00000); + for (i = 0; i < ARRAY_SIZE(rtw8821c_txscale_tbl); i++) { + table_value = rtw8821c_txscale_tbl[i]; + if (swing == table_value) + break; + } + + return i; +} + +static void rtw8821c_pwrtrack_init(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 swing_idx = rtw8821c_get_swing_index(rtwdev); + + if (swing_idx >= ARRAY_SIZE(rtw8821c_txscale_tbl)) + dm_info->default_ofdm_index = 24; + else + dm_info->default_ofdm_index = swing_idx; + + ewma_thermal_init(&dm_info->avg_thermal[RF_PATH_A]); + dm_info->delta_power_index[RF_PATH_A] = 0; + dm_info->delta_power_index_last[RF_PATH_A] = 0; + dm_info->pwr_trk_triggered = false; + dm_info->pwr_trk_init_trigger = true; + dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; +} + static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) { u8 crystal_cap, val; @@ -103,6 +143,8 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) rtw_phy_init(rtwdev); rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; + + rtw8821c_pwrtrack_init(rtwdev); } static int rtw8821c_mac_init(struct rtw_dev *rtwdev) @@ -362,6 +404,7 @@ static void rtw8821c_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 channel, { rtw_write32_mask(rtwdev, REG_TXSCALE_A, GENMASK(31, 21), rtw8821c_get_bb_swing(rtwdev, channel)); + rtw8821c_pwrtrack_init(rtwdev); } static void rtw8821c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw, @@ -597,6 +640,152 @@ static void rtw8821c_phy_calibration(struct rtw_dev *rtwdev) rtw8821c_do_iqk(rtwdev); } +static void 
+rtw8821c_txagc_swing_offset(struct rtw_dev *rtwdev, u8 pwr_idx_offset, + s8 pwr_idx_offset_lower, + s8 *txagc_idx, u8 *swing_idx) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + s8 delta_pwr_idx = dm_info->delta_power_index[RF_PATH_A]; + u8 swing_upper_bound = dm_info->default_ofdm_index + 10; + u8 swing_lower_bound = 0; + u8 max_pwr_idx_offset = 0xf; + s8 agc_index = 0; + u8 swing_index = dm_info->default_ofdm_index; + + pwr_idx_offset = min_t(u8, pwr_idx_offset, max_pwr_idx_offset); + pwr_idx_offset_lower = max_t(s8, pwr_idx_offset_lower, -15); + + if (delta_pwr_idx >= 0) { + if (delta_pwr_idx <= pwr_idx_offset) { + agc_index = delta_pwr_idx; + swing_index = dm_info->default_ofdm_index; + } else if (delta_pwr_idx > pwr_idx_offset) { + agc_index = pwr_idx_offset; + swing_index = dm_info->default_ofdm_index + + delta_pwr_idx - pwr_idx_offset; + swing_index = min_t(u8, swing_index, swing_upper_bound); + } + } else if (delta_pwr_idx < 0) { + if (delta_pwr_idx >= pwr_idx_offset_lower) { + agc_index = delta_pwr_idx; + swing_index = dm_info->default_ofdm_index; + } else if (delta_pwr_idx < pwr_idx_offset_lower) { + if (dm_info->default_ofdm_index > + (pwr_idx_offset_lower - delta_pwr_idx)) + swing_index = dm_info->default_ofdm_index + + delta_pwr_idx - pwr_idx_offset_lower; + else + swing_index = swing_lower_bound; + + agc_index = pwr_idx_offset_lower; + } + } + + if (swing_index >= ARRAY_SIZE(rtw8821c_txscale_tbl)) { + rtw_warn(rtwdev, "swing index overflow\n"); + swing_index = ARRAY_SIZE(rtw8821c_txscale_tbl) - 1; + } + + *txagc_idx = agc_index; + *swing_idx = swing_index; +} + +static void rtw8821c_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 pwr_idx_offset, + s8 pwr_idx_offset_lower) +{ + s8 txagc_idx; + u8 swing_idx; + + rtw8821c_txagc_swing_offset(rtwdev, pwr_idx_offset, pwr_idx_offset_lower, + &txagc_idx, &swing_idx); + rtw_write32_mask(rtwdev, REG_TXAGCIDX, GENMASK(6, 1), txagc_idx); + rtw_write32_mask(rtwdev, REG_TXSCALE_A, GENMASK(31, 21), + rtw8821c_txscale_tbl[swing_idx]); +} + +static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + u8 pwr_idx_offset, tx_pwr_idx; + s8 pwr_idx_offset_lower; + u8 channel = rtwdev->hal.current_channel; + u8 band_width = rtwdev->hal.current_band_width; + u8 regd = rtwdev->regd.txpwr_regd; + u8 tx_rate = dm_info->tx_rate; + u8 max_pwr_idx = rtwdev->chip->max_power_index; + + tx_pwr_idx = rtw_phy_get_tx_power_index(rtwdev, RF_PATH_A, tx_rate, + band_width, channel, regd); + + tx_pwr_idx = min_t(u8, tx_pwr_idx, max_pwr_idx); + + pwr_idx_offset = max_pwr_idx - tx_pwr_idx; + pwr_idx_offset_lower = 0 - tx_pwr_idx; + + rtw8821c_pwrtrack_set_pwr(rtwdev, pwr_idx_offset, pwr_idx_offset_lower); +} + +static void rtw8821c_phy_pwrtrack(struct rtw_dev *rtwdev) +{ + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + struct rtw_swing_table swing_table; + u8 thermal_value, delta; + + rtw_phy_config_swing_table(rtwdev, &swing_table); + + if (rtwdev->efuse.thermal_meter[0] == 0xff) + return; + + thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00); + + rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A); + + if (dm_info->pwr_trk_init_trigger) + dm_info->pwr_trk_init_trigger = false; + else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value, + RF_PATH_A)) + goto iqk; + + delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A); + + delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1); + + dm_info->delta_power_index[RF_PATH_A] = + rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table, RF_PATH_A, + 
RF_PATH_A, delta); + if (dm_info->delta_power_index[RF_PATH_A] == + dm_info->delta_power_index_last[RF_PATH_A]) + goto iqk; + else + dm_info->delta_power_index_last[RF_PATH_A] = + dm_info->delta_power_index[RF_PATH_A]; + rtw8821c_pwrtrack_set(rtwdev); + +iqk: + if (rtw_phy_pwrtrack_need_iqk(rtwdev)) + rtw8821c_do_iqk(rtwdev); +} + +static void rtw8821c_pwr_track(struct rtw_dev *rtwdev) +{ + struct rtw_efuse *efuse = &rtwdev->efuse; + struct rtw_dm_info *dm_info = &rtwdev->dm_info; + + if (efuse->power_track_type != 0) + return; + + if (!dm_info->pwr_trk_triggered) { + rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER, + GENMASK(17, 16), 0x03); + dm_info->pwr_trk_triggered = true; + return; + } + + rtw8821c_phy_pwrtrack(rtwdev); + dm_info->pwr_trk_triggered = false; +} + static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; @@ -1060,6 +1249,106 @@ static struct rtw_chip_ops rtw8821c_ops = { .false_alarm_statistics = rtw8821c_false_alarm_statistics, .phy_calibration = rtw8821c_phy_calibration, .cck_pd_set = rtw8821c_phy_cck_pd_set, + .pwr_track = rtw8821c_pwr_track, +}; + +static const u8 rtw8821c_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12}, + {0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8821c_pwrtrk_5gb_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8821c_pwrtrk_5ga_n[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, + 11, 11, 12, 12, 12, 12, 12}, + {0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8821c_pwrtrk_5ga_p[][RTW_PWR_TRK_TBL_SZ] = { + {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11, + 12, 12, 12, 12, 12, 12, 12, 12}, + {0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, + 11, 12, 12, 12, 12, 12, 12, 12}, +}; + +static const u8 rtw8821c_pwrtrk_2gb_n[] = { + 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, + 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9 +}; + +static const u8 rtw8821c_pwrtrk_2gb_p[] = { + 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, + 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9 +}; + +static const u8 rtw8821c_pwrtrk_2ga_n[] = { + 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, + 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9 +}; + +static const u8 rtw8821c_pwrtrk_2ga_p[] = { + 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5, + 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9 +}; + +static const u8 rtw8821c_pwrtrk_2g_cck_b_n[] = { + 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, + 4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9 +}; + +static const u8 rtw8821c_pwrtrk_2g_cck_b_p[] = { + 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 7, 7, 7, 
8, 8, 9, 9, 9, 9, 9, 9 +}; + +static const u8 rtw8821c_pwrtrk_2g_cck_a_n[] = { + 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, + 4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9 +}; + +static const u8 rtw8821c_pwrtrk_2g_cck_a_p[] = { + 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, + 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9 +}; + +const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { + .pwrtrk_5gb_n[0] = rtw8821c_pwrtrk_5gb_n[0], + .pwrtrk_5gb_n[1] = rtw8821c_pwrtrk_5gb_n[1], + .pwrtrk_5gb_n[2] = rtw8821c_pwrtrk_5gb_n[2], + .pwrtrk_5gb_p[0] = rtw8821c_pwrtrk_5gb_p[0], + .pwrtrk_5gb_p[1] = rtw8821c_pwrtrk_5gb_p[1], + .pwrtrk_5gb_p[2] = rtw8821c_pwrtrk_5gb_p[2], + .pwrtrk_5ga_n[0] = rtw8821c_pwrtrk_5ga_n[0], + .pwrtrk_5ga_n[1] = rtw8821c_pwrtrk_5ga_n[1], + .pwrtrk_5ga_n[2] = rtw8821c_pwrtrk_5ga_n[2], + .pwrtrk_5ga_p[0] = rtw8821c_pwrtrk_5ga_p[0], + .pwrtrk_5ga_p[1] = rtw8821c_pwrtrk_5ga_p[1], + .pwrtrk_5ga_p[2] = rtw8821c_pwrtrk_5ga_p[2], + .pwrtrk_2gb_n = rtw8821c_pwrtrk_2gb_n, + .pwrtrk_2gb_p = rtw8821c_pwrtrk_2gb_p, + .pwrtrk_2ga_n = rtw8821c_pwrtrk_2ga_n, + .pwrtrk_2ga_p = rtw8821c_pwrtrk_2ga_p, + .pwrtrk_2g_cckb_n = rtw8821c_pwrtrk_2g_cck_b_n, + .pwrtrk_2g_cckb_p = rtw8821c_pwrtrk_2g_cck_b_p, + .pwrtrk_2g_ccka_n = rtw8821c_pwrtrk_2g_cck_a_n, + .pwrtrk_2g_ccka_p = rtw8821c_pwrtrk_2g_cck_a_p, }; struct rtw_chip_info rtw8821c_hw_spec = { @@ -1104,6 +1393,8 @@ struct rtw_chip_info rtw8821c_hw_spec = { .rfe_defs = rtw8821c_rfe_defs, .rfe_defs_size = ARRAY_SIZE(rtw8821c_rfe_defs), .rx_ldpc = false, + .pwr_track_tbl = &rtw8821c_rtw_pwr_track_tbl, + .iqk_threshold = 8, }; EXPORT_SYMBOL(rtw8821c_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index bc66b91cd961..166bd0e9294e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -200,6 +200,7 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_TXSCALE_A 0xc1c #define REG_TXDFIR 0xc20 #define REG_RXIGI_A 0xc50 +#define REG_TXAGCIDX 0xc94 #define REG_TRSW 0xca0 #define REG_RFESEL0 0xcb0 #define REG_RFESEL8 0xcb4 From 5f4eab883c6a0abd9e071ba3ebe0c03043ceda7c Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Wed, 3 Jun 2020 17:42:16 +0800 Subject: [PATCH 1041/2056] rtw88: 8821c: add beamformee support Beamforming is used for directional signal transmission/reception. Beamformee plays the role for signal reception, and makes the RX performance better in middle distance range. Implement beamformee related callbacks for 8821c. Since 8821c only support 1ss rate, nc_index in beamformee setting needs to be adjusted based on the capability. 
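Concretely, nc_index is now derived from the number of active RX chains as hweight8(rtwdev->hal.antenna_rx) - 1: for a single-antenna chip such as 8821c (antenna_rx expected to be BIT(0)) this yields hweight8(0x1) - 1 = 0, while a two-antenna configuration (antenna_rx = 0x3) keeps the previously hard-coded value of 1.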
Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603094218.19942-4-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/bf.c | 5 ++- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 45 +++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c index 8a070d5d9174..aff70e4ae028 100644 --- a/drivers/net/wireless/realtek/rtw88/bf.c +++ b/drivers/net/wireless/realtek/rtw88/bf.c @@ -183,7 +183,7 @@ void rtw_bf_del_sounding(struct rtw_dev *rtwdev) void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif, struct rtw_bfee *bfee) { - u8 nc_index = 1; + u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; u8 nr_index = bfee->sound_dim; u8 grouping = 0, codebookinfo = 1, coefficientsize = 3; u32 addr_bfer_info, addr_csi_rpt, csi_param; @@ -231,7 +231,8 @@ void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif, { struct rtw_bf_info *bf_info = &rtwdev->bf_info; struct mu_bfer_init_para param; - u8 nc_index = 1, nr_index = 1; + u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1; + u8 nr_index = 1; u8 grouping = 0, codebookinfo = 1, coefficientsize = 0; u32 csi_param; diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index aa2457046ad1..c41c61ee2fb6 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -101,6 +101,13 @@ static void rtw8821c_pwrtrack_init(struct rtw_dev *rtwdev) dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k; } +static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev) +{ + rtw_bf_phy_init(rtwdev); + /* Grouping bitmap parameters */ + rtw_write32(rtwdev, 0x1C94, 0xAFFFAFFF); +} + static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) { u8 crystal_cap, val; @@ -145,6 +152,8 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev) rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; rtw8821c_pwrtrack_init(rtwdev); + + rtw8821c_phy_bf_init(rtwdev); } static int rtw8821c_mac_init(struct rtw_dev *rtwdev) @@ -786,6 +795,37 @@ static void rtw8821c_pwr_track(struct rtw_dev *rtwdev) dm_info->pwr_trk_triggered = false; } +static void rtw8821c_bf_config_bfee_su(struct rtw_dev *rtwdev, + struct rtw_vif *vif, + struct rtw_bfee *bfee, bool enable) +{ + if (enable) + rtw_bf_enable_bfee_su(rtwdev, vif, bfee); + else + rtw_bf_remove_bfee_su(rtwdev, bfee); +} + +static void rtw8821c_bf_config_bfee_mu(struct rtw_dev *rtwdev, + struct rtw_vif *vif, + struct rtw_bfee *bfee, bool enable) +{ + if (enable) + rtw_bf_enable_bfee_mu(rtwdev, vif, bfee); + else + rtw_bf_remove_bfee_mu(rtwdev, bfee); +} + +static void rtw8821c_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif, + struct rtw_bfee *bfee, bool enable) +{ + if (bfee->role == RTW_BFEE_SU) + rtw8821c_bf_config_bfee_su(rtwdev, vif, bfee, enable); + else if (bfee->role == RTW_BFEE_MU) + rtw8821c_bf_config_bfee_mu(rtwdev, vif, bfee, enable); + else + rtw_warn(rtwdev, "wrong bfee role\n"); +} + static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl) { struct rtw_dm_info *dm_info = &rtwdev->dm_info; @@ -1250,6 +1290,9 @@ static struct rtw_chip_ops rtw8821c_ops = { .phy_calibration = rtw8821c_phy_calibration, .cck_pd_set = rtw8821c_phy_cck_pd_set, .pwr_track = rtw8821c_pwr_track, + .config_bfee = rtw8821c_bf_config_bfee, + .set_gid_table = rtw_bf_set_gid_table, + .cfg_csi_rate = 
rtw_bf_cfg_csi_rate, }; static const u8 rtw8821c_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { @@ -1395,6 +1438,8 @@ struct rtw_chip_info rtw8821c_hw_spec = { .rx_ldpc = false, .pwr_track_tbl = &rtw8821c_rtw_pwr_track_tbl, .iqk_threshold = 8, + .bfer_su_max_num = 2, + .bfer_mu_max_num = 1, }; EXPORT_SYMBOL(rtw8821c_hw_spec); From d47e7371b23a0401cf284181e83a1819e9b22623 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Wed, 3 Jun 2020 17:42:17 +0800 Subject: [PATCH 1042/2056] rtw88: single rf path chips don't support TX STBC Since single rf path chips don't support TX SPBC, tell mac80211 to not advertise it. Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603094218.19942-5-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index d5677f94170f..9992f56ff624 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -989,12 +989,12 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev, vht_cap->vht_supported = true; vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SHORT_GI_80 | - IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | 0; - + if (rtwdev->hal.rf_path_num > 1) + vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC; vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE; vht_cap->cap |= (rtwdev->hal.bfee_sts_cap << From f745eb9ca5bf823bc5c0f82a434cefb41c57844e Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Wed, 3 Jun 2020 17:42:18 +0800 Subject: [PATCH 1043/2056] rtw88: 8821c: Add 8821CE to Kconfig and Makefile Since 8821C code is ready, we can build it. 
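For example, enabling the new option as a module in a kernel configuration:

    CONFIG_RTW88_8821CE=m

pulls in RTW88_CORE, RTW88_PCI and RTW88_8821C through the select statements below, and with the Makefile additions the build produces rtw88_8821ce.ko plus rtw88_8821c.ko (module names taken from the obj rules in this patch).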
Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200603094218.19942-6-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/Kconfig | 14 ++++++++++++++ drivers/net/wireless/realtek/rtw88/Makefile | 6 ++++++ 2 files changed, 20 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index ca894c4f96ac..e3d7cb6c1290 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -25,6 +25,9 @@ config RTW88_8822C config RTW88_8723D tristate +config RTW88_8821C + tristate + config RTW88_8822BE tristate "Realtek 8822BE PCI wireless network adapter" depends on PCI @@ -58,6 +61,17 @@ config RTW88_8723DE 802.11n PCIe wireless network adapter +config RTW88_8821CE + tristate "Realtek 8821CE PCI wireless network adapter" + depends on PCI + select RTW88_CORE + select RTW88_PCI + select RTW88_8821C + help + Select this option will enable support for 8821CE chipset + + 802.11ac PCIe wireless network adapter + config RTW88_DEBUG bool "Realtek rtw88 debug support" depends on RTW88_CORE diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index f31e78a6f146..c0e4b111c8b4 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -37,5 +37,11 @@ rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o obj-$(CONFIG_RTW88_8723DE) += rtw88_8723de.o rtw88_8723de-objs := rtw8723de.o +obj-$(CONFIG_RTW88_8821C) += rtw88_8821c.o +rtw88_8821c-objs := rtw8821c.o rtw8821c_table.o + +obj-$(CONFIG_RTW88_8821CE) += rtw88_8821ce.o +rtw88_8821ce-objs := rtw8821ce.o + obj-$(CONFIG_RTW88_PCI) += rtw88_pci.o rtw88_pci-objs := pci.o From 68aa716b7dd36f55e080da9e27bc594346334c41 Mon Sep 17 00:00:00 2001 From: Yan-Hsuan Chuang Date: Fri, 5 Jun 2020 15:47:03 +0800 Subject: [PATCH 1044/2056] rtw88: pci: disable aspm for platform inter-op with module parameter Some platforms cannot read the DBI register successfully for the ASPM settings. After the read failed, the bus could be unstable, and the device just became unavailable [1]. For those platforms, the ASPM should be disabled. But as the ASPM can help the driver to save the power consumption in power save mode, the ASPM is still needed. So, add a module parameter for them to disable it, then the device can still work, while others can benefit from the less power consumption that brings by ASPM enabled. [1] https://bugzilla.kernel.org/show_bug.cgi?id=206411 [2] Note that my lenovo T430 is the same. 
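A usage example for affected platforms (module name per the rtw88 Makefile, parameter name as added by this patch):

    # disable the driver's ASPM handling at load time
    modprobe rtw88_pci disable_aspm=1

or persistently with a modprobe.d entry such as "options rtw88_pci disable_aspm=1".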
Fixes: 3dff7c6e3749 ("rtw88: allows to enable/disable HCI link PS mechanism") Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200605074703.32726-1-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/pci.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 8228db9a5fc8..3413973bc475 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -14,8 +14,11 @@ #include "debug.h" static bool rtw_disable_msi; +static bool rtw_pci_disable_aspm; module_param_named(disable_msi, rtw_disable_msi, bool, 0644); +module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644); MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support"); +MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support"); static u32 rtw_pci_tx_queue_idx_addr[] = { [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ, @@ -1200,6 +1203,9 @@ static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable) u8 value; int ret; + if (rtw_pci_disable_aspm) + return; + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret); @@ -1219,6 +1225,9 @@ static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable) u8 value; int ret; + if (rtw_pci_disable_aspm) + return; + ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value); if (ret) { rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret); From 7d428b1c9ffc9ddcdd64c6955836bbb17a233ef3 Mon Sep 17 00:00:00 2001 From: Aaron Ma Date: Fri, 12 Jun 2020 16:27:45 +0800 Subject: [PATCH 1045/2056] rtw88: 8822ce: add support for device ID 0xc82f New device ID 0xc82f found on Lenovo ThinkCenter. Tested it with c822 driver, works good. PCI id: 03:00.0 Network controller [0280]: Realtek Semiconductor Co., Ltd. Device [10ec:c82f] Subsystem: Lenovo Device [17aa:c02f] Signed-off-by: Aaron Ma Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200612082745.204400-1-aaron.ma@canonical.com --- drivers/net/wireless/realtek/rtw88/rtw8822ce.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c index 7b6bd990651e..026ac49ce6e3 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822ce.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822ce.c @@ -11,6 +11,10 @@ static const struct pci_device_id rtw_8822ce_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822), .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec }, + { + PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC82F), + .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec + }, {} }; MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table); From 84c2e47aeb16c376ce2e4e81addbe4b1186129a9 Mon Sep 17 00:00:00 2001 From: Yan-Hsuan Chuang Date: Tue, 23 Jun 2020 15:20:14 +0800 Subject: [PATCH 1046/2056] rtw88: 8822c: add new RFE type 6 A new RFE type 6 of RTL8822CE is released, add it in the RFE type definition. 
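For context: rtw8822c_rfe_defs[] is indexed by the RFE option read from efuse, so a module reporting option 6 previously had no matching entry and could not load its parameter tables; RTW_DEF_RFE(8822c, 0, 0) makes type 6 reuse the same PHY and TX power limit tables as type 0. (The core-side lookup helper is not shown in this patch; conceptually it resolves something like &chip->rfe_defs[efuse->rfe_option].)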
Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200623072014.31631-1-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index c3d72ef611c6..01b12d6b5439 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3899,6 +3899,7 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = { [1] = RTW_DEF_RFE(8822c, 0, 0), [2] = RTW_DEF_RFE(8822c, 0, 0), [5] = RTW_DEF_RFE(8822c, 0, 5), + [6] = RTW_DEF_RFE(8822c, 0, 0), }; static const struct rtw_hw_reg rtw8822c_dig[] = { From 0ef0ace3e8e7d7be6dd7de2617ef3590170a3392 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Fri, 26 Jun 2020 17:29:38 +0200 Subject: [PATCH 1047/2056] mwifiex: Use macro MWIFIEX_MAX_BSS_NUM for specifying limit of interfaces MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This macro is already used in mwifiex driver for specifying upper limit and is defined to value 3. So use it also in struct ieee80211_iface_limit. Signed-off-by: Pali Rohár Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200626152938.12737-1-pali@kernel.org --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 4e4f59c17ded..867b5cf385a8 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -27,7 +27,8 @@ module_param(reg_alpha2, charp, 0); static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = { { - .max = 3, .types = BIT(NL80211_IFTYPE_STATION) | + .max = MWIFIEX_MAX_BSS_NUM, + .types = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_AP), From eb215c33f308cefe6d3691ffac27f9f498280811 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 2 Jul 2020 09:29:48 -0700 Subject: [PATCH 1048/2056] wlcore: Simplify runtime resume ELP path We can simplify the runtime resume ELP path by always setting and clearing the completion in runtime resume. This way we can test for WL1271_FLAG_IRQ_RUNNING after the resume write to see if we need completion at all. And in wlcore_irq(), we need to take spinlock for running the completion and for the pm_wakeup_event(). Spinlock is not needed around the bitops flags check for WL1271_FLAG_SUSPENDED so the spinlocked sections get shorter. 
Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200702162951.45392-2-tony@atomide.com --- drivers/net/wireless/ti/wlcore/main.c | 43 ++++++++++----------------- 1 file changed, 15 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index de6c8a7589ca..437ddb89ad9c 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -649,24 +649,26 @@ static irqreturn_t wlcore_irq(int irq, void *cookie) unsigned long flags; struct wl1271 *wl = cookie; - /* complete the ELP completion */ - spin_lock_irqsave(&wl->wl_lock, flags); set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - if (wl->elp_compl) { - complete(wl->elp_compl); - wl->elp_compl = NULL; + + /* complete the ELP completion */ + if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) { + spin_lock_irqsave(&wl->wl_lock, flags); + if (wl->elp_compl) + complete(wl->elp_compl); + spin_unlock_irqrestore(&wl->wl_lock, flags); } if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { /* don't enqueue a work right now. mark it as pending */ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); wl1271_debug(DEBUG_IRQ, "should not enqueue work"); + spin_lock_irqsave(&wl->wl_lock, flags); disable_irq_nosync(wl->irq); pm_wakeup_event(wl->dev, 0); spin_unlock_irqrestore(&wl->wl_lock, flags); goto out_handled; } - spin_unlock_irqrestore(&wl->wl_lock, flags); /* TX might be handled here, avoid redundant work */ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); @@ -6732,7 +6734,6 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) unsigned long flags; int ret; unsigned long start_time = jiffies; - bool pending = false; bool recovery = false; /* Nothing to do if no ELP mode requested */ @@ -6742,49 +6743,35 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev) wl1271_debug(DEBUG_PSM, "waking up chip from elp"); spin_lock_irqsave(&wl->wl_lock, flags); - if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) - pending = true; - else - wl->elp_compl = &compl; + wl->elp_compl = &compl; spin_unlock_irqrestore(&wl->wl_lock, flags); ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); if (ret < 0) { recovery = true; - goto err; - } - - if (!pending) { + } else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) { ret = wait_for_completion_timeout(&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); if (ret == 0) { wl1271_warning("ELP wakeup timeout!"); - - /* Return no error for runtime PM for recovery */ - ret = 0; recovery = true; - goto err; } } - clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); - - wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", - jiffies_to_msecs(jiffies - start_time)); - - return 0; - -err: spin_lock_irqsave(&wl->wl_lock, flags); wl->elp_compl = NULL; spin_unlock_irqrestore(&wl->wl_lock, flags); + clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); if (recovery) { set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); wl12xx_queue_recovery_work(wl); + } else { + wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", + jiffies_to_msecs(jiffies - start_time)); } - return ret; + return 0; } static const struct dev_pm_ops wlcore_pm_ops = { From f0325e38ab39c2e270770b72c79795772ac3b49e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 2 Jul 2020 09:29:49 -0700 Subject: [PATCH 1049/2056] wlcore: Use spin_trylock in wlcore_irq_locked() for running the queue We currently have a collection of flags and locking between the threaded irq and tx work: - wl->flags bitops - wl->mutex - wl->wl_lock spinlock The 
bitops flags do not need a spinlock around them, and wlcore_irq() already holds the mutex calling wlcore_irq_locked(). And we only need the spinlock to see if we need to run the queue or not. To simplify the locking, we can use spin_trylock and always run the tx queue unless we know there's nothing to do. Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200702162951.45392-3-tony@atomide.com --- drivers/net/wireless/ti/wlcore/main.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 437ddb89ad9c..458457cbab59 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -521,6 +521,7 @@ static int wlcore_irq_locked(struct wl1271 *wl) int ret = 0; u32 intr; int loopcount = WL1271_IRQ_MAX_LOOPS; + bool run_tx_queue = true; bool done = false; unsigned int defer_count; unsigned long flags; @@ -586,19 +587,22 @@ static int wlcore_irq_locked(struct wl1271 *wl) goto err_ret; /* Check if any tx blocks were freed */ - spin_lock_irqsave(&wl->wl_lock, flags); - if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && - wl1271_tx_total_queue_count(wl) > 0) { - spin_unlock_irqrestore(&wl->wl_lock, flags); + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { + if (spin_trylock_irqsave(&wl->wl_lock, flags)) { + if (!wl1271_tx_total_queue_count(wl)) + run_tx_queue = false; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + /* * In order to avoid starvation of the TX path, * call the work function directly. */ - ret = wlcore_tx_work_locked(wl); - if (ret < 0) - goto err_ret; - } else { - spin_unlock_irqrestore(&wl->wl_lock, flags); + if (run_tx_queue) { + ret = wlcore_tx_work_locked(wl); + if (ret < 0) + goto err_ret; + } } /* check for tx results */ From 35fba0f0fd762a8b87d403ae3c723e0061c4aa25 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 2 Jul 2020 09:29:50 -0700 Subject: [PATCH 1050/2056] wlcore: Use spin_trylock in wlcore_irq() to see if we need to queue tx We currently have a collection of flags and locking between the threaded irq and tx work: - wl->flags bitops - wl->mutex - wl->wl_lock spinlock The bitops flags do not need a spinlock around them, and we only need the spinlock to see if we need to queue tx work or not. And wlcore_irq() holds the mutex. To simplify the locking, we can use spin_trylock and always queue tx work unless we know there's nothing to do. Let's also update the comment a bit while at it. 
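
For illustration, the trylock idea described above can be shown with a small schematic sketch: take the lock opportunistically to check whether the queue is empty, and if the lock is contended simply assume there is work and queue it anyway, so a wakeup is never lost. This is not the wlcore code; demo_dev and demo_maybe_queue_tx are invented names.

```c
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/types.h>

struct demo_dev {
	spinlock_t lock;
	int queued_frames;
	struct work_struct tx_work;
};

static void demo_maybe_queue_tx(struct demo_dev *d)
{
	unsigned long flags;
	bool queue_tx = true;	/* default: run the work, never lose a wakeup */

	if (spin_trylock_irqsave(&d->lock, flags)) {
		if (!d->queued_frames)
			queue_tx = false;	/* provably nothing to send */
		spin_unlock_irqrestore(&d->lock, flags);
	}
	if (queue_tx)
		schedule_work(&d->tx_work);
}
```

The design trade-off is that a contended lock costs one possibly redundant work run instead of blocking in the hot path.
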
Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200702162951.45392-4-tony@atomide.com --- drivers/net/wireless/ti/wlcore/main.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 458457cbab59..4cdfd4f566af 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -652,6 +652,7 @@ static irqreturn_t wlcore_irq(int irq, void *cookie) int ret; unsigned long flags; struct wl1271 *wl = cookie; + bool queue_tx_work = true; set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); @@ -684,13 +685,17 @@ static irqreturn_t wlcore_irq(int irq, void *cookie) if (ret) wl12xx_queue_recovery_work(wl); - spin_lock_irqsave(&wl->wl_lock, flags); - /* In case TX was not handled here, queue TX work */ + /* In case TX was not handled in wlcore_irq_locked(), queue TX work */ clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); - if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && - wl1271_tx_total_queue_count(wl) > 0) - ieee80211_queue_work(wl->hw, &wl->tx_work); - spin_unlock_irqrestore(&wl->wl_lock, flags); + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { + if (spin_trylock_irqsave(&wl->wl_lock, flags)) { + if (!wl1271_tx_total_queue_count(wl)) + queue_tx_work = false; + spin_unlock_irqrestore(&wl->wl_lock, flags); + } + if (queue_tx_work) + ieee80211_queue_work(wl->hw, &wl->tx_work); + } mutex_unlock(&wl->mutex); From 2c3601e6a3401ac14e3f581aa3acef4ff1e1b754 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 2 Jul 2020 09:29:51 -0700 Subject: [PATCH 1051/2056] wlcore: Remove pointless spinlock No need to take a spinlock here for bitops. Signed-off-by: Tony Lindgren Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200702162951.45392-5-tony@atomide.com --- drivers/net/wireless/ti/wlcore/main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 4cdfd4f566af..821ad1acd505 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -700,9 +700,7 @@ static irqreturn_t wlcore_irq(int irq, void *cookie) mutex_unlock(&wl->mutex); out_handled: - spin_lock_irqsave(&wl->wl_lock, flags); clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); - spin_unlock_irqrestore(&wl->wl_lock, flags); return IRQ_HANDLED; } From 9187f4e840929161be957fc0be28c51e964f9824 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Fri, 3 Jul 2020 13:21:51 +0200 Subject: [PATCH 1052/2056] mwifiex: Fix reporting 'operation not supported' error code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ENOTSUPP (double PP) is internal linux kernel code 524 available only in kernel include file linux/errno.h and not exported to userspace. EOPNOTSUPP (OP; double PP) is standard code 95 for reporting 'operation not supported' available via kernel include file uapi/asm-generic/errno.h. ENOTSUP (single P) is alias for EOPNOTSUPP defined only in userspace include file bits/errno.h and not available in kernel. Because Linux kernel does not support ENOTSUP (single P) and because userspace does not support ENOTSUPP (double PP), report error code for 'operation not supported' via EOPNOTSUPP macro. This patch fixes problem that mwifiex kernel driver sends to userspace unsupported error codes like: "failed: -524 (No error information)". 
After applying this patch userspace see: "failed: -95 (Not supported)". Signed-off-by: Pali Rohár Acked-by: Ganapathi Bhat Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200703112151.18917-1-pali@kernel.org --- .../net/wireless/marvell/mwifiex/cfg80211.c | 18 +++++++++--------- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 867b5cf385a8..96848fa0e417 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3727,11 +3727,11 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, int ret; if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)) - return -ENOTSUPP; + return -EOPNOTSUPP; /* make sure we are in station mode and connected */ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected)) - return -ENOTSUPP; + return -EOPNOTSUPP; switch (action_code) { case WLAN_TDLS_SETUP_REQUEST: @@ -3799,11 +3799,11 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) || !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) - return -ENOTSUPP; + return -EOPNOTSUPP; /* make sure we are in station mode and connected */ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected)) - return -ENOTSUPP; + return -EOPNOTSUPP; mwifiex_dbg(priv->adapter, MSG, "TDLS peer=%pM, oper=%d\n", peer, action); @@ -3833,7 +3833,7 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, default: mwifiex_dbg(priv->adapter, ERROR, "tdls_oper: operation not supported\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } return mwifiex_tdls_oper(priv, peer, action); @@ -3914,11 +3914,11 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev, struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) - return -ENOTSUPP; + return -EOPNOTSUPP; /* make sure we are in station mode and connected */ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected) - return -ENOTSUPP; + return -EOPNOTSUPP; return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK); } @@ -4151,11 +4151,11 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev, /* we support change_station handler only for TDLS peers*/ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) - return -ENOTSUPP; + return -EOPNOTSUPP; /* make sure we are in station mode and connected */ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected) - return -ENOTSUPP; + return -EOPNOTSUPP; priv->sta_params = params; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 529099137644..9ee5600351a7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -953,7 +953,7 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv, } else { /* Internal mac address change */ if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY) - return -ENOTSUPP; + return -EOPNOTSUPP; mac_addr = old_mac_addr; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 8bd355d7974e..d3a968ef21ef 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ 
b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1723,7 +1723,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv, default: mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS configuration\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } le16_unaligned_add_cpu(&cmd->size, len); @@ -1849,7 +1849,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, break; default: mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } le16_unaligned_add_cpu(&cmd->size, config_len); From e18696786548244914f36ec3c46ac99c53df99c3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 8 Jul 2020 14:58:57 +0300 Subject: [PATCH 1053/2056] mwifiex: Prevent memory corruption handling keys The length of the key comes from the network and it's a 16 bit number. It needs to be capped to prevent a buffer overflow. Fixes: 5e6e3a92b9a4 ("wireless: mwifiex: initial commit for Marvell mwifiex driver") Signed-off-by: Dan Carpenter Acked-by: Ganapathi Bhat Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200708115857.GA13729@mwanda --- .../wireless/marvell/mwifiex/sta_cmdresp.c | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index f21660149f58..962d8bfe6f10 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, { struct host_cmd_ds_802_11_key_material *key = &resp->params.key_material; + int len; + + len = le16_to_cpu(key->key_param_set.key_len); + if (len > sizeof(key->key_param_set.key)) + return -EINVAL; if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) { @@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv, memset(priv->aes_key.key_param_set.key, 0, sizeof(key->key_param_set.key)); - priv->aes_key.key_param_set.key_len = key->key_param_set.key_len; - memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, - le16_to_cpu(priv->aes_key.key_param_set.key_len)); + priv->aes_key.key_param_set.key_len = cpu_to_le16(len); + memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len); return 0; } @@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { struct host_cmd_ds_802_11_key_material_v2 *key_v2; - __le16 len; + int len; key_v2 = &resp->params.key_material_v2; + + len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); + if (len > WLAN_KEY_LEN_CCMP) + return -EINVAL; + if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) { mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n"); @@ -628,10 +637,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, WLAN_KEY_LEN_CCMP); priv->aes_key_v2.key_param_set.key_params.aes.key_len = - key_v2->key_param_set.key_params.aes.key_len; - len = priv->aes_key_v2.key_param_set.key_params.aes.key_len; + cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, - key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len)); + key_v2->key_param_set.key_params.aes.key, len); return 0; } From 001a3c902f6f79d23495f70db856465728edbb5b Mon Sep 17 
00:00:00 2001 From: Zong-Zhe Yang Date: Wed, 15 Jul 2020 10:33:24 +0800 Subject: [PATCH 1054/2056] rtw88: coex: Fix ACL Tx pause during BT inquiry/page. Add a set of logic with corresponding coexistence parameters to handle the situation under BT inquiry/page. We will set PSTDMA while WL-Busy + BT inquiry/page to separate WL/BT slots. PSTDMA can protect WL data rate and BT performance. If WL-Busy + BT inquiry/page and there was BT device paired, We will set the mechanism to 4Slot PSTDMA. In 4Slot PSTDMA, the paired devices can perform more smoothly and prevent some issues trigger from insufficient data. And to avoid A2DP glitch or disconnection, we will adjust ACL data priority higher than inquiry/page. In addition, we found sometimes BT inquiry/page still working last for seconds after BT had notified inquiry/page finished. It will lead to A2DP glitch cause of ACL data, inquiry/page priority toggled. To fix the corner, we add a timer to remain the inquiry/page status. And we found WL busy/idle threshold is too sensitive, it will keep switching in some weak network environment and coexistence mechanism will switch between TDMA and PSTDMA. The very frequently switching may destroyed not only the handshake with AP, but BT performance. And it will trigger some unexpected error. To prevent the frequently switching, we add a timer to delay the status change while WL busy switch to idle. Signed-off-by: Zong-Zhe Yang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200715023324.8600-1-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/coex.c | 98 ++++++++++++++++--- drivers/net/wireless/realtek/rtw88/coex.h | 3 + drivers/net/wireless/realtek/rtw88/main.c | 4 + drivers/net/wireless/realtek/rtw88/main.h | 3 + drivers/net/wireless/realtek/rtw88/rtw8723d.c | 13 +-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 7 +- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 9 +- 7 files changed, 108 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index cbf3d503df1c..3abae32341c4 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -378,6 +378,7 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason) struct rtw_chip_info *chip = rtwdev->chip; struct rtw_traffic_stats *stats = &rtwdev->stats; bool is_5G = false; + bool wl_busy = false; bool scan = false, link = false; int i; u8 rssi_state; @@ -386,7 +387,16 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason) scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags); coex_stat->wl_connected = !!rtwdev->sta_cnt; - coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + + wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + if (wl_busy != coex_stat->wl_gl_busy) { + if (wl_busy) + coex_stat->wl_gl_busy = true; + else + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->wl_remain_work, + 12 * HZ); + } if (stats->tx_throughput > stats->rx_throughput) coex_stat->wl_tput_dir = COEX_WL_TPUT_TX; @@ -888,10 +898,12 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) { struct rtw_coex *coex = &rtwdev->coex; struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_stat *coex_stat = &coex->stat; struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; u8 n, type; bool turn_on; + bool wl_busy = false; if (tcase & TDMA_4SLOT)/* 4-slot (50ms) mode */ 
rtw_coex_tdma_timer_base(rtwdev, 3); @@ -909,13 +921,18 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase) } } - if (turn_on) { - /* enable TBTT interrupt */ + /* enable TBTT interrupt */ + if (turn_on) rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); - rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true); - } else { + + wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + + if ((coex_stat->bt_a2dp_exist && + (coex_stat->bt_inq_remain || coex_stat->bt_multi_link)) || + !wl_busy) rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false); - } + else + rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true); if (efuse->share_ant) { if (type < chip->tdma_sant_num) @@ -1323,20 +1340,31 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev) /* Shared-Ant */ if (wl_hi_pri) { table_case = 15; - if (coex_stat->bt_a2dp_exist && - !coex_stat->bt_pan_exist) { - slot_type = TDMA_4SLOT; - tdma_case = 11; - } else if (coex_stat->wl_hi_pri_task1) { + if (coex_stat->bt_profile_num > 0) + tdma_case = 10; + else if (coex_stat->wl_hi_pri_task1) tdma_case = 6; - } else if (!coex_stat->bt_page) { + else if (!coex_stat->bt_page) tdma_case = 8; - } else { + else tdma_case = 9; + } else if (coex_stat->wl_gl_busy) { + if (coex_stat->bt_profile_num == 0) { + table_case = 12; + tdma_case = 18; + } else if (coex_stat->bt_profile_num == 1 && + !coex_stat->bt_a2dp_exist) { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 20; + } else { + slot_type = TDMA_4SLOT; + table_case = 12; + tdma_case = 26; } } else if (coex_stat->wl_connected) { - table_case = 10; - tdma_case = 10; + table_case = 9; + tdma_case = 27; } else { table_case = 1; tdma_case = 0; @@ -2277,6 +2305,7 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) struct rtw_chip_info *chip = rtwdev->chip; unsigned long bt_relink_time; u8 i, rsp_source = 0, type; + bool inq_page = false; rsp_source = buf[0] & 0xf; if (rsp_source >= COEX_BTINFO_SRC_MAX) @@ -2343,7 +2372,20 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length) /* 0xff means BT is under WHCK test */ coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff); - coex_stat->bt_inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2)); + + inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2)); + + if (inq_page != coex_stat->bt_inq_page) { + cancel_delayed_work_sync(&coex->bt_remain_work); + coex_stat->bt_inq_page = inq_page; + + if (inq_page) + coex_stat->bt_inq_remain = true; + else + ieee80211_queue_delayed_work(rtwdev->hw, + &coex->bt_remain_work, + 4 * HZ); + } coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3)); coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf; if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1) @@ -2518,6 +2560,30 @@ void rtw_coex_defreeze_work(struct work_struct *work) mutex_unlock(&rtwdev->mutex); } +void rtw_coex_wl_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.wl_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags); + rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS); + mutex_unlock(&rtwdev->mutex); +} + +void rtw_coex_bt_remain_work(struct work_struct *work) +{ + struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, + coex.bt_remain_work.work); + struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat; + + mutex_lock(&rtwdev->mutex); + coex_stat->bt_inq_remain = 
coex_stat->bt_inq_page; + rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS); + mutex_unlock(&rtwdev->mutex); +} + #ifdef CONFIG_RTW88_DEBUGFS #define INFO_SIZE 80 diff --git a/drivers/net/wireless/realtek/rtw88/coex.h b/drivers/net/wireless/realtek/rtw88/coex.h index 4c3a01968f5e..44720fdfc053 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.h +++ b/drivers/net/wireless/realtek/rtw88/coex.h @@ -95,6 +95,7 @@ enum coex_runreason { COEX_RSN_BTINFO = 12, COEX_RSN_LPS = 13, COEX_RSN_WLSTATUS = 14, + COEX_RSN_BTSTATUS = 15, COEX_RSN_MAX }; @@ -362,6 +363,8 @@ void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set); void rtw_coex_bt_relink_work(struct work_struct *work); void rtw_coex_bt_reenable_work(struct work_struct *work); void rtw_coex_defreeze_work(struct work_struct *work); +void rtw_coex_wl_remain_work(struct work_struct *work); +void rtw_coex_bt_remain_work(struct work_struct *work); void rtw_coex_power_on_setting(struct rtw_dev *rtwdev); void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only); diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 9992f56ff624..7304e8bc5e31 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -935,6 +935,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev) cancel_delayed_work_sync(&coex->bt_relink_work); cancel_delayed_work_sync(&coex->bt_reenable_work); cancel_delayed_work_sync(&coex->defreeze_work); + cancel_delayed_work_sync(&coex->wl_remain_work); + cancel_delayed_work_sync(&coex->bt_remain_work); mutex_lock(&rtwdev->mutex); @@ -1426,6 +1428,8 @@ int rtw_core_init(struct rtw_dev *rtwdev) INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work); INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work); INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work); + INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work); + INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work); INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work); INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work); skb_queue_head_init(&rtwdev->c2h_queue); diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 776ec0590aba..45ebc5a70b1e 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1267,6 +1267,7 @@ struct rtw_coex_stat { bool bt_link_exist; bool bt_whck_test; bool bt_inq_page; + bool bt_inq_remain; bool bt_inq; bool bt_page; bool bt_ble_voice; @@ -1367,6 +1368,8 @@ struct rtw_coex { struct delayed_work bt_relink_work; struct delayed_work bt_reenable_work; struct delayed_work defreeze_work; + struct delayed_work wl_remain_work; + struct delayed_work bt_remain_work; }; #define DPK_RF_REG_NUM 7 diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index 4700195c8eef..3ddd170f1651 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -1956,13 +1956,13 @@ static const struct coex_table_para table_sant_8723d[] = { {0xa5555555, 0xaaaa5aaa}, {0x6a5a5a5a, 0x5a5a5a5a}, {0x6a5a5a5a, 0x6a5a5a5a}, - {0x65555555, 0x5a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, {0x65555555, 0x6a5a5a5a}, /* case-10 */ {0x65555555, 0xfafafafa}, - {0x65555555, 0x6a5a5aaa}, + {0x66555555, 0x5a5a5aaa}, {0x65555555, 0x5aaa5aaa}, {0x65555555, 0xaaaa5aaa}, - {0x65555555, 0xaaaaaaaa}, /* case-15 */ + {0x66555555, 0xaaaaaaaa}, /* case-15 */ {0xffff55ff, 
0xfafafafa}, {0xffff55ff, 0x6afa5afa}, {0xaaffffaa, 0xfafafafa}, @@ -2034,8 +2034,9 @@ static const struct coex_tdma_para tdma_sant_8723d[] = { { {0x51, 0x0c, 0x03, 0x10, 0x54} }, { {0x55, 0x08, 0x03, 0x10, 0x54} }, { {0x65, 0x10, 0x03, 0x11, 0x11} }, - { {0x51, 0x10, 0x03, 0x10, 0x51} }, - { {0x61, 0x15, 0x03, 0x11, 0x10} } + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x08, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} } }; /* Non-Shared-Antenna TDMA */ @@ -2714,7 +2715,7 @@ struct rtw_chip_info rtw8723d_hw_spec = { .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl, .iqk_threshold = 8, - .coex_para_ver = 0x1905302f, + .coex_para_ver = 0x2007022f, .bt_desired_ver = 0x2f, .scbd_support = true, .new_scbd10_def = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index e49bdd76ab9a..351cd055a295 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2147,7 +2147,7 @@ static const struct coex_table_para table_sant_8822b[] = { {0x66555555, 0x5a5a5a5a}, {0x66555555, 0x6a5a5a5a}, /* case-10 */ {0x66555555, 0xfafafafa}, - {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x5a5a5aaa}, {0x66555555, 0x5aaa5aaa}, {0x66555555, 0xaaaa5aaa}, {0x66555555, 0xaaaaaaaa}, /* case-15 */ @@ -2223,7 +2223,8 @@ static const struct coex_tdma_para tdma_sant_8822b[] = { { {0x55, 0x08, 0x03, 0x10, 0x54} }, { {0x65, 0x10, 0x03, 0x11, 0x11} }, { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ - { {0x51, 0x08, 0x03, 0x10, 0x50} } + { {0x51, 0x08, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} } }; /* Non-Shared-Antenna TDMA */ @@ -2475,7 +2476,7 @@ struct rtw_chip_info rtw8822b_hw_spec = { .bfer_mu_max_num = 1, .rx_ldpc = true, - .coex_para_ver = 0x19062706, + .coex_para_ver = 0x20070206, .bt_desired_ver = 0x6, .scbd_support = true, .new_scbd10_def = false, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index 01b12d6b5439..426808413baa 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -3998,7 +3998,7 @@ static const struct coex_table_para table_sant_8822c[] = { {0x66555555, 0x5a5a5a5a}, {0x66555555, 0x6a5a5a5a}, /* case-10 */ {0x66555555, 0xfafafafa}, - {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x5a5a5aaa}, {0x66555555, 0x5aaa5aaa}, {0x66555555, 0xaaaa5aaa}, {0x66555555, 0xaaaaaaaa}, /* case-15 */ @@ -4074,7 +4074,8 @@ static const struct coex_tdma_para tdma_sant_8822c[] = { { {0x55, 0x08, 0x03, 0x10, 0x54} }, { {0x65, 0x10, 0x03, 0x11, 0x11} }, { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ - { {0x51, 0x08, 0x03, 0x10, 0x50} } + { {0x51, 0x08, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} } }; /* Non-Shared-Antenna TDMA */ @@ -4344,8 +4345,8 @@ struct rtw_chip_info rtw8822c_hw_spec = { .wowlan_stub = &rtw_wowlan_stub_8822c, .max_sched_scan_ssids = 4, #endif - .coex_para_ver = 0x19062706, - .bt_desired_ver = 0x6, + .coex_para_ver = 0x20070217, + .bt_desired_ver = 0x17, .scbd_support = true, .new_scbd10_def = true, .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, From 0ef2c2d1a9d09aaaf91b1732c07f096ee60178b7 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:05 +0200 Subject: [PATCH 1055/2056] wireless: fix wiki website url in main Kconfig The wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: 
https://lore.kernel.org/r/20200605154112.16277-3-f.suligoi@asem.it --- drivers/net/wireless/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 75f18c1e5009..59af9ab0b1f6 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -14,7 +14,7 @@ menuconfig WLAN device drivers. For a complete list of drivers and documentation on them refer to the wireless wiki: - http://wireless.kernel.org/en/users/Drivers + https://wireless.wiki.kernel.org/en/users/Drivers if WLAN From eb17a4f9acf1348bb81aa6cbe8ed1a9321c22015 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:07 +0200 Subject: [PATCH 1056/2056] atmel: fix wiki website url In at76c50x-usb.c the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200605154112.16277-5-f.suligoi@asem.it --- drivers/net/wireless/atmel/at76c50x-usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 3b2680772f03..a63b5c2f1e17 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -17,7 +17,7 @@ * * TODO list is at the wiki: * - * http://wireless.kernel.org/en/users/Drivers/at76c50x-usb#TODO + * https://wireless.wiki.kernel.org/en/users/Drivers/at76c50x-usb#TODO */ #include From 8bd4147c4b1719a606a5525d6ddd2a8f9b39cd54 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 5 Jun 2020 17:41:08 +0200 Subject: [PATCH 1057/2056] broadcom: fix wiki website url In some b43 files, the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200605154112.16277-6-f.suligoi@asem.it --- drivers/net/wireless/broadcom/b43/main.c | 2 +- drivers/net/wireless/broadcom/b43legacy/main.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 3ad94dad2d89..0c2ea12fca19 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -2164,7 +2164,7 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error) { const char text[] = "You must go to " \ - "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \ + "https://wireless.wiki.kernel.org/en/users/Drivers/b43#devicefirmware " \ "and download the correct firmware for this driver version. 
" \ "Please carefully read all instructions on this website.\n"; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 5208a39fd6f7..7bb6681fa882 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -1477,8 +1477,8 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev) static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl) { - b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/" - "Drivers/b43#devicefirmware " + b43legacyerr(wl, "You must go to https://wireless.wiki.kernel.org/en/" + "users/Drivers/b43#devicefirmware " "and download the correct firmware (version 3).\n"); } From 0df9edb37f3c1479e060d4aca73000afe5fe69d4 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 3 Jun 2020 19:57:32 -0500 Subject: [PATCH 1058/2056] rtlwifi: Fix endian issue in ps.c Sparse reports the following endian issues: CHECK drivers/net/wireless/realtek/rtlwifi/ps.c drivers/net/wireless/realtek/rtlwifi/ps.c:772:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:772:42: warning: cast to restricted __le32 drivers/net/wireless/realtek/rtlwifi/ps.c:775:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:775:42: warning: cast to restricted __le32 drivers/net/wireless/realtek/rtlwifi/ps.c:778:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:778:42: warning: cast to restricted __le32 drivers/net/wireless/realtek/rtlwifi/ps.c:867:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:867:42: warning: cast to restricted __le32 drivers/net/wireless/realtek/rtlwifi/ps.c:870:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:870:42: warning: cast to restricted __le32 drivers/net/wireless/realtek/rtlwifi/ps.c:873:42: warning: restricted __le32 degrades to integer drivers/net/wireless/realtek/rtlwifi/ps.c:873:42: warning: cast to restricted __le32 Signed-off-by: Larry Finger Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604005733.7905-2-Larry.Finger@lwfinger.net --- drivers/net/wireless/realtek/rtlwifi/ps.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index bc0ac96ee615..90f92728e16a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -769,13 +769,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data, *(u8 *)(ie + index); index += 1; p2pinfo->noa_duration[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; p2pinfo->noa_interval[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; p2pinfo->noa_start_time[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; } @@ -864,13 +864,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data, *(u8 *)(ie + index); index += 1; p2pinfo->noa_duration[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; p2pinfo->noa_interval[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; p2pinfo->noa_start_time[i] = - le32_to_cpu(*(__le32 *)ie + index); + le32_to_cpu(*(__le32 *)(ie + index)); index += 4; } 
From 97794e638cf53fd92413a30657968f24c9f74306 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Wed, 3 Jun 2020 19:57:33 -0500 Subject: [PATCH 1059/2056] rtlwifi: rtl8188ee: Fix endian issue Sparse reports the following issue: CHECK drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c:500:26: warning: incorrect type in initializer (different base types) drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c:500:26: expected restricted __le32 [usertype] *pdesc drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c:500:26: got unsigned int [usertype] * Signed-off-by: Larry Finger Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200604005733.7905-3-Larry.Finger@lwfinger.net --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index aa2e9e88be53..a5d2d6ece8db 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -497,7 +497,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, dma_addr_t mapping; u8 bw_40 = 0; u8 short_gi = 0; - __le32 *pdesc = (u32 *)pdesc8; + __le32 *pdesc = (__le32 *)pdesc8; if (mac->opmode == NL80211_IFTYPE_STATION) { bw_40 = mac->bw_40; From ad806454c3cb5cf0b28ebcfaedfaa3ad4bb4a65a Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Fri, 19 Jun 2020 11:31:02 +0200 Subject: [PATCH 1060/2056] orinoco_usb: fix spelling mistake Fix typo: "EZUSB_REQUEST_TRIGER" --> "EZUSB_REQUEST_TRIGGER" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200619093102.29487-1-f.suligoi@asem.it --- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 651c676b5506..11fa38fedd87 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -158,7 +158,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw"); #define EZUSB_REQUEST_FW_TRANS 0xA0 -#define EZUSB_REQUEST_TRIGER 0xAA +#define EZUSB_REQUEST_TRIGGER 0xAA #define EZUSB_REQUEST_TRIG_AC 0xAC #define EZUSB_CPUCS_REG 0x7F92 @@ -1318,12 +1318,12 @@ static int ezusb_hard_reset(struct orinoco_private *priv) netdev_dbg(upriv->dev, "sending control message\n"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), - EZUSB_REQUEST_TRIGER, + EZUSB_REQUEST_TRIGGER, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0, 0x0, NULL, 0, DEF_TIMEOUT); if (retval < 0) { - err("EZUSB_REQUEST_TRIGER failed retval %d", retval); + err("EZUSB_REQUEST_TRIGGER failed retval %d", retval); return retval; } #if 0 From 800e7a205a0f5061d7818eb26a53b65548139fed Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 19 Jun 2020 20:29:55 -0700 Subject: [PATCH 1061/2056] b43: Remove uninitialized_var() usage Using uninitialized_var() is dangerous as it papers over real bugs[1] (or can in the future), and suppresses unrelated compiler warnings (e.g. "unused variable"). If the compiler thinks it is uninitialized, either simply initialize the variable or make compiler changes. As a precursor to removing[2] this[3] macro[4], just initialize this variable to NULL. 
No later NULL deref is possible due to the early returns outside of the (phy->rev >= 7 && phy->rev < 19) case, which explicitly tests for NULL. [1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/ [2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/ [3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/ [4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/ Fixes: 58619b14d106 ("b43: move under broadcom vendor directory") Signed-off-by: Kees Cook Reviewed-by: Nick Desaulniers Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200620033007.1444705-5-keescook@chromium.org --- drivers/net/wireless/broadcom/b43/phy_n.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index c33b4235839d..46db91846007 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -4222,7 +4222,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) u32 rfpwr_offset; u8 pga_gain, pad_gain; int i; - const s16 *uninitialized_var(rf_pwr_offset_table); + const s16 *rf_pwr_offset_table = NULL; table = b43_nphy_get_tx_gain_table(dev); if (!table) From f8279dad4e36c1e8fcdb54114306eca879275e30 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 19 Jun 2020 20:29:56 -0700 Subject: [PATCH 1062/2056] rtlwifi: rtl8192cu: Remove uninitialized_var() usage Using uninitialized_var() is dangerous as it papers over real bugs[1] (or can in the future), and suppresses unrelated compiler warnings (e.g. "unused variable"). If the compiler thinks it is uninitialized, either simply initialize the variable or make compiler changes. As a precursor to removing[2] this[3] macro[4], just initialize this variable to NULL, and avoid sending garbage by returning. [1] https://lore.kernel.org/lkml/20200603174714.192027-1-glider@google.com/ [2] https://lore.kernel.org/lkml/CA+55aFw+Vbj0i=1TGqCR5vQkCzWJ0QxK6CernOU6eedsudAixw@mail.gmail.com/ [3] https://lore.kernel.org/lkml/CA+55aFwgbgqhbp1fkxvRKEpzyR5J8n1vKT1VZdz9knmPuXhOeg@mail.gmail.com/ [4] https://lore.kernel.org/lkml/CA+55aFz2500WfbKXAx8s67wrm9=yVJu65TpLgN_ybYNv0VEOKA@mail.gmail.com/ Fixes: dc0313f46664 ("rtlwifi: rtl8192cu: Add routine hw") Reviewed-by: Nick Desaulniers Acked-by: Kalle Valo Signed-off-by: Kees Cook Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200620033007.1444705-6-keescook@chromium.org --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index f070f25bb735..5b071b70bc08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -592,7 +592,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, bool wmm_enable, u8 queue_sel) { - u16 uninitialized_var(value); + u16 value; switch (queue_sel) { case TX_SELE_HQ: @@ -606,7 +606,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, break; default: WARN_ON(1); /* Shall not reach here! 
*/ - break; + return; } _rtl92c_init_chipn_reg_priority(hw, value, value, value, value, value, value); From ec89032cd148f8805c5adf5d38f1f382036f5a5c Mon Sep 17 00:00:00 2001 From: Reto Schneider Date: Mon, 22 Jun 2020 15:21:11 +0200 Subject: [PATCH 1063/2056] rtlwifi: rtl8192cu: Fix deadlock Prevent code from calling itself indirectly, causing the driver to hang and consume 100% CPU. Without this fix, the following script can bring down a single CPU system: ``` while true; do rmmod rtl8192cu modprobe rtl8192cu done ``` Signed-off-by: Reto Schneider ACKed-by: Larry Finger Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200622132113.14508-2-code@reto-schneider.ch --- drivers/net/wireless/realtek/rtlwifi/usb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index c66c6dc00378..d3bbfc0171f6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -680,8 +680,10 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw) tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); - flush_workqueue(rtlpriv->works.rtl_wq); - destroy_workqueue(rtlpriv->works.rtl_wq); + if (rtlpriv->works.rtl_wq) { + destroy_workqueue(rtlpriv->works.rtl_wq); + rtlpriv->works.rtl_wq = NULL; + } skb_queue_purge(&rtlusb->rx_queue); From 03128643eb5453a798db5770952c73dc64fcaf00 Mon Sep 17 00:00:00 2001 From: Reto Schneider Date: Mon, 22 Jun 2020 15:21:12 +0200 Subject: [PATCH 1064/2056] rtlwifi: rtl8192cu: Prevent leaking urb If usb_submit_urb fails the allocated urb should be unanchored and released. Signed-off-by: Reto Schneider Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200622132113.14508-3-code@reto-schneider.ch --- drivers/net/wireless/realtek/rtlwifi/usb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index d3bbfc0171f6..1a9fcb2697bf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -720,8 +720,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw) usb_anchor_urb(urb, &rtlusb->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); - if (err) + if (err) { + usb_unanchor_urb(urb); + usb_free_urb(urb); goto err_out; + } usb_free_urb(urb); } return 0; From a7f7c15e945a24fbb8ff99d3b19ab0dcd764030d Mon Sep 17 00:00:00 2001 From: Reto Schneider Date: Mon, 22 Jun 2020 15:21:13 +0200 Subject: [PATCH 1065/2056] rtlwifi: rtl8192cu: Free ieee80211_hw if probing fails In case of an error, no one will use the allocated structure. Call ieee80211_free_hw, same as in rtl_usb_disconnect. 
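
The rule the fix follows can be illustrated with a schematic probe skeleton: everything allocated by the probe path must be released on every failure exit, including the ieee80211_hw itself. This is a simplified sketch, not rtl_usb_probe(); demo_setup() is a hypothetical stand-in for the device-specific initialisation.

```c
#include <linux/usb.h>
#include <net/mac80211.h>

static int demo_setup(struct ieee80211_hw *hw, struct usb_interface *intf);
					/* hypothetical init step */

static int demo_probe(struct usb_interface *intf,
		      const struct ieee80211_ops *ops, size_t priv_len)
{
	struct ieee80211_hw *hw;
	int err;

	hw = ieee80211_alloc_hw(priv_len, ops);
	if (!hw)
		return -ENOMEM;

	err = demo_setup(hw, intf);
	if (err)
		goto err_free_hw;

	err = ieee80211_register_hw(hw);
	if (err)
		goto err_free_hw;

	usb_set_intfdata(intf, hw);
	return 0;

err_free_hw:
	ieee80211_free_hw(hw);		/* undo ieee80211_alloc_hw() */
	return err;
}
```
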
Signed-off-by: Reto Schneider Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200622132113.14508-4-code@reto-schneider.ch --- drivers/net/wireless/realtek/rtlwifi/usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 1a9fcb2697bf..d05e709536ea 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -1087,6 +1087,7 @@ int rtl_usb_probe(struct usb_interface *intf, usb_put_dev(udev); complete(&rtlpriv->firmware_loading_complete); kfree(rtlpriv->usb_data); + ieee80211_free_hw(hw); return -ENODEV; } EXPORT_SYMBOL(rtl_usb_probe); From ef6425107ccceabf2743e7c848d174a4d51808ab Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 23 Jun 2020 15:14:55 +0530 Subject: [PATCH 1066/2056] rtl818x_pci: use generic power management Earlier, drivers had to manage the device's power states, and related operations, themselves. With the generic approach, these are done by PCI core. The only driver-specific jobs, .suspend() and .resume() doing were invoking PCI helper functions pci_save/restore_state() and pci_set_power_state(). This is not recommeneded as PCI core takes care of that. Hence they became empty-body functions, thus define them NULL. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200623094454.12427-1-vaibhavgupta40@gmail.com --- .../wireless/realtek/rtl818x/rtl8180/dev.c | 23 ++++--------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index d5f65372356b..ba3286f732cc 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -1966,32 +1966,17 @@ static void rtl8180_remove(struct pci_dev *pdev) ieee80211_free_hw(dev); } -#ifdef CONFIG_PM -static int rtl8180_suspend(struct pci_dev *pdev, pm_message_t state) -{ - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; -} +#define rtl8180_suspend NULL +#define rtl8180_resume NULL -static int rtl8180_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return 0; -} - -#endif /* CONFIG_PM */ +static SIMPLE_DEV_PM_OPS(rtl8180_pm_ops, rtl8180_suspend, rtl8180_resume); static struct pci_driver rtl8180_driver = { .name = KBUILD_MODNAME, .id_table = rtl8180_table, .probe = rtl8180_probe, .remove = rtl8180_remove, -#ifdef CONFIG_PM - .suspend = rtl8180_suspend, - .resume = rtl8180_resume, -#endif /* CONFIG_PM */ + .driver.pm = &rtl8180_pm_ops, }; module_pci_driver(rtl8180_driver); From c689a62b6712475dccee852a5214896014a945a0 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Wed, 24 Jun 2020 23:10:49 +0530 Subject: [PATCH 1067/2056] orinoco: use generic power management With the support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. The required operations are done by PCI core. PCI drivers are not expected to invoke PCI helper functions like pci_save/restore_state(), pci_enable/disable_device(), pci_set_power_state(), etc. Their tasks are completed by PCI core itself. Compile-tested only. 
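
The conversions in this and the following patches all follow the same shape, so a generic skeleton may help readers who are not familiar with it. This is a sketch, not a complete compilable driver: demo_priv, demo_stop_device(), demo_start_device(), demo_probe(), demo_remove() and demo_id_table are placeholders for the driver-specific pieces. The point is that the callbacks take a struct device and no longer call pci_save/restore_state(), pci_set_power_state() or pci_enable/disable_device(); the PCI core does that.

```c
#include <linux/pci.h>
#include <linux/pm.h>

static int __maybe_unused demo_suspend(struct device *dev)
{
	struct demo_priv *priv = dev_get_drvdata(dev);

	demo_stop_device(priv);		/* driver-specific quiesce only */
	return 0;			/* PCI core handles D-states and config space */
}

static int __maybe_unused demo_resume(struct device *dev)
{
	struct demo_priv *priv = dev_get_drvdata(dev);

	return demo_start_device(priv);	/* driver-specific re-init only */
}

static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
	.remove		= demo_remove,
	.driver.pm	= &demo_pm_ops,	/* replaces legacy .suspend/.resume */
};
```

The __maybe_unused annotation keeps the build warning-free when CONFIG_PM is disabled, which is why the old #ifdef CONFIG_PM blocks can be dropped.
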
Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200624174048.64754-1-vaibhavgupta40@gmail.com --- .../intersil/orinoco/orinoco_nortel.c | 3 +- .../wireless/intersil/orinoco/orinoco_pci.c | 3 +- .../wireless/intersil/orinoco/orinoco_pci.h | 32 ++++++------------- .../wireless/intersil/orinoco/orinoco_plx.c | 3 +- .../wireless/intersil/orinoco/orinoco_tmd.c | 3 +- 5 files changed, 13 insertions(+), 31 deletions(-) diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c index 048693b6c6c2..96a03d10a080 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_nortel.c @@ -290,8 +290,7 @@ static struct pci_driver orinoco_nortel_driver = { .id_table = orinoco_nortel_id_table, .probe = orinoco_nortel_init_one, .remove = orinoco_nortel_remove_one, - .suspend = orinoco_pci_suspend, - .resume = orinoco_pci_resume, + .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c index 4938a2208a37..f3c86b07b1b9 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.c @@ -230,8 +230,7 @@ static struct pci_driver orinoco_pci_driver = { .id_table = orinoco_pci_id_table, .probe = orinoco_pci_init_one, .remove = orinoco_pci_remove_one, - .suspend = orinoco_pci_suspend, - .resume = orinoco_pci_resume, + .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_pci.h b/drivers/net/wireless/intersil/orinoco/orinoco_pci.h index 43f5b9f5a0b0..d49d940864b4 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_pci.h +++ b/drivers/net/wireless/intersil/orinoco/orinoco_pci.h @@ -18,51 +18,37 @@ struct orinoco_pci_card { void __iomem *attr_io; }; -#ifdef CONFIG_PM -static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused orinoco_pci_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct orinoco_private *priv = pci_get_drvdata(pdev); orinoco_down(priv); free_irq(pdev->irq, priv); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); return 0; } -static int orinoco_pci_resume(struct pci_dev *pdev) +static int __maybe_unused orinoco_pci_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct orinoco_private *priv = pci_get_drvdata(pdev); struct net_device *dev = priv->ndev; int err; - pci_set_power_state(pdev, PCI_D0); - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", - dev->name); - return err; - } - pci_restore_state(pdev); - err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, dev->name, priv); if (err) { printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n", dev->name); - pci_disable_device(pdev); return -EBUSY; } - err = orinoco_up(priv); - - return err; + return orinoco_up(priv); } -#else -#define orinoco_pci_suspend NULL -#define orinoco_pci_resume NULL -#endif + +static SIMPLE_DEV_PM_OPS(orinoco_pci_pm_ops, + orinoco_pci_suspend, + orinoco_pci_resume); #endif /* _ORINOCO_PCI_H */ diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c index 
221352027779..16dada94c774 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_plx.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_plx.c @@ -336,8 +336,7 @@ static struct pci_driver orinoco_plx_driver = { .id_table = orinoco_plx_id_table, .probe = orinoco_plx_init_one, .remove = orinoco_plx_remove_one, - .suspend = orinoco_pci_suspend, - .resume = orinoco_pci_resume, + .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c index 20ce569b8a43..9a9d335611ac 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_tmd.c @@ -213,8 +213,7 @@ static struct pci_driver orinoco_tmd_driver = { .id_table = orinoco_tmd_id_table, .probe = orinoco_tmd_init_one, .remove = orinoco_tmd_remove_one, - .suspend = orinoco_pci_suspend, - .resume = orinoco_pci_resume, + .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION From 04bceecdf1e893cd825cd6b2fe788e414a8b18cd Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 09:20:32 +0530 Subject: [PATCH 1068/2056] adm8211: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. In the case of adm8211, after removing PCI helper functions, .suspend() and .resume() became empty-body functions. Hence, define them NULL and use dev_pm_ops. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629035031.169670-1-vaibhavgupta40@gmail.com --- drivers/net/wireless/admtek/adm8211.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index ba326f6c1214..22f9f2f8af10 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -1976,35 +1976,20 @@ static void adm8211_remove(struct pci_dev *pdev) } -#ifdef CONFIG_PM -static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state) -{ - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; -} - -static int adm8211_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - return 0; -} -#endif /* CONFIG_PM */ - +#define adm8211_suspend NULL +#define adm8211_resume NULL MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table); +static SIMPLE_DEV_PM_OPS(adm8211_pm_ops, adm8211_suspend, adm8211_resume); + /* TODO: implement enable_wake */ static struct pci_driver adm8211_driver = { .name = "adm8211", .id_table = adm8211_pci_id_table, .probe = adm8211_probe, .remove = adm8211_remove, -#ifdef CONFIG_PM - .suspend = adm8211_suspend, - .resume = adm8211_resume, -#endif /* CONFIG_PM */ + .driver.pm = &adm8211_pm_ops, }; module_pci_driver(adm8211_driver); From 814db61adb8676939335e39446f653cd354ea538 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 12:55:24 +0530 Subject: [PATCH 1069/2056] ipw2100: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. 
After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. The driver was invoking PCI helper functions like pci_save/restore_state(), pci_enable/disable_device() and pci_set_power_state(), which is not recommended. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629072525.156154-2-vaibhavgupta40@gmail.com --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 31 +++++--------------- 1 file changed, 7 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 624fe721e2b5..57ce55728808 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -6397,10 +6397,9 @@ static void ipw2100_pci_remove_one(struct pci_dev *pci_dev) IPW_DEBUG_INFO("exit\n"); } -#ifdef CONFIG_PM -static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int __maybe_unused ipw2100_suspend(struct device *dev_d) { - struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); + struct ipw2100_priv *priv = dev_get_drvdata(dev_d); struct net_device *dev = priv->net_dev; IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); @@ -6414,10 +6413,6 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) /* Remove the PRESENT state of the device */ netif_device_detach(dev); - pci_save_state(pci_dev); - pci_disable_device(pci_dev); - pci_set_power_state(pci_dev, PCI_D3hot); - priv->suspend_at = ktime_get_boottime_seconds(); mutex_unlock(&priv->action_mutex); @@ -6425,11 +6420,11 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) return 0; } -static int ipw2100_resume(struct pci_dev *pci_dev) +static int __maybe_unused ipw2100_resume(struct device *dev_d) { + struct pci_dev *pci_dev = to_pci_dev(dev_d); struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); struct net_device *dev = priv->net_dev; - int err; u32 val; if (IPW2100_PM_DISABLED) @@ -6439,16 +6434,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev) IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); - pci_set_power_state(pci_dev, PCI_D0); - err = pci_enable_device(pci_dev); - if (err) { - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", - dev->name); - mutex_unlock(&priv->action_mutex); - return err; - } - pci_restore_state(pci_dev); - /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries @@ -6473,7 +6458,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev) return 0; } -#endif static void ipw2100_shutdown(struct pci_dev *pci_dev) { @@ -6539,15 +6523,14 @@ static const struct pci_device_id ipw2100_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table); +static SIMPLE_DEV_PM_OPS(ipw2100_pm_ops, ipw2100_suspend, ipw2100_resume); + static struct pci_driver ipw2100_pci_driver = { .name = DRV_NAME, .id_table = ipw2100_pci_id_table, .probe = ipw2100_pci_init_one, .remove = ipw2100_pci_remove_one, -#ifdef CONFIG_PM - .suspend = ipw2100_suspend, - .resume = ipw2100_resume, -#endif + .driver.pm = &ipw2100_pm_ops, .shutdown = ipw2100_shutdown, }; From 77b4ad07699fba9f899b4f4fa17c62326e54e8bc Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 12:55:25 +0530 Subject: [PATCH 1070/2056] ipw2200: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states 
and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. The driver was invoking PCI helper functions like pci_save/restore_state(), pci_enable/disable_device() and pci_set_power_state(), which is not recommended. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629072525.156154-3-vaibhavgupta40@gmail.com --- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 30 +++++--------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 661e63bfc892..39ff3a426092 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -11838,10 +11838,9 @@ static void ipw_pci_remove(struct pci_dev *pdev) free_firmware(); } -#ifdef CONFIG_PM -static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ipw_pci_suspend(struct device *dev_d) { - struct ipw_priv *priv = pci_get_drvdata(pdev); + struct ipw_priv *priv = dev_get_drvdata(dev_d); struct net_device *dev = priv->net_dev; printk(KERN_INFO "%s: Going into suspend...\n", dev->name); @@ -11852,33 +11851,20 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) /* Remove the PRESENT state of the device */ netif_device_detach(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - priv->suspend_at = ktime_get_boottime_seconds(); return 0; } -static int ipw_pci_resume(struct pci_dev *pdev) +static int __maybe_unused ipw_pci_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct ipw_priv *priv = pci_get_drvdata(pdev); struct net_device *dev = priv->net_dev; - int err; u32 val; printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name); - pci_set_power_state(pdev, PCI_D0); - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", - dev->name); - return err; - } - pci_restore_state(pdev); - /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries @@ -11900,7 +11886,6 @@ static int ipw_pci_resume(struct pci_dev *pdev) return 0; } -#endif static void ipw_pci_shutdown(struct pci_dev *pdev) { @@ -11912,16 +11897,15 @@ static void ipw_pci_shutdown(struct pci_dev *pdev) pci_disable_device(pdev); } +static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume); + /* driver initialization stuff */ static struct pci_driver ipw_driver = { .name = DRV_NAME, .id_table = card_ids, .probe = ipw_pci_probe, .remove = ipw_pci_remove, -#ifdef CONFIG_PM - .suspend = ipw_pci_suspend, - .resume = ipw_pci_resume, -#endif + .driver.pm = &ipw_pci_pm_ops, .shutdown = ipw_pci_shutdown, }; From ddfa943f246a668ed68e8dc688acf17c4540062a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 1 Jul 2020 14:52:21 +0100 Subject: [PATCH 1071/2056] iwlegacy: remove redundant initialization of variable tid The variable tid is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. 
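Reduced to a minimal, hypothetical function (not code from 4965-rs.c), the pattern Coverity flags looks like this:

static u8 pick_tid(u8 qos_ctl)
{
	u8 tid = MAX_TID_COUNT;	/* dead store: this value is never read */

	tid = qos_ctl & 0xf;	/* unconditionally overwritten before any use */
	return tid;
}

Dropping the initializer, as done below, changes no behaviour; it only removes the dead store.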
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200701135221.549700-1-colin.king@canonical.com --- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c index 0a02d8aca320..1f196665d21f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c @@ -1749,7 +1749,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb, u8 done_search = 0; u16 high_low; s32 sr; - u8 tid = MAX_TID_COUNT; + u8 tid; struct il_tid_data *tid_data; D_RATE("rate scale calculate new rate for skb\n"); From b28bd97c1c191ade3736b33f2b24e5b288b90ab4 Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Thu, 2 Jul 2020 01:57:01 +0000 Subject: [PATCH 1072/2056] airo: use set_current_state macro Use set_current_state macro instead of current->state = TASK_RUNNING. Signed-off-by: Xu Wang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200702015701.8606-1-vulab@iscas.ac.cn --- drivers/net/wireless/cisco/airo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 827bb6d74815..f0bcb67095f1 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -3113,7 +3113,7 @@ static int airo_thread(void *data) { } break; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&ai->thr_wait, &wait); locked = 1; } From 37adf701dd8790fd019c513b7a892d7178170338 Mon Sep 17 00:00:00 2001 From: Daniel Winkler Date: Tue, 14 Jul 2020 14:16:00 -0700 Subject: [PATCH 1073/2056] Bluetooth: Add per-instance adv disable/remove Add functionality to disable and remove advertising instances, and use that functionality in MGMT add/remove advertising calls. Currently, advertising is globally-disabled, i.e. all instances are disabled together, even if hardware offloading is available. This patch adds functionality to disable and remove individual adv instances, solving two issues: 1. On new advertisement registration, a global disable was done, and then only the new instance was enabled. This meant only the newest instance was actually enabled. 2. On advertisement removal, the structure was removed, but the instance was never disabled or removed, which is incorrect with hardware offload support. Signed-off-by: Daniel Winkler Reviewed-by: Shyh-In Hwang Reviewed-by: Alain Michaud Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_conn.c | 2 +- net/bluetooth/hci_request.c | 59 +++++++++++++++++++++++++++++++------ net/bluetooth/hci_request.h | 2 ++ net/bluetooth/mgmt.c | 6 ++++ 4 files changed, 59 insertions(+), 10 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 8805d68e65f2..be67361ff2f0 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -931,7 +931,7 @@ static void hci_req_directed_advertising(struct hci_request *req, * So it is required to remove adv set for handle 0x00. since we use * instance 0 for directed adv. 
*/ - hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle); + __hci_req_remove_ext_adv_instance(req, cp.handle); hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 770b93758112..7c0c2fda04ad 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -1179,13 +1179,8 @@ static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev) void __hci_req_disable_advertising(struct hci_request *req) { if (ext_adv_capable(req->hdev)) { - struct hci_cp_le_set_ext_adv_enable cp; + __hci_req_disable_ext_adv_instance(req, 0x00); - cp.enable = 0x00; - /* Disable all sets since we only support one set at the moment */ - cp.num_of_sets = 0x00; - - hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), &cp); } else { u8 enable = 0x00; @@ -1950,13 +1945,59 @@ int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) return 0; } +int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance) +{ + struct hci_dev *hdev = req->hdev; + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *adv_set; + u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; + u8 req_size; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0 && !hci_find_adv_instance(hdev, instance)) + return -EINVAL; + + memset(data, 0, sizeof(data)); + + cp = (void *)data; + adv_set = (void *)cp->data; + + /* Instance 0x00 indicates all advertising instances will be disabled */ + cp->num_of_sets = !!instance; + cp->enable = 0x00; + + adv_set->handle = instance; + + req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; + hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data); + + return 0; +} + +int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance) +{ + struct hci_dev *hdev = req->hdev; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0 && !hci_find_adv_instance(hdev, instance)) + return -EINVAL; + + hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance); + + return 0; +} + int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) { struct hci_dev *hdev = req->hdev; + struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); int err; - if (hci_dev_test_flag(hdev, HCI_LE_ADV)) - __hci_req_disable_advertising(req); + /* If instance isn't pending, the chip knows about it, and it's safe to + * disable + */ + if (adv_instance && !adv_instance->pending) + __hci_req_disable_ext_adv_instance(req, instance); err = __hci_req_setup_ext_adv_instance(req, instance); if (err < 0) @@ -2104,7 +2145,7 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, hci_dev_test_flag(hdev, HCI_ADVERTISING)) return; - if (next_instance) + if (next_instance && !ext_adv_capable(hdev)) __hci_req_schedule_adv_instance(req, next_instance->instance, false); } diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 0e81614d235e..bbe892ab078a 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -86,6 +86,8 @@ void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance); +int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance); +int 
__hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance); void __hci_req_clear_ext_adv_sets(struct hci_request *req); int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, bool use_rpa, struct adv_info *adv_instance, diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 686ef4792831..f45105d2de77 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -7504,6 +7504,12 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, hci_req_init(&req, hdev); + /* If we use extended advertising, instance is disabled and removed */ + if (ext_adv_capable(hdev)) { + __hci_req_disable_ext_adv_instance(&req, cp->instance); + __hci_req_remove_ext_adv_instance(&req, cp->instance); + } + hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true); if (list_empty(&hdev->adv_instances)) From 17c7b8b1cadc0ae95ee2a9fa18022dd0c901af5f Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:18:46 +0800 Subject: [PATCH 1074/2056] cipso: Remove unused inline functions They are not used any more since commit b1edeb102397 ("netlabel: Replace protocol/NetLabel linking with refrerence counts") Signed-off-by: YueHaibing Acked-by: Paul Moore Signed-off-by: David S. Miller --- include/net/cipso_ipv4.h | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h index 428b6725b248..53dd7d988a2d 100644 --- a/include/net/cipso_ipv4.h +++ b/include/net/cipso_ipv4.h @@ -150,18 +150,6 @@ static inline int cipso_v4_doi_walk(u32 *skip_cnt, { return 0; } - -static inline int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def, - const char *domain) -{ - return -ENOSYS; -} - -static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def, - const char *domain) -{ - return 0; -} #endif /* CONFIG_NETLABEL */ /* From 054848d21bc19992f2c3540244bd6defbc833aa2 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:31:19 +0800 Subject: [PATCH 1075/2056] net: flow: Remove unused inline function It is not used since commit 09c7570480f7 ("xfrm: remove flow cache") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- include/net/flow.h | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/include/net/flow.h b/include/net/flow.h index a50fb77a0b27..929d3ca614d0 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -204,24 +204,6 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn) return container_of(fldn, struct flowi, u.dn); } -typedef unsigned long flow_compare_t; - -static inline unsigned int flow_key_size(u16 family) -{ - switch (family) { - case AF_INET: - BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t)); - return sizeof(struct flowi4) / sizeof(flow_compare_t); - case AF_INET6: - BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t)); - return sizeof(struct flowi6) / sizeof(flow_compare_t); - case AF_DECnet: - BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t)); - return sizeof(struct flowidn) / sizeof(flow_compare_t); - } - return 0; -} - __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys); #endif From 0d40efabe3e383dd7e347debd85a0d4754402efb Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:36:13 +0800 Subject: [PATCH 1076/2056] mptcp: Remove unused inline function mptcp_rcv_synsent() commit 263e1201a2c3 ("mptcp: consolidate synack processing.") left behind this, remove it. Signed-off-by: YueHaibing Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller --- include/net/mptcp.h | 4 ---- 1 file changed, 4 deletions(-) diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 46d0487d2b22..02158c257bd4 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -164,10 +164,6 @@ static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, return false; } -static inline void mptcp_rcv_synsent(struct sock *sk) -{ -} - static inline bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, struct mptcp_out_options *opts) From 8635764bcf0f109933b593d79ac2247b1e863d0a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 15 Jul 2020 10:52:03 +0800 Subject: [PATCH 1077/2056] netpoll: Remove unused inline function netpoll_netdev_init() commit d565b0a1a9b6 ("net: Add Generic Receive Offload infrastructure") left behind this, remove it. Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- include/linux/netpoll.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index f47af135bd56..e6a2d72e0dc7 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -102,9 +102,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) static inline void netpoll_poll_unlock(void *have) { } -static inline void netpoll_netdev_init(struct net_device *dev) -{ -} static inline bool netpoll_tx_running(struct net_device *dev) { return false; From 40c66c68b3fdba20685e765dcf45acdc5bf380a3 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 29 Jun 2020 13:58:42 +0800 Subject: [PATCH 1078/2056] rtlwifi: Use const in 8188ee/8723be/8821ae swing_table declarations Reduce data usage about 1KB by using const. Signed-off-by: Joe Perches Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629055843.25394-1-pkshih@realtek.com --- .../wireless/realtek/rtlwifi/rtl8188ee/dm.c | 4 +- .../wireless/realtek/rtlwifi/rtl8723be/dm.c | 4 +- .../wireless/realtek/rtlwifi/rtl8821ae/dm.c | 114 +++++++++--------- 3 files changed, 64 insertions(+), 58 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index dceb04a9b3f5..1ffa188a65c9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -870,11 +870,11 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw) /*0.1 the following TWO tables decide the *final index of OFDM/CCK swing table */ - s8 delta_swing_table_idx[2][15] = { + static const s8 delta_swing_table_idx[2][15] = { {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10} }; - u8 thermal_threshold[2][15] = { + static const u8 thermal_threshold[2][15] = { {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 27}, {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 25, 25, 25} }; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index b13fd3c0c832..c9b3d9d09c48 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -736,11 +736,11 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter( u8 ofdm_min_index = 6; u8 index_for_channel = 0; - s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = { + static const s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15}; - s8 
delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = { + static const s8 delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15}; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index f57e8794f0ec..e8fb9354cf19 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -115,47 +115,47 @@ static const u32 edca_setting_ul[PEER_MAX] = { 0x5ea44f, /* 7 MARV */ }; -static u8 rtl8818e_delta_swing_table_idx_24gb_p[] = { +static const u8 rtl8818e_delta_swing_table_idx_24gb_p[] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9}; -static u8 rtl8818e_delta_swing_table_idx_24gb_n[] = { +static const u8 rtl8818e_delta_swing_table_idx_24gb_n[] = { 0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11}; -static u8 rtl8812ae_delta_swing_table_idx_24gb_n[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gb_n[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; -static u8 rtl8812ae_delta_swing_table_idx_24gb_p[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gb_p[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; -static u8 rtl8812ae_delta_swing_table_idx_24ga_n[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24ga_n[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; -static u8 rtl8812ae_delta_swing_table_idx_24ga_p[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24ga_p[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; -static u8 rtl8812ae_delta_swing_table_idx_24gcckb_n[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gcckb_n[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11}; -static u8 rtl8812ae_delta_swing_table_idx_24gcckb_p[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gcckb_p[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; -static u8 rtl8812ae_delta_swing_table_idx_24gccka_n[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gccka_n[] = { 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11}; -static u8 rtl8812ae_delta_swing_table_idx_24gccka_p[] = { +static const u8 rtl8812ae_delta_swing_table_idx_24gccka_p[] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9}; -static u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { +static const u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13}, {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, @@ -164,7 +164,7 @@ static u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { 12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18}, }; -static u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { +static const u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, @@ -173,7 +173,7 @@ static u8 
rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, }; -static u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { +static const u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13}, {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, @@ -182,7 +182,7 @@ static u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { 12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18}, }; -static u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { +static const u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11}, {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, @@ -191,39 +191,39 @@ static u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, }; -static u8 rtl8821ae_delta_swing_table_idx_24gb_n[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gb_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; -static u8 rtl8821ae_delta_swing_table_idx_24gb_p[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gb_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static u8 rtl8821ae_delta_swing_table_idx_24ga_n[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24ga_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; -static u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static u8 rtl8821ae_delta_swing_table_idx_24gcckb_n[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gcckb_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; -static u8 rtl8821ae_delta_swing_table_idx_24gcckb_p[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gcckb_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; -static u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = { +static const u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { +static const u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, @@ -232,7 +232,7 @@ static u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, }; -static u8 rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { +static const u8 rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, @@ -241,7 +241,7 @@ static u8 
rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, }; -static u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { +static const u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, @@ -250,7 +250,7 @@ static u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, }; -static u8 rtl8821ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { +static const u8 rtl8821ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, @@ -962,8 +962,10 @@ static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw) } static void rtl8812ae_get_delta_swing_table(struct ieee80211_hw *hw, - u8 **up_a, u8 **down_a, - u8 **up_b, u8 **down_b) + const u8 **up_a, + const u8 **down_a, + const u8 **up_b, + const u8 **down_b) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -999,10 +1001,10 @@ static void rtl8812ae_get_delta_swing_table(struct ieee80211_hw *hw, *up_b = rtl8812ae_delta_swing_table_idx_5gb_p[2]; *down_b = rtl8812ae_delta_swing_table_idx_5gb_n[2]; } else { - *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; - *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; - *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; - *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + *up_a = rtl8818e_delta_swing_table_idx_24gb_p; + *down_a = rtl8818e_delta_swing_table_idx_24gb_n; + *up_b = rtl8818e_delta_swing_table_idx_24gb_p; + *down_b = rtl8818e_delta_swing_table_idx_24gb_n; } } @@ -1492,17 +1494,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( /* 1. The following TWO tables decide * the final index of OFDM/CCK swing table. */ - u8 *delta_swing_table_idx_tup_a; - u8 *delta_swing_table_idx_tdown_a; - u8 *delta_swing_table_idx_tup_b; - u8 *delta_swing_table_idx_tdown_b; + const u8 *delta_swing_table_idx_tup_a; + const u8 *delta_swing_table_idx_tdown_a; + const u8 *delta_swing_table_idx_tup_b; + const u8 *delta_swing_table_idx_tdown_b; /*2. 
Initilization ( 7 steps in total )*/ rtl8812ae_get_delta_swing_table(hw, - (u8 **)&delta_swing_table_idx_tup_a, - (u8 **)&delta_swing_table_idx_tdown_a, - (u8 **)&delta_swing_table_idx_tup_b, - (u8 **)&delta_swing_table_idx_tdown_b); + &delta_swing_table_idx_tup_a, + &delta_swing_table_idx_tdown_a, + &delta_swing_table_idx_tup_b, + &delta_swing_table_idx_tdown_b); rtldm->txpower_trackinginit = true; @@ -1830,8 +1832,11 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n"); } -static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, u8 **up_a, - u8 **down_a, u8 **up_b, u8 **down_b) +static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, + const u8 **up_a, + const u8 **down_a, + const u8 **up_b, + const u8 **down_b) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -1867,10 +1872,10 @@ static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, u8 **up_a, *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[2]; *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[2]; } else { - *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; - *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; - *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p; - *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n; + *up_a = rtl8818e_delta_swing_table_idx_24gb_p; + *down_a = rtl8818e_delta_swing_table_idx_24gb_n; + *up_b = rtl8818e_delta_swing_table_idx_24gb_p; + *down_b = rtl8818e_delta_swing_table_idx_24gb_n; } return; } @@ -2075,16 +2080,17 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter( /* 1. The following TWO tables decide the final * index of OFDM/CCK swing table. */ - u8 *delta_swing_table_idx_tup_a; - u8 *delta_swing_table_idx_tdown_a; - u8 *delta_swing_table_idx_tup_b; - u8 *delta_swing_table_idx_tdown_b; + const u8 *delta_swing_table_idx_tup_a; + const u8 *delta_swing_table_idx_tdown_a; + const u8 *delta_swing_table_idx_tup_b; + const u8 *delta_swing_table_idx_tdown_b; /*2. Initilization ( 7 steps in total )*/ - rtl8821ae_get_delta_swing_table(hw, (u8 **)&delta_swing_table_idx_tup_a, - (u8 **)&delta_swing_table_idx_tdown_a, - (u8 **)&delta_swing_table_idx_tup_b, - (u8 **)&delta_swing_table_idx_tdown_b); + rtl8821ae_get_delta_swing_table(hw, + &delta_swing_table_idx_tup_a, + &delta_swing_table_idx_tdown_a, + &delta_swing_table_idx_tup_b, + &delta_swing_table_idx_tdown_b); rtldm->txpower_trackinginit = true; From 6648cfac8e2a69e12aa314ffe1178353a474b957 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Mon, 29 Jun 2020 13:58:43 +0800 Subject: [PATCH 1079/2056] rtlwifi: 8821ae: remove unused path B parameters from swing table 8821AE is a 1x1 chip, so swing parameters for path B aren't necessary. 
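Combined with the const conversion in the previous patch, the lookup helper ends up with the shape sketched below (a simplified, hypothetical reduction of rtl8821ae_get_delta_swing_table() with shortened tables and only two of the band branches):

static const u8 delta_swing_24ga_p[] = { 0, 0, 1, 1, 2, 2, 3, 3 };
static const u8 delta_swing_24ga_n[] = { 0, 1, 1, 2, 2, 2, 3, 3 };
static const u8 delta_swing_5ga_p[]  = { 0, 0, 1, 2, 3, 3, 4, 5 };
static const u8 delta_swing_5ga_n[]  = { 0, 0, 1, 2, 3, 3, 4, 5 };

/* Only the path A out-parameters remain for this 1x1 chip.  Because the
 * tables are "static const" they live in .rodata, and handing them out
 * through "const u8 **" needs no (u8 *) casts.
 */
static void get_delta_swing_table(u8 channel, const u8 **up_a, const u8 **down_a)
{
	if (channel <= 14) {
		*up_a   = delta_swing_24ga_p;
		*down_a = delta_swing_24ga_n;
	} else {
		*up_a   = delta_swing_5ga_p;
		*down_a = delta_swing_5ga_n;
	}
}

The caller then declares its local pointers as const u8 * and passes their addresses directly, which is what removes the (u8 **) casts from the thermal tracking callback.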
Signed-off-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200629055843.25394-2-pkshih@realtek.com --- .../wireless/realtek/rtlwifi/rtl8821ae/dm.c | 56 +------------------ 1 file changed, 2 insertions(+), 54 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index e8fb9354cf19..97a30ccf0b27 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -191,14 +191,6 @@ static const u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = { 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11}, }; -static const u8 rtl8821ae_delta_swing_table_idx_24gb_n[] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, - 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; - -static const u8 rtl8821ae_delta_swing_table_idx_24gb_p[] = { - 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, - 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; - static const u8 rtl8821ae_delta_swing_table_idx_24ga_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; @@ -207,14 +199,6 @@ static const u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static const u8 rtl8821ae_delta_swing_table_idx_24gcckb_n[] = { - 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, - 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; - -static const u8 rtl8821ae_delta_swing_table_idx_24gcckb_p[] = { - 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, - 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; - static const u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = { 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10}; @@ -223,24 +207,6 @@ static const u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12}; -static const u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = { - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, -}; - -static const u8 rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = { - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, - {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, - 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, -}; - static const u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = { {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11, 12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16}, @@ -1834,9 +1800,7 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter( static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, const u8 **up_a, - const u8 **down_a, - const u8 **up_b, - const u8 **down_b) + const u8 **down_a) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -1848,34 +1812,22 @@ static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, if (RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) { *up_a = 
rtl8821ae_delta_swing_table_idx_24gccka_p; *down_a = rtl8821ae_delta_swing_table_idx_24gccka_n; - *up_b = rtl8821ae_delta_swing_table_idx_24gcckb_p; - *down_b = rtl8821ae_delta_swing_table_idx_24gcckb_n; } else { *up_a = rtl8821ae_delta_swing_table_idx_24ga_p; *down_a = rtl8821ae_delta_swing_table_idx_24ga_n; - *up_b = rtl8821ae_delta_swing_table_idx_24gb_p; - *down_b = rtl8821ae_delta_swing_table_idx_24gb_n; } } else if (36 <= channel && channel <= 64) { *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[0]; *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[0]; - *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[0]; - *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[0]; } else if (100 <= channel && channel <= 140) { *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[1]; *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[1]; - *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[1]; - *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[1]; } else if (149 <= channel && channel <= 173) { *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[2]; *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[2]; - *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[2]; - *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[2]; } else { *up_a = rtl8818e_delta_swing_table_idx_24gb_p; *down_a = rtl8818e_delta_swing_table_idx_24gb_n; - *up_b = rtl8818e_delta_swing_table_idx_24gb_p; - *down_b = rtl8818e_delta_swing_table_idx_24gb_n; } return; } @@ -2082,15 +2034,11 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter( */ const u8 *delta_swing_table_idx_tup_a; const u8 *delta_swing_table_idx_tdown_a; - const u8 *delta_swing_table_idx_tup_b; - const u8 *delta_swing_table_idx_tdown_b; /*2. Initilization ( 7 steps in total )*/ rtl8821ae_get_delta_swing_table(hw, &delta_swing_table_idx_tup_a, - &delta_swing_table_idx_tdown_a, - &delta_swing_table_idx_tup_b, - &delta_swing_table_idx_tdown_b); + &delta_swing_table_idx_tdown_a); rtldm->txpower_trackinginit = true; From 4f3ebd6fb6806a948df9eb1dc7ea9bb76e2712ec Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Tue, 30 Jun 2020 07:04:04 +0000 Subject: [PATCH 1080/2056] zd1211rw: remove needless check before usb_free_coherent() usb_free_coherent() is safe with NULL addr and this check is not required. Signed-off-by: Xu Wang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200630070404.8207-1-vulab@iscas.ac.cn --- drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 8ff0374126e4..65b5985ad402 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -600,9 +600,7 @@ void zd_usb_disable_int(struct zd_usb *usb) dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb); usb_free_urb(urb); - if (buffer) - usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, - buffer, buffer_dma); + usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, buffer, buffer_dma); } static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, From 0db5bc7b344736731a3982885402914b34dd252b Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Fri, 10 Jul 2020 08:21:51 +0200 Subject: [PATCH 1081/2056] ssb: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. 
Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200710062151.28871-1-grandmaster@al2klimov.de --- drivers/ssb/driver_chipcommon.c | 4 ++-- drivers/ssb/driver_chipcommon_pmu.c | 2 +- drivers/ssb/sprom.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c index 3861cb659cb9..85542bfd7715 100644 --- a/drivers/ssb/driver_chipcommon.c +++ b/drivers/ssb/driver_chipcommon.c @@ -238,7 +238,7 @@ static void chipco_powercontrol_init(struct ssb_chipcommon *cc) } } -/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */ +/* https://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */ static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; @@ -255,7 +255,7 @@ static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc) } } -/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */ +/* https://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */ static void calc_fast_powerup_delay(struct ssb_chipcommon *cc) { struct ssb_bus *bus = cc->dev->bus; diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c index 0f60e90ded26..888069e10659 100644 --- a/drivers/ssb/driver_chipcommon_pmu.c +++ b/drivers/ssb/driver_chipcommon_pmu.c @@ -513,7 +513,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc) chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk); } -/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */ +/* https://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */ void ssb_pmu_init(struct ssb_chipcommon *cc) { u32 pmucap; diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index 42d620cee8a9..7cd553127861 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c @@ -186,7 +186,7 @@ int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) return get_fallback_sprom(bus, out); } -/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ +/* https://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ bool ssb_is_sprom_available(struct ssb_bus *bus) { /* status register only exists on chipcomon rev >= 11 and we need check From 9018fd7f2a73e9b290f48a56b421558fa31e8b75 Mon Sep 17 00:00:00 2001 From: Bolarinwa Olayemi Saheed Date: Mon, 13 Jul 2020 19:55:27 +0200 Subject: [PATCH 1082/2056] iwlegacy: Check the return value of pcie_capability_read_*() On failure pcie_capability_read_dword() sets it's last parameter, val to 0. However, with Patch 14/14, it is possible that val is set to ~0 on failure. This would introduce a bug because (x & x) == (~0 & x). This bug can be avoided without changing the function's behaviour if the return value of pcie_capability_read_dword is checked to confirm success. Check the return value of pcie_capability_read_dword() to ensure success. 
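The failure mode can be made concrete with a short sketch (the register and flag match the hunk below, which uses pcie_capability_read_word(); disable_l0s() is a made-up placeholder for the driver's actual action):

	u16 lctl;
	int ret;

	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lctl);
	/* If the read fails and lctl is left as all-ones, a bare bit test such
	 * as (lctl & PCI_EXP_LNKCTL_ASPM_L1) would still appear to succeed,
	 * since (~0 & x) == x.  Testing ret first prevents acting on that
	 * garbage value.
	 */
	if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1))
		disable_l0s();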
Suggested-by: Bjorn Helgaas Signed-off-by: Bolarinwa Olayemi Saheed Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200713175529.29715-3-refactormyself@gmail.com --- drivers/net/wireless/intel/iwlegacy/common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 348c17ce72f5..f78e062df572 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -4286,8 +4286,8 @@ il_apm_init(struct il_priv *il) * power savings, even without L1. */ if (il->cfg->set_l0s) { - pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { + ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl); + if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) { /* L1-ASPM enabled; disable(!) L0S */ il_set_bit(il, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); From 0e20c3e10333326fc63646fa40b159eb88b7e8c8 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Wed, 15 Jul 2020 13:48:35 +0100 Subject: [PATCH 1083/2056] wireless: Fix trivial spelling The word 'descriptor' is misspelled throughout the tree. Fix it up accordingly: decriptors -> descriptors Signed-off-by: Kieran Bingham Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200715124839.252822-5-kieran.bingham+renesas@ideasonboard.com --- drivers/net/wireless/ath/ath10k/usb.c | 2 +- drivers/net/wireless/ath/ath6kl/usb.c | 2 +- drivers/net/wireless/cisco/airo.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c index b7daf344d012..05a620ff6fe2 100644 --- a/drivers/net/wireless/ath/ath10k/usb.c +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -824,7 +824,7 @@ static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n"); - /* walk decriptors and setup pipes */ + /* walk descriptors and setup pipes */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 53b66e9434c9..5372e948e761 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -311,7 +311,7 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb) ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n"); - /* walk decriptors and setup pipes */ + /* walk descriptors and setup pipes */ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f0bcb67095f1..726cb1bc86cd 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -2450,7 +2450,7 @@ static void mpi_unmap_card(struct pci_dev *pci) /************************************************************* * This routine assumes that descriptors have been setup . - * Run at insmod time or after reset when the decriptors + * Run at insmod time or after reset when the descriptors * have been initialized . Returns 0 if all is well nz * otherwise . Does not allocate memory but sets up card * using previously allocated descriptors. 
From 336f531ab17c3f480f8289d26c35bd48302ed085 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Jul 2020 13:56:22 +0300 Subject: [PATCH 1084/2056] netfilter: nf_tables: Fix a use after free in nft_immediate_destroy() The nf_tables_rule_release() function frees "rule" so we have to use the _safe() version of list_for_each_entry(). Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING") Signed-off-by: Dan Carpenter Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_immediate.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 9e556638bb32..c63eb3b17178 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -103,9 +103,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, { const struct nft_immediate_expr *priv = nft_expr_priv(expr); const struct nft_data *data = &priv->data; + struct nft_rule *rule, *n; struct nft_ctx chain_ctx; struct nft_chain *chain; - struct nft_rule *rule; if (priv->dreg != NFT_REG_VERDICT) return; @@ -121,7 +121,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, chain_ctx = *ctx; chain_ctx.chain = chain; - list_for_each_entry(rule, &chain->rules, list) + list_for_each_entry_safe(rule, n, &chain->rules, list) nf_tables_rule_release(&chain_ctx, rule); nf_tables_chain_destroy(&chain_ctx); From e63a22828432fd1739681be09814b5a5ce69a556 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 9 Jul 2020 22:49:25 +0200 Subject: [PATCH 1085/2056] net: sundance: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/dlink/sundance.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index ca97e321082d..b3f8597e77aa 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -18,7 +18,7 @@ http://www.scyld.com/network/sundance.html [link no longer provides useful info -jgarzik] Archives of the mailing list are still available at - http://www.beowulf.org/pipermail/netdrivers/ + https://www.beowulf.org/pipermail/netdrivers/ */ From 94d9f78f4d64b967273a676167bd34ddad2f978c Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 9 Jul 2020 23:17:33 +0300 Subject: [PATCH 1086/2056] docs: networking: timestamping: add section for stacked PHC devices The concept of timestamping DSA switches / Ethernet PHYs is becoming more and more popular, however the Linux kernel timestamping code has evolved quite organically and there's layers upon layers of new and old code that need to work together for things to behave as expected. Add this chapter to explain what the overall goals are. 
Loosely based upon this email discussion plus some more info: https://lkml.org/lkml/2020/7/6/481 Signed-off-by: Vladimir Oltean Reviewed-by: Richard Cochran Signed-off-by: Jakub Kicinski --- Documentation/networking/timestamping.rst | 165 ++++++++++++++++++++++ 1 file changed, 165 insertions(+) diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst index 1adead6a4527..03f7beade470 100644 --- a/Documentation/networking/timestamping.rst +++ b/Documentation/networking/timestamping.rst @@ -589,3 +589,168 @@ Time stamps for outgoing packets are to be generated as follows: this would occur at a later time in the processing pipeline than other software time stamping and therefore could lead to unexpected deltas between time stamps. + +3.2 Special considerations for stacked PTP Hardware Clocks +---------------------------------------------------------- + +There are situations when there may be more than one PHC (PTP Hardware Clock) +in the data path of a packet. The kernel has no explicit mechanism to allow the +user to select which PHC to use for timestamping Ethernet frames. Instead, the +assumption is that the outermost PHC is always the most preferable, and that +kernel drivers collaborate towards achieving that goal. Currently there are 3 +cases of stacked PHCs, detailed below: + +3.2.1 DSA (Distributed Switch Architecture) switches +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These are Ethernet switches which have one of their ports connected to an +(otherwise completely unaware) host Ethernet interface, and perform the role of +a port multiplier with optional forwarding acceleration features. Each DSA +switch port is visible to the user as a standalone (virtual) network interface, +and its network I/O is performed, under the hood, indirectly through the host +interface (redirecting to the host port on TX, and intercepting frames on RX). + +When a DSA switch is attached to a host port, PTP synchronization has to +suffer, since the switch's variable queuing delay introduces a path delay +jitter between the host port and its PTP partner. For this reason, some DSA +switches include a timestamping clock of their own, and have the ability to +perform network timestamping on their own MAC, such that path delays only +measure wire and PHY propagation latencies. Timestamping DSA switches are +supported in Linux and expose the same ABI as any other network interface (save +for the fact that the DSA interfaces are in fact virtual in terms of network +I/O, they do have their own PHC). It is typical, but not mandatory, for all +interfaces of a DSA switch to share the same PHC. + +By design, PTP timestamping with a DSA switch does not need any special +handling in the driver for the host port it is attached to. However, when the +host port also supports PTP timestamping, DSA will take care of intercepting +the ``.ndo_do_ioctl`` calls towards the host port, and block attempts to enable +hardware timestamping on it. This is because the SO_TIMESTAMPING API does not +allow the delivery of multiple hardware timestamps for the same packet, so +anybody else except for the DSA switch port must be prevented from doing so. 
+ +In code, DSA provides for most of the infrastructure for timestamping already, +in generic code: a BPF classifier (``ptp_classify_raw``) is used to identify +PTP event messages (any other packets, including PTP general messages, are not +timestamped), and provides two hooks to drivers: + +- ``.port_txtstamp()``: The driver is passed a clone of the timestampable skb + to be transmitted, before actually transmitting it. Typically, a switch will + have a PTP TX timestamp register (or sometimes a FIFO) where the timestamp + becomes available. There may be an IRQ that is raised upon this timestamp's + availability, or the driver might have to poll after invoking + ``dev_queue_xmit()`` towards the host interface. Either way, in the + ``.port_txtstamp()`` method, the driver only needs to save the clone for + later use (when the timestamp becomes available). Each skb is annotated with + a pointer to its clone, in ``DSA_SKB_CB(skb)->clone``, to ease the driver's + job of keeping track of which clone belongs to which skb. + +- ``.port_rxtstamp()``: The original (and only) timestampable skb is provided + to the driver, for it to annotate it with a timestamp, if that is immediately + available, or defer to later. On reception, timestamps might either be + available in-band (through metadata in the DSA header, or attached in other + ways to the packet), or out-of-band (through another RX timestamping FIFO). + Deferral on RX is typically necessary when retrieving the timestamp needs a + sleepable context. In that case, it is the responsibility of the DSA driver + to call ``netif_rx_ni()`` on the freshly timestamped skb. + +3.2.2 Ethernet PHYs +^^^^^^^^^^^^^^^^^^^ + +These are devices that typically fulfill a Layer 1 role in the network stack, +hence they do not have a representation in terms of a network interface as DSA +switches do. However, PHYs may be able to detect and timestamp PTP packets, for +performance reasons: timestamps taken as close as possible to the wire have the +potential to yield a more stable and precise synchronization. + +A PHY driver that supports PTP timestamping must create a ``struct +mii_timestamper`` and add a pointer to it in ``phydev->mii_ts``. The presence +of this pointer will be checked by the networking stack. + +Since PHYs do not have network interface representations, the timestamping and +ethtool ioctl operations for them need to be mediated by their respective MAC +driver. Therefore, as opposed to DSA switches, modifications need to be done +to each individual MAC driver for PHY timestamping support. This entails: + +- Checking, in ``.ndo_do_ioctl``, whether ``phy_has_hwtstamp(netdev->phydev)`` + is true or not. If it is, then the MAC driver should not process this request + but instead pass it on to the PHY using ``phy_mii_ioctl()``. + +- On RX, special intervention may or may not be needed, depending on the + function used to deliver skb's up the network stack. In the case of plain + ``netif_rx()`` and similar, MAC drivers must check whether + ``skb_defer_rx_timestamp(skb)`` is necessary or not - and if it is, don't + call ``netif_rx()`` at all. If ``CONFIG_NETWORK_PHY_TIMESTAMPING`` is + enabled, and ``skb->dev->phydev->mii_ts`` exists, its ``.rxtstamp()`` hook + will be called now, to determine, using logic very similar to DSA, whether + deferral for RX timestamping is necessary. Again like DSA, it becomes the + responsibility of the PHY driver to send the packet up the stack when the + timestamp is available. 
+ + For other skb receive functions, such as ``napi_gro_receive`` and + ``netif_receive_skb``, the stack automatically checks whether + ``skb_defer_rx_timestamp()`` is necessary, so this check is not needed inside + the driver. + +- On TX, again, special intervention might or might not be needed. The + function that calls the ``mii_ts->txtstamp()`` hook is named + ``skb_clone_tx_timestamp()``. This function can either be called directly + (case in which explicit MAC driver support is indeed needed), but the + function also piggybacks from the ``skb_tx_timestamp()`` call, which many MAC + drivers already perform for software timestamping purposes. Therefore, if a + MAC supports software timestamping, it does not need to do anything further + at this stage. + +3.2.3 MII bus snooping devices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +These perform the same role as timestamping Ethernet PHYs, save for the fact +that they are discrete devices and can therefore be used in conjunction with +any PHY even if it doesn't support timestamping. In Linux, they are +discoverable and attachable to a ``struct phy_device`` through Device Tree, and +for the rest, they use the same mii_ts infrastructure as those. See +Documentation/devicetree/bindings/ptp/timestamper.txt for more details. + +3.2.4 Other caveats for MAC drivers +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Stacked PHCs, especially DSA (but not only) - since that doesn't require any +modification to MAC drivers, so it is more difficult to ensure correctness of +all possible code paths - is that they uncover bugs which were impossible to +trigger before the existence of stacked PTP clocks. One example has to do with +this line of code, already presented earlier:: + + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + +Any TX timestamping logic, be it a plain MAC driver, a DSA switch driver, a PHY +driver or a MII bus snooping device driver, should set this flag. +But a MAC driver that is unaware of PHC stacking might get tripped up by +somebody other than itself setting this flag, and deliver a duplicate +timestamp. +For example, a typical driver design for TX timestamping might be to split the +transmission part into 2 portions: + +1. "TX": checks whether PTP timestamping has been previously enabled through + the ``.ndo_do_ioctl`` ("``priv->hwtstamp_tx_enabled == true``") and the + current skb requires a TX timestamp ("``skb_shinfo(skb)->tx_flags & + SKBTX_HW_TSTAMP``"). If this is true, it sets the + "``skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS``" flag. Note: as + described above, in the case of a stacked PHC system, this condition should + never trigger, as this MAC is certainly not the outermost PHC. But this is + not where the typical issue is. Transmission proceeds with this packet. + +2. "TX confirmation": Transmission has finished. The driver checks whether it + is necessary to collect any TX timestamp for it. Here is where the typical + issues are: the MAC driver takes a shortcut and only checks whether + "``skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS``" was set. With a stacked + PHC system, this is incorrect because this MAC driver is not the only entity + in the TX data path who could have enabled SKBTX_IN_PROGRESS in the first + place. + +The correct solution for this problem is for MAC drivers to have a compound +check in their "TX confirmation" portion, not only for +"``skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS``", but also for +"``priv->hwtstamp_tx_enabled == true``". 
Because the rest of the system ensures +that PTP timestamping is not enabled for anything other than the outermost PHC, +this enhanced check will avoid delivering a duplicated TX timestamp to user +space. From 514d09529db905251916399a90b69e1a3eac6cb1 Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Tue, 14 Jul 2020 19:53:28 +0530 Subject: [PATCH 1087/2056] decnet: dn_dev: Remove an unnecessary label. Remove the unnecessary label from dn_dev_ioctl() and make its error handling simpler to read. Signed-off-by: Suraj Upadhyay Signed-off-by: Jakub Kicinski --- net/decnet/dn_dev.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 65abcf1b3210..15d42353f1a3 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -462,7 +462,9 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg) switch (cmd) { case SIOCGIFADDR: *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local; - goto rarok; + if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) + ret = -EFAULT; + break; case SIOCSIFADDR: if (!ifa) { @@ -485,10 +487,6 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg) rtnl_unlock(); return ret; -rarok: - if (copy_to_user(arg, ifr, DN_IFREQ_SIZE)) - ret = -EFAULT; - goto done; } struct net_device *dn_dev_get_default(void) From 5e126e7c4e52754e3aac0fbc5325abcbe1629388 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Wed, 15 Jul 2020 15:30:23 +0800 Subject: [PATCH 1088/2056] hinic: add firmware update support add support to update firmware by the devlink flashing API Signed-off-by: Luo bin Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/huawei/hinic/Makefile | 2 +- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 5 + .../net/ethernet/huawei/hinic/hinic_devlink.c | 328 ++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_devlink.h | 115 ++++++ .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 28 ++ .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 34 ++ .../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 4 +- .../net/ethernet/huawei/hinic/hinic_main.c | 28 +- .../net/ethernet/huawei/hinic/hinic_port.h | 25 ++ 9 files changed, 565 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_devlink.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_devlink.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 32a011ca44c3..67b59d0ba769 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \ hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \ - hinic_common.o hinic_ethtool.o hinic_hw_mbox.o hinic_sriov.o + hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o hinic_sriov.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 9adb755f0820..befd925c03dc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -97,6 +97,11 @@ struct hinic_dev { int lb_test_rx_idx; int lb_pkt_len; u8 *lb_test_rx_buf; + struct devlink *devlink; +}; + +struct hinic_devlink_priv { + struct hinic_hwdev *hwdev; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c new file mode 100644 index 000000000000..a40a10ac1ee9 --- /dev/null +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ +#include +#include +#include + +#include "hinic_dev.h" +#include "hinic_port.h" +#include "hinic_devlink.h" + +static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf, + u32 image_size, struct host_image_st *host_image) +{ + struct fw_image_st *fw_image = NULL; + u32 len = 0; + u32 i; + + fw_image = (struct fw_image_st *)buf; + + if (fw_image->fw_magic != HINIC_MAGIC_NUM) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_magic read from file, fw_magic: 0x%x\n", + fw_image->fw_magic); + return false; + } + + if (fw_image->fw_info.fw_section_cnt > MAX_FW_TYPE_NUM) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_type_num read from file, fw_type_num: 0x%x\n", + fw_image->fw_info.fw_section_cnt); + return false; + } + + for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) { + len += fw_image->fw_section_info[i].fw_section_len; + memcpy(&host_image->image_section_info[i], + &fw_image->fw_section_info[i], + sizeof(struct fw_section_info_st)); + } + + if (len != fw_image->fw_len || + (fw_image->fw_len + UPDATEFW_IMAGE_HEAD_SIZE) != image_size) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n"); + return false; + } + + host_image->image_info.up_total_len = fw_image->fw_len; + host_image->image_info.fw_version = fw_image->fw_version; + host_image->section_type_num = fw_image->fw_info.fw_section_cnt; + host_image->device_id = fw_image->device_id; + + return true; +} + +static bool check_image_integrity(struct hinic_devlink_priv *priv, + struct host_image_st *host_image, + u32 update_type) +{ + u32 collect_section_type = 0; + u32 i, type; + + for (i = 0; i < host_image->section_type_num; i++) { + type = host_image->image_section_info[i].fw_section_type; + if (collect_section_type & (1U << type)) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Duplicate section type: %u\n", + type); + return false; + } + collect_section_type |= (1U << type); + } + + if (update_type == FW_UPDATE_COLD && + (((collect_section_type & _IMAGE_COLD_SUB_MODULES_MUST_IN) == + _IMAGE_COLD_SUB_MODULES_MUST_IN) || + collect_section_type == _IMAGE_CFG_SUB_MODULES_MUST_IN)) + return true; + + if (update_type == FW_UPDATE_HOT && + (collect_section_type & _IMAGE_HOT_SUB_MODULES_MUST_IN) == + _IMAGE_HOT_SUB_MODULES_MUST_IN) + return true; + + if (update_type == FW_UPDATE_COLD) + dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid: 0x%x or 0x%lx, current: 0x%x\n", + _IMAGE_COLD_SUB_MODULES_MUST_IN, + _IMAGE_CFG_SUB_MODULES_MUST_IN, collect_section_type); + else + dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid:0x%x, current: 0x%x\n", + _IMAGE_HOT_SUB_MODULES_MUST_IN, collect_section_type); + + return false; +} + +static int check_image_device_type(struct hinic_devlink_priv *priv, + u32 image_device_type) +{ + struct hinic_comm_board_info board_info = {0}; + + if 
(hinic_get_board_info(priv->hwdev, &board_info)) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Get board info failed\n"); + return false; + } + + if (image_device_type == board_info.info.board_type) + return true; + + dev_err(&priv->hwdev->hwif->pdev->dev, "The device type of upgrade file doesn't match the device type of current firmware, please check the upgrade file\n"); + dev_err(&priv->hwdev->hwif->pdev->dev, "The image device type: 0x%x, firmware device type: 0x%x\n", + image_device_type, board_info.info.board_type); + + return false; +} + +static int hinic_flash_fw(struct hinic_devlink_priv *priv, const u8 *data, + struct host_image_st *host_image) +{ + u32 section_remain_send_len, send_fragment_len, send_pos, up_total_len; + struct hinic_cmd_update_fw *fw_update_msg = NULL; + u32 section_type, section_crc, section_version; + u32 i, len, section_len, section_offset; + u16 out_size = sizeof(*fw_update_msg); + int total_len_flag = 0; + int err; + + fw_update_msg = kzalloc(sizeof(*fw_update_msg), GFP_KERNEL); + if (!fw_update_msg) + return -ENOMEM; + + up_total_len = host_image->image_info.up_total_len; + + for (i = 0; i < host_image->section_type_num; i++) { + len = host_image->image_section_info[i].fw_section_len; + if (host_image->image_section_info[i].fw_section_type == + UP_FW_UPDATE_BOOT) { + up_total_len = up_total_len - len; + break; + } + } + + for (i = 0; i < host_image->section_type_num; i++) { + section_len = + host_image->image_section_info[i].fw_section_len; + section_offset = + host_image->image_section_info[i].fw_section_offset; + section_remain_send_len = section_len; + section_type = + host_image->image_section_info[i].fw_section_type; + section_crc = host_image->image_section_info[i].fw_section_crc; + section_version = + host_image->image_section_info[i].fw_section_version; + + if (section_type == UP_FW_UPDATE_BOOT) + continue; + + send_fragment_len = 0; + send_pos = 0; + + while (section_remain_send_len > 0) { + if (!total_len_flag) { + fw_update_msg->total_len = up_total_len; + total_len_flag = 1; + } else { + fw_update_msg->total_len = 0; + } + + memset(fw_update_msg->data, 0, MAX_FW_FRAGMENT_LEN); + + fw_update_msg->ctl_info.SF = + (section_remain_send_len == section_len) ? + true : false; + fw_update_msg->section_info.FW_section_CRC = section_crc; + fw_update_msg->fw_section_version = section_version; + fw_update_msg->ctl_info.flag = UP_TYPE_A; + + if (section_type <= UP_FW_UPDATE_UP_DATA_B) { + fw_update_msg->section_info.FW_section_type = + (section_type % 2) ? 
+ UP_FW_UPDATE_UP_DATA : + UP_FW_UPDATE_UP_TEXT; + + fw_update_msg->ctl_info.flag = UP_TYPE_B; + if (section_type <= UP_FW_UPDATE_UP_DATA_A) + fw_update_msg->ctl_info.flag = UP_TYPE_A; + } else { + fw_update_msg->section_info.FW_section_type = + section_type - 0x2; + } + + fw_update_msg->setion_total_len = section_len; + fw_update_msg->section_offset = send_pos; + + if (section_remain_send_len <= MAX_FW_FRAGMENT_LEN) { + fw_update_msg->ctl_info.SL = true; + fw_update_msg->ctl_info.fragment_len = + section_remain_send_len; + send_fragment_len += section_remain_send_len; + } else { + fw_update_msg->ctl_info.SL = false; + fw_update_msg->ctl_info.fragment_len = + MAX_FW_FRAGMENT_LEN; + send_fragment_len += MAX_FW_FRAGMENT_LEN; + } + + memcpy(fw_update_msg->data, + data + UPDATEFW_IMAGE_HEAD_SIZE + + section_offset + send_pos, + fw_update_msg->ctl_info.fragment_len); + + err = hinic_port_msg_cmd(priv->hwdev, + HINIC_PORT_CMD_UPDATE_FW, + fw_update_msg, + sizeof(*fw_update_msg), + fw_update_msg, &out_size); + if (err || !out_size || fw_update_msg->status) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n", + err, fw_update_msg->status, out_size); + err = fw_update_msg->status ? + fw_update_msg->status : -EIO; + kfree(fw_update_msg); + return err; + } + + send_pos = send_fragment_len; + section_remain_send_len = section_len - + send_fragment_len; + } + } + + kfree(fw_update_msg); + + return 0; +} + +static int hinic_firmware_update(struct hinic_devlink_priv *priv, + const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct host_image_st host_image; + int err; + + memset(&host_image, 0, sizeof(struct host_image_st)); + + if (!check_image_valid(priv, fw->data, fw->size, &host_image) || + !check_image_integrity(priv, &host_image, FW_UPDATE_COLD) || + !check_image_device_type(priv, host_image.device_id)) { + NL_SET_ERR_MSG_MOD(extack, "Check image failed"); + return -EINVAL; + } + + dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware begin\n"); + + err = hinic_flash_fw(priv, fw->data, &host_image); + if (err) { + if (err == HINIC_FW_DISMATCH_ERROR) { + dev_err(&priv->hwdev->hwif->pdev->dev, "Firmware image doesn't match this card, please use newer image, err: %d\n", + err); + NL_SET_ERR_MSG_MOD(extack, + "Firmware image doesn't match this card, please use newer image"); + } else { + dev_err(&priv->hwdev->hwif->pdev->dev, "Send firmware image data failed, err: %d\n", + err); + NL_SET_ERR_MSG_MOD(extack, "Send firmware image data failed"); + } + + return err; + } + + dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware end\n"); + + return 0; +} + +static int hinic_devlink_flash_update(struct devlink *devlink, + const char *file_name, + const char *component, + struct netlink_ext_ack *extack) +{ + struct hinic_devlink_priv *priv = devlink_priv(devlink); + const struct firmware *fw; + int err; + + if (component) + return -EOPNOTSUPP; + + err = request_firmware_direct(&fw, file_name, + &priv->hwdev->hwif->pdev->dev); + if (err) + return err; + + err = hinic_firmware_update(priv, fw, extack); + release_firmware(fw); + + return err; +} + +static const struct devlink_ops hinic_devlink_ops = { + .flash_update = hinic_devlink_flash_update, +}; + +struct devlink *hinic_devlink_alloc(void) +{ + return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev)); +} + +void hinic_devlink_free(struct devlink *devlink) +{ + devlink_free(devlink); +} + +int hinic_devlink_register(struct devlink *devlink, struct device *dev) +{ + return 
devlink_register(devlink, dev); +} + +void hinic_devlink_unregister(struct devlink *devlink) +{ + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h new file mode 100644 index 000000000000..604e95a7c5ce --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + */ + +#ifndef __HINIC_DEVLINK_H__ +#define __HINIC_DEVLINK_H__ + +#include + +#define MAX_FW_TYPE_NUM 30 +#define HINIC_MAGIC_NUM 0x18221100 +#define UPDATEFW_IMAGE_HEAD_SIZE 1024 +#define FW_UPDATE_COLD 0 +#define FW_UPDATE_HOT 1 + +#define UP_TYPE_A 0x0 +#define UP_TYPE_B 0x1 + +#define MAX_FW_FRAGMENT_LEN 1536 +#define HINIC_FW_DISMATCH_ERROR 10 + +enum hinic_fw_type { + UP_FW_UPDATE_UP_TEXT_A = 0x0, + UP_FW_UPDATE_UP_DATA_A, + UP_FW_UPDATE_UP_TEXT_B, + UP_FW_UPDATE_UP_DATA_B, + UP_FW_UPDATE_UP_DICT, + + UP_FW_UPDATE_HLINK_ONE = 0x5, + UP_FW_UPDATE_HLINK_TWO, + UP_FW_UPDATE_HLINK_THR, + UP_FW_UPDATE_PHY, + UP_FW_UPDATE_TILE_TEXT, + + UP_FW_UPDATE_TILE_DATA = 0xa, + UP_FW_UPDATE_TILE_DICT, + UP_FW_UPDATE_PPE_STATE, + UP_FW_UPDATE_PPE_BRANCH, + UP_FW_UPDATE_PPE_EXTACT, + + UP_FW_UPDATE_CLP_LEGACY = 0xf, + UP_FW_UPDATE_PXE_LEGACY, + UP_FW_UPDATE_ISCSI_LEGACY, + UP_FW_UPDATE_CLP_EFI, + UP_FW_UPDATE_PXE_EFI, + + UP_FW_UPDATE_ISCSI_EFI = 0x14, + UP_FW_UPDATE_CFG, + UP_FW_UPDATE_BOOT, + UP_FW_UPDATE_VPD, + FILE_TYPE_TOTAL_NUM +}; + +#define _IMAGE_UP_ALL_IN ((1 << UP_FW_UPDATE_UP_TEXT_A) | \ + (1 << UP_FW_UPDATE_UP_DATA_A) | \ + (1 << UP_FW_UPDATE_UP_TEXT_B) | \ + (1 << UP_FW_UPDATE_UP_DATA_B) | \ + (1 << UP_FW_UPDATE_UP_DICT) | \ + (1 << UP_FW_UPDATE_BOOT) | \ + (1 << UP_FW_UPDATE_HLINK_ONE) | \ + (1 << UP_FW_UPDATE_HLINK_TWO) | \ + (1 << UP_FW_UPDATE_HLINK_THR)) + +#define _IMAGE_UCODE_ALL_IN ((1 << UP_FW_UPDATE_TILE_TEXT) | \ + (1 << UP_FW_UPDATE_TILE_DICT) | \ + (1 << UP_FW_UPDATE_PPE_STATE) | \ + (1 << UP_FW_UPDATE_PPE_BRANCH) | \ + (1 << UP_FW_UPDATE_PPE_EXTACT)) + +#define _IMAGE_COLD_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN) +#define _IMAGE_HOT_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN) +#define _IMAGE_CFG_SUB_MODULES_MUST_IN BIT(UP_FW_UPDATE_CFG) +#define UP_FW_UPDATE_UP_TEXT 0x0 +#define UP_FW_UPDATE_UP_DATA 0x1 +#define UP_FW_UPDATE_VPD_B 0x15 + +struct fw_section_info_st { + u32 fw_section_len; + u32 fw_section_offset; + u32 fw_section_version; + u32 fw_section_type; + u32 fw_section_crc; +}; + +struct fw_image_st { + u32 fw_version; + u32 fw_len; + u32 fw_magic; + struct { + u32 fw_section_cnt:16; + u32 resd:16; + } fw_info; + struct fw_section_info_st fw_section_info[MAX_FW_TYPE_NUM]; + u32 device_id; + u32 res[101]; + void *bin_data; +}; + +struct host_image_st { + struct fw_section_info_st image_section_info[MAX_FW_TYPE_NUM]; + struct { + u32 up_total_len; + u32 fw_version; + } image_info; + u32 section_type_num; + u32 device_id; +}; + +struct devlink *hinic_devlink_alloc(void); +void hinic_devlink_free(struct devlink *devlink); +int hinic_devlink_register(struct devlink *devlink, struct device *dev); +void hinic_devlink_unregister(struct devlink *devlink); + +#endif /* __HINIC_DEVLINK_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 47c60d9752e4..9831c14324e6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -897,6 +897,8 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev) set_resources_state(hwdev, HINIC_RES_CLEAN); + hinic_vf_func_free(hwdev); + free_pfhwdev(pfhwdev); hinic_aeqs_free(&hwdev->aeqs); @@ -1047,3 +1049,29 @@ void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index, { hinic_set_msix_state(hwdev->hwif, msix_index, flag); } + +int hinic_get_board_info(struct hinic_hwdev *hwdev, + struct hinic_comm_board_info *board_info) +{ + u16 out_size = sizeof(*board_info); + struct hinic_pfhwdev *pfhwdev; + int err; + + if (!hwdev || !board_info) + return -EINVAL; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_GET_BOARD_INFO, + board_info, sizeof(*board_info), + board_info, &out_size, HINIC_MGMT_MSG_SYNC); + if (err || board_info->status || !out_size) { + dev_err(&hwdev->hwif->pdev->dev, + "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n", + err, board_info->status, out_size); + return -EIO; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 958ea1a6a60d..94593a8ad667 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -116,6 +116,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_SET_TSO = 112, + HINIC_PORT_CMD_UPDATE_FW = 114, + HINIC_PORT_CMD_SET_RQ_IQ_MAP = 115, HINIC_PORT_CMD_LINK_STATUS_REPORT = 160, @@ -307,6 +309,35 @@ struct hinic_msix_config { u8 rsvd1[3]; }; +struct hinic_board_info { + u32 board_type; + u32 port_num; + u32 port_speed; + u32 pcie_width; + u32 host_num; + u32 pf_num; + u32 vf_total_num; + u32 tile_num; + u32 qcm_num; + u32 core_num; + u32 work_mode; + u32 service_mode; + u32 pcie_mode; + u32 cfg_addr; + u32 boot_sel; + u32 board_id; +}; + +struct hinic_comm_board_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_board_info info; + + u32 rsvd1[4]; +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; @@ -407,4 +438,7 @@ int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev, int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, struct hinic_msix_config *interrupt_info); +int hinic_get_board_info(struct hinic_hwdev *hwdev, + struct hinic_comm_board_info *board_info); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index e8a55b53c855..21b93b654d6b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -87,7 +87,9 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_PAGESIZE_SET = 0x50, - HINIC_COMM_CMD_MAX = 0x51, + HINIC_COMM_CMD_GET_BOARD_INFO = 0x52, + + HINIC_COMM_CMD_MAX, }; enum hinic_mgmt_cb_state { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 834a20a0043c..c4c6f9c29f0e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -25,6 +26,7 @@ #include "hinic_hw_qp.h" #include "hinic_hw_dev.h" +#include "hinic_devlink.h" #include "hinic_port.h" #include "hinic_tx.h" #include "hinic_rx.h" @@ -1075,9 +1077,11 @@ static int nic_dev_init(struct pci_dev *pdev) struct hinic_rx_mode_work *rx_mode_work; struct hinic_txq_stats *tx_stats; struct hinic_rxq_stats 
*rx_stats; + struct hinic_devlink_priv *priv; struct hinic_dev *nic_dev; struct net_device *netdev; struct hinic_hwdev *hwdev; + struct devlink *devlink; int err, num_qps; hwdev = hinic_init_hwdev(pdev); @@ -1086,6 +1090,16 @@ static int nic_dev_init(struct pci_dev *pdev) return PTR_ERR(hwdev); } + devlink = hinic_devlink_alloc(); + if (!devlink) { + dev_err(&pdev->dev, "Hinic devlink alloc failed\n"); + err = -ENOMEM; + goto err_devlink_alloc; + } + + priv = devlink_priv(devlink); + priv->hwdev = hwdev; + num_qps = hinic_hwdev_num_qps(hwdev); if (num_qps <= 0) { dev_err(&pdev->dev, "Invalid number of QPS\n"); @@ -1121,6 +1135,7 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->sriov_info.hwdev = hwdev; nic_dev->sriov_info.pdev = pdev; nic_dev->max_qps = num_qps; + nic_dev->devlink = devlink; hinic_set_ethtool_ops(netdev); @@ -1146,6 +1161,10 @@ static int nic_dev_init(struct pci_dev *pdev) goto err_workq; } + err = hinic_devlink_register(devlink, &pdev->dev); + if (err) + goto err_devlink_reg; + pci_set_drvdata(pdev, netdev); err = hinic_port_get_mac(nic_dev, netdev->dev_addr); @@ -1223,9 +1242,11 @@ static int nic_dev_init(struct pci_dev *pdev) cancel_work_sync(&rx_mode_work->work); err_set_mtu: -err_get_mac: + hinic_port_del_mac(nic_dev, netdev->dev_addr, 0); err_add_mac: +err_get_mac: pci_set_drvdata(pdev, NULL); +err_devlink_reg: destroy_workqueue(nic_dev->workq); err_workq: @@ -1234,6 +1255,7 @@ static int nic_dev_init(struct pci_dev *pdev) err_alloc_etherdev: err_num_qps: +err_devlink_alloc: hinic_free_hwdev(hwdev); return err; } @@ -1342,9 +1364,11 @@ static void hinic_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); + hinic_devlink_unregister(nic_dev->devlink); + destroy_workqueue(nic_dev->workq); - hinic_vf_func_free(nic_dev->hwdev); + hinic_devlink_free(nic_dev->devlink); hinic_free_hwdev(nic_dev->hwdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 0e444d2c02bb..14931adaffb8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -703,6 +703,31 @@ struct hinic_cmd_get_std_sfp_info { u8 sfp_info[STD_SFP_INFO_MAX_SIZE]; }; +struct hinic_cmd_update_fw { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct { + u32 SL:1; + u32 SF:1; + u32 flag:1; + u32 reserved:13; + u32 fragment_len:16; + } ctl_info; + + struct { + u32 FW_section_CRC; + u32 FW_section_type; + } section_info; + + u32 total_len; + u32 setion_total_len; + u32 fw_section_version; + u32 section_offset; + u32 data[384]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); From fbf0f5d185343300a80aa0fd8e633a985b9947e9 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:23 +0300 Subject: [PATCH 1089/2056] mlxsw: reg: Add policer bandwidth limits Add policer bandwidth limits for both rate and burst size so that they could be enforced by a later patch. 
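As a rough illustration of how these limits are meant to be consumed, the hypothetical helper below only sketches the range check; the real enforcement is added by the spectrum_policer code later in this series, using the same (2 ^ bs) * 512-bit burst encoding:

	/* Sketch: validate a requested bandwidth policer against the QPCR
	 * limits added below, here using the Spectrum-1 burst-size bounds.
	 */
	static int example_qpcr_bw_check(u64 rate_bits_ps, u64 burst_bytes)
	{
		/* Convert burst in bytes to the ASIC's (2 ^ bs) * 512-bit units */
		u8 bs = burst_bytes ? fls64(div_u64(burst_bytes, 64)) - 1 : 0;

		if (rate_bits_ps < MLXSW_REG_QPCR_LOWEST_CIR_BITS ||
		    rate_bits_ps > MLXSW_REG_QPCR_HIGHEST_CIR_BITS)
			return -EINVAL;

		if (bs < MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1 ||
		    bs > MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1)
			return -EINVAL;

		return 0;
	}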
Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 408003520602..3c5b25495751 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3405,11 +3405,20 @@ MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4); */ MLXSW_ITEM64(reg, qpcr, violate_count, 0x20, 0, 64); +/* Packets */ #define MLXSW_REG_QPCR_LOWEST_CIR 1 #define MLXSW_REG_QPCR_HIGHEST_CIR (2 * 1000 * 1000 * 1000) /* 2Gpps */ #define MLXSW_REG_QPCR_LOWEST_CBS 4 #define MLXSW_REG_QPCR_HIGHEST_CBS 24 +/* Bandwidth */ +#define MLXSW_REG_QPCR_LOWEST_CIR_BITS 1024 /* bps */ +#define MLXSW_REG_QPCR_HIGHEST_CIR_BITS 2000000000000ULL /* 2Tbps */ +#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1 4 +#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2 4 +#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1 25 +#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2 31 + static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid, enum mlxsw_reg_qpcr_ir_units ir_units, bool bytes, u32 cir, u16 cbs) From 1b744fc9f8d5372506d1a9b7aae2584b75d54e3b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:24 +0300 Subject: [PATCH 1090/2056] mlxsw: resources: Add resource identifier for global policers Add a resource identifier for the maximum number of global policers so that it can later be used to query this information from firmware. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index d62496ef299c..a56c9e19a390 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -47,6 +47,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB, MLXSW_RES_ID_ACL_MAX_BF_LOG, + MLXSW_RES_ID_MAX_GLOBAL_POLICERS, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -105,6 +106,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953, [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960, + [MLXSW_RES_ID_MAX_GLOBAL_POLICERS] = 0x2A10, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, From 8d3fbae70d8d7e5f75cf3cb79eb8f1ac9807c354 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:25 +0300 Subject: [PATCH 1091/2056] mlxsw: spectrum_policer: Add policer core Add common code to handle all policer-related functionality in mlxsw. Currently, only policers for policy engines are supported, but in the future more policer families will be added, such as CPU (trap) policers and storm control policers. The API allows different modules to add and delete policers and to read their drop counters.
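To make the new interface concrete, here is a sketch of how another mlxsw module could consume it. The function below is purely illustrative (the actual consumers, such as the ACL police action, arrive later in the series); it uses the mlxsw_sp_policer_add/del and drops-counter helpers exactly as declared in spectrum.h by this patch:

	/* Hypothetical consumer of the policer core API */
	static int example_single_rate_policer(struct mlxsw_sp *mlxsw_sp,
					       struct netlink_ext_ack *extack)
	{
		struct mlxsw_sp_policer_params params = {
			.rate = 125000000,	/* bytes per second (~1 Gbps) */
			.burst = 65536,		/* bytes, must be a power of two */
			.bytes = true,		/* bandwidth policing */
		};
		u16 policer_index;
		u64 drops;
		int err;

		err = mlxsw_sp_policer_add(mlxsw_sp,
					   MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
					   &params, extack, &policer_index);
		if (err)
			return err;

		/* ... bind policer_index to a flow action, run traffic ... */

		err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp,
							 MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
							 policer_index, &drops);

		mlxsw_sp_policer_del(mlxsw_sp, MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
				     policer_index);
		return err;
	}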
Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum.c | 12 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 32 ++ .../mellanox/mlxsw/spectrum_policer.c | 403 ++++++++++++++++++ 4 files changed, 448 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 3709983fbd77..892724380ea2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -31,7 +31,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_qdisc.o spectrum_span.o \ spectrum_nve.o spectrum_nve_vxlan.o \ spectrum_dpipe.o spectrum_trap.o \ - spectrum_ethtool.o + spectrum_ethtool.o spectrum_policer.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK) += spectrum_ptp.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4ac634bd3571..c6ab61818800 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2860,6 +2860,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_fids_init; } + err = mlxsw_sp_policers_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n"); + goto err_policers_init; + } + err = mlxsw_sp_traps_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n"); @@ -3019,6 +3025,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, err_devlink_traps_init: mlxsw_sp_traps_fini(mlxsw_sp); err_traps_init: + mlxsw_sp_policers_fini(mlxsw_sp); +err_policers_init: mlxsw_sp_fids_fini(mlxsw_sp); err_fids_init: mlxsw_sp_kvdl_fini(mlxsw_sp); @@ -3046,6 +3054,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops; mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; + mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; mlxsw_sp->listeners = mlxsw_sp1_listener; mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; @@ -3074,6 +3083,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; + mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3100,6 +3110,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops; mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; + mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3129,6 +3140,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_devlink_traps_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); + mlxsw_sp_policers_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); 
mlxsw_sp_kvdl_fini(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index c00811178637..82227e87ef7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -151,6 +151,7 @@ struct mlxsw_sp { struct mlxsw_afa *afa; struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; + struct mlxsw_sp_policer_core *policer_core; struct mlxsw_sp_kvdl *kvdl; struct mlxsw_sp_nve *nve; struct notifier_block netdevice_nb; @@ -173,6 +174,7 @@ struct mlxsw_sp { const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops; const struct mlxsw_sp_ptp_ops *ptp_ops; const struct mlxsw_sp_span_ops *span_ops; + const struct mlxsw_sp_policer_core_ops *policer_core_ops; const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; @@ -1196,4 +1198,34 @@ extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops; extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops; extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops; +/* spectrum_policer.c */ +extern const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops; +extern const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops; + +enum mlxsw_sp_policer_type { + MLXSW_SP_POLICER_TYPE_SINGLE_RATE, + + __MLXSW_SP_POLICER_TYPE_MAX, + MLXSW_SP_POLICER_TYPE_MAX = __MLXSW_SP_POLICER_TYPE_MAX - 1, +}; + +struct mlxsw_sp_policer_params { + u64 rate; + u64 burst; + bool bytes; +}; + +int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, + const struct mlxsw_sp_policer_params *params, + struct netlink_ext_ack *extack, u16 *p_policer_index); +void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, + u16 policer_index); +int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, + u16 policer_index, u64 *p_drops); +int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c new file mode 100644 index 000000000000..74766e936e0a --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* Copyright (c) 2020 Mellanox Technologies. 
All rights reserved */ + +#include +#include +#include +#include + +#include "spectrum.h" + +struct mlxsw_sp_policer_family { + enum mlxsw_sp_policer_type type; + enum mlxsw_reg_qpcr_g qpcr_type; + struct mlxsw_sp *mlxsw_sp; + u16 start_index; /* Inclusive */ + u16 end_index; /* Exclusive */ + struct idr policer_idr; + struct mutex lock; /* Protects policer_idr */ + const struct mlxsw_sp_policer_family_ops *ops; +}; + +struct mlxsw_sp_policer { + struct mlxsw_sp_policer_params params; + u16 index; +}; + +struct mlxsw_sp_policer_family_ops { + int (*init)(struct mlxsw_sp_policer_family *family); + void (*fini)(struct mlxsw_sp_policer_family *family); + int (*policer_index_alloc)(struct mlxsw_sp_policer_family *family, + struct mlxsw_sp_policer *policer); + struct mlxsw_sp_policer * (*policer_index_free)(struct mlxsw_sp_policer_family *family, + u16 policer_index); + int (*policer_init)(struct mlxsw_sp_policer_family *family, + const struct mlxsw_sp_policer *policer); + int (*policer_params_check)(const struct mlxsw_sp_policer_family *family, + const struct mlxsw_sp_policer_params *params, + struct netlink_ext_ack *extack); +}; + +struct mlxsw_sp_policer_core { + struct mlxsw_sp_policer_family *family_arr[MLXSW_SP_POLICER_TYPE_MAX + 1]; + const struct mlxsw_sp_policer_core_ops *ops; + u8 lowest_bs_bits; + u8 highest_bs_bits; +}; + +struct mlxsw_sp_policer_core_ops { + int (*init)(struct mlxsw_sp_policer_core *policer_core); +}; + +static u64 mlxsw_sp_policer_rate_bytes_ps_kbps(u64 rate_bytes_ps) +{ + return div_u64(rate_bytes_ps, 1000) * BITS_PER_BYTE; +} + +static u8 mlxsw_sp_policer_burst_bytes_hw_units(u64 burst_bytes) +{ + /* Provided burst size is in bytes. The ASIC burst size value is + * (2 ^ bs) * 512 bits. Convert the provided size to 512-bit units. + */ + u64 bs512 = div_u64(burst_bytes, 64); + + if (!bs512) + return 0; + + return fls64(bs512) - 1; +} + +static int +mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family) +{ + struct mlxsw_core *core = family->mlxsw_sp->core; + + /* CPU policers are allocated from the first N policers in the global + * range, so skip them. 
+ */ + if (!MLXSW_CORE_RES_VALID(core, MAX_GLOBAL_POLICERS) || + !MLXSW_CORE_RES_VALID(core, MAX_CPU_POLICERS)) + return -EIO; + + family->start_index = MLXSW_CORE_RES_GET(core, MAX_CPU_POLICERS); + family->end_index = MLXSW_CORE_RES_GET(core, MAX_GLOBAL_POLICERS); + + return 0; +} + +static void +mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family) +{ +} + +static int +mlxsw_sp_policer_single_rate_index_alloc(struct mlxsw_sp_policer_family *family, + struct mlxsw_sp_policer *policer) +{ + int id; + + mutex_lock(&family->lock); + id = idr_alloc(&family->policer_idr, policer, family->start_index, + family->end_index, GFP_KERNEL); + mutex_unlock(&family->lock); + + if (id < 0) + return id; + + policer->index = id; + + return 0; +} + +static struct mlxsw_sp_policer * +mlxsw_sp_policer_single_rate_index_free(struct mlxsw_sp_policer_family *family, + u16 policer_index) +{ + struct mlxsw_sp_policer *policer; + + mutex_lock(&family->lock); + policer = idr_remove(&family->policer_idr, policer_index); + mutex_unlock(&family->lock); + + WARN_ON(!policer); + + return policer; +} + +static int +mlxsw_sp_policer_single_rate_init(struct mlxsw_sp_policer_family *family, + const struct mlxsw_sp_policer *policer) +{ + u64 rate_kbps = mlxsw_sp_policer_rate_bytes_ps_kbps(policer->params.rate); + u8 bs = mlxsw_sp_policer_burst_bytes_hw_units(policer->params.burst); + struct mlxsw_sp *mlxsw_sp = family->mlxsw_sp; + char qpcr_pl[MLXSW_REG_QPCR_LEN]; + + mlxsw_reg_qpcr_pack(qpcr_pl, policer->index, MLXSW_REG_QPCR_IR_UNITS_K, + true, rate_kbps, bs); + mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, true); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl); +} + +static int +mlxsw_sp_policer_single_rate_params_check(const struct mlxsw_sp_policer_family *family, + const struct mlxsw_sp_policer_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_policer_core *policer_core = family->mlxsw_sp->policer_core; + u64 rate_bps = params->rate * BITS_PER_BYTE; + u8 bs; + + if (!params->bytes) { + NL_SET_ERR_MSG_MOD(extack, "Only bandwidth policing is currently supported by single rate policers"); + return -EINVAL; + } + + if (!is_power_of_2(params->burst)) { + NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two"); + return -EINVAL; + } + + bs = mlxsw_sp_policer_burst_bytes_hw_units(params->burst); + + if (bs < policer_core->lowest_bs_bits) { + NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit"); + return -EINVAL; + } + + if (bs > policer_core->highest_bs_bits) { + NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit"); + return -EINVAL; + } + + if (rate_bps < MLXSW_REG_QPCR_LOWEST_CIR_BITS) { + NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit"); + return -EINVAL; + } + + if (rate_bps > MLXSW_REG_QPCR_HIGHEST_CIR_BITS) { + NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit"); + return -EINVAL; + } + + return 0; +} + +static const struct mlxsw_sp_policer_family_ops mlxsw_sp_policer_single_rate_ops = { + .init = mlxsw_sp_policer_single_rate_family_init, + .fini = mlxsw_sp_policer_single_rate_family_fini, + .policer_index_alloc = mlxsw_sp_policer_single_rate_index_alloc, + .policer_index_free = mlxsw_sp_policer_single_rate_index_free, + .policer_init = mlxsw_sp_policer_single_rate_init, + .policer_params_check = mlxsw_sp_policer_single_rate_params_check, +}; + +static const struct mlxsw_sp_policer_family mlxsw_sp_policer_single_rate_family = { + .type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE, + .qpcr_type = 
MLXSW_REG_QPCR_G_GLOBAL, + .ops = &mlxsw_sp_policer_single_rate_ops, +}; + +static const struct mlxsw_sp_policer_family *mlxsw_sp_policer_family_arr[] = { + [MLXSW_SP_POLICER_TYPE_SINGLE_RATE] = &mlxsw_sp_policer_single_rate_family, +}; + +int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, + const struct mlxsw_sp_policer_params *params, + struct netlink_ext_ack *extack, u16 *p_policer_index) +{ + struct mlxsw_sp_policer_family *family; + struct mlxsw_sp_policer *policer; + int err; + + family = mlxsw_sp->policer_core->family_arr[type]; + + err = family->ops->policer_params_check(family, params, extack); + if (err) + return err; + + policer = kmalloc(sizeof(*policer), GFP_KERNEL); + if (!policer) + return -ENOMEM; + policer->params = *params; + + err = family->ops->policer_index_alloc(family, policer); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to allocate policer index"); + goto err_policer_index_alloc; + } + + err = family->ops->policer_init(family, policer); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to initialize policer"); + goto err_policer_init; + } + + *p_policer_index = policer->index; + + return 0; + +err_policer_init: + family->ops->policer_index_free(family, policer->index); +err_policer_index_alloc: + kfree(policer); + return err; +} + +void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, u16 policer_index) +{ + struct mlxsw_sp_policer_family *family; + struct mlxsw_sp_policer *policer; + + family = mlxsw_sp->policer_core->family_arr[type]; + policer = family->ops->policer_index_free(family, policer_index); + kfree(policer); +} + +int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_policer_type type, + u16 policer_index, u64 *p_drops) +{ + struct mlxsw_sp_policer_family *family; + char qpcr_pl[MLXSW_REG_QPCR_LEN]; + int err; + + family = mlxsw_sp->policer_core->family_arr[type]; + + MLXSW_REG_ZERO(qpcr, qpcr_pl); + mlxsw_reg_qpcr_pid_set(qpcr_pl, policer_index); + mlxsw_reg_qpcr_g_set(qpcr_pl, family->qpcr_type); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl); + if (err) + return err; + + *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl); + + return 0; +} + +static int +mlxsw_sp_policer_family_register(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_policer_family *tmpl) +{ + struct mlxsw_sp_policer_family *family; + int err; + + family = kmemdup(tmpl, sizeof(*family), GFP_KERNEL); + if (!family) + return -ENOMEM; + + family->mlxsw_sp = mlxsw_sp; + idr_init(&family->policer_idr); + mutex_init(&family->lock); + + err = family->ops->init(family); + if (err) + goto err_family_init; + + if (WARN_ON(family->start_index >= family->end_index)) { + err = -EINVAL; + goto err_index_check; + } + + mlxsw_sp->policer_core->family_arr[tmpl->type] = family; + + return 0; + +err_index_check: + family->ops->fini(family); +err_family_init: + mutex_destroy(&family->lock); + idr_destroy(&family->policer_idr); + kfree(family); + return err; +} + +static void +mlxsw_sp_policer_family_unregister(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_policer_family *family) +{ + family->ops->fini(family); + mutex_destroy(&family->lock); + WARN_ON(!idr_is_empty(&family->policer_idr)); + idr_destroy(&family->policer_idr); + kfree(family); +} + +int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_policer_core *policer_core; + int i, err; + + policer_core = kzalloc(sizeof(*policer_core), GFP_KERNEL); + if (!policer_core) + return -ENOMEM; + 
mlxsw_sp->policer_core = policer_core; + policer_core->ops = mlxsw_sp->policer_core_ops; + + err = policer_core->ops->init(policer_core); + if (err) + goto err_init; + + for (i = 0; i < MLXSW_SP_POLICER_TYPE_MAX + 1; i++) { + err = mlxsw_sp_policer_family_register(mlxsw_sp, mlxsw_sp_policer_family_arr[i]); + if (err) + goto err_family_register; + } + + return 0; + +err_family_register: + for (i--; i >= 0; i--) { + struct mlxsw_sp_policer_family *family; + + family = mlxsw_sp->policer_core->family_arr[i]; + mlxsw_sp_policer_family_unregister(mlxsw_sp, family); + } +err_init: + kfree(mlxsw_sp->policer_core); + return err; +} + +void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = MLXSW_SP_POLICER_TYPE_MAX; i >= 0; i--) { + struct mlxsw_sp_policer_family *family; + + family = mlxsw_sp->policer_core->family_arr[i]; + mlxsw_sp_policer_family_unregister(mlxsw_sp, family); + } + + kfree(mlxsw_sp->policer_core); +} + +static int +mlxsw_sp1_policer_core_init(struct mlxsw_sp_policer_core *policer_core) +{ + policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1; + policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1; + + return 0; +} + +const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops = { + .init = mlxsw_sp1_policer_core_init, +}; + +static int +mlxsw_sp2_policer_core_init(struct mlxsw_sp_policer_core *policer_core) +{ + policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2; + policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2; + + return 0; +} + +const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops = { + .init = mlxsw_sp2_policer_core_init, +}; From bf038f03728ee2880168aa127f5f871fea6d22ab Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:26 +0300 Subject: [PATCH 1092/2056] mlxsw: spectrum_policer: Add devlink resource support Expose via devlink-resource the maximum number of single-rate policers and their current occupancy. Example: $ devlink resource show pci/0000:01:00.0 ... 
name global_policers size 1000 unit entry dpipe_tables none resources: name single_rate_policers size 968 occ 0 unit entry dpipe_tables none Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 8 +++ .../net/ethernet/mellanox/mlxsw/spectrum.h | 3 + .../mellanox/mlxsw/spectrum_policer.c | 65 +++++++++++++++++++ 3 files changed, 76 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c6ab61818800..519eb44e4097 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3352,6 +3352,10 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core) if (err) goto err_resources_counter_register; + err = mlxsw_sp_policer_resources_register(mlxsw_core); + if (err) + goto err_resources_counter_register; + return 0; err_resources_counter_register: @@ -3376,6 +3380,10 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core) if (err) goto err_resources_counter_register; + err = mlxsw_sp_policer_resources_register(mlxsw_core); + if (err) + goto err_resources_counter_register; + return 0; err_resources_counter_register: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 82227e87ef7c..defe1d82d83e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -62,6 +62,8 @@ enum mlxsw_sp_resource_id { MLXSW_SP_RESOURCE_COUNTERS, MLXSW_SP_RESOURCE_COUNTERS_FLOW, MLXSW_SP_RESOURCE_COUNTERS_RIF, + MLXSW_SP_RESOURCE_GLOBAL_POLICERS, + MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS, }; struct mlxsw_sp_port; @@ -1227,5 +1229,6 @@ int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp, u16 policer_index, u64 *p_drops); int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c index 74766e936e0a..39052e5c12fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c @@ -5,6 +5,7 @@ #include #include #include +#include #include "spectrum.h" @@ -16,6 +17,7 @@ struct mlxsw_sp_policer_family { u16 end_index; /* Exclusive */ struct idr policer_idr; struct mutex lock; /* Protects policer_idr */ + atomic_t policers_count; const struct mlxsw_sp_policer_family_ops *ops; }; @@ -67,10 +69,18 @@ static u8 mlxsw_sp_policer_burst_bytes_hw_units(u64 burst_bytes) return fls64(bs512) - 1; } +static u64 mlxsw_sp_policer_single_rate_occ_get(void *priv) +{ + struct mlxsw_sp_policer_family *family = priv; + + return atomic_read(&family->policers_count); +} + static int mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family) { struct mlxsw_core *core = family->mlxsw_sp->core; + struct devlink *devlink; /* CPU policers are allocated from the first N policers in the global * range, so skip them. 
@@ -82,12 +92,24 @@ mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family) family->start_index = MLXSW_CORE_RES_GET(core, MAX_CPU_POLICERS); family->end_index = MLXSW_CORE_RES_GET(core, MAX_GLOBAL_POLICERS); + atomic_set(&family->policers_count, 0); + devlink = priv_to_devlink(core); + devlink_resource_occ_get_register(devlink, + MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS, + mlxsw_sp_policer_single_rate_occ_get, + family); + return 0; } static void mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family) { + struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core); + + devlink_resource_occ_get_unregister(devlink, + MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS); + WARN_ON(atomic_read(&family->policers_count) != 0); } static int @@ -104,6 +126,7 @@ mlxsw_sp_policer_single_rate_index_alloc(struct mlxsw_sp_policer_family *family, if (id < 0) return id; + atomic_inc(&family->policers_count); policer->index = id; return 0; @@ -115,6 +138,8 @@ mlxsw_sp_policer_single_rate_index_free(struct mlxsw_sp_policer_family *family, { struct mlxsw_sp_policer *policer; + atomic_dec(&family->policers_count); + mutex_lock(&family->lock); policer = idr_remove(&family->policer_idr, policer_index); mutex_unlock(&family->lock); @@ -376,6 +401,46 @@ void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp) kfree(mlxsw_sp->policer_core); } +int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core) +{ + u64 global_policers, cpu_policers, single_rate_policers; + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_resource_size_params size_params; + int err; + + if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_GLOBAL_POLICERS) || + !MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS)) + return -EIO; + + global_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_GLOBAL_POLICERS); + cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS); + single_rate_policers = global_policers - cpu_policers; + + devlink_resource_size_params_init(&size_params, global_policers, + global_policers, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, "global_policers", + global_policers, + MLXSW_SP_RESOURCE_GLOBAL_POLICERS, + DEVLINK_RESOURCE_ID_PARENT_TOP, + &size_params); + if (err) + return err; + + devlink_resource_size_params_init(&size_params, single_rate_policers, + single_rate_policers, 1, + DEVLINK_RESOURCE_UNIT_ENTRY); + err = devlink_resource_register(devlink, "single_rate_policers", + single_rate_policers, + MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS, + MLXSW_SP_RESOURCE_GLOBAL_POLICERS, + &size_params); + if (err) + return err; + + return 0; +} + static int mlxsw_sp1_policer_core_init(struct mlxsw_sp_policer_core *policer_core) { From d25b8f6ebcc451b56faedc2dc666f74628fb1ffc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:27 +0300 Subject: [PATCH 1093/2056] mlxsw: core_acl_flex_actions: Work around hardware limitation In the policy engine, each ACL rule points to an action block where the ACL actions are stored. Each action block consists of one or more action sets. Each action set holds one or more individual actions, up to a maximum queried from the device. 
For example: Action set #1 Action set #2 +----------+ +--------------+ +--------------+ | ACL rule +----------> Action #1 | +-----> Action #4 | +----------+ +--------------+ | +--------------+ | Action #2 | | | Action #5 | +--------------+ | +--------------+ | Action #3 +------+ | | +--------------+ +--------------+ <---------+ Action block +-----------------> The hardware has a limitation that prevents a policing action (MLXSW_AFA_POLCNT_CODE when used with a policer, not a counter) from being configured in the same action set with a trap action (i.e., MLXSW_AFA_TRAP_CODE or MLXSW_AFA_TRAPWU_CODE). Note that the latter used to implement multiple actions: 'trap', 'mirred', 'drop'. Work around this limitation by teaching mlxsw_afa_block_append_action() to create a new action set not only when there is no more room left in the current set, but also when there is a conflict between previously mentioned actions. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../mellanox/mlxsw/core_acl_flex_actions.c | 87 +++++++++++++++---- 1 file changed, 71 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 30a7d5afdec7..06a43913b9ce 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -88,9 +88,11 @@ struct mlxsw_afa_set { struct rhash_head ht_node; struct mlxsw_afa_set_ht_key ht_key; u32 kvdl_index; - bool shared; /* Inserted in hashtable (doesn't mean that + u8 shared:1, /* Inserted in hashtable (doesn't mean that * kvdl_index is valid). */ + has_trap:1, + has_police:1; unsigned int ref_count; struct mlxsw_afa_set *next; /* Pointer to the next set. */ struct mlxsw_afa_set *prev; /* Pointer to the previous set, @@ -839,16 +841,38 @@ mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block, #define MLXSW_AFA_ONE_ACTION_LEN 32 #define MLXSW_AFA_PAYLOAD_OFFSET 4 -static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, - u8 action_code, u8 action_size) +enum mlxsw_afa_action_type { + MLXSW_AFA_ACTION_TYPE_TRAP, + MLXSW_AFA_ACTION_TYPE_POLICE, + MLXSW_AFA_ACTION_TYPE_OTHER, +}; + +static bool +mlxsw_afa_block_need_split(const struct mlxsw_afa_block *block, + enum mlxsw_afa_action_type type) +{ + struct mlxsw_afa_set *cur_set = block->cur_set; + + /* Due to a hardware limitation, police action cannot be in the same + * action set with MLXSW_AFA_TRAP_CODE or MLXSW_AFA_TRAPWU_CODE + * actions. Work around this limitation by creating a new action set + * and place the new action there. 
+ */ + return (cur_set->has_trap && type == MLXSW_AFA_ACTION_TYPE_POLICE) || + (cur_set->has_police && type == MLXSW_AFA_ACTION_TYPE_TRAP); +} + +static char *mlxsw_afa_block_append_action_ext(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size, + enum mlxsw_afa_action_type type) { char *oneact; char *actions; if (block->finished) return ERR_PTR(-EINVAL); - if (block->cur_act_index + action_size > - block->afa->max_acts_per_set) { + if (block->cur_act_index + action_size > block->afa->max_acts_per_set || + mlxsw_afa_block_need_split(block, type)) { struct mlxsw_afa_set *set; /* The appended action won't fit into the current action set, @@ -863,6 +887,17 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, block->cur_set = set; } + switch (type) { + case MLXSW_AFA_ACTION_TYPE_TRAP: + block->cur_set->has_trap = true; + break; + case MLXSW_AFA_ACTION_TYPE_POLICE: + block->cur_set->has_police = true; + break; + default: + break; + } + actions = block->cur_set->ht_key.enc_actions; oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN; block->cur_act_index += action_size; @@ -870,6 +905,14 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, return oneact + MLXSW_AFA_PAYLOAD_OFFSET; } +static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size) +{ + return mlxsw_afa_block_append_action_ext(block, action_code, + action_size, + MLXSW_AFA_ACTION_TYPE_OTHER); +} + /* VLAN Action * ----------- * VLAN action is used for manipulating VLANs. It can be used to implement QinQ, @@ -1048,11 +1091,20 @@ mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable, mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent); } +static char *mlxsw_afa_block_append_action_trap(struct mlxsw_afa_block *block, + u8 action_code, u8 action_size) +{ + return mlxsw_afa_block_append_action_ext(block, action_code, + action_size, + MLXSW_AFA_ACTION_TYPE_TRAP); +} + static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block, bool ingress) { - char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE, - MLXSW_AFA_TRAP_SIZE); + char *act = mlxsw_afa_block_append_action_trap(block, + MLXSW_AFA_TRAP_CODE, + MLXSW_AFA_TRAP_SIZE); if (IS_ERR(act)) return PTR_ERR(act); @@ -1081,8 +1133,8 @@ mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block, } cookie_index = cookie_ref->cookie->cookie_index; - act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPWU_CODE, - MLXSW_AFA_TRAPWU_SIZE); + act = mlxsw_afa_block_append_action_trap(block, MLXSW_AFA_TRAPWU_CODE, + MLXSW_AFA_TRAPWU_SIZE); if (IS_ERR(act)) { NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action"); err = PTR_ERR(act); @@ -1113,8 +1165,9 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_drop); int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) { - char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE, - MLXSW_AFA_TRAP_SIZE); + char *act = mlxsw_afa_block_append_action_trap(block, + MLXSW_AFA_TRAP_CODE, + MLXSW_AFA_TRAP_SIZE); if (IS_ERR(act)) return PTR_ERR(act); @@ -1127,8 +1180,9 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap); int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, u16 trap_id) { - char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE, - MLXSW_AFA_TRAP_SIZE); + char *act = mlxsw_afa_block_append_action_trap(block, + MLXSW_AFA_TRAP_CODE, + MLXSW_AFA_TRAP_SIZE); if (IS_ERR(act)) return PTR_ERR(act); @@ -1199,9 +1253,10 @@ 
static int mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block, u8 mirror_agent) { - char *act = mlxsw_afa_block_append_action(block, - MLXSW_AFA_TRAP_CODE, - MLXSW_AFA_TRAP_SIZE); + char *act = mlxsw_afa_block_append_action_trap(block, + MLXSW_AFA_TRAP_CODE, + MLXSW_AFA_TRAP_SIZE); + if (IS_ERR(act)) return PTR_ERR(act); mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP, From deee0abc70d9c2c7241fdb915527cb8fdbf36e7a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:28 +0300 Subject: [PATCH 1094/2056] mlxsw: core_acl_flex_actions: Add police action Add core functionality required to support police action in the policy engine. The utilized hardware policers are stored in a hash table keyed by the flow action index. This allows to support policer sharing between multiple ACL rules. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../mellanox/mlxsw/core_acl_flex_actions.c | 217 ++++++++++++++++++ .../mellanox/mlxsw/core_acl_flex_actions.h | 8 + 2 files changed, 225 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 06a43913b9ce..4d699fe98cb6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -67,7 +67,9 @@ struct mlxsw_afa { struct rhashtable set_ht; struct rhashtable fwd_entry_ht; struct rhashtable cookie_ht; + struct rhashtable policer_ht; struct idr cookie_idr; + struct list_head policer_list; }; #define MLXSW_AFA_SET_LEN 0xA8 @@ -177,6 +179,21 @@ static const struct rhashtable_params mlxsw_afa_cookie_ht_params = { .automatic_shrinking = true, }; +struct mlxsw_afa_policer { + struct rhash_head ht_node; + struct list_head list; /* Member of policer_list */ + refcount_t ref_count; + u32 fa_index; + u16 policer_index; +}; + +static const struct rhashtable_params mlxsw_afa_policer_ht_params = { + .key_len = sizeof(u32), + .key_offset = offsetof(struct mlxsw_afa_policer, fa_index), + .head_offset = offsetof(struct mlxsw_afa_policer, ht_node), + .automatic_shrinking = true, +}; + struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, const struct mlxsw_afa_ops *ops, void *ops_priv) @@ -198,12 +215,19 @@ struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set, &mlxsw_afa_cookie_ht_params); if (err) goto err_cookie_rhashtable_init; + err = rhashtable_init(&mlxsw_afa->policer_ht, + &mlxsw_afa_policer_ht_params); + if (err) + goto err_policer_rhashtable_init; idr_init(&mlxsw_afa->cookie_idr); + INIT_LIST_HEAD(&mlxsw_afa->policer_list); mlxsw_afa->max_acts_per_set = max_acts_per_set; mlxsw_afa->ops = ops; mlxsw_afa->ops_priv = ops_priv; return mlxsw_afa; +err_policer_rhashtable_init: + rhashtable_destroy(&mlxsw_afa->cookie_ht); err_cookie_rhashtable_init: rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); err_fwd_entry_rhashtable_init: @@ -216,8 +240,10 @@ EXPORT_SYMBOL(mlxsw_afa_create); void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa) { + WARN_ON(!list_empty(&mlxsw_afa->policer_list)); WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr)); idr_destroy(&mlxsw_afa->cookie_idr); + rhashtable_destroy(&mlxsw_afa->policer_ht); rhashtable_destroy(&mlxsw_afa->cookie_ht); rhashtable_destroy(&mlxsw_afa->fwd_entry_ht); rhashtable_destroy(&mlxsw_afa->set_ht); @@ -838,6 +864,137 @@ mlxsw_afa_cookie_ref_create(struct mlxsw_afa_block *block, return ERR_PTR(err); } +static struct mlxsw_afa_policer * 
+mlxsw_afa_policer_create(struct mlxsw_afa *mlxsw_afa, u32 fa_index, + u64 rate_bytes_ps, u32 burst, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_policer *policer; + int err; + + policer = kzalloc(sizeof(*policer), GFP_KERNEL); + if (!policer) + return ERR_PTR(-ENOMEM); + + err = mlxsw_afa->ops->policer_add(mlxsw_afa->ops_priv, rate_bytes_ps, + burst, &policer->policer_index, + extack); + if (err) + goto err_policer_add; + + refcount_set(&policer->ref_count, 1); + policer->fa_index = fa_index; + + err = rhashtable_insert_fast(&mlxsw_afa->policer_ht, &policer->ht_node, + mlxsw_afa_policer_ht_params); + if (err) + goto err_rhashtable_insert; + + list_add_tail(&policer->list, &mlxsw_afa->policer_list); + + return policer; + +err_rhashtable_insert: + mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv, + policer->policer_index); +err_policer_add: + kfree(policer); + return ERR_PTR(err); +} + +static void mlxsw_afa_policer_destroy(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_policer *policer) +{ + list_del(&policer->list); + rhashtable_remove_fast(&mlxsw_afa->policer_ht, &policer->ht_node, + mlxsw_afa_policer_ht_params); + mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv, + policer->policer_index); + kfree(policer); +} + +static struct mlxsw_afa_policer * +mlxsw_afa_policer_get(struct mlxsw_afa *mlxsw_afa, u32 fa_index, + u64 rate_bytes_ps, u32 burst, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_policer *policer; + + policer = rhashtable_lookup_fast(&mlxsw_afa->policer_ht, &fa_index, + mlxsw_afa_policer_ht_params); + if (policer) { + refcount_inc(&policer->ref_count); + return policer; + } + + return mlxsw_afa_policer_create(mlxsw_afa, fa_index, rate_bytes_ps, + burst, extack); +} + +static void mlxsw_afa_policer_put(struct mlxsw_afa *mlxsw_afa, + struct mlxsw_afa_policer *policer) +{ + if (!refcount_dec_and_test(&policer->ref_count)) + return; + mlxsw_afa_policer_destroy(mlxsw_afa, policer); +} + +struct mlxsw_afa_policer_ref { + struct mlxsw_afa_resource resource; + struct mlxsw_afa_policer *policer; +}; + +static void +mlxsw_afa_policer_ref_destroy(struct mlxsw_afa_block *block, + struct mlxsw_afa_policer_ref *policer_ref) +{ + mlxsw_afa_resource_del(&policer_ref->resource); + mlxsw_afa_policer_put(block->afa, policer_ref->policer); + kfree(policer_ref); +} + +static void +mlxsw_afa_policer_ref_destructor(struct mlxsw_afa_block *block, + struct mlxsw_afa_resource *resource) +{ + struct mlxsw_afa_policer_ref *policer_ref; + + policer_ref = container_of(resource, struct mlxsw_afa_policer_ref, + resource); + mlxsw_afa_policer_ref_destroy(block, policer_ref); +} + +static struct mlxsw_afa_policer_ref * +mlxsw_afa_policer_ref_create(struct mlxsw_afa_block *block, u32 fa_index, + u64 rate_bytes_ps, u32 burst, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_policer_ref *policer_ref; + struct mlxsw_afa_policer *policer; + int err; + + policer_ref = kzalloc(sizeof(*policer_ref), GFP_KERNEL); + if (!policer_ref) + return ERR_PTR(-ENOMEM); + + policer = mlxsw_afa_policer_get(block->afa, fa_index, rate_bytes_ps, + burst, extack); + if (IS_ERR(policer)) { + err = PTR_ERR(policer); + goto err_policer_get; + } + + policer_ref->policer = policer; + policer_ref->resource.destructor = mlxsw_afa_policer_ref_destructor; + mlxsw_afa_resource_add(block, &policer_ref->resource); + + return policer_ref; + +err_policer_get: + kfree(policer_ref); + return ERR_PTR(err); +} + #define MLXSW_AFA_ONE_ACTION_LEN 32 #define MLXSW_AFA_PAYLOAD_OFFSET 4 @@ -1551,6 +1708,19 @@ 
EXPORT_SYMBOL(mlxsw_afa_block_append_fwd); #define MLXSW_AFA_POLCNT_CODE 0x08 #define MLXSW_AFA_POLCNT_SIZE 1 +enum { + MLXSW_AFA_POLCNT_COUNTER, + MLXSW_AFA_POLCNT_POLICER, +}; + +/* afa_polcnt_c_p + * Counter or policer. + * Indicates whether the action binds a policer or a counter to the flow. + * 0: Counter + * 1: Policer + */ +MLXSW_ITEM32(afa, polcnt, c_p, 0x00, 31, 1); + enum mlxsw_afa_polcnt_counter_set_type { /* No count */ MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00, @@ -1570,15 +1740,28 @@ MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8); */ MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24); +/* afa_polcnt_pid + * Policer ID. + * Reserved when c_p = 0 + */ +MLXSW_ITEM32(afa, polcnt, pid, 0x08, 0, 14); + static inline void mlxsw_afa_polcnt_pack(char *payload, enum mlxsw_afa_polcnt_counter_set_type set_type, u32 counter_index) { + mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_COUNTER); mlxsw_afa_polcnt_counter_set_type_set(payload, set_type); mlxsw_afa_polcnt_counter_index_set(payload, counter_index); } +static void mlxsw_afa_polcnt_policer_pack(char *payload, u16 policer_index) +{ + mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_POLICER); + mlxsw_afa_polcnt_pid_set(payload, policer_index); +} + int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block, u32 counter_index) { @@ -1622,6 +1805,40 @@ int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, } EXPORT_SYMBOL(mlxsw_afa_block_append_counter); +int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, + u32 fa_index, u64 rate_bytes_ps, u32 burst, + u16 *p_policer_index, + struct netlink_ext_ack *extack) +{ + struct mlxsw_afa_policer_ref *policer_ref; + char *act; + int err; + + policer_ref = mlxsw_afa_policer_ref_create(block, fa_index, + rate_bytes_ps, + burst, extack); + if (IS_ERR(policer_ref)) + return PTR_ERR(policer_ref); + *p_policer_index = policer_ref->policer->policer_index; + + act = mlxsw_afa_block_append_action_ext(block, MLXSW_AFA_POLCNT_CODE, + MLXSW_AFA_POLCNT_SIZE, + MLXSW_AFA_ACTION_TYPE_POLICE); + if (IS_ERR(act)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot append police action"); + err = PTR_ERR(act); + goto err_append_action; + } + mlxsw_afa_polcnt_policer_pack(act, *p_policer_index); + + return 0; + +err_append_action: + mlxsw_afa_policer_ref_destroy(block, policer_ref); + return err; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_police); + /* Virtual Router and Forwarding Domain Action * ------------------------------------------- * Virtual Switch action is used for manipulate the Virtual Router (VR), diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index a72350399bcf..b652497b1002 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -26,6 +26,10 @@ struct mlxsw_afa_ops { bool ingress, int *p_span_id); void (*mirror_del)(void *priv, u8 local_in_port, int span_id, bool ingress); + int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst, + u16 *p_policer_index, + struct netlink_ext_ack *extack); + void (*policer_del)(void *priv, u16 policer_index); bool dummy_first_set; }; @@ -84,5 +88,9 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, bool rmid_valid, u32 kvdl_index); int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port, struct netlink_ext_ack *extack); +int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block, 
+ u32 fa_index, u64 rate_bytes_ps, u32 burst, + u16 *p_policer_index, + struct netlink_ext_ack *extack); #endif From af11e818a7691468381bbb33a3b98fb718605385 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:29 +0300 Subject: [PATCH 1095/2056] mlxsw: spectrum_acl: Offload FLOW_ACTION_POLICE Offload action police when used with a flower classifier. The number of dropped packets is read from the policer and reported to tc. Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 11 +++++-- .../ethernet/mellanox/mlxsw/spectrum_acl.c | 33 ++++++++++++++++++- .../mlxsw/spectrum_acl_flex_actions.c | 27 +++++++++++++++ .../ethernet/mellanox/mlxsw/spectrum_flower.c | 30 +++++++++++++++-- 4 files changed, 96 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index defe1d82d83e..6ab1b6d725af 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -689,8 +689,10 @@ struct mlxsw_sp_acl_rule_info { u8 action_created:1, ingress_bind_blocker:1, egress_bind_blocker:1, - counter_valid:1; + counter_valid:1, + policer_index_valid:1; unsigned int counter_index; + u16 policer_index; }; /* spectrum_flow.c */ @@ -851,6 +853,10 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp, enum flow_action_mangle_base htype, u32 offset, u32 mask, u32 val, struct netlink_ext_ack *extack); +int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 index, u64 rate_bytes_ps, + u32 burst, struct netlink_ext_ack *extack); int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct netlink_ext_ack *extack); @@ -883,7 +889,8 @@ struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule); int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule, - u64 *packets, u64 *bytes, u64 *last_use, + u64 *packets, u64 *bytes, u64 *drops, + u64 *last_use, enum flow_action_hw_stats *used_hw_stats); struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index a671156a1428..8cfa03a75374 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -66,6 +66,7 @@ struct mlxsw_sp_acl_rule { u64 last_used; u64 last_packets; u64 last_bytes; + u64 last_drops; unsigned long priv[]; /* priv has to be always the last item */ }; @@ -648,6 +649,24 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp, return -EINVAL; } +int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + u32 index, u64 rate_bytes_ps, + u32 burst, struct netlink_ext_ack *extack) +{ + int err; + + err = mlxsw_afa_block_append_police(rulei->act_block, index, + rate_bytes_ps, burst, + &rulei->policer_index, extack); + if (err) + return err; + + rulei->policer_index_valid = true; + + return 0; +} + int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei, struct netlink_ext_ack *extack) @@ -868,13 +887,16 @@ static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work) int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule, - 
u64 *packets, u64 *bytes, u64 *last_use, + u64 *packets, u64 *bytes, u64 *drops, + u64 *last_use, enum flow_action_hw_stats *used_hw_stats) { + enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE; struct mlxsw_sp_acl_rule_info *rulei; u64 current_packets = 0; u64 current_bytes = 0; + u64 current_drops = 0; int err; rulei = mlxsw_sp_acl_rule_rulei(rule); @@ -886,12 +908,21 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, return err; *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE; } + if (rulei->policer_index_valid) { + err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type, + rulei->policer_index, + ¤t_drops); + if (err) + return err; + } *packets = current_packets - rule->last_packets; *bytes = current_bytes - rule->last_bytes; + *drops = current_drops - rule->last_drops; *last_use = rule->last_used; rule->last_bytes = current_bytes; rule->last_packets = current_packets; + rule->last_drops = current_drops; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c index 18444f675100..90372d1c28d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -169,6 +169,29 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress) mlxsw_sp_span_agent_put(mlxsw_sp, span_id); } +static int mlxsw_sp_act_policer_add(void *priv, u64 rate_bytes_ps, u32 burst, + u16 *p_policer_index, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_policer_params params; + struct mlxsw_sp *mlxsw_sp = priv; + + params.rate = rate_bytes_ps; + params.burst = burst; + params.bytes = true; + return mlxsw_sp_policer_add(mlxsw_sp, + MLXSW_SP_POLICER_TYPE_SINGLE_RATE, + ¶ms, extack, p_policer_index); +} + +static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_policer_del(mlxsw_sp, MLXSW_SP_POLICER_TYPE_SINGLE_RATE, + policer_index); +} + const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .kvdl_set_add = mlxsw_sp1_act_kvdl_set_add, .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, @@ -179,6 +202,8 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = { .counter_index_put = mlxsw_sp_act_counter_index_put, .mirror_add = mlxsw_sp_act_mirror_add, .mirror_del = mlxsw_sp_act_mirror_del, + .policer_add = mlxsw_sp_act_policer_add, + .policer_del = mlxsw_sp_act_policer_del, }; const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { @@ -191,6 +216,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = { .counter_index_put = mlxsw_sp_act_counter_index_put, .mirror_add = mlxsw_sp_act_mirror_add, .mirror_del = mlxsw_sp_act_mirror_del, + .policer_add = mlxsw_sp_act_policer_add, + .policer_del = mlxsw_sp_act_policer_del, .dummy_first_set = true, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 61d21043d83a..41855e58564b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -22,6 +23,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, { const struct flow_action_entry *act; int mirror_act_count = 0; + int police_act_count = 0; int err, i; if (!flow_action_has_entries(flow_action)) @@ -180,6 +182,28 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return err; 
break; } + case FLOW_ACTION_POLICE: { + u32 burst; + + if (police_act_count++) { + NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported"); + return -EOPNOTSUPP; + } + + /* The kernel might adjust the requested burst size so + * that it is not exactly a power of two. Re-adjust it + * here since the hardware only supports burst sizes + * that are a power of two. + */ + burst = roundup_pow_of_two(act->police.burst); + err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei, + act->police.index, + act->police.rate_bytes_ps, + burst, extack); + if (err) + return err; + break; + } default: NL_SET_ERR_MSG_MOD(extack, "Unsupported action"); dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n"); @@ -616,6 +640,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, u64 packets; u64 lastuse; u64 bytes; + u64 drops; int err; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block, @@ -629,11 +654,12 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp, return -EINVAL; err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes, - &lastuse, &used_hw_stats); + &drops, &lastuse, &used_hw_stats); if (err) goto err_rule_get_stats; - flow_stats_update(&f->stats, bytes, packets, 0, lastuse, used_hw_stats); + flow_stats_update(&f->stats, bytes, packets, drops, lastuse, + used_hw_stats); mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset); return 0; From afe231d32eb5fa4f4975596958c48c3a77fa1773 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:30 +0300 Subject: [PATCH 1096/2056] selftests: forwarding: Add tc-police tests Test tc-police action in various scenarios such as Rx policing, Tx policing, shared policer and police piped to mirred. The test passes with both veth pairs and loopbacked ports. # ./tc_police.sh TEST: police on rx [ OK ] TEST: police on tx [ OK ] TEST: police with shared policer - rx [ OK ] TEST: police with shared policer - tx [ OK ] TEST: police rx and mirror [ OK ] TEST: police tx and mirror [ OK ] Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../selftests/net/forwarding/tc_police.sh | 333 ++++++++++++++++++ 1 file changed, 333 insertions(+) create mode 100755 tools/testing/selftests/net/forwarding/tc_police.sh diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh new file mode 100755 index 000000000000..160f9cccdfb7 --- /dev/null +++ b/tools/testing/selftests/net/forwarding/tc_police.sh @@ -0,0 +1,333 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test tc-police action. 
+# +# +---------------------------------+ +# | H1 (vrf) | +# | + $h1 | +# | | 192.0.2.1/24 | +# | | | +# | | default via 192.0.2.2 | +# +----|----------------------------+ +# | +# +----|----------------------------------------------------------------------+ +# | SW | | +# | + $rp1 | +# | 192.0.2.2/24 | +# | | +# | 198.51.100.2/24 203.0.113.2/24 | +# | + $rp2 + $rp3 | +# | | | | +# +----|-----------------------------------------|----------------------------+ +# | | +# +----|----------------------------+ +----|----------------------------+ +# | | default via 198.51.100.2 | | | default via 203.0.113.2 | +# | | | | | | +# | | 198.51.100.1/24 | | | 203.0.113.1/24 | +# | + $h2 | | + $h3 | +# | H2 (vrf) | | H3 (vrf) | +# +---------------------------------+ +---------------------------------+ + +ALL_TESTS=" + police_rx_test + police_tx_test + police_shared_test + police_rx_mirror_test + police_tx_mirror_test +" +NUM_NETIFS=6 +source tc_common.sh +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/24 + + ip -4 route add default vrf v$h1 nexthop via 192.0.2.2 +} + +h1_destroy() +{ + ip -4 route del default vrf v$h1 nexthop via 192.0.2.2 + + simple_if_fini $h1 192.0.2.1/24 +} + +h2_create() +{ + simple_if_init $h2 198.51.100.1/24 + + ip -4 route add default vrf v$h2 nexthop via 198.51.100.2 + + tc qdisc add dev $h2 clsact +} + +h2_destroy() +{ + tc qdisc del dev $h2 clsact + + ip -4 route del default vrf v$h2 nexthop via 198.51.100.2 + + simple_if_fini $h2 198.51.100.1/24 +} + +h3_create() +{ + simple_if_init $h3 203.0.113.1/24 + + ip -4 route add default vrf v$h3 nexthop via 203.0.113.2 + + tc qdisc add dev $h3 clsact +} + +h3_destroy() +{ + tc qdisc del dev $h3 clsact + + ip -4 route del default vrf v$h3 nexthop via 203.0.113.2 + + simple_if_fini $h3 203.0.113.1/24 +} + +router_create() +{ + ip link set dev $rp1 up + ip link set dev $rp2 up + ip link set dev $rp3 up + + __addr_add_del $rp1 add 192.0.2.2/24 + __addr_add_del $rp2 add 198.51.100.2/24 + __addr_add_del $rp3 add 203.0.113.2/24 + + tc qdisc add dev $rp1 clsact + tc qdisc add dev $rp2 clsact +} + +router_destroy() +{ + tc qdisc del dev $rp2 clsact + tc qdisc del dev $rp1 clsact + + __addr_add_del $rp3 del 203.0.113.2/24 + __addr_add_del $rp2 del 198.51.100.2/24 + __addr_add_del $rp1 del 192.0.2.2/24 + + ip link set dev $rp3 down + ip link set dev $rp2 down + ip link set dev $rp1 down +} + +police_common_test() +{ + local test_name=$1; shift + + RET=0 + + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1000 -c 0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." 
+ + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_rx_test() +{ + # Rule to police traffic destined to $h2 on ingress of $rp1 + tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok + + police_common_test "police on rx" + + tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower +} + +police_tx_test() +{ + # Rule to police traffic destined to $h2 on egress of $rp2 + tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok + + police_common_test "police on tx" + + tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower +} + +police_shared_common_test() +{ + local dport=$1; shift + local test_name=$1; shift + + RET=0 + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=$dport -p 1000 -c 0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." + + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null +} + +police_shared_test() +{ + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp src_port 12345 \ + action drop + + # Rule to police traffic destined to $h2 on ingress of $rp1 + tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/ok \ + index 10 + + # Rule to police a different flow destined to $h2 on egress of $rp2 + # using same policer + tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 22222 \ + action police index 10 + + police_shared_common_test 54321 "police with shared policer - rx" + + police_shared_common_test 22222 "police with shared policer - tx" + + tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower + tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_mirror_common_test() +{ + local pol_if=$1; shift + local dir=$1; shift + local test_name=$1; shift + + RET=0 + + # Rule to measure bandwidth on ingress of $h2 + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + # Rule to measure bandwidth of mirrored traffic on ingress of $h3 + tc filter add dev $h3 ingress protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action drop + + # Rule to police traffic destined to $h2 and mirror to $h3 + tc filter add dev $pol_if $dir protocol ip pref 1 handle 101 flower \ + dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \ + action police rate 80mbit burst 16k conform-exceed drop/pipe \ + action mirred egress mirror dev $rp3 + + mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \ + -t udp sp=12345,dp=54321 -p 1000 -c 
0 -q & + + local t0=$(tc_rule_stats_get $h2 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h2 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." + + local t0=$(tc_rule_stats_get $h3 1 ingress .bytes) + sleep 10 + local t1=$(tc_rule_stats_get $h3 1 ingress .bytes) + + local er=$((80 * 1000 * 1000)) + local nr=$(rate $t0 $t1 10) + local nr_pct=$((100 * (nr - er) / er)) + ((-10 <= nr_pct && nr_pct <= 10)) + check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%." + + log_test "$test_name" + + { kill %% && wait %%; } 2>/dev/null + tc filter del dev $pol_if $dir protocol ip pref 1 handle 101 flower + tc filter del dev $h3 ingress protocol ip pref 1 handle 101 flower + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower +} + +police_rx_mirror_test() +{ + police_mirror_common_test $rp1 ingress "police rx and mirror" +} + +police_tx_mirror_test() +{ + police_mirror_common_test $rp2 egress "police tx and mirror" +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + rp1=${NETIFS[p2]} + + rp2=${NETIFS[p3]} + h2=${NETIFS[p4]} + + rp3=${NETIFS[p5]} + h3=${NETIFS[p6]} + + vrf_prepare + forwarding_enable + + h1_create + h2_create + h3_create + router_create +} + +cleanup() +{ + pre_cleanup + + router_destroy + h3_destroy + h2_destroy + h1_destroy + + forwarding_restore + vrf_cleanup +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS From cb12d176326722e2f4ede4e459e86ea36607c555 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:31 +0300 Subject: [PATCH 1097/2056] selftests: mlxsw: tc_restrictions: Test tc-police restrictions Test that upper and lower limits on rate and burst size imposed by the device are rejected by the kernel. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../drivers/net/mlxsw/tc_restrictions.sh | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh index 9241250c5921..553cb9fad508 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh @@ -11,6 +11,8 @@ ALL_TESTS=" matchall_mirror_behind_flower_ingress_test matchall_sample_behind_flower_ingress_test matchall_mirror_behind_flower_egress_test + police_limits_test + multi_police_test " NUM_NETIFS=2 @@ -287,6 +289,80 @@ matchall_mirror_behind_flower_egress_test() matchall_behind_flower_egress_test "mirror" "mirred egress mirror dev $swp2" } +police_limits_test() +{ + RET=0 + + tc qdisc add dev $swp1 clsact + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 0.5kbit burst 1m conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too low rate" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 2.5tbit burst 1g conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too high rate" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 1m conform-exceed drop/ok + check_err $? 
"Failed to add police action with low rate" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.9tbit burst 1g conform-exceed drop/ok + check_err $? "Failed to add police action with high rate" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 512b conform-exceed drop/ok + check_fail $? "Incorrect success to add police action with too low burst size" + + tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \ + flower skip_sw \ + action police rate 1.5kbit burst 2k conform-exceed drop/ok + check_err $? "Failed to add police action with low burst size" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc qdisc del dev $swp1 clsact + + log_test "police rate and burst limits" +} + +multi_police_test() +{ + RET=0 + + # It is forbidden in mlxsw driver to have multiple police + # actions in a single rule. + + tc qdisc add dev $swp1 clsact + + tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok + check_err $? "Failed to add rule with single police action" + + tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower + + tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/pipe \ + action police rate 200mbit burst 200k conform-exceed drop/ok + check_fail $? "Incorrect success to add rule with two police actions" + + tc qdisc del dev $swp1 clsact + + log_test "multi police" +} + setup_prepare() { swp1=${NETIFS[p1]} From 5061e773264b11fdc51c61d6619aca76efd3424f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:32 +0300 Subject: [PATCH 1098/2056] selftests: mlxsw: Add scale test for tc-police Query the maximum number of supported policers using devlink-resource and test that this number can be reached by configuring tc filters with police action. Test that an error is returned in case the maximum number is exceeded. 
Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../net/mlxsw/spectrum-2/resource_scale.sh | 2 +- .../net/mlxsw/spectrum-2/tc_police_scale.sh | 16 ++++ .../net/mlxsw/spectrum/resource_scale.sh | 2 +- .../net/mlxsw/spectrum/tc_police_scale.sh | 16 ++++ .../drivers/net/mlxsw/tc_police_scale.sh | 92 +++++++++++++++++++ 5 files changed, 126 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh create mode 100644 tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh create mode 100644 tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh index fd583a171db7..d7cf33a3f18d 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh @@ -28,7 +28,7 @@ cleanup() trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre" +ALL_TESTS="router tc_flower mirror_gre tc_police" for current_test in ${TESTS:-$ALL_TESTS}; do source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh new file mode 100644 index 000000000000..e79ac0dad1f4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_police_scale.sh + +tc_police_get_target() +{ + local should_fail=$1; shift + local target + + target=$(devlink_resource_size_get global_policers single_rate_policers) + + if ((! should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh index 43ba1b438f6d..43f662401bc3 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh @@ -22,7 +22,7 @@ cleanup() devlink_sp_read_kvd_defaults trap cleanup EXIT -ALL_TESTS="router tc_flower mirror_gre" +ALL_TESTS="router tc_flower mirror_gre tc_police" for current_test in ${TESTS:-$ALL_TESTS}; do source ${current_test}_scale.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh new file mode 100644 index 000000000000..e79ac0dad1f4 --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 +source ../tc_police_scale.sh + +tc_police_get_target() +{ + local should_fail=$1; shift + local target + + target=$(devlink_resource_size_get global_policers single_rate_policers) + + if ((! 
should_fail)); then + echo $target + else + echo $((target + 1)) + fi +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh new file mode 100644 index 000000000000..4b96561c462f --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: GPL-2.0 + +TC_POLICE_NUM_NETIFS=2 + +tc_police_h1_create() +{ + simple_if_init $h1 +} + +tc_police_h1_destroy() +{ + simple_if_fini $h1 +} + +tc_police_switch_create() +{ + simple_if_init $swp1 + tc qdisc add dev $swp1 clsact +} + +tc_police_switch_destroy() +{ + tc qdisc del dev $swp1 clsact + simple_if_fini $swp1 +} + +tc_police_rules_create() +{ + local count=$1; shift + local should_fail=$1; shift + + TC_POLICE_BATCH_FILE="$(mktemp)" + + for ((i = 0; i < count; ++i)); do + cat >> $TC_POLICE_BATCH_FILE <<-EOF + filter add dev $swp1 ingress \ + prot ip \ + flower skip_sw \ + action police rate 10mbit burst 100k \ + conform-exceed drop/ok + EOF + done + + tc -b $TC_POLICE_BATCH_FILE + check_err_fail $should_fail $? "Rule insertion" +} + +__tc_police_test() +{ + local count=$1; shift + local should_fail=$1; shift + + tc_police_rules_create $count $should_fail + + offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l) + ((offload_count == count)) + check_err_fail $should_fail $? "tc police offload count" +} + +tc_police_test() +{ + local count=$1; shift + local should_fail=$1; shift + + if ! tc_offload_check $TC_POLICE_NUM_NETIFS; then + check_err 1 "Could not test offloaded functionality" + return + fi + + __tc_police_test $count $should_fail +} + +tc_police_setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + vrf_prepare + + tc_police_h1_create + tc_police_switch_create +} + +tc_police_cleanup() +{ + pre_cleanup + + tc_police_switch_destroy + tc_police_h1_destroy + + vrf_cleanup +} From 46b171d7d73aedfae8070622f1798ac4b124284c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 15 Jul 2020 11:27:33 +0300 Subject: [PATCH 1099/2056] selftests: mlxsw: Test policers' occupancy Test that policers shared by different tc filters are correctly reference counted by observing policers' occupancy via devlink-resource. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jakub Kicinski --- .../drivers/net/mlxsw/tc_police_occ.sh | 108 ++++++++++++++++++ .../selftests/net/forwarding/devlink_lib.sh | 5 + 2 files changed, 113 insertions(+) create mode 100755 tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh new file mode 100755 index 000000000000..448b75c1545a --- /dev/null +++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh @@ -0,0 +1,108 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test that policers shared by different tc filters are correctly reference +# counted by observing policers' occupancy via devlink-resource. 
+ +lib_dir=$(dirname $0)/../../../net/forwarding + +ALL_TESTS=" + tc_police_occ_test +" +NUM_NETIFS=2 +source $lib_dir/lib.sh +source $lib_dir/devlink_lib.sh + +h1_create() +{ + simple_if_init $h1 +} + +h1_destroy() +{ + simple_if_fini $h1 +} + +switch_create() +{ + simple_if_init $swp1 + tc qdisc add dev $swp1 clsact +} + +switch_destroy() +{ + tc qdisc del dev $swp1 clsact + simple_if_fini $swp1 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + vrf_prepare + + h1_create + switch_create +} + +cleanup() +{ + pre_cleanup + + switch_destroy + h1_destroy + + vrf_cleanup +} + +tc_police_occ_get() +{ + devlink_resource_occ_get global_policers single_rate_policers +} + +tc_police_occ_test() +{ + RET=0 + + local occ=$(tc_police_occ_get) + + tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 1 handle 101 flower + (( occ == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $occ" + + tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \ + flower skip_sw \ + action police rate 100mbit burst 100k conform-exceed drop/ok \ + index 10 + tc filter add dev $swp1 ingress pref 2 handle 102 proto ip \ + flower skip_sw action police index 10 + + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 2 handle 102 flower + (( occ + 1 == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))" + + tc filter del dev $swp1 ingress pref 1 handle 101 flower + (( occ == $(tc_police_occ_get) )) + check_err $? "Got occupancy $(tc_police_occ_get), expected $occ" + + log_test "tc police occupancy" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh index f0e6be4c09e9..75fe24bcb9cd 100644 --- a/tools/testing/selftests/net/forwarding/devlink_lib.sh +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -98,6 +98,11 @@ devlink_resource_size_set() check_err $? "Failed setting path $path to size $size" } +devlink_resource_occ_get() +{ + devlink_resource_get "$@" | jq '.["occ"]' +} + devlink_reload() { local still_pending From 404fc93d4a0118d5f9abac8b63d418c2bbc8c2b4 Mon Sep 17 00:00:00 2001 From: Kieran Bingham Date: Wed, 15 Jul 2020 13:48:34 +0100 Subject: [PATCH 1100/2056] drivers: net: wan: Fix trivial spelling The word 'descriptor' is misspelled throughout the tree. 
Fix it up accordingly: decriptor -> descriptor Signed-off-by: Kieran Bingham Signed-off-by: Jakub Kicinski --- drivers/net/wan/lmc/lmc_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index a20f467ca48a..842def21e089 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -2063,7 +2063,7 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue) /* * Chip seems to have locked up * Reset it - * This whips out all our decriptor + * This whips out all our descriptor * table and starts from scartch */ From 0d80b76184ac9f2dfb939e39ad1f961fc006b99d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:38 -0700 Subject: [PATCH 1101/2056] net: qed: drop duplicate words in comments Drop doubled word "the" in two comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/qed/qed_chain.h | 2 +- include/linux/qed/qed_if.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 92cdc79e5019..7071dc92b4e2 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -130,7 +130,7 @@ struct qed_chain { } pbl_sp; /* Address of first page of the chain - the address is required - * for fastpath operation [consume/produce] but only for the the SINGLE + * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. */ void *p_virt_addr; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 90e1060da02b..8a6e3ad436d1 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -498,7 +498,7 @@ struct qed_fcoe_pf_params { u8 bdq_pbl_num_entries[2]; }; -/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; u64 bdq_pbl_base_addr[3]; From 2ff17117e60572d6974c766d6b1a225ec7d68795 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:39 -0700 Subject: [PATCH 1102/2056] net: skbuff.h: drop duplicate words in comments Drop doubled words in several comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0c0377fc00c2..6a82d4a8229e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1328,7 +1328,7 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, void *target_container); /* Gets a skb connection tracking info, ctinfo map should be a - * a map of mapsize to translate enum ip_conntrack_info states + * map of mapsize to translate enum ip_conntrack_info states * to user states. */ void @@ -3812,7 +3812,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * must call this function to return the skb back to the stack with a * timestamp. 
* - * @skb: clone of the the original outgoing packet + * @skb: clone of the original outgoing packet * @hwtstamps: hardware time stamps * */ From 158e89639166461e225a855bce6e3ee6cd8bb1c0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:40 -0700 Subject: [PATCH 1103/2056] net: wimax: fix duplicate words in comments Drop doubled words in two comments. Fix a spello/typo. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/wimax/debug.h | 4 ++-- include/net/wimax.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index 4dd2c1cea6a9..cdae052bcdcd 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -184,8 +184,8 @@ do { \ /* - * CPP sintatic sugar to generate A_B like symbol names when one of - * the arguments is a a preprocessor #define. + * CPP syntactic sugar to generate A_B like symbol names when one of + * the arguments is a preprocessor #define. */ #define __D_PASTE__(varname, modulename) varname##_##modulename #define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename)) diff --git a/include/net/wimax.h b/include/net/wimax.h index 24ba7e89c26c..f6e31d2f47aa 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -28,7 +28,7 @@ * * USAGE * - * Embed a `struct wimax_dev` at the beginning of the the device's + * Embed a `struct wimax_dev` at the beginning of the device's * private structure, initialize and register it. For details, see * `struct wimax_dev`s documentation. * From cee50c2a028432dadacdf55950c6c6a7875e8172 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:41 -0700 Subject: [PATCH 1104/2056] net: 9p: drop duplicate word in comment Drop doubled word "not" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/9p/transport.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h index 98a2be2de04a..3eb4261b2958 100644 --- a/include/net/9p/transport.h +++ b/include/net/9p/transport.h @@ -25,7 +25,7 @@ * @request: member function to issue a request to the transport * @cancel: member function to cancel a request (if it hasn't been sent) * @cancelled: member function to notify that a cancelled request will not - * not receive a reply + * receive a reply * * This is the basic API for a transport module which is registered by the * transport module with the 9P core network module and used by the client From c201324b54553aebb32845193680f21eb493c6e5 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:42 -0700 Subject: [PATCH 1105/2056] net: caif: drop duplicate words in comments Drop doubled words "or" and "the" in several comments. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/caif/caif_layer.h | 4 ++-- include/uapi/linux/caif/caif_socket.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h index 064094101cb5..51f7bb42a936 100644 --- a/include/net/caif/caif_layer.h +++ b/include/net/caif/caif_layer.h @@ -156,7 +156,7 @@ struct cflayer { * CAIF packets upwards in the stack. * Packet handling rules: * - The CAIF packet (cfpkt) ownership is passed to the - * called receive function. 
This means that the the + * called receive function. This means that the * packet cannot be accessed after passing it to the * above layer using up->receive(). * @@ -184,7 +184,7 @@ struct cflayer { * CAIF packet downwards in the stack. * Packet handling rules: * - The CAIF packet (cfpkt) ownership is passed to the - * transmit function. This means that the the packet + * transmit function. This means that the packet * cannot be accessed after passing it to the below * layer using dn->transmit(). * diff --git a/include/uapi/linux/caif/caif_socket.h b/include/uapi/linux/caif/caif_socket.h index 10ec1d1cf68e..d9970bbaa156 100644 --- a/include/uapi/linux/caif/caif_socket.h +++ b/include/uapi/linux/caif/caif_socket.h @@ -169,7 +169,7 @@ struct sockaddr_caif { * @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are * available. Either a high bandwidth * link can be selected (CAIF_LINK_HIGH_BANDW) or - * or a low latency link (CAIF_LINK_LOW_LATENCY). + * a low latency link (CAIF_LINK_LOW_LATENCY). * This option is of type __u32. * Alternatively SO_BINDTODEVICE can be used. * From ab88d64a90951a95c4e08971b02fcb781d2067f0 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:43 -0700 Subject: [PATCH 1106/2056] net: dsa.h: drop duplicate word in comment Drop doubled word "to" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/dsa.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index b28c95c76762..6fa418ff1175 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -612,7 +612,7 @@ struct dsa_switch_ops { * MTU change functionality. Switches can also adjust their MRU through * this method. By MTU, one understands the SDU (L2 payload) length. * If the switch needs to account for the DSA tag on the CPU port, this - * method needs to to do so privately. + * method needs to do so privately. */ int (*port_change_mtu)(struct dsa_switch *ds, int port, int new_mtu); From 4b48b0a3aa0df49eb6d80a86c2b016bf6c3eebf9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:44 -0700 Subject: [PATCH 1107/2056] net: ip6_fib.h: drop duplicate word in comment Drop doubled word "the" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/ip6_fib.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index cc8356fd927f..ac5ff3c3afb1 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -166,7 +166,7 @@ struct fib6_info { struct fib6_node __rcu *fib6_node; /* Multipath routes: - * siblings is a list of fib6_info that have the the same metric/weight, + * siblings is a list of fib6_info that have the same metric/weight, * destination, but not the same gateway. nsiblings is just a cache * to speed up lookup. */ From d86f9868bdb40fc11c1e8c176ae11fb897b9d5f4 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:45 -0700 Subject: [PATCH 1108/2056] net: sctp: drop duplicate words in comments Drop doubled words in several comments. Signed-off-by: Randy Dunlap Cc: "David S. 
Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/net/sctp/sctp.h | 2 +- include/net/sctp/structs.h | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index e3bd198b00ae..4fc747b778eb 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -291,7 +291,7 @@ atomic_dec(&sctp_dbg_objcnt_## name) #define SCTP_DBG_OBJCNT(name) \ atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0) -/* Macro to help create new entries in in the global array of +/* Macro to help create new entries in the global array of * objcnt counters. */ #define SCTP_DBG_OBJCNT_ENTRY(name) \ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index fb42c90348d3..9bbb2f60db92 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1398,7 +1398,7 @@ struct sctp_stream_priorities { struct list_head prio_sched; /* List of streams scheduled */ struct list_head active; - /* The next stream stream in line */ + /* The next stream in line */ struct sctp_stream_out_ext *next; __u16 prio; }; @@ -1460,7 +1460,7 @@ struct sctp_stream { struct { /* List of streams scheduled */ struct list_head rr_list; - /* The next stream stream in line */ + /* The next stream in line */ struct sctp_stream_out_ext *rr_next; }; }; @@ -1770,7 +1770,7 @@ struct sctp_association { int max_burst; /* This is the max_retrans value for the association. This value will - * be initialized initialized from system defaults, but can be + * be initialized from system defaults, but can be * modified by the SCTP_ASSOCINFO socket option. */ int max_retrans; From 59632b220f2d61df274ed3a14a204e941051fdad Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:42:46 -0700 Subject: [PATCH 1109/2056] net: ipv6: drop duplicate word in comment Drop the doubled word "by" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Jakub Kicinski --- include/linux/ipv6.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 2cb445a8fc9e..8d8f877e7f81 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -223,7 +223,7 @@ struct ipv6_pinfo { /* * Packed in 16bits. - * Omit one shift by by putting the signed field at MSB. + * Omit one shift by putting the signed field at MSB. */ #if defined(__BIG_ENDIAN_BITFIELD) __s16 hop_limit:9; From 339ddaa626995bc6218972ca241471f3717cc5f4 Mon Sep 17 00:00:00 2001 From: Patrick Steinhardt Date: Wed, 15 Jul 2020 19:43:33 +0200 Subject: [PATCH 1110/2056] Bluetooth: Fix update of connection state in `hci_encrypt_cfm` Starting with the upgrade to v5.8-rc3, I've noticed I wasn't able to connect to my Bluetooth headset properly anymore. While connecting to the device would eventually succeed, bluetoothd seemed to be confused about the current connection state where the state was flapping hence and forth. Bisecting this issue led to commit 3ca44c16b0dc (Bluetooth: Consolidate encryption handling in hci_encrypt_cfm, 2020-05-19), which refactored `hci_encrypt_cfm` to also handle updating the connection state. The commit in question changed the code to call `hci_connect_cfm` inside `hci_encrypt_cfm` and to change the connection state. But with the conversion, we now only update the connection state if a status was set already. In fact, the reverse should be true: the status should be updated if no status is yet set. So let's fix the isuse by reversing the condition. 
Fixes: 3ca44c16b0dc ("Bluetooth: Consolidate encryption handling in hci_encrypt_cfm") Signed-off-by: Patrick Steinhardt Acked-by: Luiz Augusto von Dentz Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 34ad5b207598..bee1b4778ccc 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1467,7 +1467,7 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) __u8 encrypt; if (conn->state == BT_CONFIG) { - if (status) + if (!status) conn->state = BT_CONNECTED; hci_connect_cfm(conn, status); From 9b74ebb2b0f259474da65fa0178c657e5fa5c640 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 14 Jul 2020 15:56:34 +0200 Subject: [PATCH 1111/2056] cpumap: Use non-locked version __ptr_ring_consume_batched Commit 77361825bb01 ("bpf: cpumap use ptr_ring_consume_batched") changed away from using single frame ptr_ring dequeue (__ptr_ring_consume) to consume a batched, but it uses a locked version, which as the comment explain isn't needed. Change to use the non-locked version __ptr_ring_consume_batched. Fixes: 77361825bb01 ("bpf: cpumap use ptr_ring_consume_batched") Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/a9c7d06f9a009e282209f0c8c7b2c5d9b9ad60b9.1594734381.git.lorenzo@kernel.org --- kernel/bpf/cpumap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index bd8658055c16..323c91c4fab0 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -259,7 +259,7 @@ static int cpu_map_kthread_run(void *data) * kthread CPU pinned. Lockless access to ptr_ring * consume side valid as no-resize allowed of queue. 
*/ - n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); + n = __ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); for (i = 0; i < n; i++) { void *f = frames[i]; From daa5cdc3fd08407048538585b2433601d4089a82 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 14 Jul 2020 15:56:35 +0200 Subject: [PATCH 1112/2056] net: Refactor xdp_convert_buff_to_frame Move the guts of xdp_convert_buff_to_frame to a new helper, xdp_update_frame_from_buff so it can be reused removing code duplication Suggested-by: Jesper Dangaard Brouer Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: David Ahern Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/90a68c283d7ebeb48924934c9b7ac79492300472.1594734381.git.lorenzo@kernel.org --- include/net/xdp.h | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/include/net/xdp.h b/include/net/xdp.h index 609f819ed08b..5b383c450858 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -121,39 +121,48 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) xdp->frame_sz = frame->frame_sz; } -/* Convert xdp_buff to xdp_frame */ static inline -struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) +int xdp_update_frame_from_buff(struct xdp_buff *xdp, + struct xdp_frame *xdp_frame) { - struct xdp_frame *xdp_frame; - int metasize; - int headroom; - - if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) - return xdp_convert_zc_to_xdp_frame(xdp); + int metasize, headroom; /* Assure headroom is available for storing info */ headroom = xdp->data - xdp->data_hard_start; metasize = xdp->data - xdp->data_meta; metasize = metasize > 0 ? metasize : 0; if (unlikely((headroom - metasize) < sizeof(*xdp_frame))) - return NULL; + return -ENOSPC; /* Catch if driver didn't reserve tailroom for skb_shared_info */ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { XDP_WARN("Driver BUG: missing reserved tailroom"); - return NULL; + return -ENOSPC; } - /* Store info in top of packet */ - xdp_frame = xdp->data_hard_start; - xdp_frame->data = xdp->data; xdp_frame->len = xdp->data_end - xdp->data; xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; xdp_frame->frame_sz = xdp->frame_sz; + return 0; +} + +/* Convert xdp_buff to xdp_frame */ +static inline +struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) +{ + struct xdp_frame *xdp_frame; + + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) + return xdp_convert_zc_to_xdp_frame(xdp); + + /* Store info in top of packet */ + xdp_frame = xdp->data_hard_start; + if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0)) + return NULL; + /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */ xdp_frame->mem = xdp->rxq->mem; From a4e76f1bda8e7b358dc21f0f925b8a2c4c98e733 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:36 +0200 Subject: [PATCH 1113/2056] samples/bpf: xdp_redirect_cpu_user: Do not update bpf maps in option loop Do not update xdp_redirect_cpu maps running while option loop but defer it after all available options have been parsed. 
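Condensed, the new flow of the sample looks roughly as follows (error handling and the remaining options are trimmed; create_cpu_entry() is the sample's existing helper):

    int *cpu = calloc(n_cpus, sizeof(int));      /* requested CPUs, in order */
    int added_cpus = 0, i;

    while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
                              long_options, &longindex)) != -1) {
            switch (opt) {
            case 'c':
                    /* Only remember the CPU while parsing ... */
                    cpu[added_cpus++] = strtoul(optarg, NULL, 0);
                    break;
            /* ... other options ... */
            }
    }

    /* ... and create the map entries once every option is known. */
    for (i = 0; i < added_cpus; i++)
            create_cpu_entry(cpu[i], qsize, i, true);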
This is a preliminary patch to pass the program name we want to attach to the map entries as a user option Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/95dc46286fd2c609042948e04bb7ae1f5b425538.1594734381.git.lorenzo@kernel.org --- samples/bpf/xdp_redirect_cpu_user.c | 36 +++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index f4e755e0dd73..6bb2d95cb26c 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -681,6 +681,7 @@ int main(int argc, char **argv) int add_cpu = -1; int opt, err; int prog_fd; + int *cpu, i; __u32 qsize; n_cpus = get_nprocs_conf(); @@ -716,6 +717,13 @@ int main(int argc, char **argv) } mark_cpus_unavailable(); + cpu = malloc(n_cpus * sizeof(int)); + if (!cpu) { + fprintf(stderr, "failed to allocate cpu array\n"); + return EXIT_FAIL; + } + memset(cpu, 0, n_cpus * sizeof(int)); + /* Parse commands line args */ while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF", long_options, &longindex)) != -1) { @@ -760,8 +768,7 @@ int main(int argc, char **argv) errno, strerror(errno)); goto error; } - create_cpu_entry(add_cpu, qsize, added_cpus, true); - added_cpus++; + cpu[added_cpus++] = add_cpu; break; case 'q': qsize = atoi(optarg); @@ -772,6 +779,7 @@ int main(int argc, char **argv) case 'h': error: default: + free(cpu); usage(argv, obj); return EXIT_FAIL_OPTION; } @@ -784,16 +792,21 @@ int main(int argc, char **argv) if (ifindex == -1) { fprintf(stderr, "ERR: required option --dev missing\n"); usage(argv, obj); - return EXIT_FAIL_OPTION; + err = EXIT_FAIL_OPTION; + goto out; } /* Required option */ if (add_cpu == -1) { fprintf(stderr, "ERR: required option --cpu missing\n"); fprintf(stderr, " Specify multiple --cpu option to add more\n"); usage(argv, obj); - return EXIT_FAIL_OPTION; + err = EXIT_FAIL_OPTION; + goto out; } + for (i = 0; i < added_cpus; i++) + create_cpu_entry(cpu[i], qsize, i, true); + /* Remove XDP program when program is interrupted or killed */ signal(SIGINT, int_exit); signal(SIGTERM, int_exit); @@ -801,27 +814,32 @@ int main(int argc, char **argv) prog = bpf_object__find_program_by_title(obj, prog_name); if (!prog) { fprintf(stderr, "bpf_object__find_program_by_title failed\n"); - return EXIT_FAIL; + err = EXIT_FAIL; + goto out; } prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { fprintf(stderr, "bpf_program__fd failed\n"); - return EXIT_FAIL; + err = EXIT_FAIL; + goto out; } if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) { fprintf(stderr, "link set xdp fd failed\n"); - return EXIT_FAIL_XDP; + err = EXIT_FAIL_XDP; + goto out; } err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len); if (err) { printf("can't get prog info - %s\n", strerror(errno)); - return err; + goto out; } prog_id = info.id; stats_poll(interval, use_separators, prog_name, stress_mode); - return EXIT_OK; +out: + free(cpu); + return err; } From 644bfe51fa49c22244d24e896cd3fe3ee2f2cfd1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:37 +0200 Subject: [PATCH 1114/2056] cpumap: Formalize map value as a named struct As it has been already done for devmap, introduce 'struct bpf_cpumap_val' to formalize the expected values that can be passed in for a CPUMAP. Update cpumap code to use the struct. 
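From user space the map is then updated with the struct instead of a bare queue size. A minimal libbpf sketch (the helper name is illustrative, error handling omitted):

    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    /* Set a 192-entry queue for CPU 2 in a BPF_MAP_TYPE_CPUMAP. */
    static int foo_cpumap_set_qsize(int cpumap_fd)
    {
            struct bpf_cpumap_val val = { .qsize = 192 };
            __u32 cpu = 2;

            return bpf_map_update_elem(cpumap_fd, &cpu, &val, 0);
    }

Keeping the map value a named struct means new members can later be appended without breaking existing users, exactly as was done for struct bpf_devmap_val.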
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/754f950674665dae6139c061d28c1d982aaf4170.1594734381.git.lorenzo@kernel.org --- include/uapi/linux/bpf.h | 9 +++++++++ kernel/bpf/cpumap.c | 28 +++++++++++++++------------- tools/include/uapi/linux/bpf.h | 9 +++++++++ 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5e386389913a..109623527358 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3849,6 +3849,15 @@ struct bpf_devmap_val { } bpf_prog; }; +/* CPUMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. + */ +struct bpf_cpumap_val { + __u32 qsize; /* queue size to remote target CPU */ +}; + enum sk_action { SK_DROP = 0, SK_PASS, diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 323c91c4fab0..ff48dc00e8d0 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -52,7 +52,6 @@ struct xdp_bulk_queue { struct bpf_cpu_map_entry { u32 cpu; /* kthread CPU and map index */ int map_id; /* Back reference to map */ - u32 qsize; /* Queue size placeholder for map lookup */ /* XDP can run multiple RX-ring queues, need __percpu enqueue store */ struct xdp_bulk_queue __percpu *bulkq; @@ -62,10 +61,13 @@ struct bpf_cpu_map_entry { /* Queue with potential multi-producers, and single-consumer kthread */ struct ptr_ring *queue; struct task_struct *kthread; - struct work_struct kthread_stop_wq; + + struct bpf_cpumap_val value; atomic_t refcnt; /* Control when this struct can be free'ed */ struct rcu_head rcu; + + struct work_struct kthread_stop_wq; }; struct bpf_cpu_map { @@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data) return 0; } -static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, - int map_id) +static struct bpf_cpu_map_entry * +__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) { gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct bpf_cpu_map_entry *rcpu; @@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu, if (!rcpu->queue) goto free_bulkq; - err = ptr_ring_init(rcpu->queue, qsize, gfp); + err = ptr_ring_init(rcpu->queue, value->qsize, gfp); if (err) goto free_queue; rcpu->cpu = cpu; rcpu->map_id = map_id; - rcpu->qsize = qsize; + rcpu->value.qsize = value->qsize; /* Setup kthread */ rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, @@ -437,12 +439,12 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); + struct bpf_cpumap_val cpumap_value = {}; struct bpf_cpu_map_entry *rcpu; - /* Array index key correspond to CPU number */ u32 key_cpu = *(u32 *)key; - /* Value is the queue size */ - u32 qsize = *(u32 *)value; + + memcpy(&cpumap_value, value, map->value_size); if (unlikely(map_flags > BPF_EXIST)) return -EINVAL; @@ -450,18 +452,18 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, return -E2BIG; if (unlikely(map_flags == BPF_NOEXIST)) return -EEXIST; - if (unlikely(qsize > 16384)) /* sanity limit on qsize */ + if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */ return -EOVERFLOW; /* Make sure CPU is a valid possible cpu */ if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu)) return -ENODEV; - if (qsize == 0) { + if 
(cpumap_value.qsize == 0) { rcpu = NULL; /* Same as deleting */ } else { /* Updating qsize cause re-allocation of bpf_cpu_map_entry */ - rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id); + rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id); if (!rcpu) return -ENOMEM; rcpu->cmap = cmap; @@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key) struct bpf_cpu_map_entry *rcpu = __cpu_map_lookup_elem(map, *(u32 *)key); - return rcpu ? &rcpu->qsize : NULL; + return rcpu ? &rcpu->value : NULL; } static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 5e386389913a..109623527358 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3849,6 +3849,15 @@ struct bpf_devmap_val { } bpf_prog; }; +/* CPUMAP map-value layout + * + * The struct data-layout of map-value is a configuration interface. + * New members can only be added to the end of this structure. + */ +struct bpf_cpumap_val { + __u32 qsize; /* queue size to remote target CPU */ +}; + enum sk_action { SK_DROP = 0, SK_PASS, From 9216477449f33cdbc9c9a99d49f500b7fbb81702 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:38 +0200 Subject: [PATCH 1115/2056] bpf: cpumap: Add the possibility to attach an eBPF program to cpumap Introduce the capability to attach an eBPF program to cpumap entries. The idea behind this feature is to add the possibility to define on which CPU run the eBPF program if the underlying hw does not support RSS. Current supported verdicts are XDP_DROP and XDP_PASS. This patch has been tested on Marvell ESPRESSObin using xdp_redirect_cpu sample available in the kernel tree to identify possible performance regressions. 
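For orientation, a hypothetical BPF-C sketch (not part of this patch) of the pieces involved: a CPUMAP whose value is struct bpf_cpumap_val, a main XDP program that redirects into it, and the program the cpumap kthread runs on the remote CPU. The SEC("xdp_cpumap/") shorthand is only wired up by a later libbpf patch in this series, and every name below is illustrative. The patch's measured throughput on the ESPRESSObin follows right after this sketch.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(struct bpf_cpumap_val));
	__uint(max_entries, 8);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_to_cpu(struct xdp_md *ctx)
{
	/* Steer every frame to the entry at index 1, i.e. to CPU 1. */
	return bpf_redirect_map(&cpu_map, 1, 0);
}

SEC("xdp_cpumap/filter")
int xdp_filter_on_remote_cpu(struct xdp_md *ctx)
{
	/* Runs in the cpumap kthread; only XDP_PASS and XDP_DROP are
	 * honoured at this point in the series.
	 */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
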
Results show there are no observable differences in packet-per-second: $./xdp_redirect_cpu --progname xdp_cpu_map0 --dev eth0 --cpu 1 rx: 354.8 Kpps rx: 356.0 Kpps rx: 356.8 Kpps rx: 356.3 Kpps rx: 356.6 Kpps rx: 356.6 Kpps rx: 356.7 Kpps rx: 355.8 Kpps rx: 356.8 Kpps rx: 356.8 Kpps Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/5c9febdf903d810b3415732e5cd98491d7d9067a.1594734381.git.lorenzo@kernel.org --- include/linux/bpf.h | 6 ++ include/net/xdp.h | 5 ++ include/trace/events/xdp.h | 14 ++-- include/uapi/linux/bpf.h | 5 ++ kernel/bpf/cpumap.c | 121 +++++++++++++++++++++++++++++---- net/core/dev.c | 9 +++ tools/include/uapi/linux/bpf.h | 5 ++ 7 files changed, 148 insertions(+), 17 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c67c88ad35f8..54ad426dbea1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1272,6 +1272,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); +bool cpu_map_prog_allowed(struct bpf_map *map); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1432,6 +1433,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline bool cpu_map_prog_allowed(struct bpf_map *map) +{ + return false; +} + static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { diff --git a/include/net/xdp.h b/include/net/xdp.h index 5b383c450858..83b9e0142b52 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -98,6 +98,11 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ }; +struct xdp_cpumap_stats { + unsigned int pass; + unsigned int drop; +}; + /* Clear kernel pointers in xdp_frame */ static inline void xdp_scrub_frame(struct xdp_frame *frame) { diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index b73d3e141323..e2c99f5bee39 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -177,9 +177,9 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err, TRACE_EVENT(xdp_cpumap_kthread, TP_PROTO(int map_id, unsigned int processed, unsigned int drops, - int sched), + int sched, struct xdp_cpumap_stats *xdp_stats), - TP_ARGS(map_id, processed, drops, sched), + TP_ARGS(map_id, processed, drops, sched, xdp_stats), TP_STRUCT__entry( __field(int, map_id) @@ -188,6 +188,8 @@ TRACE_EVENT(xdp_cpumap_kthread, __field(unsigned int, drops) __field(unsigned int, processed) __field(int, sched) + __field(unsigned int, xdp_pass) + __field(unsigned int, xdp_drop) ), TP_fast_assign( @@ -197,16 +199,20 @@ TRACE_EVENT(xdp_cpumap_kthread, __entry->drops = drops; __entry->processed = processed; __entry->sched = sched; + __entry->xdp_pass = xdp_stats->pass; + __entry->xdp_drop = xdp_stats->drop; ), TP_printk("kthread" " cpu=%d map_id=%d action=%s" " processed=%u drops=%u" - " sched=%d", + " sched=%d" + " xdp_pass=%u xdp_drop=%u", __entry->cpu, __entry->map_id, __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), __entry->processed, __entry->drops, - __entry->sched) + __entry->sched, + __entry->xdp_pass, __entry->xdp_drop) ); TRACE_EVENT(xdp_cpumap_enqueue, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 109623527358..c010b57fce3f 
100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -227,6 +227,7 @@ enum bpf_attach_type { BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, __MAX_BPF_ATTACH_TYPE }; @@ -3856,6 +3857,10 @@ struct bpf_devmap_val { */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; }; enum sk_action { diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index ff48dc00e8d0..b3a8aea81ee5 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -63,6 +63,7 @@ struct bpf_cpu_map_entry { struct task_struct *kthread; struct bpf_cpumap_val value; + struct bpf_prog *prog; atomic_t refcnt; /* Control when this struct can be free'ed */ struct rcu_head rcu; @@ -82,6 +83,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq); static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { + u32 value_size = attr->value_size; struct bpf_cpu_map *cmap; int err = -ENOMEM; u64 cost; @@ -92,7 +94,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) + (value_size != offsetofend(struct bpf_cpumap_val, qsize) && + value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) || + attr->map_flags & ~BPF_F_NUMA_NODE) return ERR_PTR(-EINVAL); cmap = kzalloc(sizeof(*cmap), GFP_USER); @@ -214,6 +218,8 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) { if (atomic_dec_and_test(&rcpu->refcnt)) { + if (rcpu->prog) + bpf_prog_put(rcpu->prog); /* The queue should be empty at this point */ __cpu_map_ring_cleanup(rcpu->queue); ptr_ring_cleanup(rcpu->queue, NULL); @@ -222,6 +228,62 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) } } +static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, + void **frames, int n, + struct xdp_cpumap_stats *stats) +{ + struct xdp_rxq_info rxq; + struct xdp_buff xdp; + int i, nframes = 0; + + if (!rcpu->prog) + return n; + + rcu_read_lock(); + + xdp_set_return_frame_no_direct(); + xdp.rxq = &rxq; + + for (i = 0; i < n; i++) { + struct xdp_frame *xdpf = frames[i]; + u32 act; + int err; + + rxq.dev = xdpf->dev_rx; + rxq.mem = xdpf->mem; + /* TODO: report queue_index to xdp_rxq_info */ + + xdp_convert_frame_to_buff(xdpf, &xdp); + + act = bpf_prog_run_xdp(rcpu->prog, &xdp); + switch (act) { + case XDP_PASS: + err = xdp_update_frame_from_buff(&xdp, xdpf); + if (err < 0) { + xdp_return_frame(xdpf); + stats->drop++; + } else { + frames[nframes++] = xdpf; + stats->pass++; + } + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fallthrough */ + case XDP_DROP: + xdp_return_frame(xdpf); + stats->drop++; + break; + } + } + + xdp_clear_return_frame_no_direct(); + + rcu_read_unlock(); + + return nframes; +} + #define CPUMAP_BATCH 8 static int cpu_map_kthread_run(void *data) @@ -236,11 +298,12 @@ static int cpu_map_kthread_run(void *data) * kthread_stop signal until queue is empty. 
*/ while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) { + struct xdp_cpumap_stats stats = {}; /* zero stats */ + gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; unsigned int drops = 0, sched = 0; void *frames[CPUMAP_BATCH]; void *skbs[CPUMAP_BATCH]; - gfp_t gfp = __GFP_ZERO | GFP_ATOMIC; - int i, n, m; + int i, n, m, nframes; /* Release CPU reschedule checks */ if (__ptr_ring_empty(rcpu->queue)) { @@ -261,8 +324,8 @@ static int cpu_map_kthread_run(void *data) * kthread CPU pinned. Lockless access to ptr_ring * consume side valid as no-resize allowed of queue. */ - n = __ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH); - + n = __ptr_ring_consume_batched(rcpu->queue, frames, + CPUMAP_BATCH); for (i = 0; i < n; i++) { void *f = frames[i]; struct page *page = virt_to_page(f); @@ -274,15 +337,19 @@ static int cpu_map_kthread_run(void *data) prefetchw(page); } - m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs); - if (unlikely(m == 0)) { - for (i = 0; i < n; i++) - skbs[i] = NULL; /* effect: xdp_return_frame */ - drops = n; + /* Support running another XDP prog on this CPU */ + nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats); + if (nframes) { + m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs); + if (unlikely(m == 0)) { + for (i = 0; i < nframes; i++) + skbs[i] = NULL; /* effect: xdp_return_frame */ + drops += nframes; + } } local_bh_disable(); - for (i = 0; i < n; i++) { + for (i = 0; i < nframes; i++) { struct xdp_frame *xdpf = frames[i]; struct sk_buff *skb = skbs[i]; int ret; @@ -299,7 +366,7 @@ static int cpu_map_kthread_run(void *data) drops++; } /* Feedback loop via tracepoint */ - trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched); + trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats); local_bh_enable(); /* resched point, may call do_softirq() */ } @@ -309,13 +376,38 @@ static int cpu_map_kthread_run(void *data) return 0; } +bool cpu_map_prog_allowed(struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_CPUMAP && + map->value_size != offsetofend(struct bpf_cpumap_val, qsize); +} + +static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd) +{ + struct bpf_prog *prog; + + prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); + if (IS_ERR(prog)) + return PTR_ERR(prog); + + if (prog->expected_attach_type != BPF_XDP_CPUMAP) { + bpf_prog_put(prog); + return -EINVAL; + } + + rcpu->value.bpf_prog.id = prog->aux->id; + rcpu->prog = prog; + + return 0; +} + static struct bpf_cpu_map_entry * __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) { + int numa, err, i, fd = value->bpf_prog.fd; gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct bpf_cpu_map_entry *rcpu; struct xdp_bulk_queue *bq; - int numa, err, i; /* Have map->numa_node, but choose node of redirect target CPU */ numa = cpu_to_node(cpu); @@ -357,6 +449,9 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ + if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) + goto free_ptr_ring; + /* Make sure kthread runs on a single CPU */ kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); diff --git a/net/core/dev.c b/net/core/dev.c index b61075828358..b820527f0a8d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5448,6 +5448,8 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) for (i = 0; i < new->aux->used_map_cnt; i++) { if 
(dev_map_can_have_prog(new->aux->used_maps[i])) return -EINVAL; + if (cpu_map_prog_allowed(new->aux->used_maps[i])) + return -EINVAL; } } @@ -8875,6 +8877,13 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, return -EINVAL; } + if (prog->expected_attach_type == BPF_XDP_CPUMAP) { + NL_SET_ERR_MSG(extack, + "BPF_XDP_CPUMAP programs can not be attached to a device"); + bpf_prog_put(prog); + return -EINVAL; + } + /* prog->aux->id may be 0 for orphaned device-bound progs */ if (prog->aux->id && prog->aux->id == prog_id) { bpf_prog_put(prog); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 109623527358..c010b57fce3f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -227,6 +227,7 @@ enum bpf_attach_type { BPF_CGROUP_INET6_GETSOCKNAME, BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, + BPF_XDP_CPUMAP, __MAX_BPF_ATTACH_TYPE }; @@ -3856,6 +3857,10 @@ struct bpf_devmap_val { */ struct bpf_cpumap_val { __u32 qsize; /* queue size to remote target CPU */ + union { + int fd; /* prog fd on map write */ + __u32 id; /* prog id on map read */ + } bpf_prog; }; enum sk_action { From 28b1520ebf81ced970141640d90279ac7b9f1f9a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:39 +0200 Subject: [PATCH 1116/2056] bpf: cpumap: Implement XDP_REDIRECT for eBPF programs attached to map entries Introduce XDP_REDIRECT support for eBPF programs attached to cpumap entries. This patch has been tested on Marvell ESPRESSObin using a modified version of xdp_redirect_cpu sample in order to attach a XDP program to CPUMAP entries to perform a redirect on the mvneta interface. In particular the following scenario has been tested: rq (cpu0) --> mvneta - XDP_REDIRECT (cpu0) --> CPUMAP - XDP_REDIRECT (cpu1) --> mvneta $./xdp_redirect_cpu -p xdp_cpu_map0 -d eth0 -c 1 -e xdp_redirect \ -f xdp_redirect_kern.o -m tx_port -r eth0 tx: 285.2 Kpps rx: 285.2 Kpps Attaching a simple XDP program on eth0 to perform XDP_TX gives comparable results: tx: 288.4 Kpps rx: 288.4 Kpps Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/bpf/2cf8373a731867af302b00c4ff16c122630c4980.1594734381.git.lorenzo@kernel.org --- include/net/xdp.h | 1 + include/trace/events/xdp.h | 6 ++++-- kernel/bpf/cpumap.c | 17 +++++++++++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/include/net/xdp.h b/include/net/xdp.h index 83b9e0142b52..5be0d4d65b94 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -99,6 +99,7 @@ struct xdp_frame { }; struct xdp_cpumap_stats { + unsigned int redirect; unsigned int pass; unsigned int drop; }; diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index e2c99f5bee39..cd24e8a59529 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -190,6 +190,7 @@ TRACE_EVENT(xdp_cpumap_kthread, __field(int, sched) __field(unsigned int, xdp_pass) __field(unsigned int, xdp_drop) + __field(unsigned int, xdp_redirect) ), TP_fast_assign( @@ -201,18 +202,19 @@ TRACE_EVENT(xdp_cpumap_kthread, __entry->sched = sched; __entry->xdp_pass = xdp_stats->pass; __entry->xdp_drop = xdp_stats->drop; + __entry->xdp_redirect = xdp_stats->redirect; ), TP_printk("kthread" " cpu=%d map_id=%d action=%s" " processed=%u drops=%u" " sched=%d" - " xdp_pass=%u xdp_drop=%u", + " xdp_pass=%u xdp_drop=%u xdp_redirect=%u", __entry->cpu, __entry->map_id, 
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), __entry->processed, __entry->drops, __entry->sched, - __entry->xdp_pass, __entry->xdp_drop) + __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect) ); TRACE_EVENT(xdp_cpumap_enqueue, diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index b3a8aea81ee5..4c95d0615ca2 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -239,7 +239,7 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, if (!rcpu->prog) return n; - rcu_read_lock(); + rcu_read_lock_bh(); xdp_set_return_frame_no_direct(); xdp.rxq = &rxq; @@ -267,6 +267,16 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, stats->pass++; } break; + case XDP_REDIRECT: + err = xdp_do_redirect(xdpf->dev_rx, &xdp, + rcpu->prog); + if (unlikely(err)) { + xdp_return_frame(xdpf); + stats->drop++; + } else { + stats->redirect++; + } + break; default: bpf_warn_invalid_xdp_action(act); /* fallthrough */ @@ -277,9 +287,12 @@ static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu, } } + if (stats->redirect) + xdp_do_flush_map(); + xdp_clear_return_frame_no_direct(); - rcu_read_unlock(); + rcu_read_unlock_bh(); /* resched point, may call do_softirq() */ return nframes; } From 4be556cf5aef416904743f2cee689351f91445b6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:40 +0200 Subject: [PATCH 1117/2056] libbpf: Add SEC name for xdp programs attached to CPUMAP As for DEVMAP, support SEC("xdp_cpumap/") as a short cut for loading the program with type BPF_PROG_TYPE_XDP and expected attach type BPF_XDP_CPUMAP. Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/33174c41993a6d860d9c7c1f280a2477ee39ed11.1594734381.git.lorenzo@kernel.org --- tools/lib/bpf/libbpf.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4489f95f1d1a..f55fd8a5c008 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6912,6 +6912,8 @@ static const struct bpf_sec_def section_defs[] = { .attach_fn = attach_iter), BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP, BPF_XDP_DEVMAP), + BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, + BPF_XDP_CPUMAP), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), From ce4dade7f12a8f3e7918184ac851d992f551805d Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:41 +0200 Subject: [PATCH 1118/2056] samples/bpf: xdp_redirect_cpu: Load a eBPF program on cpumap Extend xdp_redirect_cpu_{usr,kern}.c adding the possibility to load a XDP program on cpumap entries. 
The following options have been added: - mprog-name: cpumap entry program name - mprog-filename: cpumap entry program filename - redirect-device: output interface if the cpumap program performs a XDP_REDIRECT to an egress interface - redirect-map: bpf map used to perform XDP_REDIRECT to an egress interface - mprog-disable: disable loading XDP program on cpumap entries Add xdp_pass, xdp_drop, xdp_redirect stats accounting Co-developed-by: Jesper Dangaard Brouer Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/aa5a9a281b9dac425620fdabe82670ffb6bbdb92.1594734381.git.lorenzo@kernel.org --- samples/bpf/xdp_redirect_cpu_kern.c | 25 ++-- samples/bpf/xdp_redirect_cpu_user.c | 175 +++++++++++++++++++++++++--- 2 files changed, 178 insertions(+), 22 deletions(-) diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c index 2baf8db1f7e7..8255025dea97 100644 --- a/samples/bpf/xdp_redirect_cpu_kern.c +++ b/samples/bpf/xdp_redirect_cpu_kern.c @@ -21,7 +21,7 @@ struct { __uint(type, BPF_MAP_TYPE_CPUMAP); __uint(key_size, sizeof(u32)); - __uint(value_size, sizeof(u32)); + __uint(value_size, sizeof(struct bpf_cpumap_val)); __uint(max_entries, MAX_CPUS); } cpu_map SEC(".maps"); @@ -30,6 +30,9 @@ struct datarec { __u64 processed; __u64 dropped; __u64 issue; + __u64 xdp_pass; + __u64 xdp_drop; + __u64 xdp_redirect; }; /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success @@ -692,13 +695,16 @@ int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx) * Code in: kernel/include/trace/events/xdp.h */ struct cpumap_kthread_ctx { - u64 __pad; // First 8 bytes are not accessible by bpf code - int map_id; // offset:8; size:4; signed:1; - u32 act; // offset:12; size:4; signed:0; - int cpu; // offset:16; size:4; signed:1; - unsigned int drops; // offset:20; size:4; signed:0; - unsigned int processed; // offset:24; size:4; signed:0; - int sched; // offset:28; size:4; signed:1; + u64 __pad; // First 8 bytes are not accessible + int map_id; // offset:8; size:4; signed:1; + u32 act; // offset:12; size:4; signed:0; + int cpu; // offset:16; size:4; signed:1; + unsigned int drops; // offset:20; size:4; signed:0; + unsigned int processed; // offset:24; size:4; signed:0; + int sched; // offset:28; size:4; signed:1; + unsigned int xdp_pass; // offset:32; size:4; signed:0; + unsigned int xdp_drop; // offset:36; size:4; signed:0; + unsigned int xdp_redirect; // offset:40; size:4; signed:0; }; SEC("tracepoint/xdp/xdp_cpumap_kthread") @@ -712,6 +718,9 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx) return 0; rec->processed += ctx->processed; rec->dropped += ctx->drops; + rec->xdp_pass += ctx->xdp_pass; + rec->xdp_drop += ctx->xdp_drop; + rec->xdp_redirect += ctx->xdp_redirect; /* Count times kthread yielded CPU via schedule call */ if (ctx->sched) diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c index 6bb2d95cb26c..004c0622c913 100644 --- a/samples/bpf/xdp_redirect_cpu_user.c +++ b/samples/bpf/xdp_redirect_cpu_user.c @@ -70,6 +70,11 @@ static const struct option long_options[] = { {"stress-mode", no_argument, NULL, 'x' }, {"no-separators", no_argument, NULL, 'z' }, {"force", no_argument, NULL, 'F' }, + {"mprog-disable", no_argument, NULL, 'n' }, + {"mprog-name", required_argument, NULL, 'e' }, + {"mprog-filename", required_argument, NULL, 'f' }, + {"redirect-device", required_argument, NULL, 'r' }, + {"redirect-map", required_argument, NULL, 'm' }, 
{0, 0, NULL, 0 } }; @@ -156,6 +161,9 @@ struct datarec { __u64 processed; __u64 dropped; __u64 issue; + __u64 xdp_pass; + __u64 xdp_drop; + __u64 xdp_redirect; }; struct record { __u64 timestamp; @@ -175,6 +183,9 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec) /* For percpu maps, userspace gets a value per possible CPU */ unsigned int nr_cpus = bpf_num_possible_cpus(); struct datarec values[nr_cpus]; + __u64 sum_xdp_redirect = 0; + __u64 sum_xdp_pass = 0; + __u64 sum_xdp_drop = 0; __u64 sum_processed = 0; __u64 sum_dropped = 0; __u64 sum_issue = 0; @@ -196,10 +207,19 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec) sum_dropped += values[i].dropped; rec->cpu[i].issue = values[i].issue; sum_issue += values[i].issue; + rec->cpu[i].xdp_pass = values[i].xdp_pass; + sum_xdp_pass += values[i].xdp_pass; + rec->cpu[i].xdp_drop = values[i].xdp_drop; + sum_xdp_drop += values[i].xdp_drop; + rec->cpu[i].xdp_redirect = values[i].xdp_redirect; + sum_xdp_redirect += values[i].xdp_redirect; } rec->total.processed = sum_processed; rec->total.dropped = sum_dropped; rec->total.issue = sum_issue; + rec->total.xdp_pass = sum_xdp_pass; + rec->total.xdp_drop = sum_xdp_drop; + rec->total.xdp_redirect = sum_xdp_redirect; return true; } @@ -300,17 +320,33 @@ static __u64 calc_errs_pps(struct datarec *r, return pps; } +static void calc_xdp_pps(struct datarec *r, struct datarec *p, + double *xdp_pass, double *xdp_drop, + double *xdp_redirect, double period_) +{ + *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0; + if (period_ > 0) { + *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_; + *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_; + *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_; + } +} + static void stats_print(struct stats_record *stats_rec, struct stats_record *stats_prev, - char *prog_name) + char *prog_name, char *mprog_name, int mprog_fd) { unsigned int nr_cpus = bpf_num_possible_cpus(); double pps = 0, drop = 0, err = 0; + bool mprog_enabled = false; struct record *rec, *prev; int to_cpu; double t; int i; + if (mprog_fd > 0) + mprog_enabled = true; + /* Header */ printf("Running XDP/eBPF prog_name:%s\n", prog_name); printf("%-15s %-7s %-14s %-11s %-9s\n", @@ -455,6 +491,34 @@ static void stats_print(struct stats_record *stats_rec, printf(fm2_err, "xdp_exception", "total", pps, drop); } + /* CPUMAP attached XDP program that runs on remote/destination CPU */ + if (mprog_enabled) { + char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f\n"; + char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f\n"; + double xdp_pass, xdp_drop, xdp_redirect; + + printf("\n2nd remote XDP/eBPF prog_name: %s\n", mprog_name); + printf("%-15s %-7s %-14s %-11s %-9s\n", + "XDP-cpumap", "CPU:to", "xdp-pass", "xdp-drop", "xdp-redir"); + + rec = &stats_rec->kthread; + prev = &stats_prev->kthread; + t = calc_period(rec, prev); + for (i = 0; i < nr_cpus; i++) { + struct datarec *r = &rec->cpu[i]; + struct datarec *p = &prev->cpu[i]; + + calc_xdp_pps(r, p, &xdp_pass, &xdp_drop, + &xdp_redirect, t); + if (xdp_pass > 0 || xdp_drop > 0 || xdp_redirect > 0) + printf(fmt_k, "xdp-in-kthread", i, xdp_pass, xdp_drop, + xdp_redirect); + } + calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop, + &xdp_redirect, t); + printf(fm2_k, "xdp-in-kthread", "total", xdp_pass, xdp_drop, xdp_redirect); + } + printf("\n"); fflush(stdout); } @@ -491,7 +555,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b) *b = tmp; } -static int create_cpu_entry(__u32 cpu, __u32 
queue_size, +static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value, __u32 avail_idx, bool new) { __u32 curr_cpus_count = 0; @@ -501,7 +565,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size, /* Add a CPU entry to cpumap, as this allocate a cpu entry in * the kernel for the cpu. */ - ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0); + ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0); if (ret) { fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret); exit(EXIT_FAIL_BPF); @@ -532,9 +596,9 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size, } } /* map_fd[7] = cpus_iterator */ - printf("%s CPU:%u as idx:%u queue_size:%d (total cpus_count:%u)\n", + printf("%s CPU:%u as idx:%u qsize:%d prog_fd: %d (cpus_count:%u)\n", new ? "Add-new":"Replace", cpu, avail_idx, - queue_size, curr_cpus_count); + value->qsize, value->bpf_prog.fd, curr_cpus_count); return 0; } @@ -558,21 +622,26 @@ static void mark_cpus_unavailable(void) } /* Stress cpumap management code by concurrently changing underlying cpumap */ -static void stress_cpumap(void) +static void stress_cpumap(struct bpf_cpumap_val *value) { /* Changing qsize will cause kernel to free and alloc a new * bpf_cpu_map_entry, with an associated/complicated tear-down * procedure. */ - create_cpu_entry(1, 1024, 0, false); - create_cpu_entry(1, 8, 0, false); - create_cpu_entry(1, 16000, 0, false); + value->qsize = 1024; + create_cpu_entry(1, value, 0, false); + value->qsize = 8; + create_cpu_entry(1, value, 0, false); + value->qsize = 16000; + create_cpu_entry(1, value, 0, false); } static void stats_poll(int interval, bool use_separators, char *prog_name, + char *mprog_name, struct bpf_cpumap_val *value, bool stress_mode) { struct stats_record *record, *prev; + int mprog_fd; record = alloc_stats_record(); prev = alloc_stats_record(); @@ -584,11 +653,12 @@ static void stats_poll(int interval, bool use_separators, char *prog_name, while (1) { swap(&prev, &record); + mprog_fd = value->bpf_prog.fd; stats_collect(record); - stats_print(record, prev, prog_name); + stats_print(record, prev, prog_name, mprog_name, mprog_fd); sleep(interval); if (stress_mode) - stress_cpumap(); + stress_cpumap(value); } free_stats_record(record); @@ -661,15 +731,66 @@ static int init_map_fds(struct bpf_object *obj) return 0; } +static int load_cpumap_prog(char *file_name, char *prog_name, + char *redir_interface, char *redir_map) +{ + struct bpf_prog_load_attr prog_load_attr = { + .prog_type = BPF_PROG_TYPE_XDP, + .expected_attach_type = BPF_XDP_CPUMAP, + .file = file_name, + }; + struct bpf_program *prog; + struct bpf_object *obj; + int fd; + + if (bpf_prog_load_xattr(&prog_load_attr, &obj, &fd)) + return -1; + + if (fd < 0) { + fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n", + strerror(errno)); + return fd; + } + + if (redir_interface && redir_map) { + int err, map_fd, ifindex_out, key = 0; + + map_fd = bpf_object__find_map_fd_by_name(obj, redir_map); + if (map_fd < 0) + return map_fd; + + ifindex_out = if_nametoindex(redir_interface); + if (!ifindex_out) + return -1; + + err = bpf_map_update_elem(map_fd, &key, &ifindex_out, 0); + if (err < 0) + return err; + } + + prog = bpf_object__find_program_by_title(obj, prog_name); + if (!prog) { + fprintf(stderr, "bpf_object__find_program_by_title failed\n"); + return EXIT_FAIL; + } + + return bpf_program__fd(prog); +} + int main(int argc, char **argv) { struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY}; char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs"; + char *mprog_filename = 
"xdp_redirect_kern.o"; + char *redir_interface = NULL, *redir_map = NULL; + char *mprog_name = "xdp_redirect_dummy"; + bool mprog_disable = false; struct bpf_prog_load_attr prog_load_attr = { .prog_type = BPF_PROG_TYPE_UNSPEC, }; struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); + struct bpf_cpumap_val value; bool use_separators = true; bool stress_mode = false; struct bpf_program *prog; @@ -725,7 +846,7 @@ int main(int argc, char **argv) memset(cpu, 0, n_cpus * sizeof(int)); /* Parse commands line args */ - while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF", + while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:", long_options, &longindex)) != -1) { switch (opt) { case 'd': @@ -759,6 +880,21 @@ int main(int argc, char **argv) /* Selecting eBPF prog to load */ prog_name = optarg; break; + case 'n': + mprog_disable = true; + break; + case 'f': + mprog_filename = optarg; + break; + case 'e': + mprog_name = optarg; + break; + case 'r': + redir_interface = optarg; + break; + case 'm': + redir_map = optarg; + break; case 'c': /* Add multiple CPUs */ add_cpu = strtoul(optarg, NULL, 0); @@ -804,8 +940,18 @@ int main(int argc, char **argv) goto out; } + value.bpf_prog.fd = 0; + if (!mprog_disable) + value.bpf_prog.fd = load_cpumap_prog(mprog_filename, mprog_name, + redir_interface, redir_map); + if (value.bpf_prog.fd < 0) { + err = value.bpf_prog.fd; + goto out; + } + value.qsize = qsize; + for (i = 0; i < added_cpus; i++) - create_cpu_entry(cpu[i], qsize, i, true); + create_cpu_entry(cpu[i], &value, i, true); /* Remove XDP program when program is interrupted or killed */ signal(SIGINT, int_exit); @@ -838,7 +984,8 @@ int main(int argc, char **argv) } prog_id = info.id; - stats_poll(interval, use_separators, prog_name, stress_mode); + stats_poll(interval, use_separators, prog_name, mprog_name, + &value, stress_mode); out: free(cpu); return err; From 05500125021191f703d3759a7e94a37b7257d959 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 14 Jul 2020 15:56:42 +0200 Subject: [PATCH 1119/2056] selftest: Add tests for XDP programs in CPUMAP entries Similar to what have been done for DEVMAP, introduce tests to verify ability to add a XDP program to an entry in a CPUMAP. Verify CPUMAP programs can not be attached to devices as a normal XDP program, and only programs with BPF_XDP_CPUMAP attach type can be loaded in a CPUMAP. 
Signed-off-by: Lorenzo Bianconi Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/9c632fcea5382ea7b4578bd06b6eddf382c3550b.1594734381.git.lorenzo@kernel.org --- .../bpf/prog_tests/xdp_cpumap_attach.c | 70 +++++++++++++++++++ .../bpf/progs/test_xdp_with_cpumap_helpers.c | 36 ++++++++++ 2 files changed, 106 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c new file mode 100644 index 000000000000..0176573fe4e7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include "test_xdp_with_cpumap_helpers.skel.h" + +#define IFINDEX_LO 1 + +void test_xdp_with_cpumap_helpers(void) +{ + struct test_xdp_with_cpumap_helpers *skel; + struct bpf_prog_info info = {}; + struct bpf_cpumap_val val = { + .qsize = 192, + }; + __u32 duration = 0, idx = 0; + __u32 len = sizeof(info); + int err, prog_fd, map_fd; + + skel = test_xdp_with_cpumap_helpers__open_and_load(); + if (CHECK_FAIL(!skel)) { + perror("test_xdp_with_cpumap_helpers__open_and_load"); + return; + } + + /* can not attach program with cpumaps that allow programs + * as xdp generic + */ + prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog); + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); + CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP", + "should have failed\n"); + + prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm); + map_fd = bpf_map__fd(skel->maps.cpu_map); + err = bpf_obj_get_info_by_fd(prog_fd, &info, &len); + if (CHECK_FAIL(err)) + goto out_close; + + val.bpf_prog.fd = prog_fd; + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + CHECK(err, "Add program to cpumap entry", "err %d errno %d\n", + err, errno); + + err = bpf_map_lookup_elem(map_fd, &idx, &val); + CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno); + CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry", + "expected %u read %u\n", info.id, val.bpf_prog.id); + + /* can not attach BPF_XDP_CPUMAP program to a device */ + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE); + CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program", + "should have failed\n"); + + val.qsize = 192; + val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog); + err = bpf_map_update_elem(map_fd, &idx, &val, 0); + CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry", + "should have failed\n"); + +out_close: + test_xdp_with_cpumap_helpers__destroy(skel); +} + +void test_xdp_cpumap_attach(void) +{ + if (test__start_subtest("cpumap_with_progs")) + test_xdp_with_cpumap_helpers(); +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c new file mode 100644 index 000000000000..59ee4f182ff8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#define IFINDEX_LO 1 + +struct { + __uint(type, BPF_MAP_TYPE_CPUMAP); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_cpumap_val)); + __uint(max_entries, 4); +} cpu_map SEC(".maps"); + +SEC("xdp_redir") +int xdp_redir_prog(struct xdp_md *ctx) +{ + return 
bpf_redirect_map(&cpu_map, 1, 0); +} + +SEC("xdp_dummy") +int xdp_dummy_prog(struct xdp_md *ctx) +{ + return XDP_PASS; +} + +SEC("xdp_cpumap/dummy_cm") +int xdp_dummy_cm(struct xdp_md *ctx) +{ + if (ctx->ingress_ifindex == IFINDEX_LO) + return XDP_DROP; + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; From e53a57e56fce787ee4d02ffc2cf9ba46f0173f58 Mon Sep 17 00:00:00 2001 From: Sergey Organov Date: Wed, 15 Jul 2020 18:42:57 +0300 Subject: [PATCH 1120/2056] net: fec: enable to use PPS feature without time stamping PPS feature could be useful even when hardware time stamping of network packets is not in use, so remove offending check for this condition from fec_ptp_enable_pps(). Signed-off-by: Sergey Organov Acked-by: Fugang Duan Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/fec_ptp.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 945643c02615..fda306b3e21f 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -103,11 +103,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable) u64 ns; val = 0; - if (!(fep->hwts_tx_en || fep->hwts_rx_en)) { - dev_err(&fep->pdev->dev, "No ptp stack is running\n"); - return -EINVAL; - } - if (fep->pps_enable == enable) return 0; From 199560343e9dac0e5f9e54307de767ef3fc781f4 Mon Sep 17 00:00:00 2001 From: Sergey Organov Date: Wed, 15 Jul 2020 18:42:58 +0300 Subject: [PATCH 1121/2056] net: fec: initialize clock with 0 rather than current kernel time Initializing with 0 makes it much easier to identify time stamps from otherwise uninitialized clock. Initialization of PTP clock with current kernel time makes little sense as PTP time scale differs from UTC time scale that kernel time represents. It only leads to confusion when no actual PTP initialization happens, as these time scales differ in a small integer number of seconds (37 at the time of writing.) Signed-off-by: Sergey Organov Acked-by: Fugang Duan Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/fec_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index fda306b3e21f..4bec7e66a39b 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -264,7 +264,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) fep->cc.mult = FEC_CC_MULT; /* reset the ns time counter */ - timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real())); + timecounter_init(&fep->tc, &fep->cc, 0); spin_unlock_irqrestore(&fep->tmreg_lock, flags); } From 2b803088865e459beffa1d153ad59db72d1c726a Mon Sep 17 00:00:00 2001 From: Sergey Organov Date: Wed, 15 Jul 2020 18:42:59 +0300 Subject: [PATCH 1122/2056] net: fec: get rid of redundant code in fec_ptp_set() Code of the form "if(x) x = 0" replaced with "x = 0". Code of the form "if(x == a) x = a" removed. 
Signed-off-by: Sergey Organov Acked-by: Fugang Duan Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/fec_ptp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 4bec7e66a39b..0c8c56bdd7ac 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -473,9 +473,7 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr) switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: - if (fep->hwts_rx_en) - fep->hwts_rx_en = 0; - config.rx_filter = HWTSTAMP_FILTER_NONE; + fep->hwts_rx_en = 0; break; default: From 31bb1a560b2996f9953cfc232f6349d043aaab49 Mon Sep 17 00:00:00 2001 From: Sergey Organov Date: Wed, 15 Jul 2020 18:43:00 +0300 Subject: [PATCH 1123/2056] net: fec: replace snprintf() with strlcpy() in fec_ptp_init() No need to use snprintf() on a constant string, nor using magic constant in the fixed code was a good idea. Signed-off-by: Sergey Organov Acked-by: Fugang Duan Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/freescale/fec_ptp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 0c8c56bdd7ac..93a86553147c 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -570,7 +570,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) int ret; fep->ptp_caps.owner = THIS_MODULE; - snprintf(fep->ptp_caps.name, 16, "fec ptp"); + strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name)); fep->ptp_caps.max_adj = 250000000; fep->ptp_caps.n_alarm = 0; From 632ca50f2cbd8809a2fc1934b4b2c9c49bd55e98 Mon Sep 17 00:00:00 2001 From: John Ogness Date: Tue, 7 Jul 2020 17:28:04 +0206 Subject: [PATCH 1124/2056] af_packet: TPACKET_V3: replace busy-wait loop A busy-wait loop is used to implement waiting for bits to be copied from the skb to the kernel buffer before retiring a block. This is a problem on PREEMPT_RT because the copying task could be preempted by the busy-waiting task and thus live lock in the busy-wait loop. Replace the busy-wait logic with an rwlock_t. This provides lockdep coverage and makes the code RT ready. Signed-off-by: John Ogness Signed-off-by: Jakub Kicinski --- net/packet/af_packet.c | 20 ++++++++++---------- net/packet/internal.h | 2 +- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 7b436ebde61d..781fee93b7d5 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -593,6 +593,7 @@ static void init_prb_bdqc(struct packet_sock *po, req_u->req3.tp_block_size); p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov); p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv; + rwlock_init(&p1->blk_fill_in_prog_lock); p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv); prb_init_ft_ops(p1, req_u); @@ -659,10 +660,9 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t) * */ if (BLOCK_NUM_PKTS(pbd)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ - cpu_relax(); - } + /* Waiting for skb_copy_bits to finish... 
*/ + write_lock(&pkc->blk_fill_in_prog_lock); + write_unlock(&pkc->blk_fill_in_prog_lock); } if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { @@ -921,10 +921,9 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc, * the timer-handler already handled this case. */ if (!(status & TP_STATUS_BLK_TMO)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ - cpu_relax(); - } + /* Waiting for skb_copy_bits to finish... */ + write_lock(&pkc->blk_fill_in_prog_lock); + write_unlock(&pkc->blk_fill_in_prog_lock); } prb_close_block(pkc, pbd, po, status); return; @@ -944,7 +943,8 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc) static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb) { struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); - atomic_dec(&pkc->blk_fill_in_prog); + + read_unlock(&pkc->blk_fill_in_prog_lock); } static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc, @@ -998,7 +998,7 @@ static void prb_fill_curr_block(char *curr, pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len); BLOCK_NUM_PKTS(pbd) += 1; - atomic_inc(&pkc->blk_fill_in_prog); + read_lock(&pkc->blk_fill_in_prog_lock); prb_run_all_ft_ops(pkc, ppd); } diff --git a/net/packet/internal.h b/net/packet/internal.h index 907f4cd2a718..fd41ecb7f605 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -39,7 +39,7 @@ struct tpacket_kbdq_core { char *nxt_offset; struct sk_buff *skb; - atomic_t blk_fill_in_prog; + rwlock_t blk_fill_in_prog_lock; /* Default is set to 8ms */ #define DEFAULT_PRB_RETIRE_TOV (8) From de40a8abf07bfa125b7496b20ce6f92c6370eecd Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Thu, 16 Jul 2020 09:39:31 -0500 Subject: [PATCH 1125/2056] bpf: revert "test_bpf: Flag tests that cannot be jited on s390" This reverts commit 3203c9010060 ("test_bpf: flag tests that cannot be jited on s390"). The s390 bpf JIT previously had a restriction on the maximum program size, which required some tests in test_bpf to be flagged as expected failures. The program size limitation has been removed, and the tests now pass, so these tests should no longer be flagged. Fixes: d1242b10ff03 ("s390/bpf: Remove JITed image size limitations") Signed-off-by: Seth Forshee Signed-off-by: Daniel Borkmann Reviewed-by: Ilya Leoshkevich Link: https://lore.kernel.org/bpf/20200716143931.330122-1-seth.forshee@canonical.com --- lib/test_bpf.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index a5fddf9ebcb7..ca7d635bccd9 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -5275,31 +5275,21 @@ static struct bpf_test tests[] = { { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Ctx heavy transformations", { }, -#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) - CLASSIC | FLAG_EXPECTED_FAIL, -#else CLASSIC, -#endif { }, { { 1, SKB_VLAN_PRESENT }, { 10, SKB_VLAN_PRESENT } }, .fill_helper = bpf_fill_maxinsns6, - .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. */ "BPF_MAXINSNS: Call heavy transformations", { }, -#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) - CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, -#else CLASSIC | FLAG_NO_DATA, -#endif { }, { { 1, 0 }, { 10, 0 } }, .fill_helper = bpf_fill_maxinsns7, - .expected_errcode = -ENOTSUPP, }, { /* Mainly checking JIT here. 
*/ "BPF_MAXINSNS: Jump heavy test", @@ -5350,28 +5340,18 @@ static struct bpf_test tests[] = { { "BPF_MAXINSNS: exec all MSH", { }, -#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) - CLASSIC | FLAG_EXPECTED_FAIL, -#else CLASSIC, -#endif { 0xfa, 0xfb, 0xfc, 0xfd, }, { { 4, 0xababab83 } }, .fill_helper = bpf_fill_maxinsns13, - .expected_errcode = -ENOTSUPP, }, { "BPF_MAXINSNS: ld_abs+get_processor_id", { }, -#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390) - CLASSIC | FLAG_EXPECTED_FAIL, -#else CLASSIC, -#endif { }, { { 1, 0xbee } }, .fill_helper = bpf_fill_ld_abs_get_processor_id, - .expected_errcode = -ENOTSUPP, }, /* * LD_IND / LD_ABS on fragmented SKBs From e81e7a533742c30615f8b15390df6525cef96778 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 15 Jul 2020 15:41:07 -0700 Subject: [PATCH 1126/2056] selftests/bpf: Fix possible hang in sockopt_inherit Andrii reported that sockopt_inherit occasionally hangs up on 5.5 kernel [0]. This can happen if server_thread runs faster than the main thread. In that case, pthread_cond_wait will wait forever because pthread_cond_signal was executed before the main thread was blocking. Let's move pthread_mutex_lock up a bit to make sure server_thread runs strictly after the main thread goes to sleep. (Not sure why this is 5.5 specific, maybe scheduling is less deterministic? But I was able to confirm that it does indeed happen in a VM.) [0] https://lore.kernel.org/bpf/CAEf4BzY0-bVNHmCkMFPgObs=isUAyg-dFzGDY7QWYkmm7rmTSg@mail.gmail.com/ Reported-by: Andrii Nakryiko Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200715224107.3591967-1-sdf@google.com --- tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 8547ecbdc61f..ec281b0363b8 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -193,11 +193,10 @@ static void run_test(int cgroup_fd) if (CHECK_FAIL(server_fd < 0)) goto close_bpf_object; + pthread_mutex_lock(&server_started_mtx); if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread, (void *)&server_fd))) goto close_server_fd; - - pthread_mutex_lock(&server_started_mtx); pthread_cond_wait(&server_started, &server_started_mtx); pthread_mutex_unlock(&server_started_mtx); From bfdfa51702dec67e9fcd52568b4cf3c7f799db8b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 18:29:11 -0700 Subject: [PATCH 1127/2056] bpf: Drop duplicated words in uapi helper comments Drop doubled words "will" and "attach". Signed-off-by: Randy Dunlap Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/6b9f71ae-4f8e-0259-2c5d-187ddaefe6eb@infradead.org --- include/uapi/linux/bpf.h | 6 +++--- tools/include/uapi/linux/bpf.h | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index c010b57fce3f..7ac3992dacfe 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2420,7 +2420,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. 
For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2457,7 +2457,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -4000,7 +4000,7 @@ struct bpf_link_info { /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on - * attach attach type). + * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index c010b57fce3f..7ac3992dacfe 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -2420,7 +2420,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -2457,7 +2457,7 @@ union bpf_attr { * Look for an IPv6 socket. * * If the *netns* is a negative signed 32-bit integer, then the - * socket lookup table in the netns associated with the *ctx* will + * socket lookup table in the netns associated with the *ctx* * will be used. For the TC hooks, this is the netns of the device * in the skb. For socket hooks, this is the netns of the socket. * If *netns* is any other signed 32-bit value greater than or @@ -4000,7 +4000,7 @@ struct bpf_link_info { /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed * by user and intended to be used by socket (e.g. to bind to, depends on - * attach attach type). + * attach type). */ struct bpf_sock_addr { __u32 user_family; /* Allows 4-byte read, but no write. */ From 89e35f66d552c98c1cfee4a058de158d7f21796a Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 13 Jul 2020 00:28:33 +0300 Subject: [PATCH 1128/2056] net: mscc: ocelot: rethink Kconfig dependencies again Having the users of MSCC_OCELOT_SWITCH_LIB depend on REGMAP_MMIO was a bad idea, since that symbol is not user-selectable. So we should have kept a 'select REGMAP_MMIO'. When we do that, we run into 2 more problems: - By depending on GENERIC_PHY, we are causing a recursive dependency. But it looks like GENERIC_PHY has no other dependencies, and other drivers select it, so we can select it too: drivers/of/Kconfig:69:error: recursive dependency detected! 
drivers/of/Kconfig:69: symbol OF_IRQ depends on IRQ_DOMAIN kernel/irq/Kconfig:68: symbol IRQ_DOMAIN is selected by REGMAP drivers/base/regmap/Kconfig:7: symbol REGMAP default is visible depending on REGMAP_MMIO drivers/base/regmap/Kconfig:39: symbol REGMAP_MMIO is selected by MSCC_OCELOT_SWITCH_LIB drivers/net/ethernet/mscc/Kconfig:15: symbol MSCC_OCELOT_SWITCH_LIB is selected by MSCC_OCELOT_SWITCH drivers/net/ethernet/mscc/Kconfig:22: symbol MSCC_OCELOT_SWITCH depends on GENERIC_PHY drivers/phy/Kconfig:8: symbol GENERIC_PHY is selected by PHY_BCM_NS_USB3 drivers/phy/broadcom/Kconfig:41: symbol PHY_BCM_NS_USB3 depends on MDIO_BUS drivers/net/phy/Kconfig:13: symbol MDIO_BUS depends on MDIO_DEVICE drivers/net/phy/Kconfig:6: symbol MDIO_DEVICE is selected by PHYLIB drivers/net/phy/Kconfig:254: symbol PHYLIB is selected by ARC_EMAC_CORE drivers/net/ethernet/arc/Kconfig:19: symbol ARC_EMAC_CORE is selected by ARC_EMAC drivers/net/ethernet/arc/Kconfig:25: symbol ARC_EMAC depends on OF_IRQ - By depending on PHYLIB, we are causing a recursive dependency. PHYLIB only has a single dependency, "depends on NETDEVICES", which we are already depending on, so we can again hack our way into conformance by turning the PHYLIB dependency into a select. drivers/of/Kconfig:69:error: recursive dependency detected! drivers/of/Kconfig:69: symbol OF_IRQ depends on IRQ_DOMAIN kernel/irq/Kconfig:68: symbol IRQ_DOMAIN is selected by REGMAP drivers/base/regmap/Kconfig:7: symbol REGMAP default is visible depending on REGMAP_MMIO drivers/base/regmap/Kconfig:39: symbol REGMAP_MMIO is selected by MSCC_OCELOT_SWITCH_LIB drivers/net/ethernet/mscc/Kconfig:15: symbol MSCC_OCELOT_SWITCH_LIB is selected by MSCC_OCELOT_SWITCH drivers/net/ethernet/mscc/Kconfig:22: symbol MSCC_OCELOT_SWITCH depends on PHYLIB drivers/net/phy/Kconfig:254: symbol PHYLIB is selected by ARC_EMAC_CORE drivers/net/ethernet/arc/Kconfig:19: symbol ARC_EMAC_CORE is selected by ARC_EMAC drivers/net/ethernet/arc/Kconfig:25: symbol ARC_EMAC depends on OF_IRQ Fixes: f4d0323bae4e ("net: mscc: ocelot: convert MSCC_OCELOT_SWITCH into a library") Signed-off-by: Vladimir Oltean Signed-off-by: Jakub Kicinski --- drivers/net/dsa/ocelot/Kconfig | 1 - drivers/net/ethernet/mscc/Kconfig | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig index 5b23bf6ba848..f121619d81fe 100644 --- a/drivers/net/dsa/ocelot/Kconfig +++ b/drivers/net/dsa/ocelot/Kconfig @@ -5,7 +5,6 @@ config NET_DSA_MSCC_FELIX depends on NET_VENDOR_MICROSEMI depends on NET_VENDOR_FREESCALE depends on HAS_IOMEM - depends on REGMAP_MMIO select MSCC_OCELOT_SWITCH_LIB select NET_DSA_TAG_OCELOT select FSL_ENETC_MDIO diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 3cfd1b629886..ee7bb7e24e8e 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -11,8 +11,10 @@ config NET_VENDOR_MICROSEMI if NET_VENDOR_MICROSEMI -# Users should depend on NET_SWITCHDEV, HAS_IOMEM, PHYLIB and REGMAP_MMIO +# Users should depend on NET_SWITCHDEV, HAS_IOMEM config MSCC_OCELOT_SWITCH_LIB + select REGMAP_MMIO + select PHYLIB tristate help This is a hardware support library for Ocelot network switches. 
It is @@ -21,12 +23,10 @@ config MSCC_OCELOT_SWITCH_LIB config MSCC_OCELOT_SWITCH tristate "Ocelot switch driver" depends on NET_SWITCHDEV - depends on GENERIC_PHY - depends on REGMAP_MMIO depends on HAS_IOMEM - depends on PHYLIB depends on OF_NET select MSCC_OCELOT_SWITCH_LIB + select GENERIC_PHY help This driver supports the Ocelot network switch device as present on the Ocelot SoCs (VSC7514). From 1315971fea66d2c1b09cab115a487898ba628cac Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 10 Jul 2020 21:12:44 +0300 Subject: [PATCH 1129/2056] net/mlx5e: Fix missing switch_id for representors Cited commit in fixes tag missed to set the switch id of the PF and VF ports. Due to this flow cannot be offloaded, a simple command like below fails to offload with below error. tc filter add dev ens2f0np0 parent ffff: prio 1 flower \ dst_mac 00:00:00:00:00:00/00:00:00:00:00:00 skip_sw \ action mirred egress redirect dev ens2f0np0pf0vf0 Error: mlx5_core: devices are not on same switch HW, can't offload forwarding. Hence, fix it by setting switch id for each PF and VF representors port as before the cited commit. Fixes: 71ad8d55f8e5 ("devlink: Replace devlink_port_attrs_set parameters with a struct") Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan --- .../net/ethernet/mellanox/mlx5/core/en_rep.c | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0a69f10ac30c..c300729fb498 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1196,18 +1196,22 @@ static int register_devlink_port(struct mlx5_core_dev *dev, mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport); pfnum = PCI_FUNC(dev->pdev->devfn); - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pfnum; - memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len); - attrs.switch_id.id_len = ppid.id_len; - if (rep->vport == MLX5_VPORT_UPLINK) + if (rep->vport == MLX5_VPORT_UPLINK) { + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pfnum; + memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len); + attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_set(&rpriv->dl_port, &attrs); - else if (rep->vport == MLX5_VPORT_PF) + } else if (rep->vport == MLX5_VPORT_PF) { + memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len); + rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum); - else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) + } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) { + memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len); + rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_vf_set(&rpriv->dl_port, pfnum, rep->vport - 1); - + } return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index); } From 8b5ec43d73dbdb427651caac5b76610ec0f5f298 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 14 Jul 2020 18:54:46 -0700 Subject: [PATCH 1130/2056] net/mlx5e: Fix build break when CONFIG_XPS is not set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mlx5e_accel_sk_get_rxq is only used in ktls_rx.c file which already depends on XPS to be compiled, move it from the generic en_accel.h header to be local in ktls_rx.c, to fix the 
below build break In file included from ../drivers/net/ethernet/mellanox/mlx5/core/en_main.c:49:0: ../drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h: In function ‘mlx5e_accel_sk_get_rxq’: ../drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h:153:12: error: implicit declaration of function ‘sk_rx_queue_get’ ... int rxq = sk_rx_queue_get(sk); ^~~~~~~~~~~~~~~ Fixes: 1182f3659357 ("net/mlx5e: kTLS, Add kTLS RX HW offload support") Signed-off-by: Saeed Mahameed Reported-by: Randy Dunlap --- .../ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 10 ---------- .../ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 12 +++++++++++- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h index 7b6abea850d4..110476bdeffb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h @@ -148,16 +148,6 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv, return true; } -static inline int mlx5e_accel_sk_get_rxq(struct sock *sk) -{ - int rxq = sk_rx_queue_get(sk); - - if (unlikely(rxq == -1)) - rxq = 0; - - return rxq; -} - static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv) { return mlx5e_ktls_init_rx(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d7215defd403..acf6d80a6bb7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -547,6 +547,16 @@ void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi) queue_work(rule->priv->tls->rx_wq, &rule->work); } +static int mlx5e_ktls_sk_get_rxq(struct sock *sk) +{ + int rxq = sk_rx_queue_get(sk); + + if (unlikely(rxq == -1)) + rxq = 0; + + return rxq; +} + int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn) @@ -573,7 +583,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->crypto_info = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; - rxq = mlx5e_accel_sk_get_rxq(sk); + rxq = mlx5e_ktls_sk_get_rxq(sk); priv_rx->rxq = rxq; priv_rx->sk = sk; From e21feb88f7d87bc54032db480047e70ba05dcbac Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 9 Apr 2020 19:53:24 +0300 Subject: [PATCH 1131/2056] net/mlx5: Make MLX5_EN_TLS non-prompt TLS runs only over Eth, and the Eth driver is the only user of the core TLS functionality. There is no meaning of having the core functionality without the usage in Eth driver. Hence, let both TLS core implementations depend on MLX5_CORE_EN, and select MLX5_EN_TLS. 
Signed-off-by: Tariq Toukan Reviewed-by: Raed Salem Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 76b39659c39b..7d7148c9b744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -150,7 +150,10 @@ config MLX5_FPGA_TLS bool "Mellanox Technologies TLS Innova support" depends on TLS_DEVICE depends on TLS=y || MLX5_CORE=m + depends on MLX5_CORE_EN depends on MLX5_FPGA + depends on XPS + select MLX5_EN_TLS default n help Build TLS support for the Innova family of network cards by Mellanox @@ -161,21 +164,19 @@ config MLX5_FPGA_TLS config MLX5_TLS bool "Mellanox Technologies TLS Connect-X support" - depends on MLX5_CORE_EN depends on TLS_DEVICE depends on TLS=y || MLX5_CORE=m + depends on MLX5_CORE_EN + depends on XPS select MLX5_ACCEL + select MLX5_EN_TLS default n help Build TLS support for the Connect-X family of network cards by Mellanox Technologies. config MLX5_EN_TLS - bool "TLS cryptography-offload accelaration" - depends on MLX5_CORE_EN - depends on XPS - depends on MLX5_FPGA_TLS || MLX5_TLS - default y + bool help Build support for TLS cryptography-offload accelaration in the NIC. Note: Support for hardware with this capability needs to be selected From 3d5f41ca01244fa2d261b76a0c728d6ac4526645 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sat, 27 Jun 2020 14:12:36 +0300 Subject: [PATCH 1132/2056] net/mlx5: E-switch, Avoid function change handler for non ECPF for non ECPF eswitch manager function, vports are already enabled/disabled when eswitch is enabled/disabled respectively. Simplify function change handler for such eswitch manager function. Therefore, ECPF is the only one which remains PF/VF function change handler. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 522cadc09149..b68e02ad65e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -513,16 +513,9 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev) MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF; } -static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) +static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev) { - /* Ideally device should have the functions changed supported - * capability regardless of it being ECPF or PF wherever such - * event should be processed such as on eswitch manager device. - * However, some ECPF based device might not have this capability - * set. Hence OR for ECPF check to cover such device. - */ - return MLX5_CAP_ESW(dev, esw_functions_changed) || - mlx5_core_is_ecpf_esw_manager(dev); + return mlx5_core_is_ecpf_esw_manager(dev); } static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw) From ea2128fd632c5308569eb789842910aa14796d22 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 26 Jun 2020 23:15:12 +0300 Subject: [PATCH 1133/2056] net/mlx5: E-switch, Reduce dependency on num_vfs during mode set Currently only ECPF allows enabling eswitch when SR-IOV is disabled. 
Enable PF also to enable eswitch when SR-IOV is disabled. Load VF vports when eswitch is already enabled. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 13 ++++++++++++- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 14 +------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c181f6b63f59..e8f900e9577e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1652,7 +1652,17 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) return 0; mutex_lock(&esw->mode_lock); - ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); + if (esw->mode == MLX5_ESWITCH_NONE) { + ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); + } else { + enum mlx5_eswitch_vport_event vport_events; + + vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ? + MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE; + ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events); + if (!ret) + esw->esw_funcs.num_vfs = num_vfs; + } mutex_unlock(&esw->mode_lock); return ret; } @@ -1699,6 +1709,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) mutex_lock(&esw->mode_lock); mlx5_eswitch_disable_locked(esw, clear_vf); + esw->esw_funcs.num_vfs = 0; mutex_unlock(&esw->mode_lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 74a2b76c7c07..db856d70c4f8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1578,13 +1578,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, { int err, err1; - if (esw->mode != MLX5_ESWITCH_LEGACY && - !mlx5_core_is_ecpf_esw_manager(esw->dev)) { - NL_SET_ERR_MSG_MOD(extack, - "Can't set offloads mode, SRIOV legacy not enabled"); - return -EINVAL; - } - mlx5_eswitch_disable_locked(esw, false); err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS, esw->dev->priv.sriov.num_vfs); @@ -2293,7 +2286,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, { u16 cur_mlx5_mode, mlx5_mode = 0; struct mlx5_eswitch *esw; - int err; + int err = 0; esw = mlx5_devlink_eswitch_get(devlink); if (IS_ERR(esw)) @@ -2303,12 +2296,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, return -EINVAL; mutex_lock(&esw->mode_lock); - err = eswitch_devlink_esw_mode_check(esw); - if (err) - goto unlock; - cur_mlx5_mode = esw->mode; - if (cur_mlx5_mode == mlx5_mode) goto unlock; From 9a6ad1ad71fbc5a52617e016a3608d71b91f62e8 Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Mon, 18 Nov 2019 14:30:20 +0200 Subject: [PATCH 1134/2056] net/mlx5: Accel, Add core IPsec support for the Connect-X family This to set the base for downstream patches to support the new IPsec implementation of the Connect-X family. Following modifications made: - Remove accel layer dependency from MLX5_FPGA_IPSEC. - Introduce accel_ipsec_ops, each IPsec device will have to support these ops. 
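Note (editorial sketch, not part of the patch): the accel_ipsec_ops indirection introduced by this commit is the usual const-ops-table dispatch, where the core device holds a pointer to the backend's ops and every wrapper checks both the pointer and the individual hook before forwarding. The snippet below is a simplified stand-in written to illustrate that shape only; the names (sample_dev, sample_ipsec_ops) are placeholders, not the driver's real symbols, which live in accel/ipsec.h.

  /* Illustrative sketch of the ops-table dispatch described above.
   * All identifiers are placeholders, not mlx5 driver symbols.
   */
  #include <stddef.h>
  #include <errno.h>

  struct sample_dev;

  struct sample_ipsec_ops {
          int  (*init)(struct sample_dev *dev);
          void (*cleanup)(struct sample_dev *dev);
          int  (*counters_count)(struct sample_dev *dev);
  };

  struct sample_dev {
          const struct sample_ipsec_ops *ipsec_ops; /* NULL if no backend bound */
  };

  /* Bind the ops only if the backend exists and its init succeeds,
   * mirroring the warn-and-continue behaviour of the accel init path. */
  static void sample_ipsec_init(struct sample_dev *dev,
                                const struct sample_ipsec_ops *ops)
  {
          if (!ops || !ops->init || ops->init(dev))
                  return;                 /* leave ipsec_ops NULL */
          dev->ipsec_ops = ops;
  }

  /* Each wrapper guards against a missing backend or a missing hook,
   * so callers never need to know which implementation is in use. */
  static int sample_ipsec_counters_count(struct sample_dev *dev)
  {
          const struct sample_ipsec_ops *ops = dev->ipsec_ops;

          if (!ops || !ops->counters_count)
                  return -EOPNOTSUPP;

          return ops->counters_count(dev);
  }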
Signed-off-by: Raed Salem Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/accel/ipsec.c | 103 +++++++++++++----- .../ethernet/mellanox/mlx5/core/accel/ipsec.h | 45 ++++---- .../mellanox/mlx5/core/en_accel/ipsec.c | 4 +- .../ethernet/mellanox/mlx5/core/fpga/ipsec.c | 51 ++++++--- .../ethernet/mellanox/mlx5/core/fpga/ipsec.h | 37 ++----- .../net/ethernet/mellanox/mlx5/core/main.c | 9 +- include/linux/mlx5/accel.h | 6 +- include/linux/mlx5/driver.h | 3 + 8 files changed, 154 insertions(+), 104 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 8a4985d8cbfe..628c8887f086 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -31,37 +31,83 @@ * */ -#ifdef CONFIG_MLX5_FPGA_IPSEC - #include #include "accel/ipsec.h" #include "mlx5_core.h" #include "fpga/ipsec.h" +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mlx5_fpga_ipsec_ops(mdev); + int err = 0; + + if (!ipsec_ops || !ipsec_ops->init) { + mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); + return; + } + + err = ipsec_ops->init(mdev); + if (err) { + mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err); + return; + } + + mdev->ipsec_ops = ipsec_ops; +} + +void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) +{ + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->cleanup) + return; + + ipsec_ops->cleanup(mdev); +} + u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev) { - return mlx5_fpga_ipsec_device_caps(mdev); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->device_caps) + return 0; + + return ipsec_ops->device_caps(mdev); } EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps); unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev) { - return mlx5_fpga_ipsec_counters_count(mdev); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_count) + return -EOPNOTSUPP; + + return ipsec_ops->counters_count(mdev); } int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count) { - return mlx5_fpga_ipsec_counters_read(mdev, counters, count); + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->counters_read) + return -EOPNOTSUPP; + + return ipsec_ops->counters_read(mdev, counters, count); } void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle) { + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; __be32 saddr[4] = {}, daddr[4] = {}; + if (!ipsec_ops || !ipsec_ops->create_hw_context) + return ERR_PTR(-EOPNOTSUPP); + if (!xfrm->attrs.is_ipv6) { saddr[3] = xfrm->attrs.saddr.a4; daddr[3] = xfrm->attrs.daddr.a4; @@ -70,29 +116,18 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr)); } - return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr, - daddr, xfrm->attrs.spi, - xfrm->attrs.is_ipv6, sa_handle); + return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi, + xfrm->attrs.is_ipv6, sa_handle); } -void mlx5_accel_esp_free_hw_context(void *context) +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) { - 
mlx5_fpga_ipsec_delete_sa_ctx(context); -} + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; -int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - return mlx5_fpga_ipsec_init(mdev); -} + if (!ipsec_ops || !ipsec_ops->free_hw_context) + return; -void mlx5_accel_ipsec_build_fs_cmds(void) -{ - mlx5_fpga_ipsec_build_fs_cmds(); -} - -void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ - mlx5_fpga_ipsec_cleanup(mdev); + ipsec_ops->free_hw_context(context); } struct mlx5_accel_esp_xfrm * @@ -100,9 +135,13 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags) { + const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops; struct mlx5_accel_esp_xfrm *xfrm; - xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags); + if (!ipsec_ops || !ipsec_ops->esp_create_xfrm) + return ERR_PTR(-EOPNOTSUPP); + + xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags); if (IS_ERR(xfrm)) return xfrm; @@ -113,15 +152,23 @@ EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm); void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { - mlx5_fpga_esp_destroy_xfrm(xfrm); + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm) + return; + + ipsec_ops->esp_destroy_xfrm(xfrm); } EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm); int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { - return mlx5_fpga_esp_modify_xfrm(xfrm, attrs); + const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops; + + if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm) + return -EOPNOTSUPP; + + return ipsec_ops->esp_modify_xfrm(xfrm, attrs); } EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm); - -#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h index e89747674712..fbb9c5415d53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h @@ -37,7 +37,7 @@ #include #include -#ifdef CONFIG_MLX5_FPGA_IPSEC +#ifdef CONFIG_MLX5_ACCEL #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \ MLX5_ACCEL_IPSEC_CAP_DEVICE) @@ -49,12 +49,30 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, struct mlx5_accel_esp_xfrm *xfrm, u32 *sa_handle); -void mlx5_accel_esp_free_hw_context(void *context); +void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context); -int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_accel_ipsec_build_fs_cmds(void); +void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev); void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev); +struct mlx5_accel_ipsec_ops { + u32 (*device_caps)(struct mlx5_core_dev *mdev); + unsigned int (*counters_count)(struct mlx5_core_dev *mdev); + int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count); + void* (*create_hw_context)(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *sa_handle); + void (*free_hw_context)(void *context); + int (*init)(struct mlx5_core_dev *mdev); + void (*cleanup)(struct mlx5_core_dev *mdev); + struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags); + int 
(*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs); + void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm); +}; + #else #define MLX5_IPSEC_DEV(mdev) false @@ -67,23 +85,12 @@ mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev, return NULL; } -static inline void mlx5_accel_esp_free_hw_context(void *context) -{ -} +static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {} -static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) -{ - return 0; -} +static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {} -static inline void mlx5_accel_ipsec_build_fs_cmds(void) -{ -} +static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {} -static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) -{ -} - -#endif +#endif /* CONFIG_MLX5_ACCEL */ #endif /* __MLX5_ACCEL_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index bc55c82b55ba..8d797cd56e26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -342,7 +342,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto out; err_hw_ctx: - mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context); err_xfrm: mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); err_sa_entry: @@ -372,7 +372,7 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) if (sa_entry->hw_context) { flush_workqueue(sa_entry->ipsec->wq); - mlx5_accel_esp_free_hw_context(sa_entry->hw_context); + mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index b463787d6ca1..cc67366495b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -359,7 +359,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) return ret; } -unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) +static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -370,8 +370,8 @@ unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev) number_of_ipsec_counters); } -int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count) +static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, + unsigned int counters_count) { struct mlx5_fpga_device *fdev = mdev->fpga; unsigned int i; @@ -665,12 +665,10 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev, return true; } -void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle) +static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *sa_handle) { struct mlx5_fpga_ipsec_sa_ctx *sa_ctx; struct mlx5_fpga_esp_xfrm *fpga_xfrm = @@ -862,7 +860,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx) mutex_unlock(&fipsec->sa_hash_lock); 
} -void mlx5_fpga_ipsec_delete_sa_ctx(void *context) +static void mlx5_fpga_ipsec_delete_sa_ctx(void *context) { struct mlx5_fpga_esp_xfrm *fpga_xfrm = ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm; @@ -1264,7 +1262,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flo } } -int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) +static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev) { struct mlx5_fpga_conn_attr init_attr = {0}; struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1346,7 +1344,7 @@ static void destroy_rules_rb(struct rb_root *root) } } -void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) +static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1451,7 +1449,7 @@ mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, return 0; } -struct mlx5_accel_esp_xfrm * +static struct mlx5_accel_esp_xfrm * mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, const struct mlx5_accel_esp_xfrm_attrs *attrs, u32 flags) @@ -1479,7 +1477,7 @@ mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, return &fpga_xfrm->accel_xfrm; } -void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) { struct mlx5_fpga_esp_xfrm *fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, @@ -1488,8 +1486,8 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) kfree(fpga_xfrm); } -int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs) +static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) { struct mlx5_core_dev *mdev = xfrm->mdev; struct mlx5_fpga_device *fdev = mdev->fpga; @@ -1560,3 +1558,24 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, mutex_unlock(&fpga_xfrm->lock); return err; } + +static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = { + .device_caps = mlx5_fpga_ipsec_device_caps, + .counters_count = mlx5_fpga_ipsec_counters_count, + .counters_read = mlx5_fpga_ipsec_counters_read, + .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx, + .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx, + .init = mlx5_fpga_ipsec_init, + .cleanup = mlx5_fpga_ipsec_cleanup, + .esp_create_xfrm = mlx5_fpga_esp_create_xfrm, + .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm, + .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm, +}; + +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) +{ + if (!mlx5_fpga_is_ipsec_device(mdev)) + return NULL; + + return &fpga_ipsec_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h index 9ba637f0f0f2..db88eb4c49e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h @@ -38,44 +38,23 @@ #include "fs_cmd.h" #ifdef CONFIG_MLX5_FPGA_IPSEC +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev); u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev); -unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev); -int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters, - unsigned int counters_count); - -void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev, - struct mlx5_accel_esp_xfrm *accel_xfrm, - const __be32 saddr[4], - const __be32 daddr[4], - const __be32 spi, bool is_ipv6, - u32 *sa_handle); -void 
mlx5_fpga_ipsec_delete_sa_ctx(void *context); - -int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev); -void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev); -void mlx5_fpga_ipsec_build_fs_cmds(void); - -struct mlx5_accel_esp_xfrm * -mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev, - const struct mlx5_accel_esp_xfrm_attrs *attrs, - u32 flags); -void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm); -int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, - const struct mlx5_accel_esp_xfrm_attrs *attrs); - const struct mlx5_flow_cmds * mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type); +void mlx5_fpga_ipsec_build_fs_cmds(void); #else -static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) -{ - return 0; -} - +static inline +const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev) +{ return NULL; } +static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; } static inline const struct mlx5_flow_cmds * mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type) { return mlx5_fs_cmd_get_default(type); } +static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {}; + #endif /* CONFIG_MLX5_FPGA_IPSEC */ #endif /* __MLX5_FPGA_IPSEC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 8b658908f044..e32d46c33701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1089,11 +1089,7 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_fpga_start; } - err = mlx5_accel_ipsec_init(dev); - if (err) { - mlx5_core_err(dev, "IPSec device start failed %d\n", err); - goto err_ipsec_start; - } + mlx5_accel_ipsec_init(dev); err = mlx5_accel_tls_init(dev); if (err) { @@ -1135,7 +1131,6 @@ static int mlx5_load(struct mlx5_core_dev *dev) mlx5_accel_tls_cleanup(dev); err_tls_start: mlx5_accel_ipsec_cleanup(dev); -err_ipsec_start: mlx5_fpga_device_stop(dev); err_fpga_start: mlx5_rsc_dump_cleanup(dev); @@ -1628,7 +1623,7 @@ static int __init init(void) get_random_bytes(&sw_owner_id, sizeof(sw_owner_id)); mlx5_core_verify_params(); - mlx5_accel_ipsec_build_fs_cmds(); + mlx5_fpga_ipsec_build_fs_cmds(); mlx5_register_debugfs(); err = pci_register_driver(&mlx5_core_driver); diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 96ebaa94a92e..dacf69516002 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_FPGA_IPSEC +#ifdef CONFIG_MLX5_ACCEL u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); @@ -152,5 +152,5 @@ static inline int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } -#endif -#endif +#endif /* CONFIG_MLX5_ACCEL */ +#endif /* __MLX5_ACCEL_H__ */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1e6ca716635a..6a97ad601991 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -707,6 +707,9 @@ struct mlx5_core_dev { } roce; #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; +#endif +#ifdef CONFIG_MLX5_ACCEL + const struct mlx5_accel_ipsec_ops *ipsec_ops; #endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; From 2d64663cd55972d3915a9efb8d7087e1aeeda17e Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Wed, 29 Jan 2020 18:15:15 +0200 
Subject: [PATCH 1135/2056] net/mlx5: IPsec: Add HW crypto offload support This patch adds support for Connect-X IPsec crypto offload by implementing the IPsec acceleration layer needed routines, which delegates IPsec offloads to Connect-X routines. In Connect-X IPsec, a Security Association (SA) is added or deleted via allocating a HW context of an encryption/decryption key and a HW context of a matching SA (IPsec object). The Security Policy (SP) is added or deleted by creating matching Tx/Rx steering rules whith an action of encryption/decryption respectively, executed using the previously allocated SA HW context. When new xfrm state (SA) is added: - Use a separate crypto key HW context. - Create a separate IPsec context in HW to inlcude the SA properties: - aes-gcm salt. - ICV properties (ICV length, implicit IV). - on supported devices also update ESN. - associate the allocated crypto key with this IPsec context. Introduce a new compilation flag MLX5_IPSEC for it. Downstream patches will implement the Rx,Tx steering and will add the update esn. Signed-off-by: Raed Salem Signed-off-by: Huy Nguyen Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Kconfig | 15 +- .../net/ethernet/mellanox/mlx5/core/Makefile | 1 + .../ethernet/mellanox/mlx5/core/accel/ipsec.c | 7 +- .../mellanox/mlx5/core/accel/ipsec_offload.c | 291 ++++++++++++++++++ .../mellanox/mlx5/core/accel/ipsec_offload.h | 38 +++ .../ethernet/mellanox/mlx5/core/accel/tls.c | 4 +- .../mellanox/mlx5/core/en_accel/ipsec.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6 + .../ethernet/mellanox/mlx5/core/lib/crypto.c | 5 +- .../ethernet/mellanox/mlx5/core/lib/mlx5.h | 8 +- 10 files changed, 372 insertions(+), 9 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 7d7148c9b744..99f1ec3b2575 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -134,12 +134,25 @@ config MLX5_FPGA_IPSEC mlx5_core driver will include the Innova FPGA core and allow building sandbox-specific client drivers. +config MLX5_IPSEC + bool "Mellanox Technologies IPsec Connect-X support" + depends on MLX5_CORE_EN + depends on XFRM_OFFLOAD + depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD + select MLX5_ACCEL + default n + help + Build IPsec support for the Connect-X family of network cards by Mellanox + Technologies. + Note: If you select this option, the mlx5_core driver will include + IPsec support for the Connect-X family. + config MLX5_EN_IPSEC bool "IPSec XFRM cryptography-offload accelaration" depends on MLX5_CORE_EN depends on XFRM_OFFLOAD depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD - depends on MLX5_FPGA_IPSEC + depends on MLX5_FPGA_IPSEC || MLX5_IPSEC default n help Build support for IPsec cryptography-offload accelaration in the NIC. 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 124caec65a34..375c8a2d42fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -64,6 +64,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib # # Accelerations & FPGA # +mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA_TLS) += fpga/tls.o mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c index 628c8887f086..09f5ce97af46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c @@ -36,12 +36,17 @@ #include "accel/ipsec.h" #include "mlx5_core.h" #include "fpga/ipsec.h" +#include "accel/ipsec_offload.h" void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) { - const struct mlx5_accel_ipsec_ops *ipsec_ops = mlx5_fpga_ipsec_ops(mdev); + const struct mlx5_accel_ipsec_ops *ipsec_ops; int err = 0; + ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ? + mlx5_ipsec_offload_ops(mdev) : + mlx5_fpga_ipsec_ops(mdev); + if (!ipsec_ops || !ipsec_ops->init) { mlx5_core_dbg(mdev, "IPsec ops is not supported\n"); return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c new file mode 100644 index 000000000000..1c8923f42b09 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c @@ -0,0 +1,291 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIBt +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#include "mlx5_core.h" +#include "ipsec_offload.h" +#include "lib/mlx5.h" + +#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \ + MLX5_ACCEL_IPSEC_CAP_LSO) +struct mlx5_ipsec_sa_ctx { + struct rhash_head hash; + u32 enc_key_id; + u32 ipsec_obj_id; + /* hw ctx */ + struct mlx5_core_dev *dev; + struct mlx5_ipsec_esp_xfrm *mxfrm; +}; + +struct mlx5_ipsec_esp_xfrm { + /* reference counter of SA ctx */ + struct mlx5_ipsec_sa_ctx *sa_ctx; + struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */ + struct mlx5_accel_esp_xfrm accel_xfrm; +}; + +static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) +{ + u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS; + + if (!mlx5_is_ipsec_device(mdev)) + return 0; + + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && + MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) + caps |= MLX5_ACCEL_IPSEC_CAP_ESP; + + if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + caps |= MLX5_ACCEL_IPSEC_CAP_ESN; + caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN; + } + + /* We can accommodate up to 2^24 different IPsec objects + * because we use up to 24 bit in flow table metadata + * to hold the IPsec Object unique handle. 
+ */ + WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24); + return caps; +} + +static int +mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) { + mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n", + attrs->replay_type); + return -EOPNOTSUPP; + } + + if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) { + mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n", + attrs->keymat_type); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.iv_algo != + MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) { + mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n", + attrs->keymat.aes_gcm.iv_algo); + return -EOPNOTSUPP; + } + + if (attrs->keymat.aes_gcm.key_len != 128 && + attrs->keymat.aes_gcm.key_len != 256) { + mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n", + attrs->keymat.aes_gcm.key_len); + return -EOPNOTSUPP; + } + + if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) && + !MLX5_CAP_IPSEC(mdev, ipsec_esn)) { + mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n"); + return -EOPNOTSUPP; + } + + return 0; +} + +static struct mlx5_accel_esp_xfrm * +mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev, + const struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 flags) +{ + struct mlx5_ipsec_esp_xfrm *mxfrm; + int err = 0; + + err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs); + if (err) + return ERR_PTR(err); + + mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL); + if (!mxfrm) + return ERR_PTR(-ENOMEM); + + mutex_init(&mxfrm->lock); + memcpy(&mxfrm->accel_xfrm.attrs, attrs, + sizeof(mxfrm->accel_xfrm.attrs)); + + return &mxfrm->accel_xfrm; +} + +static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm) +{ + struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, + accel_xfrm); + + /* assuming no sa_ctx are connected to this xfrm_ctx */ + WARN_ON(mxfrm->sa_ctx); + kfree(mxfrm); +} + +struct mlx5_ipsec_obj_attrs { + const struct aes_gcm_keymat *aes_gcm; + u32 accel_flags; + u32 esn_msb; + u32 enc_key_id; +}; + +static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_obj_attrs *attrs, + u32 *ipsec_id) +{ + const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {}; + void *obj, *salt_p, *salt_iv_p; + int err; + + obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object); + + /* salt and seq_iv */ + salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt); + memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt)); + + switch (aes_gcm->icv_len) { + case 64: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_8B); + break; + case 96: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_12B); + break; + case 128: + MLX5_SET(ipsec_obj, obj, icv_length, + MLX5_IPSEC_OBJECT_ICV_LEN_16B); + break; + default: + return -EINVAL; + } + salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv); + memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv)); + /* esn */ + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) { + MLX5_SET(ipsec_obj, obj, esn_en, 1); + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + } 
+ + MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id); + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_GENERAL_OBJECT_TYPES_IPSEC); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (!err) + *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); + + return err; +} + +static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id) +{ + u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {}; + u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; + + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, + MLX5_GENERAL_OBJECT_TYPES_IPSEC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + + mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev, + struct mlx5_accel_esp_xfrm *accel_xfrm, + const __be32 saddr[4], const __be32 daddr[4], + const __be32 spi, bool is_ipv6, u32 *hw_handle) +{ + struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs; + struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm; + struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; + struct mlx5_ipsec_esp_xfrm *mxfrm; + struct mlx5_ipsec_sa_ctx *sa_ctx; + int err; + + /* alloc SA context */ + sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL); + if (!sa_ctx) + return ERR_PTR(-ENOMEM); + + sa_ctx->dev = mdev; + + mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); + mutex_lock(&mxfrm->lock); + sa_ctx->mxfrm = mxfrm; + + /* key */ + err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key, + aes_gcm->key_len / BITS_PER_BYTE, + MLX5_ACCEL_OBJ_IPSEC_KEY, + &sa_ctx->enc_key_id); + if (err) { + mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err); + goto err_sa_ctx; + } + + ipsec_attrs.aes_gcm = aes_gcm; + ipsec_attrs.accel_flags = accel_xfrm->attrs.flags; + ipsec_attrs.esn_msb = accel_xfrm->attrs.esn; + ipsec_attrs.enc_key_id = sa_ctx->enc_key_id; + err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs, + &sa_ctx->ipsec_obj_id); + if (err) { + mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err); + goto err_enc_key; + } + + *hw_handle = sa_ctx->ipsec_obj_id; + mxfrm->sa_ctx = sa_ctx; + mutex_unlock(&mxfrm->lock); + + return sa_ctx; + +err_enc_key: + mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id); +err_sa_ctx: + mutex_unlock(&mxfrm->lock); + kfree(sa_ctx); + return ERR_PTR(err); +} + +static void mlx5_ipsec_offload_delete_sa_ctx(void *context) +{ + struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context; + struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm; + + mutex_lock(&mxfrm->lock); + mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id); + mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id); + kfree(sa_ctx); + mxfrm->sa_ctx = NULL; + mutex_unlock(&mxfrm->lock); +} + +static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) +{ + return 0; +} + +static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { + .device_caps = mlx5_ipsec_offload_device_caps, + .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, + .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx, + .init = mlx5_ipsec_offload_init, + .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, + .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, +}; + +const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) +{ + 
if (!mlx5_ipsec_offload_device_caps(mdev)) + return NULL; + + return &ipsec_offload_ops; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h new file mode 100644 index 000000000000..970c66d19c1d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#ifndef __MLX5_IPSEC_OFFLOAD_H__ +#define __MLX5_IPSEC_OFFLOAD_H__ + +#include +#include "accel/ipsec.h" + +#ifdef CONFIG_MLX5_IPSEC + +const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev); +static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, ipsec_offload)) + return false; + + if (!MLX5_CAP_GEN(mdev, log_max_dek)) + return false; + + if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) & + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return false; + + return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) && + MLX5_CAP_ETH(mdev, insert_trailer); +} + +#else +static inline const struct mlx5_accel_ipsec_ops * +mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; } +static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev) +{ + return false; +} + +#endif /* CONFIG_MLX5_IPSEC */ +#endif /* __MLX5_IPSEC_OFFLOAD_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c index cbf3d76c05a8..6c2b86a26863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c @@ -113,7 +113,9 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev, return -EINVAL; } - return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id); + return mlx5_create_encryption_key(mdev, key, sz_bytes, + MLX5_ACCEL_OBJ_TLS_KEY, + p_key_id); } void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 8d797cd56e26..147191b7a98a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -111,7 +111,7 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry) static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) { struct xfrm_replay_state_esn *replay_esn; - u32 seq_bottom; + u32 seq_bottom = 0; u8 overlap; u32 *esn; @@ -121,7 +121,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) } replay_esn = sa_entry->x->replay_esn; - seq_bottom = replay_esn->seq - replay_esn->replay_window + 1; + if (replay_esn->seq >= replay_esn->replay_window) + seq_bottom = replay_esn->seq - replay_esn->replay_window + 1; + overlap = sa_entry->esn_state.overlap; sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index c3095863372c..02558ac2ace6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -250,6 +250,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, ipsec_offload)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC); + if (err) + return err; + } + return 0; } diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c index dcea87ec5977..57eb91bcbca7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c @@ -6,7 +6,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, void *key, u32 sz_bytes, - u32 *p_key_id) + u32 key_type, u32 *p_key_id) { u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {}; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; @@ -41,8 +41,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, memcpy(key_p, key, sz_bytes); MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size); - MLX5_SET(encryption_key_obj, obj, key_type, - MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS); + MLX5_SET(encryption_key_obj, obj, key_type, key_type); MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index 249539247e2e..d046db7bb047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -80,8 +80,14 @@ void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats) int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data); /* Crypto */ +enum { + MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS, + MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC, +}; + int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, - void *key, u32 sz_bytes, u32 *p_key_id); + void *key, u32 sz_bytes, + u32 key_type, u32 *p_key_id); void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id); static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) From 78fb6122fa2b6b55fafee1b32cd94913ad72f8a4 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Wed, 8 Apr 2020 20:09:05 -0500 Subject: [PATCH 1136/2056] net/mlx5: Add IPsec related Flow steering entry's fields Add FTE actions IPsec ENCRYPT/DECRYPT Add ipsec_obj_id field in FTE Add new action field MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME Signed-off-by: Huy Nguyen Reviewed-by: Raed Salem Signed-off-by: Saeed Mahameed --- include/linux/mlx5/fs.h | 5 ++++- include/linux/mlx5/mlx5_ifc.h | 12 ++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 6c5aa0a21425..92d991d93757 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -207,7 +207,10 @@ struct mlx5_flow_act { u32 action; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; - uintptr_t esp_id; + union { + u32 ipsec_obj_id; + uintptr_t esp_id; + }; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 791766e15d5c..9e64710bc54f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -416,7 +416,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 table_miss_action_domain[0x1]; u8 termination_table[0x1]; u8 reformat_and_fwd_to_table[0x1]; - u8 reserved_at_1a[0x6]; + u8 reserved_at_1a[0x2]; + u8 ipsec_encrypt[0x1]; + u8 ipsec_decrypt[0x1]; + u8 reserved_at_1e[0x2]; + u8 termination_table_raw_traffic[0x1]; u8 reserved_at_21[0x1]; u8 log_max_ft_size[0x6]; @@ -2965,6 +2969,8 @@ enum { 
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, }; enum { @@ -3006,7 +3012,8 @@ struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan_2; - u8 reserved_at_120[0xe0]; + u8 ipsec_obj_id[0x20]; + u8 reserved_at_140[0xc0]; struct mlx5_ifc_fte_match_param_bits match_value; @@ -5752,6 +5759,7 @@ enum { MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, + MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { From 5e466345291a91d1722e7198497198becda29e22 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Fri, 5 Jun 2020 16:36:35 -0500 Subject: [PATCH 1137/2056] net/mlx5e: IPsec: Add IPsec steering in local NIC RX Introduce decrypt FT, the RX error FT and the default rules. The IPsec RX decrypt flow table is pointed by the TTC (Traffic Type Classifier) ESP steering rules. The decrypt flow table has two flow groups. The first flow group keeps the decrypt steering rule programmed via the "ip xfrm s" interface. The second flow group has a default rule to forward all non-offloaded ESP packet to the TTC ESP default RSS TIR. The RX error flow table is the destination of the decrypt steering rules in the IPsec RX decrypt flow table. It has a fixed rule with single copy action that copies ipsec_syndrome to metadata_regB[0:6]. The IPsec syndrome is used to filter out non-ipsec packet and to return the IPsec crypto offload status in Rx flow. The destination of RX error flow table is the TTC ESP default RSS TIR. All the FTs (decrypt FT and error FT) are created only when IPsec SAs are added. If there is no IPsec SAs, the FTs are removed. 
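Note (editorial sketch, not part of the patch): the "create the flow tables on the first SA, remove them when the last SA goes away" behaviour described above is a refcounted get/put pattern under a mutex; the patch implements it in rx_ft_get()/rx_ft_put() further down. The snippet below is a generic, simplified illustration of that pattern only; sample_ft_ctx, sample_create() and sample_destroy() are placeholders, not driver symbols.

  /* Minimal sketch of the refcounted lazy create/destroy pattern used for
   * the IPsec RX flow tables. All names are illustrative placeholders.
   */
  #include <pthread.h>

  struct sample_ft_ctx {
          pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
          unsigned int refcnt;
          int created;            /* stands in for the real FT handles */
  };

  static int  sample_create(struct sample_ft_ctx *c)  { c->created = 1; return 0; }
  static void sample_destroy(struct sample_ft_ctx *c) { c->created = 0; }

  /* First user creates the tables and connects steering; later users
   * only bump the reference count. */
  static int sample_ft_get(struct sample_ft_ctx *ctx)
  {
          int err = 0;

          pthread_mutex_lock(&ctx->lock);
          if (ctx->refcnt++)
                  goto out;
          err = sample_create(ctx);
          if (err)
                  ctx->refcnt--;          /* roll back on failure */
  out:
          pthread_mutex_unlock(&ctx->lock);
          return err;
  }

  /* Last user disconnects steering and tears the tables down. */
  static void sample_ft_put(struct sample_ft_ctx *ctx)
  {
          pthread_mutex_lock(&ctx->lock);
          if (--ctx->refcnt)
                  goto out;
          sample_destroy(ctx);
  out:
          pthread_mutex_unlock(&ctx->lock);
  }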
Signed-off-by: Huy Nguyen Reviewed-by: Boris Pismenny Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../mellanox/mlx5/core/accel/ipsec_offload.c | 6 + .../net/ethernet/mellanox/mlx5/core/en/fs.h | 6 +- .../mellanox/mlx5/core/en_accel/ipsec.c | 37 +- .../mellanox/mlx5/core/en_accel/ipsec.h | 10 + .../mellanox/mlx5/core/en_accel/ipsec_fs.c | 544 ++++++++++++++++++ .../mellanox/mlx5/core/en_accel/ipsec_fs.h | 26 + .../net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2 + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 2 +- 9 files changed, 630 insertions(+), 5 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 375c8a2d42fa..10e6886c96ba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -72,7 +72,7 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += lib/crypto.o accel/tls.o accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ - en_accel/ipsec_stats.o + en_accel/ipsec_stats.o en_accel/ipsec_fs.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \ en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c index 1c8923f42b09..c49699d580ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c @@ -4,9 +4,11 @@ #include "mlx5_core.h" #include "ipsec_offload.h" #include "lib/mlx5.h" +#include "en_accel/ipsec_fs.h" #define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \ MLX5_ACCEL_IPSEC_CAP_LSO) + struct mlx5_ipsec_sa_ctx { struct rhash_head hash; u32 enc_key_id; @@ -30,6 +32,10 @@ static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev) if (!mlx5_is_ipsec_device(mdev)) return 0; + if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) || + !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt)) + return 0; + if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) && MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt)) caps |= MLX5_ACCEL_IPSEC_CAP_ESP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 6f4767324044..6fdcd5e69476 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -129,7 +129,11 @@ enum { MLX5E_ACCEL_FS_TCP_FT_LEVEL, #endif #ifdef CONFIG_MLX5_EN_ARFS - MLX5E_ARFS_FT_LEVEL + MLX5E_ARFS_FT_LEVEL, +#endif +#ifdef CONFIG_MLX5_EN_IPSEC + MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1, + MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL, #endif }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 147191b7a98a..d39989cddd90 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -40,7 +40,7 @@ #include "en.h" #include "en_accel/ipsec.h" #include "en_accel/ipsec_rxtx.h" - +#include "en_accel/ipsec_fs.h" static struct 
mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x) { @@ -284,6 +284,27 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x) return 0; } +static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv, + struct mlx5e_ipsec_sa_entry *sa_entry) +{ + if (!mlx5_is_ipsec_device(priv->mdev)) + return 0; + + return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs, + sa_entry->ipsec_obj_id, + &sa_entry->ipsec_rule); +} + +static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv, + struct mlx5e_ipsec_sa_entry *sa_entry) +{ + if (!mlx5_is_ipsec_device(priv->mdev)) + return; + + mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs, + &sa_entry->ipsec_rule); +} + static int mlx5e_xfrm_add_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = NULL; @@ -331,10 +352,15 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) goto err_xfrm; } + sa_entry->ipsec_obj_id = sa_handle; + err = mlx5e_xfrm_fs_add_rule(priv, sa_entry); + if (err) + goto err_hw_ctx; + if (x->xso.flags & XFRM_OFFLOAD_INBOUND) { err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle); if (err) - goto err_hw_ctx; + goto err_add_rule; } else { sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ? mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; @@ -343,6 +369,8 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x) x->xso.offload_handle = (unsigned long)sa_entry; goto out; +err_add_rule: + mlx5e_xfrm_fs_del_rule(priv, sa_entry); err_hw_ctx: mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context); err_xfrm: @@ -368,12 +396,14 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) static void mlx5e_xfrm_free_state(struct xfrm_state *x) { struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); + struct mlx5e_priv *priv = netdev_priv(x->xso.dev); if (!sa_entry) return; if (sa_entry->hw_context) { flush_workqueue(sa_entry->ipsec->wq); + mlx5e_xfrm_fs_del_rule(priv, sa_entry); mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context); mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm); } @@ -407,6 +437,8 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv) kfree(ipsec); return -ENOMEM; } + + mlx5e_accel_ipsec_fs_init(priv); netdev_dbg(priv->netdev, "IPSec attached to netdevice\n"); return 0; } @@ -418,6 +450,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv) if (!ipsec) return; + mlx5e_accel_ipsec_fs_cleanup(priv); destroy_workqueue(ipsec->wq); ida_destroy(&ipsec->halloc); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h index c85151a1e008..0fc8b4d4f4a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h @@ -75,6 +75,8 @@ struct mlx5e_ipsec_stats { u64 ipsec_cmd_drop; }; +struct mlx5e_accel_fs_esp; + struct mlx5e_ipsec { struct mlx5e_priv *en_priv; DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS); @@ -84,6 +86,7 @@ struct mlx5e_ipsec { struct mlx5e_ipsec_sw_stats sw_stats; struct mlx5e_ipsec_stats stats; struct workqueue_struct *wq; + struct mlx5e_accel_fs_esp *rx_fs; }; struct mlx5e_ipsec_esn_state { @@ -92,6 +95,11 @@ struct mlx5e_ipsec_esn_state { u8 overlap: 1; }; +struct mlx5e_ipsec_rule { + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *set_modify_hdr; +}; + struct mlx5e_ipsec_sa_entry { struct hlist_node hlist; /* Item in SADB_RX hashtable */ struct mlx5e_ipsec_esn_state esn_state; @@ -102,6 +110,8 @@ struct mlx5e_ipsec_sa_entry { void *hw_context; void (*set_iv_op)(struct sk_buff 
*skb, struct xfrm_state *x, struct xfrm_offload *xo); + u32 ipsec_obj_id; + struct mlx5e_ipsec_rule ipsec_rule; }; void mlx5e_ipsec_build_inverse_table(void); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c new file mode 100644 index 000000000000..429428bbc903 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */ + +#include +#include "accel/ipsec_offload.h" +#include "ipsec_fs.h" +#include "fs_core.h" + +#define NUM_IPSEC_FTE BIT(15) + +enum accel_fs_esp_type { + ACCEL_FS_ESP4, + ACCEL_FS_ESP6, + ACCEL_FS_ESP_NUM_TYPES, +}; + +struct mlx5e_ipsec_rx_err { + struct mlx5_flow_table *ft; + struct mlx5_flow_handle *rule; + struct mlx5_modify_hdr *copy_modify_hdr; +}; + +struct mlx5e_accel_fs_esp_prot { + struct mlx5_flow_table *ft; + struct mlx5_flow_group *miss_group; + struct mlx5_flow_handle *miss_rule; + struct mlx5_flow_destination default_dest; + struct mlx5e_ipsec_rx_err rx_err; + u32 refcnt; + struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */ +}; + +struct mlx5e_accel_fs_esp { + struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES]; +}; + +/* IPsec RX flow steering */ +static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i) +{ + if (i == ACCEL_FS_ESP4) + return MLX5E_TT_IPV4_IPSEC_ESP; + return MLX5E_TT_IPV6_IPSEC_ESP; +} + +static int rx_err_add_rule(struct mlx5e_priv *priv, + struct mlx5e_accel_fs_esp_prot *fs_prot, + struct mlx5e_ipsec_rx_err *rx_err) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_flow_act flow_act = {}; + struct mlx5_modify_hdr *modify_hdr; + struct mlx5_flow_handle *fte; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = kzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */ + MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); + MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME); + MLX5_SET(copy_action_in, action, src_offset, 0); + MLX5_SET(copy_action_in, action, length, 7); + MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(copy_action_in, action, dst_offset, 0); + + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, + 1, action); + + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + netdev_err(priv->netdev, + "fail to alloc ipsec copy modify_header_id err=%d\n", err); + goto out_spec; + } + + /* create fte */ + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + flow_act.modify_hdr = modify_hdr; + fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act, + &fs_prot->default_dest, 1); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err); + goto out; + } + + rx_err->rule = fte; + rx_err->copy_modify_hdr = modify_hdr; + +out: + if (err) + mlx5_modify_header_dealloc(mdev, modify_hdr); +out_spec: + kfree(spec); + return err; +} + +static void rx_err_del_rule(struct mlx5e_priv *priv, + struct mlx5e_ipsec_rx_err *rx_err) +{ + if (rx_err->rule) { + mlx5_del_flow_rules(rx_err->rule); + rx_err->rule = NULL; + } + + if (rx_err->copy_modify_hdr) { + mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr); + 
rx_err->copy_modify_hdr = NULL; + } +} + +static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err) +{ + rx_err_del_rule(priv, rx_err); + + if (rx_err->ft) { + mlx5_destroy_flow_table(rx_err->ft); + rx_err->ft = NULL; + } +} + +static int rx_err_create_ft(struct mlx5e_priv *priv, + struct mlx5e_accel_fs_esp_prot *fs_prot, + struct mlx5e_ipsec_rx_err *rx_err) +{ + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_table *ft; + int err; + + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err); + return err; + } + + rx_err->ft = ft; + err = rx_err_add_rule(priv, fs_prot, rx_err); + if (err) + goto out_err; + + return 0; + +out_err: + mlx5_destroy_flow_table(ft); + rx_err->ft = NULL; + return err; +} + +static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot) +{ + if (fs_prot->miss_rule) { + mlx5_del_flow_rules(fs_prot->miss_rule); + fs_prot->miss_rule = NULL; + } + + if (fs_prot->miss_group) { + mlx5_destroy_flow_group(fs_prot->miss_group); + fs_prot->miss_group = NULL; + } + + if (fs_prot->ft) { + mlx5_destroy_flow_table(fs_prot->ft); + fs_prot->ft = NULL; + } +} + +static int rx_fs_create(struct mlx5e_priv *priv, + struct mlx5e_accel_fs_esp_prot *fs_prot) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_group *miss_group; + struct mlx5_flow_handle *miss_rule; + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_spec *spec; + struct mlx5_flow_table *ft; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!flow_group_in || !spec) { + err = -ENOMEM; + goto out; + } + + /* Create FT */ + ft_attr.max_fte = NUM_IPSEC_FTE; + ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL; + ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.autogroup.num_reserved_entries = 1; + ft_attr.autogroup.max_num_groups = 1; + ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + if (IS_ERR(ft)) { + err = PTR_ERR(ft); + netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err); + goto out; + } + fs_prot->ft = ft; + + /* Create miss_group */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1); + miss_group = mlx5_create_flow_group(ft, flow_group_in); + if (IS_ERR(miss_group)) { + err = PTR_ERR(miss_group); + netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err); + goto out; + } + fs_prot->miss_group = miss_group; + + /* Create miss rule */ + miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1); + if (IS_ERR(miss_rule)) { + err = PTR_ERR(miss_rule); + netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err); + goto out; + } + fs_prot->miss_rule = miss_rule; + +out: + kfree(flow_group_in); + kfree(spec); + return err; +} + +static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5e_accel_fs_esp *accel_esp; + + accel_esp = priv->ipsec->rx_fs; + + /* The netdev unreg already happened, so all offloaded rule are already removed */ + fs_prot = &accel_esp->fs_prot[type]; + + 
rx_fs_destroy(fs_prot); + + rx_err_destroy_ft(priv, &fs_prot->rx_err); + + return 0; +} + +static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5e_accel_fs_esp *accel_esp; + int err; + + accel_esp = priv->ipsec->rx_fs; + fs_prot = &accel_esp->fs_prot[type]; + + fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type)); + + err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err); + if (err) + return err; + + err = rx_fs_create(priv, fs_prot); + if (err) + rx_destroy(priv, type); + + return err; +} + +static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5_flow_destination dest = {}; + struct mlx5e_accel_fs_esp *accel_esp; + int err = 0; + + accel_esp = priv->ipsec->rx_fs; + fs_prot = &accel_esp->fs_prot[type]; + mutex_lock(&fs_prot->prot_mutex); + if (fs_prot->refcnt++) + goto out; + + /* create FT */ + err = rx_create(priv, type); + if (err) { + fs_prot->refcnt--; + goto out; + } + + /* connect */ + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = fs_prot->ft; + mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest); + +out: + mutex_unlock(&fs_prot->prot_mutex); + return err; +} + +static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5e_accel_fs_esp *accel_esp; + + accel_esp = priv->ipsec->rx_fs; + fs_prot = &accel_esp->fs_prot[type]; + mutex_lock(&fs_prot->prot_mutex); + if (--fs_prot->refcnt) + goto out; + + /* disconnect */ + mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type)); + + /* remove FT */ + rx_destroy(priv, type); + +out: + mutex_unlock(&fs_prot->prot_mutex); +} + +static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 ipsec_obj_id, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act) +{ + u8 ip_version = attrs->is_ipv6 ? 
6 : 4; + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS; + + /* ip_version */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version); + + /* Non fragmented */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0); + + /* ESP header */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP); + + /* SPI number */ + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, + be32_to_cpu(attrs->spi)); + + if (ip_version == 4) { + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &attrs->saddr.a4, 4); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &attrs->daddr.a4, 4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + } else { + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &attrs->saddr.a6, 16); + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &attrs->daddr.a6, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + } + + flow_act->ipsec_obj_id = ipsec_obj_id; + flow_act->flags |= FLOW_ACT_NO_APPEND; +} + +static int rx_add_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 ipsec_obj_id, + struct mlx5e_ipsec_rule *ipsec_rule) +{ + u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; + struct mlx5_modify_hdr *modify_hdr = NULL; + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5_flow_destination dest = {}; + struct mlx5e_accel_fs_esp *accel_esp; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_handle *rule; + enum accel_fs_esp_type type; + struct mlx5_flow_spec *spec; + int err = 0; + + accel_esp = priv->ipsec->rx_fs; + type = attrs->is_ipv6 ? 
ACCEL_FS_ESP6 : ACCEL_FS_ESP4; + fs_prot = &accel_esp->fs_prot[type]; + + err = rx_ft_get(priv, type); + if (err) + return err; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) { + err = -ENOMEM; + goto out_err; + } + + setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act); + + /* Set 1 bit ipsec marker */ + /* Set 24 bit ipsec_obj_id */ + MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); + MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); + MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1); + MLX5_SET(set_action_in, action, offset, 7); + MLX5_SET(set_action_in, action, length, 25); + + modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL, + 1, action); + if (IS_ERR(modify_hdr)) { + err = PTR_ERR(modify_hdr); + netdev_err(priv->netdev, + "fail to alloc ipsec set modify_header_id err=%d\n", err); + modify_hdr = NULL; + goto out_err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT | + MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + flow_act.modify_hdr = modify_hdr; + dest.ft = fs_prot->rx_err.ft; + rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n", + attrs->action, err); + goto out_err; + } + + ipsec_rule->rule = rule; + ipsec_rule->set_modify_hdr = modify_hdr; + goto out; + +out_err: + if (modify_hdr) + mlx5_modify_header_dealloc(priv->mdev, modify_hdr); + rx_ft_put(priv, type); + +out: + kvfree(spec); + return err; +} + +static void rx_del_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule) +{ + mlx5_del_flow_rules(ipsec_rule->rule); + ipsec_rule->rule = NULL; + + mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr); + ipsec_rule->set_modify_hdr = NULL; + + rx_ft_put(priv, attrs->is_ipv6 ? 
ACCEL_FS_ESP6 : ACCEL_FS_ESP4); +} + +int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 ipsec_obj_id, + struct mlx5e_ipsec_rule *ipsec_rule) +{ + if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT) + return -EOPNOTSUPP; + + return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule); +} + +void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule) +{ + if (!priv->ipsec->rx_fs) + return; + + rx_del_rule(priv, attrs, ipsec_rule); +} + +static void fs_cleanup_rx(struct mlx5e_priv *priv) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5e_accel_fs_esp *accel_esp; + enum accel_fs_esp_type i; + + accel_esp = priv->ipsec->rx_fs; + for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { + fs_prot = &accel_esp->fs_prot[i]; + mutex_destroy(&fs_prot->prot_mutex); + WARN_ON(fs_prot->refcnt); + } + kfree(priv->ipsec->rx_fs); + priv->ipsec->rx_fs = NULL; +} + +static int fs_init_rx(struct mlx5e_priv *priv) +{ + struct mlx5e_accel_fs_esp_prot *fs_prot; + struct mlx5e_accel_fs_esp *accel_esp; + enum accel_fs_esp_type i; + + priv->ipsec->rx_fs = + kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL); + if (!priv->ipsec->rx_fs) + return -ENOMEM; + + accel_esp = priv->ipsec->rx_fs; + for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) { + fs_prot = &accel_esp->fs_prot[i]; + mutex_init(&fs_prot->prot_mutex); + } + + return 0; +} + +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) +{ + if (!priv->ipsec->rx_fs) + return; + + fs_cleanup_rx(priv); +} + +int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) +{ + if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec) + return -EOPNOTSUPP; + + return fs_init_rx(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h new file mode 100644 index 000000000000..3389b3bb3ef8 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_IPSEC_STEERING_H__ +#define __MLX5_IPSEC_STEERING_H__ + +#include "en.h" +#include "ipsec.h" +#include "accel/ipsec_offload.h" +#include "en/fs.h" + +#ifdef CONFIG_MLX5_EN_IPSEC +void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv); +int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv); +int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + u32 ipsec_obj_id, + struct mlx5e_ipsec_rule *ipsec_rule); +void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv, + struct mlx5_accel_esp_xfrm_attrs *attrs, + struct mlx5e_ipsec_rule *ipsec_rule); +#else +static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; } +#endif +#endif /* __MLX5_IPSEC_STEERING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 465a1076a477..fee169732de7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -459,6 +459,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_hdr->id); + MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id); + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 785b2960d6b5..6904ad96af48 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -105,7 +105,7 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Vlan, mac, ttc, inner ttc, {aRFS/accel} */ +/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */ #define KERNEL_NIC_PRIO_NUM_LEVELS 6 #define KERNEL_NIC_NUM_PRIOS 1 /* One more level for tc */ From b2ac7541e3777f325c49d900550c9e3dd10c0eda Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Thu, 24 Oct 2019 16:11:28 +0300 Subject: [PATCH 1138/2056] net/mlx5e: IPsec: Add Connect-X IPsec Rx data path offload On the receive flow, inspect received packets for an IPsec offload indication using the CQE; for IPsec-offloaded packets, propagate the offload status and the SA handle to the stack for further processing. Supported statuses: - Offload ok. - Authentication failure. - Bad trailer indication. Connect-X IPsec does not use mlx5e_ipsec_handle_rx_cqe. For RX-only offload, we see the BW gain. Below is the iperf3 performance report on two servers of 24 cores Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz with ConnectX6-DX. We use one thread per IPsec tunnel.
---------------------------------------------------------------------
Mode           | Num tunnel | BW     | Send CPU util | Recv CPU util
               |            | (Gbps) | (Average %)   | (Average %)
---------------------------------------------------------------------
Crypto offload | 1          | 4.6    | 4.2           | 14.5
---------------------------------------------------------------------
Crypto offload | 24         | 38     | 73            | 63
---------------------------------------------------------------------
Non-offload    | 1          | 4      | 4             | 13
---------------------------------------------------------------------
Non-offload    | 24         | 23     | 52            | 67
Signed-off-by: Raed Salem Reviewed-by: Boris Pismenny Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 56 +++++++++++++++++++ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 22 +++++++- .../net/ethernet/mellanox/mlx5/core/en_main.c | 4 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 10 +++- 4 files changed, 88 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 824b87ac8f9e..93a8d68815ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -360,6 +360,62 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, return skb; } +enum { + MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED, + MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED, + MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER, +}; + +void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe) +{ + u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata); + u8 ipsec_syndrome = ipsec_meta_data & 0xFF; + struct mlx5e_priv *priv; + struct xfrm_offload *xo; + struct xfrm_state *xs; + struct sec_path *sp; + u32 sa_handle; + + sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data); + priv = netdev_priv(netdev); + sp = secpath_set(skb); + if (unlikely(!sp)) { + atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); + return; + } + + xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle); + if (unlikely(!xs)) { + atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss); + return; + } + + sp = skb_sec_path(skb); + sp->xvec[sp->len++] = xs; + sp->olen++; + + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + + switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) { + case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: + xo->status = CRYPTO_SUCCESS; + if (WARN_ON_ONCE(priv->ipsec->no_trailer)) + xo->flags |= XFRM_ESP_NO_TRAILER; + break; + case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED: + xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED; + break; + case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER: + xo->status = CRYPTO_INVALID_PACKET_SYNTAX; + break; + default: + atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome); + } +} + bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index ba02643586a5..2a47673da5a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -34,13 +34,17 @@ #ifndef __MLX5E_IPSEC_RXTX_H__ #define __MLX5E_IPSEC_RXTX_H__ -#ifdef CONFIG_MLX5_EN_IPSEC - #include #include #include "en.h" #include "en/txrx.h" +#define MLX5_IPSEC_METADATA_MARKER_MASK
(0x80) +#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F) +#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF) + +#ifdef CONFIG_MLX5_EN_IPSEC + struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 *cqe_bcnt); void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -55,7 +59,21 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x, bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv, struct mlx5_wqe_eth_seg *eseg, struct sk_buff *skb); +void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe); +static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) +{ + return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata)); +} +#else +static inline +void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev, + struct sk_buff *skb, + struct mlx5_cqe64 *cqe) +{} +static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; } #endif /* CONFIG_MLX5_EN_IPSEC */ #endif /* __MLX5E_IPSEC_RXTX_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4e5d83f6334a..88ea1908cb14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -65,6 +65,7 @@ #include "en/hv_vhca_stats.h" #include "en/devlink.h" #include "lib/mlx5.h" +#include "fpga/ipsec.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -496,7 +497,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; #ifdef CONFIG_MLX5_EN_IPSEC - if (c->priv->ipsec) + if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && + c->priv->ipsec) rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; else #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 350f9c54e508..8b24e44f860a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -973,9 +973,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, goto csum_unnecessary; if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { - if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) + u8 ipproto = get_ip_proto(skb, network_depth, proto); + + if (unlikely(ipproto == IPPROTO_SCTP)) goto csum_unnecessary; + if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) + goto csum_none; + stats->csum_complete++; skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); @@ -1021,6 +1026,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); + if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) + mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); + if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); From 7ed92f97a1ad308ca77bc73eb62156ec8c2bf51a Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Sun, 29 Dec 2019 17:13:53 +0200 Subject: [PATCH 1139/2056] net/mlx5e: IPsec: Add Connect-X IPsec ESN update offload support Synchronize offloading device ESN with xfrm received SN by updating an existing IPsec HW context with the new SN. 
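With extended sequence numbers only the low 32 bits of the sequence counter travel in each ESP packet, so the device must be told the upper 32 bits and whether the replay window currently straddles a 32-bit wrap. The fragment below is an illustrative sketch only, not part of this patch: struct esn_sw_state and esn_sw_state_to_hw_fields() are invented names, while upper_32_bits() is the regular kernel helper.

#include <linux/kernel.h>
#include <linux/types.h>

struct esn_sw_state {                   /* hypothetical bookkeeping, not driver state */
        u64 seq64;                      /* full 64-bit extended sequence number */
        bool window_wraps;              /* replay window spans a 32-bit wrap */
};

static void esn_sw_state_to_hw_fields(const struct esn_sw_state *esn,
                                      u32 *esn_msb, bool *esn_overlap)
{
        /* Only the upper half is programmed into the IPsec object; the low
         * 32 bits arrive on the wire in every ESP header.
         */
        *esn_msb = upper_32_bits(esn->seq64);
        /* Overlap tells the device that packets from both sides of the
         * 32-bit wrap may still be in flight.
         */
        *esn_overlap = esn->window_wraps;
}

When these two fields change, the patch issues the query/modify command pair shown above instead of tearing down and recreating the SA context.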
Signed-off-by: Raed Salem Reviewed-by: Boris Pismenny Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/accel/ipsec_offload.c | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c index c49699d580ff..2f13a250aab3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c @@ -279,6 +279,93 @@ static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev) return 0; } +static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev, + struct mlx5_ipsec_obj_attrs *attrs, + u32 ipsec_id) +{ + u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {}; + u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)]; + u64 modify_field_select = 0; + u64 general_obj_types; + void *obj; + int err; + + if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED)) + return 0; + + general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types); + if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC)) + return -EINVAL; + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC); + MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id); + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) { + mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n", + ipsec_id, err); + return err; + } + + obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object); + modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select); + + /* esn */ + if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) || + !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB)) + return -EOPNOTSUPP; + + obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object); + MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb); + if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) + MLX5_SET(ipsec_obj, obj, esn_overlap, 1); + + /* general object fields set */ + MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + + return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, + const struct mlx5_accel_esp_xfrm_attrs *attrs) +{ + struct mlx5_ipsec_obj_attrs ipsec_attrs = {}; + struct mlx5_core_dev *mdev = xfrm->mdev; + struct mlx5_ipsec_esp_xfrm *mxfrm; + + int err = 0; + + if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs))) + return 0; + + if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs)) + return -EOPNOTSUPP; + + mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm); + + mutex_lock(&mxfrm->lock); + + if (!mxfrm->sa_ctx) + /* Not bound xfrm, change only sw attrs */ + goto change_sw_xfrm_attrs; + + /* need to add find and replace in ipsec_rhash_sa the sa_ctx */ + /* modify device with new hw_sa */ + ipsec_attrs.accel_flags = attrs->flags; + ipsec_attrs.esn_msb = attrs->esn; + err = mlx5_modify_ipsec_obj(mdev, + &ipsec_attrs, + mxfrm->sa_ctx->ipsec_obj_id); + +change_sw_xfrm_attrs: + if (!err) + memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs)); + + mutex_unlock(&mxfrm->lock); + return err; +} + static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = { .device_caps = mlx5_ipsec_offload_device_caps, .create_hw_context = mlx5_ipsec_offload_create_sa_ctx, @@ -286,6 +373,7 @@ static const struct 
mlx5_accel_ipsec_ops ipsec_offload_ops = { .init = mlx5_ipsec_offload_init, .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm, .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm, + .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm, }; const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) From 93761ca17edf83f4222ab5abda8bde837ea5a309 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 30 Apr 2020 14:32:45 +0300 Subject: [PATCH 1140/2056] net/mlx5e: XDP, Avoid indirect call in TX flow Use INDIRECT_CALL_2() helper to avoid the cost of the indirect call when/if CONFIG_RETPOLINE=y. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/xdp.c | 27 ++++++++++--------- .../net/ethernet/mellanox/mlx5/core/en/xdp.h | 13 +++++++++ .../ethernet/mellanox/mlx5/core/en/xsk/tx.c | 11 ++++++-- 3 files changed, 37 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index c9d308e91965..e0c1b010d41a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -34,6 +34,7 @@ #include #include "en/xdp.h" #include "en/params.h" +#include int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { @@ -114,7 +115,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, xdpi.page.di = *di; } - return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0); + return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, + mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0); } /* returns true if packet was consumed by xdp */ @@ -237,7 +239,7 @@ enum { MLX5E_XDP_CHECK_START_MPWQE = 2, }; -static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) +INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) { if (unlikely(!sq->mpwqe.wqe)) { const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS); @@ -256,10 +258,9 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq) return MLX5E_XDP_CHECK_OK; } -static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, - struct mlx5e_xdp_xmit_data *xdptxd, - struct mlx5e_xdp_info *xdpi, - int check_result) +INDIRECT_CALLABLE_SCOPE bool +mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xdp_info *xdpi, int check_result) { struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; struct mlx5e_xdpsq_stats *stats = sq->stats; @@ -293,7 +294,7 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, return true; } -static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) +INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) { if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { /* SQ is full, ring doorbell */ @@ -305,10 +306,9 @@ static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq) return MLX5E_XDP_CHECK_OK; } -static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, - struct mlx5e_xdp_xmit_data *xdptxd, - struct mlx5e_xdp_info *xdpi, - int check_result) +INDIRECT_CALLABLE_SCOPE bool +mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xdp_info *xdpi, int check_result) { struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); @@ -506,6 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, struct xdp_frame *xdpf = frames[i]; 
struct mlx5e_xdp_xmit_data xdptxd; struct mlx5e_xdp_info xdpi; + bool ret; xdptxd.data = xdpf->data; xdptxd.len = xdpf->len; @@ -522,7 +523,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdpi.frame.xdpf = xdpf; xdpi.frame.dma_addr = xdptxd.dma_addr; - if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) { + ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, + mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0); + if (unlikely(!ret)) { dma_unmap_single(sq->pdev, xdptxd.dma_addr, xdptxd.len, DMA_TO_DEVICE); xdp_return_frame_rx_napi(xdpf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index ca48c293151b..e806c13d491f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -32,6 +32,8 @@ #ifndef __MLX5_EN_XDP_H__ #define __MLX5_EN_XDP_H__ +#include + #include "en.h" #include "en/txrx.h" @@ -70,6 +72,17 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); +INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, + struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xdp_info *xdpi, + int check_result)); +INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, + struct mlx5e_xdp_xmit_data *xdptxd, + struct mlx5e_xdp_info *xdpi, + int check_result)); +INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)); +INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)); + static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index e0b3c61af93e..0dfbc96e952a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -6,6 +6,7 @@ #include "en/xdp.h" #include "en/params.h" #include +#include int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { @@ -75,8 +76,12 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK; for (; budget; budget--) { - int check_result = sq->xmit_xdp_frame_check(sq); + int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check, + mlx5e_xmit_xdp_frame_check_mpwqe, + mlx5e_xmit_xdp_frame_check, + sq); struct xdp_desc desc; + bool ret; if (unlikely(check_result < 0)) { work_done = false; @@ -98,7 +103,9 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len); - if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) { + ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, + mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result); + if (unlikely(!ret)) { if (sq->mpwqe.wqe) mlx5e_xdp_mpwqe_complete(sq); From 2901a5c618dd18b3610747c18547b33e0c1eca08 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 30 Apr 2020 15:52:53 +0300 Subject: [PATCH 1141/2056] net/mlx5e: RX, Avoid indirect call in representor CQE handling Use INDIRECT_CALL_2() helper to avoid the cost of the indirect call when/if CONFIG_RETPOLINE=y. 
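For context, INDIRECT_CALL_2(f, f2, f1, ...) compares the function pointer against two expected targets and, on a match, issues a direct call that a retpoline build does not penalize; only an unrecognized pointer falls back to a true indirect call. The standalone toy below is illustrative only: the macro is a simplified version of the retpoline flavor in include/linux/indirect_call_wrapper.h and the handler names are made up.

#include <stdio.h>

/* Simplified version of the kernel macros (retpoline-enabled flavor). */
#define INDIRECT_CALL_1(f, f1, ...) \
        ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))
#define INDIRECT_CALL_2(f, f2, f1, ...) \
        ((f) == (f2) ? f2(__VA_ARGS__) : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

static int handle_linear(int len)    { return len; }
static int handle_nonlinear(int len) { return len + 1; }

int main(void)
{
        int (*handler)(int) = handle_linear;

        /* The two likely targets become direct calls; any other pointer
         * still goes through the indirect call.
         */
        printf("%d\n", INDIRECT_CALL_2(handler, handle_nonlinear,
                                       handle_linear, 64));
        return 0;
}

In the driver call sites above, the MPWQE variant is the first candidate checked.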
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8b24e44f860a..74860f3827b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1266,7 +1266,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto free_wqe; } - skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, + mlx5e_skb_from_cqe_linear, + mlx5e_skb_from_cqe_nonlinear, + rq, cqe, wi, cqe_bcnt); if (!skb) { /* probably for XDP */ if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { From 0bdc89b39d622b8929a687eee194e4e166fe84df Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 1 May 2019 15:23:06 +0300 Subject: [PATCH 1142/2056] net/mlx5e: Do not request completion on every single UMR WQE UMR WQEs are posted in bulks, and HW is notified once per bulk. Reduce the number of completions by requesting one only for the last WQE of the bulk. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 88ea1908cb14..9d5d8b28bcd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -232,7 +232,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt); - cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; cseg->umr_mkey = rq->mkey_be; ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; From 54b154ecfb8c66dfeba6578a64e79c2104da4ced Mon Sep 17 00:00:00 2001 From: Eli Britstein Date: Thu, 18 Jun 2020 15:38:31 +0000 Subject: [PATCH 1143/2056] net/mlx5e: CT: Map 128 bits labels to 32 bit map ID The 128-bit ct_label field is matched using a 32-bit hardware register. As such, only the lower 32 bits of the ct_label field are offloaded. Change this logic to support setting and matching higher bits too. Map the 128-bit data to a unique 32-bit ID. Matching is done as an exact match on the mapping ID of key & mask.
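The scheme is easiest to see as a small dictionary keyed on the masked label: when an offloaded conntrack entry programs the label register it writes the ID, and a flower match on ct_label is translated into an exact, fully-masked match on the same ID. The sketch below is purely illustrative; the fixed-size table and the toy_* names are not the driver's mapping implementation (the patch uses the mapping_create()/mapping_add()/mapping_remove() helpers visible in the diff).

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#define CT_LABELS_WORDS 4       /* 4 x u32 == 128 bits */

/* Toy dictionary: the same masked label always yields the same small ID. */
static struct {
        u32 labels[CT_LABELS_WORDS];
        bool used;
} label_ids[64];

static int toy_label_to_id(const u32 *masked, u32 *id)
{
        u32 i;

        for (i = 0; i < ARRAY_SIZE(label_ids); i++) {
                if (label_ids[i].used) {
                        if (!memcmp(label_ids[i].labels, masked,
                                    sizeof(label_ids[i].labels))) {
                                *id = i + 1;    /* existing mapping */
                                return 0;
                        }
                        continue;
                }
                memcpy(label_ids[i].labels, masked, sizeof(label_ids[i].labels));
                label_ids[i].used = true;
                *id = i + 1;                    /* fresh mapping */
                return 0;
        }
        return -ENOSPC;                         /* table full */
}

static int toy_build_match_id(const u32 *key, const u32 *mask, u32 *id)
{
        u32 masked[CT_LABELS_WORDS];
        int i;

        /* Only the masked bits take part in the match, so the dictionary is
         * keyed on key & mask; hardware then matches the returned 32-bit ID
         * exactly instead of the 128-bit label itself.
         */
        for (i = 0; i < CT_LABELS_WORDS; i++)
                masked[i] = key[i] & mask[i];

        return toy_label_to_id(masked, id);
}

The patch releases an ID again through mapping_remove() when the rule or conntrack entry that took it is deleted.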
Signed-off-by: Eli Britstein Reviewed-by: Oz Shlomo Reviewed-by: Roi Dayan Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/en/tc_ct.c | 59 ++++++++++++------- .../ethernet/mellanox/mlx5/core/en/tc_ct.h | 3 + .../net/ethernet/mellanox/mlx5/core/en_tc.c | 3 +- 3 files changed, 42 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 4c65677feaab..c6bc9224c3b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -32,6 +32,9 @@ #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) #define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX +#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8) +#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0) + #define ct_dbg(fmt, args...)\ netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args) @@ -48,6 +51,7 @@ struct mlx5_tc_ct_priv { struct mlx5_flow_table *post_ct; struct mutex control_lock; /* guards parallel adds/dels */ struct mapping_ctx *zone_mapping; + struct mapping_ctx *labels_mapping; }; struct mlx5_ct_flow { @@ -404,6 +408,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv, mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr); mlx5e_mod_hdr_detach(ct_priv->esw->dev, &esw->offloads.mod_hdr, zone_rule->mh); + mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); } static void @@ -436,7 +441,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, struct mlx5e_tc_mod_hdr_acts *mod_acts, u8 ct_state, u32 mark, - u32 label, + u32 labels_id, u8 zone_restore_id) { struct mlx5_eswitch *esw = ct_priv->esw; @@ -453,7 +458,7 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv, return err; err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts, - LABELS_TO_REG, label); + LABELS_TO_REG, labels_id); if (err) return err; @@ -597,13 +602,10 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, if (!meta) return -EOPNOTSUPP; - if (meta->ct_metadata.labels[1] || - meta->ct_metadata.labels[2] || - meta->ct_metadata.labels[3]) { - ct_dbg("Failed to offload ct entry due to unsupported label"); + err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels, + &attr->ct_attr.ct_labels_id); + if (err) return -EOPNOTSUPP; - } - if (nat) { err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts); @@ -617,7 +619,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts, ct_state, meta->ct_metadata.mark, - meta->ct_metadata.labels[0], + attr->ct_attr.ct_labels_id, zone_restore_id); if (err) goto err_mapping; @@ -637,6 +639,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv, err_mapping: dealloc_mod_hdr_actions(&mod_acts); + mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); return err; } @@ -959,6 +962,7 @@ int mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_cls_offload *f, + struct mlx5_ct_attr *ct_attr, struct netlink_ext_ack *extack) { struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); @@ -969,6 +973,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, u16 ct_state_on, ct_state_off; u16 ct_state, ct_state_mask; struct flow_match_ct match; + u32 ct_labels[4]; if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) return 0; @@ -995,12 +1000,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv 
*priv, return -EOPNOTSUPP; } - if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) { - NL_SET_ERR_MSG_MOD(extack, - "only lower 32bits of ct_labels are supported for offload"); - return -EOPNOTSUPP; - } - ct_state_on = ct_state & ct_state_mask; ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask; trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED; @@ -1029,10 +1028,17 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, if (mask->ct_mark) mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG, key->ct_mark, mask->ct_mark); - if (mask->ct_labels[0]) - mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, - key->ct_labels[0], - mask->ct_labels[0]); + if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] || + mask->ct_labels[3]) { + ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0]; + ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1]; + ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2]; + ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3]; + if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id)) + return -EOPNOTSUPP; + mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id, + MLX5_CT_LABELS_MASK); + } return 0; } @@ -1398,7 +1404,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * + tuple + zone match + * +--------------------+ * | set mark - * | set label + * | set labels_id * | set established * | set zone_restore * | do nat (if needed) @@ -1789,7 +1795,13 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true); if (IS_ERR(ct_priv->zone_mapping)) { err = PTR_ERR(ct_priv->zone_mapping); - goto err_mapping; + goto err_mapping_zone; + } + + ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true); + if (IS_ERR(ct_priv->labels_mapping)) { + err = PTR_ERR(ct_priv->labels_mapping); + goto err_mapping_labels; } ct_priv->esw = esw; @@ -1833,8 +1845,10 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv) err_ct_nat_tbl: mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct); err_ct_tbl: + mapping_destroy(ct_priv->labels_mapping); +err_mapping_labels: mapping_destroy(ct_priv->zone_mapping); -err_mapping: +err_mapping_zone: kfree(ct_priv); err_alloc: err_support: @@ -1854,6 +1868,7 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat); mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); + mapping_destroy(ct_priv->labels_mapping); rhashtable_destroy(&ct_priv->ct_tuples_ht); rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 5e10a72f5f24..3baef917a677 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -25,6 +25,7 @@ struct mlx5_ct_attr { u16 ct_action; struct mlx5_ct_flow *ct_flow; struct nf_flowtable *nf_ft; + u32 ct_labels_id; }; #define zone_to_reg_ct {\ @@ -90,6 +91,7 @@ int mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_cls_offload *f, + struct mlx5_ct_attr *ct_attr, struct netlink_ext_ack *extack); int mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, @@ -132,6 +134,7 @@ static inline int mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct flow_cls_offload *f, + struct mlx5_ct_attr *ct_attr, struct netlink_ext_ack 
*extack) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b366c46a5604..7a0c22d05575 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4401,7 +4401,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, goto err_free; /* actions validation depends on parsing the ct matches first */ - err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack); + err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, + &flow->esw_attr->ct_attr, extack); if (err) goto err_free; From 55f656cdb851bae32980d83d2494201e79d93b63 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 14 Jul 2020 20:03:07 +0300 Subject: [PATCH 1144/2056] net: sched: Do not drop root lock in tcf_qevent_handle() Mirred currently does not mix well with blocks executed after the qdisc root lock is taken. This includes classification blocks (such as in PRIO, ETS, DRR qdiscs) and qevents. The locking caused by the packet mirrored by mirred can cause deadlocks: either when the thread of execution attempts to take the lock a second time, or when two threads end up waiting on each other's locks. The qevent patchset attempted to not introduce further badness of this sort, and dropped the lock before executing the qevent block. However this lead to too little locking and races between qdisc configuration and packet enqueue in the RED qdisc. Before the deadlock issues are solved in a way that can be applied across many qdiscs reasonably easily, do for qevents what is done for the classification blocks and just keep holding the root lock. Signed-off-by: Petr Machata Signed-off-by: Jakub Kicinski --- include/net/pkt_cls.h | 4 ++-- net/sched/cls_api.c | 8 +------- net/sched/sch_red.c | 6 +++--- 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 690a7f49c8f9..d4d461236351 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -568,7 +568,7 @@ void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch); int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, struct netlink_ext_ack *extack); struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret); + struct sk_buff **to_free, int *ret); int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe); #else static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, @@ -591,7 +591,7 @@ static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlatt static inline struct sk_buff * tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret) + struct sk_buff **to_free, int *ret) { return skb; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 322b279154de..b2b7440c2ae7 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -3822,7 +3822,7 @@ int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index EXPORT_SYMBOL(tcf_qevent_validate_change); struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, - spinlock_t *root_lock, struct sk_buff **to_free, int *ret) + struct sk_buff **to_free, int *ret) { struct tcf_result cl_res; struct tcf_proto *fl; @@ -3832,9 +3832,6 @@ struct sk_buff 
*tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru fl = rcu_dereference_bh(qe->filter_chain); - if (root_lock) - spin_unlock(root_lock); - switch (tcf_classify(skb, fl, &cl_res, false)) { case TC_ACT_SHOT: qdisc_qstats_drop(sch); @@ -3853,9 +3850,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru return NULL; } - if (root_lock) - spin_lock(root_lock); - return skb; } EXPORT_SYMBOL(tcf_qevent_handle); diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index de2be4d04ed6..a79602f7fab8 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.prob_mark++; - skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { @@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (INET_ECN_set_ce(skb)) { q->stats.forced_mark++; - skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; } else if (!red_use_nodrop(q)) { @@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return ret; congestion_drop: - skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret); + skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret); if (!skb) return NET_XMIT_CN | ret; From ac5c66f261b7174d0d9aaeb2bf9f8c2c2dbad0bd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Tue, 14 Jul 2020 20:03:08 +0300 Subject: [PATCH 1145/2056] Revert "net: sched: Pass root lock to Qdisc_ops.enqueue" This reverts commit aebe4426ccaa4838f36ea805cdf7d76503e65117. 
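To make the resulting calling convention concrete, here is a minimal, purely illustrative enqueue function; it is not a real qdisc, and struct my_sched_data with its qe_drop qevent is invented. It uses the post-revert signatures: enqueue no longer receives a root_lock pointer, and tcf_qevent_handle() now runs with the qdisc root lock still held by the caller.

#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct my_sched_data {
        struct tcf_qevent qe_drop;      /* invented qevent slot */
        u32 limit;
};

static int my_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct my_sched_data *q = qdisc_priv(sch);
        int ret;

        if (unlikely(sch->q.qlen >= q->limit)) {
                /* Classifiers attached to the qevent block run here, under
                 * the root lock; a NULL return means the skb was consumed
                 * and ret carries the verdict to propagate.
                 */
                skb = tcf_qevent_handle(&q->qe_drop, sch, skb, to_free, &ret);
                if (!skb)
                        return NET_XMIT_CN | ret;
                return qdisc_drop(skb, sch, to_free);
        }

        return qdisc_enqueue_tail(skb, sch);
}

This mirrors what sch_red.c does after these two patches; dropping the lock around mirred-capable blocks is deferred until a scheme that works across qdiscs exists.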
Signed-off-by: Petr Machata Signed-off-by: Jakub Kicinski --- include/net/sch_generic.h | 6 ++---- net/core/dev.c | 4 ++-- net/sched/sch_atm.c | 4 ++-- net/sched/sch_blackhole.c | 2 +- net/sched/sch_cake.c | 2 +- net/sched/sch_cbq.c | 4 ++-- net/sched/sch_cbs.c | 18 +++++++++--------- net/sched/sch_choke.c | 2 +- net/sched/sch_codel.c | 2 +- net/sched/sch_drr.c | 4 ++-- net/sched/sch_dsmark.c | 4 ++-- net/sched/sch_etf.c | 2 +- net/sched/sch_ets.c | 4 ++-- net/sched/sch_fifo.c | 6 +++--- net/sched/sch_fq.c | 2 +- net/sched/sch_fq_codel.c | 2 +- net/sched/sch_fq_pie.c | 2 +- net/sched/sch_generic.c | 4 ++-- net/sched/sch_gred.c | 2 +- net/sched/sch_hfsc.c | 6 +++--- net/sched/sch_hhf.c | 2 +- net/sched/sch_htb.c | 4 ++-- net/sched/sch_multiq.c | 4 ++-- net/sched/sch_netem.c | 8 ++++---- net/sched/sch_pie.c | 2 +- net/sched/sch_plug.c | 2 +- net/sched/sch_prio.c | 6 +++--- net/sched/sch_qfq.c | 4 ++-- net/sched/sch_red.c | 4 ++-- net/sched/sch_sfb.c | 4 ++-- net/sched/sch_sfq.c | 2 +- net/sched/sch_skbprio.c | 2 +- net/sched/sch_taprio.c | 4 ++-- net/sched/sch_tbf.c | 10 +++++----- net/sched/sch_teql.c | 4 ++-- 35 files changed, 71 insertions(+), 73 deletions(-) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index fceb3d63c925..c510b03b9751 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -57,7 +57,6 @@ struct qdisc_skb_head { struct Qdisc { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *sch); unsigned int flags; @@ -242,7 +241,6 @@ struct Qdisc_ops { int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free); struct sk_buff * (*dequeue)(struct Qdisc *); struct sk_buff * (*peek)(struct Qdisc *); @@ -790,11 +788,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb, #endif } -static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_calculate_pkt_len(skb, sch); - return sch->enqueue(skb, sch, root_lock, to_free); + return sch->enqueue(skb, sch, to_free); } static inline void _bstats_update(struct gnet_stats_basic_packed *bstats, diff --git a/net/core/dev.c b/net/core/dev.c index b61075828358..062a00fdca9b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_calculate_pkt_len(skb, q); if (q->flags & TCQ_F_NOLOCK) { - rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; qdisc_run(q); if (unlikely(to_free)) @@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, qdisc_run_end(q); rc = NET_XMIT_SUCCESS; } else { - rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK; + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; if (qdisc_run_begin(q)) { if (unlikely(contended)) { spin_unlock(&q->busylock); diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 1d5e422d9be2..1c281cc81f57 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int atm_tc_enqueue(struct sk_buff *skb, 
struct Qdisc *sch, struct sk_buff **to_free) { struct atm_qdisc_data *p = qdisc_priv(sch); @@ -432,7 +432,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro #endif } - ret = qdisc_enqueue(skb, flow->q, root_lock, to_free); + ret = qdisc_enqueue(skb, flow->q, to_free); if (ret != NET_XMIT_SUCCESS) { drop: __maybe_unused if (net_xmit_drop_count(ret)) { diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c index 187644657c4f..a7f7667ae984 100644 --- a/net/sched/sch_blackhole.c +++ b/net/sched/sch_blackhole.c @@ -13,7 +13,7 @@ #include #include -static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { qdisc_drop(skb, sch, to_free); diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index b3cdcd86cbfd..561d20c9adca 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -1687,7 +1687,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, static void cake_reconfigure(struct Qdisc *sch); -static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cake_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 9e16fdf47fe7..b2130df933a7 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl) } static int -cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbq_sched_data *q = qdisc_priv(sch); @@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, return ret; } - ret = qdisc_enqueue(skb, cl->q, root_lock, to_free); + ret = qdisc_enqueue(skb, cl->q, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; cbq_mark_toplevel(q, cl); diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index 7af15ebe07f7..2eaac2ff380f 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -77,7 +77,7 @@ struct cbs_sched_data { s64 sendslope; /* in bytes/s */ s64 idleslope; /* in bytes/s */ struct qdisc_watchdog watchdog; - int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, + int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free); struct sk_buff *(*dequeue)(struct Qdisc *sch); struct Qdisc *qdisc; @@ -85,13 +85,13 @@ struct cbs_sched_data { }; static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, - struct Qdisc *child, spinlock_t *root_lock, + struct Qdisc *child, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); int err; - err = child->ops->enqueue(skb, child, root_lock, to_free); + err = child->ops->enqueue(skb, child, to_free); if (err != NET_XMIT_SUCCESS) return err; @@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch, return NET_XMIT_SUCCESS; } -static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc = q->qdisc; - return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); + return cbs_child_enqueue(skb, sch, qdisc, to_free); } -static int cbs_enqueue_soft(struct sk_buff *skb, 
struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); @@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t * q->last = ktime_get_ns(); } - return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free); + return cbs_child_enqueue(skb, sch, qdisc, to_free); } -static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct cbs_sched_data *q = qdisc_priv(sch); - return q->enqueue(skb, sch, root_lock, to_free); + return q->enqueue(skb, sch, to_free); } /* timediff is in ns, slope is in bytes/s */ diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index baf3faee31aa..bd618b00d319 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q, return choke_match_flow(oskb, nskb); } -static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct choke_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index 1d94837abdd8..30169b3adbbb 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) return skb; } -static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct codel_sched_data *q; diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index ad14df2ecf3a..dde564670ad8 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch, return NULL; } -static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 76a9c4f277f2..2b88710994d7 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c @@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl, /* --------------------------- Qdisc operations ---------------------------- */ -static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro } } - err = qdisc_enqueue(skb, p->q, root_lock, to_free); + err = qdisc_enqueue(skb, p->q, to_free); if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c index 7a7c50a68115..c48f91075b5c 100644 --- 
a/net/sched/sch_etf.c +++ b/net/sched/sch_etf.c @@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code) } static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch, - spinlock_t *root_lock, struct sk_buff **to_free) + struct sk_buff **to_free) { struct etf_sched_data *q = qdisc_priv(sch); struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL; diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index 1af86f30b18e..c1e84d1eeaba 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch, return &q->classes[band]; } -static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); @@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index b4da5b624ad8..a579a4131d22 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -16,7 +16,7 @@ /* 1 band FIFO pseudo-"scheduler" */ -static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit)) @@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo return qdisc_drop(skb, sch, to_free); } -static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { if (likely(sch->q.qlen < sch->limit)) @@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo return qdisc_drop(skb, sch, to_free); } -static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int prev_backlog; diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index a90d745c41e0..2fb76fc0cc31 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb, return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon)); } -static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index bca016ffc069..3106653c17f3 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets, return idx; } -static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_codel_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 741840b9994f..f98c74018805 100644 
--- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow, skb->next = NULL; } -static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct fq_pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 715cde1df9e4..265a61d011df 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off); cheaper. */ -static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, +static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { __qdisc_drop(skb, to_free); @@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv, return &priv->q[band]; } -static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock, +static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { int band = prio2band[skb->priority & TC_PRIO_MAX]; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 7d67c6cd6605..8599c6f31b05 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table) return false; } -static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct gred_sched_data *q = NULL; diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 13ba8648bb63..0f5f121404f3 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) return -1; } -static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct hfsc_class *cl; @@ -1545,7 +1545,7 @@ static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root } first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { cl->qstats.drops++; diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index ddc6bf1d85d0..420ede875322 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free) return bucket - q->buckets; } -static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct hhf_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index b07f29059f47..ba37defaca7a 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) cl->prio_activity = 0; } -static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { int uninitialized_var(ret); @@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff 
*skb, struct Qdisc *sch, spinlock_t *root_ __qdisc_drop(skb, to_free); return ret; #endif - } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock, + } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, to_free)) != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) { qdisc_qstats_drop(sch); diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c index 56bdc4bcdc63..5c27b4270b90 100644 --- a/net/sched/sch_multiq.c +++ b/net/sched/sch_multiq.c @@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) } static int -multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct Qdisc *qdisc; @@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, } #endif - ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { sch->q.qlen++; return NET_XMIT_SUCCESS; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 8fb17483a34f..84f82771cdf5 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch, * NET_XMIT_DROP: queue length didn't change. * NET_XMIT_SUCCESS: one skb was queued. */ -static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct netem_sched_data *q = qdisc_priv(sch); @@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ q->duplicate = 0; - rootq->enqueue(skb2, rootq, root_lock, to_free); + rootq->enqueue(skb2, rootq, to_free); q->duplicate = dupsave; rc_drop = NET_XMIT_SUCCESS; } @@ -604,7 +604,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; last_len = segs->len; - rc = qdisc_enqueue(segs, sch, root_lock, to_free); + rc = qdisc_enqueue(segs, sch, to_free); if (rc != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(rc)) qdisc_qstats_drop(sch); @@ -720,7 +720,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) struct sk_buff *to_free = NULL; int err; - err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free); + err = qdisc_enqueue(skb, q->qdisc, &to_free); kfree_skb_list(to_free); if (err != NET_XMIT_SUCCESS && net_xmit_drop_count(err)) { diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index b305313b64e3..c65077f0c0f3 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params, } EXPORT_SYMBOL_GPL(pie_drop_early); -static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct pie_sched_data *q = qdisc_priv(sch); diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c index e5f8b4769b4d..cbc2ebca4548 100644 --- a/net/sched/sch_plug.c +++ b/net/sched/sch_plug.c @@ -84,7 +84,7 @@ struct plug_sched_data { u32 pkts_to_release; }; -static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct plug_sched_data *q = qdisc_priv(sch); diff --git 
a/net/sched/sch_prio.c b/net/sched/sch_prio.c index 46b7ce81c6e3..3eabb871a1d5 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c @@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) return q->queues[band]; } -static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb); struct Qdisc *qdisc; @@ -83,7 +83,7 @@ static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root } #endif - ret = qdisc_enqueue(skb, qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, qdisc, to_free); if (ret == NET_XMIT_SUCCESS) { sch->qstats.backlog += len; sch->q.qlen++; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 3cfe6262eb00..6335230a971e 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) return agg; } -static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; @@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; first = !cl->qdisc->q.qlen; - err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free); + err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); if (net_xmit_drop_count(err)) { diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index a79602f7fab8..4cc0ad0b1189 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -67,7 +67,7 @@ static int red_use_nodrop(struct red_sched_data *q) return q->flags & TC_RED_NODROP; } -static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct red_sched_data *q = qdisc_priv(sch); @@ -126,7 +126,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ break; } - ret = qdisc_enqueue(skb, child, root_lock, to_free); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c index 356f6d1d30db..da047a37a3bf 100644 --- a/net/sched/sch_sfb.c +++ b/net/sched/sch_sfb.c @@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, return false; } -static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { @@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ } enqueue: - ret = qdisc_enqueue(skb, child, root_lock, to_free); + ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index a1314510dc69..cae5dbbadc1c 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q) } static int -sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free) 
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct sfq_sched_data *q = qdisc_priv(sch); unsigned int hash, dropped; diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index f75f237c4436..7a5e4c454715 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -65,7 +65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q) return SKBPRIO_MAX_PRIORITY - 1; } -static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1; diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index daef2ff60a98..e981992634dd 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -410,7 +410,7 @@ static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch) return txtime; } -static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct taprio_sched *q = qdisc_priv(sch); @@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro qdisc_qstats_backlog_inc(sch, skb); sch->q.qlen++; - return qdisc_enqueue(skb, child, root_lock, to_free); + return qdisc_enqueue(skb, child, to_free); } static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index c3eb5cdb83a8..78e79029dc63 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch) /* GSO packet is too big, segment it so that tbf can transmit * each segment in time */ -static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ skb_mark_not_on_list(segs); qdisc_skb_cb(segs)->pkt_len = segs->len; len += segs->len; - ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free); + ret = qdisc_enqueue(segs, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); @@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ return nb > 0 ? 
NET_XMIT_SUCCESS : NET_XMIT_DROP; } -static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, +static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct tbf_sched_data *q = qdisc_priv(sch); @@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_ if (qdisc_pkt_len(skb) > q->max_size) { if (skb_is_gso(skb) && skb_gso_validate_mac_len(skb, q->max_size)) - return tbf_segment(skb, sch, root_lock, to_free); + return tbf_segment(skb, sch, to_free); return qdisc_drop(skb, sch, to_free); } - ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free); + ret = qdisc_enqueue(skb, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index b586eec2eaeb..2f1f0a378408 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -72,8 +72,8 @@ struct teql_sched_data { /* "teql*" qdisc routines */ -static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, - struct sk_buff **to_free) +static int +teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct net_device *dev = qdisc_dev(sch); struct teql_sched_data *q = qdisc_priv(sch); From 0a0d93b943a20e4210491911e82c3b4a34391ce4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Jul 2020 15:02:30 +0800 Subject: [PATCH 1146/2056] xfrm: interface: use IS_REACHABLE to avoid some compile errors kernel test robot reported some compile errors: ia64-linux-ld: net/xfrm/xfrm_interface.o: in function `xfrmi4_fini': net/xfrm/xfrm_interface.c:900: undefined reference to `xfrm4_tunnel_deregister' ia64-linux-ld: net/xfrm/xfrm_interface.c:901: undefined reference to `xfrm4_tunnel_deregister' ia64-linux-ld: net/xfrm/xfrm_interface.o: in function `xfrmi4_init': net/xfrm/xfrm_interface.c:873: undefined reference to `xfrm4_tunnel_register' ia64-linux-ld: net/xfrm/xfrm_interface.c:876: undefined reference to `xfrm4_tunnel_register' ia64-linux-ld: net/xfrm/xfrm_interface.c:885: undefined reference to `xfrm4_tunnel_deregister' This happened when set CONFIG_XFRM_INTERFACE=y and CONFIG_INET_TUNNEL=m. We don't really want xfrm_interface to depend inet_tunnel completely, but only to disable the tunnel code when inet_tunnel is not seen. So instead of adding "select INET_TUNNEL" for XFRM_INTERFACE, this patch is only to change to IS_REACHABLE to avoid these compile error. 
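For context, IS_ENABLED(CONFIG_FOO) is true whenever FOO is 'y' or 'm', while IS_REACHABLE(CONFIG_FOO) is true only when FOO is built-in, or when FOO is a module and the referencing code is itself being built as a module. With XFRM_INTERFACE=y and the tunnel code =m, IS_REACHABLE() evaluates to 0, so the tunnel registration calls are compiled out instead of leaving unresolved references in the built-in object. The sketch below is illustrative only and not part of this patch; CONFIG_FOO is a hypothetical option, mirroring how autoconf.h represents 'y'/'m' symbols:

/* Hypothetical standalone illustration of the IS_REACHABLE() semantics.
 * Kconfig emits CONFIG_FOO for 'y' and CONFIG_FOO_MODULE for 'm';
 * MODULE is defined only while compiling modular objects.
 */
#define CONFIG_FOO_MODULE 1	/* pretend CONFIG_FOO=m */
/* MODULE is intentionally not defined here: the caller is built-in (=y) */

#if defined(CONFIG_FOO) || (defined(CONFIG_FOO_MODULE) && defined(MODULE))
/* reachable: the symbol will be linkable, the calls may stay */
#else
/* not reachable: the calls must be compiled out, which is what
 * switching from IS_ENABLED() to IS_REACHABLE() achieves here
 */
#endif
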
Reported-by: kernel test robot Fixes: da9bbf0598c9 ("xfrm: interface: support IPIP and IPIP6 tunnels processing with .cb_handler") Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_interface.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 63a52b4b6ea9..4c904d332007 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -812,7 +812,7 @@ static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = { .priority = 10, }; -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) static int xfrmi6_rcv_tunnel(struct sk_buff *skb) { const xfrm_address_t *saddr; @@ -863,7 +863,7 @@ static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = { .priority = 10, }; -#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL) static int xfrmi4_rcv_tunnel(struct sk_buff *skb) { return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr); @@ -897,7 +897,7 @@ static int __init xfrmi4_init(void) err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; -#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL) err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET); if (err < 0) goto xfrm_tunnel_ipip_failed; @@ -908,7 +908,7 @@ static int __init xfrmi4_init(void) return 0; -#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL) xfrm_tunnel_ipip6_failed: xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET); xfrm_tunnel_ipip_failed: @@ -924,7 +924,7 @@ static int __init xfrmi4_init(void) static void xfrmi4_fini(void) { -#if IS_ENABLED(CONFIG_INET_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL) xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6); xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET); #endif @@ -946,7 +946,7 @@ static int __init xfrmi6_init(void) err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6); if (err < 0) goto xfrm_tunnel_ipv6_failed; @@ -957,7 +957,7 @@ static int __init xfrmi6_init(void) return 0; -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) xfrm_tunnel_ip6ip_failed: xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6); xfrm_tunnel_ipv6_failed: @@ -973,7 +973,7 @@ static int __init xfrmi6_init(void) static void xfrmi6_fini(void) { -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET); xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6); #endif From 96a208295040c00c85d022720c10ec1a751b9bdb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 17 Jul 2020 15:03:14 +0800 Subject: [PATCH 1147/2056] ip6_vti: use IS_REACHABLE to avoid some compile errors Naresh reported some compile errors: arm build failed due this error on linux-next 20200713 and 20200713 net/ipv6/ip6_vti.o: In function `vti6_rcv_tunnel': ip6_vti.c:(.text+0x1d20): undefined reference to `xfrm6_tunnel_spi_lookup' This happened when set CONFIG_IPV6_VTI=y and CONFIG_INET6_TUNNEL=m. We don't really want ip6_vti to depend inet6_tunnel completely, but only to disable the tunnel code when inet6_tunnel is not seen. 
So instead of adding "select INET6_TUNNEL" for IPV6_VTI, this patch is only to change to IS_REACHABLE to avoid these compile error. Reported-by: Naresh Kamboju Fixes: 08622869ed3f ("ip6_vti: support IP6IP6 tunnel processing with .cb_handler") Signed-off-by: Xin Long Signed-off-by: Steffen Klassert --- net/ipv6/ip6_vti.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 18ec4ab45be7..53f12b40528e 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1218,7 +1218,7 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { .priority = 100, }; -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) static int vti6_rcv_tunnel(struct sk_buff *skb) { const xfrm_address_t *saddr; @@ -1270,7 +1270,7 @@ static int __init vti6_tunnel_init(void) err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP); if (err < 0) goto xfrm_proto_comp_failed; -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) msg = "ipv6 tunnel"; err = xfrm6_tunnel_register(&vti_ipv6_handler, AF_INET6); if (err < 0) @@ -1288,7 +1288,7 @@ static int __init vti6_tunnel_init(void) return 0; rtnl_link_failed: -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) err = xfrm6_tunnel_deregister(&vti_ip6ip_handler, AF_INET); vti_tunnel_ip6ip_failed: err = xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); @@ -1312,7 +1312,7 @@ static int __init vti6_tunnel_init(void) static void __exit vti6_tunnel_cleanup(void) { rtnl_link_unregister(&vti6_link_ops); -#if IS_ENABLED(CONFIG_INET6_XFRM_TUNNEL) +#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL) xfrm6_tunnel_deregister(&vti_ip6ip_handler, AF_INET); xfrm6_tunnel_deregister(&vti_ipv6_handler, AF_INET6); #endif From bba238ed037c60242332dd1e4c5778af9eba4d9b Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sun, 12 Jul 2020 19:48:15 +0300 Subject: [PATCH 1148/2056] net: phy: continue searching for C45 MMDs even if first returned ffff:ffff At the time of introduction, in commit bdeced75b13f ("net: dsa: felix: Add PCS operations for PHYLINK"), support for the Lynx PCS inside Felix was relying, for USXGMII support, on the fact that get_phy_device() is able to parse the Lynx PCS "device-in-package" registers for this C45 MDIO device and identify it correctly. However, this was actually working somewhat by mistake (in the sense that, even though it was detected, it was detected for the wrong reasons). The get_phy_c45_ids() function works by iterating through all MMDs starting from 1 (MDIO_MMD_PMAPMD) and stops at the first one which returns a non-zero value in the "device-in-package" register pair, proceeding to see what that non-zero value is. For the Felix PCS, the first MMD (1, for the PMA/PMD) returns a non-zero value of 0xffffffff in the "device-in-package" registers. There is a code branch which is supposed to treat this case and flag it as wrong, and normally, this would have caught my attention when adding initial support for this PCS: if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) { /* If mostly Fs, there is no device there, then let's probe * MMD 0, as some 10G PHYs have zero Devices In package, * e.g. Cortina CS4315/CS4340 PHY. 
*/ However, this code never actually kicked in, it seems, because this snippet from get_phy_c45_devs_in_pkg() was basically sabotaging itself, by returning 0xfffffffe instead of 0xffffffff: /* Bit 0 doesn't represent a device, it indicates c22 regs presence */ *devices_in_package &= ~BIT(0); Then the rest of the code just carried on thinking "ok, MMD 1 (PMA/PMD) says that there are 31 devices in that package, each having a device id of ffff:ffff, that's perfectly fine, let's go ahead and probe this PHY device". But after cleanup commit 320ed3bf9000 ("net: phy: split devices_in_package"), this got "fixed", and now devs_in_pkg is no longer 0xfffffffe, but 0xffffffff. So now, get_phy_device is returning -ENODEV for the Lynx PCS, because the semantics have remained mostly unchanged: the loop stops at the first MMD that returns a non-zero value, and that is MMD 1. But the Lynx PCS is simply a clause 37 PCS which implements the required MAC-side functionality for USXGMII (when operated in C45 mode, which is where C45 devices-in-package detection is relevant to). Of course it will fail the PMD/PMA test (MMD 1), since it is not a PHY. But it does implement detection for MDIO_MMD_PCS (3): - MDIO_DEVS1=0x008a, MDIO_DEVS2=0x0000, - MDIO_DEVID1=0x0083, MDIO_DEVID2=0xe400 Let get_phy_c45_ids() continue searching for valid MMDs, and don't assume that every phy_device has a PMA/PMD MMD implemented. Signed-off-by: Vladimir Oltean Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 7cda95330aea..49e98a092b96 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -734,7 +734,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < MDIO_MMD_NUM && devs_in_pkg == 0; i++) { + for (i = 1; i < MDIO_MMD_NUM && (devs_in_pkg == 0 || + (devs_in_pkg & 0x1fffffff) == 0x1fffffff); i++) { if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { /* Check that there is a device present at this * address before reading the devices-in-package From b18432c5a49c9413fd3afb717b378e08cb71331b Mon Sep 17 00:00:00 2001 From: Chris Healy Date: Tue, 14 Jul 2020 10:59:10 -0700 Subject: [PATCH 1149/2056] net: phy: sfp: Cotsworks SFF module EEPROM fixup Some Cotsworks SFF have invalid data in the first few bytes of the module EEPROM. This results in these modules not being detected as valid modules. Address this by poking the correct EEPROM values into the module EEPROM when the model/PN match and the existing module EEPROM contents are not correct. Signed-off-by: Chris Healy Signed-off-by: David S.
Miller --- drivers/net/phy/sfp.c | 44 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index eef458ab0e5b..c24b0e83dd32 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -1632,10 +1632,43 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable) return 0; } +static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id) +{ + u8 check; + int err; + + if (id->base.phys_id != SFF8024_ID_SFF_8472 || + id->base.phys_ext_id != SFP_PHYS_EXT_ID_SFP || + id->base.connector != SFF8024_CONNECTOR_LC) { + dev_warn(sfp->dev, "Rewriting fiber module EEPROM with corrected values\n"); + id->base.phys_id = SFF8024_ID_SFF_8472; + id->base.phys_ext_id = SFP_PHYS_EXT_ID_SFP; + id->base.connector = SFF8024_CONNECTOR_LC; + err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3); + if (err != 3) { + dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err); + return err; + } + + /* Cotsworks modules have been found to require a delay between write operations. */ + mdelay(50); + + /* Update base structure checksum */ + check = sfp_check(&id->base, sizeof(id->base) - 1); + err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1); + if (err != 1) { + dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err); + return err; + } + } + return 0; +} + static int sfp_sm_mod_probe(struct sfp *sfp, bool report) { /* SFP module inserted - read I2C data */ struct sfp_eeprom_id id; + bool cotsworks_sfbg; bool cotsworks; u8 check; int ret; @@ -1657,6 +1690,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report) * serial number and date code. */ cotsworks = !memcmp(id.base.vendor_name, "COTSWORKS ", 16); + cotsworks_sfbg = !memcmp(id.base.vendor_pn, "SFBG", 4); + + /* Cotsworks SFF module EEPROM do not always have valid phys_id, + * phys_ext_id, and connector bytes. Rewrite SFF EEPROM bytes if + * Cotsworks PN matches and bytes are not correct. + */ + if (cotsworks && cotsworks_sfbg) { + ret = sfp_cotsworks_fixup_check(sfp, &id); + if (ret < 0) + return ret; + } /* Validate the checksum over the base structure */ check = sfp_check(&id.base, sizeof(id.base) - 1); From eac87c413bf9794c14d488998a5265ea5b32f04e Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Wed, 15 Jul 2020 14:09:28 +0200 Subject: [PATCH 1150/2056] net: openvswitch: reorder masks array based on usage This patch reorders the masks array every 4 seconds based on their usage count. This greatly reduces the masks per packet hit, and hence the overall performance. Especially in the OVS/OVN case for OpenShift. Here are some results from the OVS/OVN OpenShift test, which use 8 pods, each pod having 512 uperf connections, each connection sends a 64-byte request and gets a 1024-byte response (TCP). All uperf clients are on 1 worker node while all uperf servers are on the other worker node. Kernel without this patch : 7.71 Gbps Kernel with this patch applied: 14.52 Gbps We also run some tests to verify the rebalance activity does not lower the flow insertion rate, which does not. Signed-off-by: Eelco Chaudron Tested-by: Andrew Theurer Reviewed-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/openvswitch/datapath.c | 22 +++++ net/openvswitch/datapath.h | 8 +- net/openvswitch/flow_table.c | 173 ++++++++++++++++++++++++++++++++++- net/openvswitch/flow_table.h | 11 +++ 4 files changed, 207 insertions(+), 7 deletions(-) diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 94b024534987..95805f0e27bd 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -130,6 +130,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, const struct dp_upcall_info *, uint32_t cutlen); +static void ovs_dp_masks_rebalance(struct work_struct *work); + /* Must be called with rcu_read_lock or ovs_mutex. */ const char *ovs_dp_name(const struct datapath *dp) { @@ -1653,6 +1655,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) goto err_destroy_reply; ovs_dp_set_net(dp, sock_net(skb->sk)); + INIT_DELAYED_WORK(&dp->masks_rebalance, ovs_dp_masks_rebalance); /* Allocate table. */ err = ovs_flow_tbl_init(&dp->table); @@ -1712,6 +1715,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); list_add_tail_rcu(&dp->list_node, &ovs_net->dps); + schedule_delayed_work(&dp->masks_rebalance, + msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL)); + ovs_unlock(); ovs_notify(&dp_datapath_genl_family, reply, info); @@ -1756,6 +1762,9 @@ static void __dp_destroy(struct datapath *dp) /* RCU destroy the flow table */ call_rcu(&dp->rcu, destroy_dp_rcu); + + /* Cancel remaining work. */ + cancel_delayed_work_sync(&dp->masks_rebalance); } static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) @@ -2338,6 +2347,19 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) return skb->len; } +static void ovs_dp_masks_rebalance(struct work_struct *work) +{ + struct datapath *dp = container_of(work, struct datapath, + masks_rebalance.work); + + ovs_lock(); + ovs_flow_masks_rebalance(&dp->table); + ovs_unlock(); + + schedule_delayed_work(&dp->masks_rebalance, + msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL)); +} + static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = { [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) }, diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 2016dd107939..697a2354194b 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -20,8 +20,9 @@ #include "meter.h" #include "vport-internal_dev.h" -#define DP_MAX_PORTS USHRT_MAX -#define DP_VPORT_HASH_BUCKETS 1024 +#define DP_MAX_PORTS USHRT_MAX +#define DP_VPORT_HASH_BUCKETS 1024 +#define DP_MASKS_REBALANCE_INTERVAL 4000 /** * struct dp_stats_percpu - per-cpu packet processing statistics for a given @@ -83,6 +84,9 @@ struct datapath { /* Switch meters. 
*/ struct dp_meter_table meter_tbl; + + /* re-balance flow masks timer */ + struct delayed_work masks_rebalance; }; /** diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 2398d7238300..af22c9ee28dd 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -169,16 +170,70 @@ static struct table_instance *table_instance_alloc(int new_size) return ti; } +static void __mask_array_destroy(struct mask_array *ma) +{ + free_percpu(ma->masks_usage_cntr); + kfree(ma); +} + +static void mask_array_rcu_cb(struct rcu_head *rcu) +{ + struct mask_array *ma = container_of(rcu, struct mask_array, rcu); + + __mask_array_destroy(ma); +} + +static void tbl_mask_array_reset_counters(struct mask_array *ma) +{ + int i, cpu; + + /* As the per CPU counters are not atomic we can not go ahead and + * reset them from another CPU. To be able to still have an approximate + * zero based counter we store the value at reset, and subtract it + * later when processing. + */ + for (i = 0; i < ma->max; i++) { + ma->masks_usage_zero_cntr[i] = 0; + + for_each_possible_cpu(cpu) { + u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr, + cpu); + unsigned int start; + u64 counter; + + do { + start = u64_stats_fetch_begin_irq(&ma->syncp); + counter = usage_counters[i]; + } while (u64_stats_fetch_retry_irq(&ma->syncp, start)); + + ma->masks_usage_zero_cntr[i] += counter; + } + } +} + static struct mask_array *tbl_mask_array_alloc(int size) { struct mask_array *new; size = max(MASK_ARRAY_SIZE_MIN, size); new = kzalloc(sizeof(struct mask_array) + - sizeof(struct sw_flow_mask *) * size, GFP_KERNEL); + sizeof(struct sw_flow_mask *) * size + + sizeof(u64) * size, GFP_KERNEL); if (!new) return NULL; + new->masks_usage_zero_cntr = (u64 *)((u8 *)new + + sizeof(struct mask_array) + + sizeof(struct sw_flow_mask *) * + size); + + new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size, + __alignof__(u64)); + if (!new->masks_usage_cntr) { + kfree(new); + return NULL; + } + new->count = 0; new->max = size; @@ -202,10 +257,10 @@ static int tbl_mask_array_realloc(struct flow_table *tbl, int size) if (ovsl_dereference(old->masks[i])) new->masks[new->count++] = old->masks[i]; } + call_rcu(&old->rcu, mask_array_rcu_cb); } rcu_assign_pointer(tbl->mask_array, new); - kfree_rcu(old, rcu); return 0; } @@ -223,6 +278,11 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl, return err; ma = ovsl_dereference(tbl->mask_array); + } else { + /* On every add or delete we need to reset the counters so + * every new mask gets a fair chance of being prioritized. + */ + tbl_mask_array_reset_counters(ma); } BUG_ON(ovsl_dereference(ma->masks[ma_count])); @@ -260,6 +320,9 @@ static void tbl_mask_array_del_mask(struct flow_table *tbl, if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) && ma_count <= (ma->max / 3)) tbl_mask_array_realloc(tbl, ma->max / 2); + else + tbl_mask_array_reset_counters(ma); + } /* Remove 'mask' from the mask list, if it is not needed any more. 
*/ @@ -312,7 +375,7 @@ int ovs_flow_tbl_init(struct flow_table *table) free_ti: __table_instance_destroy(ti); free_mask_array: - kfree(ma); + __mask_array_destroy(ma); free_mask_cache: free_percpu(table->mask_cache); return -ENOMEM; @@ -392,7 +455,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table) struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); free_percpu(table->mask_cache); - kfree_rcu(rcu_dereference_raw(table->mask_array), rcu); + call_rcu(&table->mask_array->rcu, mask_array_rcu_cb); table_instance_destroy(table, ti, ufid_ti, false); } @@ -606,6 +669,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, u32 *n_mask_hit, u32 *index) { + u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr); struct sw_flow *flow; struct sw_flow_mask *mask; int i; @@ -614,8 +678,12 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, mask = rcu_dereference_ovsl(ma->masks[*index]); if (mask) { flow = masked_flow_lookup(ti, key, mask, n_mask_hit); - if (flow) + if (flow) { + u64_stats_update_begin(&ma->syncp); + usage_counters[*index]++; + u64_stats_update_end(&ma->syncp); return flow; + } } } @@ -631,6 +699,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, flow = masked_flow_lookup(ti, key, mask, n_mask_hit); if (flow) { /* Found */ *index = i; + u64_stats_update_begin(&ma->syncp); + usage_counters[*index]++; + u64_stats_update_end(&ma->syncp); return flow; } } @@ -934,6 +1005,98 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, return 0; } +static int compare_mask_and_count(const void *a, const void *b) +{ + const struct mask_count *mc_a = a; + const struct mask_count *mc_b = b; + + return (s64)mc_b->counter - (s64)mc_a->counter; +} + +/* Must be called with OVS mutex held. */ +void ovs_flow_masks_rebalance(struct flow_table *table) +{ + struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); + struct mask_count *masks_and_count; + struct mask_array *new; + int masks_entries = 0; + int i; + + /* Build array of all current entries with use counters. */ + masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count), + GFP_KERNEL); + if (!masks_and_count) + return; + + for (i = 0; i < ma->max; i++) { + struct sw_flow_mask *mask; + unsigned int start; + int cpu; + + mask = rcu_dereference_ovsl(ma->masks[i]); + if (unlikely(!mask)) + break; + + masks_and_count[i].index = i; + masks_and_count[i].counter = 0; + + for_each_possible_cpu(cpu) { + u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr, + cpu); + u64 counter; + + do { + start = u64_stats_fetch_begin_irq(&ma->syncp); + counter = usage_counters[i]; + } while (u64_stats_fetch_retry_irq(&ma->syncp, start)); + + masks_and_count[i].counter += counter; + } + + /* Subtract the zero count value. */ + masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i]; + + /* Rather than calling tbl_mask_array_reset_counters() + * below when no change is needed, do it inline here. + */ + ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter; + } + + if (i == 0) + goto free_mask_entries; + + /* Sort the entries */ + masks_entries = i; + sort(masks_and_count, masks_entries, sizeof(*masks_and_count), + compare_mask_and_count, NULL); + + /* If the order is the same, nothing to do... */ + for (i = 0; i < masks_entries; i++) { + if (i != masks_and_count[i].index) + break; + } + if (i == masks_entries) + goto free_mask_entries; + + /* Rebuilt the new list in order of usage. 
*/ + new = tbl_mask_array_alloc(ma->max); + if (!new) + goto free_mask_entries; + + for (i = 0; i < masks_entries; i++) { + int index = masks_and_count[i].index; + + new->masks[new->count++] = + rcu_dereference_ovsl(ma->masks[index]); + } + + rcu_assign_pointer(table->mask_array, new); + call_rcu(&ma->rcu, mask_array_rcu_cb); + +free_mask_entries: + kfree(masks_and_count); +} + /* Initializes the flow module. * Returns zero if successful or a negative error code. */ int ovs_flow_init(void) diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 8a5cea6ae111..1f664b050e3b 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -27,9 +27,17 @@ struct mask_cache_entry { u32 mask_index; }; +struct mask_count { + int index; + u64 counter; +}; + struct mask_array { struct rcu_head rcu; int count, max; + u64 __percpu *masks_usage_cntr; + u64 *masks_usage_zero_cntr; + struct u64_stats_sync syncp; struct sw_flow_mask __rcu *masks[]; }; @@ -86,4 +94,7 @@ bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, bool full, const struct sw_flow_mask *mask); + +void ovs_flow_masks_rebalance(struct flow_table *table); + #endif /* flow_table.h */ From 0226009ce0f6089f9b31211f7a2703cf9a327a01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Rodr=C3=ADguez=20P=C3=A9rez?= Date: Wed, 15 Jul 2020 20:40:56 +0200 Subject: [PATCH 1151/2056] net: cdc_ether: use dev->intf to get interface information MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit usbnet_cdc_update_filter was getting the interface number from the usb_interface struct in cdc_state->control. However, cdc_ncm does not initialize that structure in its bind function, but uses cdc_ncm_ctx instead. Getting intf directly from struct usbnet solves the problem. Signed-off-by: Miguel Rodríguez Pérez Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ether.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a657943c9f01..2afe258e3648 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -65,8 +65,6 @@ static const u8 mbm_guid[16] = { static void usbnet_cdc_update_filter(struct usbnet *dev) { - struct cdc_state *info = (void *) &dev->data; - struct usb_interface *intf = info->control; struct net_device *net = dev->net; u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED @@ -86,7 +84,7 @@ static void usbnet_cdc_update_filter(struct usbnet *dev) USB_CDC_SET_ETHERNET_PACKET_FILTER, USB_TYPE_CLASS | USB_RECIP_INTERFACE, cdc_filter, - intf->cur_altsetting->desc.bInterfaceNumber, + dev->intf->cur_altsetting->desc.bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT From e506addeff844237d60545ef4f6141de21471caf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Rodr=C3=ADguez=20P=C3=A9rez?= Date: Wed, 15 Jul 2020 20:40:57 +0200 Subject: [PATCH 1152/2056] net: cdc_ether: export usbnet_cdc_update_filter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the function available to other drivers, like cdc_ncm. Signed-off-by: Miguel Rodríguez Pérez Acked-by: Oliver Neukum Signed-off-by: Bjørn Mork Signed-off-by: David S. 
Miller --- drivers/net/usb/cdc_ether.c | 3 ++- include/linux/usb/usbnet.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 2afe258e3648..8c1d61c2cbac 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -63,7 +63,7 @@ static const u8 mbm_guid[16] = { 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, }; -static void usbnet_cdc_update_filter(struct usbnet *dev) +void usbnet_cdc_update_filter(struct usbnet *dev) { struct net_device *net = dev->net; @@ -90,6 +90,7 @@ static void usbnet_cdc_update_filter(struct usbnet *dev) USB_CTRL_SET_TIMEOUT ); } +EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter); /* probes control interface, claims data interface, collects the bulk * endpoints, activates data interface (if needed), maybe sets MTU. diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index b0bff3083278..3a856963a363 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -207,6 +207,7 @@ struct cdc_state { struct usb_interface *data; }; +extern void usbnet_cdc_update_filter(struct usbnet *dev); extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); From 1ea2b748b5eb4f9b6c8f66d098f1277afb724336 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Wed, 15 Jul 2020 20:40:58 +0200 Subject: [PATCH 1153/2056] net: usbnet: export usbnet_set_rx_mode() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function can be reused by other usbnet minidrivers. Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/usbnet.c | 3 ++- include/linux/usb/usbnet.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 5ec97def3513..e45935a5856a 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1108,12 +1108,13 @@ static void __handle_link_change(struct usbnet *dev) clear_bit(EVENT_LINK_CHANGE, &dev->flags); } -static void usbnet_set_rx_mode(struct net_device *net) +void usbnet_set_rx_mode(struct net_device *net) { struct usbnet *dev = netdev_priv(net); usbnet_defer_kevent(dev, EVENT_SET_RX_MODE); } +EXPORT_SYMBOL_GPL(usbnet_set_rx_mode); static void __handle_set_rx_mode(struct usbnet *dev) { diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 3a856963a363..2e4f7721fc4e 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -274,6 +274,7 @@ extern int usbnet_set_link_ksettings(struct net_device *net, extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); +extern void usbnet_set_rx_mode(struct net_device *net); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); From 37a2ebdd9e597ae1a0270ac747883ea8f6f767b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Rodr=C3=ADguez=20P=C3=A9rez?= Date: Wed, 15 Jul 2020 20:40:59 +0200 Subject: [PATCH 1154/2056] net: cdc_ncm: add .ndo_set_rx_mode to cdc_ncm_netdev_ops MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The cdc_ncm driver overrides the net_device_ops structure used by usbnet to be able to hook into .ndo_change_mtu. 
However, the structure was missing the .ndo_set_rx_mode field, preventing the driver from hooking into usbnet's set_rx_mode. This patch adds the missing callback to usbnet_set_rx_mode in net_device_ops. Signed-off-by: Miguel Rodríguez Pérez Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ncm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 8929669b5e6d..f5d7b933792b 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -792,6 +792,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = { .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_set_rx_mode = usbnet_set_rx_mode, .ndo_get_stats64 = usbnet_get_stats64, .ndo_change_mtu = cdc_ncm_change_mtu, .ndo_set_mac_address = eth_mac_addr, From e10dcb1b6ba714243ad5a35a11b91cc14103a9a9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Rodr=C3=ADguez=20P=C3=A9rez?= Date: Wed, 15 Jul 2020 20:41:00 +0200 Subject: [PATCH 1155/2056] net: cdc_ncm: hook into set_rx_mode to admit multicast traffic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We set set_rx_mode to usbnet_cdc_update_filter provided by cdc_ether that simply admits all multicast traffic if there is more than one multicast filter configured. Signed-off-by: Miguel Rodríguez Pérez Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ncm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f5d7b933792b..e04f588538cc 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1896,6 +1896,7 @@ static const struct driver_info cdc_ncm_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, }; /* Same as cdc_ncm_info, but with FLAG_WWAN */ @@ -1909,6 +1910,7 @@ static const struct driver_info wwan_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, }; /* Same as wwan_info, but with FLAG_NOARP */ @@ -1922,6 +1924,7 @@ static const struct driver_info wwan_noarp_info = { .status = cdc_ncm_status, .rx_fixup = cdc_ncm_rx_fixup, .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, }; static const struct usb_device_id cdc_devs[] = { From 8c728940487945e25cdfe020d58da42143aa98c1 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Wed, 15 Jul 2020 22:27:05 +0200 Subject: [PATCH 1156/2056] mptcp: silence warning in subflow_data_ready() since commit d47a72152097 ("mptcp: fix race in subflow_data_ready()"), it is possible to observe a regression in MP_JOIN kselftests. For sockets in TCP_CLOSE state, it's not sufficient to just wake up the main socket: we also need to ensure that received data are made available to the reader. Silence the WARN_ON_ONCE() in these cases: it preserves the syzkaller fix and restores kselftests when they are ran as follows: # while true; do > make KBUILD_OUTPUT=/tmp/kselftest TARGETS=net/mptcp kselftest > done Reported-by: Florian Westphal Fixes: d47a72152097 ("mptcp: fix race in subflow_data_ready()") Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/47 Signed-off-by: Davide Caratti Reviewed-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- net/mptcp/subflow.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 9f7f3772c13c..519122e66f17 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -869,18 +869,19 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space) static void subflow_data_ready(struct sock *sk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + u16 state = 1 << inet_sk_state_load(sk); struct sock *parent = subflow->conn; struct mptcp_sock *msk; msk = mptcp_sk(parent); - if ((1 << inet_sk_state_load(sk)) & (TCPF_LISTEN | TCPF_CLOSE)) { + if (state & TCPF_LISTEN) { set_bit(MPTCP_DATA_READY, &msk->flags); parent->sk_data_ready(parent); return; } WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable && - !subflow->mp_join); + !subflow->mp_join && !(state & TCPF_CLOSE)); if (mptcp_subflow_data_available(sk)) mptcp_data_ready(parent, sk); From dcc82bb0727c08f93a91fa7532b950bafa2598f2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 16 Jul 2020 21:03:58 +0200 Subject: [PATCH 1157/2056] net: sun: cassini: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'cas_tx_tiny_alloc()', GFP_KERNEL can be used because a few lines below in its only caller, 'cas_alloc_rxds()', is also called. This function makes an explicit use of GFP_KERNEL. When memory is allocated in 'cas_init_one()', GFP_KERNEL can be used because it is a probe function and no lock is acquired. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - 
pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/cassini.c | 104 +++++++++++++++-------------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 3bab2cb700c7..4536e97bbf53 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -443,8 +443,8 @@ static void cas_phy_powerdown(struct cas *cp) /* cp->lock held. note: the last put_page will free the buffer */ static int cas_page_free(struct cas *cp, cas_page_t *page) { - pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size, + DMA_FROM_DEVICE); __free_pages(page->buffer, cp->page_order); kfree(page); return 0; @@ -474,8 +474,8 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags) page->buffer = alloc_pages(flags, cp->page_order); if (!page->buffer) goto page_err; - page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0, - cp->page_size, PCI_DMA_FROMDEVICE); + page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0, + cp->page_size, DMA_FROM_DEVICE); return page; page_err: @@ -1863,8 +1863,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) daddr = le64_to_cpu(txd->buffer); dlen = CAS_VAL(TX_DESC_BUFLEN, le64_to_cpu(txd->control)); - pci_unmap_page(cp->pdev, daddr, dlen, - PCI_DMA_TODEVICE); + dma_unmap_page(&cp->pdev->dev, daddr, dlen, + DMA_TO_DEVICE); entry = TX_DESC_NEXT(ring, entry); /* tiny buffer may follow */ @@ -1957,12 +1957,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, i = hlen; if (!dlen) /* attach FCS */ i += cp->crc_size; - pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, + i, DMA_FROM_DEVICE); addr = cas_page_map(page->buffer); memcpy(p, addr + off, i); - pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&cp->pdev->dev, + page->dma_addr + off, i, + DMA_FROM_DEVICE); cas_page_unmap(addr); RX_USED_ADD(page, 0x100); p += hlen; @@ -1988,16 +1989,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, i = hlen; if (i == dlen) /* attach FCS */ i += cp->crc_size; - pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, + i, DMA_FROM_DEVICE); /* make sure we always copy a header */ swivel = 0; if (p == (char *) skb->data) { /* not split */ addr = cas_page_map(page->buffer); memcpy(p, addr + off, RX_COPY_MIN); - pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&cp->pdev->dev, + page->dma_addr + off, i, + DMA_FROM_DEVICE); cas_page_unmap(addr); off += RX_COPY_MIN; swivel = RX_COPY_MIN; @@ -2024,12 +2026,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; - pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, - hlen + cp->crc_size, - PCI_DMA_FROMDEVICE); - pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, - hlen + cp->crc_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&cp->pdev->dev, + page->dma_addr, + hlen + cp->crc_size, + 
DMA_FROM_DEVICE); + dma_sync_single_for_device(&cp->pdev->dev, + page->dma_addr, + hlen + cp->crc_size, + DMA_FROM_DEVICE); skb_shinfo(skb)->nr_frags++; skb->data_len += hlen; @@ -2066,12 +2070,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, i = hlen; if (i == dlen) /* attach FCS */ i += cp->crc_size; - pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off, + i, DMA_FROM_DEVICE); addr = cas_page_map(page->buffer); memcpy(p, addr + off, i); - pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&cp->pdev->dev, + page->dma_addr + off, i, + DMA_FROM_DEVICE); cas_page_unmap(addr); if (p == (char *) skb->data) /* not split */ RX_USED_ADD(page, cp->mtu_stride); @@ -2083,14 +2088,16 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, p += hlen; i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]); page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)]; - pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr, - dlen + cp->crc_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&cp->pdev->dev, + page->dma_addr, + dlen + cp->crc_size, + DMA_FROM_DEVICE); addr = cas_page_map(page->buffer); memcpy(p, addr, dlen + cp->crc_size); - pci_dma_sync_single_for_device(cp->pdev, page->dma_addr, - dlen + cp->crc_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&cp->pdev->dev, + page->dma_addr, + dlen + cp->crc_size, + DMA_FROM_DEVICE); cas_page_unmap(addr); RX_USED_ADD(page, dlen + cp->crc_size); } @@ -2766,9 +2773,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring, nr_frags = skb_shinfo(skb)->nr_frags; len = skb_headlen(skb); - mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), len, - PCI_DMA_TODEVICE); + mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data), + offset_in_page(skb->data), len, DMA_TO_DEVICE); tentry = entry; tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len); @@ -3882,8 +3888,8 @@ static void cas_clean_txd(struct cas *cp, int ring) daddr = le64_to_cpu(txd[ent].buffer); dlen = CAS_VAL(TX_DESC_BUFLEN, le64_to_cpu(txd[ent].control)); - pci_unmap_page(cp->pdev, daddr, dlen, - PCI_DMA_TODEVICE); + dma_unmap_page(&cp->pdev->dev, daddr, dlen, + DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) { i++; @@ -4181,9 +4187,8 @@ static void cas_tx_tiny_free(struct cas *cp) if (!cp->tx_tiny_bufs[i]) continue; - pci_free_consistent(pdev, TX_TINY_BUF_BLOCK, - cp->tx_tiny_bufs[i], - cp->tx_tiny_dvma[i]); + dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK, + cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]); cp->tx_tiny_bufs[i] = NULL; } } @@ -4195,8 +4200,8 @@ static int cas_tx_tiny_alloc(struct cas *cp) for (i = 0; i < N_TX_RINGS; i++) { cp->tx_tiny_bufs[i] = - pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK, - &cp->tx_tiny_dvma[i]); + dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK, + &cp->tx_tiny_dvma[i], GFP_KERNEL); if (!cp->tx_tiny_bufs[i]) { cas_tx_tiny_free(cp); return -1; @@ -4958,10 +4963,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Configure DMA attributes. 
*/ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(64)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " "for consistent allocations\n"); @@ -4969,7 +4973,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, " "aborting\n"); @@ -5048,8 +5052,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cas_saturn_firmware_init(cp); cp->init_block = - pci_alloc_consistent(pdev, sizeof(struct cas_init_block), - &cp->block_dvma); + dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block), + &cp->block_dvma, GFP_KERNEL); if (!cp->init_block) { dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); goto err_out_iounmap; @@ -5109,8 +5113,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_out_free_consistent: - pci_free_consistent(pdev, sizeof(struct cas_init_block), - cp->init_block, cp->block_dvma); + dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); err_out_iounmap: mutex_lock(&cp->pm_mutex); @@ -5164,8 +5168,8 @@ static void cas_remove_one(struct pci_dev *pdev) cp->orig_cacheline_size); } #endif - pci_free_consistent(pdev, sizeof(struct cas_init_block), - cp->init_block, cp->block_dvma); + dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block), + cp->init_block, cp->block_dvma); pci_iounmap(pdev, cp->regs); free_netdev(dev); pci_release_regions(pdev); From a71d77e6be1e29ec809cc7c85d9594e7769406cd Mon Sep 17 00:00:00 2001 From: Priyaranjan Jha Date: Thu, 16 Jul 2020 12:12:34 -0700 Subject: [PATCH 1158/2056] tcp: fix segment accounting when DSACK range covers multiple segments Currently, while processing DSACK, we assume DSACK covers only one segment. This leads to significant underestimation of DSACKs with LRO/GRO. This patch fixes segment accounting with DSACK by estimating segment count from DSACK sequence range / MSS. Signed-off-by: Priyaranjan Jha Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: Eric Dumazet Signed-off-by: Yousuk Seung Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 80 ++++++++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index b03ca68d4111..5d6bbcb1e570 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -871,12 +871,41 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) return min_t(__u32, cwnd, tp->snd_cwnd_clamp); } +struct tcp_sacktag_state { + /* Timestamps for earliest and latest never-retransmitted segment + * that was SACKed. RTO needs the earliest RTT to stay conservative, + * but congestion control should still get an accurate delay signal. 
+ */ + u64 first_sackt; + u64 last_sackt; + u32 reord; + u32 sack_delivered; + int flag; + unsigned int mss_now; + struct rate_sample *rate; +}; + /* Take a notice that peer is sending D-SACKs */ -static void tcp_dsack_seen(struct tcp_sock *tp) +static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, + u32 end_seq, struct tcp_sacktag_state *state) { + u32 seq_len, dup_segs = 1; + + if (before(start_seq, end_seq)) { + seq_len = end_seq - start_seq; + if (seq_len > tp->mss_cache) + dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); + } + tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; tp->rack.dsack_seen = 1; - tp->dsack_dups++; + tp->dsack_dups += dup_segs; + + state->flag |= FLAG_DSACKING_ACK; + /* A spurious retransmission is delivered */ + state->sack_delivered += dup_segs; + + return dup_segs; } /* It's reordering when higher sequence was delivered (i.e. sacked) before @@ -1103,53 +1132,37 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, - u32 prior_snd_una) + u32 prior_snd_una, struct tcp_sacktag_state *state) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); - bool dup_sack = false; + u32 dup_segs; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { - dup_sack = true; - tcp_dsack_seen(tp); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); - if (!after(end_seq_0, end_seq_1) && - !before(start_seq_0, start_seq_1)) { - dup_sack = true; - tcp_dsack_seen(tp); - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPDSACKOFORECV); - } + if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1)) + return false; + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); + } else { + return false; } + dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); + /* D-SACK for already forgotten data... Do dumb counting. */ - if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && + if (tp->undo_marker && tp->undo_retrans > 0 && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) - tp->undo_retrans--; + tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs); - return dup_sack; + return true; } -struct tcp_sacktag_state { - u32 reord; - /* Timestamps for earliest and latest never-retransmitted segment - * that was SACKed. RTO needs the earliest RTT to stay conservative, - * but congestion control should still get an accurate delay signal. - */ - u64 first_sackt; - u64 last_sackt; - struct rate_sample *rate; - int flag; - unsigned int mss_now; - u32 sack_delivered; -}; - /* Check if skb is fully within the SACK block. In presence of GSO skbs, * the incoming SACK may not exactly match but we can find smaller MSS * aligned portion of it that matches. 
Therefore we might need to fragment @@ -1692,12 +1705,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, tcp_highest_sack_reset(sk); found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, - num_sacks, prior_snd_una); - if (found_dup_sack) { - state->flag |= FLAG_DSACKING_ACK; - /* A spurious retransmission is delivered */ - state->sack_delivered++; - } + num_sacks, prior_snd_una, state); /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can From e3a5a1e8b6548f5d37328e2d3571edc5c9e6d7c0 Mon Sep 17 00:00:00 2001 From: Priyaranjan Jha Date: Thu, 16 Jul 2020 12:12:35 -0700 Subject: [PATCH 1159/2056] tcp: add SNMP counter for no. of duplicate segments reported by DSACK There are two existing SNMP counters, TCPDSACKRecv and TCPDSACKOfoRecv, which are incremented depending on whether the DSACKed range is below the cumulative ACK sequence number or not. Unfortunately, these both implicitly assume each DSACK covers only one segment. This makes these counters unusable for estimating spurious retransmit rates, or real/non-spurious loss rate. This patch introduces a new SNMP counter, TCPDSACKRecvSegs, which tracks the estimated number of duplicate segments based on: (DSACKed sequence range) / MSS. This counter is usable for estimating spurious retransmit rates, or real/non-spurious loss rate. Signed-off-by: Priyaranjan Jha Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- include/uapi/linux/snmp.h | 1 + net/ipv4/proc.c | 1 + net/ipv4/tcp_input.c | 1 + 3 files changed, 3 insertions(+) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 7d91f4debc48..cee9f8e6fce3 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -287,6 +287,7 @@ enum LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */ LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */ LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */ + LINUX_MIB_TCPDSACKRECVSEGS, /* TCPDSACKRecvSegs */ __LINUX_MIB_MAX }; diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 75545a829a2b..1074df726ec0 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -292,6 +292,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY), SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH), SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH), + SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 5d6bbcb1e570..82906deb7874 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1153,6 +1153,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, } dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); + NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs); /* D-SACK for already forgotten data... Do dumb counting. */ if (tp->undo_marker && tp->undo_retrans > 0 && From e0c3f4c4fdc0648cffedd5e70f16be7ab45340b4 Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Fri, 17 Jul 2020 00:46:45 +0530 Subject: [PATCH 1160/2056] net: decnet: af_decnet: Simplify goto loop. Replace goto loop with while loop. Signed-off-by: Suraj Upadhyay Signed-off-by: David S. 
Miller --- net/decnet/af_decnet.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 0a46ea3bddd5..7d7ae2dd69b8 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -2134,14 +2134,11 @@ static struct sock *dn_socket_get_next(struct seq_file *seq, struct dn_iter_state *state = seq->private; n = sk_next(n); -try_again: - if (n) - goto out; - if (++state->bucket >= DN_SK_HASH_SIZE) - goto out; - n = sk_head(&dn_sk_hash[state->bucket]); - goto try_again; -out: + while (!n) { + if (++state->bucket >= DN_SK_HASH_SIZE) + break; + n = sk_head(&dn_sk_hash[state->bucket]); + } return n; } From 8d4f62ca198a330c566ce202a1ee11bca4741b3b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 16 Jul 2020 21:28:21 +0200 Subject: [PATCH 1161/2056] net: sungem: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'gem_init_one()', GFP_KERNEL can be used because it is a probe function and no lock is acquired. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/sungem.c | 53 ++++++++++++++++--------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 0e9d2cf7979b..eeb8518c8a84 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -670,7 +670,8 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st dma_addr = le64_to_cpu(txd->buffer); dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; - pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); + dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len, + DMA_TO_DEVICE); entry = NEXT_TX(entry); } @@ -809,16 +810,15 @@ static int gem_rx(struct gem *gp, int work_to_do) drops++; goto drop_it; } - pci_unmap_page(gp->pdev, dma_addr, - RX_BUF_ALLOC_SIZE(gp), - PCI_DMA_FROMDEVICE); + dma_unmap_page(&gp->pdev->dev, dma_addr, + RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); gp->rx_skbs[entry] = new_skb; skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); - rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, + rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev, virt_to_page(new_skb->data), offset_in_page(new_skb->data), RX_BUF_ALLOC_SIZE(gp), - PCI_DMA_FROMDEVICE)); + DMA_FROM_DEVICE)); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. */ @@ -833,9 +833,11 @@ static int gem_rx(struct gem *gp, int work_to_do) skb_reserve(copy_skb, 2); skb_put(copy_skb, len); - pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); - pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&gp->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); /* We'll reuse the original ring buffer. */ skb = copy_skb; @@ -1020,10 +1022,10 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, u32 len; len = skb->len; - mapping = pci_map_page(gp->pdev, + mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), - len, PCI_DMA_TODEVICE); + len, DMA_TO_DEVICE); ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; if (gem_intme(entry)) ctrl |= TXDCTRL_INTME; @@ -1046,9 +1048,10 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb, * Otherwise we could race with the device. 
*/ first_len = skb_headlen(skb); - first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data), + first_mapping = dma_map_page(&gp->pdev->dev, + virt_to_page(skb->data), offset_in_page(skb->data), - first_len, PCI_DMA_TODEVICE); + first_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { @@ -1574,9 +1577,9 @@ static void gem_clean_rings(struct gem *gp) if (gp->rx_skbs[i] != NULL) { skb = gp->rx_skbs[i]; dma_addr = le64_to_cpu(rxd->buffer); - pci_unmap_page(gp->pdev, dma_addr, + dma_unmap_page(&gp->pdev->dev, dma_addr, RX_BUF_ALLOC_SIZE(gp), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb_any(skb); gp->rx_skbs[i] = NULL; } @@ -1598,9 +1601,9 @@ static void gem_clean_rings(struct gem *gp) txd = &gb->txd[ent]; dma_addr = le64_to_cpu(txd->buffer); - pci_unmap_page(gp->pdev, dma_addr, + dma_unmap_page(&gp->pdev->dev, dma_addr, le64_to_cpu(txd->control_word) & - TXDCTRL_BUFSZ, PCI_DMA_TODEVICE); + TXDCTRL_BUFSZ, DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) i++; @@ -1637,11 +1640,11 @@ static void gem_init_rings(struct gem *gp) gp->rx_skbs[i] = skb; skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); - dma_addr = pci_map_page(gp->pdev, + dma_addr = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), RX_BUF_ALLOC_SIZE(gp), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rxd->buffer = cpu_to_le64(dma_addr); dma_wmb(); rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); @@ -2814,10 +2817,8 @@ static void gem_remove_one(struct pci_dev *pdev) cancel_work_sync(&gp->reset_task); /* Free resources */ - pci_free_consistent(pdev, - sizeof(struct gem_init_block), - gp->init_block, - gp->gblock_dvma); + dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block), + gp->init_block, gp->gblock_dvma); iounmap(gp->regs); pci_release_regions(pdev); free_netdev(dev); @@ -2873,10 +2874,10 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_GEM && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { pr_err("No usable DMA configuration, aborting\n"); goto err_disable_device; @@ -2965,8 +2966,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) * PAGE_SIZE aligned. */ gp->init_block = (struct gem_init_block *) - pci_alloc_consistent(pdev, sizeof(struct gem_init_block), - &gp->gblock_dvma); + dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), + &gp->gblock_dvma, GFP_KERNEL); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; From f4079e5d72ff973a8c2937e7aa9fc463dc6bcaa2 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 16 Jul 2020 22:48:02 +0200 Subject: [PATCH 1162/2056] net: alteon: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'ace_allocate_descriptors()' and 'ace_init()' GFP_KERNEL can be used because both functions are called from the probe function and no lock is acquired. 
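For context, the old pci_alloc_consistent() wrapper hard-coded GFP_ATOMIC internally, which is why the generated conversion leaves a GFP_ placeholder that must be filled in by hand. A minimal sketch of the resulting probe-path pattern follows; the function and variable names are illustrative only, not taken from acenic.c:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch, not part of the patch below. */
static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma_handle)
{
	/* Before: ring = pci_alloc_consistent(pdev, size, dma_handle);
	 * which passed GFP_ATOMIC under the hood.
	 */
	return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
}

The coccinelle rules used for the mechanical part of the conversion are: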
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/alteon/acenic.c | 114 +++++++++++++-------------- 1 file changed, 56 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 5d192d551623..99431c9a899b 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -642,9 +642,8 @@ static void acenic_remove_one(struct pci_dev *pdev) ringp = &ap->skb->rx_std_skbuff[i]; mapping = dma_unmap_addr(ringp, mapping); - pci_unmap_page(ap->pdev, mapping, - ACE_STD_BUFSIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&ap->pdev->dev, mapping, + ACE_STD_BUFSIZE, DMA_FROM_DEVICE); ap->rx_std_ring[i].size = 0; ap->skb->rx_std_skbuff[i].skb = NULL; @@ -662,9 +661,9 @@ static void acenic_remove_one(struct pci_dev *pdev) ringp = &ap->skb->rx_mini_skbuff[i]; mapping = dma_unmap_addr(ringp,mapping); - pci_unmap_page(ap->pdev, mapping, + dma_unmap_page(&ap->pdev->dev, mapping, ACE_MINI_BUFSIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); ap->rx_mini_ring[i].size = 0; ap->skb->rx_mini_skbuff[i].skb = NULL; @@ -681,9 +680,8 @@ static void acenic_remove_one(struct pci_dev *pdev) ringp = &ap->skb->rx_jumbo_skbuff[i]; mapping = dma_unmap_addr(ringp, mapping); - pci_unmap_page(ap->pdev, mapping, - ACE_JUMBO_BUFSIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&ap->pdev->dev, mapping, + ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE); ap->rx_jumbo_ring[i].size = 0; ap->skb->rx_jumbo_skbuff[i].skb = NULL; @@ -713,8 +711,8 @@ static void ace_free_descriptors(struct net_device *dev) RX_JUMBO_RING_ENTRIES + RX_MINI_RING_ENTRIES + RX_RETURN_RING_ENTRIES)); - pci_free_consistent(ap->pdev, size, ap->rx_std_ring, - ap->rx_ring_base_dma); + dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring, + ap->rx_ring_base_dma); ap->rx_std_ring = NULL; ap->rx_jumbo_ring = NULL; ap->rx_mini_ring = NULL; @@ -722,31 +720,30 @@ static void ace_free_descriptors(struct net_device *dev) } if (ap->evt_ring != NULL) { size = (sizeof(struct event) * EVT_RING_ENTRIES); - pci_free_consistent(ap->pdev, size, ap->evt_ring, - ap->evt_ring_dma); + dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring, + ap->evt_ring_dma); ap->evt_ring = NULL; } if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); - pci_free_consistent(ap->pdev, size, ap->tx_ring, - ap->tx_ring_dma); + dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring, + ap->tx_ring_dma); } ap->tx_ring = NULL; if (ap->evt_prd != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->evt_prd, ap->evt_prd_dma); + dma_free_coherent(&ap->pdev->dev, sizeof(u32), + (void *)ap->evt_prd, ap->evt_prd_dma); ap->evt_prd = NULL; } if (ap->rx_ret_prd != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->rx_ret_prd, - ap->rx_ret_prd_dma); + dma_free_coherent(&ap->pdev->dev, sizeof(u32), + (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma); ap->rx_ret_prd = NULL; } if (ap->tx_csm != NULL) { - pci_free_consistent(ap->pdev, sizeof(u32), - (void *)ap->tx_csm, ap->tx_csm_dma); + dma_free_coherent(&ap->pdev->dev, sizeof(u32), + (void *)ap->tx_csm, ap->tx_csm_dma); ap->tx_csm = NULL; } } @@ -763,8 +760,8 @@ static int ace_allocate_descriptors(struct net_device *dev) RX_MINI_RING_ENTRIES + RX_RETURN_RING_ENTRIES)); - ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, - &ap->rx_ring_base_dma); + ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size, + &ap->rx_ring_base_dma, GFP_KERNEL); if (ap->rx_std_ring == NULL) goto fail; @@ -774,7 +771,8 @@ static int 
ace_allocate_descriptors(struct net_device *dev) size = (sizeof(struct event) * EVT_RING_ENTRIES); - ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); + ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size, + &ap->evt_ring_dma, GFP_KERNEL); if (ap->evt_ring == NULL) goto fail; @@ -786,25 +784,25 @@ static int ace_allocate_descriptors(struct net_device *dev) if (!ACE_IS_TIGON_I(ap)) { size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); - ap->tx_ring = pci_alloc_consistent(ap->pdev, size, - &ap->tx_ring_dma); + ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size, + &ap->tx_ring_dma, GFP_KERNEL); if (ap->tx_ring == NULL) goto fail; } - ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->evt_prd_dma); + ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32), + &ap->evt_prd_dma, GFP_KERNEL); if (ap->evt_prd == NULL) goto fail; - ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->rx_ret_prd_dma); + ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32), + &ap->rx_ret_prd_dma, GFP_KERNEL); if (ap->rx_ret_prd == NULL) goto fail; - ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), - &ap->tx_csm_dma); + ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32), + &ap->tx_csm_dma, GFP_KERNEL); if (ap->tx_csm == NULL) goto fail; @@ -830,8 +828,8 @@ static void ace_init_cleanup(struct net_device *dev) ace_free_descriptors(dev); if (ap->info) - pci_free_consistent(ap->pdev, sizeof(struct ace_info), - ap->info, ap->info_dma); + dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info), + ap->info, ap->info_dma); kfree(ap->skb); kfree(ap->trace_buf); @@ -1129,9 +1127,9 @@ static int ace_init(struct net_device *dev) /* * Configure DMA attributes. */ - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { ap->pci_using_dac = 1; - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { ap->pci_using_dac = 0; } else { ecode = -ENODEV; @@ -1143,8 +1141,8 @@ static int ace_init(struct net_device *dev) * and the control blocks for the transmit and receive rings * as they need to be setup once and for all. 
*/ - if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info), - &ap->info_dma))) { + if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info), + &ap->info_dma, GFP_KERNEL))) { ecode = -EAGAIN; goto init_error; } @@ -1646,10 +1644,10 @@ static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) if (!skb) break; - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + mapping = dma_map_page(&ap->pdev->dev, + virt_to_page(skb->data), offset_in_page(skb->data), - ACE_STD_BUFSIZE, - PCI_DMA_FROMDEVICE); + ACE_STD_BUFSIZE, DMA_FROM_DEVICE); ap->skb->rx_std_skbuff[idx].skb = skb; dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], mapping, mapping); @@ -1707,10 +1705,10 @@ static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs) if (!skb) break; - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + mapping = dma_map_page(&ap->pdev->dev, + virt_to_page(skb->data), offset_in_page(skb->data), - ACE_MINI_BUFSIZE, - PCI_DMA_FROMDEVICE); + ACE_MINI_BUFSIZE, DMA_FROM_DEVICE); ap->skb->rx_mini_skbuff[idx].skb = skb; dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], mapping, mapping); @@ -1763,10 +1761,10 @@ static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs) if (!skb) break; - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), + mapping = dma_map_page(&ap->pdev->dev, + virt_to_page(skb->data), offset_in_page(skb->data), - ACE_JUMBO_BUFSIZE, - PCI_DMA_FROMDEVICE); + ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE); ap->skb->rx_jumbo_skbuff[idx].skb = skb; dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], mapping, mapping); @@ -1977,10 +1975,8 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm) skb = rip->skb; rip->skb = NULL; - pci_unmap_page(ap->pdev, - dma_unmap_addr(rip, mapping), - mapsize, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping), + mapsize, DMA_FROM_DEVICE); skb_put(skb, retdesc->size); /* @@ -2046,9 +2042,10 @@ static inline void ace_tx_int(struct net_device *dev, skb = info->skb; if (dma_unmap_len(info, maplen)) { - pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_page(&ap->pdev->dev, + dma_unmap_addr(info, mapping), dma_unmap_len(info, maplen), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dma_unmap_len_set(info, maplen, 0); } @@ -2337,9 +2334,10 @@ static int ace_close(struct net_device *dev) } else memset(ap->tx_ring + i, 0, sizeof(struct tx_desc)); - pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), + dma_unmap_page(&ap->pdev->dev, + dma_unmap_addr(info, mapping), dma_unmap_len(info, maplen), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dma_unmap_len_set(info, maplen, 0); } if (skb) { @@ -2369,9 +2367,9 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, dma_addr_t mapping; struct tx_ring_info *info; - mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), - offset_in_page(skb->data), - skb->len, PCI_DMA_TODEVICE); + mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data), + offset_in_page(skb->data), skb->len, + DMA_TO_DEVICE); info = ap->skb->tx_skbuff + idx; info->skb = tail; From 721dab2b56535b06f9d5756cf27d875623cb5a60 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 16 Jul 2020 22:52:42 +0200 Subject: [PATCH 1163/2056] net: alteon: Avoid some useless memset Avoid a memset after a call to 'dma_alloc_coherent()'. This is useless since commit 518a2f1925c3 ("dma-mapping: zero memory returned from dma_alloc_*") Replace a kmalloc+memset with a corresponding kzalloc. 
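Both simplifications rely on the memory already coming back zeroed. A short sketch of the pattern, with illustrative variable names rather than the exact acenic code:

	/* kzalloc() is kmalloc() + memset(0), so the explicit clear goes away. */
	skb_info = kzalloc(sizeof(*skb_info), GFP_KERNEL);

	/* Since commit 518a2f1925c3, dma_alloc_coherent() also returns zeroed
	 * memory, so a memset() right after it is redundant as well.
	 */
	info = dma_alloc_coherent(&pdev->dev, sizeof(*info), &info_dma, GFP_KERNEL);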
Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/alteon/acenic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 99431c9a899b..ac86fcae1582 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c @@ -1151,7 +1151,7 @@ static int ace_init(struct net_device *dev) /* * Get the memory for the skb rings. */ - if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) { + if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) { ecode = -EAGAIN; goto init_error; } @@ -1172,9 +1172,6 @@ static int ace_init(struct net_device *dev) ap->last_mini_rx = 0; #endif - memset(ap->info, 0, sizeof(struct ace_info)); - memset(ap->skb, 0, sizeof(struct ace_skb)); - ecode = ace_load_firmware(dev); if (ecode) goto init_error; From 18c7015cc65ab62a161291b863225db5cfc717a4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 17 Jul 2020 13:59:58 -0700 Subject: [PATCH 1164/2056] net: bnxt: don't complain if TC flower can't be supported The fact that NETIF_F_HW_TC is not set should be a sufficient indication to the user that TC offloads are not supported. No need to bother users of older firmware versions with pointless warnings on every boot. Also, since the support is optional, bnxt_init_tc() should not return an error in case FW is old, similarly to how error is not returned when CONFIG_BNXT_FLOWER_OFFLOAD is not set. With that we can add an error message to the caller, to warn about actual unexpected failures. Signed-off-by: Jakub Kicinski Reviewed-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++++- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 7 ++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 0911eb3b8007..a7e5ebe2d68a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -12086,7 +12086,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err_pci_clean; } } - bnxt_init_tc(bp); + rc = bnxt_init_tc(bp); + if (rc) + netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n", + rc); } bnxt_dl_register(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index e82e5cf64d61..5e4429b14b8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -2000,11 +2000,8 @@ int bnxt_init_tc(struct bnxt *bp) struct bnxt_tc_info *tc_info; int rc; - if (bp->hwrm_spec_code < 0x10803) { - netdev_warn(bp->dev, - "Firmware does not support TC flower offload.\n"); - return -ENOTSUPP; - } + if (bp->hwrm_spec_code < 0x10803) + return 0; tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL); if (!tc_info) From 5686b10978c580f374b09c9092c073dad83a1b23 Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Fri, 17 Jul 2020 18:23:04 +0800 Subject: [PATCH 1165/2056] net: bna: Remove unused variable 't' Gcc report warning as follows: drivers/net/ethernet/brocade/bna/bfa_ioc.c:1538:6: warning: variable 't' set but not used [-Wunused-but-set-variable] 1538 | u32 t; | ^ After commit c107ba171f3d ("bna: Firmware Patch Simplification"), 't' is never used, so removing it to avoid build warning. Reported-by: Hulk Robot Signed-off-by: Zhang Changzhong Signed-off-by: David S. 
Miller --- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index e17bfc87da90..49358d42a0e2 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1535,7 +1535,6 @@ static int bfa_flash_fifo_flush(void __iomem *pci_bar) { u32 i; - u32 t; union bfa_flash_dev_status_reg dev_status; dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); @@ -1545,7 +1544,7 @@ bfa_flash_fifo_flush(void __iomem *pci_bar) /* fifo counter in terms of words */ for (i = 0; i < dev_status.r.fifo_cnt; i++) - t = readl(pci_bar + FLI_RDDATA_REG); + readl(pci_bar + FLI_RDDATA_REG); /* Check the device status. It may take some time. */ for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { From eacc43d2c3b87e1cdffa005b4abd1562c5c8f8a3 Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Fri, 17 Jul 2020 18:33:30 +0800 Subject: [PATCH 1166/2056] net: ethernet: et131x: Remove unused variable 'pm_csr' Gcc report warning as follows: drivers/net/ethernet/agere/et131x.c:953:6: warning: variable 'pm_csr' set but not used [-Wunused-but-set-variable] 953 | u32 pm_csr; | ^~~~~~ drivers/net/ethernet/agere/et131x.c:1002:6:warning: variable 'pm_csr' set but not used [-Wunused-but-set-variable] 1002 | u32 pm_csr; | ^~~~~~ drivers/net/ethernet/agere/et131x.c:3446:8: warning: variable 'pm_csr' set but not used [-Wunused-but-set-variable] 3446 | u32 pm_csr; | ^~~~~~ After commit 38df6492eb51 ("et131x: Add PCIe gigabit ethernet driver et131x to drivers/net"), 'pm_csr' is never used in these functions, so removing it to avoid build warning. Reported-by: Hulk Robot Signed-off-by: Zhang Changzhong Acked-by: Mark Einon Signed-off-by: David S. Miller --- drivers/net/ethernet/agere/et131x.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 865892c1f23f..8806e1e4c20f 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -950,7 +950,6 @@ static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) u32 hash2 = 0; u32 hash3 = 0; u32 hash4 = 0; - u32 pm_csr; /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision * the multi-cast LIST. 
If it is NOT specified, (and "ALL" is not @@ -984,7 +983,7 @@ static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) } /* Write out the new hash to the device */ - pm_csr = readl(&adapter->regs->global.pm_csr); + readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(hash1, &rxmac->multi_hash1); writel(hash2, &rxmac->multi_hash2); @@ -999,7 +998,6 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) u32 uni_pf1; u32 uni_pf2; u32 uni_pf3; - u32 pm_csr; /* Set up unicast packet filter reg 3 to be the first two octets of * the MAC address for both address @@ -1025,7 +1023,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | adapter->addr[5]; - pm_csr = readl(&adapter->regs->global.pm_csr); + readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(uni_pf1, &rxmac->uni_pf_addr1); writel(uni_pf2, &rxmac->uni_pf_addr2); @@ -3443,12 +3441,10 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) * send a pause packet, otherwise just exit */ if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) { - u32 pm_csr; - /* Tell the device to send a pause packet via the back * pressure register (bp req and bp xon/xoff) */ - pm_csr = readl(&iomem->global.pm_csr); + readl(&iomem->global.pm_csr); if (!et1310_in_phy_coma(adapter)) writel(3, &iomem->txmac.bp_ctrl); } From 11f3c1f583ea3f43cc244dba12ff77943de3edd9 Mon Sep 17 00:00:00 2001 From: Mark Einon Date: Fri, 17 Jul 2020 14:21:35 +0100 Subject: [PATCH 1167/2056] net: ethernet: et131x: Remove redundant register read Following the removal of an unused variable assignment (remove unused variable 'pm_csr') the associated register read can also go, as the read also occurs in the subsequent et1310_in_phy_coma() call. Signed-off-by: Mark Einon Signed-off-by: David S. Miller --- drivers/net/ethernet/agere/et131x.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 8806e1e4c20f..41f8821f792d 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -983,7 +983,6 @@ static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) } /* Write out the new hash to the device */ - readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(hash1, &rxmac->multi_hash1); writel(hash2, &rxmac->multi_hash2); @@ -1023,7 +1022,6 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) | adapter->addr[5]; - readl(&adapter->regs->global.pm_csr); if (!et1310_in_phy_coma(adapter)) { writel(uni_pf1, &rxmac->uni_pf_addr1); writel(uni_pf2, &rxmac->uni_pf_addr2); @@ -3444,7 +3442,6 @@ static irqreturn_t et131x_isr(int irq, void *dev_id) /* Tell the device to send a pause packet via the back * pressure register (bp req and bp xon/xoff) */ - readl(&iomem->global.pm_csr); if (!et1310_in_phy_coma(adapter)) writel(3, &iomem->txmac.bp_ctrl); } From b567edbfc85ac375181862808928aeb381560c68 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Fri, 17 Jul 2020 21:01:46 +0300 Subject: [PATCH 1168/2056] net: atlantic: align return value of ver_match function with function name This patch aligns the return value of hw_atl_utils_ver_match function with its name. Change the return type to bool, because it's better aligned with the actual usage. 
Return true when the version matches, false otherwise. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 29 +++++++++---------- .../aquantia/atlantic/hw_atl/hw_atl_utils.h | 2 +- .../aquantia/atlantic/hw_atl2/hw_atl2_utils.c | 3 +- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index bf4c41cc312b..22f68e4a638c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -72,14 +72,11 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) self->fw_ver_actual = hw_atl_utils_get_fw_version(self); - if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, - self->fw_ver_actual) == 0) { + if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) { *fw_ops = &aq_fw_1x_ops; - } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, - self->fw_ver_actual) == 0) { + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) { *fw_ops = &aq_fw_2x_ops; - } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, - self->fw_ver_actual) == 0) { + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) { *fw_ops = &aq_fw_2x_ops; } else { aq_pr_err("Bad FW version detected: %x\n", @@ -262,9 +259,9 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) /* FW 1.x may bootup in an invalid POWER state (WOL feature). * We should work around this by forcing its state back to DEINIT */ - if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, - aq_hw_read_reg(self, - HW_ATL_MPI_FW_VERSION))) { + if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, + aq_hw_read_reg(self, + HW_ATL_MPI_FW_VERSION))) { int err = 0; hw_atl_utils_mpi_set_state(self, MPI_DEINIT); @@ -434,20 +431,20 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p, p, cnt, MCP_AREA_SETTINGS); } -int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) +bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual) { const u32 dw_major_mask = 0xff000000U; const u32 dw_minor_mask = 0x00ffffffU; - int err = 0; + bool ver_match; - err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0; - if (err < 0) + ver_match = (dw_major_mask & (ver_expected ^ ver_actual)) ? false : true; + if (!ver_match) goto err_exit; - err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? - -EOPNOTSUPP : 0; + ver_match = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ? 
+ false : true; err_exit: - return err; + return ver_match; } static int hw_atl_utils_init_ucp(struct aq_hw_s *self, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 0b4b54fc1de0..f5901f8e3907 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -635,7 +635,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, struct hw_atl_utils_fw_rpc **rpc); -int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); +bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); extern const struct aq_fw_ops aq_fw_1x_ops; extern const struct aq_fw_ops aq_fw_2x_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c index f3766780e975..0fe6257d9c08 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c @@ -36,8 +36,7 @@ int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) self->fw_ver_actual = hw_atl2_utils_get_fw_version(self); - if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X, - self->fw_ver_actual) == 0) { + if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X, self->fw_ver_actual)) { *fw_ops = &aq_a2_fw_ops; } else { aq_pr_err("Bad FW version detected: %x, but continue\n", From 0044b1e1470aa62eeabb24983e2ff5433a68666a Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Fri, 17 Jul 2020 21:01:47 +0300 Subject: [PATCH 1169/2056] net: atlantic: add support for FW 4.x This patch adds support for FW 4.x, which is about to get into the production for some products. 4.x is mostly compatible with 3.x, save for soft reset, which requires the acquisition of 2 additional semaphores. Other differences (e.g. absence of PTP support) are handled via capabilities. Note: 4.x targets specific products only. 3.x is still the main firmware branch, which should be used by most users (at least for now). Signed-off-by: Dmitry Bogdanov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../aquantia/atlantic/hw_atl/hw_atl_llh.c | 17 +++++++-- .../aquantia/atlantic/hw_atl/hw_atl_llh.h | 10 ++++-- .../atlantic/hw_atl/hw_atl_llh_internal.h | 11 ++++-- .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 35 +++++++++++++++---- 4 files changed, 58 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index d775b23025c1..9c3debae425f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
*/ /* File hw_atl_llh.c: Definitions of bitfield and register access functions for @@ -1724,6 +1725,16 @@ u32 hw_atl_sem_mdio_get(struct aq_hw_s *self) return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO); } +u32 hw_atl_sem_reset1_get(struct aq_hw_s *self) +{ + return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET1); +} + +u32 hw_atl_sem_reset2_get(struct aq_hw_s *self) +{ + return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET2); +} + u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp) { return aq_hw_read_reg(aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 61a6f70c51cd..f0954711df24 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File hw_atl_llh.h: Declarations of bitfield and register access functions for @@ -838,6 +839,9 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self); /* get global microprocessor mdio semaphore */ u32 hw_atl_sem_mdio_get(struct aq_hw_s *self); +u32 hw_atl_sem_reset1_get(struct aq_hw_s *self); +u32 hw_atl_sem_reset2_get(struct aq_hw_s *self); + /* get global microprocessor scratch pad register */ u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 7430ff025134..ee11cb88325e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
*/ /* File hw_atl_llh_internal.h: Preprocessor definitions @@ -2837,7 +2838,11 @@ /* Default value of bitfield MDIO Address [F:0] */ #define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0 +#define HW_ATL_MIF_RESET_TIMEOUT_ADR 0x00000348 + #define HW_ATL_FW_SM_MDIO 0x0U #define HW_ATL_FW_SM_RAM 0x2U +#define HW_ATL_FW_SM_RESET1 0x3U +#define HW_ATL_FW_SM_RESET2 0x4U #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 22f68e4a638c..cacab3352cb8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -46,6 +46,7 @@ #define HW_ATL_FW_VER_1X 0x01050006U #define HW_ATL_FW_VER_2X 0x02000000U #define HW_ATL_FW_VER_3X 0x03000000U +#define HW_ATL_FW_VER_4X 0x04000000U #define FORCE_FLASHLESS 0 @@ -78,6 +79,8 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) *fw_ops = &aq_fw_2x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) { *fw_ops = &aq_fw_2x_ops; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) { + *fw_ops = &aq_fw_2x_ops; } else { aq_pr_err("Bad FW version detected: %x\n", self->fw_ver_actual); @@ -236,6 +239,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) int hw_atl_utils_soft_reset(struct aq_hw_s *self) { + int ver = hw_atl_utils_get_fw_version(self); u32 boot_exit_code = 0; u32 val; int k; @@ -256,14 +260,12 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) self->rbl_enabled = (boot_exit_code != 0); - /* FW 1.x may bootup in an invalid POWER state (WOL feature). - * We should work around this by forcing its state back to DEINIT - */ - if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, - aq_hw_read_reg(self, - HW_ATL_MPI_FW_VERSION))) { + if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, ver)) { int err = 0; + /* FW 1.x may bootup in an invalid POWER state (WOL feature). + * We should work around this by forcing its state back to DEINIT + */ hw_atl_utils_mpi_set_state(self, MPI_DEINIT); err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state, self, val, @@ -272,6 +274,27 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) 10, 10000U); if (err) return err; + } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, ver)) { + u64 sem_timeout = aq_hw_read_reg(self, HW_ATL_MIF_RESET_TIMEOUT_ADR); + + /* Acquire 2 semaphores before issuing reset for FW 4.x */ + if (sem_timeout > 3000) + sem_timeout = 3000; + sem_timeout = sem_timeout * 1000; + + if (sem_timeout != 0) { + int err; + + err = readx_poll_timeout_atomic(hw_atl_sem_reset1_get, self, val, + val == 1U, 1U, sem_timeout); + if (err) + aq_pr_err("reset sema1 timeout"); + + err = readx_poll_timeout_atomic(hw_atl_sem_reset2_get, self, val, + val == 1U, 1U, sem_timeout); + if (err) + aq_pr_err("reset sema2 timeout"); + } } if (self->rbl_enabled) From a050d82f5b04e585a6780b2a71faf1bd65d73928 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Fri, 17 Jul 2020 20:21:48 +0200 Subject: [PATCH 1170/2056] ne2k-pci: Use netif_msg_init to initialize msg_enable bits Use netif_msg_enable() to process param settings. Signed-off-by: Armin Wolf Signed-off-by: David S. 
Miller --- drivers/net/ethernet/8390/ne2k-pci.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index e500f0c05725..bc6edb3f1af3 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -62,7 +62,10 @@ static int options[MAX_UNITS]; #include "8390.h" -static u32 ne2k_msg_enable; +static int ne2k_msg_enable; + +static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR); #if defined(__powerpc__) #define inl_le(addr) le32_to_cpu(inl(addr)) @@ -74,7 +77,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); -module_param_named(msg_enable, ne2k_msg_enable, uint, 0444); +module_param_named(msg_enable, ne2k_msg_enable, int, 0444); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); @@ -282,7 +285,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev, } dev->netdev_ops = &ne2k_netdev_ops; ei_local = netdev_priv(dev); - ei_local->msg_enable = ne2k_msg_enable; + ei_local->msg_enable = netif_msg_init(ne2k_msg_enable, default_msg_level); SET_NETDEV_DEV(dev, &pdev->dev); From ce3aa9cc5109363099b7c4ac82e2c9768afcaf31 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:22 +0200 Subject: [PATCH 1171/2056] bpf, netns: Handle multiple link attachments Extend the BPF netns link callbacks to rebuild (grow/shrink) or update the prog_array at given position when link gets attached/updated/released. This let's us lift the limit of having just one link attached for the new attach type introduced by subsequent patch. No functional changes intended. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-2-jakub@cloudflare.com --- include/linux/bpf.h | 3 ++ kernel/bpf/core.c | 55 +++++++++++++++++++++++ kernel/bpf/net_namespace.c | 90 ++++++++++++++++++++++++++++++++++---- 3 files changed, 139 insertions(+), 9 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 54ad426dbea1..c8c9eabcd106 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -928,6 +928,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog); int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 9df4cc9a2907..7be02e555ab9 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1958,6 +1958,61 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array, } } +/** + * bpf_prog_array_delete_safe_at() - Replaces the program at the given + * index into the program array with + * a dummy no-op program. + * @array: a bpf_prog_array + * @index: the index of the program to replace + * + * Skips over dummy programs, by not counting them, when calculating + * the the position of the program to replace. + * + * Return: + * * 0 - Success + * * -EINVAL - Invalid index value. Must be a non-negative integer. 
+ * * -ENOENT - Index out of range + */ +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index) +{ + return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog); +} + +/** + * bpf_prog_array_update_at() - Updates the program at the given index + * into the program array. + * @array: a bpf_prog_array + * @index: the index of the program to update + * @prog: the program to insert into the array + * + * Skips over dummy programs, by not counting them, when calculating + * the position of the program to update. + * + * Return: + * * 0 - Success + * * -EINVAL - Invalid index value. Must be a non-negative integer. + * * -ENOENT - Index out of range + */ +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog) +{ + struct bpf_prog_array_item *item; + + if (unlikely(index < 0)) + return -EINVAL; + + for (item = array->items; item->prog; item++) { + if (item->prog == &dummy_bpf_prog.prog) + continue; + if (!index) { + WRITE_ONCE(item->prog, prog); + return 0; + } + index--; + } + return -ENOENT; +} + int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 310241ca7991..e9c8e26ac8f2 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -36,12 +36,50 @@ static void netns_bpf_run_array_detach(struct net *net, bpf_prog_array_free(run_array); } +static int link_index(struct net *net, enum netns_bpf_attach_type type, + struct bpf_netns_link *link) +{ + struct bpf_netns_link *pos; + int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + if (pos == link) + return i; + i++; + } + return -ENOENT; +} + +static int link_count(struct net *net, enum netns_bpf_attach_type type) +{ + struct list_head *pos; + int i = 0; + + list_for_each(pos, &net->bpf.links[type]) + i++; + return i; +} + +static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type, + struct bpf_prog_array *prog_array) +{ + struct bpf_netns_link *pos; + unsigned int i = 0; + + list_for_each_entry(pos, &net->bpf.links[type], node) { + prog_array->items[i].prog = pos->link.prog; + i++; + } +} + static void bpf_netns_link_release(struct bpf_link *link) { struct bpf_netns_link *net_link = container_of(link, struct bpf_netns_link, link); enum netns_bpf_attach_type type = net_link->netns_type; + struct bpf_prog_array *old_array, *new_array; struct net *net; + int cnt, idx; mutex_lock(&netns_bpf_mutex); @@ -53,9 +91,27 @@ static void bpf_netns_link_release(struct bpf_link *link) if (!net) goto out_unlock; - netns_bpf_run_array_detach(net, type); + /* Remember link position in case of safe delete */ + idx = link_index(net, type, net_link); list_del(&net_link->node); + cnt = link_count(net, type); + if (!cnt) { + netns_bpf_run_array_detach(net, type); + goto out_unlock; + } + + old_array = rcu_dereference_protected(net->bpf.run_array[type], + lockdep_is_held(&netns_bpf_mutex)); + new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL); + if (!new_array) { + WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx)); + goto out_unlock; + } + fill_prog_array(net, type, new_array); + rcu_assign_pointer(net->bpf.run_array[type], new_array); + bpf_prog_array_free(old_array); + out_unlock: mutex_unlock(&netns_bpf_mutex); } @@ -77,7 +133,7 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, enum netns_bpf_attach_type type = net_link->netns_type; struct bpf_prog_array *run_array; struct net *net; - int 
ret = 0; + int idx, ret; if (old_prog && old_prog != link->prog) return -EPERM; @@ -95,7 +151,10 @@ static int bpf_netns_link_update_prog(struct bpf_link *link, run_array = rcu_dereference_protected(net->bpf.run_array[type], lockdep_is_held(&netns_bpf_mutex)); - WRITE_ONCE(run_array->items[0].prog, new_prog); + idx = link_index(net, type, net_link); + ret = bpf_prog_array_update_at(run_array, idx, new_prog); + if (ret) + goto out_unlock; old_prog = xchg(&link->prog, new_prog); bpf_prog_put(old_prog); @@ -309,18 +368,28 @@ int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) return ret; } +static int netns_bpf_max_progs(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_FLOW_DISSECTOR: + return 1; + default: + return 0; + } +} + static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, enum netns_bpf_attach_type type) { struct bpf_netns_link *net_link = container_of(link, struct bpf_netns_link, link); struct bpf_prog_array *run_array; - int err; + int cnt, err; mutex_lock(&netns_bpf_mutex); - /* Allow attaching only one prog or link for now */ - if (!list_empty(&net->bpf.links[type])) { + cnt = link_count(net, type); + if (cnt >= netns_bpf_max_progs(type)) { err = -E2BIG; goto out_unlock; } @@ -341,16 +410,19 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, if (err) goto out_unlock; - run_array = bpf_prog_array_alloc(1, GFP_KERNEL); + run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL); if (!run_array) { err = -ENOMEM; goto out_unlock; } - run_array->items[0].prog = link->prog; - rcu_assign_pointer(net->bpf.run_array[type], run_array); list_add_tail(&net_link->node, &net->bpf.links[type]); + fill_prog_array(net, type, run_array); + run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array, + lockdep_is_held(&netns_bpf_mutex)); + bpf_prog_array_free(run_array); + out_unlock: mutex_unlock(&netns_bpf_mutex); return err; From e9ddbb7707ff5891616240026062b8c1e29864ca Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:23 +0200 Subject: [PATCH 1172/2056] bpf: Introduce SK_LOOKUP program type with a dedicated attach point Add a new program type BPF_PROG_TYPE_SK_LOOKUP with a dedicated attach type BPF_SK_LOOKUP. The new program kind is to be invoked by the transport layer when looking up a listening socket for a new connection request for connection oriented protocols, or when looking up an unconnected socket for a packet for connection-less protocols. When called, SK_LOOKUP BPF program can select a socket that will receive the packet. This serves as a mechanism to overcome the limits of what bind() API allows to express. Two use-cases driving this work are: (1) steer packets destined to an IP range, on fixed port to a socket 192.0.2.0/24, port 80 -> NGINX socket (2) steer packets destined to an IP address, on any port to a socket 198.51.100.1, any port -> L7 proxy socket In its run-time context program receives information about the packet that triggered the socket lookup. Namely IP version, L4 protocol identifier, and address 4-tuple. Context can be further extended to include ingress interface identifier. To select a socket BPF program fetches it from a map holding socket references, like SOCKMAP or SOCKHASH, and calls bpf_sk_assign(ctx, sk, ...) helper to record the selection. Transport layer then uses the selected socket as a result of socket lookup. In its basic form, SK_LOOKUP acts as a filter and hence must return either SK_PASS or SK_DROP. 
If the program returns with SK_PASS, transport should look for a socket to receive the packet, or use the one selected by the program if available, while SK_DROP informs the transport layer that the lookup should fail. This patch only enables the user to attach an SK_LOOKUP program to a network namespace. Subsequent patches hook it up to run on local delivery path in ipv4 and ipv6 stacks. Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-3-jakub@cloudflare.com --- include/linux/bpf-netns.h | 3 + include/linux/bpf.h | 1 + include/linux/bpf_types.h | 2 + include/linux/filter.h | 17 ++++ include/uapi/linux/bpf.h | 77 ++++++++++++++++ kernel/bpf/net_namespace.c | 5 ++ kernel/bpf/syscall.c | 9 ++ kernel/bpf/verifier.c | 13 ++- net/core/filter.c | 180 +++++++++++++++++++++++++++++++++++++ scripts/bpf_helpers_doc.py | 9 +- 10 files changed, 312 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h index 47d5b0c708c9..722f799c1a2e 100644 --- a/include/linux/bpf-netns.h +++ b/include/linux/bpf-netns.h @@ -8,6 +8,7 @@ enum netns_bpf_attach_type { NETNS_BPF_INVALID = -1, NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP, MAX_NETNS_BPF_ATTACH_TYPE }; @@ -17,6 +18,8 @@ to_netns_bpf_attach_type(enum bpf_attach_type attach_type) switch (attach_type) { case BPF_FLOW_DISSECTOR: return NETNS_BPF_FLOW_DISSECTOR; + case BPF_SK_LOOKUP: + return NETNS_BPF_SK_LOOKUP; default: return NETNS_BPF_INVALID; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c8c9eabcd106..adb16bdc5f0a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -249,6 +249,7 @@ enum bpf_arg_type { ARG_PTR_TO_INT, /* pointer to int */ ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ + ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a18ae82a298a..a52a5688418e 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -64,6 +64,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, #ifdef CONFIG_INET BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, struct sk_reuseport_md, struct sk_reuseport_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup, + struct bpf_sk_lookup, struct bpf_sk_lookup_kern) #endif #if defined(CONFIG_BPF_JIT) BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, diff --git a/include/linux/filter.h b/include/linux/filter.h index 0b0144752d78..fa1ea12ad2cd 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1278,4 +1278,21 @@ struct bpf_sockopt_kern { s32 retval; }; +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + __be16 sport; + u16 dport; + struct sock *selected_sk; + bool no_reuseport; +}; + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7ac3992dacfe..54d0c886e3ba 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -189,6 +189,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, }; enum 
bpf_attach_type { @@ -228,6 +229,7 @@ enum bpf_attach_type { BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, + BPF_SK_LOOKUP, __MAX_BPF_ATTACH_TYPE }; @@ -3069,6 +3071,10 @@ union bpf_attr { * * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SCHED_CLS** and + * **BPF_PROG_TYPE_SCHED_ACT** programs. + * * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, * will cause *skb* to be delivered to the specified socket. @@ -3094,6 +3100,56 @@ union bpf_attr { * **-ESOCKTNOSUPPORT** if the socket type is not supported * (reuseport). * + * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) + * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. + * + * Select the *sk* as a result of a socket lookup. + * + * For the operation to succeed passed socket must be compatible + * with the packet description provided by the *ctx* object. + * + * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must + * be an exact match. While IP family (**AF_INET** or + * **AF_INET6**) must be compatible, that is IPv6 sockets + * that are not v6-only can be selected for IPv4 packets. + * + * Only TCP listeners and UDP unconnected sockets can be + * selected. *sk* can also be NULL to reset any previous + * selection. + * + * *flags* argument can combination of following values: + * + * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous + * socket selection, potentially done by a BPF program + * that ran before us. + * + * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip + * load-balancing within reuseport group for the socket + * being selected. + * + * On success *ctx->sk* will point to the selected socket. + * + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EAFNOSUPPORT** if socket family (*sk->family*) is + * not compatible with packet family (*ctx->family*). + * + * * **-EEXIST** if socket has been already selected, + * potentially by another program, and + * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. + * + * * **-EINVAL** if unsupported flags were specified. + * + * * **-EPROTOTYPE** if socket L4 protocol + * (*sk->protocol*) doesn't match packet protocol + * (*ctx->protocol*). + * + * * **-ESOCKTNOSUPPORT** if socket is not in allowed + * state (TCP listening or UDP unconnected). + * * u64 bpf_ktime_get_boot_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. @@ -3607,6 +3663,12 @@ enum { BPF_RINGBUF_HDR_SZ = 8, }; +/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */ +enum { + BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), + BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), +}; + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, @@ -4349,4 +4411,19 @@ struct bpf_pidns_info { __u32 pid; __u32 tgid; }; + +/* User accessible data for SK_LOOKUP programs. Add new fields at the end. 
*/ +struct bpf_sk_lookup { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + + __u32 family; /* Protocol family (AF_INET, AF_INET6) */ + __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ + __u32 remote_ip4; /* Network byte order */ + __u32 remote_ip6[4]; /* Network byte order */ + __u32 remote_port; /* Network byte order */ + __u32 local_ip4; /* Network byte order */ + __u32 local_ip6[4]; /* Network byte order */ + __u32 local_port; /* Host byte order */ +}; + #endif /* _UAPI__LINUX_BPF_H__ */ diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index e9c8e26ac8f2..38b368bccda2 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -373,6 +373,8 @@ static int netns_bpf_max_progs(enum netns_bpf_attach_type type) switch (type) { case NETNS_BPF_FLOW_DISSECTOR: return 1; + case NETNS_BPF_SK_LOOKUP: + return 64; default: return 0; } @@ -403,6 +405,9 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, case NETNS_BPF_FLOW_DISSECTOR: err = flow_dissector_bpf_prog_attach_check(net, link->prog); break; + case NETNS_BPF_SK_LOOKUP: + err = 0; /* nothing to check */ + break; default: err = -EINVAL; break; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 7ea9dfbebd8c..d07417d17712 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2022,6 +2022,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, default: return -EINVAL; } + case BPF_PROG_TYPE_SK_LOOKUP: + if (expected_attach_type == BPF_SK_LOOKUP) + return 0; + return -EINVAL; case BPF_PROG_TYPE_EXT: if (expected_attach_type) return -EINVAL; @@ -2756,6 +2760,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_SK_LOOKUP: return attach_type == prog->expected_attach_type ? 
0 : -EINVAL; case BPF_PROG_TYPE_CGROUP_SKB: if (!capable(CAP_NET_ADMIN)) @@ -2817,6 +2822,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_CGROUP_SOCKOPT; case BPF_TRACE_ITER: return BPF_PROG_TYPE_TRACING; + case BPF_SK_LOOKUP: + return BPF_PROG_TYPE_SK_LOOKUP; default: return BPF_PROG_TYPE_UNSPEC; } @@ -2953,6 +2960,7 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_LIRC_MODE2: return lirc_prog_query(attr, uattr); case BPF_FLOW_DISSECTOR: + case BPF_SK_LOOKUP: return netns_bpf_prog_query(attr, uattr); default: return -EINVAL; @@ -3891,6 +3899,7 @@ static int link_create(union bpf_attr *attr) ret = tracing_bpf_link_attach(attr, prog); break; case BPF_PROG_TYPE_FLOW_DISSECTOR: + case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; default: diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 3c1efc9d08fd..9a6703bc3f36 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3878,10 +3878,14 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, } meta->ref_obj_id = reg->ref_obj_id; } - } else if (arg_type == ARG_PTR_TO_SOCKET) { + } else if (arg_type == ARG_PTR_TO_SOCKET || + arg_type == ARG_PTR_TO_SOCKET_OR_NULL) { expected_type = PTR_TO_SOCKET; - if (type != expected_type) - goto err_type; + if (!(register_is_null(reg) && + arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) { + if (type != expected_type) + goto err_type; + } } else if (arg_type == ARG_PTR_TO_BTF_ID) { expected_type = PTR_TO_BTF_ID; if (type != expected_type) @@ -7354,6 +7358,9 @@ static int check_return_code(struct bpf_verifier_env *env) return -ENOTSUPP; } break; + case BPF_PROG_TYPE_SK_LOOKUP: + range = tnum_range(SK_DROP, SK_PASS); + break; case BPF_PROG_TYPE_EXT: /* freplace program can return anything as its return value * depends on the to-be-replaced kernel func or bpf program. 
diff --git a/net/core/filter.c b/net/core/filter.c index bdd2382e655d..d099436b3ff5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9229,6 +9229,186 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = { const struct bpf_prog_ops sk_reuseport_prog_ops = { }; + +BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, + struct sock *, sk, u64, flags) +{ + if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE | + BPF_SK_LOOKUP_F_NO_REUSEPORT))) + return -EINVAL; + if (unlikely(sk && sk_is_refcounted(sk))) + return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ + if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED)) + return -ESOCKTNOSUPPORT; /* reject connected sockets */ + + /* Check if socket is suitable for packet L3/L4 protocol */ + if (sk && sk->sk_protocol != ctx->protocol) + return -EPROTOTYPE; + if (sk && sk->sk_family != ctx->family && + (sk->sk_family == AF_INET || ipv6_only_sock(sk))) + return -EAFNOSUPPORT; + + if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE)) + return -EEXIST; + + /* Select socket as lookup result */ + ctx->selected_sk = sk; + ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT; + return 0; +} + +static const struct bpf_func_proto bpf_sk_lookup_assign_proto = { + .func = bpf_sk_lookup_assign, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL, + .arg3_type = ARG_ANYTHING, +}; + +static const struct bpf_func_proto * +sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { + case BPF_FUNC_perf_event_output: + return &bpf_event_output_data_proto; + case BPF_FUNC_sk_assign: + return &bpf_sk_lookup_assign_proto; + case BPF_FUNC_sk_release: + return &bpf_sk_release_proto; + default: + return bpf_base_func_proto(func_id); + } +} + +static bool sk_lookup_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(struct bpf_sk_lookup)) + return false; + if (off % size != 0) + return false; + if (type != BPF_READ) + return false; + + switch (off) { + case offsetof(struct bpf_sk_lookup, sk): + info->reg_type = PTR_TO_SOCKET_OR_NULL; + return size == sizeof(__u64); + + case bpf_ctx_range(struct bpf_sk_lookup, family): + case bpf_ctx_range(struct bpf_sk_lookup, protocol): + case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4): + case bpf_ctx_range(struct bpf_sk_lookup, local_ip4): + case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]): + case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]): + case bpf_ctx_range(struct bpf_sk_lookup, remote_port): + case bpf_ctx_range(struct bpf_sk_lookup, local_port): + bpf_ctx_record_field_size(info, sizeof(__u32)); + return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32)); + + default: + return false; + } +} + +static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type, + const struct bpf_insn *si, + struct bpf_insn *insn_buf, + struct bpf_prog *prog, + u32 *target_size) +{ + struct bpf_insn *insn = insn_buf; + + switch (si->off) { + case offsetof(struct bpf_sk_lookup, sk): + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, selected_sk)); + break; + + case offsetof(struct bpf_sk_lookup, family): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + family, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, 
protocol): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + protocol, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, remote_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.saddr, 4, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_ip4): + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + v4.daddr, 4, target_size)); + break; + + case bpf_ctx_range_till(struct bpf_sk_lookup, + remote_ip6[0], remote_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.saddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case bpf_ctx_range_till(struct bpf_sk_lookup, + local_ip6[0], local_ip6[3]): { +#if IS_ENABLED(CONFIG_IPV6) + int off = si->off; + + off -= offsetof(struct bpf_sk_lookup, local_ip6[0]); + off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, + offsetof(struct bpf_sk_lookup_kern, v6.daddr)); + *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + } + case offsetof(struct bpf_sk_lookup, remote_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + sport, 2, target_size)); + break; + + case offsetof(struct bpf_sk_lookup, local_port): + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, + bpf_target_off(struct bpf_sk_lookup_kern, + dport, 2, target_size)); + break; + } + + return insn - insn_buf; +} + +const struct bpf_prog_ops sk_lookup_prog_ops = { +}; + +const struct bpf_verifier_ops sk_lookup_verifier_ops = { + .get_func_proto = sk_lookup_func_proto, + .is_valid_access = sk_lookup_is_valid_access, + .convert_ctx_access = sk_lookup_convert_ctx_access, +}; + #endif /* CONFIG_INET */ DEFINE_BPF_DISPATCHER(xdp) diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py index 6843376733df..5bfa448b4704 100755 --- a/scripts/bpf_helpers_doc.py +++ b/scripts/bpf_helpers_doc.py @@ -404,6 +404,7 @@ class PrinterHelpers(Printer): type_fwds = [ 'struct bpf_fib_lookup', + 'struct bpf_sk_lookup', 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', @@ -450,6 +451,7 @@ class PrinterHelpers(Printer): 'struct bpf_perf_event_data', 'struct bpf_perf_event_value', 'struct bpf_pidns_info', + 'struct bpf_sk_lookup', 'struct bpf_sock', 'struct bpf_sock_addr', 'struct bpf_sock_ops', @@ -487,6 +489,11 @@ class PrinterHelpers(Printer): 'struct sk_msg_buff': 'struct sk_msg_md', 'struct xdp_buff': 'struct xdp_md', } + # Helpers overloaded for different context types. 
+ overloaded_helpers = [ + 'bpf_get_socket_cookie', + 'bpf_sk_assign', + ] def print_header(self): header = '''\ @@ -543,7 +550,7 @@ class PrinterHelpers(Printer): for i, a in enumerate(proto['args']): t = a['type'] n = a['name'] - if proto['name'] == 'bpf_get_socket_cookie' and i == 0: + if proto['name'] in self.overloaded_helpers and i == 0: t = 'void' n = 'ctx' one_arg = '{}{}'.format(comma, self.map_type(t)) From 80b373f74f9e28b0093930a6b95c929732f02512 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:24 +0200 Subject: [PATCH 1173/2056] inet: Extract helper for selecting socket from reuseport group Prepare for calling into reuseport from __inet_lookup_listener as well. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-4-jakub@cloudflare.com --- net/ipv4/inet_hashtables.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2bbaaf0c7176..ab64834837c8 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -246,6 +246,21 @@ static inline int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 phash; + + if (sk->sk_reuseport) { + phash = inet_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, phash, skb, doff); + } + return reuse_sk; +} + /* * Here are some nice properties to exploit here. The BSD API * does not allow a listening sock to specify the remote port nor the @@ -265,21 +280,17 @@ static struct sock *inet_lhash2_lookup(struct net *net, struct inet_connection_sock *icsk; struct sock *sk, *result = NULL; int score, hiscore = 0; - u32 phash = 0; inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { sk = (struct sock *)icsk; score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif); if (score > hiscore) { - if (sk->sk_reuseport) { - phash = inet_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, phash, - skb, doff); - if (result) - return result; - } + result = lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, hnum); + if (result) + return result; + result = sk; hiscore = score; } From 1559b4aa1db443096af493c7d621dc156054babe Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:25 +0200 Subject: [PATCH 1174/2056] inet: Run SK_LOOKUP BPF program on socket lookup Run a BPF program before looking up a listening socket on the receive path. Program selects a listening socket to yield as result of socket lookup by calling bpf_sk_assign() helper and returning SK_PASS code. Program can revert its decision by assigning a NULL socket with bpf_sk_assign(). Alternatively, BPF program can also fail the lookup by returning with SK_DROP, or let the lookup continue as usual with SK_PASS on return, when no socket has been selected with bpf_sk_assign(). This lets the user match packets with listening sockets freely at the last possible point on the receive path, where we know that packets are destined for local delivery after undergoing policing, filtering, and routing. With BPF code selecting the socket, directing packets destined to an IP range or to a port range to a single socket becomes possible. 
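For illustration only — not part of this patch series — a minimal SK_LOOKUP program of the kind this attach point is designed for might look as follows. The SOCKMAP name, section name and the 192.0.2.0/24, port 80 policy are assumptions (mirroring the use case described earlier in the series); user space is expected to place the target listening socket into the map.

/* Sketch: steer TCP traffic for 192.0.2.0/24, port 80 to one listening
 * socket held in a SOCKMAP. Map and section names are hypothetical;
 * the "sk_lookup/" section prefix matches the libbpf patch in this series.
 */
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} http_sock SEC(".maps");

SEC("sk_lookup/steer_http")
int steer_http(struct bpf_sk_lookup *ctx)
{
	const __u32 zero = 0;
	struct bpf_sock *sk;
	long err;

	if (ctx->family != AF_INET || ctx->protocol != IPPROTO_TCP)
		return SK_PASS;
	/* local_port is host byte order, local_ip4 is network byte order */
	if (ctx->local_port != 80)
		return SK_PASS;
	if ((ctx->local_ip4 & bpf_htonl(0xffffff00)) != bpf_htonl(0xc0000200))
		return SK_PASS;

	sk = bpf_map_lookup_elem(&http_sock, &zero);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);	/* assign does not consume the map reference */
	return err ? SK_DROP : SK_PASS;
}

char _license[] SEC("license") = "GPL";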
In case multiple programs are attached, they are run in series in the order in which they were attached. The end result is determined from return codes of all the programs according to following rules: 1. If any program returned SK_PASS and selected a valid socket, the socket is used as result of socket lookup. 2. If more than one program returned SK_PASS and selected a socket, last selection takes effect. 3. If any program returned SK_DROP, and no program returned SK_PASS and selected a socket, socket lookup fails with -ECONNREFUSED. 4. If all programs returned SK_PASS and none of them selected a socket, socket lookup continues to htable-based lookup. Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-5-jakub@cloudflare.com --- include/linux/filter.h | 91 ++++++++++++++++++++++++++++++++++++++ kernel/bpf/net_namespace.c | 32 +++++++++++++- net/core/filter.c | 3 ++ net/ipv4/inet_hashtables.c | 31 +++++++++++++ 4 files changed, 156 insertions(+), 1 deletion(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index fa1ea12ad2cd..c4f54c216347 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1295,4 +1295,95 @@ struct bpf_sk_lookup_kern { bool no_reuseport; }; +extern struct static_key_false bpf_sk_lookup_enabled; + +/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. + * + * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and + * SK_DROP. Their meaning is as follows: + * + * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result + * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup + * SK_DROP : terminate lookup with -ECONNREFUSED + * + * This macro aggregates return values and selected sockets from + * multiple BPF programs according to following rules in order: + * + * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, + * macro result is SK_PASS and last ctx.selected_sk is used. + * 2. If any program returned SK_DROP return value, + * macro result is SK_DROP. + * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. + * + * Caller must ensure that the prog array is non-NULL, and that the + * array as well as the programs it contains remain valid. + */ +#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_sk_lookup_kern *_ctx = &(ctx); \ + struct bpf_prog_array_item *_item; \ + struct sock *_selected_sk = NULL; \ + bool _no_reuseport = false; \ + struct bpf_prog *_prog; \ + bool _all_pass = true; \ + u32 _ret; \ + \ + migrate_disable(); \ + _item = &(array)->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + /* restore most recent selection */ \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + \ + _ret = func(_prog, _ctx); \ + if (_ret == SK_PASS && _ctx->selected_sk) { \ + /* remember last non-NULL socket */ \ + _selected_sk = _ctx->selected_sk; \ + _no_reuseport = _ctx->no_reuseport; \ + } else if (_ret == SK_DROP && _all_pass) { \ + _all_pass = false; \ + } \ + _item++; \ + } \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + migrate_enable(); \ + _all_pass || _selected_sk ? 
SK_PASS : SK_DROP; \ + }) + +static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET, + .protocol = protocol, + .v4.saddr = saddr, + .v4.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} + #endif /* __LINUX_FILTER_H__ */ diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 38b368bccda2..4e1bcaa2c3cb 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -25,6 +25,28 @@ struct bpf_netns_link { /* Protects updates to netns_bpf */ DEFINE_MUTEX(netns_bpf_mutex); +static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_dec(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + +static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type) +{ + switch (type) { + case NETNS_BPF_SK_LOOKUP: + static_branch_inc(&bpf_sk_lookup_enabled); + break; + default: + break; + } +} + /* Must be called with netns_bpf_mutex held. */ static void netns_bpf_run_array_detach(struct net *net, enum netns_bpf_attach_type type) @@ -91,6 +113,9 @@ static void bpf_netns_link_release(struct bpf_link *link) if (!net) goto out_unlock; + /* Mark attach point as unused */ + netns_bpf_attach_type_unneed(type); + /* Remember link position in case of safe delete */ idx = link_index(net, type, net_link); list_del(&net_link->node); @@ -428,6 +453,9 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link, lockdep_is_held(&netns_bpf_mutex)); bpf_prog_array_free(run_array); + /* Mark attach point as used */ + netns_bpf_attach_type_need(type); + out_unlock: mutex_unlock(&netns_bpf_mutex); return err; @@ -503,8 +531,10 @@ static void __net_exit netns_bpf_pernet_pre_exit(struct net *net) mutex_lock(&netns_bpf_mutex); for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) { netns_bpf_run_array_detach(net, type); - list_for_each_entry(net_link, &net->bpf.links[type], node) + list_for_each_entry(net_link, &net->bpf.links[type], node) { net_link->net = NULL; /* auto-detach link */ + netns_bpf_attach_type_unneed(type); + } if (net->bpf.progs[type]) bpf_prog_put(net->bpf.progs[type]); } diff --git a/net/core/filter.c b/net/core/filter.c index d099436b3ff5..2bd129b5ae74 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9230,6 +9230,9 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = { const struct bpf_prog_ops sk_reuseport_prog_ops = { }; +DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled); +EXPORT_SYMBOL(bpf_sk_lookup_enabled); + BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, struct sock *, sk, u64, flags) { diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index ab64834837c8..4eb4cd8d20dd 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -299,6 +299,29 @@ static struct sock *inet_lhash2_lookup(struct net *net, return 
result; } +static inline struct sock *inet_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *__inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -310,6 +333,14 @@ struct sock *__inet_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv4_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); From 5df6531292b5021ac9e4ed261eb7d1fa9ff3bf08 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:26 +0200 Subject: [PATCH 1175/2056] inet6: Extract helper for selecting socket from reuseport group Prepare for calling into reuseport from inet6_lookup_listener as well. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-6-jakub@cloudflare.com --- net/ipv6/inet6_hashtables.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index fbe9d4295eac..03942eef8ab6 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -111,6 +111,23 @@ static inline int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 phash; + + if (sk->sk_reuseport) { + phash = inet6_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, phash, skb, doff); + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *inet6_lhash2_lookup(struct net *net, struct inet_listen_hashbucket *ilb2, @@ -123,21 +140,17 @@ static struct sock *inet6_lhash2_lookup(struct net *net, struct inet_connection_sock *icsk; struct sock *sk, *result = NULL; int score, hiscore = 0; - u32 phash = 0; inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) { sk = (struct sock *)icsk; score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif); if (score > hiscore) { - if (sk->sk_reuseport) { - phash = inet6_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, phash, - skb, doff); - if (result) - return result; - } + result = lookup_reuseport(net, sk, skb, doff, + saddr, sport, daddr, hnum); + if (result) + return result; + result = sk; hiscore = score; } From 1122702f02678597c4f1c7d316365ef502aafe08 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:27 +0200 Subject: [PATCH 1176/2056] inet6: Run SK_LOOKUP BPF program on socket lookup Following ipv4 stack changes, run a BPF program attached to netns before looking up a listening 
socket. Program can return a listening socket to use as result of socket lookup, fail the lookup, or take no action. Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-7-jakub@cloudflare.com --- include/linux/filter.h | 39 +++++++++++++++++++++++++++++++++++++ net/ipv6/inet6_hashtables.c | 35 +++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/include/linux/filter.h b/include/linux/filter.h index c4f54c216347..8252572db918 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1386,4 +1386,43 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, return no_reuseport; } +#if IS_ENABLED(CONFIG_IPV6) +static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET6, + .protocol = protocol, + .v6.saddr = saddr, + .v6.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + #endif /* __LINUX_FILTER_H__ */ diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index 03942eef8ab6..2d3add9e6116 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -21,6 +21,8 @@ #include #include +extern struct inet_hashinfo tcp_hashinfo; + u32 inet6_ehashfn(const struct net *net, const struct in6_addr *laddr, const u16 lport, const struct in6_addr *faddr, const __be16 fport) @@ -159,6 +161,31 @@ static struct sock *inet6_lhash2_lookup(struct net *net, return result; } +static inline struct sock *inet6_lookup_run_bpf(struct net *net, + struct inet_hashinfo *hashinfo, + struct sk_buff *skb, int doff, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (hashinfo != &tcp_hashinfo) + return NULL; /* only TCP is supported */ + + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, @@ -170,6 +197,14 @@ struct sock *inet6_lookup_listener(struct net *net, struct sock *result = NULL; unsigned int hash2; + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + result = inet6_lookup_run_bpf(net, hashinfo, skb, doff, + saddr, sport, daddr, hnum); + if (result) + goto done; + } + hash2 = ipv6_portaddr_hash(net, daddr, hnum); ilb2 = inet_lhash2_bucket(hashinfo, hash2); From 7629c73a1466ec2348e9f64c874c19bf13f35f4c Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:28 +0200 Subject: [PATCH 1177/2056] 
udp: Extract helper for selecting socket from reuseport group Prepare for calling into reuseport from __udp4_lib_lookup as well. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-8-jakub@cloudflare.com --- net/ipv4/udp.c | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 073d346f515c..9296faea3acf 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -408,6 +408,25 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr, udp_ehash_secret + net_hash_mix(net)); } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, + __be32 saddr, __be16 sport, + __be32 daddr, unsigned short hnum) +{ + struct sock *reuse_sk = NULL; + u32 hash; + + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { + hash = udp_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + /* Fall back to scoring if group has connections */ + if (reuseport_has_conns(sk, false)) + return NULL; + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, @@ -418,7 +437,6 @@ static struct sock *udp4_lib_lookup2(struct net *net, { struct sock *sk, *result; int score, badness; - u32 hash = 0; result = NULL; badness = 0; @@ -426,15 +444,11 @@ static struct sock *udp4_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { - if (sk->sk_reuseport && - sk->sk_state != TCP_ESTABLISHED) { - hash = udp_ehashfn(net, daddr, hnum, - saddr, sport); - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; - } + result = lookup_reuseport(net, sk, skb, + saddr, sport, daddr, hnum); + if (result) + return result; + badness = score; result = sk; } From 72f7e9440e9bd06f855b21eba09c1017395f430a Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:29 +0200 Subject: [PATCH 1178/2056] udp: Run SK_LOOKUP BPF program on socket lookup Following INET/TCP socket lookup changes, modify UDP socket lookup to let BPF program select a receiving socket before searching for a socket by destination address and port as usual. Lookup of connected sockets that match packet 4-tuple is unaffected by this change. BPF program runs, and potentially overrides the lookup result, only if a 4-tuple match was not found. 
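The program is attached to a network namespace through the netns bpf_link machinery extended earlier in this series. A rough user-space sketch of that flow using libbpf follows; the object path, section title and pin path are assumptions, and error handling is abbreviated.

/* Sketch: load an object containing a SEC("sk_lookup/...") program and
 * attach it to the current netns as a bpf_link. Names are hypothetical.
 */
#include <bpf/libbpf.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int netns_fd;

	obj = bpf_object__open_file("sk_lookup_prog.o", NULL);
	if (libbpf_get_error(obj))
		return 1;
	if (bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_title(obj, "sk_lookup/steer_http");
	if (!prog)
		return 1;

	netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (netns_fd < 0)
		return 1;

	link = bpf_program__attach_netns(prog, netns_fd);
	close(netns_fd);
	if (libbpf_get_error(link))
		return 1;

	/* Keep the link alive (here by pinning it in bpffs); dropping the
	 * last reference detaches the program from the namespace.
	 */
	bpf_link__pin(link, "/sys/fs/bpf/steer_http_link");
	return 0;
}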
Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-9-jakub@cloudflare.com --- net/ipv4/udp.c | 57 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 8 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 9296faea3acf..b738c63d7a77 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -456,6 +456,29 @@ static struct sock *udp4_lib_lookup2(struct net *net, return result; } +static inline struct sock *udp4_lookup_run_bpf(struct net *net, + struct udp_table *udptable, + struct sk_buff *skb, + __be32 saddr, __be16 sport, + __be32 daddr, u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (udptable != &udp_table) + return NULL; /* only UDP is supported */ + + no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. -DaveM */ @@ -463,27 +486,45 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, int sdif, struct udp_table *udptable, struct sk_buff *skb) { - struct sock *result; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2; struct udp_hslot *hslot2; + struct sock *result, *sk; hash2 = ipv4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; + /* Lookup connected or non-wildcard socket */ result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, hslot2, skb); - if (!result) { - hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); - slot2 = hash2 & udptable->mask; - hslot2 = &udptable->hash2[slot2]; + if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) + goto done; - result = udp4_lib_lookup2(net, saddr, sport, - htonl(INADDR_ANY), hnum, dif, sdif, - hslot2, skb); + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + sk = udp4_lookup_run_bpf(net, udptable, skb, + saddr, sport, daddr, hnum); + if (sk) { + result = sk; + goto done; + } } + + /* Got non-wildcard socket or error on first lookup */ + if (result) + goto done; + + /* Lookup wildcard sockets */ + hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum); + slot2 = hash2 & udptable->mask; + hslot2 = &udptable->hash2[slot2]; + + result = udp4_lib_lookup2(net, saddr, sport, + htonl(INADDR_ANY), hnum, dif, sdif, + hslot2, skb); +done: if (IS_ERR(result)) return NULL; return result; From 2a08748cd384cdfb8e1222bd3645a8d1d36e6a5d Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:30 +0200 Subject: [PATCH 1179/2056] udp6: Extract helper for selecting socket from reuseport group Prepare for calling into reuseport from __udp6_lib_lookup as well. 
Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-10-jakub@cloudflare.com --- net/ipv6/udp.c | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 38c0d9350c6b..084205c18a33 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -141,6 +141,27 @@ static int compute_score(struct sock *sk, struct net *net, return score; } +static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk, + struct sk_buff *skb, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + unsigned int hnum) +{ + struct sock *reuse_sk = NULL; + u32 hash; + + if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) { + hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); + reuse_sk = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + /* Fall back to scoring if group has connections */ + if (reuseport_has_conns(sk, false)) + return NULL; + } + return reuse_sk; +} + /* called with rcu_read_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -150,7 +171,6 @@ static struct sock *udp6_lib_lookup2(struct net *net, { struct sock *sk, *result; int score, badness; - u32 hash = 0; result = NULL; badness = -1; @@ -158,16 +178,11 @@ static struct sock *udp6_lib_lookup2(struct net *net, score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, sdif); if (score > badness) { - if (sk->sk_reuseport && - sk->sk_state != TCP_ESTABLISHED) { - hash = udp6_ehashfn(net, daddr, hnum, - saddr, sport); + result = lookup_reuseport(net, sk, skb, + saddr, sport, daddr, hnum); + if (result) + return result; - result = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (result && !reuseport_has_conns(sk, false)) - return result; - } result = sk; badness = score; } From 6d4201b1386b335bb53628a04ad63e547872dc5a Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:31 +0200 Subject: [PATCH 1180/2056] udp6: Run SK_LOOKUP BPF program on socket lookup Same as for udp4, let BPF program override the socket lookup result, by selecting a receiving socket of its choice or failing the lookup, if no connected UDP socket matched packet 4-tuple. 
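Because the attach point is the network namespace, a single program covers both the udp4 and udp6 receive paths. A minimal sketch of the "fail the lookup" case follows; the port 111 policy is an assumption made up for the example.

/* Sketch: refuse UDP datagrams to port 111 in this netns by failing
 * the socket lookup; the policy itself is hypothetical.
 */
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>

SEC("sk_lookup/refuse_portmapper")
int refuse_portmapper(struct bpf_sk_lookup *ctx)
{
	if (ctx->protocol == IPPROTO_UDP && ctx->local_port == 111)
		return SK_DROP;	/* terminate lookup with -ECONNREFUSED */
	return SK_PASS;		/* continue to regular htable-based lookup */
}

char _license[] SEC("license") = "GPL";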
Suggested-by: Marek Majkowski Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-11-jakub@cloudflare.com --- net/ipv6/udp.c | 60 ++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 084205c18a33..ff8be202726a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -190,6 +190,31 @@ static struct sock *udp6_lib_lookup2(struct net *net, return result; } +static inline struct sock *udp6_lookup_run_bpf(struct net *net, + struct udp_table *udptable, + struct sk_buff *skb, + const struct in6_addr *saddr, + __be16 sport, + const struct in6_addr *daddr, + u16 hnum) +{ + struct sock *sk, *reuse_sk; + bool no_reuseport; + + if (udptable != &udp_table) + return NULL; /* only UDP is supported */ + + no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, + saddr, sport, daddr, hnum, &sk); + if (no_reuseport || IS_ERR_OR_NULL(sk)) + return sk; + + reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); + if (reuse_sk) + sk = reuse_sk; + return sk; +} + /* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, @@ -200,25 +225,42 @@ struct sock *__udp6_lib_lookup(struct net *net, unsigned short hnum = ntohs(dport); unsigned int hash2, slot2; struct udp_hslot *hslot2; - struct sock *result; + struct sock *result, *sk; hash2 = ipv6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; + /* Lookup connected or non-wildcard sockets */ result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, sdif, hslot2, skb); - if (!result) { - hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); - slot2 = hash2 & udptable->mask; + if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED) + goto done; - hslot2 = &udptable->hash2[slot2]; - - result = udp6_lib_lookup2(net, saddr, sport, - &in6addr_any, hnum, dif, sdif, - hslot2, skb); + /* Lookup redirect from BPF */ + if (static_branch_unlikely(&bpf_sk_lookup_enabled)) { + sk = udp6_lookup_run_bpf(net, udptable, skb, + saddr, sport, daddr, hnum); + if (sk) { + result = sk; + goto done; + } } + + /* Got non-wildcard socket or error on first lookup */ + if (result) + goto done; + + /* Lookup wildcard sockets */ + hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum); + slot2 = hash2 & udptable->mask; + hslot2 = &udptable->hash2[slot2]; + + result = udp6_lib_lookup2(net, saddr, sport, + &in6addr_any, hnum, dif, sdif, + hslot2, skb); +done: if (IS_ERR(result)) return NULL; return result; From a352b32ae969788b706f666f764702cd0ab4a40a Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:32 +0200 Subject: [PATCH 1181/2056] bpf: Sync linux/bpf.h to tools/ Newly added program, context type and helper is used by tests in a subsequent patch. Synchronize the header file. 
Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-12-jakub@cloudflare.com --- tools/include/uapi/linux/bpf.h | 77 ++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 7ac3992dacfe..54d0c886e3ba 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -189,6 +189,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_STRUCT_OPS, BPF_PROG_TYPE_EXT, BPF_PROG_TYPE_LSM, + BPF_PROG_TYPE_SK_LOOKUP, }; enum bpf_attach_type { @@ -228,6 +229,7 @@ enum bpf_attach_type { BPF_XDP_DEVMAP, BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, + BPF_SK_LOOKUP, __MAX_BPF_ATTACH_TYPE }; @@ -3069,6 +3071,10 @@ union bpf_attr { * * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags) * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SCHED_CLS** and + * **BPF_PROG_TYPE_SCHED_ACT** programs. + * * Assign the *sk* to the *skb*. When combined with appropriate * routing configuration to receive the packet towards the socket, * will cause *skb* to be delivered to the specified socket. @@ -3094,6 +3100,56 @@ union bpf_attr { * **-ESOCKTNOSUPPORT** if the socket type is not supported * (reuseport). * + * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags) + * Description + * Helper is overloaded depending on BPF program type. This + * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs. + * + * Select the *sk* as a result of a socket lookup. + * + * For the operation to succeed passed socket must be compatible + * with the packet description provided by the *ctx* object. + * + * L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must + * be an exact match. While IP family (**AF_INET** or + * **AF_INET6**) must be compatible, that is IPv6 sockets + * that are not v6-only can be selected for IPv4 packets. + * + * Only TCP listeners and UDP unconnected sockets can be + * selected. *sk* can also be NULL to reset any previous + * selection. + * + * *flags* argument can combination of following values: + * + * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous + * socket selection, potentially done by a BPF program + * that ran before us. + * + * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip + * load-balancing within reuseport group for the socket + * being selected. + * + * On success *ctx->sk* will point to the selected socket. + * + * Return + * 0 on success, or a negative errno in case of failure. + * + * * **-EAFNOSUPPORT** if socket family (*sk->family*) is + * not compatible with packet family (*ctx->family*). + * + * * **-EEXIST** if socket has been already selected, + * potentially by another program, and + * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified. + * + * * **-EINVAL** if unsupported flags were specified. + * + * * **-EPROTOTYPE** if socket L4 protocol + * (*sk->protocol*) doesn't match packet protocol + * (*ctx->protocol*). + * + * * **-ESOCKTNOSUPPORT** if socket is not in allowed + * state (TCP listening or UDP unconnected). + * * u64 bpf_ktime_get_boot_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. @@ -3607,6 +3663,12 @@ enum { BPF_RINGBUF_HDR_SZ = 8, }; +/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. 
*/ +enum { + BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0), + BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1), +}; + /* Mode for BPF_FUNC_skb_adjust_room helper. */ enum bpf_adj_room_mode { BPF_ADJ_ROOM_NET, @@ -4349,4 +4411,19 @@ struct bpf_pidns_info { __u32 pid; __u32 tgid; }; + +/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */ +struct bpf_sk_lookup { + __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */ + + __u32 family; /* Protocol family (AF_INET, AF_INET6) */ + __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ + __u32 remote_ip4; /* Network byte order */ + __u32 remote_ip6[4]; /* Network byte order */ + __u32 remote_port; /* Network byte order */ + __u32 local_ip4; /* Network byte order */ + __u32 local_ip6[4]; /* Network byte order */ + __u32 local_port; /* Host byte order */ +}; + #endif /* _UAPI__LINUX_BPF_H__ */ From 499dd29d90bb78d4ac4f0d6648e26f4a339829b1 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:33 +0200 Subject: [PATCH 1182/2056] libbpf: Add support for SK_LOOKUP program type Make libbpf aware of the newly added program type, and assign it a section name. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200717103536.397595-13-jakub@cloudflare.com --- tools/lib/bpf/libbpf.c | 3 +++ tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 2 ++ tools/lib/bpf/libbpf_probes.c | 3 +++ 4 files changed, 10 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f55fd8a5c008..846164c79df1 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6799,6 +6799,7 @@ BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); +BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); enum bpf_attach_type bpf_program__get_expected_attach_type(struct bpf_program *prog) @@ -6981,6 +6982,8 @@ static const struct bpf_sec_def section_defs[] = { BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT), BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS), + BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP, + BPF_SK_LOOKUP), }; #undef BPF_PROG_SEC_IMPL diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 2335971ed0bd..c2272132e929 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -350,6 +350,7 @@ LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, @@ -377,6 +378,7 @@ LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 
c5d5c7664c3b..6f0856abe299 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -287,6 +287,8 @@ LIBBPF_0.1.0 { bpf_map__type; bpf_map__value_size; bpf_program__autoload; + bpf_program__is_sk_lookup; bpf_program__set_autoload; + bpf_program__set_sk_lookup; btf__set_fd; } LIBBPF_0.0.9; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 10cd8d1891f5..5a3d3f078408 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -78,6 +78,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT; break; + case BPF_PROG_TYPE_SK_LOOKUP: + xattr.expected_attach_type = BPF_SK_LOOKUP; + break; case BPF_PROG_TYPE_KPROBE: xattr.kern_version = get_kernel_version(); break; From 93a3545d812ae7cfe4426374e00a7d8f64ac02e0 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:34 +0200 Subject: [PATCH 1183/2056] tools/bpftool: Add name mappings for SK_LOOKUP prog and attach type Make bpftool show human-friendly identifiers for newly introduced program and attach type, BPF_PROG_TYPE_SK_LOOKUP and BPF_SK_LOOKUP, respectively. Also, add the new prog type bash-completion, man page and help message. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-14-jakub@cloudflare.com --- tools/bpf/bpftool/Documentation/bpftool-prog.rst | 2 +- tools/bpf/bpftool/bash-completion/bpftool | 2 +- tools/bpf/bpftool/common.c | 1 + tools/bpf/bpftool/prog.c | 3 ++- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 412ea3d9bf7f..82e356b664e8 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst @@ -45,7 +45,7 @@ PROG COMMANDS | **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** | | **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** | | **cgroup/getsockopt** | **cgroup/setsockopt** | -| **struct_ops** | **fentry** | **fexit** | **freplace** +| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup** | } | *ATTACH_TYPE* := { | **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector** diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 25b25aca1112..7b137264ea3a 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -479,7 +479,7 @@ _bpftool() cgroup/post_bind4 cgroup/post_bind6 \ cgroup/sysctl cgroup/getsockopt \ cgroup/setsockopt struct_ops \ - fentry fexit freplace" -- \ + fentry fexit freplace sk_lookup" -- \ "$cur" ) ) return 0 ;; diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 29f4e7611ae8..9b28c69dd8e4 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -64,6 +64,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = { [BPF_TRACE_FEXIT] = "fexit", [BPF_MODIFY_RETURN] = "mod_ret", [BPF_LSM_MAC] = "lsm_mac", + [BPF_SK_LOOKUP] = "sk_lookup", }; void p_err(const char *fmt, ...) 
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 6863c57effd0..3e6ecc6332e2 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -59,6 +59,7 @@ const char * const prog_type_name[] = { [BPF_PROG_TYPE_TRACING] = "tracing", [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", [BPF_PROG_TYPE_EXT] = "ext", + [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", }; const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name); @@ -1905,7 +1906,7 @@ static int do_help(int argc, char **argv) " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n" " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n" " cgroup/getsockopt | cgroup/setsockopt |\n" - " struct_ops | fentry | fexit | freplace }\n" + " struct_ops | fentry | fexit | freplace | sk_lookup }\n" " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n" " flow_dissector }\n" " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n" From f7726cbea402fc92a3c27226b761a6dbc0390cac Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:35 +0200 Subject: [PATCH 1184/2056] selftests/bpf: Add verifier tests for bpf_sk_lookup context access Exercise verifier access checks for bpf_sk_lookup context fields. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-15-jakub@cloudflare.com --- .../selftests/bpf/verifier/ctx_sk_lookup.c | 492 ++++++++++++++++++ 1 file changed, 492 insertions(+) create mode 100644 tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c new file mode 100644 index 000000000000..2ad5f974451c --- /dev/null +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c @@ -0,0 +1,492 @@ +{ + "valid 1,2,4,8-byte reads from bpf_sk_lookup", + .insns = { + /* 1-byte read from family field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 3), + /* 2-byte read from family field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family) + 2), + /* 4-byte read from family field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + + /* 1-byte read from protocol field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 3), + /* 2-byte read from protocol field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol) + 2), + /* 4-byte read from protocol field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + + /* 1-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + 
offsetof(struct bpf_sk_lookup, remote_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 3), + /* 2-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4) + 2), + /* 4-byte read from remote_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + + /* 1-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 15), + /* 2-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 14), + /* 4-byte read from remote_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6) + 12), + + /* 1-byte read from remote_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct 
bpf_sk_lookup, remote_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 3), + /* 2-byte read from remote_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port) + 2), + /* 4-byte read from remote_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + + /* 1-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 3), + /* 2-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4) + 2), + /* 4-byte read from local_ip4 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + + /* 1-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 5), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 7), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 9), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 11), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 13), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 15), + /* 2-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 2), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 6), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 10), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct 
bpf_sk_lookup, local_ip6) + 12), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 14), + /* 4-byte read from local_ip6 field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 4), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 8), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6) + 12), + + /* 1-byte read from local_port field */ + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 3), + /* 2-byte read from local_port field */ + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port) + 2), + /* 4-byte read from local_port field */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + + /* 8-byte read from sk field */ + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */ +{ + "invalid 8-byte read from bpf_sk_lookup family field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, family)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup protocol field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, protocol)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip4 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup remote_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, remote_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip4 
field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip4)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_ip6 field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_ip6)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 8-byte read from bpf_sk_lookup local_port field", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, local_port)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */ +{ + "invalid 4-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte read from bpf_sk_lookup sk field", + .insns = { + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, + offsetof(struct bpf_sk_lookup, sk)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* out of bounds and unaligned reads from bpf_sk_lookup */ +{ + "invalid 4-byte read past end of bpf_sk_lookup", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at odd offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte unaligned read from bpf_sk_lookup at even offset", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +/* in-bound and out-of-bound writes to bpf_sk_lookup */ +{ + "invalid 8-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + 
.result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 2-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 1-byte write to bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, +{ + "invalid 4-byte write past end of bpf_sk_lookup", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U), + BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, + sizeof(struct bpf_sk_lookup)), + BPF_MOV32_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_SK_LOOKUP, + .expected_attach_type = BPF_SK_LOOKUP, +}, From 0ab5539f85840d3c4e5a8a4783901c0038f8321e Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Fri, 17 Jul 2020 12:35:36 +0200 Subject: [PATCH 1185/2056] selftests/bpf: Tests for BPF_SK_LOOKUP attach point Add tests to test_progs that exercise: - attaching/detaching/querying programs to BPF_SK_LOOKUP hook, - redirecting socket lookup to a socket selected by BPF program, - failing a socket lookup on BPF program's request, - error scenarios for selecting a socket from BPF program, - accessing BPF program context, - attaching and running multiple BPF programs. 
Run log: bash-5.0# ./test_progs -n 70 #70/1 query lookup prog:OK #70/2 TCP IPv4 redir port:OK #70/3 TCP IPv4 redir addr:OK #70/4 TCP IPv4 redir with reuseport:OK #70/5 TCP IPv4 redir skip reuseport:OK #70/6 TCP IPv6 redir port:OK #70/7 TCP IPv6 redir addr:OK #70/8 TCP IPv4->IPv6 redir port:OK #70/9 TCP IPv6 redir with reuseport:OK #70/10 TCP IPv6 redir skip reuseport:OK #70/11 UDP IPv4 redir port:OK #70/12 UDP IPv4 redir addr:OK #70/13 UDP IPv4 redir with reuseport:OK #70/14 UDP IPv4 redir skip reuseport:OK #70/15 UDP IPv6 redir port:OK #70/16 UDP IPv6 redir addr:OK #70/17 UDP IPv4->IPv6 redir port:OK #70/18 UDP IPv6 redir and reuseport:OK #70/19 UDP IPv6 redir skip reuseport:OK #70/20 TCP IPv4 drop on lookup:OK #70/21 TCP IPv6 drop on lookup:OK #70/22 UDP IPv4 drop on lookup:OK #70/23 UDP IPv6 drop on lookup:OK #70/24 TCP IPv4 drop on reuseport:OK #70/25 TCP IPv6 drop on reuseport:OK #70/26 UDP IPv4 drop on reuseport:OK #70/27 TCP IPv6 drop on reuseport:OK #70/28 sk_assign returns EEXIST:OK #70/29 sk_assign honors F_REPLACE:OK #70/30 sk_assign accepts NULL socket:OK #70/31 access ctx->sk:OK #70/32 narrow access to ctx v4:OK #70/33 narrow access to ctx v6:OK #70/34 sk_assign rejects TCP established:OK #70/35 sk_assign rejects UDP connected:OK #70/36 multi prog - pass, pass:OK #70/37 multi prog - drop, drop:OK #70/38 multi prog - pass, drop:OK #70/39 multi prog - drop, pass:OK #70/40 multi prog - pass, redir:OK #70/41 multi prog - redir, pass:OK #70/42 multi prog - drop, redir:OK #70/43 multi prog - redir, drop:OK #70/44 multi prog - redir, redir:OK #70 sk_lookup:OK Summary: 1/44 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717103536.397595-16-jakub@cloudflare.com --- tools/testing/selftests/bpf/network_helpers.c | 58 +- tools/testing/selftests/bpf/network_helpers.h | 2 + .../selftests/bpf/prog_tests/sk_lookup.c | 1282 +++++++++++++++++ .../selftests/bpf/progs/test_sk_lookup.c | 641 +++++++++ 4 files changed, 1960 insertions(+), 23 deletions(-) create mode 100644 tools/testing/selftests/bpf/prog_tests/sk_lookup.c create mode 100644 tools/testing/selftests/bpf/progs/test_sk_lookup.c diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index acd08715be2e..f56655690f9b 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -73,29 +73,8 @@ int start_server(int family, int type, const char *addr_str, __u16 port, socklen_t len; int fd; - if (family == AF_INET) { - struct sockaddr_in *sin = (void *)&addr; - - sin->sin_family = AF_INET; - sin->sin_port = htons(port); - if (addr_str && - inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) { - log_err("inet_pton(AF_INET, %s)", addr_str); - return -1; - } - len = sizeof(*sin); - } else { - struct sockaddr_in6 *sin6 = (void *)&addr; - - sin6->sin6_family = AF_INET6; - sin6->sin6_port = htons(port); - if (addr_str && - inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) { - log_err("inet_pton(AF_INET6, %s)", addr_str); - return -1; - } - len = sizeof(*sin6); - } + if (make_sockaddr(family, addr_str, port, &addr, &len)) + return -1; fd = socket(family, type, 0); if (fd < 0) { @@ -194,3 +173,36 @@ int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) return 0; } + +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len) +{ + if (family == AF_INET) { + struct sockaddr_in *sin = (void *)addr; 
+ + sin->sin_family = AF_INET; + sin->sin_port = htons(port); + if (addr_str && + inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) { + log_err("inet_pton(AF_INET, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin); + return 0; + } else if (family == AF_INET6) { + struct sockaddr_in6 *sin6 = (void *)addr; + + sin6->sin6_family = AF_INET6; + sin6->sin6_port = htons(port); + if (addr_str && + inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) { + log_err("inet_pton(AF_INET6, %s)", addr_str); + return -1; + } + if (len) + *len = sizeof(*sin6); + return 0; + } + return -1; +} diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index f580e82fda58..c3728f6667e4 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -37,5 +37,7 @@ int start_server(int family, int type, const char *addr, __u16 port, int timeout_ms); int connect_to_fd(int server_fd, int timeout_ms); int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); +int make_sockaddr(int family, const char *addr_str, __u16 port, + struct sockaddr_storage *addr, socklen_t *len); #endif diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c new file mode 100644 index 000000000000..f1784ae4565a --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -0,0 +1,1282 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare +/* + * Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP). + * + * Tests exercise: + * - attaching/detaching/querying programs to BPF_SK_LOOKUP hook, + * - redirecting socket lookup to a socket selected by BPF program, + * - failing a socket lookup on BPF program's request, + * - error scenarios for selecting a socket from BPF program, + * - accessing BPF program context, + * - attaching and running multiple BPF programs. + * + * Tests run in a dedicated network namespace. + */ + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "test_progs.h" +#include "bpf_rlimit.h" +#include "bpf_util.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_sk_lookup.skel.h" + +/* External (address, port) pairs the client sends packets to. */ +#define EXT_IP4 "127.0.0.1" +#define EXT_IP6 "fd00::1" +#define EXT_PORT 7007 + +/* Internal (address, port) pairs the server listens/receives at. 
*/ +#define INT_IP4 "127.0.0.2" +#define INT_IP4_V6 "::ffff:127.0.0.2" +#define INT_IP6 "fd00::2" +#define INT_PORT 8008 + +#define IO_TIMEOUT_SEC 3 + +enum server { + SERVER_A = 0, + SERVER_B = 1, + MAX_SERVERS, +}; + +enum { + PROG1 = 0, + PROG2, +}; + +struct inet_addr { + const char *ip; + unsigned short port; +}; + +struct test { + const char *desc; + struct bpf_program *lookup_prog; + struct bpf_program *reuseport_prog; + struct bpf_map *sock_map; + int sotype; + struct inet_addr connect_to; + struct inet_addr listen_at; + enum server accept_on; +}; + +static __u32 duration; /* for CHECK macro */ + +static bool is_ipv6(const char *ip) +{ + return !!strchr(ip, ':'); +} + +static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog) +{ + int err, prog_fd; + + prog_fd = bpf_program__fd(reuseport_prog); + if (prog_fd < 0) { + errno = -prog_fd; + return -1; + } + + err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, + &prog_fd, sizeof(prog_fd)); + if (err) + return -1; + + return 0; +} + +static socklen_t inetaddr_len(const struct sockaddr_storage *addr) +{ + return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) : + addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0); +} + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC }; + int err, family, fd; + + family = is_ipv6(ip) ? AF_INET6 : AF_INET; + err = make_sockaddr(family, ip, port, addr, NULL); + if (CHECK(err, "make_address", "failed\n")) + return -1; + + fd = socket(addr->ss_family, sotype, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to make socket"); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) { + log_err("failed to set SNDTIMEO"); + close(fd); + return -1; + } + + err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); + if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) { + log_err("failed to set RCVTIMEO"); + close(fd); + return -1; + } + + return fd; +} + +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct sockaddr_storage addr = {0}; + const int one = 1; + int err, fd = -1; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. 
*/ + if (sotype == SOCK_DGRAM) { + err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IP_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { + log_err("failed to enable IPV6_RECVORIGDSTADDR"); + goto fail; + } + } + + if (sotype == SOCK_STREAM) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { + log_err("failed to enable SO_REUSEADDR"); + goto fail; + } + } + + if (reuseport_prog) { + err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, + sizeof(one)); + if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { + log_err("failed to enable SO_REUSEPORT"); + goto fail; + } + } + + err = bind(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "bind", "failed\n")) { + log_err("failed to bind listen socket"); + goto fail; + } + + if (sotype == SOCK_STREAM) { + err = listen(fd, SOMAXCONN); + if (CHECK(err, "make_server", "listen")) { + log_err("failed to listen on port %d", port); + goto fail; + } + } + + /* Late attach reuseport prog so we can have one init path */ + if (reuseport_prog) { + err = attach_reuseport(fd, reuseport_prog); + if (CHECK(err, "attach_reuseport", "failed\n")) { + log_err("failed to attach reuseport prog"); + goto fail; + } + } + + return fd; +fail: + close(fd); + return -1; +} + +static int make_client(int sotype, const char *ip, int port) +{ + struct sockaddr_storage addr = {0}; + int err, fd; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + err = connect(fd, (void *)&addr, inetaddr_len(&addr)); + if (CHECK(err, "make_client", "connect")) { + log_err("failed to connect client socket"); + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static int send_byte(int fd) +{ + ssize_t n; + + errno = 0; + n = send(fd, "a", 1, 0); + if (CHECK(n <= 0, "send_byte", "send")) { + log_err("failed/partial send"); + return -1; + } + return 0; +} + +static int recv_byte(int fd) +{ + char buf[1]; + ssize_t n; + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv_byte", "recv")) { + log_err("failed/partial recv"); + return -1; + } + return 0; +} + +static int tcp_recv_send(int server_fd) +{ + char buf[1]; + int ret, fd; + ssize_t n; + + fd = accept(server_fd, NULL, NULL); + if (CHECK(fd < 0, "accept", "failed\n")) { + log_err("failed to accept"); + return -1; + } + + n = recv(fd, buf, sizeof(buf), 0); + if (CHECK(n <= 0, "recv", "failed\n")) { + log_err("failed/partial recv"); + ret = -1; + goto close; + } + + n = send(fd, buf, n, 0); + if (CHECK(n <= 0, "send", "failed\n")) { + log_err("failed/partial send"); + ret = -1; + goto close; + } + + ret = 0; +close: + close(fd); + return ret; +} + +static void v4_to_v6(struct sockaddr_storage *ss) +{ + struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss; + struct sockaddr_in v4 = *(struct sockaddr_in *)ss; + + v6->sin6_family = AF_INET6; + v6->sin6_port = v4.sin_port; + v6->sin6_addr.s6_addr[10] = 0xff; + v6->sin6_addr.s6_addr[11] = 0xff; + memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); +} + +static int udp_recv_send(int server_fd) +{ + char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))]; + struct sockaddr_storage _src_addr = { 0 }; + struct sockaddr_storage *src_addr = 
&_src_addr; + struct sockaddr_storage *dst_addr = NULL; + struct msghdr msg = { 0 }; + struct iovec iov = { 0 }; + struct cmsghdr *cm; + char buf[1]; + int ret, fd; + ssize_t n; + + iov.iov_base = buf; + iov.iov_len = sizeof(buf); + + msg.msg_name = src_addr; + msg.msg_namelen = sizeof(*src_addr); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + msg.msg_control = cmsg_buf; + msg.msg_controllen = sizeof(cmsg_buf); + + errno = 0; + n = recvmsg(server_fd, &msg, 0); + if (CHECK(n <= 0, "recvmsg", "failed\n")) { + log_err("failed to receive"); + return -1; + } + if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n")) + return -1; + + for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) { + if ((cm->cmsg_level == SOL_IP && + cm->cmsg_type == IP_ORIGDSTADDR) || + (cm->cmsg_level == SOL_IPV6 && + cm->cmsg_type == IPV6_ORIGDSTADDR)) { + dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm); + break; + } + log_err("warning: ignored cmsg at level %d type %d", + cm->cmsg_level, cm->cmsg_type); + } + if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n")) + return -1; + + /* Server socket bound to IPv4-mapped IPv6 address */ + if (src_addr->ss_family == AF_INET6 && + dst_addr->ss_family == AF_INET) { + v4_to_v6(dst_addr); + } + + /* Reply from original destination address. */ + fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); + if (CHECK(fd < 0, "socket", "failed\n")) { + log_err("failed to create tx socket"); + return -1; + } + + ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); + if (CHECK(ret, "bind", "failed\n")) { + log_err("failed to bind tx socket"); + goto out; + } + + msg.msg_control = NULL; + msg.msg_controllen = 0; + n = sendmsg(fd, &msg, 0); + if (CHECK(n <= 0, "sendmsg", "failed\n")) { + log_err("failed to send echo reply"); + ret = -1; + goto out; + } + + ret = 0; +out: + close(fd); + return ret; +} + +static int tcp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = tcp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static int udp_echo_test(int client_fd, int server_fd) +{ + int err; + + err = send_byte(client_fd); + if (err) + return -1; + err = udp_recv_send(server_fd); + if (err) + return -1; + err = recv_byte(client_fd); + if (err) + return -1; + + return 0; +} + +static struct bpf_link *attach_lookup_prog(struct bpf_program *prog) +{ + struct bpf_link *link; + int net_fd; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return NULL; + } + + link = bpf_program__attach_netns(prog, net_fd); + if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) { + errno = -PTR_ERR(link); + log_err("failed to attach program '%s' to netns", + bpf_program__name(prog)); + link = NULL; + } + + close(net_fd); + return link; +} + +static int update_lookup_map(struct bpf_map *map, int index, int sock_fd) +{ + int err, map_fd; + uint64_t value; + + map_fd = bpf_map__fd(map); + if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) { + errno = -map_fd; + log_err("failed to get map FD"); + return -1; + } + + value = (uint64_t)sock_fd; + err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) { + log_err("failed to update redir_map @ %d", index); + return -1; + } + + return 0; +} + +static __u32 link_info_prog_id(struct bpf_link *link) +{ + struct bpf_link_info info = {}; + 
__u32 info_len = sizeof(info); + int link_fd, err; + + link_fd = bpf_link__fd(link); + if (CHECK(link_fd < 0, "bpf_link__fd", "failed\n")) { + errno = -link_fd; + log_err("bpf_link__fd failed"); + return 0; + } + + err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); + if (CHECK(err, "bpf_obj_get_info_by_fd", "failed\n")) { + log_err("bpf_obj_get_info_by_fd"); + return 0; + } + if (CHECK(info_len != sizeof(info), "bpf_obj_get_info_by_fd", + "unexpected info len %u\n", info_len)) + return 0; + + return info.prog_id; +} + +static void query_lookup_prog(struct test_sk_lookup *skel) +{ + struct bpf_link *link[3] = {}; + __u32 attach_flags = 0; + __u32 prog_ids[3] = {}; + __u32 prog_cnt = 3; + __u32 prog_id; + int net_fd; + int err; + + net_fd = open("/proc/self/ns/net", O_RDONLY); + if (CHECK(net_fd < 0, "open", "failed\n")) { + log_err("failed to open /proc/self/ns/net"); + return; + } + + link[0] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[0]) + goto close; + link[1] = attach_lookup_prog(skel->progs.lookup_pass); + if (!link[1]) + goto detach; + link[2] = attach_lookup_prog(skel->progs.lookup_drop); + if (!link[2]) + goto detach; + + err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */, + &attach_flags, prog_ids, &prog_cnt); + if (CHECK(err, "bpf_prog_query", "failed\n")) { + log_err("failed to query lookup prog"); + goto detach; + } + + errno = 0; + if (CHECK(attach_flags != 0, "bpf_prog_query", + "wrong attach_flags on query: %u", attach_flags)) + goto detach; + if (CHECK(prog_cnt != 3, "bpf_prog_query", + "wrong program count on query: %u", prog_cnt)) + goto detach; + prog_id = link_info_prog_id(link[0]); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + prog_id = link_info_prog_id(link[1]); + CHECK(prog_ids[1] != prog_id, "bpf_prog_query", + "invalid program #1 id on query: %u != %u\n", + prog_ids[1], prog_id); + prog_id = link_info_prog_id(link[2]); + CHECK(prog_ids[2] != prog_id, "bpf_prog_query", + "invalid program #2 id on query: %u != %u\n", + prog_ids[2], prog_id); + +detach: + if (link[2]) + bpf_link__destroy(link[2]); + if (link[1]) + bpf_link__destroy(link[1]); + if (link[0]) + bpf_link__destroy(link[0]); +close: + close(net_fd); +} + +static void run_lookup_prog(const struct test *t) +{ + int client_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (server_fds[i] < 0) + goto close; + + err = update_lookup_map(t->sock_map, i, server_fds[i]); + if (err) + goto close; + + /* want just one server for non-reuseport test */ + if (!t->reuseport_prog) + break; + } + + client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); + if (client_fd < 0) + goto close; + + if (t->sotype == SOCK_STREAM) + tcp_echo_test(client_fd, server_fds[t->accept_on]); + else + udp_echo_test(client_fd, server_fds[t->accept_on]); + + close(client_fd); +close: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void test_redirect_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = 
SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "TCP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir addr", + .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "TCP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4_V6, INT_PORT }, + }, + { + .desc = "TCP IPv6 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "TCP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv4 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, INT_PORT }, + }, + { + .desc = "UDP IPv4 redir addr", + .lookup_prog = skel->progs.redir_ip4, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv4 redir with reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv4 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_A, + }, + { + .desc = "UDP IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv6 redir addr", 
+ .lookup_prog = skel->progs.redir_ip6, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4->IPv6 redir port", + .lookup_prog = skel->progs.redir_port, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .listen_at = { INT_IP4_V6, INT_PORT }, + .connect_to = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 redir and reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + }, + { + .desc = "UDP IPv6 redir skip reuseport", + .lookup_prog = skel->progs.select_sock_a_no_reuseport, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_A, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + run_lookup_prog(t); + } +} + +static void drop_on_lookup(const struct test *t) +{ + struct sockaddr_storage dst = {}; + int client_fd, server_fd, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + t->reuseport_prog); + if (server_fd < 0) + goto detach; + + client_fd = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client_fd < 0) + goto close_srv; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client_fd); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client_fd, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client_fd); +close_srv: + close(server_fd); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_lookup(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "TCP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, EXT_PORT }, + }, + { + .desc = "UDP IPv4 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "UDP IPv6 drop on lookup", + .lookup_prog = skel->progs.lookup_drop, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { EXT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_lookup(t); + } +} + +static void drop_on_reuseport(const struct test *t) +{ + struct sockaddr_storage dst = { 0 }; + int client, server1, server2, err; + struct bpf_link *lookup_link; + ssize_t n; + + lookup_link = attach_lookup_prog(t->lookup_prog); + if (!lookup_link) + return; + + server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, + 
t->reuseport_prog); + if (server1 < 0) + goto detach; + + err = update_lookup_map(t->sock_map, SERVER_A, server1); + if (err) + goto detach; + + /* second server on destination address we should never reach */ + server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, + NULL /* reuseport prog */); + if (server2 < 0) + goto close_srv1; + + client = make_socket(t->sotype, t->connect_to.ip, + t->connect_to.port, &dst); + if (client < 0) + goto close_srv2; + + err = connect(client, (void *)&dst, inetaddr_len(&dst)); + if (t->sotype == SOCK_DGRAM) { + err = send_byte(client); + if (err) + goto close_all; + + /* Read out asynchronous error */ + n = recv(client, NULL, 0, 0); + err = n == -1; + } + if (CHECK(!err || errno != ECONNREFUSED, "connect", + "unexpected success or error\n")) + log_err("expected ECONNREFUSED on connect"); + +close_all: + close(client); +close_srv2: + close(server2); +close_srv1: + close(server1); +detach: + bpf_link__destroy(lookup_link); +} + +static void test_drop_on_reuseport(struct test_sk_lookup *skel) +{ + const struct test tests[] = { + { + .desc = "TCP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + { + .desc = "UDP IPv4 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "TCP IPv6 drop on reuseport", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.reuseport_drop, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_STREAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + }, + }; + const struct test *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + if (test__start_subtest(t->desc)) + drop_on_reuseport(t); + } +} + +static void run_sk_assign(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog, + const char *listen_ip, const char *connect_ip) +{ + int client_fd, peer_fd, server_fds[MAX_SERVERS] = { -1 }; + struct bpf_link *lookup_link; + int i, err; + + lookup_link = attach_lookup_prog(lookup_prog); + if (!lookup_link) + return; + + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL); + if (server_fds[i] < 0) + goto close_servers; + + err = update_lookup_map(skel->maps.redir_map, i, + server_fds[i]); + if (err) + goto close_servers; + } + + client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT); + if (client_fd < 0) + goto close_servers; + + peer_fd = accept(server_fds[SERVER_B], NULL, NULL); + if (CHECK(peer_fd < 0, "accept", "failed\n")) + goto close_client; + + close(peer_fd); +close_client: + close(client_fd); +close_servers: + for (i = 0; i < ARRAY_SIZE(server_fds); i++) { + if (server_fds[i] != -1) + close(server_fds[i]); + } + bpf_link__destroy(lookup_link); +} + +static void run_sk_assign_v4(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP4, 
EXT_IP4); +} + +static void run_sk_assign_v6(struct test_sk_lookup *skel, + struct bpf_program *lookup_prog) +{ + run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6); +} + +static void run_sk_assign_connected(struct test_sk_lookup *skel, + int sotype) +{ + int err, client_fd, connected_fd, server_fd; + struct bpf_link *lookup_link; + + server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL); + if (server_fd < 0) + return; + + connected_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (connected_fd < 0) + goto out_close_server; + + /* Put a connected socket in redirect map */ + err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd); + if (err) + goto out_close_connected; + + lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport); + if (!lookup_link) + goto out_close_connected; + + /* Try to redirect TCP SYN / UDP packet to a connected socket */ + client_fd = make_client(sotype, EXT_IP4, EXT_PORT); + if (client_fd < 0) + goto out_unlink_prog; + if (sotype == SOCK_DGRAM) { + send_byte(client_fd); + recv_byte(server_fd); + } + + close(client_fd); +out_unlink_prog: + bpf_link__destroy(lookup_link); +out_close_connected: + close(connected_fd); +out_close_server: + close(server_fd); +} + +static void test_sk_assign_helper(struct test_sk_lookup *skel) +{ + if (test__start_subtest("sk_assign returns EEXIST")) + run_sk_assign_v4(skel, skel->progs.sk_assign_eexist); + if (test__start_subtest("sk_assign honors F_REPLACE")) + run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag); + if (test__start_subtest("sk_assign accepts NULL socket")) + run_sk_assign_v4(skel, skel->progs.sk_assign_null); + if (test__start_subtest("access ctx->sk")) + run_sk_assign_v4(skel, skel->progs.access_ctx_sk); + if (test__start_subtest("narrow access to ctx v4")) + run_sk_assign_v4(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("narrow access to ctx v6")) + run_sk_assign_v6(skel, skel->progs.ctx_narrow_access); + if (test__start_subtest("sk_assign rejects TCP established")) + run_sk_assign_connected(skel, SOCK_STREAM); + if (test__start_subtest("sk_assign rejects UDP connected")) + run_sk_assign_connected(skel, SOCK_DGRAM); +} + +struct test_multi_prog { + const char *desc; + struct bpf_program *prog1; + struct bpf_program *prog2; + struct bpf_map *redir_map; + struct bpf_map *run_map; + int expect_errno; + struct inet_addr listen_at; +}; + +static void run_multi_prog_lookup(const struct test_multi_prog *t) +{ + struct sockaddr_storage dst = {}; + int map_fd, server_fd, client_fd; + struct bpf_link *link1, *link2; + int prog_idx, done, err; + + map_fd = bpf_map__fd(t->run_map); + + done = 0; + prog_idx = PROG1; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + prog_idx = PROG2; + err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY); + if (CHECK(err, "bpf_map_update_elem", "failed\n")) + return; + + link1 = attach_lookup_prog(t->prog1); + if (!link1) + return; + link2 = attach_lookup_prog(t->prog2); + if (!link2) + goto out_unlink1; + + server_fd = make_server(SOCK_STREAM, t->listen_at.ip, + t->listen_at.port, NULL); + if (server_fd < 0) + goto out_unlink2; + + err = update_lookup_map(t->redir_map, SERVER_A, server_fd); + if (err) + goto out_close_server; + + client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst); + if (client_fd < 0) + goto out_close_server; + + err = connect(client_fd, (void *)&dst, inetaddr_len(&dst)); + if (CHECK(err && !t->expect_errno, "connect", + 
"unexpected error %d\n", errno)) + goto out_close_client; + if (CHECK(err && t->expect_errno && errno != t->expect_errno, + "connect", "unexpected error %d\n", errno)) + goto out_close_client; + + done = 0; + prog_idx = PROG1; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n"); + + done = 0; + prog_idx = PROG2; + err = bpf_map_lookup_elem(map_fd, &prog_idx, &done); + CHECK(err, "bpf_map_lookup_elem", "failed\n"); + CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n"); + +out_close_client: + close(client_fd); +out_close_server: + close(server_fd); +out_unlink2: + bpf_link__destroy(link2); +out_unlink1: + bpf_link__destroy(link1); +} + +static void test_multi_prog_lookup(struct test_sk_lookup *skel) +{ + struct test_multi_prog tests[] = { + { + .desc = "multi prog - pass, pass", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + }, + { + .desc = "multi prog - drop, drop", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, drop", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - drop, pass", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { EXT_IP4, EXT_PORT }, + .expect_errno = ECONNREFUSED, + }, + { + .desc = "multi prog - pass, redir", + .prog1 = skel->progs.multi_prog_pass1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, pass", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_pass2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - drop, redir", + .prog1 = skel->progs.multi_prog_drop1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, drop", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_drop2, + .listen_at = { INT_IP4, INT_PORT }, + }, + { + .desc = "multi prog - redir, redir", + .prog1 = skel->progs.multi_prog_redir1, + .prog2 = skel->progs.multi_prog_redir2, + .listen_at = { INT_IP4, INT_PORT }, + }, + }; + struct test_multi_prog *t; + + for (t = tests; t < tests + ARRAY_SIZE(tests); t++) { + t->redir_map = skel->maps.redir_map; + t->run_map = skel->maps.run_map; + if (test__start_subtest(t->desc)) + run_multi_prog_lookup(t); + } +} + +static void run_tests(struct test_sk_lookup *skel) +{ + if (test__start_subtest("query lookup prog")) + query_lookup_prog(skel); + test_redirect_lookup(skel); + test_drop_on_lookup(skel); + test_drop_on_reuseport(skel); + test_sk_assign_helper(skel); + test_multi_prog_lookup(skel); +} + +static int switch_netns(void) +{ + static const char * const setup_script[] = { + "ip -6 addr add dev lo " EXT_IP6 "/128 nodad", + "ip -6 addr add dev lo " INT_IP6 "/128 nodad", + "ip link set dev lo up", + NULL, + }; + const char * const *cmd; + int err; + + err = unshare(CLONE_NEWNET); + if (CHECK(err, "unshare", "failed\n")) { + log_err("unshare(CLONE_NEWNET)"); + return -1; + } + + for (cmd = setup_script; *cmd; cmd++) { + err = system(*cmd); + if (CHECK(err, "system", "failed\n")) { + log_err("system(%s)", *cmd); + return -1; + } + } + + return 0; +} + 
+void test_sk_lookup(void) +{ + struct test_sk_lookup *skel; + int err; + + err = switch_netns(); + if (err) + return; + + skel = test_sk_lookup__open_and_load(); + if (CHECK(!skel, "skel open_and_load", "failed\n")) + return; + + run_tests(skel); + + test_sk_lookup__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c new file mode 100644 index 000000000000..bbf8296f4d66 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c @@ -0,0 +1,641 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// Copyright (c) 2020 Cloudflare + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define IP4(a, b, c, d) \ + bpf_htonl((((__u32)(a) & 0xffU) << 24) | \ + (((__u32)(b) & 0xffU) << 16) | \ + (((__u32)(c) & 0xffU) << 8) | \ + (((__u32)(d) & 0xffU) << 0)) +#define IP6(aaaa, bbbb, cccc, dddd) \ + { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) } + +#define MAX_SOCKS 32 + +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, MAX_SOCKS); + __type(key, __u32); + __type(value, __u64); +} redir_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 2); + __type(key, int); + __type(value, int); +} run_map SEC(".maps"); + +enum { + PROG1 = 0, + PROG2, +}; + +enum { + SERVER_A = 0, + SERVER_B, +}; + +/* Addressable key/value constants for convenience */ +static const int KEY_PROG1 = PROG1; +static const int KEY_PROG2 = PROG2; +static const int PROG_DONE = 1; + +static const __u32 KEY_SERVER_A = SERVER_A; +static const __u32 KEY_SERVER_B = SERVER_B; + +static const __u16 DST_PORT = 7007; /* Host byte order */ +static const __u32 DST_IP4 = IP4(127, 0, 0, 1); +static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001); + +SEC("sk_lookup/lookup_pass") +int lookup_pass(struct bpf_sk_lookup *ctx) +{ + return SK_PASS; +} + +SEC("sk_lookup/lookup_drop") +int lookup_drop(struct bpf_sk_lookup *ctx) +{ + return SK_DROP; +} + +SEC("sk_reuseport/reuse_pass") +int reuseport_pass(struct sk_reuseport_md *ctx) +{ + return SK_PASS; +} + +SEC("sk_reuseport/reuse_drop") +int reuseport_drop(struct sk_reuseport_md *ctx) +{ + return SK_DROP; +} + +/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */ +SEC("sk_lookup/redir_port") +int redir_port(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->local_port != DST_PORT) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */ +SEC("sk_lookup/redir_ip4") +int redir_ip4(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip4 != DST_IP4) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. 
*/ +SEC("sk_lookup/redir_ip6") +int redir_ip6(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + if (ctx->family != AF_INET6) + return SK_PASS; + if (ctx->local_port != DST_PORT) + return SK_PASS; + if (ctx->local_ip6[0] != DST_IP6[0] || + ctx->local_ip6[1] != DST_IP6[1] || + ctx->local_ip6[2] != DST_IP6[2] || + ctx->local_ip6[3] != DST_IP6[3]) + return SK_PASS; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a") +int select_sock_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_PASS; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_lookup/select_sock_a_no_reuseport") +int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT); + bpf_sk_release(sk); + return err ? SK_DROP : SK_PASS; +} + +SEC("sk_reuseport/select_sock_b") +int select_sock_b(struct sk_reuseport_md *ctx) +{ + __u32 key = KEY_SERVER_B; + int err; + + err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0); + return err ? SK_DROP : SK_PASS; +} + +/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */ +SEC("sk_lookup/sk_assign_eexist") +int sk_assign_eexist(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err != -EEXIST) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -EEXIST); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */ +SEC("sk_lookup/sk_assign_replace_flag") +int sk_assign_replace_flag(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, 0); + if (err) + goto out; + bpf_sk_release(sk); + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that bpf_sk_assign(sk=NULL) is accepted. 
*/ +SEC("sk_lookup/sk_assign_null") +int sk_assign_null(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk = NULL; + int err, ret; + + ret = SK_DROP; + + err = bpf_sk_assign(ctx, NULL, 0); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) { + bpf_printk("sk_assign returned %d, expected 0\n", err); + goto out; + } + + if (ctx->sk != sk) + goto out; + err = bpf_sk_assign(ctx, NULL, 0); + if (err != -EEXIST) + goto out; + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +/* Check that selected sk is accessible through context. */ +SEC("sk_lookup/access_ctx_sk") +int access_ctx_sk(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk1 = NULL, *sk2 = NULL; + int err, ret; + + ret = SK_DROP; + + /* Try accessing unassigned (NULL) ctx->sk field */ + if (ctx->sk && ctx->sk->family != AF_INET) + goto out; + + /* Assign a value to ctx->sk */ + sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk1) + goto out; + err = bpf_sk_assign(ctx, sk1, 0); + if (err) + goto out; + if (ctx->sk != sk1) + goto out; + + /* Access ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + /* Reset selection */ + err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk) + goto out; + + /* Assign another socket */ + sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (!sk2) + goto out; + err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE); + if (err) + goto out; + if (ctx->sk != sk2) + goto out; + + /* Access reassigned ctx->sk fields */ + if (ctx->sk->family != AF_INET || + ctx->sk->type != SOCK_STREAM || + ctx->sk->state != BPF_TCP_LISTEN) + goto out; + + ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */ +out: + if (sk1) + bpf_sk_release(sk1); + if (sk2) + bpf_sk_release(sk2); + return ret; +} + +/* Check narrow loads from ctx fields that support them. + * + * Narrow loads of size >= target field size from a non-zero offset + * are not covered because they give bogus results, that is the + * verifier ignores the offset. + */ +SEC("sk_lookup/ctx_narrow_access") +int ctx_narrow_access(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, family; + __u16 *half; + __u8 *byte; + bool v4; + + v4 = (ctx->family == AF_INET); + + /* Narrow loads from family field */ + byte = (__u8 *)&ctx->family; + half = (__u16 *)&ctx->family; + if (byte[0] != (v4 ? AF_INET : AF_INET6) || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + if (half[0] != (v4 ? AF_INET : AF_INET6)) + return SK_DROP; + + byte = (__u8 *)&ctx->protocol; + if (byte[0] != IPPROTO_TCP || + byte[1] != 0 || byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->protocol; + if (half[0] != IPPROTO_TCP) + return SK_DROP; + + /* Narrow loads from remote_port field. Expect non-0 value. */ + byte = (__u8 *)&ctx->remote_port; + if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_port; + if (half[0] == 0) + return SK_DROP; + + /* Narrow loads from local_port field. Expect DST_PORT. 
*/ + byte = (__u8 *)&ctx->local_port; + if (byte[0] != ((DST_PORT >> 0) & 0xff) || + byte[1] != ((DST_PORT >> 8) & 0xff) || + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_port; + if (half[0] != DST_PORT) + return SK_DROP; + + /* Narrow loads from IPv4 fields */ + if (v4) { + /* Expect non-0.0.0.0 in remote_ip4 */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] == 0 && half[1] == 0) + return SK_DROP; + + /* Expect DST_IP4 in local_ip4 */ + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != ((DST_IP4 >> 0) & 0xff) || + byte[1] != ((DST_IP4 >> 8) & 0xff) || + byte[2] != ((DST_IP4 >> 16) & 0xff) || + byte[3] != ((DST_IP4 >> 24) & 0xff)) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != ((DST_IP4 >> 0) & 0xffff) || + half[1] != ((DST_IP4 >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect 0.0.0.0 IPs when family != AF_INET */ + byte = (__u8 *)&ctx->remote_ip4; + if (byte[0] != 0 || byte[1] != 0 && + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip4; + if (byte[0] != 0 || byte[1] != 0 && + byte[2] != 0 || byte[3] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip4; + if (half[0] != 0 || half[1] != 0) + return SK_DROP; + } + + /* Narrow loads from IPv6 fields */ + if (!v4) { + /* Expenct non-:: IP in remote_ip6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] == 0 && byte[1] == 0 && + byte[2] == 0 && byte[3] == 0 && + byte[4] == 0 && byte[5] == 0 && + byte[6] == 0 && byte[7] == 0 && + byte[8] == 0 && byte[9] == 0 && + byte[10] == 0 && byte[11] == 0 && + byte[12] == 0 && byte[13] == 0 && + byte[14] == 0 && byte[15] == 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] == 0 && half[1] == 0 && + half[2] == 0 && half[3] == 0 && + half[4] == 0 && half[5] == 0 && + half[6] == 0 && half[7] == 0) + return SK_DROP; + + /* Expect DST_IP6 in local_ip6 */ + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) || + byte[1] != ((DST_IP6[0] >> 8) & 0xff) || + byte[2] != ((DST_IP6[0] >> 16) & 0xff) || + byte[3] != ((DST_IP6[0] >> 24) & 0xff) || + byte[4] != ((DST_IP6[1] >> 0) & 0xff) || + byte[5] != ((DST_IP6[1] >> 8) & 0xff) || + byte[6] != ((DST_IP6[1] >> 16) & 0xff) || + byte[7] != ((DST_IP6[1] >> 24) & 0xff) || + byte[8] != ((DST_IP6[2] >> 0) & 0xff) || + byte[9] != ((DST_IP6[2] >> 8) & 0xff) || + byte[10] != ((DST_IP6[2] >> 16) & 0xff) || + byte[11] != ((DST_IP6[2] >> 24) & 0xff) || + byte[12] != ((DST_IP6[3] >> 0) & 0xff) || + byte[13] != ((DST_IP6[3] >> 8) & 0xff) || + byte[14] != ((DST_IP6[3] >> 16) & 0xff) || + byte[15] != ((DST_IP6[3] >> 24) & 0xff)) + return SK_DROP; + half = (__u16 *)&ctx->local_ip6; + if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) || + half[1] != ((DST_IP6[0] >> 16) & 0xffff) || + half[2] != ((DST_IP6[1] >> 0) & 0xffff) || + half[3] != ((DST_IP6[1] >> 16) & 0xffff) || + half[4] != ((DST_IP6[2] >> 0) & 0xffff) || + half[5] != ((DST_IP6[2] >> 16) & 0xffff) || + half[6] != ((DST_IP6[3] >> 0) & 0xffff) || + half[7] != ((DST_IP6[3] >> 16) & 0xffff)) + return SK_DROP; + } else { + /* Expect :: IPs when family != AF_INET6 */ + byte = (__u8 *)&ctx->remote_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] 
!= 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->remote_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + + byte = (__u8 *)&ctx->local_ip6; + if (byte[0] != 0 || byte[1] != 0 || + byte[2] != 0 || byte[3] != 0 || + byte[4] != 0 || byte[5] != 0 || + byte[6] != 0 || byte[7] != 0 || + byte[8] != 0 || byte[9] != 0 || + byte[10] != 0 || byte[11] != 0 || + byte[12] != 0 || byte[13] != 0 || + byte[14] != 0 || byte[15] != 0) + return SK_DROP; + half = (__u16 *)&ctx->local_ip6; + if (half[0] != 0 || half[1] != 0 || + half[2] != 0 || half[3] != 0 || + half[4] != 0 || half[5] != 0 || + half[6] != 0 || half[7] != 0) + return SK_DROP; + } + + /* Success, redirect to KEY_SERVER_B */ + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B); + if (sk) { + bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + } + return SK_PASS; +} + +/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */ +SEC("sk_lookup/sk_assign_esocknosupport") +int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err, ret; + + ret = SK_DROP; + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + goto out; + + err = bpf_sk_assign(ctx, sk, 0); + if (err != -ESOCKTNOSUPPORT) { + bpf_printk("sk_assign returned %d, expected %d\n", + err, -ESOCKTNOSUPPORT); + goto out; + } + + ret = SK_PASS; /* Success, pass to regular lookup */ +out: + if (sk) + bpf_sk_release(sk); + return ret; +} + +SEC("sk_lookup/multi_prog_pass1") +int multi_prog_pass1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_pass2") +int multi_prog_pass2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_drop1") +int multi_prog_drop1(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +SEC("sk_lookup/multi_prog_drop2") +int multi_prog_drop2(struct bpf_sk_lookup *ctx) +{ + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_DROP; +} + +static __always_inline int select_server_a(struct bpf_sk_lookup *ctx) +{ + struct bpf_sock *sk; + int err; + + sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A); + if (!sk) + return SK_DROP; + + err = bpf_sk_assign(ctx, sk, 0); + bpf_sk_release(sk); + if (err) + return SK_DROP; + + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_redir1") +int multi_prog_redir1(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +SEC("sk_lookup/multi_prog_redir2") +int multi_prog_redir2(struct bpf_sk_lookup *ctx) +{ + int ret; + + ret = select_server_a(ctx); + bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY); + return SK_PASS; +} + +char _license[] SEC("license") = "Dual BSD/GPL"; +__u32 _version SEC("version") = 1; From c4471ad9a50d5548e66ae4511acfb1dc23a48744 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 20 Jul 2020 00:03:33 +0200 Subject: [PATCH 1186/2056] net: phy: add USXGMII link partner ability constants The constants are taken from the USXGMII Singleport Copper Interface specification. The naming are based on the SGMII ones, but with an MDIO_ prefix. 
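As a usage illustration only (this sketch is not part of the patch; the helper name is invented and "lpa" is assumed to have been read from the vendor-specific link partner ability register), a PCS driver could resolve link, speed and duplex with the new constants roughly like this:

/* Decode a USXGMII link partner ability word. Purely illustrative. */
static void example_decode_usxgmii_lpa(u16 lpa, int *speed, int *duplex,
					bool *link)
{
	*link = !!(lpa & MDIO_USXGMII_LINK);
	*duplex = (lpa & MDIO_USXGMII_FULL_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;

	switch (lpa & MDIO_USXGMII_SPD_MASK) {
	case MDIO_USXGMII_10:
		*speed = SPEED_10;
		break;
	case MDIO_USXGMII_100:
		*speed = SPEED_100;
		break;
	case MDIO_USXGMII_1000:
		*speed = SPEED_1000;
		break;
	case MDIO_USXGMII_2500:
		*speed = SPEED_2500;
		break;
	case MDIO_USXGMII_5000:
		*speed = SPEED_5000;
		break;
	case MDIO_USXGMII_10G:
		*speed = SPEED_10000;
		break;
	default:
		*speed = SPEED_UNKNOWN;
		break;
	}
}

The felix switch driver in the next patch is converted to this style of decoding in its link state readout.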
Signed-off-by: Michael Walle Reviewed-by: Russell King Signed-off-by: David S. Miller --- include/uapi/linux/mdio.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 4bcb41c71b8c..3f302e2523b2 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -324,4 +324,30 @@ static inline __u16 mdio_phy_id_c45(int prtad, int devad) return MDIO_PHY_ID_C45 | (prtad << 5) | devad; } +/* UsxgmiiChannelInfo[15:0] for USXGMII in-band auto-negotiation.*/ +#define MDIO_USXGMII_EEE_CLK_STP 0x0080 /* EEE clock stop supported */ +#define MDIO_USXGMII_EEE 0x0100 /* EEE supported */ +#define MDIO_USXGMII_SPD_MASK 0x0e00 /* USXGMII speed mask */ +#define MDIO_USXGMII_FULL_DUPLEX 0x1000 /* USXGMII full duplex */ +#define MDIO_USXGMII_DPX_SPD_MASK 0x1e00 /* USXGMII duplex and speed bits */ +#define MDIO_USXGMII_10 0x0000 /* 10Mbps */ +#define MDIO_USXGMII_10HALF 0x0000 /* 10Mbps half-duplex */ +#define MDIO_USXGMII_10FULL 0x1000 /* 10Mbps full-duplex */ +#define MDIO_USXGMII_100 0x0200 /* 100Mbps */ +#define MDIO_USXGMII_100HALF 0x0200 /* 100Mbps half-duplex */ +#define MDIO_USXGMII_100FULL 0x1200 /* 100Mbps full-duplex */ +#define MDIO_USXGMII_1000 0x0400 /* 1000Mbps */ +#define MDIO_USXGMII_1000HALF 0x0400 /* 1000Mbps half-duplex */ +#define MDIO_USXGMII_1000FULL 0x1400 /* 1000Mbps full-duplex */ +#define MDIO_USXGMII_10G 0x0600 /* 10Gbps */ +#define MDIO_USXGMII_10GHALF 0x0600 /* 10Gbps half-duplex */ +#define MDIO_USXGMII_10GFULL 0x1600 /* 10Gbps full-duplex */ +#define MDIO_USXGMII_2500 0x0800 /* 2500Mbps */ +#define MDIO_USXGMII_2500HALF 0x0800 /* 2500Mbps half-duplex */ +#define MDIO_USXGMII_2500FULL 0x1800 /* 2500Mbps full-duplex */ +#define MDIO_USXGMII_5000 0x0a00 /* 5000Mbps */ +#define MDIO_USXGMII_5000HALF 0x0a00 /* 5000Mbps half-duplex */ +#define MDIO_USXGMII_5000FULL 0x1a00 /* 5000Mbps full-duplex */ +#define MDIO_USXGMII_LINK 0x8000 /* PHY link with copper-side partner */ + #endif /* _UAPI__LINUX_MDIO_H__ */ From 16659b811add79493183e7009744026cdb3d9c62 Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 20 Jul 2020 00:03:34 +0200 Subject: [PATCH 1187/2056] net: dsa: felix: (re)use already existing constants Now that there are USXGMII constants available, drop the old definitions and reuse the generic ones. Signed-off-by: Michael Walle Tested-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/ocelot/felix_vsc9959.c | 45 +++++++------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 1bdf74fd8be4..9b720c8ddfc3 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -11,35 +11,15 @@ #include #include #include +#include #include #include "felix.h" #define VSC9959_VCAP_IS2_CNT 1024 #define VSC9959_VCAP_IS2_ENTRY_WIDTH 376 #define VSC9959_VCAP_PORT_CNT 6 - -/* TODO: should find a better place for these */ -#define USXGMII_BMCR_RESET BIT(15) -#define USXGMII_BMCR_AN_EN BIT(12) -#define USXGMII_BMCR_RST_AN BIT(9) -#define USXGMII_BMSR_LNKS(status) (((status) & GENMASK(2, 2)) >> 2) -#define USXGMII_BMSR_AN_CMPL(status) (((status) & GENMASK(5, 5)) >> 5) -#define USXGMII_ADVERTISE_LNKS(x) (((x) << 15) & BIT(15)) -#define USXGMII_ADVERTISE_FDX BIT(12) -#define USXGMII_ADVERTISE_SPEED(x) (((x) << 9) & GENMASK(11, 9)) -#define USXGMII_LPA_LNKS(lpa) ((lpa) >> 15) -#define USXGMII_LPA_DUPLEX(lpa) (((lpa) & GENMASK(12, 12)) >> 12) -#define USXGMII_LPA_SPEED(lpa) (((lpa) & GENMASK(11, 9)) >> 9) - #define VSC9959_TAS_GCL_ENTRY_MAX 63 -enum usxgmii_speed { - USXGMII_SPEED_10 = 0, - USXGMII_SPEED_100 = 1, - USXGMII_SPEED_1000 = 2, - USXGMII_SPEED_2500 = 4, -}; - static const u32 vsc9959_ana_regmap[] = { REG(ANA_ADVLEARN, 0x0089a0), REG(ANA_VLANMASK, 0x0089a4), @@ -845,11 +825,10 @@ static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs, { /* Configure device ability for the USXGMII Replicator */ phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE, - USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) | - USXGMII_ADVERTISE_LNKS(1) | + MDIO_USXGMII_2500FULL | + MDIO_USXGMII_LINK | ADVERTISE_SGMII | - ADVERTISE_LPACK | - USXGMII_ADVERTISE_FDX); + ADVERTISE_LPACK); } void vsc9959_pcs_config(struct ocelot *ocelot, int port, @@ -1063,8 +1042,8 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs, return; pcs->autoneg = true; - pcs->autoneg_complete = USXGMII_BMSR_AN_CMPL(status); - pcs->link = USXGMII_BMSR_LNKS(status); + pcs->autoneg_complete = !!(status & BMSR_ANEGCOMPLETE); + pcs->link = !!(status & BMSR_LSTATUS); if (!pcs->link || !pcs->autoneg_complete) return; @@ -1073,24 +1052,24 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs, if (lpa < 0) return; - switch (USXGMII_LPA_SPEED(lpa)) { - case USXGMII_SPEED_10: + switch (lpa & MDIO_USXGMII_SPD_MASK) { + case MDIO_USXGMII_10: pcs->speed = SPEED_10; break; - case USXGMII_SPEED_100: + case MDIO_USXGMII_100: pcs->speed = SPEED_100; break; - case USXGMII_SPEED_1000: + case MDIO_USXGMII_1000: pcs->speed = SPEED_1000; break; - case USXGMII_SPEED_2500: + case MDIO_USXGMII_2500: pcs->speed = SPEED_2500; break; default: break; } - if (USXGMII_LPA_DUPLEX(lpa)) + if (lpa & MDIO_USXGMII_FULL_DUPLEX) pcs->duplex = DUPLEX_FULL; else pcs->duplex = DUPLEX_HALF; From 975d183ef0ca071dfeda4e70d268d1103d09ad5f Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Mon, 20 Jul 2020 00:03:35 +0200 Subject: [PATCH 1188/2056] net: enetc: Initialize SerDes for SGMII and USXGMII protocols ENETC has ethernet MACs capable of SGMII, 2500BaseX and USXGMII. But in order to use these protocols some SerDes configurations need to be performed. The SerDes is configurable via an internal PCS PHY which is connected to an internal MDIO bus at address 0. This patch basically removes the dependency on bootloader regarding SerDes initialization. 
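For context, the pattern the new enetc_imdio_init() implements can be reduced to the sketch below (the helper name is invented; error handling and the ENETC-specific bus setup are omitted): register a driver-private MDIO bus and fetch the PCS PHY device sitting at address 0 on it.

/* Minimal sketch of the internal MDIO / PCS lookup; illustrative only. */
static struct phy_device *example_get_internal_pcs(struct mii_bus *bus,
						   bool is_c45)
{
	int err;

	bus->phy_mask = ~0;	/* do not auto-probe any address */

	err = mdiobus_register(bus);
	if (err)
		return ERR_PTR(err);

	return get_phy_device(bus, 0, is_c45);	/* PCS sits at address 0 */
}

In the real code the bus read/write ops are the ENETC MDIO accessors and the bus is rooted at ENETC_PM_IMDIO_BASE.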
Signed-off-by: Michael Walle Reviewed-by: Claudiu Manoil Tested-by: Vladimir Oltean Signed-off-by: David S. Miller --- .../net/ethernet/freescale/enetc/enetc_hw.h | 3 + .../net/ethernet/freescale/enetc/enetc_pf.c | 135 ++++++++++++++++++ .../net/ethernet/freescale/enetc/enetc_pf.h | 2 + 3 files changed, 140 insertions(+) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index fc357bc56835..135bf46354ea 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -224,6 +224,9 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_PM0_MAXFRM 0x8014 #define ENETC_SET_TX_MTU(val) ((val) << 16) #define ENETC_SET_MAXFRM(val) ((val) & 0xffff) + +#define ENETC_PM_IMDIO_BASE 0x8030 + #define ENETC_PM0_IF_MODE 0x8300 #define ENETC_PMO_IFM_RG BIT(2) #define ENETC_PM0_IFM_RLP (BIT(5) | BIT(11)) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 4fac57dbb3c8..3bf345b84b76 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2017-2019 NXP */ +#include #include #include #include @@ -833,6 +834,135 @@ static void enetc_of_put_phy(struct enetc_ndev_priv *priv) of_node_put(priv->phy_node); } +static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45) +{ + struct device *dev = &pf->si->pdev->dev; + struct enetc_mdio_priv *mdio_priv; + struct phy_device *pcs; + struct mii_bus *bus; + int err; + + bus = mdiobus_alloc_size(sizeof(*mdio_priv)); + if (!bus) + return -ENOMEM; + + bus->name = "Freescale ENETC internal MDIO Bus"; + bus->read = enetc_mdio_read; + bus->write = enetc_mdio_write; + bus->parent = dev; + bus->phy_mask = ~0; + mdio_priv = bus->priv; + mdio_priv->hw = &pf->si->hw; + mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev)); + + err = mdiobus_register(bus); + if (err) { + dev_err(dev, "cannot register internal MDIO bus (%d)\n", err); + goto free_mdio_bus; + } + + pcs = get_phy_device(bus, 0, is_c45); + if (IS_ERR(pcs)) { + err = PTR_ERR(pcs); + dev_err(dev, "cannot get internal PCS PHY (%d)\n", err); + goto unregister_mdiobus; + } + + pf->imdio = bus; + pf->pcs = pcs; + + return 0; + +unregister_mdiobus: + mdiobus_unregister(bus); +free_mdio_bus: + mdiobus_free(bus); + return err; +} + +static void enetc_imdio_remove(struct enetc_pf *pf) +{ + if (pf->pcs) + put_device(&pf->pcs->mdio.dev); + if (pf->imdio) { + mdiobus_unregister(pf->imdio); + mdiobus_free(pf->imdio); + } +} + +static void enetc_configure_sgmii(struct phy_device *pcs) +{ + /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001 + * for the MAC PCS in order to acknowledge the AN. 
+ */ + phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | ADVERTISE_LPACK); + + phy_write(pcs, ENETC_PCS_IF_MODE, + ENETC_PCS_IF_MODE_SGMII_EN | + ENETC_PCS_IF_MODE_USE_SGMII_AN); + + /* Adjust link timer for SGMII */ + phy_write(pcs, ENETC_PCS_LINK_TIMER1, ENETC_PCS_LINK_TIMER1_VAL); + phy_write(pcs, ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL); + + phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE); +} + +static void enetc_configure_2500basex(struct phy_device *pcs) +{ + phy_write(pcs, ENETC_PCS_IF_MODE, + ENETC_PCS_IF_MODE_SGMII_EN | + ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500)); + + phy_write(pcs, MII_BMCR, BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_RESET); +} + +static void enetc_configure_usxgmii(struct phy_device *pcs) +{ + /* Configure device ability for the USXGMII Replicator */ + phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE, + ADVERTISE_SGMII | ADVERTISE_LPACK | + MDIO_USXGMII_FULL_DUPLEX); + + /* Restart PCS AN */ + phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR, + BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); +} + +static int enetc_configure_serdes(struct enetc_ndev_priv *priv) +{ + bool is_c45 = priv->if_mode == PHY_INTERFACE_MODE_USXGMII; + struct enetc_pf *pf = enetc_si_priv(priv->si); + int err; + + if (priv->if_mode != PHY_INTERFACE_MODE_SGMII && + priv->if_mode != PHY_INTERFACE_MODE_2500BASEX && + priv->if_mode != PHY_INTERFACE_MODE_USXGMII) + return 0; + + err = enetc_imdio_init(pf, is_c45); + if (err) + return err; + + switch (priv->if_mode) { + case PHY_INTERFACE_MODE_SGMII: + enetc_configure_sgmii(pf->pcs); + break; + case PHY_INTERFACE_MODE_2500BASEX: + enetc_configure_2500basex(pf->pcs); + break; + case PHY_INTERFACE_MODE_USXGMII: + enetc_configure_usxgmii(pf->pcs); + break; + default: + dev_err(&pf->si->pdev->dev, "Unsupported link mode %s\n", + phy_modes(priv->if_mode)); + } + + return 0; +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -897,6 +1027,10 @@ static int enetc_pf_probe(struct pci_dev *pdev, if (err) dev_warn(&pdev->dev, "Fallback to PHY-less operation\n"); + err = enetc_configure_serdes(priv); + if (err) + dev_warn(&pdev->dev, "Attempted SerDes config but failed\n"); + err = register_netdev(ndev); if (err) goto err_reg_netdev; @@ -932,6 +1066,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) priv = netdev_priv(si->ndev); unregister_netdev(si->ndev); + enetc_imdio_remove(pf); enetc_mdio_remove(pf); enetc_of_put_phy(priv); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h index 59e65a6f6c3e..2cb922b59f46 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -44,6 +44,8 @@ struct enetc_pf { DECLARE_BITMAP(active_vlans, VLAN_N_VID); struct mii_bus *mdio; /* saved for cleanup */ + struct mii_bus *imdio; + struct phy_device *pcs; }; int enetc_msg_psi_init(struct enetc_pf *pf); From 07095c025ac2f75c14550c287683ae90a8cf706e Mon Sep 17 00:00:00 2001 From: Alex Marginean Date: Mon, 20 Jul 2020 00:03:36 +0200 Subject: [PATCH 1189/2056] net: enetc: Use DT protocol information to set up the ports Use DT information rather than in-band information from bootloader to set up MAC for XGMII. For RGMII use the DT indication in addition to RGMII defaults in hardware. However, this implies that PHY connection information needs to be extracted before netdevice creation, when the ENETC Port MAC is being configured. 
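The device tree side of this boils down to the sketch below (the helper name is invented; the real enetc_of_get_phy() also handles fixed-link nodes, the optional "mdio" child node and error unwinding): the phy-handle phandle and the PHY mode are read from the port's node and cached in struct enetc_pf before the netdev is created.

/* Illustrative only: fetch PHY connection info from the device tree. */
static int example_get_dt_phy_info(struct device_node *np,
				   struct device_node **phy_node,
				   phy_interface_t *if_mode)
{
	*phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!*phy_node)
		return -ENODEV;

	return of_get_phy_mode(np, if_mode);	/* reads the "phy-mode" property */
}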
Signed-off-by: Alex Marginean Signed-off-by: Claudiu Manoil Signed-off-by: Michael Walle Tested-by: Vladimir Oltean Signed-off-by: David S. Miller --- .../net/ethernet/freescale/enetc/enetc_pf.c | 57 ++++++++++--------- .../net/ethernet/freescale/enetc/enetc_pf.h | 3 + 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 3bf345b84b76..5a08f66b123c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -482,7 +482,8 @@ static void enetc_port_si_configure(struct enetc_si *si) enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); } -static void enetc_configure_port_mac(struct enetc_hw *hw) +static void enetc_configure_port_mac(struct enetc_hw *hw, + phy_interface_t phy_mode) { enetc_port_wr(hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE)); @@ -498,9 +499,11 @@ static void enetc_configure_port_mac(struct enetc_hw *hw) ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); /* set auto-speed for RGMII */ - if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) + if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG || + phy_interface_mode_is_rgmii(phy_mode)) enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO); - if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) + + if (phy_mode == PHY_INTERFACE_MODE_USXGMII) enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII); } @@ -524,7 +527,7 @@ static void enetc_configure_port(struct enetc_pf *pf) enetc_configure_port_pmac(hw); - enetc_configure_port_mac(hw); + enetc_configure_port_mac(hw, pf->if_mode); enetc_port_si_configure(pf->si); @@ -776,27 +779,27 @@ static void enetc_mdio_remove(struct enetc_pf *pf) mdiobus_unregister(pf->mdio); } -static int enetc_of_get_phy(struct enetc_ndev_priv *priv) +static int enetc_of_get_phy(struct enetc_pf *pf) { - struct enetc_pf *pf = enetc_si_priv(priv->si); - struct device_node *np = priv->dev->of_node; + struct device *dev = &pf->si->pdev->dev; + struct device_node *np = dev->of_node; struct device_node *mdio_np; int err; - priv->phy_node = of_parse_phandle(np, "phy-handle", 0); - if (!priv->phy_node) { + pf->phy_node = of_parse_phandle(np, "phy-handle", 0); + if (!pf->phy_node) { if (!of_phy_is_fixed_link(np)) { - dev_err(priv->dev, "PHY not specified\n"); + dev_err(dev, "PHY not specified\n"); return -ENODEV; } err = of_phy_register_fixed_link(np); if (err < 0) { - dev_err(priv->dev, "fixed link registration failed\n"); + dev_err(dev, "fixed link registration failed\n"); return err; } - priv->phy_node = of_node_get(np); + pf->phy_node = of_node_get(np); } mdio_np = of_get_child_by_name(np, "mdio"); @@ -804,15 +807,15 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) of_node_put(mdio_np); err = enetc_mdio_probe(pf); if (err) { - of_node_put(priv->phy_node); + of_node_put(pf->phy_node); return err; } } - err = of_get_phy_mode(np, &priv->if_mode); + err = of_get_phy_mode(np, &pf->if_mode); if (err) { - dev_err(priv->dev, "missing phy type\n"); - of_node_put(priv->phy_node); + dev_err(dev, "missing phy type\n"); + of_node_put(pf->phy_node); if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); else @@ -824,14 +827,14 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv) return 0; } -static void enetc_of_put_phy(struct enetc_ndev_priv *priv) +static void enetc_of_put_phy(struct enetc_pf *pf) { - struct device_node *np = 
priv->dev->of_node; + struct device_node *np = pf->si->pdev->dev.of_node; if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); - if (priv->phy_node) - of_node_put(priv->phy_node); + if (pf->phy_node) + of_node_put(pf->phy_node); } static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45) @@ -994,6 +997,10 @@ static int enetc_pf_probe(struct pci_dev *pdev, pf->si = si; pf->total_vfs = pci_sriov_get_totalvfs(pdev); + err = enetc_of_get_phy(pf); + if (err) + dev_warn(&pdev->dev, "Fallback to PHY-less operation\n"); + enetc_configure_port(pf); enetc_get_si_caps(si); @@ -1008,6 +1015,8 @@ static int enetc_pf_probe(struct pci_dev *pdev, enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops); priv = netdev_priv(ndev); + priv->phy_node = pf->phy_node; + priv->if_mode = pf->if_mode; enetc_init_si_rings_params(priv); @@ -1023,10 +1032,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_msix; } - err = enetc_of_get_phy(priv); - if (err) - dev_warn(&pdev->dev, "Fallback to PHY-less operation\n"); - err = enetc_configure_serdes(priv); if (err) dev_warn(&pdev->dev, "Attempted SerDes config but failed\n"); @@ -1040,7 +1045,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: - enetc_of_put_phy(priv); enetc_free_msix(priv); err_alloc_msix: enetc_free_si_resources(priv); @@ -1048,6 +1052,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: + enetc_of_put_phy(pf); err_map_pf_space: enetc_pci_remove(pdev); @@ -1068,7 +1073,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) enetc_imdio_remove(pf); enetc_mdio_remove(pf); - enetc_of_put_phy(priv); + enetc_of_put_phy(pf); enetc_free_msix(priv); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h index 2cb922b59f46..0d0ee91282a5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h @@ -46,6 +46,9 @@ struct enetc_pf { struct mii_bus *mdio; /* saved for cleanup */ struct mii_bus *imdio; struct phy_device *pcs; + + struct device_node *phy_node; + phy_interface_t if_mode; }; int enetc_msg_psi_init(struct enetc_pf *pf); From 7dce80c2a526a199ae1447e7a58d4135bb18ff8a Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Sun, 19 Jul 2020 10:05:30 +0200 Subject: [PATCH 1190/2056] net: phy: at803x: add mdix configuration support for AR9331 and AR8035 This patch add MDIX configuration ability for AR9331 and AR8035. Theoretically it should work on other Atheros PHYs, but I was able to test only this two. Since I have no certified reference HW able to detect or configure MDIX, this functionality was confirmed by oscilloscope. Signed-off-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/at803x.c | 78 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 96c61aa75bd7..101651b2de54 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -21,6 +21,17 @@ #include #include +#define AT803X_SPECIFIC_FUNCTION_CONTROL 0x10 +#define AT803X_SFC_ASSERT_CRS BIT(11) +#define AT803X_SFC_FORCE_LINK BIT(10) +#define AT803X_SFC_MDI_CROSSOVER_MODE_M GENMASK(6, 5) +#define AT803X_SFC_AUTOMATIC_CROSSOVER 0x3 +#define AT803X_SFC_MANUAL_MDIX 0x1 +#define AT803X_SFC_MANUAL_MDI 0x0 +#define AT803X_SFC_SQE_TEST BIT(2) +#define AT803X_SFC_POLARITY_REVERSAL BIT(1) +#define AT803X_SFC_DISABLE_JABBER BIT(0) + #define AT803X_SPECIFIC_STATUS 0x11 #define AT803X_SS_SPEED_MASK (3 << 14) #define AT803X_SS_SPEED_1000 (2 << 14) @@ -703,6 +714,12 @@ static int at803x_read_status(struct phy_device *phydev) return ss; if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) { + int sfc; + + sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL); + if (sfc < 0) + return sfc; + switch (ss & AT803X_SS_SPEED_MASK) { case AT803X_SS_SPEED_10: phydev->speed = SPEED_10; @@ -718,10 +735,23 @@ static int at803x_read_status(struct phy_device *phydev) phydev->duplex = DUPLEX_FULL; else phydev->duplex = DUPLEX_HALF; + if (ss & AT803X_SS_MDIX) phydev->mdix = ETH_TP_MDI_X; else phydev->mdix = ETH_TP_MDI; + + switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) { + case AT803X_SFC_MANUAL_MDI: + phydev->mdix_ctrl = ETH_TP_MDI; + break; + case AT803X_SFC_MANUAL_MDIX: + phydev->mdix_ctrl = ETH_TP_MDI_X; + break; + case AT803X_SFC_AUTOMATIC_CROSSOVER: + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + break; + } } if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) @@ -730,6 +760,50 @@ static int at803x_read_status(struct phy_device *phydev) return 0; } +static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl) +{ + u16 val; + + switch (ctrl) { + case ETH_TP_MDI: + val = AT803X_SFC_MANUAL_MDI; + break; + case ETH_TP_MDI_X: + val = AT803X_SFC_MANUAL_MDIX; + break; + case ETH_TP_MDI_AUTO: + val = AT803X_SFC_AUTOMATIC_CROSSOVER; + break; + default: + return 0; + } + + return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL, + AT803X_SFC_MDI_CROSSOVER_MODE_M, + FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val)); +} + +static int at803x_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = at803x_config_mdix(phydev, phydev->mdix_ctrl); + if (ret < 0) + return ret; + + /* Changes of the midx bits are disruptive to the normal operation; + * therefore any changes to these registers must be followed by a + * software reset to take effect. 
+ */ + if (ret == 1) { + ret = genphy_soft_reset(phydev); + if (ret < 0) + return ret; + } + + return genphy_config_aneg(phydev); +} + static int at803x_get_downshift(struct phy_device *phydev, u8 *d) { int val; @@ -979,6 +1053,7 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_POLL_CABLE_TEST, .probe = at803x_probe, .remove = at803x_remove, + .config_aneg = at803x_config_aneg, .config_init = at803x_config_init, .soft_reset = genphy_soft_reset, .set_wol = at803x_set_wol, @@ -1061,6 +1136,9 @@ static struct phy_driver at803x_driver[] = { .config_intr = &at803x_config_intr, .cable_test_start = at803x_cable_test_start, .cable_test_get_status = at803x_cable_test_get_status, + .read_status = at803x_read_status, + .soft_reset = genphy_soft_reset, + .config_aneg = at803x_config_aneg, } }; module_phy_driver(at803x_driver); From dfd5ec1ba602b1a9b36f9b6208f09546784932b9 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 19 Jul 2020 11:08:01 -0700 Subject: [PATCH 1191/2056] net: atm: lec_arpc.h: delete duplicated word Delete the doubled word "the" in a comment. Signed-off-by: Randy Dunlap Cc: "David S. Miller" Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- net/atm/lec_arpc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/atm/lec_arpc.h b/net/atm/lec_arpc.h index 1205d8792d28..39115fe074c4 100644 --- a/net/atm/lec_arpc.h +++ b/net/atm/lec_arpc.h @@ -44,7 +44,7 @@ struct lec_arp_table { u8 *tlvs; u32 sizeoftlvs; /* * LANE2: Each MAC address can have TLVs - * associated with it. sizeoftlvs tells the + * associated with it. sizeoftlvs tells * the length of the tlvs array */ struct sk_buff_head tx_wait; /* wait queue for outgoing packets */ From 089377b7e8e70f00c2290b00cb0024cc6e165a7d Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 19 Jul 2020 11:08:24 -0700 Subject: [PATCH 1192/2056] net: rds: rdma_transport.h: delete duplicated word Delete the doubled word "be" in a comment. Signed-off-by: Randy Dunlap Cc: Santosh Shilimkar Cc: netdev@vger.kernel.org Cc: linux-rdma@vger.kernel.org Signed-off-by: David S. Miller --- net/rds/rdma_transport.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/rds/rdma_transport.h b/net/rds/rdma_transport.h index bfafd4a6d827..ca4c3a667091 100644 --- a/net/rds/rdma_transport.h +++ b/net/rds/rdma_transport.h @@ -13,7 +13,7 @@ /* Below reject reason is for legacy interoperability issue with non-linux * RDS endpoints where older version incompatibility is conveyed via value 1. - * For future version(s), proper encoded reject reason should be be used. + * For future version(s), proper encoded reject reason should be used. */ #define RDS_RDMA_REJ_INCOMPAT 1 From a06d30ae7af492497ffbca6abf1621d508b8fcaa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:10 +0200 Subject: [PATCH 1193/2056] net/atm: remove the atmdev_ops {get, set}sockopt methods All implementations of these two methods are dummies that always return -EINVAL. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- drivers/atm/eni.c | 17 ----------------- drivers/atm/firestream.c | 2 -- drivers/atm/fore200e.c | 27 --------------------------- drivers/atm/horizon.c | 40 ---------------------------------------- drivers/atm/iphase.c | 16 ---------------- drivers/atm/lanai.c | 2 -- drivers/atm/solos-pci.c | 2 -- drivers/atm/zatm.c | 16 ---------------- include/linux/atmdev.h | 9 --------- net/atm/common.c | 14 ++------------ 10 files changed, 2 insertions(+), 143 deletions(-) diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 17d47ad03ab7..b3d8e00e7671 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2027,21 +2027,6 @@ static int eni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) return dev->phy->ioctl(dev,cmd,arg); } - -static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) -{ - return -EINVAL; -} - - -static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen) -{ - return -EINVAL; -} - - static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb) { enum enq_res res; @@ -2215,8 +2200,6 @@ static const struct atmdev_ops ops = { .open = eni_open, .close = eni_close, .ioctl = eni_ioctl, - .getsockopt = eni_getsockopt, - .setsockopt = eni_setsockopt, .send = eni_send, .phy_put = eni_phy_put, .phy_get = eni_phy_get, diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index cc87004d5e2d..2ca9ec802734 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1277,8 +1277,6 @@ static const struct atmdev_ops ops = { .send = fs_send, .owner = THIS_MODULE, /* ioctl: fs_ioctl, */ - /* getsockopt: fs_getsockopt, */ - /* setsockopt: fs_setsockopt, */ /* change_qos: fs_change_qos, */ /* For now implement these internally here... 
*/ diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f4ad7ce25ae8..a81bc49c14ac 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -1710,31 +1710,6 @@ fore200e_getstats(struct fore200e* fore200e) return 0; } - -static int -fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen) -{ - /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ - - DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", - vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); - - return -EINVAL; -} - - -static int -fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen) -{ - /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */ - - DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n", - vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen); - - return -EINVAL; -} - - #if 0 /* currently unused */ static int fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) @@ -3026,8 +3001,6 @@ static const struct atmdev_ops fore200e_ops = { .open = fore200e_open, .close = fore200e_close, .ioctl = fore200e_ioctl, - .getsockopt = fore200e_getsockopt, - .setsockopt = fore200e_setsockopt, .send = fore200e_send, .change_qos = fore200e_change_qos, .proc_read = fore200e_proc_read, diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index e5da51f907a2..4f2951cbe69c 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2527,46 +2527,6 @@ static void hrz_close (struct atm_vcc * atm_vcc) { clear_bit(ATM_VF_ADDR,&atm_vcc->flags); } -#if 0 -static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, - void *optval, int optlen) { - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - PRINTD (DBG_FLOW|DBG_VCC, "hrz_getsockopt"); - switch (level) { - case SOL_SOCKET: - switch (optname) { -// case SO_BCTXOPT: -// break; -// case SO_BCRXOPT: -// break; - default: - return -ENOPROTOOPT; - }; - break; - } - return -EINVAL; -} - -static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, - void *optval, unsigned int optlen) { - hrz_dev * dev = HRZ_DEV(atm_vcc->dev); - PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt"); - switch (level) { - case SOL_SOCKET: - switch (optname) { -// case SO_BCTXOPT: -// break; -// case SO_BCRXOPT: -// break; - default: - return -ENOPROTOOPT; - }; - break; - } - return -EINVAL; -} -#endif - #if 0 static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) { hrz_dev * dev = HRZ_DEV(atm_dev); diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 8c7a996d1f16..eef637fd90b3 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2880,20 +2880,6 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) return 0; } -static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname, - void __user *optval, int optlen) -{ - IF_EVENT(printk(">ia_getsockopt\n");) - return -EINVAL; -} - -static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, - void __user *optval, unsigned int optlen) -{ - IF_EVENT(printk(">ia_setsockopt\n");) - return -EINVAL; -} - static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { IADEV *iadev; struct dle *wr_ptr; @@ -3164,8 +3150,6 @@ static const struct atmdev_ops ops = { .open = ia_open, .close = ia_close, .ioctl = ia_ioctl, - .getsockopt = ia_getsockopt, - .setsockopt = ia_setsockopt, .send = ia_send, .phy_put = ia_phy_put, .phy_get = ia_phy_get, 
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 645a6bc1df88..986c1313694c 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2537,8 +2537,6 @@ static const struct atmdev_ops ops = { .dev_close = lanai_dev_close, .open = lanai_open, .close = lanai_close, - .getsockopt = NULL, - .setsockopt = NULL, .send = lanai_send, .phy_put = NULL, .phy_get = NULL, diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index b7646ae55942..94fbc3abe60e 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1179,8 +1179,6 @@ static const struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, - .getsockopt = NULL, - .setsockopt = NULL, .send = psend, .send_oam = NULL, .phy_put = NULL, diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 57f97b95a453..2788b985edbe 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1515,20 +1515,6 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg) } } - -static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen) -{ - return -EINVAL; -} - - -static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen) -{ - return -EINVAL; -} - static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb) { int error; @@ -1582,8 +1568,6 @@ static const struct atmdev_ops ops = { .open = zatm_open, .close = zatm_close, .ioctl = zatm_ioctl, - .getsockopt = zatm_getsockopt, - .setsockopt = zatm_setsockopt, .send = zatm_send, .phy_put = zatm_phy_put, .phy_get = zatm_phy_get, diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index 8124815eb121..5d5ff2203fa2 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h @@ -176,11 +176,6 @@ struct atm_dev { #define ATM_OF_IMMED 1 /* Attempt immediate delivery */ #define ATM_OF_INRATE 2 /* Attempt in-rate delivery */ - -/* - * ioctl, getsockopt, and setsockopt are optional and can be set to NULL. - */ - struct atmdev_ops { /* only send is required */ void (*dev_close)(struct atm_dev *dev); int (*open)(struct atm_vcc *vcc); @@ -190,10 +185,6 @@ struct atmdev_ops { /* only send is required */ int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd, void __user *arg); #endif - int (*getsockopt)(struct atm_vcc *vcc,int level,int optname, - void __user *optval,int optlen); - int (*setsockopt)(struct atm_vcc *vcc,int level,int optname, - void __user *optval,unsigned int optlen); int (*send)(struct atm_vcc *vcc,struct sk_buff *skb); int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags); void (*phy_put)(struct atm_dev *dev,unsigned char value, diff --git a/net/atm/common.c b/net/atm/common.c index 8575f5d52087..9b28f1fb3c69 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -782,13 +782,8 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, vcc->atm_options &= ~ATM_ATMOPT_CLP; return 0; default: - if (level == SOL_SOCKET) - return -EINVAL; - break; - } - if (!vcc->dev || !vcc->dev->ops->setsockopt) return -EINVAL; - return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen); + } } int vcc_getsockopt(struct socket *sock, int level, int optname, @@ -826,13 +821,8 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, return copy_to_user(optval, &pvc, sizeof(pvc)) ? 
-EFAULT : 0; } default: - if (level == SOL_SOCKET) - return -EINVAL; - break; - } - if (!vcc->dev || !vcc->dev->ops->getsockopt) return -EINVAL; - return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); + } } int register_atmdevice_notifier(struct notifier_block *nb) From 4a3672993f95c5f2ff0cf0951ba9a712d941a248 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:11 +0200 Subject: [PATCH 1194/2056] net: streamline __sys_setsockopt Return early when sockfd_lookup_light fails to reduce a level of indentation for most of the function body. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/socket.c | 65 +++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 34 deletions(-) diff --git a/net/socket.c b/net/socket.c index 770503c4ca76..49a6daf0293b 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2107,43 +2107,40 @@ static int __sys_setsockopt(int fd, int level, int optname, return -EINVAL; sock = sockfd_lookup_light(fd, &err, &fput_needed); - if (sock != NULL) { - err = security_socket_setsockopt(sock, level, optname); - if (err) - goto out_put; + if (!sock) + return err; - err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, - &optname, optval, &optlen, - &kernel_optval); + err = security_socket_setsockopt(sock, level, optname); + if (err) + goto out_put; - if (err < 0) { - goto out_put; - } else if (err > 0) { - err = 0; - goto out_put; - } - - if (kernel_optval) { - set_fs(KERNEL_DS); - optval = (char __user __force *)kernel_optval; - } - - if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) - err = - sock_setsockopt(sock, level, optname, optval, - optlen); - else - err = - sock->ops->setsockopt(sock, level, optname, optval, - optlen); - - if (kernel_optval) { - set_fs(oldfs); - kfree(kernel_optval); - } -out_put: - fput_light(sock->file, fput_needed); + err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, + optval, &optlen, &kernel_optval); + if (err < 0) + goto out_put; + if (err > 0) { + err = 0; + goto out_put; } + + if (kernel_optval) { + set_fs(KERNEL_DS); + optval = (char __user __force *)kernel_optval; + } + + if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) + err = sock_setsockopt(sock, level, optname, optval, optlen); + else + err = sock->ops->setsockopt(sock, level, optname, optval, + optlen); + + if (kernel_optval) { + set_fs(oldfs); + kfree(kernel_optval); + } + +out_put: + fput_light(sock->file, fput_needed); return err; } From d8a9b38f83ea91341f80beb0a07a5777c6acf7a4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:12 +0200 Subject: [PATCH 1195/2056] net: streamline __sys_getsockopt Return early when sockfd_lookup_light fails to reduce a level of indentation for most of the function body. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/socket.c | 31 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/net/socket.c b/net/socket.c index 49a6daf0293b..b79376b17b45 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2163,28 +2163,25 @@ static int __sys_getsockopt(int fd, int level, int optname, int max_optlen; sock = sockfd_lookup_light(fd, &err, &fput_needed); - if (sock != NULL) { - err = security_socket_getsockopt(sock, level, optname); - if (err) - goto out_put; + if (!sock) + return err; - max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + err = security_socket_getsockopt(sock, level, optname); + if (err) + goto out_put; - if (level == SOL_SOCKET) - err = - sock_getsockopt(sock, level, optname, optval, + max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + + if (level == SOL_SOCKET) + err = sock_getsockopt(sock, level, optname, optval, optlen); + else + err = sock->ops->getsockopt(sock, level, optname, optval, optlen); - else - err = - sock->ops->getsockopt(sock, level, optname, optval, - optlen); - err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, - optval, optlen, - max_optlen, err); + err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, optval, + optlen, max_optlen, err); out_put: - fput_light(sock->file, fput_needed); - } + fput_light(sock->file, fput_needed); return err; } From 4d295e54611509854a12c26f95a6f4430731d614 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:13 +0200 Subject: [PATCH 1196/2056] net: simplify cBPF setsockopt compat handling Add a helper that copies either a native or compat bpf_fprog from userspace after verifying the length, and remove the compat setsockopt handlers that now aren't required. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/filter.h | 4 ++-- include/net/compat.h | 1 - net/compat.c | 45 +----------------------------------------- net/core/filter.c | 23 +++++++++++++++++++++ net/core/sock.c | 30 ++++++++++------------------ net/packet/af_packet.c | 33 ++++--------------------------- 6 files changed, 40 insertions(+), 96 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 0b0144752d78..4d049c8e1fbe 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -502,13 +502,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) offsetof(TYPE, MEMBER); \ }) -#ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. 
*/ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; -#endif struct sock_fprog_kern { u16 len; @@ -1278,4 +1276,6 @@ struct bpf_sockopt_kern { s32 retval; }; +int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len); + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/net/compat.h b/include/net/compat.h index f241666117d8..745db0d605b6 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -61,7 +61,6 @@ int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg, compat_size_t *len); int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, struct sockaddr __user **, struct iovec **); -struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); int put_cmsg_compat(struct msghdr*, int, int, int, void *); int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, diff --git a/net/compat.c b/net/compat.c index 5e3041a2c37d..3e6c2c5ff260 100644 --- a/net/compat.c +++ b/net/compat.c @@ -335,49 +335,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) __scm_destroy(scm); } -/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */ -struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval) -{ - struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; - struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); - struct compat_sock_fprog f32; - struct sock_fprog f; - - if (copy_from_user(&f32, fprog32, sizeof(*fprog32))) - return NULL; - memset(&f, 0, sizeof(f)); - f.len = f32.len; - f.filter = compat_ptr(f32.filter); - if (copy_to_user(kfprog, &f, sizeof(struct sock_fprog))) - return NULL; - - return kfprog; -} -EXPORT_SYMBOL_GPL(get_compat_bpf_fprog); - -static int do_set_attach_filter(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct sock_fprog __user *kfprog; - - kfprog = get_compat_bpf_fprog(optval); - if (!kfprog) - return -EFAULT; - - return sock_setsockopt(sock, level, optname, (char __user *)kfprog, - sizeof(struct sock_fprog)); -} - -static int compat_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (optname == SO_ATTACH_FILTER || - optname == SO_ATTACH_REUSEPORT_CBPF) - return do_set_attach_filter(sock, level, optname, - optval, optlen); - return sock_setsockopt(sock, level, optname, optval, optlen); -} - static int __compat_sys_setsockopt(int fd, int level, int optname, char __user *optval, unsigned int optlen) { @@ -396,7 +353,7 @@ static int __compat_sys_setsockopt(int fd, int level, int optname, } if (level == SOL_SOCKET) - err = compat_sock_setsockopt(sock, level, + err = sock_setsockopt(sock, level, optname, optval, optlen); else if (sock->ops->compat_setsockopt) err = sock->ops->compat_setsockopt(sock, level, diff --git a/net/core/filter.c b/net/core/filter.c index bdd2382e655d..2bf6624796d8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -77,6 +77,29 @@ #include #include +int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) +{ + if (in_compat_syscall()) { + struct compat_sock_fprog f32; + + if (len != sizeof(f32)) + return -EINVAL; + if (copy_from_user(&f32, src, sizeof(f32))) + return -EFAULT; + memset(dst, 0, sizeof(*dst)); + dst->len = f32.len; + dst->filter = compat_ptr(f32.filter); + } else { + if (len != sizeof(*dst)) + return -EINVAL; + if (copy_from_user(dst, src, sizeof(*dst))) + return 
-EFAULT; + } + + return 0; +} +EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user); + /** * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff diff --git a/net/core/sock.c b/net/core/sock.c index 11d6f77dd562..e085df794825 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1059,19 +1059,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname, ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD); break; - case SO_ATTACH_FILTER: - ret = -EINVAL; - if (optlen == sizeof(struct sock_fprog)) { - struct sock_fprog fprog; - - ret = -EFAULT; - if (copy_from_user(&fprog, optval, sizeof(fprog))) - break; + case SO_ATTACH_FILTER: { + struct sock_fprog fprog; + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + if (!ret) ret = sk_attach_filter(&fprog, sk); - } break; - + } case SO_ATTACH_BPF: ret = -EINVAL; if (optlen == sizeof(u32)) { @@ -1085,19 +1080,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname, } break; - case SO_ATTACH_REUSEPORT_CBPF: - ret = -EINVAL; - if (optlen == sizeof(struct sock_fprog)) { - struct sock_fprog fprog; - - ret = -EFAULT; - if (copy_from_user(&fprog, optval, sizeof(fprog))) - break; + case SO_ATTACH_REUSEPORT_CBPF: { + struct sock_fprog fprog; + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); - } break; - + } case SO_ATTACH_REUSEPORT_EBPF: ret = -EINVAL; if (optlen == sizeof(u32)) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 781fee93b7d5..35aee9e98053 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1545,10 +1545,10 @@ static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, if (sock_flag(&po->sk, SOCK_FILTER_LOCKED)) return -EPERM; - if (len != sizeof(fprog)) - return -EINVAL; - if (copy_from_user(&fprog, data, len)) - return -EFAULT; + + ret = copy_bpf_fprog_from_user(&fprog, data, len); + if (ret) + return ret; ret = bpf_prog_create_from_user(&new, &fprog, NULL, false); if (ret) @@ -4040,28 +4040,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, return 0; } - -#ifdef CONFIG_COMPAT -static int compat_packet_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct packet_sock *po = pkt_sk(sock->sk); - - if (level != SOL_PACKET) - return -ENOPROTOOPT; - - if (optname == PACKET_FANOUT_DATA && - po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { - optval = (char __user *)get_compat_bpf_fprog(optval); - if (!optval) - return -EFAULT; - optlen = sizeof(struct sock_fprog); - } - - return packet_setsockopt(sock, level, optname, optval, optlen); -} -#endif - static int packet_notifier(struct notifier_block *this, unsigned long msg, void *ptr) { @@ -4549,9 +4527,6 @@ static const struct proto_ops packet_ops = { .shutdown = sock_no_shutdown, .setsockopt = packet_setsockopt, .getsockopt = packet_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_packet_setsockopt, -#endif .sendmsg = packet_sendmsg, .recvmsg = packet_recvmsg, .mmap = packet_mmap, From 8c918ffbbad49454ed26c53eb1b90bf98bb5e394 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:14 +0200 Subject: [PATCH 1197/2056] net: remove compat_sock_common_{get,set}sockopt Add the compat handling to sock_common_{get,set}sockopt instead, keyed off in_compat_syscall(). This allows removing the now unused ->compat_{get,set}sockopt methods from struct proto_ops.
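Condensed from the net/core/sock.c hunk below, the dispatch this patch converges on looks roughly as follows. This is a simplified sketch of the setsockopt side only (the getsockopt side is symmetric), not a line-for-line copy of the change:

int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

#ifdef CONFIG_COMPAT
        /* 32-bit syscall on a 64-bit kernel: protocols that still need to
         * translate option layouts keep a compat handler in struct proto. */
        if (in_compat_syscall() && sk->sk_prot->compat_setsockopt)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
#endif
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

Protocols with no 32-bit layout differences simply never set ->compat_setsockopt in struct proto and always take the native path, which is what lets the proto_ops-level compat methods go away here.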
Signed-off-by: Christoph Hellwig Acked-by: Matthieu Baerts Acked-by: Stefan Schmidt Signed-off-by: David S. Miller --- include/linux/net.h | 6 ------ include/net/sock.h | 4 ---- net/core/sock.c | 38 ++++++++++---------------------------- net/dccp/ipv4.c | 4 ---- net/dccp/ipv6.c | 2 -- net/ieee802154/socket.c | 8 -------- net/ipv4/af_inet.c | 6 ------ net/ipv6/af_inet6.c | 4 ---- net/ipv6/ipv6_sockglue.c | 12 ++---------- net/ipv6/raw.c | 2 -- net/l2tp/l2tp_ip.c | 4 ---- net/l2tp/l2tp_ip6.c | 2 -- net/mptcp/protocol.c | 6 ------ net/phonet/socket.c | 8 -------- net/sctp/ipv6.c | 2 -- net/sctp/protocol.c | 4 ---- 16 files changed, 12 insertions(+), 100 deletions(-) diff --git a/include/linux/net.h b/include/linux/net.h index 016a9c5faa34..858ff1d98154 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -165,12 +165,6 @@ struct proto_ops { int optname, char __user *optval, unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); - int (*compat_getsockopt)(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen); -#endif void (*show_fdinfo)(struct seq_file *m, struct socket *sock); int (*sendmsg) (struct socket *sock, struct msghdr *m, size_t total_len); diff --git a/include/net/sock.h b/include/net/sock.h index 4bf884165148..1fd7cf5fc751 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1744,10 +1744,6 @@ int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen); -int compat_sock_common_getsockopt(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen); -int compat_sock_common_setsockopt(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); void sk_common_release(struct sock *sk); diff --git a/net/core/sock.c b/net/core/sock.c index e085df794825..018404d17626 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3199,24 +3199,15 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; +#ifdef CONFIG_COMPAT + if (in_compat_syscall() && sk->sk_prot->compat_getsockopt) + return sk->sk_prot->compat_getsockopt(sk, level, optname, + optval, optlen); +#endif return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); -#ifdef CONFIG_COMPAT -int compat_sock_common_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) -{ - struct sock *sk = sock->sk; - - if (sk->sk_prot->compat_getsockopt != NULL) - return sk->sk_prot->compat_getsockopt(sk, level, optname, - optval, optlen); - return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_sock_common_getsockopt); -#endif - int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { @@ -3240,24 +3231,15 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; +#ifdef CONFIG_COMPAT + if (in_compat_syscall() && sk->sk_prot->compat_setsockopt) + return sk->sk_prot->compat_setsockopt(sk, level, optname, + optval, optlen); +#endif return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); -#ifdef
CONFIG_COMPAT -int compat_sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - struct sock *sk = sock->sk; - - if (sk->sk_prot->compat_setsockopt != NULL) - return sk->sk_prot->compat_setsockopt(sk, level, optname, - optval, optlen); - return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_sock_common_setsockopt); -#endif - void sk_common_release(struct sock *sk) { if (sk->sk_prot->destroy) diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index a7e989919c53..316cc5ac0da7 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -999,10 +999,6 @@ static const struct proto_ops inet_dccp_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw dccp_v4_protosw = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 650187d68851..b50f85a72cd5 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1083,8 +1083,6 @@ static const struct proto_ops inet6_dccp_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index d93d4531aa9b..94ae9662133e 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -423,10 +423,6 @@ static const struct proto_ops ieee802154_raw_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* DGRAM Sockets (802.15.4 dataframes) */ @@ -986,10 +982,6 @@ static const struct proto_ops ieee802154_dgram_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* Create a socket. 
Initialise the socket, blank the addresses diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ff141d630bdf..4307503a6f0b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1043,8 +1043,6 @@ const struct proto_ops inet_stream_ops = { .sendpage_locked = tcp_sendpage_locked, .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif .set_rcvlowat = tcp_set_rcvlowat, @@ -1073,8 +1071,6 @@ const struct proto_ops inet_dgram_ops = { .sendpage = inet_sendpage, .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif }; @@ -1105,8 +1101,6 @@ static const struct proto_ops inet_sockraw_ops = { .mmap = sock_no_mmap, .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, .compat_ioctl = inet_compat_ioctl, #endif }; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index b304b882e031..0306509ab063 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -688,8 +688,6 @@ const struct proto_ops inet6_stream_ops = { .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif .set_rcvlowat = tcp_set_rcvlowat, }; @@ -717,8 +715,6 @@ const struct proto_ops inet6_dgram_ops = { .set_peek_off = sk_set_peek_off, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 20576e87a5f7..6ab44ec2c369 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -914,12 +914,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, { int err; - if (level == SOL_IP && sk->sk_type != SOCK_RAW) { - if (udp_prot.compat_setsockopt != NULL) - return udp_prot.compat_setsockopt(sk, level, optname, - optval, optlen); + if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.setsockopt(sk, level, optname, optval, optlen); - } if (level != SOL_IPV6) return -ENOPROTOOPT; @@ -1480,12 +1476,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, { int err; - if (level == SOL_IP && sk->sk_type != SOCK_RAW) { - if (udp_prot.compat_getsockopt != NULL) - return udp_prot.compat_getsockopt(sk, level, optname, - optval, optlen); + if (level == SOL_IP && sk->sk_type != SOCK_RAW) return udp_prot.getsockopt(sk, level, optname, optval, optlen); - } if (level != SOL_IPV6) return -ENOPROTOOPT; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8ef5a7b30524..e23c6b461758 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1378,8 +1378,6 @@ const struct proto_ops inet6_sockraw_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 955662a6dee7..f8d7412cfb3d 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -638,10 +638,6 @@ static const struct proto_ops l2tp_ip_ops = { .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = 
sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw l2tp_ip_protosw = { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 526ed2c24dd5..2cdc0b7a7a43 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -773,8 +773,6 @@ static const struct proto_ops l2tp_ip6_ops = { .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index dbe43e0cd734..f0b0b503c262 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2166,10 +2166,6 @@ static const struct proto_ops mptcp_stream_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .sendpage = inet_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; static struct inet_protosw mptcp_protosw = { @@ -2222,8 +2218,6 @@ static const struct proto_ops mptcp_v6_stream_ops = { .sendpage = inet_sendpage, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 76d499f6af9a..87c60f83c180 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -441,10 +441,6 @@ const struct proto_ops phonet_dgram_ops = { .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = sock_no_setsockopt, - .compat_getsockopt = sock_no_getsockopt, -#endif .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, @@ -466,10 +462,6 @@ const struct proto_ops phonet_stream_ops = { .shutdown = sock_no_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ccfa0ab3e7f4..ebda31b7747d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1033,8 +1033,6 @@ static const struct proto_ops inet6_seqpacket_ops = { .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, #endif }; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index cde29f3c7fb3..8d25cc464efd 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1036,10 +1036,6 @@ static const struct proto_ops inet_seqpacket_ops = { .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_sock_common_setsockopt, - .compat_getsockopt = compat_sock_common_getsockopt, -#endif }; /* Registration with AF_INET family. */ From 55db9c0e853421fa71cac5e6855898601f78a1f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:15 +0200 Subject: [PATCH 1198/2056] net: remove compat_sys_{get,set}sockopt Now that the ->compat_{get,set}sockopt proto_ops methods are gone there is no good reason left to keep the compat syscalls separate. 
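From userspace the contract is unchanged: after this consolidation, a native build and a compat build (for example one compiled with -m32 on x86_64) of the same program reach the identical __sys_setsockopt()/__sys_getsockopt() helpers, and only the syscall-table glue differs. A minimal sanity check, using nothing but the ordinary sockets API rather than anything introduced by this patch:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int one = 1, got = 0;
        socklen_t len = sizeof(got);
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
                return 1;
        /* set and read back a plain SOL_SOCKET option */
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) ||
            getsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &got, &len))
                return 1;
        printf("SO_REUSEADDR=%d optlen=%u\n", got, (unsigned int)len);
        close(fd);
        return 0;
}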
This fixes the odd use of unsigned int for the compat_setsockopt optlen and the missing sock_use_custom_sol_socket. It would also easily allow running the eBPF hooks for the compat syscalls, but such a large change in behavior does not belong into a consolidation patch like this one. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- arch/arm64/include/asm/unistd32.h | 4 +- arch/mips/kernel/syscalls/syscall_n32.tbl | 4 +- arch/mips/kernel/syscalls/syscall_o32.tbl | 4 +- arch/parisc/kernel/syscalls/syscall.tbl | 4 +- arch/powerpc/kernel/syscalls/syscall.tbl | 4 +- arch/s390/kernel/syscalls/syscall.tbl | 4 +- arch/sparc/kernel/sys32.S | 12 +-- arch/sparc/kernel/syscalls/syscall.tbl | 4 +- arch/x86/entry/syscall_x32.c | 7 ++ arch/x86/entry/syscalls/syscall_32.tbl | 4 +- arch/x86/entry/syscalls/syscall_64.tbl | 4 +- include/linux/compat.h | 4 - include/linux/syscalls.h | 4 + include/uapi/asm-generic/unistd.h | 4 +- net/compat.c | 79 +------------------ net/socket.c | 25 +++--- tools/include/uapi/asm-generic/unistd.h | 4 +- .../arch/powerpc/entry/syscalls/syscall.tbl | 4 +- .../perf/arch/s390/entry/syscalls/syscall.tbl | 4 +- .../arch/x86/entry/syscalls/syscall_64.tbl | 4 +- 20 files changed, 62 insertions(+), 125 deletions(-) diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 6d95d0c8bf2f..166e36903110 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -599,9 +599,9 @@ __SYSCALL(__NR_recvfrom, compat_sys_recvfrom) #define __NR_shutdown 293 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_setsockopt 294 -__SYSCALL(__NR_setsockopt, compat_sys_setsockopt) +__SYSCALL(__NR_setsockopt, sys_setsockopt) #define __NR_getsockopt 295 -__SYSCALL(__NR_getsockopt, compat_sys_getsockopt) +__SYSCALL(__NR_getsockopt, sys_getsockopt) #define __NR_sendmsg 296 __SYSCALL(__NR_sendmsg, compat_sys_sendmsg) #define __NR_recvmsg 297 diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl index f777141f5256..8488b0d0a99e 100644 --- a/arch/mips/kernel/syscalls/syscall_n32.tbl +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl @@ -60,8 +60,8 @@ 50 n32 getsockname sys_getsockname 51 n32 getpeername sys_getpeername 52 n32 socketpair sys_socketpair -53 n32 setsockopt compat_sys_setsockopt -54 n32 getsockopt compat_sys_getsockopt +53 n32 setsockopt sys_setsockopt +54 n32 getsockopt sys_getsockopt 55 n32 clone __sys_clone 56 n32 fork __sys_fork 57 n32 execve compat_sys_execve diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl index 13280625d312..b20522f813f9 100644 --- a/arch/mips/kernel/syscalls/syscall_o32.tbl +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl @@ -184,7 +184,7 @@ 170 o32 connect sys_connect 171 o32 getpeername sys_getpeername 172 o32 getsockname sys_getsockname -173 o32 getsockopt sys_getsockopt compat_sys_getsockopt +173 o32 getsockopt sys_getsockopt sys_getsockopt 174 o32 listen sys_listen 175 o32 recv sys_recv compat_sys_recv 176 o32 recvfrom sys_recvfrom compat_sys_recvfrom @@ -192,7 +192,7 @@ 178 o32 send sys_send 179 o32 sendmsg sys_sendmsg compat_sys_sendmsg 180 o32 sendto sys_sendto -181 o32 setsockopt sys_setsockopt compat_sys_setsockopt +181 o32 setsockopt sys_setsockopt sys_setsockopt 182 o32 shutdown sys_shutdown 183 o32 socket sys_socket 184 o32 socketpair sys_socketpair diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl index 5a758fa6ec52..3494e4fa1a17 100644 --- 
a/arch/parisc/kernel/syscalls/syscall.tbl +++ b/arch/parisc/kernel/syscalls/syscall.tbl @@ -198,8 +198,8 @@ 178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo 179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend 180 common chown sys_chown -181 common setsockopt sys_setsockopt compat_sys_setsockopt -182 common getsockopt sys_getsockopt compat_sys_getsockopt +181 common setsockopt sys_setsockopt sys_setsockopt +182 common getsockopt sys_getsockopt sys_getsockopt 183 common sendmsg sys_sendmsg compat_sys_sendmsg 184 common recvmsg sys_recvmsg compat_sys_recvmsg 185 common semop sys_semop diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl index f833a3190822..94eb5b27ef65 100644 --- a/arch/powerpc/kernel/syscalls/syscall.tbl +++ b/arch/powerpc/kernel/syscalls/syscall.tbl @@ -433,8 +433,8 @@ 336 common recv sys_recv compat_sys_recv 337 common recvfrom sys_recvfrom compat_sys_recvfrom 338 common shutdown sys_shutdown -339 common setsockopt sys_setsockopt compat_sys_setsockopt -340 common getsockopt sys_getsockopt compat_sys_getsockopt +339 common setsockopt sys_setsockopt sys_setsockopt +340 common getsockopt sys_getsockopt sys_getsockopt 341 common sendmsg sys_sendmsg compat_sys_sendmsg 342 common recvmsg sys_recvmsg compat_sys_recvmsg 343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl index bfdcb7633957..0d63c71fc544 100644 --- a/arch/s390/kernel/syscalls/syscall.tbl +++ b/arch/s390/kernel/syscalls/syscall.tbl @@ -372,8 +372,8 @@ 362 common connect sys_connect sys_connect 363 common listen sys_listen sys_listen 364 common accept4 sys_accept4 sys_accept4 -365 common getsockopt sys_getsockopt compat_sys_getsockopt -366 common setsockopt sys_setsockopt compat_sys_setsockopt +365 common getsockopt sys_getsockopt sys_getsockopt +366 common setsockopt sys_setsockopt sys_setsockopt 367 common getsockname sys_getsockname sys_getsockname 368 common getpeername sys_getpeername sys_getpeername 369 common sendto sys_sendto sys_sendto diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 489ffab918a8..a45f0f31fe51 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -157,22 +157,22 @@ do_sys_shutdown: /* sys_shutdown(int, int) */ nop nop nop -do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */ +do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */ 47: ldswa [%o1 + 0x0] %asi, %o0 - sethi %hi(compat_sys_setsockopt), %g1 + sethi %hi(sys_setsockopt), %g1 48: ldswa [%o1 + 0x8] %asi, %o2 49: lduwa [%o1 + 0xc] %asi, %o3 50: ldswa [%o1 + 0x10] %asi, %o4 - jmpl %g1 + %lo(compat_sys_setsockopt), %g0 + jmpl %g1 + %lo(sys_setsockopt), %g0 51: ldswa [%o1 + 0x4] %asi, %o1 nop -do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */ +do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */ 52: ldswa [%o1 + 0x0] %asi, %o0 - sethi %hi(compat_sys_getsockopt), %g1 + sethi %hi(sys_getsockopt), %g1 53: ldswa [%o1 + 0x8] %asi, %o2 54: lduwa [%o1 + 0xc] %asi, %o3 55: lduwa [%o1 + 0x10] %asi, %o4 - jmpl %g1 + %lo(compat_sys_getsockopt), %g0 + jmpl %g1 + %lo(sys_getsockopt), %g0 56: ldswa [%o1 + 0x4] %asi, %o1 nop do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */ diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl index 8004a276cb74..c59b37965add 100644 --- 
a/arch/sparc/kernel/syscalls/syscall.tbl +++ b/arch/sparc/kernel/syscalls/syscall.tbl @@ -147,7 +147,7 @@ 115 32 getgroups32 sys_getgroups 116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday 117 common getrusage sys_getrusage compat_sys_getrusage -118 common getsockopt sys_getsockopt compat_sys_getsockopt +118 common getsockopt sys_getsockopt sys_getsockopt 119 common getcwd sys_getcwd 120 common readv sys_readv compat_sys_readv 121 common writev sys_writev compat_sys_writev @@ -425,7 +425,7 @@ 352 common userfaultfd sys_userfaultfd 353 common bind sys_bind 354 common listen sys_listen -355 common setsockopt sys_setsockopt compat_sys_setsockopt +355 common setsockopt sys_setsockopt sys_setsockopt 356 common mlock2 sys_mlock2 357 common copy_file_range sys_copy_file_range 358 common preadv2 sys_preadv2 compat_sys_preadv2 diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c index 3d8d70d3896c..1583831f61a9 100644 --- a/arch/x86/entry/syscall_x32.c +++ b/arch/x86/entry/syscall_x32.c @@ -8,6 +8,13 @@ #include #include +/* + * Reuse the 64-bit entry points for the x32 versions that occupy different + * slots in the syscall table. + */ +#define __x32_sys_getsockopt __x64_sys_getsockopt +#define __x32_sys_setsockopt __x64_sys_setsockopt + #define __SYSCALL_64(nr, sym) #define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *); diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index d8f8a1a69ed1..43742a69dba1 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -376,8 +376,8 @@ 362 i386 connect sys_connect 363 i386 listen sys_listen 364 i386 accept4 sys_accept4 -365 i386 getsockopt sys_getsockopt compat_sys_getsockopt -366 i386 setsockopt sys_setsockopt compat_sys_setsockopt +365 i386 getsockopt sys_getsockopt sys_getsockopt +366 i386 setsockopt sys_setsockopt sys_setsockopt 367 i386 getsockname sys_getsockname 368 i386 getpeername sys_getpeername 369 i386 sendto sys_sendto diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..e008d638e641 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -396,8 +396,8 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt +541 x32 setsockopt sys_setsockopt +542 x32 getsockopt sys_getsockopt 543 x32 io_setup compat_sys_io_setup 544 x32 io_submit compat_sys_io_submit 545 x32 execveat compat_sys_execveat diff --git a/include/linux/compat.h b/include/linux/compat.h index e90100c0de72..c4255d8a4a8a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -737,10 +737,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, unsigned flags, struct sockaddr __user *addr, int __user *addrlen); -asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen); -asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags); asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, diff --git a/include/linux/syscalls.h 
b/include/linux/syscalls.h index b951a87da987..aa46825c6f9d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -1424,4 +1424,8 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +int __sys_getsockopt(int fd, int level, int optname, char __user *optval, + int __user *optlen); +int __sys_setsockopt(int fd, int level, int optname, char __user *optval, + int optlen); #endif diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index f4a01305d9a6..c8c189a5f0a6 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto) #define __NR_recvfrom 207 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom) #define __NR_setsockopt 208 -__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt) +__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt) #define __NR_getsockopt 209 -__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt) +__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt) #define __NR_shutdown 210 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_sendmsg 211 diff --git a/net/compat.c b/net/compat.c index 3e6c2c5ff260..091875bd6210 100644 --- a/net/compat.c +++ b/net/compat.c @@ -335,77 +335,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) __scm_destroy(scm); } -static int __compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - struct socket *sock; - - if (optlen > INT_MAX) - return -EINVAL; - - sock = sockfd_lookup(fd, &err); - if (sock) { - err = security_socket_setsockopt(sock, level, optname); - if (err) { - sockfd_put(sock); - return err; - } - - if (level == SOL_SOCKET) - err = sock_setsockopt(sock, level, - optname, optval, optlen); - else if (sock->ops->compat_setsockopt) - err = sock->ops->compat_setsockopt(sock, level, - optname, optval, optlen); - else - err = sock->ops->setsockopt(sock, level, - optname, optval, optlen); - sockfd_put(sock); - } - return err; -} - -COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, - char __user *, optval, unsigned int, optlen) -{ - return __compat_sys_setsockopt(fd, level, optname, optval, optlen); -} - -static int __compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, - int __user *optlen) -{ - int err; - struct socket *sock = sockfd_lookup(fd, &err); - - if (sock) { - err = security_socket_getsockopt(sock, level, optname); - if (err) { - sockfd_put(sock); - return err; - } - - if (level == SOL_SOCKET) - err = sock_getsockopt(sock, level, - optname, optval, optlen); - else if (sock->ops->compat_getsockopt) - err = sock->ops->compat_getsockopt(sock, level, - optname, optval, optlen); - else - err = sock->ops->getsockopt(sock, level, - optname, optval, optlen); - sockfd_put(sock); - } - return err; -} - -COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname, - char __user *, optval, int __user *, optlen) -{ - return __compat_sys_getsockopt(fd, level, optname, optval, optlen); -} - /* Argument list sizes for compat_sys_socketcall */ #define AL(x) ((x) * sizeof(u32)) static unsigned char nas[21] = { @@ -565,13 +494,11 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args) ret = __sys_shutdown(a0, a1); break; case SYS_SETSOCKOPT: - ret = __compat_sys_setsockopt(a0, a1, a[2], - compat_ptr(a[3]), a[4]); + ret = __sys_setsockopt(a0, a1, a[2], 
compat_ptr(a[3]), a[4]); break; case SYS_GETSOCKOPT: - ret = __compat_sys_getsockopt(a0, a1, a[2], - compat_ptr(a[3]), - compat_ptr(a[4])); + ret = __sys_getsockopt(a0, a1, a[2], compat_ptr(a[3]), + compat_ptr(a[4])); break; case SYS_SENDMSG: ret = __compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); diff --git a/net/socket.c b/net/socket.c index b79376b17b45..dec345982abb 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2094,9 +2094,8 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. */ - -static int __sys_setsockopt(int fd, int level, int optname, - char __user *optval, int optlen) +int __sys_setsockopt(int fd, int level, int optname, char __user *optval, + int optlen) { mm_segment_t oldfs = get_fs(); char *kernel_optval = NULL; @@ -2114,8 +2113,10 @@ static int __sys_setsockopt(int fd, int level, int optname, if (err) goto out_put; - err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, - optval, &optlen, &kernel_optval); + if (!in_compat_syscall()) + err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, + optval, &optlen, + &kernel_optval); if (err < 0) goto out_put; if (err > 0) { @@ -2154,9 +2155,8 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname, * Get a socket option. Because we don't know the option lengths we have * to pass a user mode parameter for the protocols to sort out. */ - -static int __sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen) +int __sys_getsockopt(int fd, int level, int optname, char __user *optval, + int __user *optlen) { int err, fput_needed; struct socket *sock; @@ -2170,7 +2170,8 @@ static int __sys_getsockopt(int fd, int level, int optname, if (err) goto out_put; - max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); + if (!in_compat_syscall()) + max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen); if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); @@ -2178,8 +2179,10 @@ static int __sys_getsockopt(int fd, int level, int optname, err = sock->ops->getsockopt(sock, level, optname, optval, optlen); - err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, optval, - optlen, max_optlen, err); + if (!in_compat_syscall()) + err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname, + optval, optlen, max_optlen, + err); out_put: fput_light(sock->file, fput_needed); return err; diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index f4a01305d9a6..c8c189a5f0a6 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto) #define __NR_recvfrom 207 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom) #define __NR_setsockopt 208 -__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt) +__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt) #define __NR_getsockopt 209 -__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt) +__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt) #define __NR_shutdown 210 __SYSCALL(__NR_shutdown, sys_shutdown) #define __NR_sendmsg 211 diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 35b61bfc1b1a..b190f2eb2611 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ 
b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -427,8 +427,8 @@ 336 common recv sys_recv compat_sys_recv 337 common recvfrom sys_recvfrom compat_sys_recvfrom 338 common shutdown sys_shutdown -339 common setsockopt sys_setsockopt compat_sys_setsockopt -340 common getsockopt sys_getsockopt compat_sys_getsockopt +339 common setsockopt sys_setsockopt sys_setsockopt +340 common getsockopt sys_getsockopt sys_getsockopt 341 common sendmsg sys_sendmsg compat_sys_sendmsg 342 common recvmsg sys_recvmsg compat_sys_recvmsg 343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index b38d48464368..56ae24b6e4be 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -372,8 +372,8 @@ 362 common connect sys_connect compat_sys_connect 363 common listen sys_listen sys_listen 364 common accept4 sys_accept4 compat_sys_accept4 -365 common getsockopt sys_getsockopt compat_sys_getsockopt -366 common setsockopt sys_setsockopt compat_sys_setsockopt +365 common getsockopt sys_getsockopt sys_getsockopt +366 common setsockopt sys_setsockopt sys_setsockopt 367 common getsockname sys_getsockname compat_sys_getsockname 368 common getpeername sys_getpeername compat_sys_getpeername 369 common sendto sys_sendto compat_sys_sendto diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 78847b32e137..e008d638e641 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -396,8 +396,8 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt +541 x32 setsockopt sys_setsockopt +542 x32 getsockopt sys_getsockopt 543 x32 io_setup compat_sys_io_setup 544 x32 io_submit compat_sys_io_submit 545 x32 execveat compat_sys_execveat From 983094b4fc2d6a0b356f189593970cea2682a20c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:16 +0200 Subject: [PATCH 1199/2056] netfilter/arp_tables: clean up compat {get, set}sockopt handling Merge the native and compat {get,set}sockopt handlers using in_compat_syscall(). Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv4/netfilter/arp_tables.c | 85 ++++++++------------------------- 1 file changed, 21 insertions(+), 64 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index b167f4a5b684..15807fb4a65f 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -787,8 +787,7 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, - const int *len, int compat) +static int get_info(struct net *net, void __user *user, const int *len) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -802,7 +801,7 @@ static int get_info(struct net *net, void __user *user, name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_lock(NFPROTO_ARP); #endif t = xt_request_find_table_lock(net, NFPROTO_ARP, name); @@ -812,7 +811,7 @@ static int get_info(struct net *net, void __user *user, #ifdef CONFIG_COMPAT struct xt_table_info tmp; - if (compat) { + if (in_compat_syscall()) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(NFPROTO_ARP); private = &tmp; @@ -837,7 +836,7 @@ static int get_info(struct net *net, void __user *user, } else ret = PTR_ERR(t); #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_unlock(NFPROTO_ARP); #endif return ret; @@ -998,7 +997,7 @@ static int do_replace(struct net *net, const void __user *user, } static int do_add_counters(struct net *net, const void __user *user, - unsigned int len, int compat) + unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1009,7 +1008,8 @@ static int do_add_counters(struct net *net, const void __user *user, struct arpt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + paddc = xt_copy_counters_from_user(user, len, &tmp, + in_compat_syscall()); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1294,30 +1294,6 @@ static int compat_do_replace(struct net *net, void __user *user, return ret; } -static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, - unsigned int len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case ARPT_SO_SET_REPLACE: - ret = compat_do_replace(sock_net(sk), user, len); - break; - - case ARPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 1); - break; - - default: - ret = -EINVAL; - } - - return ret; -} - static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr, compat_uint_t *size, struct xt_counters *counters, @@ -1425,29 +1401,6 @@ static int compat_get_entries(struct net *net, xt_compat_unlock(NFPROTO_ARP); return ret; } - -static int do_arpt_get_ctl(struct sock *, int, void __user *, int *); - -static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, - int *len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case ARPT_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 1); - break; - case ARPT_SO_GET_ENTRIES: - ret = compat_get_entries(sock_net(sk), user, len); - break; - default: - ret = do_arpt_get_ctl(sk, cmd, user, len); - } - return ret; -} #endif static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) @@ -1459,11 +1412,16 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned switch (cmd) { case ARPT_SO_SET_REPLACE: - ret = do_replace(sock_net(sk), user, 
len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_do_replace(sock_net(sk), user, len); + else +#endif + ret = do_replace(sock_net(sk), user, len); break; case ARPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 0); + ret = do_add_counters(sock_net(sk), user, len); break; default: @@ -1482,11 +1440,16 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len switch (cmd) { case ARPT_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 0); + ret = get_info(sock_net(sk), user, len); break; case ARPT_SO_GET_ENTRIES: - ret = get_entries(sock_net(sk), user, len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_get_entries(sock_net(sk), user, len); + else +#endif + ret = get_entries(sock_net(sk), user, len); break; case ARPT_SO_GET_REVISION_TARGET: { @@ -1610,15 +1573,9 @@ static struct nf_sockopt_ops arpt_sockopts = { .set_optmin = ARPT_BASE_CTL, .set_optmax = ARPT_SO_SET_MAX+1, .set = do_arpt_set_ctl, -#ifdef CONFIG_COMPAT - .compat_set = compat_do_arpt_set_ctl, -#endif .get_optmin = ARPT_BASE_CTL, .get_optmax = ARPT_SO_GET_MAX+1, .get = do_arpt_get_ctl, -#ifdef CONFIG_COMPAT - .compat_get = compat_do_arpt_get_ctl, -#endif .owner = THIS_MODULE, }; From 89c53c14e4d218ca45b47402d991e82b77875888 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:17 +0200 Subject: [PATCH 1200/2056] netfilter/ip_tables: clean up compat {get,set}sockopt handling Merge the native and compat {get,set}sockopt handlers using in_compat_syscall(). Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/netfilter/ip_tables.c | 86 +++++++++------------------------- 1 file changed, 21 insertions(+), 65 deletions(-) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 5bf9fa06aee0..fbfad38f3979 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -944,8 +944,7 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, - const int *len, int compat) +static int get_info(struct net *net, void __user *user, const int *len) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -959,7 +958,7 @@ static int get_info(struct net *net, void __user *user, name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_lock(AF_INET); #endif t = xt_request_find_table_lock(net, AF_INET, name); @@ -969,7 +968,7 @@ static int get_info(struct net *net, void __user *user, #ifdef CONFIG_COMPAT struct xt_table_info tmp; - if (compat) { + if (in_compat_syscall()) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET); private = &tmp; @@ -995,7 +994,7 @@ static int get_info(struct net *net, void __user *user, } else ret = PTR_ERR(t); #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_unlock(AF_INET); #endif return ret; @@ -1153,7 +1152,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) static int do_add_counters(struct net *net, const void __user *user, - unsigned int len, int compat) + unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1164,7 +1163,8 @@ do_add_counters(struct net *net, const void __user *user, struct ipt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + paddc = xt_copy_counters_from_user(user, len, &tmp, + in_compat_syscall()); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1534,31 +1534,6 @@ 
compat_do_replace(struct net *net, void __user *user, unsigned int len) return ret; } -static int -compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, - unsigned int len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case IPT_SO_SET_REPLACE: - ret = compat_do_replace(sock_net(sk), user, len); - break; - - case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 1); - break; - - default: - ret = -EINVAL; - } - - return ret; -} - struct compat_ipt_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; @@ -1634,29 +1609,6 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, xt_compat_unlock(AF_INET); return ret; } - -static int do_ipt_get_ctl(struct sock *, int, void __user *, int *); - -static int -compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case IPT_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 1); - break; - case IPT_SO_GET_ENTRIES: - ret = compat_get_entries(sock_net(sk), user, len); - break; - default: - ret = do_ipt_get_ctl(sk, cmd, user, len); - } - return ret; -} #endif static int @@ -1669,11 +1621,16 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) switch (cmd) { case IPT_SO_SET_REPLACE: - ret = do_replace(sock_net(sk), user, len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_do_replace(sock_net(sk), user, len); + else +#endif + ret = do_replace(sock_net(sk), user, len); break; case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 0); + ret = do_add_counters(sock_net(sk), user, len); break; default: @@ -1693,11 +1650,16 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) switch (cmd) { case IPT_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 0); + ret = get_info(sock_net(sk), user, len); break; case IPT_SO_GET_ENTRIES: - ret = get_entries(sock_net(sk), user, len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_get_entries(sock_net(sk), user, len); + else +#endif + ret = get_entries(sock_net(sk), user, len); break; case IPT_SO_GET_REVISION_MATCH: @@ -1886,15 +1848,9 @@ static struct nf_sockopt_ops ipt_sockopts = { .set_optmin = IPT_BASE_CTL, .set_optmax = IPT_SO_SET_MAX+1, .set = do_ipt_set_ctl, -#ifdef CONFIG_COMPAT - .compat_set = compat_do_ipt_set_ctl, -#endif .get_optmin = IPT_BASE_CTL, .get_optmax = IPT_SO_GET_MAX+1, .get = do_ipt_get_ctl, -#ifdef CONFIG_COMPAT - .compat_get = compat_do_ipt_get_ctl, -#endif .owner = THIS_MODULE, }; From f415e76fd723594ed878a5a79a0ec36eca81c312 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:18 +0200 Subject: [PATCH 1201/2056] netfilter/ip6_tables: clean up compat {get, set}sockopt handling Merge the native and compat {get,set}sockopt handlers using in_compat_syscall(). Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv6/netfilter/ip6_tables.c | 87 ++++++++------------------------- 1 file changed, 21 insertions(+), 66 deletions(-) diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index e96a431549bc..96c48e91e6c7 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -960,8 +960,7 @@ static int compat_table_info(const struct xt_table_info *info, } #endif -static int get_info(struct net *net, void __user *user, - const int *len, int compat) +static int get_info(struct net *net, void __user *user, const int *len) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; @@ -975,7 +974,7 @@ static int get_info(struct net *net, void __user *user, name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_lock(AF_INET6); #endif t = xt_request_find_table_lock(net, AF_INET6, name); @@ -985,7 +984,7 @@ static int get_info(struct net *net, void __user *user, #ifdef CONFIG_COMPAT struct xt_table_info tmp; - if (compat) { + if (in_compat_syscall()) { ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; @@ -1011,7 +1010,7 @@ static int get_info(struct net *net, void __user *user, } else ret = PTR_ERR(t); #ifdef CONFIG_COMPAT - if (compat) + if (in_compat_syscall()) xt_compat_unlock(AF_INET6); #endif return ret; @@ -1169,8 +1168,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) } static int -do_add_counters(struct net *net, const void __user *user, unsigned int len, - int compat) +do_add_counters(struct net *net, const void __user *user, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1181,7 +1179,8 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len, struct ip6t_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, compat); + paddc = xt_copy_counters_from_user(user, len, &tmp, + in_compat_syscall()); if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); @@ -1543,31 +1542,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return ret; } -static int -compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, - unsigned int len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case IP6T_SO_SET_REPLACE: - ret = compat_do_replace(sock_net(sk), user, len); - break; - - case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 1); - break; - - default: - ret = -EINVAL; - } - - return ret; -} - struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; @@ -1643,29 +1617,6 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, xt_compat_unlock(AF_INET6); return ret; } - -static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); - -static int -compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) -{ - int ret; - - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case IP6T_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 1); - break; - case IP6T_SO_GET_ENTRIES: - ret = compat_get_entries(sock_net(sk), user, len); - break; - default: - ret = do_ip6t_get_ctl(sk, cmd, user, len); - } - return ret; -} #endif static int @@ -1678,11 +1629,16 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) switch (cmd) { case IP6T_SO_SET_REPLACE: - ret = 
do_replace(sock_net(sk), user, len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_do_replace(sock_net(sk), user, len); + else +#endif + ret = do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len, 0); + ret = do_add_counters(sock_net(sk), user, len); break; default: @@ -1702,11 +1658,16 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) switch (cmd) { case IP6T_SO_GET_INFO: - ret = get_info(sock_net(sk), user, len, 0); + ret = get_info(sock_net(sk), user, len); break; case IP6T_SO_GET_ENTRIES: - ret = get_entries(sock_net(sk), user, len); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_get_entries(sock_net(sk), user, len); + else +#endif + ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: @@ -1897,15 +1858,9 @@ static struct nf_sockopt_ops ip6t_sockopts = { .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, -#ifdef CONFIG_COMPAT - .compat_set = compat_do_ip6t_set_ctl, -#endif .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, -#ifdef CONFIG_COMPAT - .compat_get = compat_do_ip6t_get_ctl, -#endif .owner = THIS_MODULE, }; From fc66de8e16ec4d9b4d2696d961de4f81db78a1b6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:19 +0200 Subject: [PATCH 1202/2056] netfilter/ebtables: clean up compat {get, set}sockopt handling Merge the native and compat {get,set}sockopt handlers using in_compat_syscall(). Note that this required moving a fair amount of code around to be done sanely. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/bridge/netfilter/ebtables.c | 214 +++++++++++++++----------------- 1 file changed, 98 insertions(+), 116 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index c83ffe912163..fe13108af1f5 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1451,86 +1451,6 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user, ebt_entry_to_user, entries, tmp.entries); } -static int do_ebt_set_ctl(struct sock *sk, - int cmd, void __user *user, unsigned int len) -{ - int ret; - struct net *net = sock_net(sk); - - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case EBT_SO_SET_ENTRIES: - ret = do_replace(net, user, len); - break; - case EBT_SO_SET_COUNTERS: - ret = update_counters(net, user, len); - break; - default: - ret = -EINVAL; - } - return ret; -} - -static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) -{ - int ret; - struct ebt_replace tmp; - struct ebt_table *t; - struct net *net = sock_net(sk); - - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - if (copy_from_user(&tmp, user, sizeof(tmp))) - return -EFAULT; - - tmp.name[sizeof(tmp.name) - 1] = '\0'; - - t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); - if (!t) - return ret; - - switch (cmd) { - case EBT_SO_GET_INFO: - case EBT_SO_GET_INIT_INFO: - if (*len != sizeof(struct ebt_replace)) { - ret = -EINVAL; - mutex_unlock(&ebt_mutex); - break; - } - if (cmd == EBT_SO_GET_INFO) { - tmp.nentries = t->private->nentries; - tmp.entries_size = t->private->entries_size; - tmp.valid_hooks = t->valid_hooks; - } else { - tmp.nentries = t->table->nentries; - tmp.entries_size = t->table->entries_size; - tmp.valid_hooks = t->table->valid_hooks; - } - mutex_unlock(&ebt_mutex); - if
(copy_to_user(user, &tmp, *len) != 0) { - ret = -EFAULT; - break; - } - ret = 0; - break; - - case EBT_SO_GET_ENTRIES: - case EBT_SO_GET_INIT_ENTRIES: - ret = copy_everything_to_user(t, user, len, cmd); - mutex_unlock(&ebt_mutex); - break; - - default: - mutex_unlock(&ebt_mutex); - ret = -EINVAL; - } - - return ret; -} - #ifdef CONFIG_COMPAT /* 32 bit-userspace compatibility definitions. */ struct compat_ebt_replace { @@ -2314,28 +2234,6 @@ static int compat_update_counters(struct net *net, void __user *user, hlp.num_counters, user, len); } -static int compat_do_ebt_set_ctl(struct sock *sk, - int cmd, void __user *user, unsigned int len) -{ - int ret; - struct net *net = sock_net(sk); - - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - switch (cmd) { - case EBT_SO_SET_ENTRIES: - ret = compat_do_replace(net, user, len); - break; - case EBT_SO_SET_COUNTERS: - ret = compat_update_counters(net, user, len); - break; - default: - ret = -EINVAL; - } - return ret; -} - static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { @@ -2344,14 +2242,6 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, struct ebt_table *t; struct net *net = sock_net(sk); - if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) - return -EPERM; - - /* try real handler in case userland supplied needed padding */ - if ((cmd == EBT_SO_GET_INFO || - cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp)) - return do_ebt_get_ctl(sk, cmd, user, len); - if (copy_from_user(&tmp, user, sizeof(tmp))) return -EFAULT; @@ -2413,20 +2303,112 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, } #endif +static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) +{ + struct net *net = sock_net(sk); + struct ebt_replace tmp; + struct ebt_table *t; + int ret; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + +#ifdef CONFIG_COMPAT + /* try real handler in case userland supplied needed padding */ + if (in_compat_syscall() && + ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) || + *len != sizeof(tmp))) + return compat_do_ebt_get_ctl(sk, cmd, user, len); +#endif + + if (copy_from_user(&tmp, user, sizeof(tmp))) + return -EFAULT; + + tmp.name[sizeof(tmp.name) - 1] = '\0'; + + t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); + if (!t) + return ret; + + switch (cmd) { + case EBT_SO_GET_INFO: + case EBT_SO_GET_INIT_INFO: + if (*len != sizeof(struct ebt_replace)) { + ret = -EINVAL; + mutex_unlock(&ebt_mutex); + break; + } + if (cmd == EBT_SO_GET_INFO) { + tmp.nentries = t->private->nentries; + tmp.entries_size = t->private->entries_size; + tmp.valid_hooks = t->valid_hooks; + } else { + tmp.nentries = t->table->nentries; + tmp.entries_size = t->table->entries_size; + tmp.valid_hooks = t->table->valid_hooks; + } + mutex_unlock(&ebt_mutex); + if (copy_to_user(user, &tmp, *len) != 0) { + ret = -EFAULT; + break; + } + ret = 0; + break; + + case EBT_SO_GET_ENTRIES: + case EBT_SO_GET_INIT_ENTRIES: + ret = copy_everything_to_user(t, user, len, cmd); + mutex_unlock(&ebt_mutex); + break; + + default: + mutex_unlock(&ebt_mutex); + ret = -EINVAL; + } + + return ret; +} + +static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, + unsigned int len) +{ + struct net *net = sock_net(sk); + int ret; + + if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) + return -EPERM; + + switch (cmd) { + case EBT_SO_SET_ENTRIES: +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_do_replace(net, user, len); + else +#endif + ret = do_replace(net, 
user, len); + break; + case EBT_SO_SET_COUNTERS: +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + ret = compat_update_counters(net, user, len); + else +#endif + ret = update_counters(net, user, len); + break; + default: + ret = -EINVAL; + } + return ret; +} + static struct nf_sockopt_ops ebt_sockopts = { .pf = PF_INET, .set_optmin = EBT_BASE_CTL, .set_optmax = EBT_SO_SET_MAX + 1, .set = do_ebt_set_ctl, -#ifdef CONFIG_COMPAT - .compat_set = compat_do_ebt_set_ctl, -#endif .get_optmin = EBT_BASE_CTL, .get_optmax = EBT_SO_GET_MAX + 1, .get = do_ebt_get_ctl, -#ifdef CONFIG_COMPAT - .compat_get = compat_do_ebt_get_ctl, -#endif .owner = THIS_MODULE, }; From 77d4df41d53e5c2af14db26f20fe50da52e382ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:20 +0200 Subject: [PATCH 1203/2056] netfilter: remove the compat_{get,set} methods All instances handle compat sockopts via in_compat_syscall() now, so remove the compat_{get,set} methods as well as the compat_nf_{get,set}sockopt wrappers. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/netfilter.h | 14 ------------- net/ipv4/ip_sockglue.c | 5 ++--- net/ipv6/ipv6_sockglue.c | 5 ++--- net/netfilter/nf_sockopt.c | 42 -------------------------------------- 4 files changed, 4 insertions(+), 62 deletions(-) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index eb312e7ca36e..711b4d4486f0 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -164,17 +164,9 @@ struct nf_sockopt_ops { int set_optmin; int set_optmax; int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); -#ifdef CONFIG_COMPAT - int (*compat_set)(struct sock *sk, int optval, - void __user *user, unsigned int len); -#endif int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); -#ifdef CONFIG_COMPAT - int (*compat_get)(struct sock *sk, int optval, - void __user *user, int *len); -#endif /* Use the module struct to lock set/get code in place */ struct module *owner; }; @@ -350,12 +342,6 @@ int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); -#ifdef CONFIG_COMPAT -int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, unsigned int len); -int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, int *len); -#endif struct flowi; struct nf_queue_entry; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 84ec3703c909..95f4248c6fc5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1446,8 +1446,7 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = compat_nf_setsockopt(sk, PF_INET, optname, optval, - optlen); + err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); #endif return err; } @@ -1821,7 +1820,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; - err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len); + err = nf_getsockopt(sk, PF_INET, optname, optval, &len); if (err >= 0) err = put_user(len, optlen); return err; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6ab44ec2c369..6adfbdcb7979 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -1030,8 +1030,7 @@ int 
compat_ipv6_setsockopt(struct sock *sk, int level, int optname, /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = compat_nf_setsockopt(sk, PF_INET6, optname, optval, - optlen); + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); #endif return err; } @@ -1531,7 +1530,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, if (get_user(len, optlen)) return -EFAULT; - err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len); + err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); if (err >= 0) err = put_user(len, optlen); } diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 46cb3786e0ec..02870993d335 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -122,45 +122,3 @@ int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, return nf_sockopt(sk, pf, val, opt, len, 1); } EXPORT_SYMBOL(nf_getsockopt); - -#ifdef CONFIG_COMPAT -static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, - char __user *opt, int *len, int get) -{ - struct nf_sockopt_ops *ops; - int ret; - - ops = nf_sockopt_find(sk, pf, val, get); - if (IS_ERR(ops)) - return PTR_ERR(ops); - - if (get) { - if (ops->compat_get) - ret = ops->compat_get(sk, val, opt, len); - else - ret = ops->get(sk, val, opt, len); - } else { - if (ops->compat_set) - ret = ops->compat_set(sk, val, opt, *len); - else - ret = ops->set(sk, val, opt, *len); - } - - module_put(ops->owner); - return ret; -} - -int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, - int val, char __user *opt, unsigned int len) -{ - return compat_nf_sockopt(sk, pf, val, opt, &len, 0); -} -EXPORT_SYMBOL(compat_nf_setsockopt); - -int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, - int val, char __user *opt, int *len) -{ - return compat_nf_sockopt(sk, pf, val, opt, len, 1); -} -EXPORT_SYMBOL(compat_nf_getsockopt); -#endif From c34bc10d2535719ddf77d44ee849f6c7589583ba Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:21 +0200 Subject: [PATCH 1204/2056] netfilter: remove the compat argument to xt_copy_counters_from_user Lift the in_compat_syscall() from the callers instead. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/netfilter/x_tables.h | 2 +- net/ipv4/netfilter/arp_tables.c | 3 +-- net/ipv4/netfilter/ip_tables.c | 3 +-- net/ipv6/netfilter/ip6_tables.c | 3 +-- net/netfilter/x_tables.c | 9 ++++----- 5 files changed, 8 insertions(+), 12 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5da88451853b..b8b943ee7b8b 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -302,7 +302,7 @@ int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size); void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info, bool compat); + struct xt_counters_info *info); struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 15807fb4a65f..2c8a4dad39d7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1008,8 +1008,7 @@ static int do_add_counters(struct net *net, const void __user *user, struct arpt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index fbfad38f3979..161901dd1cae 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1163,8 +1163,7 @@ do_add_counters(struct net *net, const void __user *user, struct ipt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 96c48e91e6c7..fd1f8f931231 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1179,8 +1179,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len) struct ip6t_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp, - in_compat_syscall()); + paddc = xt_copy_counters_from_user(user, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 99a468be4a59..32bab45af7e4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1033,15 +1033,14 @@ EXPORT_SYMBOL_GPL(xt_check_target); * @user: src pointer to userspace memory * @len: alleged size of userspace memory * @info: where to store the xt_counters_info metadata - * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel * * Copies counter meta data from @user and stores it in @info. * * vmallocs memory to hold the counters, then copies the counter data * from @user to the new memory and returns a pointer to it. * - * If @compat is true, @info gets converted automatically to the 64bit - * representation. + * If called from a compat syscall, @info gets converted automatically to the + * 64bit representation. * * The metadata associated with the counters is stored in @info. * @@ -1049,13 +1048,13 @@ EXPORT_SYMBOL_GPL(xt_check_target); * If IS_ERR is false, caller has to vfree the pointer. 
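 *
 * A typical caller looks roughly like this (sketch only; table lookup,
 * locking and the per-entry counter update are left out):
 *
 *	struct xt_counters_info tmp;
 *	void *paddc;
 *
 *	paddc = xt_copy_counters_from_user(user, len, &tmp);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	... add tmp.num_counters counters taken from paddc ...
 *	vfree(paddc);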
*/ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info, bool compat) + struct xt_counters_info *info) { void *mem; u64 size; #ifdef CONFIG_COMPAT - if (compat) { + if (in_compat_syscall()) { /* structures only differ in size due to alignment */ struct compat_xt_counters_info compat_tmp; From 657e4c34a2370da1e82bbfc0d253ad394056b19e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:22 +0200 Subject: [PATCH 1205/2056] netfilter: split nf_sockopt Split nf_sockopt into a getsockopt and setsockopt side as they share very little code. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/netfilter/nf_sockopt.c | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 02870993d335..90469b1f628a 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -89,36 +89,32 @@ static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, return ops; } -/* Call get/setsockopt() */ -static int nf_sockopt(struct sock *sk, u_int8_t pf, int val, - char __user *opt, int *len, int get) +int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, + unsigned int len) { struct nf_sockopt_ops *ops; int ret; - ops = nf_sockopt_find(sk, pf, val, get); + ops = nf_sockopt_find(sk, pf, val, 0); if (IS_ERR(ops)) return PTR_ERR(ops); - - if (get) - ret = ops->get(sk, val, opt, len); - else - ret = ops->set(sk, val, opt, *len); - + ret = ops->set(sk, val, opt, len); module_put(ops->owner); return ret; } - -int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, - unsigned int len) -{ - return nf_sockopt(sk, pf, val, opt, &len, 0); -} EXPORT_SYMBOL(nf_setsockopt); int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, int *len) { - return nf_sockopt(sk, pf, val, opt, len, 1); + struct nf_sockopt_ops *ops; + int ret; + + ops = nf_sockopt_find(sk, pf, val, 1); + if (IS_ERR(ops)) + return PTR_ERR(ops); + ret = ops->get(sk, val, opt, len); + module_put(ops->owner); + return ret; } EXPORT_SYMBOL(nf_getsockopt); From 49e74c24f31048b4faa3cfcccb93f444abb0b910 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:23 +0200 Subject: [PATCH 1206/2056] net/ipv4: factor out MCAST_MSFILTER getsockopt helpers Factor out one helper each for getting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 131 ++++++++++++++++++++++------------------- 1 file changed, 70 insertions(+), 61 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 95f4248c6fc5..70d32c9476a2 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1468,6 +1468,74 @@ static bool getsockopt_needs_rtnl(int optname) return false; } +static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval, + int __user *optlen, int len) +{ + const int size0 = offsetof(struct group_filter, gf_slist); + struct group_filter __user *p = optval; + struct group_filter gsf; + int num; + int err; + + if (len < size0) + return -EINVAL; + if (copy_from_user(&gsf, p, size0)) + return -EFAULT; + + num = gsf.gf_numsrc; + err = ip_mc_gsfget(sk, &gsf, p->gf_slist); + if (err) + return err; + if (gsf.gf_numsrc < num) + num = gsf.gf_numsrc; + if (put_user(GROUP_FILTER_SIZE(num), optlen) || + copy_to_user(p, &gsf, size0)) + return -EFAULT; + return 0; +} + +#ifdef CONFIG_COMPAT +static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, + int __user *optlen) +{ + const int size0 = offsetof(struct compat_group_filter, gf_slist); + struct compat_group_filter __user *p = optval; + struct compat_group_filter gf32; + struct group_filter gf; + int len, err; + int num; + + if (get_user(len, optlen)) + return -EFAULT; + if (len < size0) + return -EINVAL; + + if (copy_from_user(&gf32, p, size0)) + return -EFAULT; + + gf.gf_interface = gf32.gf_interface; + gf.gf_fmode = gf32.gf_fmode; + num = gf.gf_numsrc = gf32.gf_numsrc; + gf.gf_group = gf32.gf_group; + + rtnl_lock(); + lock_sock(sk); + err = ip_mc_gsfget(sk, &gf, p->gf_slist); + release_sock(sk); + rtnl_unlock(); + if (err) + return err; + if (gf.gf_numsrc < num) + num = gf.gf_numsrc; + len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32)); + if (put_user(len, optlen) || + put_user(gf.gf_fmode, &p->gf_fmode) || + put_user(gf.gf_numsrc, &p->gf_numsrc)) + return -EFAULT; + return 0; +} +#endif + static int do_ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) { @@ -1626,31 +1694,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, goto out; } case MCAST_MSFILTER: - { - struct group_filter __user *p = (void __user *)optval; - struct group_filter gsf; - const int size0 = offsetof(struct group_filter, gf_slist); - int num; - - if (len < size0) { - err = -EINVAL; - goto out; - } - if (copy_from_user(&gsf, p, size0)) { - err = -EFAULT; - goto out; - } - num = gsf.gf_numsrc; - err = ip_mc_gsfget(sk, &gsf, p->gf_slist); - if (err) - goto out; - if (gsf.gf_numsrc < num) - num = gsf.gf_numsrc; - if (put_user(GROUP_FILTER_SIZE(num), optlen) || - copy_to_user(p, &gsf, size0)) - err = -EFAULT; + err = ip_get_mcast_msfilter(sk, optval, optlen, len); goto out; - } case IP_MULTICAST_ALL: val = inet->mc_all; break; @@ -1762,45 +1807,9 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, int err; if (optname == MCAST_MSFILTER) { - const int size0 = offsetof(struct compat_group_filter, gf_slist); - struct compat_group_filter __user *p = (void __user *)optval; - struct compat_group_filter gf32; - struct group_filter gf; - int ulen, err; - int num; - if (level != SOL_IP) return -EOPNOTSUPP; - - if (get_user(ulen, optlen)) - return -EFAULT; - - if (ulen < size0) - return -EINVAL; - - if (copy_from_user(&gf32, p, size0)) - return -EFAULT; - - gf.gf_interface = gf32.gf_interface; - gf.gf_fmode = gf32.gf_fmode; - num = 
gf.gf_numsrc = gf32.gf_numsrc; - gf.gf_group = gf32.gf_group; - - rtnl_lock(); - lock_sock(sk); - err = ip_mc_gsfget(sk, &gf, p->gf_slist); - release_sock(sk); - rtnl_unlock(); - if (err) - return err; - if (gf.gf_numsrc < num) - num = gf.gf_numsrc; - ulen = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32)); - if (put_user(ulen, optlen) || - put_user(gf.gf_fmode, &p->gf_fmode) || - put_user(gf.gf_numsrc, &p->gf_numsrc)) - return -EFAULT; - return 0; + return compat_ip_get_mcast_msfilter(sk, optval, optlen); } err = do_ip_getsockopt(sk, level, optname, optval, optlen, From d62c38f6a1a8c833df8dd35dacde23b0b6c931f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:24 +0200 Subject: [PATCH 1207/2056] net/ipv4: factor out MCAST_MSFILTER setsockopt helpers Factor out one helper each for setting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/ip_sockglue.c | 162 ++++++++++++++++++++++------------------- 1 file changed, 86 insertions(+), 76 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 70d32c9476a2..b587dee006f8 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -722,6 +722,90 @@ static int do_mcast_group_source(struct sock *sk, int optname, return ip_mc_source(add, omode, sk, &mreqs, greqs->gsr_interface); } +static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, + int optlen) +{ + struct group_filter *gsf = NULL; + int err; + + if (optlen < GROUP_FILTER_SIZE(0)) + return -EINVAL; + if (optlen > sysctl_optmem_max) + return -ENOBUFS; + + gsf = memdup_user(optval, optlen); + if (IS_ERR(gsf)) + return PTR_ERR(gsf); + + /* numsrc >= (4G-140)/128 overflow in 32 bits */ + err = -ENOBUFS; + if (gsf->gf_numsrc >= 0x1ffffff || + gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf) + goto out_free_gsf; + + err = -EINVAL; + if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) + goto out_free_gsf; + + err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc, + gsf->gf_fmode, &gsf->gf_group, gsf->gf_slist); +out_free_gsf: + kfree(gsf); + return err; +} + +#ifdef CONFIG_COMPAT +static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, + int optlen) +{ + const int size0 = offsetof(struct compat_group_filter, gf_slist); + struct compat_group_filter *gf32; + unsigned int n; + void *p; + int err; + + if (optlen < size0) + return -EINVAL; + if (optlen > sysctl_optmem_max - 4) + return -ENOBUFS; + + p = kmalloc(optlen + 4, GFP_KERNEL); + if (!p) + return -ENOMEM; + gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ + + err = -EFAULT; + if (copy_from_user(gf32, optval, optlen)) + goto out_free_gsf; + + /* numsrc >= (4G-140)/128 overflow in 32 bits */ + n = gf32->gf_numsrc; + err = -ENOBUFS; + if (n >= 0x1ffffff) + goto out_free_gsf; + + err = -EINVAL; + if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) + goto out_free_gsf; + + rtnl_lock(); + lock_sock(sk); + + /* numsrc >= (4G-140)/128 overflow in 32 bits */ + err = -ENOBUFS; + if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) + goto out_unlock; + err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode, + &gf32->gf_group, gf32->gf_slist); +out_unlock: + release_sock(sk); + rtnl_unlock(); +out_free_gsf: + kfree(p); + return err; +} +#endif + static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -1167,37 +1251,8 @@ static int do_ip_setsockopt(struct sock *sk, int 
level, break; } case MCAST_MSFILTER: - { - struct group_filter *gsf = NULL; - - if (optlen < GROUP_FILTER_SIZE(0)) - goto e_inval; - if (optlen > sysctl_optmem_max) { - err = -ENOBUFS; - break; - } - gsf = memdup_user(optval, optlen); - if (IS_ERR(gsf)) { - err = PTR_ERR(gsf); - break; - } - /* numsrc >= (4G-140)/128 overflow in 32 bits */ - if (gsf->gf_numsrc >= 0x1ffffff || - gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) { - err = -ENOBUFS; - goto mc_msf_out; - } - if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { - err = -EINVAL; - goto mc_msf_out; - } - err = set_mcast_msfilter(sk, gsf->gf_interface, - gsf->gf_numsrc, gsf->gf_fmode, - &gsf->gf_group, gsf->gf_slist); -mc_msf_out: - kfree(gsf); + err = ip_set_mcast_msfilter(sk, optval, optlen); break; - } case IP_MULTICAST_ALL: if (optlen < 1) goto e_inval; @@ -1391,52 +1446,7 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, return err; } case MCAST_MSFILTER: - { - const int size0 = offsetof(struct compat_group_filter, gf_slist); - struct compat_group_filter *gf32; - unsigned int n; - void *p; - - if (optlen < size0) - return -EINVAL; - if (optlen > sysctl_optmem_max - 4) - return -ENOBUFS; - - p = kmalloc(optlen + 4, GFP_KERNEL); - if (!p) - return -ENOMEM; - gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ - if (copy_from_user(gf32, optval, optlen)) { - err = -EFAULT; - goto mc_msf_out; - } - - n = gf32->gf_numsrc; - /* numsrc >= (4G-140)/128 overflow in 32 bits */ - if (n >= 0x1ffffff) { - err = -ENOBUFS; - goto mc_msf_out; - } - if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) { - err = -EINVAL; - goto mc_msf_out; - } - - rtnl_lock(); - lock_sock(sk); - /* numsrc >= (4G-140)/128 overflow in 32 bits */ - if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) - err = -ENOBUFS; - else - err = set_mcast_msfilter(sk, gf32->gf_interface, - n, gf32->gf_fmode, - &gf32->gf_group, gf32->gf_slist); - release_sock(sk); - rtnl_unlock(); -mc_msf_out: - kfree(p); - return err; - } + return compat_ip_set_mcast_msfilter(sk, optval, optlen); } err = do_ip_setsockopt(sk, level, optname, optval, optlen); From 02caad7cc0841f94bc105b93fbe468d3cbf6f378 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:25 +0200 Subject: [PATCH 1208/2056] net/ipv4: factor out mcast join/leave setsockopt helpers Factor out one helper each for setting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 109 +++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index b587dee006f8..73bb88fbe546 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -806,6 +806,60 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, } #endif +static int ip_mcast_join_leave(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct ip_mreqn mreq = { }; + struct sockaddr_in *psin; + struct group_req greq; + + if (optlen < sizeof(struct group_req)) + return -EINVAL; + if (copy_from_user(&greq, optval, sizeof(greq))) + return -EFAULT; + + psin = (struct sockaddr_in *)&greq.gr_group; + if (psin->sin_family != AF_INET) + return -EINVAL; + mreq.imr_multiaddr = psin->sin_addr; + mreq.imr_ifindex = greq.gr_interface; + if (optname == MCAST_JOIN_GROUP) + return ip_mc_join_group(sk, &mreq); + return ip_mc_leave_group(sk, &mreq); +} + +#ifdef CONFIG_COMPAT +static int compat_ip_mcast_join_leave(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct compat_group_req greq; + struct ip_mreqn mreq = { }; + struct sockaddr_in *psin; + int err; + + if (optlen < sizeof(struct compat_group_req)) + return -EINVAL; + if (copy_from_user(&greq, optval, sizeof(greq))) + return -EFAULT; + + psin = (struct sockaddr_in *)&greq.gr_group; + if (psin->sin_family != AF_INET) + return -EINVAL; + mreq.imr_multiaddr = psin->sin_addr; + mreq.imr_ifindex = greq.gr_interface; + + rtnl_lock(); + lock_sock(sk); + if (optname == MCAST_JOIN_GROUP) + err = ip_mc_join_group(sk, &mreq); + else + err = ip_mc_leave_group(sk, &mreq); + release_sock(sk); + rtnl_unlock(); + return err; +} +#endif + static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -1211,29 +1265,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - { - struct group_req greq; - struct sockaddr_in *psin; - struct ip_mreqn mreq; - - if (optlen < sizeof(struct group_req)) - goto e_inval; - err = -EFAULT; - if (copy_from_user(&greq, optval, sizeof(greq))) - break; - psin = (struct sockaddr_in *)&greq.gr_group; - if (psin->sin_family != AF_INET) - goto e_inval; - memset(&mreq, 0, sizeof(mreq)); - mreq.imr_multiaddr = psin->sin_addr; - mreq.imr_ifindex = greq.gr_interface; - - if (optname == MCAST_JOIN_GROUP) - err = ip_mc_join_group(sk, &mreq); - else - err = ip_mc_leave_group(sk, &mreq); + err = ip_mcast_join_leave(sk, optname, optval, optlen); break; - } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: @@ -1389,37 +1422,7 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - { - struct compat_group_req __user *gr32 = (void __user *)optval; - struct group_req greq; - struct sockaddr_in *psin = (struct sockaddr_in *)&greq.gr_group; - struct ip_mreqn mreq; - - if (optlen < sizeof(struct compat_group_req)) - return -EINVAL; - - if (get_user(greq.gr_interface, &gr32->gr_interface) || - copy_from_user(&greq.gr_group, &gr32->gr_group, - sizeof(greq.gr_group))) - return -EFAULT; - - if (psin->sin_family != AF_INET) - return -EINVAL; - - memset(&mreq, 0, sizeof(mreq)); - mreq.imr_multiaddr = psin->sin_addr; - mreq.imr_ifindex = greq.gr_interface; - - rtnl_lock(); - lock_sock(sk); - if (optname == MCAST_JOIN_GROUP) - err = ip_mc_join_group(sk, &mreq); - 
else - err = ip_mc_leave_group(sk, &mreq); - release_sock(sk); - rtnl_unlock(); - return err; - } + return compat_ip_mcast_join_leave(sk, optname, optval, optlen); case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: From b6238c04c0e5dbe7ae4ea48e96e004905b120a04 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:26 +0200 Subject: [PATCH 1209/2056] net/ipv4: remove compat_ip_{get,set}sockopt Handle the few cases that need special treatment in-line using in_compat_syscall(). Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/ip.h | 4 - net/dccp/ipv4.c | 4 - net/ipv4/ip_sockglue.c | 216 ++++++++++++----------------------------- net/ipv4/raw.c | 22 ----- net/ipv4/tcp_ipv4.c | 4 - net/ipv4/udp.c | 24 ----- net/ipv4/udp_impl.h | 6 -- net/ipv4/udplite.c | 4 - net/l2tp/l2tp_ip.c | 4 - net/sctp/protocol.c | 4 - 10 files changed, 62 insertions(+), 230 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 862c9545833a..3d34acc95ca8 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -727,10 +727,6 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int compat_ip_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)); diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 316cc5ac0da7..b91373eb1c79 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -913,10 +913,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; static int dccp_v4_init_sock(struct sock *sk) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 73bb88fbe546..86b3b9a7cea3 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -679,20 +679,48 @@ static int set_mcast_msfilter(struct sock *sk, int ifindex, return -EADDRNOTAVAIL; } -static int do_mcast_group_source(struct sock *sk, int optname, - struct group_source_req *greqs) +static int copy_group_source_from_user(struct group_source_req *greqs, + void __user *optval, int optlen) { + if (in_compat_syscall()) { + struct compat_group_source_req gr32; + + if (optlen != sizeof(gr32)) + return -EINVAL; + if (copy_from_user(&gr32, optval, sizeof(gr32))) + return -EFAULT; + greqs->gsr_interface = gr32.gsr_interface; + greqs->gsr_group = gr32.gsr_group; + greqs->gsr_source = gr32.gsr_source; + } else { + if (optlen != sizeof(*greqs)) + return -EINVAL; + if (copy_from_user(greqs, optval, sizeof(*greqs))) + return -EFAULT; + } + + return 0; +} + +static int do_mcast_group_source(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct group_source_req greqs; struct ip_mreq_source mreqs; struct sockaddr_in *psin; int omode, add, err; - if (greqs->gsr_group.ss_family != AF_INET || - greqs->gsr_source.ss_family != AF_INET) + err = copy_group_source_from_user(&greqs, optval, optlen); + if (err) + return err; + + if (greqs.gsr_group.ss_family != AF_INET || + greqs.gsr_source.ss_family != AF_INET) return 
-EADDRNOTAVAIL; - psin = (struct sockaddr_in *)&greqs->gsr_group; + psin = (struct sockaddr_in *)&greqs.gsr_group; mreqs.imr_multiaddr = psin->sin_addr.s_addr; - psin = (struct sockaddr_in *)&greqs->gsr_source; + psin = (struct sockaddr_in *)&greqs.gsr_source; mreqs.imr_sourceaddr = psin->sin_addr.s_addr; mreqs.imr_interface = 0; /* use index for mc_source */ @@ -705,21 +733,21 @@ static int do_mcast_group_source(struct sock *sk, int optname, } else if (optname == MCAST_JOIN_SOURCE_GROUP) { struct ip_mreqn mreq; - psin = (struct sockaddr_in *)&greqs->gsr_group; + psin = (struct sockaddr_in *)&greqs.gsr_group; mreq.imr_multiaddr = psin->sin_addr; mreq.imr_address.s_addr = 0; - mreq.imr_ifindex = greqs->gsr_interface; + mreq.imr_ifindex = greqs.gsr_interface; err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE); if (err && err != -EADDRINUSE) return err; - greqs->gsr_interface = mreq.imr_ifindex; + greqs.gsr_interface = mreq.imr_ifindex; omode = MCAST_INCLUDE; add = 1; } else /* MCAST_LEAVE_SOURCE_GROUP */ { omode = MCAST_INCLUDE; add = 0; } - return ip_mc_source(add, omode, sk, &mreqs, greqs->gsr_interface); + return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface); } static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, @@ -754,7 +782,6 @@ static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, return err; } -#ifdef CONFIG_COMPAT static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, int optlen) { @@ -788,23 +815,16 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) goto out_free_gsf; - rtnl_lock(); - lock_sock(sk); - /* numsrc >= (4G-140)/128 overflow in 32 bits */ err = -ENOBUFS; if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf) - goto out_unlock; + goto out_free_gsf; err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode, &gf32->gf_group, gf32->gf_slist); -out_unlock: - release_sock(sk); - rtnl_unlock(); out_free_gsf: kfree(p); return err; } -#endif static int ip_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) @@ -828,14 +848,12 @@ static int ip_mcast_join_leave(struct sock *sk, int optname, return ip_mc_leave_group(sk, &mreq); } -#ifdef CONFIG_COMPAT static int compat_ip_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) { struct compat_group_req greq; struct ip_mreqn mreq = { }; struct sockaddr_in *psin; - int err; if (optlen < sizeof(struct compat_group_req)) return -EINVAL; @@ -848,17 +866,10 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, mreq.imr_multiaddr = psin->sin_addr; mreq.imr_ifindex = greq.gr_interface; - rtnl_lock(); - lock_sock(sk); if (optname == MCAST_JOIN_GROUP) - err = ip_mc_join_group(sk, &mreq); - else - err = ip_mc_leave_group(sk, &mreq); - release_sock(sk); - rtnl_unlock(); - return err; + return ip_mc_join_group(sk, &mreq); + return ip_mc_leave_group(sk, &mreq); } -#endif static int do_ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) @@ -1265,26 +1276,23 @@ static int do_ip_setsockopt(struct sock *sk, int level, } case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - err = ip_mcast_join_leave(sk, optname, optval, optlen); + if (in_compat_syscall()) + err = compat_ip_mcast_join_leave(sk, optname, optval, + optlen); + else + err = ip_mcast_join_leave(sk, optname, optval, optlen); break; case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: 
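	/*
	 * The source-group options (join/leave source group, block/unblock
	 * source) all fall through to do_mcast_group_source(), which now
	 * copies the request from userspace itself and selects the 32-bit
	 * layout via in_compat_syscall() (see copy_group_source_from_user()
	 * above).
	 */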
case MCAST_UNBLOCK_SOURCE: - { - struct group_source_req greqs; - - if (optlen != sizeof(struct group_source_req)) - goto e_inval; - if (copy_from_user(&greqs, optval, sizeof(greqs))) { - err = -EFAULT; - break; - } - err = do_mcast_group_source(sk, optname, &greqs); + err = do_mcast_group_source(sk, optname, optval, optlen); break; - } case MCAST_MSFILTER: - err = ip_set_mcast_msfilter(sk, optval, optlen); + if (in_compat_syscall()) + err = compat_ip_set_mcast_msfilter(sk, optval, optlen); + else + err = ip_set_mcast_msfilter(sk, optval, optlen); break; case IP_MULTICAST_ALL: if (optlen < 1) @@ -1410,62 +1418,6 @@ int ip_setsockopt(struct sock *sk, int level, } EXPORT_SYMBOL(ip_setsockopt); -#ifdef CONFIG_COMPAT -int compat_ip_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - - if (level != SOL_IP) - return -ENOPROTOOPT; - - switch (optname) { - case MCAST_JOIN_GROUP: - case MCAST_LEAVE_GROUP: - return compat_ip_mcast_join_leave(sk, optname, optval, optlen); - case MCAST_JOIN_SOURCE_GROUP: - case MCAST_LEAVE_SOURCE_GROUP: - case MCAST_BLOCK_SOURCE: - case MCAST_UNBLOCK_SOURCE: - { - struct compat_group_source_req __user *gsr32 = (void __user *)optval; - struct group_source_req greqs; - - if (optlen != sizeof(struct compat_group_source_req)) - return -EINVAL; - - if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) || - copy_from_user(&greqs.gsr_group, &gsr32->gsr_group, - sizeof(greqs.gsr_group)) || - copy_from_user(&greqs.gsr_source, &gsr32->gsr_source, - sizeof(greqs.gsr_source))) - return -EFAULT; - - rtnl_lock(); - lock_sock(sk); - err = do_mcast_group_source(sk, optname, &greqs); - release_sock(sk); - rtnl_unlock(); - return err; - } - case MCAST_MSFILTER: - return compat_ip_set_mcast_msfilter(sk, optval, optlen); - } - - err = do_ip_setsockopt(sk, level, optname, optval, optlen); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IP_HDRINCL && - optname != IP_IPSEC_POLICY && - optname != IP_XFRM_POLICY && - !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); -#endif - return err; -} -EXPORT_SYMBOL(compat_ip_setsockopt); -#endif - /* * Get the options. Note for future reference. The GET of IP options gets * the _received_ ones. The set sets the _sent_ ones. 
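The get side below follows the same shape as the set side above: a single
native handler that, for the few layout-sensitive options, branches on
in_compat_syscall() rather than going through a separate compat entry point.
A minimal sketch of that pattern (the foo_* names are placeholders for
illustration, not functions from this patch):

	static int do_foo_setsockopt(struct sock *sk, int optname,
				     void __user *optval, unsigned int optlen)
	{
		if (in_compat_syscall())
			return foo_setsockopt_compat(sk, optname, optval, optlen);
		return foo_setsockopt_native(sk, optname, optval, optlen);
	}

Handling both ABIs in one method is what lets the per-protocol hunks of this
patch drop the .compat_setsockopt/.compat_getsockopt initializers from their
ops structures.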
@@ -1507,22 +1459,18 @@ static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval, return 0; } -#ifdef CONFIG_COMPAT static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, - int __user *optlen) + int __user *optlen, int len) { const int size0 = offsetof(struct compat_group_filter, gf_slist); struct compat_group_filter __user *p = optval; struct compat_group_filter gf32; struct group_filter gf; - int len, err; int num; + int err; - if (get_user(len, optlen)) - return -EFAULT; if (len < size0) return -EINVAL; - if (copy_from_user(&gf32, p, size0)) return -EFAULT; @@ -1531,11 +1479,7 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, num = gf.gf_numsrc = gf32.gf_numsrc; gf.gf_group = gf32.gf_group; - rtnl_lock(); - lock_sock(sk); err = ip_mc_gsfget(sk, &gf, p->gf_slist); - release_sock(sk); - rtnl_unlock(); if (err) return err; if (gf.gf_numsrc < num) @@ -1547,10 +1491,9 @@ static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval, return -EFAULT; return 0; } -#endif static int do_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen, unsigned int flags) + char __user *optval, int __user *optlen) { struct inet_sock *inet = inet_sk(sk); bool needs_rtnl = getsockopt_needs_rtnl(optname); @@ -1707,7 +1650,11 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, goto out; } case MCAST_MSFILTER: - err = ip_get_mcast_msfilter(sk, optval, optlen, len); + if (in_compat_syscall()) + err = compat_ip_get_mcast_msfilter(sk, optval, optlen, + len); + else + err = ip_get_mcast_msfilter(sk, optval, optlen, len); goto out; case IP_MULTICAST_ALL: val = inet->mc_all; @@ -1724,7 +1671,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, msg.msg_control_is_user = true; msg.msg_control_user = optval; msg.msg_controllen = len; - msg.msg_flags = flags; + msg.msg_flags = in_compat_syscall() ? 
MSG_CMSG_COMPAT : 0; if (inet->cmsg_flags & IP_CMSG_PKTINFO) { struct in_pktinfo info; @@ -1788,7 +1735,8 @@ int ip_getsockopt(struct sock *sk, int level, { int err; - err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0); + err = do_ip_getsockopt(sk, level, optname, optval, optlen); + #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_GET_INFO && optname < BPFILTER_IPT_GET_MAX) @@ -1812,43 +1760,3 @@ int ip_getsockopt(struct sock *sk, int level, return err; } EXPORT_SYMBOL(ip_getsockopt); - -#ifdef CONFIG_COMPAT -int compat_ip_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - int err; - - if (optname == MCAST_MSFILTER) { - if (level != SOL_IP) - return -EOPNOTSUPP; - return compat_ip_get_mcast_msfilter(sk, optval, optlen); - } - - err = do_ip_getsockopt(sk, level, optname, optval, optlen, - MSG_CMSG_COMPAT); - -#if IS_ENABLED(CONFIG_BPFILTER_UMH) - if (optname >= BPFILTER_IPT_SO_GET_INFO && - optname < BPFILTER_IPT_GET_MAX) - err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen); -#endif -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS && - !ip_mroute_opt(optname)) { - int len; - - if (get_user(len, optlen)) - return -EFAULT; - - err = nf_getsockopt(sk, PF_INET, optname, optval, &len); - if (err >= 0) - err = put_user(len, optlen); - return err; - } -#endif - return err; -} -EXPORT_SYMBOL(compat_ip_getsockopt); -#endif diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 47665919048f..2a57d633b31e 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -857,16 +857,6 @@ static int raw_setsockopt(struct sock *sk, int level, int optname, return do_raw_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_RAW) - return compat_ip_setsockopt(sk, level, optname, optval, optlen); - return do_raw_setsockopt(sk, level, optname, optval, optlen); -} -#endif - static int do_raw_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -887,16 +877,6 @@ static int raw_getsockopt(struct sock *sk, int level, int optname, return do_raw_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_raw_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_RAW) - return compat_ip_getsockopt(sk, level, optname, optval, optlen); - return do_raw_getsockopt(sk, level, optname, optval, optlen); -} -#endif - static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { @@ -980,8 +960,6 @@ struct proto raw_prot = { .usersize = sizeof_field(struct raw_sock, filter), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_raw_setsockopt, - .compat_getsockopt = compat_raw_getsockopt, .compat_ioctl = compat_raw_ioctl, #endif .diag_destroy = raw_abort, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 116c11a0aaed..e5b7ef9a2887 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2134,10 +2134,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = { .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif 
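	/*
	 * No compat_{set,get}sockopt methods here any more: ip_setsockopt()
	 * and ip_getsockopt() now serve 32-bit callers as well, switching
	 * on in_compat_syscall() internally.
	 */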
.mtu_reduced = tcp_v4_mtu_reduced, }; EXPORT_SYMBOL(ipv4_specific); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 073d346f515c..d4be4471c424 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2656,17 +2656,6 @@ int udp_setsockopt(struct sock *sk, int level, int optname, return ip_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, - udp_push_pending_frames); - return compat_ip_setsockopt(sk, level, optname, optval, optlen); -} -#endif - int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -2732,15 +2721,6 @@ int udp_getsockopt(struct sock *sk, int level, int optname, return ip_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_getsockopt(sk, level, optname, optval, optlen); - return compat_ip_getsockopt(sk, level, optname, optval, optlen); -} -#endif /** * udp_poll - wait for a UDP event. * @file: - file struct @@ -2812,10 +2792,6 @@ struct proto udp_prot = { .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp_sock), .h.udp_table = &udp_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udp_setsockopt, - .compat_getsockopt = compat_udp_getsockopt, -#endif .diag_destroy = udp_abort, }; EXPORT_SYMBOL(udp_prot); diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 6b2fa77eeb1c..ab313702c87f 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -17,12 +17,6 @@ int udp_setsockopt(struct sock *sk, int level, int optname, int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT -int compat_udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_udp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -#endif int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 5936d66d1ce2..bd8773b49e72 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -56,10 +56,6 @@ struct proto udplite_prot = { .sysctl_mem = sysctl_udp_mem, .obj_size = sizeof(struct udp_sock), .h.udp_table = &udplite_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udp_setsockopt, - .compat_getsockopt = compat_udp_getsockopt, -#endif }; EXPORT_SYMBOL(udplite_prot); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index f8d7412cfb3d..2a3fd31fb589 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -612,10 +612,6 @@ static struct proto l2tp_ip_prot = { .hash = l2tp_ip_hash, .unhash = l2tp_ip_unhash, .obj_size = sizeof(struct l2tp_ip_sock), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; static const struct proto_ops l2tp_ip_ops = { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 8d25cc464efd..7ecaf7d575c0 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1089,10 +1089,6 @@ static struct sctp_af 
sctp_af_inet = { .net_header_len = sizeof(struct iphdr), .sockaddr_len = sizeof(struct sockaddr_in), .ip_options_len = sctp_v4_ip_options_len, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ip_setsockopt, - .compat_getsockopt = compat_ip_getsockopt, -#endif }; struct sctp_pf *sctp_get_pf_specific(sa_family_t family) From d5541e85cd40118a817f015ce3af1f41a7d7de1b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:27 +0200 Subject: [PATCH 1210/2056] net/ipv6: factor out MCAST_MSFILTER getsockopt helpers Factor out one helper each for getting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv6/ipv6_sockglue.c | 139 +++++++++++++++++++++------------------ 1 file changed, 74 insertions(+), 65 deletions(-) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6adfbdcb7979..ef5656f876ac 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -1071,6 +1071,77 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, return len; } +static int ipv6_get_msfilter(struct sock *sk, void __user *optval, + int __user *optlen, int len) +{ + const int size0 = offsetof(struct group_filter, gf_slist); + struct group_filter __user *p = optval; + struct group_filter gsf; + int num; + int err; + + if (len < size0) + return -EINVAL; + if (copy_from_user(&gsf, p, size0)) + return -EFAULT; + if (gsf.gf_group.ss_family != AF_INET6) + return -EADDRNOTAVAIL; + num = gsf.gf_numsrc; + lock_sock(sk); + err = ip6_mc_msfget(sk, &gsf, p->gf_slist); + if (!err) { + if (num > gsf.gf_numsrc) + num = gsf.gf_numsrc; + if (put_user(GROUP_FILTER_SIZE(num), optlen) || + copy_to_user(p, &gsf, size0)) + err = -EFAULT; + } + release_sock(sk); + return err; +} + +#ifdef CONFIG_COMPAT +static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, + int __user *optlen) +{ + const int size0 = offsetof(struct compat_group_filter, gf_slist); + struct compat_group_filter __user *p = optval; + struct compat_group_filter gf32; + struct group_filter gf; + int len, err; + int num; + + if (get_user(len, optlen)) + return -EFAULT; + if (len < size0) + return -EINVAL; + + if (copy_from_user(&gf32, p, size0)) + return -EFAULT; + gf.gf_interface = gf32.gf_interface; + gf.gf_fmode = gf32.gf_fmode; + num = gf.gf_numsrc = gf32.gf_numsrc; + gf.gf_group = gf32.gf_group; + + if (gf.gf_group.ss_family != AF_INET6) + return -EADDRNOTAVAIL; + + lock_sock(sk); + err = ip6_mc_msfget(sk, &gf, p->gf_slist); + release_sock(sk); + if (err) + return err; + if (num > gf.gf_numsrc) + num = gf.gf_numsrc; + len = GROUP_FILTER_SIZE(num) - (sizeof(gf)-sizeof(gf32)); + if (put_user(len, optlen) || + put_user(gf.gf_fmode, &p->gf_fmode) || + put_user(gf.gf_numsrc, &p->gf_numsrc)) + return -EFAULT; + return 0; +} +#endif + static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) { @@ -1094,33 +1165,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = sk->sk_family; break; case MCAST_MSFILTER: - { - struct group_filter __user *p = (void __user *)optval; - struct group_filter gsf; - const int size0 = offsetof(struct group_filter, gf_slist); - int num; - int err; - - if (len < size0) - return -EINVAL; - if (copy_from_user(&gsf, p, size0)) - return -EFAULT; - if (gsf.gf_group.ss_family != AF_INET6) - return -EADDRNOTAVAIL; - num = gsf.gf_numsrc; - lock_sock(sk); - err = ip6_mc_msfget(sk, &gsf, 
p->gf_slist); - if (!err) { - if (num > gsf.gf_numsrc) - num = gsf.gf_numsrc; - if (put_user(GROUP_FILTER_SIZE(num), optlen) || - copy_to_user(p, &gsf, size0)) - err = -EFAULT; - } - release_sock(sk); - return err; - } - + return ipv6_get_msfilter(sk, optval, optlen, len); case IPV6_2292PKTOPTIONS: { struct msghdr msg; @@ -1481,44 +1526,8 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, if (level != SOL_IPV6) return -ENOPROTOOPT; - if (optname == MCAST_MSFILTER) { - const int size0 = offsetof(struct compat_group_filter, gf_slist); - struct compat_group_filter __user *p = (void __user *)optval; - struct compat_group_filter gf32; - struct group_filter gf; - int ulen, err; - int num; - - if (get_user(ulen, optlen)) - return -EFAULT; - - if (ulen < size0) - return -EINVAL; - - if (copy_from_user(&gf32, p, size0)) - return -EFAULT; - - gf.gf_interface = gf32.gf_interface; - gf.gf_fmode = gf32.gf_fmode; - num = gf.gf_numsrc = gf32.gf_numsrc; - gf.gf_group = gf32.gf_group; - - if (gf.gf_group.ss_family != AF_INET6) - return -EADDRNOTAVAIL; - lock_sock(sk); - err = ip6_mc_msfget(sk, &gf, p->gf_slist); - release_sock(sk); - if (err) - return err; - if (num > gf.gf_numsrc) - num = gf.gf_numsrc; - ulen = GROUP_FILTER_SIZE(num) - (sizeof(gf)-sizeof(gf32)); - if (put_user(ulen, optlen) || - put_user(gf.gf_fmode, &p->gf_fmode) || - put_user(gf.gf_numsrc, &p->gf_numsrc)) - return -EFAULT; - return 0; - } + if (optname == MCAST_MSFILTER) + return compat_ipv6_get_msfilter(sk, optval, optlen); err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, MSG_CMSG_COMPAT); From ca0e65eb295411229a8dbdea01b9379e48d2bf39 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:28 +0200 Subject: [PATCH 1211/2056] net/ipv6: factor out MCAST_MSFILTER setsockopt helpers Factor out one helper each for setting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv6/ipv6_sockglue.c | 159 ++++++++++++++++++++------------------- 1 file changed, 83 insertions(+), 76 deletions(-) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index ef5656f876ac..6aa49495d7bc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -171,6 +171,87 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, return ip6_mc_source(add, omode, sk, greqs); } +static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, + int optlen) +{ + struct group_filter *gsf; + int ret; + + if (optlen < GROUP_FILTER_SIZE(0)) + return -EINVAL; + if (optlen > sysctl_optmem_max) + return -ENOBUFS; + + gsf = memdup_user(optval, optlen); + if (IS_ERR(gsf)) + return PTR_ERR(gsf); + + /* numsrc >= (4G-140)/128 overflow in 32 bits */ + ret = -ENOBUFS; + if (gsf->gf_numsrc >= 0x1ffffffU || + gsf->gf_numsrc > sysctl_mld_max_msf) + goto out_free_gsf; + + ret = -EINVAL; + if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) + goto out_free_gsf; + + ret = ip6_mc_msfilter(sk, gsf, gsf->gf_slist); +out_free_gsf: + kfree(gsf); + return ret; +} + +#ifdef CONFIG_COMPAT +static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, + int optlen) +{ + const int size0 = offsetof(struct compat_group_filter, gf_slist); + struct compat_group_filter *gf32; + void *p; + int ret; + int n; + + if (optlen < size0) + return -EINVAL; + if (optlen > sysctl_optmem_max - 4) + return -ENOBUFS; + + p = kmalloc(optlen + 4, GFP_KERNEL); + if (!p) + return -ENOMEM; + + gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ + ret = -EFAULT; + if (copy_from_user(gf32, optval, optlen)) + goto out_free_p; + + /* numsrc >= (4G-140)/128 overflow in 32 bits */ + ret = -ENOBUFS; + n = gf32->gf_numsrc; + if (n >= 0x1ffffffU || n > sysctl_mld_max_msf) + goto out_free_p; + + ret = -EINVAL; + if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) + goto out_free_p; + + rtnl_lock(); + lock_sock(sk); + ret = ip6_mc_msfilter(sk, &(struct group_filter){ + .gf_interface = gf32->gf_interface, + .gf_group = gf32->gf_group, + .gf_fmode = gf32->gf_fmode, + .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist); + release_sock(sk); + rtnl_unlock(); + +out_free_p: + kfree(p); + return ret; +} +#endif + static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -762,37 +843,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; } case MCAST_MSFILTER: - { - struct group_filter *gsf; - - if (optlen < GROUP_FILTER_SIZE(0)) - goto e_inval; - if (optlen > sysctl_optmem_max) { - retv = -ENOBUFS; - break; - } - gsf = memdup_user(optval, optlen); - if (IS_ERR(gsf)) { - retv = PTR_ERR(gsf); - break; - } - /* numsrc >= (4G-140)/128 overflow in 32 bits */ - if (gsf->gf_numsrc >= 0x1ffffffU || - gsf->gf_numsrc > sysctl_mld_max_msf) { - kfree(gsf); - retv = -ENOBUFS; - break; - } - if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) { - kfree(gsf); - retv = -EINVAL; - break; - } - retv = ip6_mc_msfilter(sk, gsf, gsf->gf_slist); - kfree(gsf); - + retv = ipv6_set_mcast_msfilter(sk, optval, optlen); break; - } case IPV6_ROUTER_ALERT: if (optlen < sizeof(int)) goto e_inval; @@ -977,52 +1029,7 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, return err; } case MCAST_MSFILTER: - { - const int size0 = offsetof(struct compat_group_filter, gf_slist); - struct compat_group_filter *gf32; - void *p; - int n; - - if (optlen < size0) - return -EINVAL; - if (optlen > 
sysctl_optmem_max - 4) - return -ENOBUFS; - - p = kmalloc(optlen + 4, GFP_KERNEL); - if (!p) - return -ENOMEM; - - gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ - if (copy_from_user(gf32, optval, optlen)) { - err = -EFAULT; - goto mc_msf_out; - } - - n = gf32->gf_numsrc; - /* numsrc >= (4G-140)/128 overflow in 32 bits */ - if (n >= 0x1ffffffU || - n > sysctl_mld_max_msf) { - err = -ENOBUFS; - goto mc_msf_out; - } - if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) { - err = -EINVAL; - goto mc_msf_out; - } - - rtnl_lock(); - lock_sock(sk); - err = ip6_mc_msfilter(sk, &(struct group_filter){ - .gf_interface = gf32->gf_interface, - .gf_group = gf32->gf_group, - .gf_fmode = gf32->gf_fmode, - .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist); - release_sock(sk); - rtnl_unlock(); -mc_msf_out: - kfree(p); - return err; - } + return compat_ipv6_set_mcast_msfilter(sk, optval, optlen); } err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); From fdf5bdd87c01571f3256a799bdc217e7cca35fbd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:29 +0200 Subject: [PATCH 1212/2056] net/ipv6: factor out mcast join/leave setsockopt helpers Factor out one helper each for setting the native and compat version of the MCAST_MSFILTER option. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv6/ipv6_sockglue.c | 103 ++++++++++++++++++++------------------- 1 file changed, 53 insertions(+), 50 deletions(-) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 6aa49495d7bc..1ea0cd12beae 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -252,6 +252,56 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, } #endif +static int ipv6_mcast_join_leave(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct sockaddr_in6 *psin6; + struct group_req greq; + + if (optlen < sizeof(greq)) + return -EINVAL; + if (copy_from_user(&greq, optval, sizeof(greq))) + return -EFAULT; + + if (greq.gr_group.ss_family != AF_INET6) + return -EADDRNOTAVAIL; + psin6 = (struct sockaddr_in6 *)&greq.gr_group; + if (optname == MCAST_JOIN_GROUP) + return ipv6_sock_mc_join(sk, greq.gr_interface, + &psin6->sin6_addr); + return ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); +} + +#ifdef CONFIG_COMPAT +static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct compat_group_req gr32; + struct sockaddr_in6 *psin6; + int err; + + if (optlen < sizeof(gr32)) + return -EINVAL; + if (copy_from_user(&gr32, optval, sizeof(gr32))) + return -EFAULT; + + if (gr32.gr_group.ss_family != AF_INET6) + return -EADDRNOTAVAIL; + rtnl_lock(); + lock_sock(sk); + psin6 = (struct sockaddr_in6 *)&gr32.gr_group; + if (optname == MCAST_JOIN_GROUP) + err = ipv6_sock_mc_join(sk, gr32.gr_interface, + &psin6->sin6_addr); + else + err = ipv6_sock_mc_drop(sk, gr32.gr_interface, + &psin6->sin6_addr); + release_sock(sk); + rtnl_unlock(); + return err; +} +#endif + static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -803,29 +853,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - { - struct group_req greq; - struct sockaddr_in6 *psin6; - - if (optlen < sizeof(struct group_req)) - goto e_inval; - - retv = -EFAULT; - if (copy_from_user(&greq, optval, sizeof(struct group_req))) - break; - if (greq.gr_group.ss_family 
!= AF_INET6) { - retv = -EADDRNOTAVAIL; - break; - } - psin6 = (struct sockaddr_in6 *)&greq.gr_group; - if (optname == MCAST_JOIN_GROUP) - retv = ipv6_sock_mc_join(sk, greq.gr_interface, - &psin6->sin6_addr); - else - retv = ipv6_sock_mc_drop(sk, greq.gr_interface, - &psin6->sin6_addr); + retv = ipv6_mcast_join_leave(sk, optname, optval, optlen); break; - } case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: @@ -975,34 +1004,8 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - { - struct compat_group_req __user *gr32 = (void __user *)optval; - struct group_req greq; - struct sockaddr_in6 *psin6 = (struct sockaddr_in6 *)&greq.gr_group; - - if (optlen < sizeof(struct compat_group_req)) - return -EINVAL; - - if (get_user(greq.gr_interface, &gr32->gr_interface) || - copy_from_user(&greq.gr_group, &gr32->gr_group, - sizeof(greq.gr_group))) - return -EFAULT; - - if (greq.gr_group.ss_family != AF_INET6) - return -EADDRNOTAVAIL; - - rtnl_lock(); - lock_sock(sk); - if (optname == MCAST_JOIN_GROUP) - err = ipv6_sock_mc_join(sk, greq.gr_interface, - &psin6->sin6_addr); - else - err = ipv6_sock_mc_drop(sk, greq.gr_interface, - &psin6->sin6_addr); - release_sock(sk); - rtnl_unlock(); - return err; - } + return compat_ipv6_mcast_join_leave(sk, optname, optval, + optlen); case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: From 3021ad529950d07e0408d65d0f1df00454c1d223 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:30 +0200 Subject: [PATCH 1213/2056] net/ipv6: remove compat_ipv6_{get,set}sockopt Handle the few cases that need special treatment in-line using in_compat_syscall(). This also removes all the now unused compat_{get,set}sockopt methods. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/net/inet_connection_sock.h | 13 -- include/net/ipv6.h | 4 - include/net/sctp/structs.h | 10 -- include/net/sock.h | 8 -- include/net/tcp.h | 4 - net/core/sock.c | 10 -- net/dccp/dccp.h | 6 - net/dccp/ipv4.c | 4 - net/dccp/ipv6.c | 12 -- net/dccp/proto.c | 26 ---- net/ipv4/inet_connection_sock.c | 28 ----- net/ipv4/tcp.c | 24 ---- net/ipv4/tcp_ipv4.c | 4 - net/ipv6/ipv6_sockglue.c | 187 ++++++++--------------------- net/ipv6/raw.c | 50 -------- net/ipv6/tcp_ipv6.c | 12 -- net/ipv6/udp.c | 25 ---- net/ipv6/udp_impl.h | 6 - net/ipv6/udplite.c | 4 - net/l2tp/l2tp_ip6.c | 4 - net/sctp/ipv6.c | 4 - 21 files changed, 53 insertions(+), 392 deletions(-) diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index e5b388f5fa20..157c60cca0ca 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -48,14 +48,6 @@ struct inet_connection_sock_af_ops { char __user *optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct sock *sk, - int level, int optname, - char __user *optval, unsigned int optlen); - int (*compat_getsockopt)(struct sock *sk, - int level, int optname, - char __user *optval, int __user *optlen); -#endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); void (*mtu_reduced)(struct sock *sk); }; @@ -311,11 +303,6 @@ void inet_csk_listen_stop(struct sock *sk); void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); -int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); - struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu); #define TCP_PINGPONG_THRESH 3 diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5e65bf2fd32d..262fc88dbd7e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1088,10 +1088,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int compat_ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 9bbb2f60db92..233bbf7df5d6 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -438,16 +438,6 @@ struct sctp_af { int optname, char __user *optval, int __user *optlen); - int (*compat_setsockopt) (struct sock *sk, - int level, - int optname, - char __user *optval, - unsigned int optlen); - int (*compat_getsockopt) (struct sock *sk, - int level, - int optname, - char __user *optval, - int __user *optlen); void (*get_dst) (struct sctp_transport *t, union sctp_addr *saddr, struct flowi *fl, diff --git a/include/net/sock.h b/include/net/sock.h index 1fd7cf5fc751..3bd8bc578bf3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1147,14 +1147,6 @@ struct proto { int __user *option); void (*keepalive)(struct sock *sk, int valbool); #ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct sock *sk, - int level, - int optname, char __user *optval, - 
unsigned int optlen); - int (*compat_getsockopt)(struct sock *sk, - int level, - int optname, char __user *optval, - int __user *option); int (*compat_ioctl)(struct sock *sk, unsigned int cmd, unsigned long arg); #endif diff --git a/include/net/tcp.h b/include/net/tcp.h index d62e24533518..9f7f7c0c1104 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -401,10 +401,6 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -int compat_tcp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int compat_tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/core/sock.c b/net/core/sock.c index 018404d17626..48655d5c4cf3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3199,11 +3199,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; -#ifdef CONFIG_COMPAT - if (in_compat_syscall() && sk->sk_prot->compat_getsockopt) - return sk->sk_prot->compat_getsockopt(sk, level, optname, - optval, optlen); -#endif return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_getsockopt); @@ -3231,11 +3226,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; -#ifdef CONFIG_COMPAT - if (in_compat_syscall() && sk->sk_prot->compat_setsockopt) - return sk->sk_prot->compat_setsockopt(sk, level, optname, - optval, optlen); -#endif return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(sock_common_setsockopt); diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 7dce4f6c7025..434eea91b767 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -296,12 +296,6 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -#ifdef CONFIG_COMPAT -int compat_dccp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -int compat_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -#endif int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b91373eb1c79..9c28c8251125 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -959,10 +959,6 @@ static struct proto dccp_v4_prot = { .rsk_prot = &dccp_request_sock_ops, .twsk_prot = &dccp_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_dccp_setsockopt, - .compat_getsockopt = compat_dccp_getsockopt, -#endif }; static const struct net_protocol dccp_v4_protocol = { diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index b50f85a72cd5..ef4ab28cfde0 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -970,10 +970,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len =
sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; /* @@ -990,10 +986,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; /* NOTE: A lot of things set to zero explicitly by call to @@ -1049,10 +1041,6 @@ static struct proto dccp_v6_prot = { .rsk_prot = &dccp6_request_sock_ops, .twsk_prot = &dccp6_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_dccp_setsockopt, - .compat_getsockopt = compat_dccp_getsockopt, -#endif }; static const struct inet6_protocol dccp_v6_protocol = { diff --git a/net/dccp/proto.c b/net/dccp/proto.c index c13b6609474b..fd92d3fe321f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -575,19 +575,6 @@ int dccp_setsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL_GPL(dccp_setsockopt); -#ifdef CONFIG_COMPAT -int compat_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_DCCP) - return inet_csk_compat_setsockopt(sk, level, optname, - optval, optlen); - return do_dccp_setsockopt(sk, level, optname, optval, optlen); -} - -EXPORT_SYMBOL_GPL(compat_dccp_setsockopt); -#endif - static int dccp_getsockopt_service(struct sock *sk, int len, __be32 __user *optval, int __user *optlen) @@ -696,19 +683,6 @@ int dccp_getsockopt(struct sock *sk, int level, int optname, EXPORT_SYMBOL_GPL(dccp_getsockopt); -#ifdef CONFIG_COMPAT -int compat_dccp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_DCCP) - return inet_csk_compat_getsockopt(sk, level, optname, - optval, optlen); - return do_dccp_getsockopt(sk, level, optname, optval, optlen); -} - -EXPORT_SYMBOL_GPL(compat_dccp_getsockopt); -#endif - static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb) { struct cmsghdr *cmsg; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 22b0e7336360..d1a3913eebe0 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1057,34 +1057,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) } EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); -#ifdef CONFIG_COMPAT -int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_af_ops->compat_getsockopt) - return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname, - optval, optlen); - return icsk->icsk_af_ops->getsockopt(sk, level, optname, - optval, optlen); -} -EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); - -int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - const struct inet_connection_sock *icsk = inet_csk(sk); - - if (icsk->icsk_af_ops->compat_setsockopt) - return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname, - optval, optlen); - return icsk->icsk_af_ops->setsockopt(sk, level, optname, - optval, optlen); -} -EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); -#endif - static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl) { const struct inet_sock *inet = 
inet_sk(sk); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 254b6a4cc95b..58ede3d62b2e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3337,18 +3337,6 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, } EXPORT_SYMBOL(tcp_setsockopt); -#ifdef CONFIG_COMPAT -int compat_tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level != SOL_TCP) - return inet_csk_compat_setsockopt(sk, level, optname, - optval, optlen); - return do_tcp_setsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_tcp_setsockopt); -#endif - static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, struct tcp_info *info) { @@ -3896,18 +3884,6 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, } EXPORT_SYMBOL(tcp_getsockopt); -#ifdef CONFIG_COMPAT -int compat_tcp_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level != SOL_TCP) - return inet_csk_compat_getsockopt(sk, level, optname, - optval, optlen); - return do_tcp_getsockopt(sk, level, optname, optval, optlen); -} -EXPORT_SYMBOL(compat_tcp_getsockopt); -#endif - #ifdef CONFIG_TCP_MD5SIG static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); static DEFINE_MUTEX(tcp_md5sig_mutex); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e5b7ef9a2887..cd81b6e04efb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2769,10 +2769,6 @@ struct proto tcp_prot = { .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_tcp_setsockopt, - .compat_getsockopt = compat_tcp_getsockopt, -#endif .diag_destroy = tcp_abort, }; EXPORT_SYMBOL(tcp_prot); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 1ea0cd12beae..add8f7912299 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -136,13 +136,42 @@ static bool setsockopt_needs_rtnl(int optname) return false; } -static int do_ipv6_mcast_group_source(struct sock *sk, int optname, - struct group_source_req *greqs) +static int copy_group_source_from_user(struct group_source_req *greqs, + void __user *optval, int optlen) { - int omode, add; + if (in_compat_syscall()) { + struct compat_group_source_req gr32; - if (greqs->gsr_group.ss_family != AF_INET6 || - greqs->gsr_source.ss_family != AF_INET6) + if (optlen < sizeof(gr32)) + return -EINVAL; + if (copy_from_user(&gr32, optval, sizeof(gr32))) + return -EFAULT; + greqs->gsr_interface = gr32.gsr_interface; + greqs->gsr_group = gr32.gsr_group; + greqs->gsr_source = gr32.gsr_source; + } else { + if (optlen < sizeof(*greqs)) + return -EINVAL; + if (copy_from_user(greqs, optval, sizeof(*greqs))) + return -EFAULT; + } + + return 0; +} + +static int do_ipv6_mcast_group_source(struct sock *sk, int optname, + void __user *optval, int optlen) +{ + struct group_source_req greqs; + int omode, add; + int ret; + + ret = copy_group_source_from_user(&greqs, optval, optlen); + if (ret) + return ret; + + if (greqs.gsr_group.ss_family != AF_INET6 || + greqs.gsr_source.ss_family != AF_INET6) return -EADDRNOTAVAIL; if (optname == MCAST_BLOCK_SOURCE) { @@ -155,8 +184,8 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, struct sockaddr_in6 *psin6; int retv; - psin6 = (struct sockaddr_in6 *)&greqs->gsr_group; - retv = ipv6_sock_mc_join_ssm(sk, greqs->gsr_interface, + psin6 = (struct sockaddr_in6 *)&greqs.gsr_group; + retv = ipv6_sock_mc_join_ssm(sk, 
greqs.gsr_interface, &psin6->sin6_addr, MCAST_INCLUDE); /* prior join w/ different source is ok */ @@ -168,7 +197,7 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, omode = MCAST_INCLUDE; add = 0; } - return ip6_mc_source(add, omode, sk, greqs); + return ip6_mc_source(add, omode, sk, &greqs); } static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, @@ -202,7 +231,6 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, return ret; } -#ifdef CONFIG_COMPAT static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, int optlen) { @@ -236,21 +264,16 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) goto out_free_p; - rtnl_lock(); - lock_sock(sk); ret = ip6_mc_msfilter(sk, &(struct group_filter){ .gf_interface = gf32->gf_interface, .gf_group = gf32->gf_group, .gf_fmode = gf32->gf_fmode, .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist); - release_sock(sk); - rtnl_unlock(); out_free_p: kfree(p); return ret; } -#endif static int ipv6_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) @@ -272,13 +295,11 @@ static int ipv6_mcast_join_leave(struct sock *sk, int optname, return ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr); } -#ifdef CONFIG_COMPAT static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, void __user *optval, int optlen) { struct compat_group_req gr32; struct sockaddr_in6 *psin6; - int err; if (optlen < sizeof(gr32)) return -EINVAL; @@ -287,20 +308,12 @@ static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, if (gr32.gr_group.ss_family != AF_INET6) return -EADDRNOTAVAIL; - rtnl_lock(); - lock_sock(sk); psin6 = (struct sockaddr_in6 *)&gr32.gr_group; if (optname == MCAST_JOIN_GROUP) - err = ipv6_sock_mc_join(sk, gr32.gr_interface, + return ipv6_sock_mc_join(sk, gr32.gr_interface, &psin6->sin6_addr); - else - err = ipv6_sock_mc_drop(sk, gr32.gr_interface, - &psin6->sin6_addr); - release_sock(sk); - rtnl_unlock(); - return err; + return ipv6_sock_mc_drop(sk, gr32.gr_interface, &psin6->sin6_addr); } -#endif static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) @@ -853,26 +866,25 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case MCAST_JOIN_GROUP: case MCAST_LEAVE_GROUP: - retv = ipv6_mcast_join_leave(sk, optname, optval, optlen); + if (in_compat_syscall()) + retv = compat_ipv6_mcast_join_leave(sk, optname, optval, + optlen); + else + retv = ipv6_mcast_join_leave(sk, optname, optval, + optlen); break; case MCAST_JOIN_SOURCE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: - { - struct group_source_req greqs; - - if (optlen < sizeof(struct group_source_req)) - goto e_inval; - if (copy_from_user(&greqs, optval, sizeof(greqs))) { - retv = -EFAULT; - break; - } - retv = do_ipv6_mcast_group_source(sk, optname, &greqs); + retv = do_ipv6_mcast_group_source(sk, optname, optval, optlen); break; - } case MCAST_MSFILTER: - retv = ipv6_set_mcast_msfilter(sk, optval, optlen); + if (in_compat_syscall()) + retv = compat_ipv6_set_mcast_msfilter(sk, optval, + optlen); + else + retv = ipv6_set_mcast_msfilter(sk, optval, optlen); break; case IPV6_ROUTER_ALERT: if (optlen < sizeof(int)) @@ -989,64 +1001,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, } EXPORT_SYMBOL(ipv6_setsockopt); -#ifdef CONFIG_COMPAT -int 
compat_ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - int err; - - if (level == SOL_IP && sk->sk_type != SOCK_RAW) - return udp_prot.setsockopt(sk, level, optname, optval, optlen); - - if (level != SOL_IPV6) - return -ENOPROTOOPT; - - switch (optname) { - case MCAST_JOIN_GROUP: - case MCAST_LEAVE_GROUP: - return compat_ipv6_mcast_join_leave(sk, optname, optval, - optlen); - case MCAST_JOIN_SOURCE_GROUP: - case MCAST_LEAVE_SOURCE_GROUP: - case MCAST_BLOCK_SOURCE: - case MCAST_UNBLOCK_SOURCE: - { - struct compat_group_source_req __user *gsr32 = (void __user *)optval; - struct group_source_req greqs; - - if (optlen < sizeof(struct compat_group_source_req)) - return -EINVAL; - - if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) || - copy_from_user(&greqs.gsr_group, &gsr32->gsr_group, - sizeof(greqs.gsr_group)) || - copy_from_user(&greqs.gsr_source, &gsr32->gsr_source, - sizeof(greqs.gsr_source))) - return -EFAULT; - - rtnl_lock(); - lock_sock(sk); - err = do_ipv6_mcast_group_source(sk, optname, &greqs); - release_sock(sk); - rtnl_unlock(); - return err; - } - case MCAST_MSFILTER: - return compat_ipv6_set_mcast_msfilter(sk, optval, optlen); - } - - err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && - optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); -#endif - return err; -} -EXPORT_SYMBOL(compat_ipv6_setsockopt); -#endif - static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt, int optname, char __user *optval, int len) { @@ -1110,7 +1064,6 @@ static int ipv6_get_msfilter(struct sock *sk, void __user *optval, return err; } -#ifdef CONFIG_COMPAT static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, int __user *optlen) { @@ -1150,7 +1103,6 @@ static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval, return -EFAULT; return 0; } -#endif static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen, unsigned int flags) @@ -1175,6 +1127,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = sk->sk_family; break; case MCAST_MSFILTER: + if (in_compat_syscall()) + return compat_ipv6_get_msfilter(sk, optval, optlen); return ipv6_get_msfilter(sk, optval, optlen, len); case IPV6_2292PKTOPTIONS: { @@ -1523,38 +1477,3 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname, return err; } EXPORT_SYMBOL(ipv6_getsockopt); - -#ifdef CONFIG_COMPAT -int compat_ipv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - int err; - - if (level == SOL_IP && sk->sk_type != SOCK_RAW) - return udp_prot.getsockopt(sk, level, optname, optval, optlen); - - if (level != SOL_IPV6) - return -ENOPROTOOPT; - - if (optname == MCAST_MSFILTER) - return compat_ipv6_get_msfilter(sk, optval, optlen); - - err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, - MSG_CMSG_COMPAT); -#ifdef CONFIG_NETFILTER - /* we need to exclude all possible ENOPROTOOPTs except default case */ - if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) { - int len; - - if (get_user(len, optlen)) - return -EFAULT; - - err = nf_getsockopt(sk, PF_INET6, optname, optval, &len); - if (err >= 0) - err = put_user(len, optlen); - } -#endif - return err; -} -EXPORT_SYMBOL(compat_ipv6_getsockopt); 
-#endif diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index e23c6b461758..594e01ad670a 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1084,30 +1084,6 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname, return do_rawv6_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - switch (level) { - case SOL_RAW: - break; - case SOL_ICMPV6: - if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) - return -EOPNOTSUPP; - return rawv6_seticmpfilter(sk, level, optname, optval, optlen); - case SOL_IPV6: - if (optname == IPV6_CHECKSUM || - optname == IPV6_HDRINCL) - break; - fallthrough; - default: - return compat_ipv6_setsockopt(sk, level, optname, - optval, optlen); - } - return do_rawv6_setsockopt(sk, level, optname, optval, optlen); -} -#endif - static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -1169,30 +1145,6 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname, return do_rawv6_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - switch (level) { - case SOL_RAW: - break; - case SOL_ICMPV6: - if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) - return -EOPNOTSUPP; - return rawv6_geticmpfilter(sk, level, optname, optval, optlen); - case SOL_IPV6: - if (optname == IPV6_CHECKSUM || - optname == IPV6_HDRINCL) - break; - fallthrough; - default: - return compat_ipv6_getsockopt(sk, level, optname, - optval, optlen); - } - return do_rawv6_getsockopt(sk, level, optname, optval, optlen); -} -#endif - static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { @@ -1297,8 +1249,6 @@ struct proto rawv6_prot = { .usersize = sizeof_field(struct raw6_sock, filter), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT - .compat_setsockopt = compat_rawv6_setsockopt, - .compat_getsockopt = compat_rawv6_getsockopt, .compat_ioctl = compat_rawv6_ioctl, #endif .diag_destroy = raw_abort, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 4502db706f75..c34b7834fd84 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1831,10 +1831,6 @@ const struct inet_connection_sock_af_ops ipv6_specific = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif .mtu_reduced = tcp_v6_mtu_reduced, }; @@ -1861,10 +1857,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif .mtu_reduced = tcp_v4_mtu_reduced, }; @@ -2122,10 +2114,6 @@ struct proto tcpv6_prot = { .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_tcp_setsockopt, - .compat_getsockopt = compat_tcp_getsockopt, -#endif .diag_destroy = tcp_abort, }; EXPORT_SYMBOL_GPL(tcpv6_prot); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 38c0d9350c6b..5aff0856a05b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1570,17 +1570,6 @@ int 
udpv6_setsockopt(struct sock *sk, int level, int optname, return ipv6_setsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, - udp_v6_push_pending_frames); - return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); -} -#endif - int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -1589,16 +1578,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, return ipv6_getsockopt(sk, level, optname, optval, optlen); } -#ifdef CONFIG_COMPAT -int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen) -{ - if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_getsockopt(sk, level, optname, optval, optlen); - return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); -} -#endif - /* thinking of making this const? Don't. * early_demux can change based on sysctl. */ @@ -1681,10 +1660,6 @@ struct proto udpv6_prot = { .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udp_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udpv6_setsockopt, - .compat_getsockopt = compat_udpv6_getsockopt, -#endif .diag_destroy = udp_abort, }; diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 20e324b6f358..30dfb6f1b762 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -19,12 +19,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); -#ifdef CONFIG_COMPAT -int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); -int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, - char __user *optval, int __user *optlen); -#endif int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index bf7a7acd39b1..fbb700d3f437 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -52,10 +52,6 @@ struct proto udplitev6_prot = { .sysctl_mem = sysctl_udp_mem, .obj_size = sizeof(struct udp6_sock), .h.udp_table = &udplite_table, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_udpv6_setsockopt, - .compat_getsockopt = compat_udpv6_getsockopt, -#endif }; static struct inet_protosw udplite6_protosw = { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 2cdc0b7a7a43..4799bec87b33 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -745,10 +745,6 @@ static struct proto l2tp_ip6_prot = { .hash = l2tp_ip6_hash, .unhash = l2tp_ip6_unhash, .obj_size = sizeof(struct l2tp_ip6_sock), -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; static const struct proto_ops l2tp_ip6_ops = { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index ebda31b7747d..aea2a982984d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -1087,10 +1087,6 @@ static struct sctp_af sctp_af_inet6 = { .net_header_len = sizeof(struct ipv6hdr), .sockaddr_len = sizeof(struct sockaddr_in6), .ip_options_len = 
sctp_v6_ip_options_len, -#ifdef CONFIG_COMPAT - .compat_setsockopt = compat_ipv6_setsockopt, - .compat_getsockopt = compat_ipv6_getsockopt, -#endif }; static struct sctp_pf sctp_pf_inet6 = { From a44d9e72100f7044ac46e4e6dc475f5b4097830f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 17 Jul 2020 08:23:31 +0200 Subject: [PATCH 1214/2056] net: make ->{get,set}sockopt in proto_ops optional Just check for a NULL method instead of wiring up sock_no_{get,set}sockopt. Signed-off-by: Christoph Hellwig Acked-by: Marc Kleine-Budde Signed-off-by: David S. Miller --- crypto/af_alg.c | 1 - crypto/algif_aead.c | 4 ---- crypto/algif_hash.c | 4 ---- crypto/algif_rng.c | 2 -- crypto/algif_skcipher.c | 4 ---- drivers/isdn/mISDN/socket.c | 2 -- drivers/net/ppp/pppoe.c | 2 -- drivers/net/ppp/pptp.c | 2 -- include/net/sock.h | 2 -- net/appletalk/ddp.c | 2 -- net/bluetooth/bnep/sock.c | 2 -- net/bluetooth/cmtp/sock.c | 2 -- net/bluetooth/hidp/sock.c | 2 -- net/caif/caif_socket.c | 2 -- net/can/bcm.c | 2 -- net/core/sock.c | 14 -------------- net/key/af_key.c | 2 -- net/nfc/llcp_sock.c | 2 -- net/nfc/rawsock.c | 4 ---- net/packet/af_packet.c | 2 -- net/phonet/socket.c | 2 -- net/qrtr/qrtr.c | 2 -- net/smc/af_smc.c | 9 +++++++-- net/socket.c | 4 ++++ net/unix/af_unix.c | 6 ------ net/vmw_vsock/af_vsock.c | 2 -- 26 files changed, 11 insertions(+), 73 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 28fc323e3fe3..29f71428520b 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -335,7 +335,6 @@ static const struct proto_ops alg_proto_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, .sendmsg = sock_no_sendmsg, diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0ae000a61c7f..527d09a69462 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c @@ -361,11 +361,9 @@ static struct proto_ops algif_aead_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg, @@ -454,11 +452,9 @@ static struct proto_ops algif_aead_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = aead_sendmsg_nokey, diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index e71727c25a7d..50f7b22f1b48 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -279,10 +279,8 @@ static struct proto_ops algif_hash_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg, @@ -383,10 +381,8 @@ static struct proto_ops algif_hash_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = hash_sendmsg_nokey, diff --git a/crypto/algif_rng.c b/crypto/algif_rng.c index 087c0ad09d38..6300e0566dc5 100644 --- a/crypto/algif_rng.c +++ b/crypto/algif_rng.c @@ -101,11 +101,9 @@ 
static struct proto_ops algif_rng_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .sendmsg = sock_no_sendmsg, .sendpage = sock_no_sendpage, diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index ec5567c87a6d..c487887f4671 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -188,11 +188,9 @@ static struct proto_ops algif_skcipher_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg, @@ -281,11 +279,9 @@ static struct proto_ops algif_skcipher_ops_nokey = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, - .setsockopt = sock_no_setsockopt, .release = af_alg_release, .sendmsg = skcipher_sendmsg_nokey, diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index dff4132b3702..1b2b91479107 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -738,8 +738,6 @@ static const struct proto_ops base_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index beedaad08255..d7f50b835050 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -1110,8 +1110,6 @@ static const struct proto_ops pppoe_ops = { .poll = datagram_poll, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = pppoe_sendmsg, .recvmsg = pppoe_recvmsg, .mmap = sock_no_mmap, diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index acccb747aeda..ee5058445d06 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -618,8 +618,6 @@ static const struct proto_ops pptp_ops = { .getname = pptp_getname, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .mmap = sock_no_mmap, diff --git a/include/net/sock.h b/include/net/sock.h index 3bd8bc578bf3..62e18fc8ac9f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1714,8 +1714,6 @@ int sock_no_getname(struct socket *, struct sockaddr *, int); int sock_no_ioctl(struct socket *, unsigned int, unsigned long); int sock_no_listen(struct socket *, int); int sock_no_shutdown(struct socket *, int); -int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); -int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len); int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index 15787e8c0629..1d48708c5a2e 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1917,8 +1917,6 @@ static const struct proto_ops atalk_dgram_ops = { 
#endif .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = atalk_sendmsg, .recvmsg = atalk_recvmsg, .mmap = sock_no_mmap, diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index cfd83c5521ae..d515571b2afb 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -182,8 +182,6 @@ static const struct proto_ops bnep_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c index defdd4871919..96d49d9fae96 100644 --- a/net/bluetooth/cmtp/sock.c +++ b/net/bluetooth/cmtp/sock.c @@ -185,8 +185,6 @@ static const struct proto_ops cmtp_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c index 03be6a4baef3..595fb3c9d6c3 100644 --- a/net/bluetooth/hidp/sock.c +++ b/net/bluetooth/hidp/sock.c @@ -233,8 +233,6 @@ static const struct proto_ops hidp_sock_ops = { .recvmsg = sock_no_recvmsg, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index ef14da50a981..b94ecd931002 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -981,7 +981,6 @@ static const struct proto_ops caif_seqpacket_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = caif_seqpkt_sendmsg, .recvmsg = caif_seqpkt_recvmsg, .mmap = sock_no_mmap, @@ -1002,7 +1001,6 @@ static const struct proto_ops caif_stream_ops = { .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = caif_stream_sendmsg, .recvmsg = caif_stream_recvmsg, .mmap = sock_no_mmap, diff --git a/net/can/bcm.c b/net/can/bcm.c index c96fa0f33db3..d14ea12affb1 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -1648,8 +1648,6 @@ static const struct proto_ops bcm_ops = { .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, diff --git a/net/core/sock.c b/net/core/sock.c index 48655d5c4cf3..d828bfe1c47d 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2783,20 +2783,6 @@ int sock_no_shutdown(struct socket *sock, int how) } EXPORT_SYMBOL(sock_no_shutdown); -int sock_no_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) -{ - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(sock_no_setsockopt); - -int sock_no_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) -{ - return -EOPNOTSUPP; -} -EXPORT_SYMBOL(sock_no_getsockopt); - int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) { return -EOPNOTSUPP; diff --git a/net/key/af_key.c b/net/key/af_key.c index 
b67ed3a8486c..f13626c1a985 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3734,8 +3734,6 @@ static const struct proto_ops pfkey_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 28604414dec1..6da1e2334bb6 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -921,8 +921,6 @@ static const struct proto_ops llcp_rawsock_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = llcp_sock_recvmsg, .mmap = sock_no_mmap, diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index ba5ffd3badd3..b2061b6746ea 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -276,8 +276,6 @@ static const struct proto_ops rawsock_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = rawsock_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, @@ -296,8 +294,6 @@ static const struct proto_ops rawsock_raw_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = sock_no_sendmsg, .recvmsg = rawsock_recvmsg, .mmap = sock_no_mmap, diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 35aee9e98053..c240fb5de3f0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4503,8 +4503,6 @@ static const struct proto_ops packet_ops_spkt = { .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = packet_sendmsg_spkt, .recvmsg = packet_recvmsg, .mmap = sock_no_mmap, diff --git a/net/phonet/socket.c b/net/phonet/socket.c index 87c60f83c180..2599235d592e 100644 --- a/net/phonet/socket.c +++ b/net/phonet/socket.c @@ -439,8 +439,6 @@ const struct proto_ops phonet_dgram_ops = { .ioctl = pn_socket_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = pn_socket_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 24a8c3c6da0d..0cb4adfc6641 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1208,8 +1208,6 @@ static const struct proto_ops qrtr_proto_ops = { .gettstamp = sock_gettstamp, .poll = datagram_poll, .shutdown = sock_no_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .release = qrtr_release, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 903321543838..9711c9e0e515 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1742,8 +1742,11 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, /* generic setsockopts reaching us here always apply to the * CLC socket */ - rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, - optval, optlen); + if (unlikely(!smc->clcsock->ops->setsockopt)) + rc = -EOPNOTSUPP; + else + rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, + optval, optlen); if (smc->clcsock->sk->sk_err) { sk->sk_err = smc->clcsock->sk->sk_err; sk->sk_error_report(sk); @@ -1808,6 +1811,8 
@@ static int smc_getsockopt(struct socket *sock, int level, int optname, smc = smc_sk(sock->sk); /* socket options apply to the CLC socket */ + if (unlikely(!smc->clcsock->ops->getsockopt)) + return -EOPNOTSUPP; return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, optval, optlen); } diff --git a/net/socket.c b/net/socket.c index dec345982abb..93846568c2fb 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2131,6 +2131,8 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) err = sock_setsockopt(sock, level, optname, optval, optlen); + else if (unlikely(!sock->ops->setsockopt)) + err = -EOPNOTSUPP; else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); @@ -2175,6 +2177,8 @@ int __sys_getsockopt(int fd, int level, int optname, char __user *optval, if (level == SOL_SOCKET) err = sock_getsockopt(sock, level, optname, optval, optlen); + else if (unlikely(!sock->ops->getsockopt)) + err = -EOPNOTSUPP; else err = sock->ops->getsockopt(sock, level, optname, optval, optlen); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3385a7a0b231..181ea6fb56a6 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -714,8 +714,6 @@ static const struct proto_ops unix_stream_ops = { #endif .listen = unix_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_stream_sendmsg, .recvmsg = unix_stream_recvmsg, .mmap = sock_no_mmap, @@ -741,8 +739,6 @@ static const struct proto_ops unix_dgram_ops = { #endif .listen = sock_no_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_dgram_sendmsg, .recvmsg = unix_dgram_recvmsg, .mmap = sock_no_mmap, @@ -767,8 +763,6 @@ static const struct proto_ops unix_seqpacket_ops = { #endif .listen = unix_listen, .shutdown = unix_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = unix_seqpacket_sendmsg, .recvmsg = unix_seqpacket_recvmsg, .mmap = sock_no_mmap, diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 626bf9044418..df204c6761c4 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1202,8 +1202,6 @@ static const struct proto_ops vsock_dgram_ops = { .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = vsock_shutdown, - .setsockopt = sock_no_setsockopt, - .getsockopt = sock_no_getsockopt, .sendmsg = vsock_dgram_sendmsg, .recvmsg = vsock_dgram_recvmsg, .mmap = sock_no_mmap, From ca84bd058daefef1d184dd0a7b6b43c67339e72c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:38 +0200 Subject: [PATCH 1215/2056] sctp: copy the optval from user space in sctp_setsockopt Prepare for moving the copy_from_user from the individual sockopts to the main setsockopt helper. As of this commit the kopt variable is not used yet, but the following commits will start using it. Signed-off-by: Christoph Hellwig Signed-off-by: David S.
Miller --- net/sctp/socket.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d57e1a002ffc..af1ebc8313d3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4677,6 +4677,7 @@ static int sctp_setsockopt_pf_expose(struct sock *sk, static int sctp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { + void *kopt = NULL; int retval = 0; pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); @@ -4693,6 +4694,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, goto out_nounlock; } + if (optlen > 0) { + kopt = memdup_user(optval, optlen); + if (IS_ERR(kopt)) + return PTR_ERR(kopt); + } + lock_sock(sk); switch (optname) { @@ -4878,6 +4885,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, } release_sock(sk); + kfree(kopt); out_nounlock: return retval; From 8c7517f54c8f24f3633383e8b552c1858fe95389 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:39 +0200 Subject: [PATCH 1216/2056] sctp: pass a kernel pointer to sctp_setsockopt_bindx Rename sctp_setsockopt_bindx_kernel back to sctp_setsockopt_bindx, and use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer in the old sctp_setsockopt_bindx. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index af1ebc8313d3..85ba5155b177 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -979,9 +979,8 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) * * Returns 0 if ok, <0 errno code on error. */ -static int sctp_setsockopt_bindx_kernel(struct sock *sk, - struct sockaddr *addrs, int addrs_size, - int op) +static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr *addrs, + int addrs_size, int op) { int err; int addrcnt = 0; @@ -991,7 +990,7 @@ static int sctp_setsockopt_bindx_kernel(struct sock *sk, struct sctp_af *af; pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", - __func__, sk, addrs, addrs_size, op); + __func__, sk, addr_buf, addrs_size, op); if (unlikely(addrs_size <= 0)) return -EINVAL; @@ -1037,29 +1036,13 @@ static int sctp_setsockopt_bindx_kernel(struct sock *sk, } } -static int sctp_setsockopt_bindx(struct sock *sk, - struct sockaddr __user *addrs, - int addrs_size, int op) -{ - struct sockaddr *kaddrs; - int err; - - kaddrs = memdup_user(addrs, addrs_size); - if (IS_ERR(kaddrs)) - return PTR_ERR(kaddrs); - err = sctp_setsockopt_bindx_kernel(sk, kaddrs, addrs_size, op); - kfree(kaddrs); - return err; -} - static int sctp_bind_add(struct sock *sk, struct sockaddr *addrs, int addrlen) { int err; lock_sock(sk); - err = sctp_setsockopt_bindx_kernel(sk, addrs, addrlen, - SCTP_BINDX_ADD_ADDR); + err = sctp_setsockopt_bindx(sk, addrs, addrlen, SCTP_BINDX_ADD_ADDR); release_sock(sk); return err; } @@ -4705,14 +4688,14 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, switch (optname) { case SCTP_SOCKOPT_BINDX_ADD: /* 'optlen' is the size of the addresses buffer. */ - retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, - optlen, SCTP_BINDX_ADD_ADDR); + retval = sctp_setsockopt_bindx(sk, kopt, optlen, + SCTP_BINDX_ADD_ADDR); break; case SCTP_SOCKOPT_BINDX_REM: /* 'optlen' is the size of the addresses buffer. 
*/ - retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, - optlen, SCTP_BINDX_REM_ADDR); + retval = sctp_setsockopt_bindx(sk, kopt, optlen, + SCTP_BINDX_REM_ADDR); break; case SCTP_SOCKOPT_CONNECTX_OLD: From ce5b2f8929df3ecac3cea44371637241ba816827 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:40 +0200 Subject: [PATCH 1217/2056] sctp: pass a kernel pointer to __sctp_setsockopt_connectx Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 50 ++++++++++++++++++----------------------------- 1 file changed, 19 insertions(+), 31 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 85ba5155b177..44cf2848146a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1286,36 +1286,29 @@ static int __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, * it. * * sk The sk of the socket - * addrs The pointer to the addresses in user land + * addrs The pointer to the addresses * addrssize Size of the addrs buffer * * Returns >=0 if ok, <0 errno code on error. */ -static int __sctp_setsockopt_connectx(struct sock *sk, - struct sockaddr __user *addrs, - int addrs_size, - sctp_assoc_t *assoc_id) +static int __sctp_setsockopt_connectx(struct sock *sk, struct sockaddr *kaddrs, + int addrs_size, sctp_assoc_t *assoc_id) { - struct sockaddr *kaddrs; int err = 0, flags = 0; pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n", - __func__, sk, addrs, addrs_size); + __func__, sk, kaddrs, addrs_size); /* make sure the 1st addr's sa_family is accessible later */ if (unlikely(addrs_size < sizeof(sa_family_t))) return -EINVAL; - kaddrs = memdup_user(addrs, addrs_size); - if (IS_ERR(kaddrs)) - return PTR_ERR(kaddrs); - /* Allow security module to validate connectx addresses. */ err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX, (struct sockaddr *)kaddrs, addrs_size); if (err) - goto out_free; + return err; /* in-kernel sockets don't generally have a file allocated to them * if all they do is call sock_create_kern(). @@ -1323,12 +1316,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk, if (sk->sk_socket->file) flags = sk->sk_socket->file->f_flags; - err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); - -out_free: - kfree(kaddrs); - - return err; + return __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id); } /* @@ -1336,10 +1324,10 @@ static int __sctp_setsockopt_connectx(struct sock *sk, * to the option that doesn't provide association id. */ static int sctp_setsockopt_connectx_old(struct sock *sk, - struct sockaddr __user *addrs, + struct sockaddr *kaddrs, int addrs_size) { - return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL); + return __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, NULL); } /* @@ -1349,13 +1337,13 @@ static int sctp_setsockopt_connectx_old(struct sock *sk, * always positive. 
*/ static int sctp_setsockopt_connectx(struct sock *sk, - struct sockaddr __user *addrs, + struct sockaddr *kaddrs, int addrs_size) { sctp_assoc_t assoc_id = 0; int err = 0; - err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id); + err = __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, &assoc_id); if (err) return err; @@ -1385,6 +1373,7 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len, { struct sctp_getaddrs_old param; sctp_assoc_t assoc_id = 0; + struct sockaddr *kaddrs; int err = 0; #ifdef CONFIG_COMPAT @@ -1408,9 +1397,12 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len, return -EFAULT; } - err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) - param.addrs, param.addr_num, - &assoc_id); + kaddrs = memdup_user(param.addrs, param.addr_num); + if (IS_ERR(kaddrs)) + return PTR_ERR(kaddrs); + + err = __sctp_setsockopt_connectx(sk, kaddrs, param.addr_num, &assoc_id); + kfree(kaddrs); if (err == 0 || err == -EINPROGRESS) { if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) return -EFAULT; @@ -4700,16 +4692,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_SOCKOPT_CONNECTX_OLD: /* 'optlen' is the size of the addresses buffer. */ - retval = sctp_setsockopt_connectx_old(sk, - (struct sockaddr __user *)optval, - optlen); + retval = sctp_setsockopt_connectx_old(sk, kopt, optlen); break; case SCTP_SOCKOPT_CONNECTX: /* 'optlen' is the size of the addresses buffer. */ - retval = sctp_setsockopt_connectx(sk, - (struct sockaddr __user *)optval, - optlen); + retval = sctp_setsockopt_connectx(sk, kopt, optlen); break; case SCTP_DISABLE_FRAGMENTS: From 1083582558c3bdda87ae13cc6728e17d583b0b74 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:41 +0200 Subject: [PATCH 1218/2056] sctp: pass a kernel pointer to sctp_setsockopt_disable_fragments Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 44cf2848146a..b259ea94aedd 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2184,20 +2184,12 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, * exceeds the current PMTU size, the message will NOT be sent and * instead a error will be indicated to the user. */ -static int sctp_setsockopt_disable_fragments(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_disable_fragments(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (optlen < sizeof(int)) return -EINVAL; - - if (get_user(val, (int __user *)optval)) - return -EFAULT; - - sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; - + sctp_sk(sk)->disable_fragments = (*val == 0) ? 0 : 1; return 0; } @@ -4701,7 +4693,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_DISABLE_FRAGMENTS: - retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); + retval = sctp_setsockopt_disable_fragments(sk, kopt, optlen); break; case SCTP_EVENTS: From a98d21a173d1b6245310e90bbf60ad17125dadf3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:42 +0200 Subject: [PATCH 1219/2056] sctp: pass a kernel pointer to sctp_setsockopt_events Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. 
Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b259ea94aedd..bc37174bd71a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2193,11 +2193,9 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk, int *val, return 0; } -static int sctp_setsockopt_events(struct sock *sk, char __user *optval, +static int sctp_setsockopt_events(struct sock *sk, __u8 *sn_type, unsigned int optlen) { - struct sctp_event_subscribe subscribe; - __u8 *sn_type = (__u8 *)&subscribe; struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; int i; @@ -2205,9 +2203,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval, if (optlen > sizeof(struct sctp_event_subscribe)) return -EINVAL; - if (copy_from_user(&subscribe, optval, optlen)) - return -EFAULT; - for (i = 0; i < optlen; i++) sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i, sn_type[i]); @@ -4697,7 +4692,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_EVENTS: - retval = sctp_setsockopt_events(sk, optval, optlen); + retval = sctp_setsockopt_events(sk, kopt, optlen); break; case SCTP_AUTOCLOSE: From 0b49a65c77d8a4864e1d524dc51c6a4b8a6e1585 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:43 +0200 Subject: [PATCH 1220/2056] sctp: pass a kernel pointer to sctp_setsockopt_autoclose Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index bc37174bd71a..d7ed8c6ea375 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2242,7 +2242,7 @@ static int sctp_setsockopt_events(struct sock *sk, __u8 *sn_type, * integer defining the number of seconds of idle time before an * association is closed. */ -static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, +static int sctp_setsockopt_autoclose(struct sock *sk, u32 *optval, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); @@ -2253,9 +2253,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, return -EOPNOTSUPP; if (optlen != sizeof(int)) return -EINVAL; - if (copy_from_user(&sp->autoclose, optval, optlen)) - return -EFAULT; + sp->autoclose = *optval; if (sp->autoclose > net->sctp.max_autoclose) sp->autoclose = net->sctp.max_autoclose; @@ -4696,7 +4695,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_AUTOCLOSE: - retval = sctp_setsockopt_autoclose(sk, optval, optlen); + retval = sctp_setsockopt_autoclose(sk, kopt, optlen); break; case SCTP_PEER_ADDR_PARAMS: From 9b7b0d1a395d54c12be9f18d1bf7be06aecaa785 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:44 +0200 Subject: [PATCH 1221/2056] sctp: pass a kernel pointer to sctp_setsockopt_peer_addr_params Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 44 +++++++++++++++++++------------------------- 1 file changed, 19 insertions(+), 25 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d7ed8c6ea375..19308c327b6d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2589,48 +2589,42 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params, } static int sctp_setsockopt_peer_addr_params(struct sock *sk, - char __user *optval, + struct sctp_paddrparams *params, unsigned int optlen) { - struct sctp_paddrparams params; struct sctp_transport *trans = NULL; struct sctp_association *asoc = NULL; struct sctp_sock *sp = sctp_sk(sk); int error; int hb_change, pmtud_change, sackdelay_change; - if (optlen == sizeof(params)) { - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams, + if (optlen == ALIGN(offsetof(struct sctp_paddrparams, spp_ipv6_flowlabel), 4)) { - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL)) + if (params->spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL)) return -EINVAL; - } else { + } else if (optlen != sizeof(*params)) { return -EINVAL; } /* Validate flags and value parameters. */ - hb_change = params.spp_flags & SPP_HB; - pmtud_change = params.spp_flags & SPP_PMTUD; - sackdelay_change = params.spp_flags & SPP_SACKDELAY; + hb_change = params->spp_flags & SPP_HB; + pmtud_change = params->spp_flags & SPP_PMTUD; + sackdelay_change = params->spp_flags & SPP_SACKDELAY; if (hb_change == SPP_HB || pmtud_change == SPP_PMTUD || sackdelay_change == SPP_SACKDELAY || - params.spp_sackdelay > 500 || - (params.spp_pathmtu && - params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) + params->spp_sackdelay > 500 || + (params->spp_pathmtu && + params->spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) return -EINVAL; /* If an address other than INADDR_ANY is specified, and * no transport is found, then the request is invalid. */ - if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) { - trans = sctp_addr_id2transport(sk, &params.spp_address, - params.spp_assoc_id); + if (!sctp_is_any(sk, (union sctp_addr *)&params->spp_address)) { + trans = sctp_addr_id2transport(sk, &params->spp_address, + params->spp_assoc_id); if (!trans) return -EINVAL; } @@ -2639,19 +2633,19 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, * socket is a one to many style socket, and an association * was not found, then the id was invalid. */ - asoc = sctp_id2assoc(sk, params.spp_assoc_id); - if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->spp_assoc_id); + if (!asoc && params->spp_assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; /* Heartbeat demand can only be sent on a transport or * association, but not a socket. */ - if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) + if (params->spp_flags & SPP_HB_DEMAND && !trans && !asoc) return -EINVAL; /* Process parameters.
*/ - error = sctp_apply_peer_addr_params(&params, trans, asoc, sp, + error = sctp_apply_peer_addr_params(params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); @@ -2664,7 +2658,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk, if (!trans && asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { - sctp_apply_peer_addr_params(&params, trans, asoc, sp, + sctp_apply_peer_addr_params(params, trans, asoc, sp, hb_change, pmtud_change, sackdelay_change); } @@ -4699,7 +4693,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_PEER_ADDR_PARAMS: - retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); + retval = sctp_setsockopt_peer_addr_params(sk, kopt, optlen); break; case SCTP_DELAYED_SACK: From ebb25defdc17b594715418f1aa99eeb9a217cf1f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:45 +0200 Subject: [PATCH 1222/2056] sctp: pass a kernel pointer to sctp_setsockopt_delayed_ack Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 49 ++++++++++++++++++++++--------------------------- 1 file changed, 22 insertions(+), 27 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 19308c327b6d..2dfbd7f87991 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2751,17 +2751,14 @@ static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params, */ static int sctp_setsockopt_delayed_ack(struct sock *sk, - char __user *optval, unsigned int optlen) + struct sctp_sack_info *params, + unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - struct sctp_sack_info params; if (optlen == sizeof(struct sctp_sack_info)) { - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - - if (params.sack_delay == 0 && params.sack_freq == 0) + if (params->sack_delay == 0 && params->sack_freq == 0) return 0; } else if (optlen == sizeof(struct sctp_assoc_value)) { pr_warn_ratelimited(DEPRECATED @@ -2769,59 +2766,57 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk, "Use of struct sctp_assoc_value in delayed_ack socket option.\n" "Use struct sctp_sack_info instead\n", current->comm, task_pid_nr(current)); - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - if (params.sack_delay == 0) - params.sack_freq = 1; + if (params->sack_delay == 0) + params->sack_freq = 1; else - params.sack_freq = 0; + params->sack_freq = 0; } else return -EINVAL; /* Validate value parameter. */ - if (params.sack_delay > 500) + if (params->sack_delay > 500) return -EINVAL; /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the * socket is a one to many style socket, and an association * was not found, then the id was invalid.
*/ - asoc = sctp_id2assoc(sk, params.sack_assoc_id); - if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, params->sack_assoc_id); + if (!asoc && params->sack_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { - sctp_apply_asoc_delayed_ack(&params, asoc); + sctp_apply_asoc_delayed_ack(params, asoc); return 0; } if (sctp_style(sk, TCP)) - params.sack_assoc_id = SCTP_FUTURE_ASSOC; + params->sack_assoc_id = SCTP_FUTURE_ASSOC; - if (params.sack_assoc_id == SCTP_FUTURE_ASSOC || - params.sack_assoc_id == SCTP_ALL_ASSOC) { - if (params.sack_delay) { - sp->sackdelay = params.sack_delay; + if (params->sack_assoc_id == SCTP_FUTURE_ASSOC || + params->sack_assoc_id == SCTP_ALL_ASSOC) { + if (params->sack_delay) { + sp->sackdelay = params->sack_delay; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } - if (params.sack_freq == 1) { + if (params->sack_freq == 1) { sp->param_flags = sctp_spp_sackdelay_disable(sp->param_flags); - } else if (params.sack_freq > 1) { - sp->sackfreq = params.sack_freq; + } else if (params->sack_freq > 1) { + sp->sackfreq = params->sack_freq; sp->param_flags = sctp_spp_sackdelay_enable(sp->param_flags); } } - if (params.sack_assoc_id == SCTP_CURRENT_ASSOC || - params.sack_assoc_id == SCTP_ALL_ASSOC) + if (params->sack_assoc_id == SCTP_CURRENT_ASSOC || + params->sack_assoc_id == SCTP_ALL_ASSOC) list_for_each_entry(asoc, &sp->ep->asocs, asocs) - sctp_apply_asoc_delayed_ack(&params, asoc); + sctp_apply_asoc_delayed_ack(params, asoc); return 0; } @@ -4697,7 +4692,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_DELAYED_SACK: - retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); + retval = sctp_setsockopt_delayed_ack(sk, kopt, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); From bb13d647d95bd0fdbc4882c27ea818b4c879263f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:46 +0200 Subject: [PATCH 1223/2056] sctp: pass a kernel pointer to sctp_setsockopt_partial_delivery_point Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2dfbd7f87991..deccfe54d2fc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3487,24 +3487,19 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk, * call as long as the user provided buffer is large enough to hold the * message. */ -static int sctp_setsockopt_partial_delivery_point(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_partial_delivery_point(struct sock *sk, u32 *val, unsigned int optlen) { - u32 val; - if (optlen != sizeof(u32)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; /* Note: We double the receive buffer from what the user sets * it to be, also initial rwnd is based on rcvbuf/2. */ - if (val > (sk->sk_rcvbuf >> 1)) + if (*val > (sk->sk_rcvbuf >> 1)) return -EINVAL; - sctp_sk(sk)->pd_point = val; + sctp_sk(sk)->pd_point = *val; return 0; /* is this the right error code?
*/ } @@ -4695,7 +4690,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_delayed_ack(sk, kopt, optlen); break; case SCTP_PARTIAL_DELIVERY_POINT: - retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); + retval = sctp_setsockopt_partial_delivery_point(sk, kopt, optlen); break; case SCTP_INITMSG: From 9dfa6f0494899e4643ec82c907a7664777262b8c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:47 +0200 Subject: [PATCH 1224/2056] sctp: pass a kernel pointer to sctp_setsockopt_initmsg Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index deccfe54d2fc..d62c02d0b793 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2832,24 +2832,22 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk, * by the change). With TCP-style sockets, this option is inherited by * sockets derived from a listener socket. */ -static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) +static int sctp_setsockopt_initmsg(struct sock *sk, struct sctp_initmsg *sinit, + unsigned int optlen) { - struct sctp_initmsg sinit; struct sctp_sock *sp = sctp_sk(sk); if (optlen != sizeof(struct sctp_initmsg)) return -EINVAL; - if (copy_from_user(&sinit, optval, optlen)) - return -EFAULT; - if (sinit.sinit_num_ostreams) - sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; - if (sinit.sinit_max_instreams) - sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; - if (sinit.sinit_max_attempts) - sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; - if (sinit.sinit_max_init_timeo) - sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; + if (sinit->sinit_num_ostreams) + sp->initmsg.sinit_num_ostreams = sinit->sinit_num_ostreams; + if (sinit->sinit_max_instreams) + sp->initmsg.sinit_max_instreams = sinit->sinit_max_instreams; + if (sinit->sinit_max_attempts) + sp->initmsg.sinit_max_attempts = sinit->sinit_max_attempts; + if (sinit->sinit_max_init_timeo) + sp->initmsg.sinit_max_init_timeo = sinit->sinit_max_init_timeo; return 0; } @@ -4694,7 +4692,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, break; case SCTP_INITMSG: - retval = sctp_setsockopt_initmsg(sk, optval, optlen); + retval = sctp_setsockopt_initmsg(sk, kopt, optlen); break; case SCTP_DEFAULT_SEND_PARAM: retval = sctp_setsockopt_default_send_param(sk, optval, From c23ad6d2b71cf2731e162143a6665d6491766c61 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:48 +0200 Subject: [PATCH 1225/2056] sctp: pass a kernel pointer to sctp_setsockopt_default_send_param Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 56 ++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 30 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d62c02d0b793..9bcbb065cf9e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2867,57 +2867,54 @@ static int sctp_setsockopt_initmsg(struct sock *sk, struct sctp_initmsg *sinit, * to this call if the caller is using the UDP model. 
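 *
 * As a hedged illustration (not part of this patch; it assumes an SCTP
 * socket descriptor sd, <netinet/sctp.h>, and that sinfo_timetolive is
 * given in milliseconds), a userspace caller sets these defaults for
 * future associations on a one-to-many socket roughly like this:
 *
 *	struct sctp_sndrcvinfo info = {
 *		.sinfo_stream     = 1,
 *		.sinfo_ppid       = htonl(42),
 *		.sinfo_timetolive = 5000,
 *		.sinfo_assoc_id   = SCTP_FUTURE_ASSOC,
 *	};
 *
 *	if (setsockopt(sd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
 *		       &info, sizeof(info)) < 0)
 *		perror("SCTP_DEFAULT_SEND_PARAM");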
*/ static int sctp_setsockopt_default_send_param(struct sock *sk, - char __user *optval, + struct sctp_sndrcvinfo *info, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - struct sctp_sndrcvinfo info; - if (optlen != sizeof(info)) + if (optlen != sizeof(*info)) return -EINVAL; - if (copy_from_user(&info, optval, optlen)) - return -EFAULT; - if (info.sinfo_flags & + if (info->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; - asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); - if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, info->sinfo_assoc_id); + if (!asoc && info->sinfo_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { - asoc->default_stream = info.sinfo_stream; - asoc->default_flags = info.sinfo_flags; - asoc->default_ppid = info.sinfo_ppid; - asoc->default_context = info.sinfo_context; - asoc->default_timetolive = info.sinfo_timetolive; + asoc->default_stream = info->sinfo_stream; + asoc->default_flags = info->sinfo_flags; + asoc->default_ppid = info->sinfo_ppid; + asoc->default_context = info->sinfo_context; + asoc->default_timetolive = info->sinfo_timetolive; return 0; } if (sctp_style(sk, TCP)) - info.sinfo_assoc_id = SCTP_FUTURE_ASSOC; + info->sinfo_assoc_id = SCTP_FUTURE_ASSOC; - if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC || - info.sinfo_assoc_id == SCTP_ALL_ASSOC) { - sp->default_stream = info.sinfo_stream; - sp->default_flags = info.sinfo_flags; - sp->default_ppid = info.sinfo_ppid; - sp->default_context = info.sinfo_context; - sp->default_timetolive = info.sinfo_timetolive; + if (info->sinfo_assoc_id == SCTP_FUTURE_ASSOC || + info->sinfo_assoc_id == SCTP_ALL_ASSOC) { + sp->default_stream = info->sinfo_stream; + sp->default_flags = info->sinfo_flags; + sp->default_ppid = info->sinfo_ppid; + sp->default_context = info->sinfo_context; + sp->default_timetolive = info->sinfo_timetolive; } - if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC || - info.sinfo_assoc_id == SCTP_ALL_ASSOC) { + if (info->sinfo_assoc_id == SCTP_CURRENT_ASSOC || + info->sinfo_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { - asoc->default_stream = info.sinfo_stream; - asoc->default_flags = info.sinfo_flags; - asoc->default_ppid = info.sinfo_ppid; - asoc->default_context = info.sinfo_context; - asoc->default_timetolive = info.sinfo_timetolive; + asoc->default_stream = info->sinfo_stream; + asoc->default_flags = info->sinfo_flags; + asoc->default_ppid = info->sinfo_ppid; + asoc->default_context = info->sinfo_context; + asoc->default_timetolive = info->sinfo_timetolive; } } @@ -4695,8 +4692,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_initmsg(sk, kopt, optlen); break; case SCTP_DEFAULT_SEND_PARAM: - retval = sctp_setsockopt_default_send_param(sk, optval, - optlen); + retval = sctp_setsockopt_default_send_param(sk, kopt, optlen); break; case SCTP_DEFAULT_SNDINFO: retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); From 8a2409d3566b3965347d73b4ca008eaa76f4bbfc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:49 +0200 Subject: [PATCH 1226/2056] sctp: pass a kernel pointer to sctp_setsockopt_default_sndinfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 49 ++++++++++++++++++++++------------------------- 1 file changed, 23 insertions(+), 26 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9bcbb065cf9e..6695fa1cb0ca 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2925,54 +2925,51 @@ static int sctp_setsockopt_default_send_param(struct sock *sk, * (SCTP_DEFAULT_SNDINFO) */ static int sctp_setsockopt_default_sndinfo(struct sock *sk, - char __user *optval, + struct sctp_sndinfo *info, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - struct sctp_sndinfo info; - if (optlen != sizeof(info)) + if (optlen != sizeof(*info)) return -EINVAL; - if (copy_from_user(&info, optval, optlen)) - return -EFAULT; - if (info.snd_flags & + if (info->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | SCTP_ABORT | SCTP_EOF)) return -EINVAL; - asoc = sctp_id2assoc(sk, info.snd_assoc_id); - if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, info->snd_assoc_id); + if (!asoc && info->snd_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { - asoc->default_stream = info.snd_sid; - asoc->default_flags = info.snd_flags; - asoc->default_ppid = info.snd_ppid; - asoc->default_context = info.snd_context; + asoc->default_stream = info->snd_sid; + asoc->default_flags = info->snd_flags; + asoc->default_ppid = info->snd_ppid; + asoc->default_context = info->snd_context; return 0; } if (sctp_style(sk, TCP)) - info.snd_assoc_id = SCTP_FUTURE_ASSOC; + info->snd_assoc_id = SCTP_FUTURE_ASSOC; - if (info.snd_assoc_id == SCTP_FUTURE_ASSOC || - info.snd_assoc_id == SCTP_ALL_ASSOC) { - sp->default_stream = info.snd_sid; - sp->default_flags = info.snd_flags; - sp->default_ppid = info.snd_ppid; - sp->default_context = info.snd_context; + if (info->snd_assoc_id == SCTP_FUTURE_ASSOC || + info->snd_assoc_id == SCTP_ALL_ASSOC) { + sp->default_stream = info->snd_sid; + sp->default_flags = info->snd_flags; + sp->default_ppid = info->snd_ppid; + sp->default_context = info->snd_context; } - if (info.snd_assoc_id == SCTP_CURRENT_ASSOC || - info.snd_assoc_id == SCTP_ALL_ASSOC) { + if (info->snd_assoc_id == SCTP_CURRENT_ASSOC || + info->snd_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { - asoc->default_stream = info.snd_sid; - asoc->default_flags = info.snd_flags; - asoc->default_ppid = info.snd_ppid; - asoc->default_context = info.snd_context; + asoc->default_stream = info->snd_sid; + asoc->default_flags = info->snd_flags; + asoc->default_ppid = info->snd_ppid; + asoc->default_context = info->snd_context; } } @@ -4695,7 +4692,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_default_send_param(sk, kopt, optlen); break; case SCTP_DEFAULT_SNDINFO: - retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); + retval = sctp_setsockopt_default_sndinfo(sk, kopt, optlen); break; case SCTP_PRIMARY_ADDR: retval = sctp_setsockopt_primary_addr(sk, optval, optlen); From 1eec69580414bd9f9be00ab2cf7b69d44adcae17 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:50 +0200 Subject: [PATCH 1227/2056] sctp: pass a kernel pointer to sctp_setsockopt_primary_addr Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6695fa1cb0ca..abdec7b412bc 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2982,10 +2982,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk, * the association primary. The enclosed address must be one of the * association peer's addresses. */ -static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, +static int sctp_setsockopt_primary_addr(struct sock *sk, struct sctp_prim *prim, unsigned int optlen) { - struct sctp_prim prim; struct sctp_transport *trans; struct sctp_af *af; int err; @@ -2993,21 +2992,18 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, if (optlen != sizeof(struct sctp_prim)) return -EINVAL; - if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) - return -EFAULT; - /* Allow security module to validate address but need address len. */ - af = sctp_get_af_specific(prim.ssp_addr.ss_family); + af = sctp_get_af_specific(prim->ssp_addr.ss_family); if (!af) return -EINVAL; err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR, - (struct sockaddr *)&prim.ssp_addr, + (struct sockaddr *)&prim->ssp_addr, af->sockaddr_len); if (err) return err; - trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); + trans = sctp_addr_id2transport(sk, &prim->ssp_addr, prim->ssp_assoc_id); if (!trans) return -EINVAL; @@ -4695,7 +4691,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_default_sndinfo(sk, kopt, optlen); break; case SCTP_PRIMARY_ADDR: - retval = sctp_setsockopt_primary_addr(sk, optval, optlen); + retval = sctp_setsockopt_primary_addr(sk, kopt, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); From 46a0ae9de3186897211e04c2ed5d4a06b8b9e1cf Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:51 +0200 Subject: [PATCH 1228/2056] sctp: pass a kernel pointer to sctp_setsockopt_peer_primary_addr Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index abdec7b412bc..ff3b720eab0a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3284,12 +3284,12 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned * locally bound addresses. 
The following structure is used to make a * set primary request: */ -static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, +static int sctp_setsockopt_peer_primary_addr(struct sock *sk, + struct sctp_setpeerprim *prim, unsigned int optlen) { struct sctp_sock *sp; struct sctp_association *asoc = NULL; - struct sctp_setpeerprim prim; struct sctp_chunk *chunk; struct sctp_af *af; int err; @@ -3302,10 +3302,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva if (optlen != sizeof(struct sctp_setpeerprim)) return -EINVAL; - if (copy_from_user(&prim, optval, optlen)) - return -EFAULT; - - asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); + asoc = sctp_id2assoc(sk, prim->sspp_assoc_id); if (!asoc) return -EINVAL; @@ -3318,26 +3315,26 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva if (!sctp_state(asoc, ESTABLISHED)) return -ENOTCONN; - af = sctp_get_af_specific(prim.sspp_addr.ss_family); + af = sctp_get_af_specific(prim->sspp_addr.ss_family); if (!af) return -EINVAL; - if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) + if (!af->addr_valid((union sctp_addr *)&prim->sspp_addr, sp, NULL)) return -EADDRNOTAVAIL; - if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) + if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim->sspp_addr)) return -EADDRNOTAVAIL; /* Allow security module to validate address. */ err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR, - (struct sockaddr *)&prim.sspp_addr, + (struct sockaddr *)&prim->sspp_addr, af->sockaddr_len); if (err) return err; /* Create an ASCONF chunk with SET_PRIMARY parameter */ chunk = sctp_make_asconf_set_prim(asoc, - (union sctp_addr *)&prim.sspp_addr); + (union sctp_addr *)&prim->sspp_addr); if (!chunk) return -ENOMEM; @@ -4694,7 +4691,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_primary_addr(sk, kopt, optlen); break; case SCTP_SET_PEER_PRIMARY_ADDR: - retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); + retval = sctp_setsockopt_peer_primary_addr(sk, kopt, optlen); break; case SCTP_NODELAY: retval = sctp_setsockopt_nodelay(sk, optval, optlen); From f87ddbc0c0fbe1f0ebd368b4047c0af0250667c9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:52 +0200 Subject: [PATCH 1229/2056] sctp: pass a kernel pointer to sctp_setsockopt_nodelay Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ff3b720eab0a..f9fe93e865b9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3020,17 +3020,12 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, struct sctp_prim *prim, * introduced, at the cost of more packets in the network. Expects an * integer boolean flag. */ -static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, +static int sctp_setsockopt_nodelay(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - - sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; + sctp_sk(sk)->nodelay = (*val == 0) ? 
0 : 1; return 0; } @@ -4694,7 +4689,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_peer_primary_addr(sk, kopt, optlen); break; case SCTP_NODELAY: - retval = sctp_setsockopt_nodelay(sk, optval, optlen); + retval = sctp_setsockopt_nodelay(sk, kopt, optlen); break; case SCTP_RTOINFO: retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); From af5ae60e426a44003003177b176d60173e98dfa4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:53 +0200 Subject: [PATCH 1230/2056] sctp: pass a kernel pointer to sctp_setsockopt_rtoinfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f9fe93e865b9..6339a08b62dd 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3041,9 +3041,10 @@ static int sctp_setsockopt_nodelay(struct sock *sk, int *val, * be changed. * */ -static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) +static int sctp_setsockopt_rtoinfo(struct sock *sk, + struct sctp_rtoinfo *rtoinfo, + unsigned int optlen) { - struct sctp_rtoinfo rtoinfo; struct sctp_association *asoc; unsigned long rto_min, rto_max; struct sctp_sock *sp = sctp_sk(sk); @@ -3051,18 +3052,15 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne if (optlen != sizeof (struct sctp_rtoinfo)) return -EINVAL; - if (copy_from_user(&rtoinfo, optval, optlen)) - return -EFAULT; - - asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); + asoc = sctp_id2assoc(sk, rtoinfo->srto_assoc_id); /* Set the values to the specific association */ - if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC && + if (!asoc && rtoinfo->srto_assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; - rto_max = rtoinfo.srto_max; - rto_min = rtoinfo.srto_min; + rto_max = rtoinfo->srto_max; + rto_min = rtoinfo->srto_min; if (rto_max) rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; @@ -3078,17 +3076,17 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne return -EINVAL; if (asoc) { - if (rtoinfo.srto_initial != 0) + if (rtoinfo->srto_initial != 0) asoc->rto_initial = - msecs_to_jiffies(rtoinfo.srto_initial); + msecs_to_jiffies(rtoinfo->srto_initial); asoc->rto_max = rto_max; asoc->rto_min = rto_min; } else { /* If there is no association or the association-id = 0 * set the values to the endpoint. 
*/ - if (rtoinfo.srto_initial != 0) - sp->rtoinfo.srto_initial = rtoinfo.srto_initial; + if (rtoinfo->srto_initial != 0) + sp->rtoinfo.srto_initial = rtoinfo->srto_initial; sp->rtoinfo.srto_max = rto_max; sp->rtoinfo.srto_min = rto_min; } @@ -4692,7 +4690,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_nodelay(sk, kopt, optlen); break; case SCTP_RTOINFO: - retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); + retval = sctp_setsockopt_rtoinfo(sk, kopt, optlen); break; case SCTP_ASSOCINFO: retval = sctp_setsockopt_associnfo(sk, optval, optlen); From 5b864c8dab18ddc5b34d7e9f5117c7d5185d0d4b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:54 +0200 Subject: [PATCH 1231/2056] sctp: pass a kernel pointer to sctp_setsockopt_associnfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6339a08b62dd..2a655c65e294 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3105,26 +3105,25 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, * See [SCTP] for more information. * */ -static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) +static int sctp_setsockopt_associnfo(struct sock *sk, + struct sctp_assocparams *assocparams, + unsigned int optlen) { - struct sctp_assocparams assocparams; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assocparams)) return -EINVAL; - if (copy_from_user(&assocparams, optval, optlen)) - return -EFAULT; - asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); + asoc = sctp_id2assoc(sk, assocparams->sasoc_assoc_id); - if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC && + if (!asoc && assocparams->sasoc_assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; /* Set the values to the specific association */ if (asoc) { - if (assocparams.sasoc_asocmaxrxt != 0) { + if (assocparams->sasoc_asocmaxrxt != 0) { __u32 path_sum = 0; int paths = 0; struct sctp_transport *peer_addr; @@ -3141,24 +3140,25 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig * then one path. 
*/ if (paths > 1 && - assocparams.sasoc_asocmaxrxt > path_sum) + assocparams->sasoc_asocmaxrxt > path_sum) return -EINVAL; - asoc->max_retrans = assocparams.sasoc_asocmaxrxt; + asoc->max_retrans = assocparams->sasoc_asocmaxrxt; } - if (assocparams.sasoc_cookie_life != 0) - asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); + if (assocparams->sasoc_cookie_life != 0) + asoc->cookie_life = + ms_to_ktime(assocparams->sasoc_cookie_life); } else { /* Set the values to the endpoint */ struct sctp_sock *sp = sctp_sk(sk); - if (assocparams.sasoc_asocmaxrxt != 0) + if (assocparams->sasoc_asocmaxrxt != 0) sp->assocparams.sasoc_asocmaxrxt = - assocparams.sasoc_asocmaxrxt; - if (assocparams.sasoc_cookie_life != 0) + assocparams->sasoc_asocmaxrxt; + if (assocparams->sasoc_cookie_life != 0) sp->assocparams.sasoc_cookie_life = - assocparams.sasoc_cookie_life; + assocparams->sasoc_cookie_life; } return 0; } @@ -4693,7 +4693,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_rtoinfo(sk, kopt, optlen); break; case SCTP_ASSOCINFO: - retval = sctp_setsockopt_associnfo(sk, optval, optlen); + retval = sctp_setsockopt_associnfo(sk, kopt, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: retval = sctp_setsockopt_mappedv4(sk, optval, optlen); From ffc08f086a563cdd18f43edf574724f65e4bf5e3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:55 +0200 Subject: [PATCH 1232/2056] sctp: pass a kernel pointer to sctp_setsockopt_mappedv4 Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2a655c65e294..5007af318e13 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3173,16 +3173,14 @@ static int sctp_setsockopt_associnfo(struct sock *sk, * addresses and a user will receive both PF_INET6 and PF_INET type * addresses on the socket. */ -static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) +static int sctp_setsockopt_mappedv4(struct sock *sk, int *val, + unsigned int optlen) { - int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - if (val) + if (*val) sp->v4mapped = 1; else sp->v4mapped = 0; @@ -4696,7 +4694,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_associnfo(sk, kopt, optlen); break; case SCTP_I_WANT_MAPPED_V4_ADDR: - retval = sctp_setsockopt_mappedv4(sk, optval, optlen); + retval = sctp_setsockopt_mappedv4(sk, kopt, optlen); break; case SCTP_MAXSEG: retval = sctp_setsockopt_maxseg(sk, optval, optlen); From dcd0357580cde2d058b50037c38ec499e7745497 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:56 +0200 Subject: [PATCH 1233/2056] sctp: pass a kernel pointer to sctp_setsockopt_maxseg Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5007af318e13..cc7d5430ad88 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3215,11 +3215,13 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, int *val, * changed (effecting future associations only). * assoc_value: This parameter specifies the maximum size in bytes. */ -static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) +static int sctp_setsockopt_maxseg(struct sock *sk, + struct sctp_assoc_value *params, + unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); - struct sctp_assoc_value params; struct sctp_association *asoc; + sctp_assoc_t assoc_id; int val; if (optlen == sizeof(int)) { @@ -3228,19 +3230,17 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned "Use of int in maxseg socket option.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); - if (copy_from_user(&val, optval, optlen)) - return -EFAULT; - params.assoc_id = SCTP_FUTURE_ASSOC; + assoc_id = SCTP_FUTURE_ASSOC; + val = *(int *)params; } else if (optlen == sizeof(struct sctp_assoc_value)) { - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - val = params.assoc_value; + assoc_id = params->assoc_id; + val = params->assoc_value; } else { return -EINVAL; } - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, assoc_id); + if (!asoc && assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; @@ -4697,7 +4697,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_mappedv4(sk, kopt, optlen); break; case SCTP_MAXSEG: - retval = sctp_setsockopt_maxseg(sk, optval, optlen); + retval = sctp_setsockopt_maxseg(sk, kopt, optlen); break; case SCTP_ADAPTATION_LAYER: retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); From 07e5035c6f1377e0f07d9dcaa1724c97ac56f543 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:57 +0200 Subject: [PATCH 1234/2056] sctp: pass a kernel pointer to sctp_setsockopt_adaptation_layer Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S.
Miller --- net/sctp/socket.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index cc7d5430ad88..b29452f58ff9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3336,17 +3336,14 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, return err; } -static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, +static int sctp_setsockopt_adaptation_layer(struct sock *sk, + struct sctp_setadaptation *adapt, unsigned int optlen) { - struct sctp_setadaptation adaptation; - if (optlen != sizeof(struct sctp_setadaptation)) return -EINVAL; - if (copy_from_user(&adaptation, optval, optlen)) - return -EFAULT; - sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; + sctp_sk(sk)->adaptation_ind = adapt->ssb_adaptation_ind; return 0; } @@ -4700,7 +4697,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_maxseg(sk, kopt, optlen); break; case SCTP_ADAPTATION_LAYER: - retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); + retval = sctp_setsockopt_adaptation_layer(sk, kopt, optlen); break; case SCTP_CONTEXT: retval = sctp_setsockopt_context(sk, optval, optlen); From 722eca9ecac7f83a0378fc48aa1acd025d9dd1da Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:58 +0200 Subject: [PATCH 1235/2056] sctp: pass a kernel pointer to sctp_setsockopt_context Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b29452f58ff9..2862047054d5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3362,40 +3362,38 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, * received messages from the peer and does not effect the value that is * saved with outbound messages. 
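 *
 * A hedged userspace sketch (illustrative, not part of this patch; assumes
 * an SCTP one-to-many socket sd and <netinet/sctp.h>): install a default
 * receive context for all current and future associations using the
 * sctp_assoc_value layout this handler expects:
 *
 *	struct sctp_assoc_value av = {
 *		.assoc_id    = SCTP_ALL_ASSOC,
 *		.assoc_value = 0x1234,
 *	};
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_CONTEXT, &av, sizeof(av));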
*/ -static int sctp_setsockopt_context(struct sock *sk, char __user *optval, +static int sctp_setsockopt_context(struct sock *sk, + struct sctp_assoc_value *params, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); - struct sctp_assoc_value params; struct sctp_association *asoc; if (optlen != sizeof(struct sctp_assoc_value)) return -EINVAL; - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { - asoc->default_rcv_context = params.assoc_value; + asoc->default_rcv_context = params->assoc_value; return 0; } if (sctp_style(sk, TCP)) - params.assoc_id = SCTP_FUTURE_ASSOC; + params->assoc_id = SCTP_FUTURE_ASSOC; - if (params.assoc_id == SCTP_FUTURE_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) - sp->default_rcv_context = params.assoc_value; + if (params->assoc_id == SCTP_FUTURE_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) + sp->default_rcv_context = params->assoc_value; - if (params.assoc_id == SCTP_CURRENT_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) + if (params->assoc_id == SCTP_CURRENT_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) list_for_each_entry(asoc, &sp->ep->asocs, asocs) - asoc->default_rcv_context = params.assoc_value; + asoc->default_rcv_context = params->assoc_value; return 0; } @@ -4700,7 +4698,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_adaptation_layer(sk, kopt, optlen); break; case SCTP_CONTEXT: - retval = sctp_setsockopt_context(sk, optval, optlen); + retval = sctp_setsockopt_context(sk, kopt, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); From 1031cea0019ec967a260ad835c0a5fe03d1b93a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:21:59 +0200 Subject: [PATCH 1236/2056] sctp: pass a kernel pointer to sctp_setsockopt_fragment_interleave Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2862047054d5..874cec731530 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3422,18 +3422,13 @@ static int sctp_setsockopt_context(struct sock *sk, * application using the one to many model may become confused and act * incorrectly.
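 *
 * A hedged userspace sketch (illustrative, not part of this patch; assumes
 * an SCTP socket sd): the option body is a plain int, so enabling fragment
 * interleave looks like:
 *
 *	int on = 1;
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		   &on, sizeof(on));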
*/ -static int sctp_setsockopt_fragment_interleave(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_fragment_interleave(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (optlen != sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - sctp_sk(sk)->frag_interleave = !!val; + sctp_sk(sk)->frag_interleave = !!*val; if (!sctp_sk(sk)->frag_interleave) sctp_sk(sk)->ep->intl_enable = 0; @@ -4701,7 +4696,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_context(sk, kopt, optlen); break; case SCTP_FRAGMENT_INTERLEAVE: - retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); + retval = sctp_setsockopt_fragment_interleave(sk, kopt, optlen); break; case SCTP_MAX_BURST: retval = sctp_setsockopt_maxburst(sk, optval, optlen); From f5bee0adb17eaa2f106c895d198b1e8fca96f1de Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:00 +0200 Subject: [PATCH 1237/2056] sctp: pass a kernel pointer to sctp_setsockopt_maxburst Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 35 ++++++++++++++++------------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 874cec731530..ac7dff849290 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3482,12 +3482,13 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk, u32 *val, * future associations inheriting the socket value. */ static int sctp_setsockopt_maxburst(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); - struct sctp_assoc_value params; struct sctp_association *asoc; + sctp_assoc_t assoc_id; + u32 assoc_value; if (optlen == sizeof(int)) { pr_warn_ratelimited(DEPRECATED @@ -3495,37 +3496,33 @@ static int sctp_setsockopt_maxburst(struct sock *sk, "Use of int in max_burst socket option deprecated.\n" "Use struct sctp_assoc_value instead\n", current->comm, task_pid_nr(current)); - if (copy_from_user(&params.assoc_value, optval, optlen)) - return -EFAULT; - params.assoc_id = SCTP_FUTURE_ASSOC; + assoc_id = SCTP_FUTURE_ASSOC; + assoc_value = *((int *)params); } else if (optlen == sizeof(struct sctp_assoc_value)) { - if (copy_from_user(&params, optval, optlen)) - return -EFAULT; + assoc_id = params->assoc_id; + assoc_value = params->assoc_value; } else return -EINVAL; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id > SCTP_ALL_ASSOC && - sctp_style(sk, UDP)) + asoc = sctp_id2assoc(sk, assoc_id); + if (!asoc && assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { - asoc->max_burst = params.assoc_value; + asoc->max_burst = assoc_value; return 0; } if (sctp_style(sk, TCP)) - params.assoc_id = SCTP_FUTURE_ASSOC; + assoc_id = SCTP_FUTURE_ASSOC; - if (params.assoc_id == SCTP_FUTURE_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) - sp->max_burst = params.assoc_value; + if (assoc_id == SCTP_FUTURE_ASSOC || assoc_id == SCTP_ALL_ASSOC) + sp->max_burst = assoc_value; - if (params.assoc_id == SCTP_CURRENT_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) + if (assoc_id == SCTP_CURRENT_ASSOC || assoc_id == SCTP_ALL_ASSOC) list_for_each_entry(asoc, &sp->ep->asocs, asocs) - asoc->max_burst = params.assoc_value; + asoc->max_burst = assoc_value; return 0; } @@ -4699,7 +4696,7 @@ static int
sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_fragment_interleave(sk, kopt, optlen); break; case SCTP_MAX_BURST: - retval = sctp_setsockopt_maxburst(sk, optval, optlen); + retval = sctp_setsockopt_maxburst(sk, kopt, optlen); break; case SCTP_AUTH_CHUNK: retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); From 88266d31b819202ea6aa7d87a3e9aa4b6d6a046f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:01 +0200 Subject: [PATCH 1238/2056] sctp: pass a kernel pointer to sctp_setsockopt_auth_chunk Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ac7dff849290..f68aa3936df3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3535,21 +3535,18 @@ static int sctp_setsockopt_maxburst(struct sock *sk, * will only effect future associations on the socket. */ static int sctp_setsockopt_auth_chunk(struct sock *sk, - char __user *optval, + struct sctp_authchunk *val, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct sctp_authchunk val; if (!ep->auth_enable) return -EACCES; if (optlen != sizeof(struct sctp_authchunk)) return -EINVAL; - if (copy_from_user(&val, optval, optlen)) - return -EFAULT; - switch (val.sauth_chunk) { + switch (val->sauth_chunk) { case SCTP_CID_INIT: case SCTP_CID_INIT_ACK: case SCTP_CID_SHUTDOWN_COMPLETE: @@ -3558,7 +3555,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, } /* add this chunk id to the endpoint */ - return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); + return sctp_auth_ep_add_chunkid(ep, val->sauth_chunk); } /* @@ -4699,7 +4696,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_maxburst(sk, kopt, optlen); break; case SCTP_AUTH_CHUNK: - retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); + retval = sctp_setsockopt_auth_chunk(sk, kopt, optlen); break; case SCTP_HMAC_IDENT: retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); From 3564ef442a174e42b58a215a55e4f0ef56b5fbb0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:02 +0200 Subject: [PATCH 1239/2056] sctp: pass a kernel pointer to sctp_setsockopt_hmac_ident Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f68aa3936df3..a573af7dfe41 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3565,13 +3565,11 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk, * endpoint requires the peer to use. 
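 *
 * A hedged userspace sketch (illustrative, not part of this patch; assumes
 * an SCTP socket sd): the option carries a variable-length sctp_hmacalgo,
 * shmac_num_idents followed by that many 16-bit identifiers. The
 * shmac_idents[] member and the SCTP_AUTH_HMAC_ID_* constants are the
 * assumed uapi names:
 *
 *	size_t len = sizeof(struct sctp_hmacalgo) + 2 * sizeof(__u16);
 *	struct sctp_hmacalgo *algo = calloc(1, len);
 *
 *	algo->shmac_num_idents = 2;
 *	algo->shmac_idents[0] = SCTP_AUTH_HMAC_ID_SHA256;
 *	algo->shmac_idents[1] = SCTP_AUTH_HMAC_ID_SHA1;
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_HMAC_IDENT, algo, len);
 *	free(algo);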
*/ static int sctp_setsockopt_hmac_ident(struct sock *sk, - char __user *optval, + struct sctp_hmacalgo *hmacs, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct sctp_hmacalgo *hmacs; u32 idents; - int err; if (!ep->auth_enable) return -EACCES; @@ -3581,21 +3579,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) + SCTP_AUTH_NUM_HMACS * sizeof(u16)); - hmacs = memdup_user(optval, optlen); - if (IS_ERR(hmacs)) - return PTR_ERR(hmacs); - idents = hmacs->shmac_num_idents; if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || - (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { - err = -EINVAL; - goto out; - } + (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) + return -EINVAL; - err = sctp_auth_ep_set_hmacs(ep, hmacs); -out: - kfree(hmacs); - return err; + return sctp_auth_ep_set_hmacs(ep, hmacs); } /* @@ -4699,7 +4688,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_auth_chunk(sk, kopt, optlen); break; case SCTP_HMAC_IDENT: - retval = sctp_setsockopt_hmac_ident(sk, optval, optlen); + retval = sctp_setsockopt_hmac_ident(sk, kopt, optlen); break; case SCTP_AUTH_KEY: retval = sctp_setsockopt_auth_key(sk, optval, optlen); From 89fae01eef8b1572c71d2cfcc3307c1f1e97b778 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:03 +0200 Subject: [PATCH 1240/2056] sctp: switch sctp_setsockopt_auth_key to use memzero_explicit Switch from kzfree to memzero_explicit + kfree to prepare for moving the kfree to common code. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a573af7dfe41..365145746b55 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3649,7 +3649,8 @@ static int sctp_setsockopt_auth_key(struct sock *sk, } out: - kzfree(authkey); + memzero_explicit(authkey, optlen); + kfree(authkey); return ret; } From 534d13d07e771d090d61cd98f96b2cbf29765cef Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:04 +0200 Subject: [PATCH 1241/2056] sctp: pass a kernel pointer to sctp_setsockopt_auth_key Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Adapt sctp_setsockopt to use a kzfree for this case. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 365145746b55..b4dcccba5787 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3594,11 +3594,10 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk, * association shared key.
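 *
 * A hedged userspace sketch (illustrative, not part of this patch; assumes
 * an SCTP socket sd). The option carries a variable-length sctp_authkey
 * whose sca_keylength this handler checks against optlen; sca_keynumber
 * and the trailing sca_key[] bytes are the assumed uapi names:
 *
 *	unsigned char secret[] = { 0x01, 0x02, 0x03, 0x04 };
 *	size_t len = sizeof(struct sctp_authkey) + sizeof(secret);
 *	struct sctp_authkey *key = calloc(1, len);
 *
 *	key->sca_assoc_id  = SCTP_FUTURE_ASSOC;
 *	key->sca_keynumber = 1;
 *	key->sca_keylength = sizeof(secret);
 *	memcpy(key->sca_key, secret, sizeof(secret));
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_KEY, key, len);
 *	free(key);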
*/ static int sctp_setsockopt_auth_key(struct sock *sk, - char __user *optval, + struct sctp_authkey *authkey, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct sctp_authkey *authkey; struct sctp_association *asoc; int ret = -EINVAL; @@ -3609,10 +3608,6 @@ static int sctp_setsockopt_auth_key(struct sock *sk, */ optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey)); - authkey = memdup_user(optval, optlen); - if (IS_ERR(authkey)) - return PTR_ERR(authkey); - if (authkey->sca_keylength > optlen - sizeof(*authkey)) goto out; @@ -3650,7 +3645,6 @@ static int sctp_setsockopt_auth_key(struct sock *sk, out: memzero_explicit(authkey, optlen); - kfree(authkey); return ret; } @@ -4692,7 +4686,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_hmac_ident(sk, kopt, optlen); break; case SCTP_AUTH_KEY: - retval = sctp_setsockopt_auth_key(sk, optval, optlen); + retval = sctp_setsockopt_auth_key(sk, kopt, optlen); break; case SCTP_AUTH_ACTIVE_KEY: retval = sctp_setsockopt_active_key(sk, optval, optlen); From dcab0a7a57a4c29e38af17c1106d2217e680995a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:05 +0200 Subject: [PATCH 1242/2056] sctp: pass a kernel pointer to sctp_setsockopt_active_key Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b4dcccba5787..02153f243683 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3655,42 +3655,39 @@ static int sctp_setsockopt_auth_key(struct sock *sk, * the association shared key. 
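 *
 * A hedged userspace sketch (illustrative, not part of this patch; assumes
 * an SCTP socket sd): make key number 1 the active key for all current and
 * future associations, using the scact_* fields this handler reads:
 *
 *	struct sctp_authkeyid keyid = {
 *		.scact_assoc_id  = SCTP_ALL_ASSOC,
 *		.scact_keynumber = 1,
 *	};
 *
 *	setsockopt(sd, IPPROTO_SCTP, SCTP_AUTH_ACTIVE_KEY,
 *		   &keyid, sizeof(keyid));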
*/ static int sctp_setsockopt_active_key(struct sock *sk, - char __user *optval, + struct sctp_authkeyid *val, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_association *asoc; - struct sctp_authkeyid val; int ret = 0; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; - if (copy_from_user(&val, optval, optlen)) - return -EFAULT; - asoc = sctp_id2assoc(sk, val.scact_assoc_id); - if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, val->scact_assoc_id); + if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) - return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); + return sctp_auth_set_active_key(ep, asoc, val->scact_keynumber); if (sctp_style(sk, TCP)) - val.scact_assoc_id = SCTP_FUTURE_ASSOC; + val->scact_assoc_id = SCTP_FUTURE_ASSOC; - if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { - ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); + if (val->scact_assoc_id == SCTP_FUTURE_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { + ret = sctp_auth_set_active_key(ep, asoc, val->scact_keynumber); if (ret) return ret; } - if (val.scact_assoc_id == SCTP_CURRENT_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { + if (val->scact_assoc_id == SCTP_CURRENT_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &ep->asocs, asocs) { int res = sctp_auth_set_active_key(ep, asoc, - val.scact_keynumber); + val->scact_keynumber); if (res && !ret) ret = res; @@ -4689,7 +4686,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_auth_key(sk, kopt, optlen); break; case SCTP_AUTH_ACTIVE_KEY: - retval = sctp_setsockopt_active_key(sk, optval, optlen); + retval = sctp_setsockopt_active_key(sk, kopt, optlen); break; case SCTP_AUTH_DELETE_KEY: retval = sctp_setsockopt_del_key(sk, optval, optlen); From 97dc9f2e3e8bdea754f59bd788ce25a0cb11f00d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:06 +0200 Subject: [PATCH 1243/2056] sctp: pass a kernel pointer to sctp_setsockopt_del_key Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 02153f243683..b692b9376d9d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3703,42 +3703,39 @@ static int sctp_setsockopt_active_key(struct sock *sk, * This set option will delete a shared secret key from use. 
*/ static int sctp_setsockopt_del_key(struct sock *sk, - char __user *optval, + struct sctp_authkeyid *val, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_association *asoc; - struct sctp_authkeyid val; int ret = 0; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; - if (copy_from_user(&val, optval, optlen)) - return -EFAULT; - asoc = sctp_id2assoc(sk, val.scact_assoc_id); - if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, val->scact_assoc_id); + if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) - return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); + return sctp_auth_del_key_id(ep, asoc, val->scact_keynumber); if (sctp_style(sk, TCP)) - val.scact_assoc_id = SCTP_FUTURE_ASSOC; + val->scact_assoc_id = SCTP_FUTURE_ASSOC; - if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { - ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); + if (val->scact_assoc_id == SCTP_FUTURE_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { + ret = sctp_auth_del_key_id(ep, asoc, val->scact_keynumber); if (ret) return ret; } - if (val.scact_assoc_id == SCTP_CURRENT_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { + if (val->scact_assoc_id == SCTP_CURRENT_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &ep->asocs, asocs) { int res = sctp_auth_del_key_id(ep, asoc, - val.scact_keynumber); + val->scact_keynumber); if (res && !ret) ret = res; @@ -4689,7 +4686,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_active_key(sk, kopt, optlen); break; case SCTP_AUTH_DELETE_KEY: - retval = sctp_setsockopt_del_key(sk, optval, optlen); + retval = sctp_setsockopt_del_key(sk, kopt, optlen); break; case SCTP_AUTH_DEACTIVATE_KEY: retval = sctp_setsockopt_deactivate_key(sk, optval, optlen); From 76b3d0c4455b61f74301a1aae2dcdc243921f379 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:07 +0200 Subject: [PATCH 1244/2056] sctp: pass a kernel pointer to sctp_setsockopt_deactivate_key Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b692b9376d9d..ab155c15939e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3750,42 +3750,40 @@ static int sctp_setsockopt_del_key(struct sock *sk, * * This set option will deactivate a shared secret key. 
*/ -static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval, +static int sctp_setsockopt_deactivate_key(struct sock *sk, + struct sctp_authkeyid *val, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; struct sctp_association *asoc; - struct sctp_authkeyid val; int ret = 0; if (optlen != sizeof(struct sctp_authkeyid)) return -EINVAL; - if (copy_from_user(&val, optval, optlen)) - return -EFAULT; - asoc = sctp_id2assoc(sk, val.scact_assoc_id); - if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, val->scact_assoc_id); + if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) - return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); + return sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber); if (sctp_style(sk, TCP)) - val.scact_assoc_id = SCTP_FUTURE_ASSOC; + val->scact_assoc_id = SCTP_FUTURE_ASSOC; - if (val.scact_assoc_id == SCTP_FUTURE_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { - ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber); + if (val->scact_assoc_id == SCTP_FUTURE_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { + ret = sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber); if (ret) return ret; } - if (val.scact_assoc_id == SCTP_CURRENT_ASSOC || - val.scact_assoc_id == SCTP_ALL_ASSOC) { + if (val->scact_assoc_id == SCTP_CURRENT_ASSOC || + val->scact_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &ep->asocs, asocs) { int res = sctp_auth_deact_key_id(ep, asoc, - val.scact_keynumber); + val->scact_keynumber); if (res && !ret) ret = res; @@ -4689,7 +4687,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_del_key(sk, kopt, optlen); break; case SCTP_AUTH_DEACTIVATE_KEY: - retval = sctp_setsockopt_deactivate_key(sk, optval, optlen); + retval = sctp_setsockopt_deactivate_key(sk, kopt, optlen); break; case SCTP_AUTO_ASCONF: retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); From c9abc2c1c2973a30750fa00bfc912b683d88d1bc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:08 +0200 Subject: [PATCH 1245/2056] sctp: pass a kernel pointer to sctp_setsockopt_auto_asconf Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ab155c15939e..64f2a967ddf5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3807,26 +3807,23 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, * Note. 
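The SCTP_AUTO_ASCONF conversion below applies the same pattern to a plain int option: the get_user() call disappears and the helper simply dereferences the already-copied integer. From userspace the option is exercised as usual (illustrative sketch, assuming lksctp-tools headers):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    /* Enable automatic ASCONF address updates on an SCTP socket. */
    static int enable_auto_asconf(int fd)
    {
            int on = 1;

            return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF,
                              &on, sizeof(on));
    }
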
In this implementation, socket operation overrides default parameter * being set by sysctl as well as FreeBSD implementation */ -static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, +static int sctp_setsockopt_auto_asconf(struct sock *sk, int *val, unsigned int optlen) { - int val; struct sctp_sock *sp = sctp_sk(sk); if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - if (!sctp_is_ep_boundall(sk) && val) + if (!sctp_is_ep_boundall(sk) && *val) return -EINVAL; - if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) + if ((*val && sp->do_auto_asconf) || (!*val && !sp->do_auto_asconf)) return 0; spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); - if (val == 0 && sp->do_auto_asconf) { + if (*val == 0 && sp->do_auto_asconf) { list_del(&sp->auto_asconf_list); sp->do_auto_asconf = 0; - } else if (val && !sp->do_auto_asconf) { + } else if (*val && !sp->do_auto_asconf) { list_add_tail(&sp->auto_asconf_list, &sock_net(sk)->sctp.auto_asconf_splist); sp->do_auto_asconf = 1; @@ -4690,7 +4687,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_deactivate_key(sk, kopt, optlen); break; case SCTP_AUTO_ASCONF: - retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); + retval = sctp_setsockopt_auto_asconf(sk, kopt, optlen); break; case SCTP_PEER_ADDR_THLDS: retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen, From b0ac3bb89464f6c54d860052aa52774c1ab95436 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:09 +0200 Subject: [PATCH 1246/2056] sctp: pass a kernel pointer to sctp_setsockopt_paddr_thresholds Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 55 ++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 29 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 64f2a967ddf5..ea027ea74ba5 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3840,66 +3840,63 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, int *val, * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt */ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, - char __user *optval, + struct sctp_paddrthlds_v2 *val, unsigned int optlen, bool v2) { - struct sctp_paddrthlds_v2 val; struct sctp_transport *trans; struct sctp_association *asoc; int len; - len = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds); + len = v2 ? 
sizeof(*val) : sizeof(struct sctp_paddrthlds); if (optlen < len) return -EINVAL; - if (copy_from_user(&val, optval, len)) - return -EFAULT; - if (v2 && val.spt_pathpfthld > val.spt_pathcpthld) + if (v2 && val->spt_pathpfthld > val->spt_pathcpthld) return -EINVAL; - if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { - trans = sctp_addr_id2transport(sk, &val.spt_address, - val.spt_assoc_id); + if (!sctp_is_any(sk, (const union sctp_addr *)&val->spt_address)) { + trans = sctp_addr_id2transport(sk, &val->spt_address, + val->spt_assoc_id); if (!trans) return -ENOENT; - if (val.spt_pathmaxrxt) - trans->pathmaxrxt = val.spt_pathmaxrxt; + if (val->spt_pathmaxrxt) + trans->pathmaxrxt = val->spt_pathmaxrxt; if (v2) - trans->ps_retrans = val.spt_pathcpthld; - trans->pf_retrans = val.spt_pathpfthld; + trans->ps_retrans = val->spt_pathcpthld; + trans->pf_retrans = val->spt_pathpfthld; return 0; } - asoc = sctp_id2assoc(sk, val.spt_assoc_id); - if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, val->spt_assoc_id); + if (!asoc && val->spt_assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) { list_for_each_entry(trans, &asoc->peer.transport_addr_list, transports) { - if (val.spt_pathmaxrxt) - trans->pathmaxrxt = val.spt_pathmaxrxt; + if (val->spt_pathmaxrxt) + trans->pathmaxrxt = val->spt_pathmaxrxt; if (v2) - trans->ps_retrans = val.spt_pathcpthld; - trans->pf_retrans = val.spt_pathpfthld; + trans->ps_retrans = val->spt_pathcpthld; + trans->pf_retrans = val->spt_pathpfthld; } - if (val.spt_pathmaxrxt) - asoc->pathmaxrxt = val.spt_pathmaxrxt; + if (val->spt_pathmaxrxt) + asoc->pathmaxrxt = val->spt_pathmaxrxt; if (v2) - asoc->ps_retrans = val.spt_pathcpthld; - asoc->pf_retrans = val.spt_pathpfthld; + asoc->ps_retrans = val->spt_pathcpthld; + asoc->pf_retrans = val->spt_pathpfthld; } else { struct sctp_sock *sp = sctp_sk(sk); - if (val.spt_pathmaxrxt) - sp->pathmaxrxt = val.spt_pathmaxrxt; + if (val->spt_pathmaxrxt) + sp->pathmaxrxt = val->spt_pathmaxrxt; if (v2) - sp->ps_retrans = val.spt_pathcpthld; - sp->pf_retrans = val.spt_pathpfthld; + sp->ps_retrans = val->spt_pathcpthld; + sp->pf_retrans = val->spt_pathpfthld; } return 0; @@ -4690,11 +4687,11 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_auto_asconf(sk, kopt, optlen); break; case SCTP_PEER_ADDR_THLDS: - retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen, + retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen, false); break; case SCTP_PEER_ADDR_THLDS_V2: - retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen, + retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen, true); break; case SCTP_RECVRCVINFO: From a98af7c84ad95d45058050ad3b9bb0cd31d93614 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:10 +0200 Subject: [PATCH 1247/2056] sctp: pass a kernel pointer to sctp_setsockopt_recvrcvinfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ea027ea74ba5..ac56fb7eb394 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3902,18 +3902,13 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk, return 0; } -static int sctp_setsockopt_recvrcvinfo(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_recvrcvinfo(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *) optval)) - return -EFAULT; - sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; + sctp_sk(sk)->recvrcvinfo = (*val == 0) ? 0 : 1; return 0; } @@ -4695,7 +4690,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, true); break; case SCTP_RECVRCVINFO: - retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); + retval = sctp_setsockopt_recvrcvinfo(sk, kopt, optlen); break; case SCTP_RECVNXTINFO: retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); From cfa6fde26642553fcb0cdbfefe80f075eb6173f2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:11 +0200 Subject: [PATCH 1248/2056] sctp: pass a kernel pointer to sctp_setsockopt_recvnxtinfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ac56fb7eb394..7beb26e5749d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3913,18 +3913,13 @@ static int sctp_setsockopt_recvrcvinfo(struct sock *sk, int *val, return 0; } -static int sctp_setsockopt_recvnxtinfo(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_recvnxtinfo(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *) optval)) - return -EFAULT; - sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; + sctp_sk(sk)->recvnxtinfo = (*val == 0) ? 0 : 1; return 0; } @@ -4693,7 +4688,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_recvrcvinfo(sk, kopt, optlen); break; case SCTP_RECVNXTINFO: - retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); + retval = sctp_setsockopt_recvnxtinfo(sk, kopt, optlen); break; case SCTP_PR_SUPPORTED: retval = sctp_setsockopt_pr_supported(sk, optval, optlen); From 4a97fa4f099b9ac82d0e0adbd2f63fccf3dd96fc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:12 +0200 Subject: [PATCH 1249/2056] sctp: pass a kernel pointer to sctp_setsockopt_pr_supported Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 7beb26e5749d..245186327896 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3925,24 +3925,20 @@ static int sctp_setsockopt_recvnxtinfo(struct sock *sk, int *val, } static int sctp_setsockopt_pr_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) return -EINVAL; - if (copy_from_user(¶ms, optval, optlen)) - return -EFAULT; - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) return -EINVAL; - sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value; + sctp_sk(sk)->ep->prsctp_enable = !!params->assoc_value; return 0; } @@ -4691,7 +4687,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_recvnxtinfo(sk, kopt, optlen); break; case SCTP_PR_SUPPORTED: - retval = sctp_setsockopt_pr_supported(sk, optval, optlen); + retval = sctp_setsockopt_pr_supported(sk, kopt, optlen); break; case SCTP_DEFAULT_PRINFO: retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); From ac37435bfe9e09fc107daf6eddb57831b5961708 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:13 +0200 Subject: [PATCH 1250/2056] sctp: pass a kernel pointer to sctp_setsockopt_default_prinfo Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 45 ++++++++++++++++++++------------------------- 1 file changed, 20 insertions(+), 25 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 245186327896..0eeb6e6162ad 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3944,55 +3944,50 @@ static int sctp_setsockopt_pr_supported(struct sock *sk, } static int sctp_setsockopt_default_prinfo(struct sock *sk, - char __user *optval, + struct sctp_default_prinfo *info, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); - struct sctp_default_prinfo info; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen != sizeof(info)) + if (optlen != sizeof(*info)) goto out; - if (copy_from_user(&info, optval, sizeof(info))) { - retval = -EFAULT; - goto out; - } - - if (info.pr_policy & ~SCTP_PR_SCTP_MASK) + if (info->pr_policy & ~SCTP_PR_SCTP_MASK) goto out; - if (info.pr_policy == SCTP_PR_SCTP_NONE) - info.pr_value = 0; + if (info->pr_policy == SCTP_PR_SCTP_NONE) + info->pr_value = 0; - asoc = sctp_id2assoc(sk, info.pr_assoc_id); - if (!asoc && info.pr_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, info->pr_assoc_id); + if (!asoc && info->pr_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) goto out; retval = 0; if (asoc) { - SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); - asoc->default_timetolive = info.pr_value; + SCTP_PR_SET_POLICY(asoc->default_flags, info->pr_policy); + asoc->default_timetolive = info->pr_value; goto out; } if (sctp_style(sk, TCP)) - info.pr_assoc_id = SCTP_FUTURE_ASSOC; + info->pr_assoc_id = SCTP_FUTURE_ASSOC; - if (info.pr_assoc_id == SCTP_FUTURE_ASSOC || - info.pr_assoc_id == SCTP_ALL_ASSOC) { - SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy); - sp->default_timetolive = info.pr_value; + if (info->pr_assoc_id == SCTP_FUTURE_ASSOC || + info->pr_assoc_id == SCTP_ALL_ASSOC) { + SCTP_PR_SET_POLICY(sp->default_flags, info->pr_policy); + sp->default_timetolive = info->pr_value; } - if (info.pr_assoc_id == SCTP_CURRENT_ASSOC || - info.pr_assoc_id == SCTP_ALL_ASSOC) { + if (info->pr_assoc_id == SCTP_CURRENT_ASSOC || + info->pr_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { - SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy); - asoc->default_timetolive = info.pr_value; + SCTP_PR_SET_POLICY(asoc->default_flags, + info->pr_policy); + asoc->default_timetolive = info->pr_value; } } @@ -4690,7 +4685,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_pr_supported(sk, kopt, optlen); break; case SCTP_DEFAULT_PRINFO: - retval = sctp_setsockopt_default_prinfo(sk, optval, optlen); + retval = sctp_setsockopt_default_prinfo(sk, kopt, optlen); break; case SCTP_RECONFIG_SUPPORTED: retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); From 3f49f72035d5625c06a23d72c22feee7f0d2b068 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:14 +0200 Subject: [PATCH 1251/2056] sctp: pass a kernel pointer to sctp_setsockopt_reconfig_supported Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0eeb6e6162ad..270c5d53fbf7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3996,27 +3996,21 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk, } static int sctp_setsockopt_reconfig_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) goto out; - sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value; + sctp_sk(sk)->ep->reconf_enable = !!params->assoc_value; retval = 0; @@ -4688,7 +4682,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_default_prinfo(sk, kopt, optlen); break; case SCTP_RECONFIG_SUPPORTED: - retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen); + retval = sctp_setsockopt_reconfig_supported(sk, kopt, optlen); break; case SCTP_ENABLE_STREAM_RESET: retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); From 356dc6f16a96615d89c8aab3ca18c5bc7c3d4baa Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:15 +0200 Subject: [PATCH 1252/2056] sctp: pass a kernel pointer to sctp_setsockopt_enable_strreset Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 34 ++++++++++++++-------------------- 1 file changed, 14 insertions(+), 20 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 270c5d53fbf7..9899d208f40f 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4019,48 +4019,42 @@ static int sctp_setsockopt_reconfig_supported(struct sock *sk, } static int sctp_setsockopt_enable_strreset(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { struct sctp_endpoint *ep = sctp_sk(sk)->ep; - struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) + if (params->assoc_value & (~SCTP_ENABLE_STRRESET_MASK)) goto out; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) goto out; retval = 0; if (asoc) { - asoc->strreset_enable = params.assoc_value; + asoc->strreset_enable = params->assoc_value; goto out; } if (sctp_style(sk, TCP)) - params.assoc_id = SCTP_FUTURE_ASSOC; + params->assoc_id = SCTP_FUTURE_ASSOC; - if (params.assoc_id == SCTP_FUTURE_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) - ep->strreset_enable = params.assoc_value; + if (params->assoc_id == SCTP_FUTURE_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) + ep->strreset_enable = params->assoc_value; - if (params.assoc_id == SCTP_CURRENT_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) + if (params->assoc_id == SCTP_CURRENT_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) list_for_each_entry(asoc, &ep->asocs, asocs) - asoc->strreset_enable = params.assoc_value; + asoc->strreset_enable = params->assoc_value; out: return retval; @@ -4685,7 +4679,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_reconfig_supported(sk, kopt, optlen); break; case SCTP_ENABLE_STREAM_RESET: - retval = sctp_setsockopt_enable_strreset(sk, optval, optlen); + retval = sctp_setsockopt_enable_strreset(sk, kopt, optlen); break; case SCTP_RESET_STREAMS: retval = sctp_setsockopt_reset_streams(sk, optval, optlen); From d492243435bdffec229b1a9d694e5864e802833a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:16 +0200 Subject: [PATCH 1253/2056] sctp: pass a kernel pointer to sctp_setsockopt_reset_streams Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9899d208f40f..1365351fd2c8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4061,12 +4061,10 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk, } static int sctp_setsockopt_reset_streams(struct sock *sk, - char __user *optval, + struct sctp_reset_streams *params, unsigned int optlen) { - struct sctp_reset_streams *params; struct sctp_association *asoc; - int retval = -EINVAL; if (optlen < sizeof(*params)) return -EINVAL; @@ -4074,23 +4072,15 @@ static int sctp_setsockopt_reset_streams(struct sock *sk, optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(__u16) * sizeof(*params)); - params = memdup_user(optval, optlen); - if (IS_ERR(params)) - return PTR_ERR(params); - if (params->srs_number_streams * sizeof(__u16) > optlen - sizeof(*params)) - goto out; + return -EINVAL; asoc = sctp_id2assoc(sk, params->srs_assoc_id); if (!asoc) - goto out; + return -EINVAL; - retval = sctp_send_reset_streams(asoc, params); - -out: - kfree(params); - return retval; + return sctp_send_reset_streams(asoc, params); } static int sctp_setsockopt_reset_assoc(struct sock *sk, @@ -4682,7 +4672,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_enable_strreset(sk, kopt, optlen); break; case SCTP_RESET_STREAMS: - retval = sctp_setsockopt_reset_streams(sk, optval, optlen); + retval = sctp_setsockopt_reset_streams(sk, kopt, optlen); break; case SCTP_RESET_ASSOC: retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); From b97d20ce531e13ce6e65fc8bd2d726a0ec24b5a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:17 +0200 Subject: [PATCH 1254/2056] sctp: pass a kernel pointer to sctp_setsockopt_reset_assoc Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1365351fd2c8..f190f59f2959 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4083,30 +4083,19 @@ static int sctp_setsockopt_reset_streams(struct sock *sk, return sctp_send_reset_streams(asoc, params); } -static int sctp_setsockopt_reset_assoc(struct sock *sk, - char __user *optval, +static int sctp_setsockopt_reset_assoc(struct sock *sk, sctp_assoc_t *associd, unsigned int optlen) { struct sctp_association *asoc; - sctp_assoc_t associd; - int retval = -EINVAL; - if (optlen != sizeof(associd)) - goto out; + if (optlen != sizeof(*associd)) + return -EINVAL; - if (copy_from_user(&associd, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, associd); + asoc = sctp_id2assoc(sk, *associd); if (!asoc) - goto out; + return -EINVAL; - retval = sctp_send_reset_assoc(asoc); - -out: - return retval; + return sctp_send_reset_assoc(asoc); } static int sctp_setsockopt_add_streams(struct sock *sk, @@ -4675,7 +4664,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_reset_streams(sk, kopt, optlen); break; case SCTP_RESET_ASSOC: - retval = sctp_setsockopt_reset_assoc(sk, optval, optlen); + retval = sctp_setsockopt_reset_assoc(sk, kopt, optlen); break; case SCTP_ADD_STREAMS: retval = sctp_setsockopt_add_streams(sk, optval, optlen); From 4d6fb2606252a4a001388f52808edbf3c624d033 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:18 +0200 Subject: [PATCH 1255/2056] sctp: pass a kernel pointer to sctp_setsockopt_add_streams Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f190f59f2959..4dedb94bca77 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4099,29 +4099,19 @@ static int sctp_setsockopt_reset_assoc(struct sock *sk, sctp_assoc_t *associd, } static int sctp_setsockopt_add_streams(struct sock *sk, - char __user *optval, + struct sctp_add_streams *params, unsigned int optlen) { struct sctp_association *asoc; - struct sctp_add_streams params; - int retval = -EINVAL; - if (optlen != sizeof(params)) - goto out; + if (optlen != sizeof(*params)) + return -EINVAL; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.sas_assoc_id); + asoc = sctp_id2assoc(sk, params->sas_assoc_id); if (!asoc) - goto out; + return -EINVAL; - retval = sctp_send_add_streams(asoc, ¶ms); - -out: - return retval; + return sctp_send_add_streams(asoc, params); } static int sctp_setsockopt_scheduler(struct sock *sk, @@ -4667,7 +4657,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_reset_assoc(sk, kopt, optlen); break; case SCTP_ADD_STREAMS: - retval = sctp_setsockopt_add_streams(sk, optval, optlen); + retval = sctp_setsockopt_add_streams(sk, kopt, optlen); break; case SCTP_STREAM_SCHEDULER: retval = sctp_setsockopt_scheduler(sk, optval, optlen); From 4d2fba3a7e7bda272447bf77377bf330f8a83ec7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:19 +0200 Subject: [PATCH 1256/2056] sctp: pass a kernel pointer to sctp_setsockopt_scheduler Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 4dedb94bca77..a7d6a66fbe21 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4115,44 +4115,39 @@ static int sctp_setsockopt_add_streams(struct sock *sk, } static int sctp_setsockopt_scheduler(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - struct sctp_assoc_value params; int retval = 0; - if (optlen < sizeof(params)) + if (optlen < sizeof(*params)) return -EINVAL; - optlen = sizeof(params); - if (copy_from_user(¶ms, optval, optlen)) - return -EFAULT; - - if (params.assoc_value > SCTP_SS_MAX) + if (params->assoc_value > SCTP_SS_MAX) return -EINVAL; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) - return sctp_sched_set_sched(asoc, params.assoc_value); + return sctp_sched_set_sched(asoc, params->assoc_value); if (sctp_style(sk, TCP)) - params.assoc_id = SCTP_FUTURE_ASSOC; + params->assoc_id = SCTP_FUTURE_ASSOC; - if (params.assoc_id == SCTP_FUTURE_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) - sp->default_ss = params.assoc_value; + if (params->assoc_id == SCTP_FUTURE_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) + sp->default_ss = params->assoc_value; - if (params.assoc_id == SCTP_CURRENT_ASSOC || - params.assoc_id == SCTP_ALL_ASSOC) { + if (params->assoc_id == SCTP_CURRENT_ASSOC || + params->assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { int ret = sctp_sched_set_sched(asoc, - params.assoc_value); + params->assoc_value); if (ret && !retval) retval = ret; @@ -4660,7 +4655,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_add_streams(sk, kopt, optlen); break; case SCTP_STREAM_SCHEDULER: - retval = sctp_setsockopt_scheduler(sk, optval, optlen); + retval = sctp_setsockopt_scheduler(sk, kopt, optlen); break; case SCTP_STREAM_SCHEDULER_VALUE: retval = sctp_setsockopt_scheduler_value(sk, optval, optlen); From d636e7f31fcc986525cb853f0fabccc2408f0840 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:20 +0200 Subject: [PATCH 1257/2056] sctp: pass a kernel pointer to sctp_setsockopt_scheduler_value Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a7d6a66fbe21..4b660e6bf3bb 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4158,38 +4158,32 @@ static int sctp_setsockopt_scheduler(struct sock *sk, } static int sctp_setsockopt_scheduler_value(struct sock *sk, - char __user *optval, + struct sctp_stream_value *params, unsigned int optlen) { - struct sctp_stream_value params; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen < sizeof(params)) + if (optlen < sizeof(*params)) goto out; - optlen = sizeof(params); - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_CURRENT_ASSOC && sctp_style(sk, UDP)) goto out; if (asoc) { - retval = sctp_sched_set_value(asoc, params.stream_id, - params.stream_value, GFP_KERNEL); + retval = sctp_sched_set_value(asoc, params->stream_id, + params->stream_value, GFP_KERNEL); goto out; } retval = 0; list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) { - int ret = sctp_sched_set_value(asoc, params.stream_id, - params.stream_value, GFP_KERNEL); + int ret = sctp_sched_set_value(asoc, params->stream_id, + params->stream_value, + GFP_KERNEL); if (ret && !retval) /* try to return the 1st error. */ retval = ret; } @@ -4658,7 +4652,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_scheduler(sk, kopt, optlen); break; case SCTP_STREAM_SCHEDULER_VALUE: - retval = sctp_setsockopt_scheduler_value(sk, optval, optlen); + retval = sctp_setsockopt_scheduler_value(sk, kopt, optlen); break; case SCTP_INTERLEAVING_SUPPORTED: retval = sctp_setsockopt_interleaving_supported(sk, optval, From 5b8d3b24467a41388097411ac0b2fc1f4b10bbf5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:21 +0200 Subject: [PATCH 1258/2056] sctp: pass a kernel pointer to sctp_setsockopt_interleaving_supported Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 34 ++++++++++------------------------ 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 4b660e6bf3bb..6232e46c4cde 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4193,39 +4193,25 @@ static int sctp_setsockopt_scheduler_value(struct sock *sk, } static int sctp_setsockopt_interleaving_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *p, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); - struct sctp_assoc_value params; struct sctp_association *asoc; - int retval = -EINVAL; - if (optlen < sizeof(params)) - goto out; + if (optlen < sizeof(*p)) + return -EINVAL; - optlen = sizeof(params); - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && - sctp_style(sk, UDP)) - goto out; + asoc = sctp_id2assoc(sk, p->assoc_id); + if (!asoc && p->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) + return -EINVAL; if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) { - retval = -EPERM; - goto out; + return -EPERM; } - sp->ep->intl_enable = !!params.assoc_value; - - retval = 0; - -out: - return retval; + sp->ep->intl_enable = !!p->assoc_value; + return 0; } static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval, @@ -4655,7 +4641,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_scheduler_value(sk, kopt, optlen); break; case SCTP_INTERLEAVING_SUPPORTED: - retval = sctp_setsockopt_interleaving_supported(sk, optval, + retval = sctp_setsockopt_interleaving_supported(sk, kopt, optlen); break; case SCTP_REUSE_PORT: From a42624669e1afd183a82dbfe7cc29cc1a403c1fe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:22 +0200 Subject: [PATCH 1259/2056] sctp: pass a kernel pointer to sctp_setsockopt_reuse_port Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6232e46c4cde..a5b95e68cc12 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4214,11 +4214,9 @@ static int sctp_setsockopt_interleaving_supported(struct sock *sk, return 0; } -static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval, +static int sctp_setsockopt_reuse_port(struct sock *sk, int *val, unsigned int optlen) { - int val; - if (!sctp_style(sk, TCP)) return -EOPNOTSUPP; @@ -4228,10 +4226,7 @@ static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - - sctp_sk(sk)->reuse = !!val; + sctp_sk(sk)->reuse = !!*val; return 0; } @@ -4645,7 +4640,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, optlen); break; case SCTP_REUSE_PORT: - retval = sctp_setsockopt_reuse_port(sk, optval, optlen); + retval = sctp_setsockopt_reuse_port(sk, kopt, optlen); break; case SCTP_EVENT: retval = sctp_setsockopt_event(sk, optval, optlen); From 565059cb9b24bd7804948802ef38cc99c35c2c64 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:23 +0200 Subject: [PATCH 1260/2056] sctp: pass a kernel pointer to sctp_setsockopt_event Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a5b95e68cc12..12a33fc42eee 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4252,45 +4252,40 @@ static int sctp_assoc_ulpevent_type_set(struct sctp_event *param, return 0; } -static int sctp_setsockopt_event(struct sock *sk, char __user *optval, +static int sctp_setsockopt_event(struct sock *sk, struct sctp_event *param, unsigned int optlen) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - struct sctp_event param; int retval = 0; - if (optlen < sizeof(param)) + if (optlen < sizeof(*param)) return -EINVAL; - optlen = sizeof(param); - if (copy_from_user(¶m, optval, optlen)) - return -EFAULT; - - if (param.se_type < SCTP_SN_TYPE_BASE || - param.se_type > SCTP_SN_TYPE_MAX) + if (param->se_type < SCTP_SN_TYPE_BASE || + param->se_type > SCTP_SN_TYPE_MAX) return -EINVAL; - asoc = sctp_id2assoc(sk, param.se_assoc_id); - if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC && + asoc = sctp_id2assoc(sk, param->se_assoc_id); + if (!asoc && param->se_assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP)) return -EINVAL; if (asoc) - return sctp_assoc_ulpevent_type_set(¶m, asoc); + return sctp_assoc_ulpevent_type_set(param, asoc); if (sctp_style(sk, TCP)) - param.se_assoc_id = SCTP_FUTURE_ASSOC; + param->se_assoc_id = SCTP_FUTURE_ASSOC; - if (param.se_assoc_id == SCTP_FUTURE_ASSOC || - param.se_assoc_id == SCTP_ALL_ASSOC) + if (param->se_assoc_id == SCTP_FUTURE_ASSOC || + param->se_assoc_id == SCTP_ALL_ASSOC) sctp_ulpevent_type_set(&sp->subscribe, - param.se_type, param.se_on); + param->se_type, param->se_on); - if (param.se_assoc_id == SCTP_CURRENT_ASSOC || - param.se_assoc_id == SCTP_ALL_ASSOC) { + if (param->se_assoc_id == SCTP_CURRENT_ASSOC || + param->se_assoc_id == SCTP_ALL_ASSOC) { list_for_each_entry(asoc, &sp->ep->asocs, asocs) { - int ret = sctp_assoc_ulpevent_type_set(¶m, asoc); + int ret = 
sctp_assoc_ulpevent_type_set(param, asoc); if (ret && !retval) retval = ret; @@ -4643,7 +4638,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_reuse_port(sk, kopt, optlen); break; case SCTP_EVENT: - retval = sctp_setsockopt_event(sk, optval, optlen); + retval = sctp_setsockopt_event(sk, kopt, optlen); break; case SCTP_ASCONF_SUPPORTED: retval = sctp_setsockopt_asconf_supported(sk, optval, optlen); From 9263ac97af46e0f31ba38d730e45c64dc701954e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:24 +0200 Subject: [PATCH 1261/2056] sctp: pass a kernel pointer to sctp_setsockopt_event Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 12a33fc42eee..b52acbf231ab 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4296,29 +4296,23 @@ static int sctp_setsockopt_event(struct sock *sk, struct sctp_event *param, } static int sctp_setsockopt_asconf_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_endpoint *ep; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) goto out; ep = sctp_sk(sk)->ep; - ep->asconf_enable = !!params.assoc_value; + ep->asconf_enable = !!params->assoc_value; if (ep->asconf_enable && ep->auth_enable) { sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF); @@ -4641,7 +4635,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_event(sk, kopt, optlen); break; case SCTP_ASCONF_SUPPORTED: - retval = sctp_setsockopt_asconf_supported(sk, optval, optlen); + retval = sctp_setsockopt_asconf_supported(sk, kopt, optlen); break; case SCTP_AUTH_SUPPORTED: retval = sctp_setsockopt_auth_supported(sk, optval, optlen); From 963855a938cad73b950fb42fa5376e8ab3fd1475 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:25 +0200 Subject: [PATCH 1262/2056] sctp: pass a kernel pointer to sctp_setsockopt_auth_supported Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b52acbf231ab..ca369447237a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4326,29 +4326,23 @@ static int sctp_setsockopt_asconf_supported(struct sock *sk, } static int sctp_setsockopt_auth_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; struct sctp_endpoint *ep; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) goto out; ep = sctp_sk(sk)->ep; - if (params.assoc_value) { + if (params->assoc_value) { retval = sctp_auth_init(ep, GFP_KERNEL); if (retval) goto out; @@ -4358,7 +4352,7 @@ static int sctp_setsockopt_auth_supported(struct sock *sk, } } - ep->auth_enable = !!params.assoc_value; + ep->auth_enable = !!params->assoc_value; retval = 0; out: @@ -4638,7 +4632,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_asconf_supported(sk, kopt, optlen); break; case SCTP_AUTH_SUPPORTED: - retval = sctp_setsockopt_auth_supported(sk, optval, optlen); + retval = sctp_setsockopt_auth_supported(sk, kopt, optlen); break; case SCTP_ECN_SUPPORTED: retval = sctp_setsockopt_ecn_supported(sk, optval, optlen); From 92c4f172552a37f1dbc385615b4497bd15df1d8f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:26 +0200 Subject: [PATCH 1263/2056] sctp: pass a kernel pointer to sctp_setsockopt_ecn_supported Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index ca369447237a..9f7f9aa50d4b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4360,27 +4360,21 @@ static int sctp_setsockopt_auth_supported(struct sock *sk, } static int sctp_setsockopt_ecn_supported(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) goto out; - sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value; + sctp_sk(sk)->ep->ecn_enable = !!params->assoc_value; retval = 0; out: @@ -4635,7 +4629,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_auth_supported(sk, kopt, optlen); break; case SCTP_ECN_SUPPORTED: - retval = sctp_setsockopt_ecn_supported(sk, optval, optlen); + retval = sctp_setsockopt_ecn_supported(sk, kopt, optlen); break; case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE: retval = sctp_setsockopt_pf_expose(sk, optval, optlen); From 26feba8090773550f255aed911d8f824ca42b7eb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:27 +0200 Subject: [PATCH 1264/2056] sctp: pass a kernel pointer to sctp_setsockopt_pf_expose Use the kernel pointer that sctp_setsockopt has available instead of directly handling the user pointer. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/sctp/socket.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9f7f9aa50d4b..f2d4f8a0c426 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4382,33 +4382,27 @@ static int sctp_setsockopt_ecn_supported(struct sock *sk, } static int sctp_setsockopt_pf_expose(struct sock *sk, - char __user *optval, + struct sctp_assoc_value *params, unsigned int optlen) { - struct sctp_assoc_value params; struct sctp_association *asoc; int retval = -EINVAL; - if (optlen != sizeof(params)) + if (optlen != sizeof(*params)) goto out; - if (copy_from_user(¶ms, optval, optlen)) { - retval = -EFAULT; - goto out; - } - - if (params.assoc_value > SCTP_PF_EXPOSE_MAX) + if (params->assoc_value > SCTP_PF_EXPOSE_MAX) goto out; - asoc = sctp_id2assoc(sk, params.assoc_id); - if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + asoc = sctp_id2assoc(sk, params->assoc_id); + if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP)) goto out; if (asoc) - asoc->pf_expose = params.assoc_value; + asoc->pf_expose = params->assoc_value; else - sctp_sk(sk)->pf_expose = params.assoc_value; + sctp_sk(sk)->pf_expose = params->assoc_value; retval = 0; out: @@ -4632,7 +4626,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, retval = sctp_setsockopt_ecn_supported(sk, kopt, optlen); break; case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE: - retval = sctp_setsockopt_pf_expose(sk, optval, optlen); + retval = sctp_setsockopt_pf_expose(sk, kopt, optlen); break; default: retval = -ENOPROTOOPT; From 6c8983a606622b61a429830091fdfe643328b96a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sun, 19 Jul 2020 09:22:28 +0200 Subject: [PATCH 1265/2056] sctp: remove the out_nounlock label in sctp_setsockopt This is just used once, and a direct return for the redirect to the AF case is much easier to follow than jumping to the end of a very long function. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/sctp/socket.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f2d4f8a0c426..9a767f359718 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4444,8 +4444,8 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, */ if (level != SOL_SCTP) { struct sctp_af *af = sctp_sk(sk)->pf->af; - retval = af->setsockopt(sk, level, optname, optval, optlen); - goto out_nounlock; + + return af->setsockopt(sk, level, optname, optval, optlen); } if (optlen > 0) { @@ -4635,8 +4635,6 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, release_sock(sk); kfree(kopt); - -out_nounlock: return retval; } From 2f0bc54ba9a85032f2402faf0fccab513848300a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:29 +0200 Subject: [PATCH 1266/2056] xdp: introduce xdp_get_shared_info_from_{buff, frame} utility routines Introduce xdp_get_shared_info_from_{buff,frame} utility routines to get skb_shared_info from xdp buffer/frame pointer. xdp_get_shared_info_from_{buff,frame} will be used to implement xdp multi-buffer support Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- include/net/xdp.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/include/net/xdp.h b/include/net/xdp.h index 609f819ed08b..d3005bef812f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -85,6 +85,12 @@ struct xdp_buff { ((xdp)->data_hard_start + (xdp)->frame_sz - \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +static inline struct skb_shared_info * +xdp_get_shared_info_from_buff(struct xdp_buff *xdp) +{ + return (struct skb_shared_info *)xdp_data_hard_end(xdp); +} + struct xdp_frame { void *data; u16 len; @@ -98,6 +104,15 @@ struct xdp_frame { struct net_device *dev_rx; /* used by cpumap */ }; +static inline struct skb_shared_info * +xdp_get_shared_info_from_frame(struct xdp_frame *frame) +{ + void *data_hard_start = frame->data - frame->headroom - sizeof(*frame); + + return (struct skb_shared_info *)(data_hard_start + frame->frame_sz - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); +} + /* Clear kernel pointers in xdp_frame */ static inline void xdp_scrub_frame(struct xdp_frame *frame) { From ca0e014609f05fab577dd251bb5400e815c9f3cf Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:30 +0200 Subject: [PATCH 1267/2056] net: mvneta: move skb build after descriptors processing Move skb build after all descriptors processing. This is a preliminary patch to enable multi-buffers and JUMBO frames support for XDP. Introduce mvneta_xdp_put_buff routine to release all pages used by a XDP multi-buffer Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 158 ++++++++++++++++---------- 1 file changed, 101 insertions(+), 57 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 7191902f6cfe..e9b3fb778d76 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2026,6 +2026,20 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) return i; } +static void +mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, int sync_len, bool napi) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i; + + page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), + sync_len, napi); + for (i = 0; i < sinfo->nr_frags; i++) + page_pool_put_full_page(rxq->page_pool, + skb_frag_page(&sinfo->frags[i]), napi); +} + static int mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq, struct xdp_frame *xdpf, bool dma_map) @@ -2229,6 +2243,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, int data_len = -MVNETA_MH_SIZE, len; struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; + struct skb_shared_info *sinfo; int ret = 0; if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) { @@ -2252,35 +2267,13 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, xdp->data_end = xdp->data + data_len; xdp_set_data_meta_invalid(xdp); - if (xdp_prog) { + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = 0; + + if (xdp_prog) ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats); - if (ret) - goto out; - } - - rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE); - if (unlikely(!rxq->skb)) { - struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); - - netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id); - - u64_stats_update_begin(&stats->syncp); - stats->es.skb_alloc_error++; - stats->rx_dropped++; - u64_stats_update_end(&stats->syncp); - - return -ENOMEM; - } - 
page_pool_release_page(rxq->page_pool, page); - - skb_reserve(rxq->skb, - xdp->data - xdp->data_hard_start); - skb_put(rxq->skb, xdp->data_end - xdp->data); - mvneta_rx_csum(pp, rx_desc->status, rxq->skb); rxq->left_size = rx_desc->data_size - len; - -out: rx_desc->buf_phys_addr = 0; return ret; @@ -2290,8 +2283,10 @@ static void mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, struct page *page) { + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; int data_len, len; @@ -2307,18 +2302,53 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, dma_sync_single_for_cpu(dev->dev.parent, rx_desc->buf_phys_addr, len, dma_dir); - if (data_len > 0) { - /* refill descriptor with new buffer later */ - skb_add_rx_frag(rxq->skb, - skb_shinfo(rxq->skb)->nr_frags, - page, pp->rx_offset_correction, data_len, - PAGE_SIZE); + + if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) { + skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags]; + + skb_frag_off_set(frag, pp->rx_offset_correction); + skb_frag_size_set(frag, data_len); + __skb_frag_set_page(frag, page); + sinfo->nr_frags++; + + rx_desc->buf_phys_addr = 0; } - page_pool_release_page(rxq->page_pool, page); - rx_desc->buf_phys_addr = 0; rxq->left_size -= len; } +static struct sk_buff * +mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, + struct xdp_buff *xdp, u32 desc_status) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); + int i, num_frags = sinfo->nr_frags; + skb_frag_t frags[MAX_SKB_FRAGS]; + struct sk_buff *skb; + + memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags); + + skb = build_skb(xdp->data_hard_start, PAGE_SIZE); + if (!skb) + return ERR_PTR(-ENOMEM); + + page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data)); + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + mvneta_rx_csum(pp, desc_status, skb); + + for (i = 0; i < num_frags; i++) { + struct page *page = skb_frag_page(&frags[i]); + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, skb_frag_off(&frags[i]), + skb_frag_size(&frags[i]), PAGE_SIZE); + page_pool_release_page(rxq->page_pool, page); + } + + return skb; +} + /* Main rx processing when using software buffer management */ static int mvneta_rx_swbm(struct napi_struct *napi, struct mvneta_port *pp, int budget, @@ -2326,22 +2356,25 @@ static int mvneta_rx_swbm(struct napi_struct *napi, { int rx_proc = 0, rx_todo, refill; struct net_device *dev = pp->dev; + struct xdp_buff xdp_buf = { + .frame_sz = PAGE_SIZE, + .rxq = &rxq->xdp_rxq, + }; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; - struct xdp_buff xdp_buf; + u32 desc_status; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); rcu_read_lock(); xdp_prog = READ_ONCE(pp->xdp_prog); - xdp_buf.rxq = &rxq->xdp_rxq; - xdp_buf.frame_sz = PAGE_SIZE; /* Fairness NAPI loop */ while (rx_proc < budget && rx_proc < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); u32 rx_status, index; + struct sk_buff *skb; struct page *page; index = rx_desc - rxq->descs; @@ -2357,21 +2390,21 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); - /* leave the descriptor untouched */ - continue; + goto next; } err = mvneta_swbm_rx_frame(pp, 
rx_desc, rxq, &xdp_buf, xdp_prog, page, &ps); if (err) continue; + + desc_status = rx_desc->status; } else { - if (unlikely(!rxq->skb)) { - pr_debug("no skb for rx_status 0x%x\n", - rx_status); + if (unlikely(!xdp_buf.data_hard_start)) continue; - } - mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page); + + mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, + page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) @@ -2379,27 +2412,38 @@ static int mvneta_rx_swbm(struct napi_struct *napi, continue; if (rxq->left_size) { - pr_err("get last desc, but left_size (%d) != 0\n", - rxq->left_size); - dev_kfree_skb_any(rxq->skb); rxq->left_size = 0; - rxq->skb = NULL; - continue; + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + goto next; } - ps.rx_bytes += rxq->skb->len; + skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); + if (IS_ERR(skb)) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + + u64_stats_update_begin(&stats->syncp); + stats->es.skb_alloc_error++; + stats->rx_dropped++; + u64_stats_update_end(&stats->syncp); + + goto next; + } + + ps.rx_bytes += skb->len; ps.rx_packets++; - /* Linux processing */ - rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - - napi_gro_receive(napi, rxq->skb); - - /* clean uncomplete skb pointer in queue */ - rxq->skb = NULL; + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(napi, skb); +next: + xdp_buf.data_hard_start = NULL; } rcu_read_unlock(); + if (xdp_buf.data_hard_start) + mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); + if (ps.xdp_redirect) xdp_do_flush_map(); From afda408b61f473d462c1027ad57cd88711a47790 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:31 +0200 Subject: [PATCH 1268/2056] net: mvneta: move mvneta_run_xdp after descriptors processing Move mvneta_run_xdp routine after all descriptor processing. This is a preliminary patch to enable multi-buffers and JUMBO frames support for XDP Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e9b3fb778d76..021419428ff8 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2230,12 +2230,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, return ret; } -static int +static void mvneta_swbm_rx_frame(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct page *page, struct mvneta_stats *stats) { @@ -2244,7 +2243,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, struct net_device *dev = pp->dev; enum dma_data_direction dma_dir; struct skb_shared_info *sinfo; - int ret = 0; if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; @@ -2270,13 +2268,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, sinfo = xdp_get_shared_info_from_buff(xdp); sinfo->nr_frags = 0; - if (xdp_prog) - ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats); - rxq->left_size = rx_desc->data_size - len; rx_desc->buf_phys_addr = 0; - - return ret; } static void @@ -2385,20 +2378,15 @@ static int mvneta_rx_swbm(struct napi_struct *napi, rxq->refill_num++; if (rx_status & MVNETA_RXD_FIRST_DESC) { - int err; - /* Check errors only for FIRST descriptor */ if (rx_status & MVNETA_RXD_ERR_SUMMARY) { mvneta_rx_error(pp, rx_desc); goto next; } - err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, - xdp_prog, page, &ps); - if (err) - continue; - desc_status = rx_desc->status; + mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, page, + &ps); } else { if (unlikely(!xdp_buf.data_hard_start)) continue; @@ -2417,6 +2405,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, goto next; } + if (xdp_prog && + mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, &ps)) + goto next; + skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); if (IS_ERR(skb)) { struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); From 7d1643ebce125b0f9ed0b886d5c45ae37f750a29 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:32 +0200 Subject: [PATCH 1269/2056] net: mvneta: drop all fragments in XDP_DROP Release all consumed pages if the eBPF program returns XDP_DROP for XDP multi-buffers Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 021419428ff8..7746693a3de1 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2172,13 +2172,13 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame, static int mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct bpf_prog *prog, struct xdp_buff *xdp, - struct mvneta_stats *stats) + u32 frame_sz, struct mvneta_stats *stats) { - unsigned int len, sync; - struct page *page; + unsigned int len, data_len, sync; u32 ret, act; len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction; + data_len = xdp->data_end - xdp->data; act = bpf_prog_run_xdp(prog, xdp); /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ @@ -2194,9 +2194,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, err = xdp_do_redirect(pp->dev, xdp, prog); if (unlikely(err)) { + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); ret = MVNETA_XDP_DROPPED; - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); } else { ret = MVNETA_XDP_REDIR; stats->xdp_redirect++; @@ -2205,10 +2204,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, } case XDP_TX: ret = mvneta_xdp_xmit_back(pp, xdp); - if (ret != MVNETA_XDP_TX) { - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); - } + if (ret != MVNETA_XDP_TX) + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); break; default: bpf_warn_invalid_xdp_action(act); @@ -2217,14 +2214,13 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, trace_xdp_exception(pp->dev, prog, act); /* fall through */ case XDP_DROP: - page = virt_to_head_page(xdp->data); - page_pool_put_page(rxq->page_pool, page, sync, true); + mvneta_xdp_put_buff(pp, rxq, xdp, sync, true); ret = MVNETA_XDP_DROPPED; stats->xdp_drop++; break; } - stats->rx_bytes += xdp->data_end - xdp->data; + stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len; stats->rx_packets++; return ret; @@ -2355,7 +2351,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, }; struct mvneta_stats ps = {}; struct bpf_prog *xdp_prog; - u32 desc_status; + u32 desc_status, frame_sz; /* Get number of received packets */ rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); @@ -2384,7 +2380,9 @@ static int mvneta_rx_swbm(struct napi_struct *napi, goto next; } + frame_sz = rx_desc->data_size - ETH_FCS_LEN; desc_status = rx_desc->status; + mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, page, &ps); } else { @@ -2406,7 +2404,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, } if (xdp_prog && - mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, &ps)) + mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps)) goto next; skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); From 89f4a198c9cad3775f952129a82018b84995d923 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:33 +0200 Subject: [PATCH 1270/2056] net: mvneta: get rid of skb in mvneta_rx_queue Remove skb pointer in mvneta_rx_queue data structure since it is no longer used Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 7746693a3de1..8b7f6fcd4cca 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -699,8 +699,6 @@ struct mvneta_rx_queue { int first_to_refill; u32 refill_num; - /* pointer to uncomplete skb buffer */ - struct sk_buff *skb; int left_size; }; @@ -3362,9 +3360,6 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, { mvneta_rxq_drop_pkts(pp, rxq); - if (rxq->skb) - dev_kfree_skb_any(rxq->skb); - if (rxq->descs) dma_free_coherent(pp->dev->dev.parent, rxq->size * MVNETA_DESC_ALIGNED_SIZE, @@ -3377,7 +3372,6 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->descs_phys = 0; rxq->first_to_refill = 0; rxq->refill_num = 0; - rxq->skb = NULL; rxq->left_size = 0; } From c7a3a8cd9d41ee0a32659fc99d98ec83af803b77 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 00:16:34 +0200 Subject: [PATCH 1271/2056] net: mvneta: move rxq->left_size on the stack Allocate rxq->left_size on mvneta_rx_swbm stack since it is used just in sw bm napi_poll Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 29 ++++++++++++--------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 8b7f6fcd4cca..2c9277e73cef 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -698,8 +698,6 @@ struct mvneta_rx_queue { /* Index of first RX DMA descriptor to refill */ int first_to_refill; u32 refill_num; - - int left_size; }; static enum cpuhp_state online_hpstate; @@ -2228,7 +2226,7 @@ static void mvneta_swbm_rx_frame(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, - struct xdp_buff *xdp, + struct xdp_buff *xdp, int *size, struct page *page, struct mvneta_stats *stats) { @@ -2262,7 +2260,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, sinfo = xdp_get_shared_info_from_buff(xdp); sinfo->nr_frags = 0; - rxq->left_size = rx_desc->data_size - len; + *size = rx_desc->data_size - len; rx_desc->buf_phys_addr = 0; } @@ -2270,7 +2268,7 @@ static void mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc, struct mvneta_rx_queue *rxq, - struct xdp_buff *xdp, + struct xdp_buff *xdp, int *size, struct page *page) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); @@ -2278,11 +2276,11 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, enum dma_data_direction dma_dir; int data_len, len; - if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) { + if (*size > MVNETA_MAX_RX_BUF_SIZE) { len = MVNETA_MAX_RX_BUF_SIZE; data_len = len; } else { - len = rxq->left_size; + len = *size; data_len = len - ETH_FCS_LEN; } dma_dir = page_pool_get_dma_dir(rxq->page_pool); @@ -2300,7 +2298,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, rx_desc->buf_phys_addr = 0; } - rxq->left_size -= len; + *size -= len; } static struct sk_buff * @@ -2341,7 +2339,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, struct mvneta_port *pp, int budget, struct mvneta_rx_queue *rxq) { - int rx_proc = 0, rx_todo, refill; + int rx_proc = 0, rx_todo, refill, size = 0; struct net_device *dev = pp->dev; struct xdp_buff xdp_buf = { .frame_sz = PAGE_SIZE, @@ -2378,25 +2376,25 @@ static int mvneta_rx_swbm(struct napi_struct *napi, goto next; } - 
frame_sz = rx_desc->data_size - ETH_FCS_LEN; + size = rx_desc->data_size; + frame_sz = size - ETH_FCS_LEN; desc_status = rx_desc->status; - mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, page, - &ps); + mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, + &size, page, &ps); } else { if (unlikely(!xdp_buf.data_hard_start)) continue; mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, - page); + &size, page); } /* Middle or Last descriptor */ if (!(rx_status & MVNETA_RXD_LAST_DESC)) /* no last descriptor this time */ continue; - if (rxq->left_size) { - rxq->left_size = 0; + if (size) { mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true); goto next; } @@ -3372,7 +3370,6 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp, rxq->descs_phys = 0; rxq->first_to_refill = 0; rxq->refill_num = 0; - rxq->left_size = 0; } static int mvneta_txq_sw_init(struct mvneta_port *pp, From eba75c587e811d3249c8bd50d22bb2266ccd3c0f Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 10 Jul 2020 09:29:02 -0400 Subject: [PATCH 1272/2056] icmp: support rfc 4884 Add setsockopt SOL_IP/IP_RECVERR_4884 to return the offset to an extension struct if present. ICMP messages may include an extension structure after the original datagram. RFC 4884 standardized this behavior. It stores the offset in words to the extension header in u8 icmphdr.un.reserved[1]. The field is valid only for ICMP types destination unreachable, time exceeded and parameter problem, if length is at least 128 bytes and entire packet does not exceed 576 bytes. Return the offset to the start of the extension struct when reading an ICMP error from the error queue, if it matches the above constraints. Do not return the raw u8 field. Return the offset from the start of the user buffer, in bytes. The kernel does not return the network and transport headers, so subtract those. Also validate the headers. Return the offset regardless of validation, as an invalid extension must still not be misinterpreted as part of the original datagram. Note that !invalid does not imply valid. If the extension version does not match, no validation can take place, for instance. For backward compatibility, make this optional, set by setsockopt SOL_IP/IP_RECVERR_RFC4884. For API example and feature test, see github.com/wdebruij/kerneltools/blob/master/tests/recv_icmp_v2.c For forward compatibility, reserve only setsockopt value 1, leaving other bits for additional icmp extensions. Changes v1->v2: - convert word offset to byte offset from start of user buffer - return in ee_data as u8 may be insufficient - define extension struct and object header structs - return len only if constraints met - if returning len, also validate Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/icmp.h | 4 ++ include/net/inet_sock.h | 1 + include/uapi/linux/errqueue.h | 14 ++++++- include/uapi/linux/icmp.h | 22 +++++++++++ include/uapi/linux/in.h | 1 + net/ipv4/icmp.c | 71 +++++++++++++++++++++++++++++++++++ net/ipv4/ip_sockglue.c | 12 ++++++ 7 files changed, 124 insertions(+), 1 deletion(-) diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 81ca84ce3119..8fc38a34cb20 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -15,6 +15,7 @@ #include #include +#include static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { @@ -35,4 +36,7 @@ static inline bool icmp_is_err(int type) return false; } +void ip_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out); + #endif /* _LINUX_ICMP_H */ diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index a7ce00af6c44..a3702d1d4875 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -225,6 +225,7 @@ struct inet_sock { mc_all:1, nodefrag:1; __u8 bind_address_no_port:1, + recverr_rfc4884:1, defer_connect:1; /* Indicates that fastopen_connect is set * and cookie exists so we defer connect * until first data frame is written diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index ca5cb3e3c6df..3c70e8ac14b8 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -5,6 +5,13 @@ #include #include +/* RFC 4884: return offset to extension struct + validation */ +struct sock_ee_data_rfc4884 { + __u16 len; + __u8 flags; + __u8 reserved; +}; + struct sock_extended_err { __u32 ee_errno; __u8 ee_origin; @@ -12,7 +19,10 @@ struct sock_extended_err { __u8 ee_code; __u8 ee_pad; __u32 ee_info; - __u32 ee_data; + union { + __u32 ee_data; + struct sock_ee_data_rfc4884 ee_rfc4884; + }; }; #define SO_EE_ORIGIN_NONE 0 @@ -31,6 +41,8 @@ struct sock_extended_err { #define SO_EE_CODE_TXTIME_INVALID_PARAM 1 #define SO_EE_CODE_TXTIME_MISSED 2 +#define SO_EE_RFC4884_FLAG_INVALID 1 + /** * struct scm_timestamping - timestamps exposed through cmsg * diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h index 5589eeb791ca..fb169a50895e 100644 --- a/include/uapi/linux/icmp.h +++ b/include/uapi/linux/icmp.h @@ -19,6 +19,7 @@ #define _UAPI_LINUX_ICMP_H #include +#include #define ICMP_ECHOREPLY 0 /* Echo Reply */ #define ICMP_DEST_UNREACH 3 /* Destination Unreachable */ @@ -95,5 +96,26 @@ struct icmp_filter { __u32 data; }; +/* RFC 4884 extension struct: one per message */ +struct icmp_ext_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + __u8 reserved1:4, + version:4; +#elif defined(__BIG_ENDIAN_BITFIELD) + __u8 version:4, + reserved1:4; +#else +#error "Please fix " +#endif + __u8 reserved2; + __sum16 checksum; +}; + +/* RFC 4884 extension object header: one for each object */ +struct icmp_extobj_hdr { + __be16 length; + __u8 class_num; + __u8 class_type; +}; #endif /* _UAPI_LINUX_ICMP_H */ diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 8533bf07450f..3d0d8231dc19 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -123,6 +123,7 @@ struct in_addr { #define IP_CHECKSUM 23 #define IP_BIND_ADDRESS_NO_PORT 24 #define IP_RECVFRAGSIZE 25 +#define IP_RECVERR_RFC4884 26 /* IP_MTU_DISCOVER values */ #define IP_PMTUDISC_DONT 0 /* Never send DF frames */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index e30515f89802..793aebf07c2a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1116,6 +1116,77 @@ int icmp_rcv(struct sk_buff *skb) goto drop; } +static bool 
ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) +{ + struct icmp_extobj_hdr *objh, _objh; + struct icmp_ext_hdr *exth, _exth; + u16 olen; + + exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth); + if (!exth) + return false; + if (exth->version != 2) + return true; + + if (exth->checksum && + csum_fold(skb_checksum(skb, off, skb->len - off, 0))) + return false; + + off += sizeof(_exth); + while (off < skb->len) { + objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh); + if (!objh) + return false; + + olen = ntohs(objh->length); + if (olen < sizeof(_objh)) + return false; + + off += olen; + if (off > skb->len) + return false; + } + + return true; +} + +void ip_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + int hlen, off; + + switch (icmp_hdr(skb)->type) { + case ICMP_DEST_UNREACH: + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + break; + default: + return; + } + + /* outer headers up to inner iph. skb->data is at inner payload */ + hlen = -skb_transport_offset(skb) - sizeof(struct icmphdr); + + /* per rfc 791: maximum packet length of 576 bytes */ + if (hlen + skb->len > 576) + return; + + /* per rfc 4884: minimal datagram length of 128 bytes */ + off = icmp_hdr(skb)->un.reserved[1] * sizeof(u32); + if (off < 128) + return; + + /* kernel has stripped headers: return payload offset in bytes */ + off -= hlen; + if (off + sizeof(struct icmp_ext_hdr) > skb->len) + return; + + out->len = off; + + if (!ip_icmp_error_rfc4884_validate(skb, off)) + out->flags |= SO_EE_RFC4884_FLAG_INVALID; +} + int icmp_err(struct sk_buff *skb, u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 86b3b9a7cea3..a5ea02d7a183 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -411,6 +411,9 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->port = port; if (skb_pull(skb, payload - skb->data)) { + if (inet_sk(sk)->recverr_rfc4884) + ip_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) return; @@ -904,6 +907,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_RECVORIGDSTADDR: case IP_CHECKSUM: case IP_RECVFRAGSIZE: + case IP_RECVERR_RFC4884: if (optlen >= sizeof(int)) { if (get_user(val, (int __user *) optval)) return -EFAULT; @@ -1063,6 +1067,11 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (!val) skb_queue_purge(&sk->sk_error_queue); break; + case IP_RECVERR_RFC4884: + if (val < 0 || val > 1) + goto e_inval; + inet->recverr_rfc4884 = !!val; + break; case IP_MULTICAST_TTL: if (sk->sk_type == SOCK_STREAM) goto e_inval; @@ -1611,6 +1620,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_RECVERR: val = inet->recverr; break; + case IP_RECVERR_RFC4884: + val = inet->recverr_rfc4884; + break; case IP_MULTICAST_TTL: val = inet->mc_ttl; break; From f65b71aa25a65e13cf3d10445a48c63d3eeb942e Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 17 Jul 2020 01:45:29 +0300 Subject: [PATCH 1273/2056] ptp: add ability to configure duty cycle for periodic output There are external event timestampers (PHCs with support for PTP_EXTTS_REQUEST) that timestamp both event edges. When those edges are very close (such as in the case of a short pulse), there is a chance that the collected timestamp might be of the rising, or of the falling edge, we never know. 
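A minimal user-space sketch of that situation (hypothetical example, assuming a PHC exposed as /dev/ptp0 with external timestamping on channel 0 and using only the standard <linux/ptp_clock.h> UAPI): with both edge flags set, each pulse produces two events that carry no edge information, so the timestamps of a short pulse cannot be attributed reliably.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req = {
		.index = 0,	/* hypothetical: extts channel 0 */
		.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE | PTP_FALLING_EDGE,
	};
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* hypothetical PHC node */

	if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST, &req))
		return 1;

	/* A short pulse yields two events back to back; the event does not
	 * say which one is the rising and which the falling edge, and a
	 * 1-entry hardware FIFO may already have overwritten one of them.
	 */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("channel %u: %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}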
There are also PHCs capable of generating periodic output with a configurable duty cycle. This is good news, because we can space the rising and falling edge out enough in time, that the risks to overrun the 1-entry timestamp FIFO of the extts PHC are lower (example: the perout PHC can be configured for a period of 1 second, and an "on" time of 0.5 seconds, resulting in a duty cycle of 50%). A flag is introduced for signaling that an on time is present in the perout request structure, for preserving compatibility. Logically speaking, the duty cycle cannot exceed 100% and the PTP core checks for this. PHC drivers that don't support this flag emit a periodic output of an unspecified duty cycle, same as before. The duty cycle is encoded as an "on" time, similar to the "start" and "period" times, and reuses the reserved space while preserving overall binary layout. Pahole reported before: struct ptp_perout_request { struct ptp_clock_time start; /* 0 16 */ struct ptp_clock_time period; /* 16 16 */ unsigned int index; /* 32 4 */ unsigned int flags; /* 36 4 */ unsigned int rsv[4]; /* 40 16 */ /* size: 56, cachelines: 1, members: 5 */ /* last cacheline: 56 bytes */ }; And now: struct ptp_perout_request { struct ptp_clock_time start; /* 0 16 */ struct ptp_clock_time period; /* 16 16 */ unsigned int index; /* 32 4 */ unsigned int flags; /* 36 4 */ union { struct ptp_clock_time on; /* 40 16 */ unsigned int rsv[4]; /* 40 16 */ }; /* 40 16 */ /* size: 56, cachelines: 1, members: 5 */ /* last cacheline: 56 bytes */ }; Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/ptp/ptp_chardev.c | 33 +++++++++++++++++++++++++++------ include/uapi/linux/ptp_clock.h | 17 ++++++++++++++--- 2 files changed, 41 insertions(+), 9 deletions(-) diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 375cd6e4aade..e0e6f85966e1 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -191,12 +191,33 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) err = -EFAULT; break; } - if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) || - req.perout.rsv[0] || req.perout.rsv[1] || - req.perout.rsv[2] || req.perout.rsv[3]) && - cmd == PTP_PEROUT_REQUEST2) { - err = -EINVAL; - break; + if (cmd == PTP_PEROUT_REQUEST2) { + struct ptp_perout_request *perout = &req.perout; + + if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) { + err = -EINVAL; + break; + } + /* + * The "on" field has undefined meaning if + * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat + * it as reserved, which must be set to zero. + */ + if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) && + (perout->rsv[0] || perout->rsv[1] || + perout->rsv[2] || perout->rsv[3])) { + err = -EINVAL; + break; + } + if (perout->flags & PTP_PEROUT_DUTY_CYCLE) { + /* The duty cycle must be subunitary. */ + if (perout->on.sec > perout->period.sec || + (perout->on.sec == perout->period.sec && + perout->on.nsec > perout->period.nsec)) { + err = -ERANGE; + break; + } + } } else if (cmd == PTP_PEROUT_REQUEST) { req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS; req.perout.rsv[0] = 0; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index ff070aa64278..1d2841155f7d 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -53,12 +53,14 @@ /* * Bits of the ptp_perout_request.flags field: */ -#define PTP_PEROUT_ONE_SHOT (1<<0) +#define PTP_PEROUT_ONE_SHOT (1<<0) +#define PTP_PEROUT_DUTY_CYCLE (1<<1) /* * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. 
*/ -#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT) +#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \ + PTP_PEROUT_DUTY_CYCLE) /* * No flags are valid for the original PTP_PEROUT_REQUEST ioctl @@ -105,7 +107,16 @@ struct ptp_perout_request { struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ unsigned int flags; - unsigned int rsv[4]; /* Reserved for future use. */ + union { + /* + * The "on" time of the signal. + * Must be lower than the period. + * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set. + */ + struct ptp_clock_time on; + /* Reserved for future use. */ + unsigned int rsv[4]; + }; }; #define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */ From b6bd41363a1ca39282496803cc32f7515ed917fe Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 17 Jul 2020 01:45:30 +0300 Subject: [PATCH 1274/2056] ptp: introduce a phase offset in the periodic output request Some PHCs like the ocelot/felix switch cannot emit generic periodic output, but just PPS (pulse per second) signals, which: - don't start from arbitrary absolute times, but are rather phase-aligned to the beginning of [the closest next] second. - have an optional phase offset relative to that beginning of the second. For those, it was initially established that they should reject any other absolute time for the PTP_PEROUT_REQUEST than 0.000000000 [1]. But when it actually came to writing an application [2] that makes use of this functionality, we realized that we can't really deal generically with PHCs that support absolute start time, and with PHCs that don't, without an explicit interface. Namely, in an ideal world, PHC drivers would ensure that the "perout.start" value written to hardware will result in a functional output. This means that if the PTP time has become in the past of this PHC's current time, it should be automatically fast-forwarded by the driver into a close enough future time that is known to work (note: this is necessary only if the hardware doesn't do this fast-forward by itself). But we don't really know what is the status for PHC drivers in use today, so in the general sense, user space would be risking to have a non-functional periodic output if it simply asked for a start time of 0.000000000. So let's introduce a flag for this type of reduced-functionality hardware, named PTP_PEROUT_PHASE. The start time is just "soon", the only thing we know for sure about this signal is that its rising edge events, Rn, occur at: Rn = perout.phase + n * perout.period The "phase" in the periodic output structure is simply an alias to the "start" time, since both cannot logically be specified at the same time. Therefore, the binary layout of the structure is not affected. [1]: https://patchwork.ozlabs.org/project/netdev/patch/20200320103726.32559-7-yangbo.lu@nxp.com/ [2]: https://www.mail-archive.com/linuxptp-devel@lists.sourceforge.net/msg04142.html Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- include/uapi/linux/ptp_clock.h | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 1d2841155f7d..1d108d597f66 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -55,12 +55,14 @@ */ #define PTP_PEROUT_ONE_SHOT (1<<0) #define PTP_PEROUT_DUTY_CYCLE (1<<1) +#define PTP_PEROUT_PHASE (1<<2) /* * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. 
*/ #define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT | \ - PTP_PEROUT_DUTY_CYCLE) + PTP_PEROUT_DUTY_CYCLE | \ + PTP_PEROUT_PHASE) /* * No flags are valid for the original PTP_PEROUT_REQUEST ioctl @@ -103,7 +105,20 @@ struct ptp_extts_request { }; struct ptp_perout_request { - struct ptp_clock_time start; /* Absolute start time. */ + union { + /* + * Absolute start time. + * Valid only if (flags & PTP_PEROUT_PHASE) is unset. + */ + struct ptp_clock_time start; + /* + * Phase offset. The signal should start toggling at an + * unspecified integer multiple of the period, plus this value. + * The start time should be "as soon as possible". + * Valid only if (flags & PTP_PEROUT_PHASE) is set. + */ + struct ptp_clock_time phase; + }; struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ unsigned int flags; From ecf9f9b77c637a3d9347a7c85b33964abac989ac Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Fri, 17 Jul 2020 01:45:31 +0300 Subject: [PATCH 1275/2056] net: mscc: ocelot: add support for PTP waveform configuration For PPS output (perout period is 1.000000000), accept the new "phase" parameter from the periodic output request structure. For both PPS and freeform output, accept the new "on" argument for specifying the duty cycle of the generated signal. Preserve the old defaults for this "on" time: 1 us for PPS, and half the period for freeform output. Also preserve the old behavior that accepted the "phase" via the "start" argument. Signed-off-by: Vladimir Oltean Reviewed-by: Horatiu Vultur Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_ptp.c | 74 +++++++++++++++++--------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index a3088a1676ed..1e08fe4daaef 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -184,18 +184,20 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); - struct timespec64 ts_start, ts_period; + struct timespec64 ts_phase, ts_period; enum ocelot_ptp_pins ptp_pin; unsigned long flags; bool pps = false; int pin = -1; + s64 wf_high; + s64 wf_low; u32 val; - s64 ns; switch (rq->type) { case PTP_CLK_REQ_PEROUT: /* Reject requests with unsupported flags */ - if (rq->perout.flags) + if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE | + PTP_PEROUT_PHASE)) return -EOPNOTSUPP; pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT, @@ -211,22 +213,12 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp, else return -EBUSY; - ts_start.tv_sec = rq->perout.start.sec; - ts_start.tv_nsec = rq->perout.start.nsec; ts_period.tv_sec = rq->perout.period.sec; ts_period.tv_nsec = rq->perout.period.nsec; if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0) pps = true; - if (ts_start.tv_sec || (ts_start.tv_nsec && !pps)) { - dev_warn(ocelot->dev, - "Absolute start time not supported!\n"); - dev_warn(ocelot->dev, - "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n"); - return -EINVAL; - } - /* Handle turning off */ if (!on) { spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); @@ -236,16 +228,48 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp, break; } + if (rq->perout.flags & PTP_PEROUT_PHASE) { + ts_phase.tv_sec = rq->perout.phase.sec; + ts_phase.tv_nsec = rq->perout.phase.nsec; + } else { + /* Compatibility */ + ts_phase.tv_sec = 
rq->perout.start.sec; + ts_phase.tv_nsec = rq->perout.start.nsec; + } + if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) { + dev_warn(ocelot->dev, + "Absolute start time not supported!\n"); + dev_warn(ocelot->dev, + "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n"); + return -EINVAL; + } + + /* Calculate waveform high and low times */ + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { + struct timespec64 ts_on; + + ts_on.tv_sec = rq->perout.on.sec; + ts_on.tv_nsec = rq->perout.on.nsec; + + wf_high = timespec64_to_ns(&ts_on); + } else { + if (pps) { + wf_high = 1000; + } else { + wf_high = timespec64_to_ns(&ts_period); + wf_high = div_s64(wf_high, 2); + } + } + + wf_low = timespec64_to_ns(&ts_period); + wf_low -= wf_high; + /* Handle PPS request */ if (pps) { spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); - /* Pulse generated perout.start.nsec after TOD has - * increased seconds. - * Pulse width is set to 1us. - */ - ocelot_write_rix(ocelot, ts_start.tv_nsec, + ocelot_write_rix(ocelot, ts_phase.tv_nsec, PTP_PIN_WF_LOW_PERIOD, ptp_pin); - ocelot_write_rix(ocelot, 1000, + ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD, ptp_pin); val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK); val |= PTP_PIN_CFG_SYNC; @@ -255,14 +279,16 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp, } /* Handle periodic clock */ - ns = timespec64_to_ns(&ts_period); - ns = ns >> 1; - if (ns > 0x3fffffff || ns <= 0x6) + if (wf_high > 0x3fffffff || wf_high <= 0x6) + return -EINVAL; + if (wf_low > 0x3fffffff || wf_low <= 0x6) return -EINVAL; spin_lock_irqsave(&ocelot->ptp_clock_lock, flags); - ocelot_write_rix(ocelot, ns, PTP_PIN_WF_LOW_PERIOD, ptp_pin); - ocelot_write_rix(ocelot, ns, PTP_PIN_WF_HIGH_PERIOD, ptp_pin); + ocelot_write_rix(ocelot, wf_low, PTP_PIN_WF_LOW_PERIOD, + ptp_pin); + ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD, + ptp_pin); val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK); ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin); spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); From 88a3c45482f63373f7cce37a182cd6f40429a5ac Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Mon, 20 Jul 2020 03:09:59 +0000 Subject: [PATCH 1276/2056] net: vxge-main: Remove unnecessary cast in kfree() Remove unnecassary casts in the argument to kfree. Signed-off-by: Xu Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index b0faa737b817..fc4ed0aa1b2f 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1076,7 +1076,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac) list_for_each_safe(entry, next, &vpath->mac_addr_list) { if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) { list_del(entry); - kfree((struct vxge_mac_addrs *)entry); + kfree(entry); vpath->mac_addr_cnt--; if (is_multicast_ether_addr(mac->macaddr)) @@ -2913,7 +2913,7 @@ static void vxge_free_mac_add_list(struct vxge_vpath *vpath) list_for_each_safe(entry, next, &vpath->mac_addr_list) { list_del(entry); - kfree((struct vxge_mac_addrs *)entry); + kfree(entry); } } From aad74d849dd56a04ccb289f7c61909a68337ce5e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:51 -0700 Subject: [PATCH 1277/2056] net: Wrap ndo_do_ioctl() to prepare for DSA stacked ops In preparation for adding another layer of call into a DSA stacked ops singleton, wrap the ndo_do_ioctl() call into dev_do_ioctl(). Reviewed-by: Andrew Lunn Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- net/core/dev_ioctl.c | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 547b587c1950..a213c703c90a 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -225,6 +225,22 @@ static int net_hwtstamp_validate(struct ifreq *ifr) return 0; } +static int dev_do_ioctl(struct net_device *dev, + struct ifreq *ifr, unsigned int cmd) +{ + const struct net_device_ops *ops = dev->netdev_ops; + int err = -EOPNOTSUPP; + + if (ops->ndo_do_ioctl) { + if (netif_device_present(dev)) + err = ops->ndo_do_ioctl(dev, ifr, cmd); + else + err = -ENODEV; + } + + return err; +} + /* * Perform the SIOCxIFxxx calls, inside rtnl_lock() */ @@ -323,13 +339,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP || cmd == SIOCWANDEV) { - err = -EOPNOTSUPP; - if (ops->ndo_do_ioctl) { - if (netif_device_present(dev)) - err = ops->ndo_do_ioctl(dev, ifr, cmd); - else - err = -ENODEV; - } + err = dev_do_ioctl(dev, ifr, cmd); } else err = -EINVAL; From 4cfab3566710826e62adbf100875d2dca32434b6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:52 -0700 Subject: [PATCH 1278/2056] net: dsa: Add wrappers for overloaded ndo_ops Add definitions for the dsa_netdevice_ops structure which is a subset of the net_device_ops structure for the specific operations that we care about overlaying on top of the DSA CPU port net_device and provide inline stubs that take core managing whether DSA code is reachable. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 70 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/include/net/dsa.h b/include/net/dsa.h index 6fa418ff1175..343642ca4f63 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -86,6 +86,18 @@ struct dsa_device_ops { enum dsa_tag_protocol proto; }; +/* This structure defines the control interfaces that are overlayed by the + * DSA layer on top of the DSA CPU/management net_device instance. 
This is + * used by the core net_device layer while calling various net_device_ops + * function pointers. + */ +struct dsa_netdevice_ops { + int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, + int cmd); + int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, + size_t len); +}; + #define DSA_TAG_DRIVER_ALIAS "dsa_tag-" #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE)) @@ -217,6 +229,7 @@ struct dsa_port { /* * Original copy of the master netdev net_device_ops */ + const struct dsa_netdevice_ops *netdev_ops; const struct net_device_ops *orig_ndo_ops; bool setup; @@ -679,6 +692,63 @@ static inline bool dsa_can_decode(const struct sk_buff *skb, return false; } +#if IS_ENABLED(CONFIG_NET_DSA) +static inline int __dsa_netdevice_ops_check(struct net_device *dev) +{ + int err = -EOPNOTSUPP; + + if (!dev->dsa_ptr) + return err; + + if (!dev->dsa_ptr->netdev_ops) + return err; + + return 0; +} + +static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + const struct dsa_netdevice_ops *ops; + int err; + + err = __dsa_netdevice_ops_check(dev); + if (err) + return err; + + ops = dev->dsa_ptr->netdev_ops; + + return ops->ndo_do_ioctl(dev, ifr, cmd); +} + +static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + const struct dsa_netdevice_ops *ops; + int err; + + err = __dsa_netdevice_ops_check(dev); + if (err) + return err; + + ops = dev->dsa_ptr->netdev_ops; + + return ops->ndo_get_phys_port_name(dev, name, len); +} +#else +static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + return -EOPNOTSUPP; +} + +static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + return -EOPNOTSUPP; +} +#endif + void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds); struct dsa_switch *dsa_switch_find(int tree_index, int sw_index); From 3369afba1e46b0f7f33b4853a9d06616be6e22d5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:53 -0700 Subject: [PATCH 1279/2056] net: Call into DSA netdevice_ops wrappers Make the core net_device code call into our ndo_do_ioctl() and ndo_get_phys_port_name() functions via the wrappers defined previously Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/core/dev.c | 5 +++++ net/core/dev_ioctl.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index 062a00fdca9b..19f1abc26fcd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -98,6 +98,7 @@ #include #include #include +#include #include #include #include @@ -8602,6 +8603,10 @@ int dev_get_phys_port_name(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; int err; + err = dsa_ndo_get_phys_port_name(dev, name, len); + if (err == 0 || err != -EOPNOTSUPP) + return err; + if (ops->ndo_get_phys_port_name) { err = ops->ndo_get_phys_port_name(dev, name, len); if (err != -EOPNOTSUPP) diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index a213c703c90a..b2cf9b7bb7b8 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -5,6 +5,7 @@ #include #include #include +#include #include /* @@ -231,6 +232,10 @@ static int dev_do_ioctl(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; int err = -EOPNOTSUPP; + err = dsa_ndo_do_ioctl(dev, ifr, cmd); + if (err == 0 || err != -EOPNOTSUPP) + return err; + if (ops->ndo_do_ioctl) { if (netif_device_present(dev)) err = ops->ndo_do_ioctl(dev, ifr, cmd); From 9c0c7014f38206a2b63e7e832edf2e881a7b49ad Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sun, 19 Jul 2020 20:49:54 -0700 Subject: [PATCH 1280/2056] net: dsa: Setup dsa_netdev_ops Now that we have all the infrastructure in place for calling into the dsa_ptr->netdev_ops function pointers, install them when we configure the DSA CPU/management interface and tear them down. The flow is unchanged from before, but now we preserve equality of tests when network device drivers do tests like dev->netdev_ops == &foo_ops which was not the case before since we were allocating an entirely new structure. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 1 - net/dsa/master.c | 52 ++++++++++++----------------------------------- 2 files changed, 13 insertions(+), 40 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 343642ca4f63..f1b63d06d132 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -230,7 +230,6 @@ struct dsa_port { * Original copy of the master netdev net_device_ops */ const struct dsa_netdevice_ops *netdev_ops; - const struct net_device_ops *orig_ndo_ops; bool setup; }; diff --git a/net/dsa/master.c b/net/dsa/master.c index 480a61460c23..0a90911ae31b 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -220,12 +220,17 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; } - if (cpu_dp->orig_ndo_ops && cpu_dp->orig_ndo_ops->ndo_do_ioctl) - err = cpu_dp->orig_ndo_ops->ndo_do_ioctl(dev, ifr, cmd); + if (dev->netdev_ops->ndo_do_ioctl) + err = dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); return err; } +static const struct dsa_netdevice_ops dsa_netdev_ops = { + .ndo_do_ioctl = dsa_master_ioctl, + .ndo_get_phys_port_name = dsa_master_get_phys_port_name, +}; + static int dsa_master_ethtool_setup(struct net_device *dev) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -260,38 +265,10 @@ static void dsa_master_ethtool_teardown(struct net_device *dev) cpu_dp->orig_ethtool_ops = NULL; } -static int dsa_master_ndo_setup(struct net_device *dev) +static void dsa_netdev_ops_set(struct net_device *dev, + const struct dsa_netdevice_ops *ops) { - struct dsa_port *cpu_dp = dev->dsa_ptr; - struct dsa_switch *ds = cpu_dp->ds; - struct net_device_ops *ops; - - if (dev->netdev_ops->ndo_get_phys_port_name) - return 0; - - ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL); - if (!ops) - return -ENOMEM; - - cpu_dp->orig_ndo_ops = dev->netdev_ops; - if (cpu_dp->orig_ndo_ops) - memcpy(ops, cpu_dp->orig_ndo_ops, sizeof(*ops)); - - ops->ndo_get_phys_port_name = dsa_master_get_phys_port_name; - ops->ndo_do_ioctl = dsa_master_ioctl; - - dev->netdev_ops = ops; - - return 0; -} - -static void dsa_master_ndo_teardown(struct net_device *dev) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - - if (cpu_dp->orig_ndo_ops) - dev->netdev_ops = cpu_dp->orig_ndo_ops; - cpu_dp->orig_ndo_ops = NULL; + dev->dsa_ptr->netdev_ops = ops; } static ssize_t tagging_show(struct device *d, struct device_attribute *attr, @@ -353,9 +330,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) if (ret) return ret; - ret = dsa_master_ndo_setup(dev); - if (ret) - goto out_err_ethtool_teardown; + dsa_netdev_ops_set(dev, &dsa_netdev_ops); ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); if (ret) @@ -364,8 +339,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) return ret; out_err_ndo_teardown: - dsa_master_ndo_teardown(dev); -out_err_ethtool_teardown: + dsa_netdev_ops_set(dev, NULL); dsa_master_ethtool_teardown(dev); return ret; } @@ -373,7 +347,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) void dsa_master_teardown(struct net_device *dev) { sysfs_remove_group(&dev->dev.kobj, &dsa_group); - dsa_master_ndo_teardown(dev); + dsa_netdev_ops_set(dev, NULL); dsa_master_ethtool_teardown(dev); dsa_master_reset_mtu(dev); From 74b5afea3b5e141e9a0b76863858c072e28713f1 Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Mon, 20 Jul 2020 06:24:10 +0000 Subject: [PATCH 1281/2056] net: hns: use eth_broadcast_addr() to assign broadcast address This patch is to use eth_broadcast_addr() to assign broadcast address insetad of memset(). 
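For reference, eth_broadcast_addr() from <linux/etherdevice.h> is a thin inline around that same memset, so the change is about stating intent rather than changing behaviour; a minimal sketch (the helper function name here is hypothetical):

#include <linux/etherdevice.h>

static void fill_bcast_mask(u8 *mask)
{
	/* Equivalent to memset(mask, 0xff, ETH_ALEN), but self-describing. */
	eth_broadcast_addr(mask);
}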
Signed-off-by: Xu Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c5243cc1dc6..acfa86e5296f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1724,7 +1724,7 @@ static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev, u8 port_num, u8 *mask, u8 *addr) { if (MAC_IS_BROADCAST(addr)) - memset(mask, 0xff, ETH_ALEN); + eth_broadcast_addr(mask); else memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN); } From d89d8d4db48b42d22c5b0e0e8a3f82dd00262c44 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Mon, 20 Jul 2020 15:56:14 +0800 Subject: [PATCH 1282/2056] net: ena: Fix using plain integer as NULL pointer in ena_init_napi_in_range Fix sparse build warning: drivers/net/ethernet/amazon/ena/ena_netdev.c:2193:34: warning: Using plain integer as NULL pointer Reported-by: Hulk Robot Signed-off-by: Wang Hai Suggested-by: Joe Perches Acked-by: Shay Agroskin Acked-by: Shay Agroskin Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 91be3ffa1c5c..3eb63b12dd68 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2190,14 +2190,13 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter, static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index, int count) { - struct ena_napi *napi = {0}; int i; for (i = first_index; i < first_index + count; i++) { - napi = &adapter->ena_napi[i]; + struct ena_napi *napi = &adapter->ena_napi[i]; netif_napi_add(adapter->netdev, - &adapter->ena_napi[i].napi, + &napi->napi, ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll, ENA_NAPI_BUDGET); From a8b7b2d0b3fc965d823e362840e5451e2eb4a71b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 20 Jul 2020 10:10:41 +0200 Subject: [PATCH 1283/2056] sched: sch_api: add missing rcu read lock to silence the warning In case the qdisc_match_from_root function() is called from non-rcu path with rtnl mutex held, a suspiciout rcu usage warning appears: [ 241.504354] ============================= [ 241.504358] WARNING: suspicious RCU usage [ 241.504366] 5.8.0-rc4-custom-01521-g72a7c7d549c3 #32 Not tainted [ 241.504370] ----------------------------- [ 241.504378] net/sched/sch_api.c:270 RCU-list traversed in non-reader section!! [ 241.504382] other info that might help us debug this: [ 241.504388] rcu_scheduler_active = 2, debug_locks = 1 [ 241.504394] 1 lock held by tc/1391: [ 241.504398] #0: ffffffff85a27850 (rtnl_mutex){+.+.}-{3:3}, at: rtnetlink_rcv_msg+0x49a/0xbd0 [ 241.504431] stack backtrace: [ 241.504440] CPU: 0 PID: 1391 Comm: tc Not tainted 5.8.0-rc4-custom-01521-g72a7c7d549c3 #32 [ 241.504446] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-2.fc32 04/01/2014 [ 241.504453] Call Trace: [ 241.504465] dump_stack+0x100/0x184 [ 241.504482] lockdep_rcu_suspicious+0x153/0x15d [ 241.504499] qdisc_match_from_root+0x293/0x350 Fix this by passing the rtnl held lockdep condition down to hlist_for_each_entry_rcu() Reported-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/linux/hashtable.h | 4 ++-- net/sched/sch_api.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 78b6ea5fa8ba..f6c666730b8c 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node) * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ -#define hash_for_each_possible_rcu(name, obj, member, key) \ +#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ - member) + member, ## cond) /** * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 11ebba60da3b..2a76a2f5ed88 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -267,7 +267,8 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) root->handle == handle) return root; - hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) { + hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle, + lockdep_rtnl_is_held()) { if (q->handle == handle) return q; } From 558e35ccfe9516c8786c3dd3b8a486fb37e6b858 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Mon, 20 Jul 2020 10:56:52 +0200 Subject: [PATCH 1284/2056] net: macb: WoL support for GEM type of Ethernet controller Adapt the Wake-on-Lan feature to the Cadence GEM Ethernet controller. This controller has different register layout and cannot be handled by previous code. We disable completely interrupts on all the queues but the queue 0. Handling of WoL interrupt is done in another interrupt handler positioned depending on the controller version used, just between suspend() and resume() calls. It allows to lower pressure on the generic interrupt hot path by removing the need to handle 2 tests for each IRQ: the first figuring out the controller revision, the second for actually knowing if the WoL bit is set. Queue management in suspend()/resume() functions inspired from RFC patch by Harini Katakam , thanks! Cc: Claudiu Beznea Cc: Harini Katakam Signed-off-by: Nicolas Ferre Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb.h | 3 + drivers/net/ethernet/cadence/macb_main.c | 155 +++++++++++++++++++---- 2 files changed, 130 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index ab827fb4b6b9..4f1b41569260 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -90,6 +90,7 @@ #define GEM_SA3T 0x009C /* Specific3 Top */ #define GEM_SA4B 0x00A0 /* Specific4 Bottom */ #define GEM_SA4T 0x00A4 /* Specific4 Top */ +#define GEM_WOL 0x00b8 /* Wake on LAN */ #define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */ #define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */ #define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */ @@ -396,6 +397,8 @@ #define MACB_PDRSFT_SIZE 1 #define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ #define MACB_SRI_SIZE 1 +#define GEM_WOL_OFFSET 28 /* Enable wake-on-lan interrupt */ +#define GEM_WOL_SIZE 1 /* Timer increment fields */ #define MACB_TI_CNS_OFFSET 0 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index e5e37aa81b02..0f2417f09514 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1517,6 +1517,35 @@ static void macb_tx_restart(struct macb_queue *queue) macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } +static irqreturn_t gem_wol_interrupt(int irq, void *dev_id) +{ + struct macb_queue *queue = dev_id; + struct macb *bp = queue->bp; + u32 status; + + status = queue_readl(queue, ISR); + + if (unlikely(!status)) + return IRQ_NONE; + + spin_lock(&bp->lock); + + if (status & GEM_BIT(WOL)) { + queue_writel(queue, IDR, GEM_BIT(WOL)); + gem_writel(bp, WOL, 0); + netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", + (unsigned int)(queue - bp->queues), + (unsigned long)status); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, GEM_BIT(WOL)); + pm_wakeup_event(&bp->pdev->dev, 0); + } + + spin_unlock(&bp->lock); + + return IRQ_HANDLED; +} + static irqreturn_t macb_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -3316,6 +3345,8 @@ static const struct ethtool_ops macb_ethtool_ops = { static const struct ethtool_ops gem_ethtool_ops = { .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, + .get_wol = macb_get_wol, + .set_wol = macb_set_wol, .get_link = ethtool_op_get_link, .get_ts_info = macb_get_ts_info, .get_ethtool_stats = gem_get_ethtool_stats, @@ -4567,34 +4598,69 @@ static int __maybe_unused macb_suspend(struct device *dev) struct macb_queue *queue = bp->queues; unsigned long flags; unsigned int q; + int err; if (!netif_running(netdev)) return 0; if (bp->wol & MACB_WOL_ENABLED) { - macb_writel(bp, IER, MACB_BIT(WOL)); - macb_writel(bp, WOL, MACB_BIT(MAG)); - enable_irq_wake(bp->queues[0].irq); - netif_device_detach(netdev); - } else { - netif_device_detach(netdev); + spin_lock_irqsave(&bp->lock, flags); + /* Flush all status bits */ + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); for (q = 0, queue = bp->queues; q < bp->num_queues; - ++q, ++queue) - napi_disable(&queue->napi); + ++q, ++queue) { + /* Disable all interrupts */ + queue_writel(queue, IDR, -1); + queue_readl(queue, ISR); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, -1); + } + /* Change interrupt handler and + * Enable WoL IRQ on queue 0 + */ + if (macb_is_gem(bp)) { + 
devm_free_irq(dev, bp->queues[0].irq, bp->queues); + err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, + IRQF_SHARED, netdev->name, bp->queues); + if (err) { + dev_err(dev, + "Unable to request IRQ %d (error %d)\n", + bp->queues[0].irq, err); + spin_unlock_irqrestore(&bp->lock, flags); + return err; + } + queue_writel(bp->queues, IER, GEM_BIT(WOL)); + gem_writel(bp, WOL, MACB_BIT(MAG)); + } else { + queue_writel(bp->queues, IER, MACB_BIT(WOL)); + macb_writel(bp, WOL, MACB_BIT(MAG)); + } + spin_unlock_irqrestore(&bp->lock, flags); + + enable_irq_wake(bp->queues[0].irq); + } + + netif_device_detach(netdev); + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_disable(&queue->napi); + + if (!(bp->wol & MACB_WOL_ENABLED)) { rtnl_lock(); phylink_stop(bp->phylink); rtnl_unlock(); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); spin_unlock_irqrestore(&bp->lock, flags); - - if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) - bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); - - if (netdev->hw_features & NETIF_F_NTUPLE) - bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); } + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) + bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); + + if (netdev->hw_features & NETIF_F_NTUPLE) + bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); + if (bp->ptp_info) bp->ptp_info->ptp_remove(netdev); if (!device_may_wakeup(dev)) @@ -4608,7 +4674,9 @@ static int __maybe_unused macb_resume(struct device *dev) struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); struct macb_queue *queue = bp->queues; + unsigned long flags; unsigned int q; + int err; if (!netif_running(netdev)) return 0; @@ -4617,29 +4685,60 @@ static int __maybe_unused macb_resume(struct device *dev) pm_runtime_force_resume(dev); if (bp->wol & MACB_WOL_ENABLED) { - macb_writel(bp, IDR, MACB_BIT(WOL)); - macb_writel(bp, WOL, 0); + spin_lock_irqsave(&bp->lock, flags); + /* Disable WoL */ + if (macb_is_gem(bp)) { + queue_writel(bp->queues, IDR, GEM_BIT(WOL)); + gem_writel(bp, WOL, 0); + } else { + queue_writel(bp->queues, IDR, MACB_BIT(WOL)); + macb_writel(bp, WOL, 0); + } + /* Clear ISR on queue 0 */ + queue_readl(bp->queues, ISR); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(bp->queues, ISR, -1); + /* Replace interrupt handler on queue 0 */ + devm_free_irq(dev, bp->queues[0].irq, bp->queues); + err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, + IRQF_SHARED, netdev->name, bp->queues); + if (err) { + dev_err(dev, + "Unable to request IRQ %d (error %d)\n", + bp->queues[0].irq, err); + spin_unlock_irqrestore(&bp->lock, flags); + return err; + } + spin_unlock_irqrestore(&bp->lock, flags); + disable_irq_wake(bp->queues[0].irq); - } else { - macb_writel(bp, NCR, MACB_BIT(MPE)); - if (netdev->hw_features & NETIF_F_NTUPLE) - gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); - - if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) - macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); - - for (q = 0, queue = bp->queues; q < bp->num_queues; - ++q, ++queue) - napi_enable(&queue->napi); + /* Now make sure we disable phy before moving + * to common restore path + */ rtnl_lock(); - phylink_start(bp->phylink); + phylink_stop(bp->phylink); rtnl_unlock(); } + for (q = 0, queue = bp->queues; q < bp->num_queues; + ++q, ++queue) + napi_enable(&queue->napi); + + if (netdev->hw_features & NETIF_F_NTUPLE) + gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); + + if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) + 
macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); + + macb_writel(bp, NCR, MACB_BIT(MPE)); macb_init_hw(bp); macb_set_rx_mode(netdev); macb_restore_features(bp); + rtnl_lock(); + phylink_start(bp->phylink); + rtnl_unlock(); + netif_device_attach(netdev); if (bp->ptp_info) bp->ptp_info->ptp_init(netdev); From 9d45c8e89079dfe5489efaae9cea12a539ebc3ff Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Mon, 20 Jul 2020 10:56:53 +0200 Subject: [PATCH 1285/2056] net: macb: Add WoL interrupt support for MACB type of Ethernet controller Handle the Wake-on-Lan interrupt for the Cadence MACB Ethernet controller. As we do for the GEM version, we handle of WoL interrupt in a specialized interrupt handler for MACB version that is positionned just between suspend() and resume() calls. Cc: Claudiu Beznea Cc: Harini Katakam Signed-off-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 40 +++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 0f2417f09514..a6a35e1b0115 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1517,6 +1517,35 @@ static void macb_tx_restart(struct macb_queue *queue) macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); } +static irqreturn_t macb_wol_interrupt(int irq, void *dev_id) +{ + struct macb_queue *queue = dev_id; + struct macb *bp = queue->bp; + u32 status; + + status = queue_readl(queue, ISR); + + if (unlikely(!status)) + return IRQ_NONE; + + spin_lock(&bp->lock); + + if (status & MACB_BIT(WOL)) { + queue_writel(queue, IDR, MACB_BIT(WOL)); + macb_writel(bp, WOL, 0); + netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", + (unsigned int)(queue - bp->queues), + (unsigned long)status); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + queue_writel(queue, ISR, MACB_BIT(WOL)); + pm_wakeup_event(&bp->pdev->dev, 0); + } + + spin_unlock(&bp->lock); + + return IRQ_HANDLED; +} + static irqreturn_t gem_wol_interrupt(int irq, void *dev_id) { struct macb_queue *queue = dev_id; @@ -4619,8 +4648,8 @@ static int __maybe_unused macb_suspend(struct device *dev) /* Change interrupt handler and * Enable WoL IRQ on queue 0 */ + devm_free_irq(dev, bp->queues[0].irq, bp->queues); if (macb_is_gem(bp)) { - devm_free_irq(dev, bp->queues[0].irq, bp->queues); err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, IRQF_SHARED, netdev->name, bp->queues); if (err) { @@ -4633,6 +4662,15 @@ static int __maybe_unused macb_suspend(struct device *dev) queue_writel(bp->queues, IER, GEM_BIT(WOL)); gem_writel(bp, WOL, MACB_BIT(MAG)); } else { + err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, + IRQF_SHARED, netdev->name, bp->queues); + if (err) { + dev_err(dev, + "Unable to request IRQ %d (error %d)\n", + bp->queues[0].irq, err); + spin_unlock_irqrestore(&bp->lock, flags); + return err; + } queue_writel(bp->queues, IER, MACB_BIT(WOL)); macb_writel(bp, WOL, MACB_BIT(MAG)); } From cebd2cac905e5a665bc992c7725f98bf6cd4a21d Mon Sep 17 00:00:00 2001 From: Zhang Changzhong Date: Mon, 20 Jul 2020 19:12:33 +0800 Subject: [PATCH 1286/2056] net: fs_enet: remove redundant null check Because clk_prepare_enable and clk_disable_unprepare already checked NULL clock parameter, so the additional checks are unnecessary, just remove them. Signed-off-by: Zhang Changzhong Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index b0d4b1984a70..bf846b42bc74 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1043,8 +1043,7 @@ static int fs_enet_probe(struct platform_device *ofdev) out_free_dev: free_netdev(ndev); out_put: - if (fpi->clk_per) - clk_disable_unprepare(fpi->clk_per); + clk_disable_unprepare(fpi->clk_per); out_deregister_fixed_link: of_node_put(fpi->phy_node); if (of_phy_is_fixed_link(ofdev->dev.of_node)) @@ -1065,8 +1064,7 @@ static int fs_enet_remove(struct platform_device *ofdev) fep->ops->cleanup_data(ndev); dev_set_drvdata(fep->dev, NULL); of_node_put(fep->fpi->phy_node); - if (fep->fpi->clk_per) - clk_disable_unprepare(fep->fpi->clk_per); + clk_disable_unprepare(fep->fpi->clk_per); if (of_phy_is_fixed_link(ofdev->dev.of_node)) of_phy_deregister_fixed_link(ofdev->dev.of_node); free_netdev(ndev); From f1bfd71c8662f20d53e71ef4e18bfb0e5677c27f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 20 Jul 2020 13:36:09 +0200 Subject: [PATCH 1287/2056] arch, net: remove the last csum_partial_copy() leftovers Most of the tree only uses and implements csum_partial_copy_nocheck, but the c6x and lib/checksum.c implement a csum_partial_copy that isn't used anywere except to define csum_partial_copy. Get rid of this pointless alias. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- arch/c6x/lib/checksum.c | 2 +- arch/c6x/lib/csum_64plus.S | 8 ++++---- arch/nios2/include/asm/checksum.h | 5 ++--- include/asm-generic/checksum.h | 6 ++---- lib/checksum.c | 4 ++-- 5 files changed, 11 insertions(+), 14 deletions(-) diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c index 335ca4900808..dff2e2ec6e64 100644 --- a/arch/c6x/lib/checksum.c +++ b/arch/c6x/lib/checksum.c @@ -6,6 +6,6 @@ /* These are from csum_64plus.S */ EXPORT_SYMBOL(csum_partial); -EXPORT_SYMBOL(csum_partial_copy); +EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(ip_compute_csum); EXPORT_SYMBOL(ip_fast_csum); diff --git a/arch/c6x/lib/csum_64plus.S b/arch/c6x/lib/csum_64plus.S index 8e625a30fd43..9c07127485d1 100644 --- a/arch/c6x/lib/csum_64plus.S +++ b/arch/c6x/lib/csum_64plus.S @@ -10,8 +10,8 @@ #include ; -;unsigned int csum_partial_copy(const char *src, char * dst, -; int len, int sum) +;unsigned int csum_partial_copy_nocheck(const char *src, char * dst, +; int len, int sum) ; ; A4: src ; B4: dst @@ -21,7 +21,7 @@ ; .text -ENTRY(csum_partial_copy) +ENTRY(csum_partial_copy_nocheck) MVC .S2 ILC,B30 MV .D1X B6,A31 ; given csum @@ -149,7 +149,7 @@ L10: ADD .D1 A31,A9,A9 BNOP .S2 B3,4 MVC .S2 B30,ILC -ENDPROC(csum_partial_copy) +ENDPROC(csum_partial_copy_nocheck) ; ;unsigned short diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h index ec39698d3bea..b4316c361729 100644 --- a/arch/nios2/include/asm/checksum.h +++ b/arch/nios2/include/asm/checksum.h @@ -12,10 +12,9 @@ /* Take these from lib/checksum.c */ extern __wsum csum_partial(const void *buff, int len, __wsum sum); -extern __wsum csum_partial_copy(const void *src, void *dst, int len, +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy((src), (dst), (len), (sum)) +#define 
csum_partial_copy_nocheck csum_partial_copy_nocheck extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); extern __sum16 ip_compute_csum(const void *buff, int len); diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h index 5a80f8e54300..cd8b75aa770d 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -23,11 +23,9 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); * here even more important to align src and dst on a 32-bit (or even * better 64-bit) boundary */ -extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); - #ifndef csum_partial_copy_nocheck -#define csum_partial_copy_nocheck(src, dst, len, sum) \ - csum_partial_copy((src), (dst), (len), (sum)) +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, + __wsum sum); #endif #ifndef ip_fast_csum diff --git a/lib/checksum.c b/lib/checksum.c index 7ac65a0000ff..c7861e84c526 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -149,12 +149,12 @@ EXPORT_SYMBOL(ip_compute_csum); * copy from ds while checksumming, otherwise like csum_partial */ __wsum -csum_partial_copy(const void *src, void *dst, int len, __wsum sum) +csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) { memcpy(dst, src, len); return csum_partial(dst, len, sum); } -EXPORT_SYMBOL(csum_partial_copy); +EXPORT_SYMBOL(csum_partial_copy_nocheck); #ifndef csum_tcpudp_nofold static inline u32 from64to32(u64 x) From 73e283dfbf036f8830a0aaeb41036245c43245a4 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 20 Jul 2020 14:29:12 +0200 Subject: [PATCH 1288/2056] net: packetengines: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'hamachi_init_one()' (hamachi.c), GFP_KERNEL can be used because it is a probe function and no lock is acquired. When memory is allocated in 'yellowfin_init_one()' (yellowfin.c), GFP_KERNEL can be used because it is a probe function and no lock is acquired. 
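For context, a minimal before/after sketch of the coherent-allocation change that the semantic patch below performs. This is illustrative only: the structure, function and size names here are placeholders, not identifiers taken from hamachi.c or yellowfin.c.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

struct my_priv {
	void *ring;
	dma_addr_t ring_dma;
};

static int my_probe_alloc(struct pci_dev *pdev, struct my_priv *priv,
			  size_t len)
{
	/* Legacy wrapper (implicitly GFP_ATOMIC):
	 *   priv->ring = pci_alloc_consistent(pdev, len, &priv->ring_dma);
	 *
	 * Generic DMA API equivalent; GFP_KERNEL is safe here because a
	 * probe function may sleep and holds no spinlock.
	 */
	priv->ring = dma_alloc_coherent(&pdev->dev, len, &priv->ring_dma,
					GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;
	return 0;
}

Note that the generic API takes the underlying struct device (&pdev->dev) rather than the pci_dev itself, and makes the allocation flag explicit. The coccinelle rules used for the mechanical conversion follow.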
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/packetengines/hamachi.c | 111 ++++++++++-------- .../net/ethernet/packetengines/yellowfin.c | 83 +++++++------ 2 files changed, 110 insertions(+), 84 deletions(-) diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 70816d2e2990..d058a63602a9 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -644,13 +644,15 @@ static int hamachi_init_one(struct pci_dev *pdev, hmp->mii_if.phy_id_mask = 0x1f; hmp->mii_if.reg_num_mask = 0x1f; - ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) goto err_out_cleardev; hmp->tx_ring = ring_space; hmp->tx_ring_dma = ring_dma; - ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) goto err_out_unmap_tx; hmp->rx_ring = ring_space; @@ -773,11 +775,11 @@ static int hamachi_init_one(struct pci_dev *pdev, return 0; err_out_unmap_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, - hmp->rx_ring_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring, + hmp->rx_ring_dma); err_out_unmap_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, - hmp->tx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring, + hmp->tx_ring_dma); err_out_cleardev: free_netdev (dev); err_out_iounmap: @@ -1001,9 +1003,9 @@ static inline int hamachi_tx(struct net_device *dev) /* Free the original skb. */ skb = hmp->tx_skbuff[entry]; if (skb) { - pci_unmap_single(hmp->pci_dev, - leXX_to_cpu(hmp->tx_ring[entry].addr), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->tx_ring[entry].addr), + skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[entry] = NULL; } @@ -1093,8 +1095,9 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue) hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff); skb = hmp->tx_skbuff[i]; if (skb){ - pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->tx_ring[i].addr), + skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[i] = NULL; } @@ -1115,9 +1118,9 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue) struct sk_buff *skb = hmp->rx_skbuff[i]; if (skb){ - pci_unmap_single(hmp->pci_dev, - leXX_to_cpu(hmp->rx_ring[i].addr), - hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->rx_ring[i].addr), + hmp->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); hmp->rx_skbuff[i] = NULL; } @@ -1131,8 +1134,10 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue) if (skb == NULL) break; - hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, - skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, + skb->data, + hmp->rx_buf_sz, + DMA_FROM_DEVICE)); hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2)); } @@ -1183,8 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev) if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. 
*/ - hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, - skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, + skb->data, + hmp->rx_buf_sz, + DMA_FROM_DEVICE)); /* -2 because it doesn't REALLY have that first 2 bytes -KDU */ hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | DescEndPacket | DescIntr | (hmp->rx_buf_sz -2)); @@ -1233,8 +1240,10 @@ static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, hmp->tx_skbuff[entry] = skb; - hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, - skb->data, skb->len, PCI_DMA_TODEVICE)); + hmp->tx_ring[entry].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE)); /* Hmmmm, could probably put a DescIntr on these, but the way the driver is currently coded makes Tx interrupts unnecessary @@ -1333,10 +1342,10 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance) skb = hmp->tx_skbuff[entry]; /* Free the original skb. */ if (skb){ - pci_unmap_single(hmp->pci_dev, - leXX_to_cpu(hmp->tx_ring[entry].addr), - skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->tx_ring[entry].addr), + skb->len, + DMA_TO_DEVICE); dev_consume_skb_irq(skb); hmp->tx_skbuff[entry] = NULL; } @@ -1413,10 +1422,9 @@ static int hamachi_rx(struct net_device *dev) if (desc_status & DescOwn) break; - pci_dma_sync_single_for_cpu(hmp->pci_dev, - leXX_to_cpu(desc->addr), - hmp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&hmp->pci_dev->dev, + leXX_to_cpu(desc->addr), + hmp->rx_buf_sz, DMA_FROM_DEVICE); buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; frame_status = get_unaligned_le32(&(buf_addr[data_size - 12])); if (hamachi_debug > 4) @@ -1483,10 +1491,10 @@ static int hamachi_rx(struct net_device *dev) "not good with RX_CHECKSUM\n", dev->name); #endif skb_reserve(skb, 2); /* 16 byte align the IP header */ - pci_dma_sync_single_for_cpu(hmp->pci_dev, - leXX_to_cpu(hmp->rx_ring[entry].addr), - hmp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->rx_ring[entry].addr), + hmp->rx_buf_sz, + DMA_FROM_DEVICE); /* Call copy + cksum if available. */ #if 1 || USE_IP_COPYSUM skb_copy_to_linear_data(skb, @@ -1496,14 +1504,15 @@ static int hamachi_rx(struct net_device *dev) skb_put_data(skb, hmp->rx_ring_dma + entry*sizeof(*desc), pkt_len); #endif - pci_dma_sync_single_for_device(hmp->pci_dev, - leXX_to_cpu(hmp->rx_ring[entry].addr), - hmp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->rx_ring[entry].addr), + hmp->rx_buf_sz, + DMA_FROM_DEVICE); } else { - pci_unmap_single(hmp->pci_dev, + dma_unmap_single(&hmp->pci_dev->dev, leXX_to_cpu(hmp->rx_ring[entry].addr), - hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + hmp->rx_buf_sz, + DMA_FROM_DEVICE); skb_put(skb = hmp->rx_skbuff[entry], pkt_len); hmp->rx_skbuff[entry] = NULL; } @@ -1586,8 +1595,10 @@ static int hamachi_rx(struct net_device *dev) if (skb == NULL) break; /* Better luck next round. 
*/ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, - skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + desc->addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev, + skb->data, + hmp->rx_buf_sz, + DMA_FROM_DEVICE)); } desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz); if (entry >= RX_RING_SIZE-1) @@ -1704,9 +1715,9 @@ static int hamachi_close(struct net_device *dev) skb = hmp->rx_skbuff[i]; hmp->rx_ring[i].status_n_length = 0; if (skb) { - pci_unmap_single(hmp->pci_dev, - leXX_to_cpu(hmp->rx_ring[i].addr), - hmp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->rx_ring[i].addr), + hmp->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); hmp->rx_skbuff[i] = NULL; } @@ -1715,9 +1726,9 @@ static int hamachi_close(struct net_device *dev) for (i = 0; i < TX_RING_SIZE; i++) { skb = hmp->tx_skbuff[i]; if (skb) { - pci_unmap_single(hmp->pci_dev, - leXX_to_cpu(hmp->tx_ring[i].addr), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&hmp->pci_dev->dev, + leXX_to_cpu(hmp->tx_ring[i].addr), + skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); hmp->tx_skbuff[i] = NULL; } @@ -1899,10 +1910,10 @@ static void hamachi_remove_one(struct pci_dev *pdev) if (dev) { struct hamachi_private *hmp = netdev_priv(dev); - pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring, - hmp->rx_ring_dma); - pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring, - hmp->tx_ring_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring, + hmp->rx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring, + hmp->tx_ring_dma); unregister_netdev(dev); iounmap(hmp->base); free_netdev(dev); diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 520779f05e1a..647a1431b359 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -434,19 +434,22 @@ static int yellowfin_init_one(struct pci_dev *pdev, np->drv_flags = drv_flags; np->base = ioaddr; - ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) goto err_out_cleardev; np->tx_ring = ring_space; np->tx_ring_dma = ring_dma; - ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; - ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE, + &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_unmap_rx; np->tx_status = ring_space; @@ -505,12 +508,14 @@ static int yellowfin_init_one(struct pci_dev *pdev, return 0; err_out_unmap_status: - pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, - np->tx_status_dma); + dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status, + np->tx_status_dma); err_out_unmap_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); err_out_unmap_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); err_out_cleardev: pci_iounmap(pdev, ioaddr); err_out_free_res: @@ -740,8 +745,10 @@ static int yellowfin_init_ring(struct 
net_device *dev) if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. */ - yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev, + skb->data, + yp->rx_buf_sz, + DMA_FROM_DEVICE)); } if (i != RX_RING_SIZE) { for (j = 0; j < i; j++) @@ -831,8 +838,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, yp->tx_skbuff[entry] = skb; #ifdef NO_TXSTATS - yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, len, PCI_DMA_TODEVICE)); + yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev, + skb->data, + len, DMA_TO_DEVICE)); yp->tx_ring[entry].result_status = 0; if (entry >= TX_RING_SIZE-1) { /* New stop command. */ @@ -847,8 +855,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, yp->cur_tx++; #else yp->tx_ring[entry<<1].request_cnt = len; - yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, len, PCI_DMA_TODEVICE)); + yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev, + skb->data, + len, DMA_TO_DEVICE)); /* The input_last (status-write) command is constant, but we must rewrite the subsequent 'stop' command. */ @@ -923,8 +932,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* Free the original skb. */ - pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&yp->pci_dev->dev, + le32_to_cpu(yp->tx_ring[entry].addr), + skb->len, DMA_TO_DEVICE); dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = NULL; } @@ -980,9 +990,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) dev->stats.tx_packets++; } /* Free the original skb. */ - pci_unmap_single(yp->pci_dev, - yp->tx_ring[entry<<1].addr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&yp->pci_dev->dev, + yp->tx_ring[entry << 1].addr, + skb->len, DMA_TO_DEVICE); dev_consume_skb_irq(skb); yp->tx_skbuff[entry] = 0; /* Mark status as empty. */ @@ -1055,8 +1065,9 @@ static int yellowfin_rx(struct net_device *dev) if(!desc->result_status) break; - pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr), - yp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&yp->pci_dev->dev, + le32_to_cpu(desc->addr), + yp->rx_buf_sz, DMA_FROM_DEVICE); desc_status = le32_to_cpu(desc->result_status) >> 16; buf_addr = rx_skb->data; data_size = (le32_to_cpu(desc->dbdma_cmd) - @@ -1121,10 +1132,10 @@ static int yellowfin_rx(struct net_device *dev) without copying to a properly sized skbuff. 
*/ if (pkt_len > rx_copybreak) { skb_put(skb = rx_skb, pkt_len); - pci_unmap_single(yp->pci_dev, - le32_to_cpu(yp->rx_ring[entry].addr), - yp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&yp->pci_dev->dev, + le32_to_cpu(yp->rx_ring[entry].addr), + yp->rx_buf_sz, + DMA_FROM_DEVICE); yp->rx_skbuff[entry] = NULL; } else { skb = netdev_alloc_skb(dev, pkt_len + 2); @@ -1133,10 +1144,10 @@ static int yellowfin_rx(struct net_device *dev) skb_reserve(skb, 2); /* 16 byte align the IP header */ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len); skb_put(skb, pkt_len); - pci_dma_sync_single_for_device(yp->pci_dev, - le32_to_cpu(desc->addr), - yp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&yp->pci_dev->dev, + le32_to_cpu(desc->addr), + yp->rx_buf_sz, + DMA_FROM_DEVICE); } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); @@ -1155,8 +1166,10 @@ static int yellowfin_rx(struct net_device *dev) break; /* Better luck next round. */ yp->rx_skbuff[entry] = skb; skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, - skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); + yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev, + skb->data, + yp->rx_buf_sz, + DMA_FROM_DEVICE)); } yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ @@ -1379,10 +1392,12 @@ static void yellowfin_remove_one(struct pci_dev *pdev) BUG_ON(!dev); np = netdev_priv(dev); - pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, - np->tx_status_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); - pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); + dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status, + np->tx_status_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); unregister_netdev (dev); pci_iounmap(pdev, np->base); From 0b0edb993c99bc9a2d6d92810736cd77300b3770 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 20 Jul 2020 15:02:42 +0200 Subject: [PATCH 1289/2056] r6040: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'r6040_open()', GFP_KERNEL can be used because this is a net_device_ops' 'ndo_open' function. This function is protected by the rtnl_lock() semaphore. So only a mutex is used and no spin_lock is acquired. 
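A small sketch of the streaming-mapping side of the same conversion, which the rules below also cover. The function and parameter names ("my_map_tx") are illustrative placeholders, not code from r6040.c.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
		     dma_addr_t *out)
{
	dma_addr_t mapping;

	/* Old:  mapping = pci_map_single(pdev, skb->data, skb->len,
	 *                                PCI_DMA_TODEVICE);
	 *       if (pci_dma_mapping_error(pdev, mapping)) ...
	 */
	mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		return -ENOMEM;

	*out = mapping;
	return 0;
}

The PCI_DMA_* direction constants map one-to-one onto the generic DMA_* directions, so the transformation is purely mechanical. The full coccinelle script follows.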
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rdc/r6040.c | 64 +++++++++++++++++--------------- 1 file changed, 34 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index f5ecc410ff85..7c74318620b1 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -262,9 +262,9 @@ static void r6040_free_txbufs(struct net_device *dev) for (i = 0; i < TX_DCNT; i++) { if (lp->tx_insert_ptr->skb_ptr) { - pci_unmap_single(lp->pdev, - le32_to_cpu(lp->tx_insert_ptr->buf), - MAX_BUF_SIZE, PCI_DMA_TODEVICE); + dma_unmap_single(&lp->pdev->dev, + le32_to_cpu(lp->tx_insert_ptr->buf), + MAX_BUF_SIZE, DMA_TO_DEVICE); dev_kfree_skb(lp->tx_insert_ptr->skb_ptr); lp->tx_insert_ptr->skb_ptr = NULL; } @@ -279,9 +279,9 @@ static void r6040_free_rxbufs(struct net_device *dev) for (i = 0; i < RX_DCNT; i++) { if (lp->rx_insert_ptr->skb_ptr) { - pci_unmap_single(lp->pdev, - le32_to_cpu(lp->rx_insert_ptr->buf), - MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&lp->pdev->dev, + le32_to_cpu(lp->rx_insert_ptr->buf), + MAX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(lp->rx_insert_ptr->skb_ptr); lp->rx_insert_ptr->skb_ptr = NULL; } @@ -335,9 +335,10 @@ static int r6040_alloc_rxbufs(struct net_device *dev) goto err_exit; } desc->skb_ptr = skb; - desc->buf = cpu_to_le32(pci_map_single(lp->pdev, - desc->skb_ptr->data, - MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); + desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, + desc->skb_ptr->data, + MAX_BUF_SIZE, + DMA_FROM_DEVICE)); desc->status = DSC_OWNER_MAC; desc = desc->vndescp; } while (desc != lp->rx_ring); @@ -484,14 +485,14 @@ static int r6040_close(struct net_device *dev) /* Free Descriptor memory */ if (lp->rx_ring) { - pci_free_consistent(pdev, - RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); + dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring, + lp->rx_ring_dma); lp->rx_ring = NULL; } if (lp->tx_ring) { - pci_free_consistent(pdev, - TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); + dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring, + lp->tx_ring_dma); lp->tx_ring = NULL; } @@ -544,8 +545,8 @@ static int r6040_rx(struct net_device *dev, int limit) /* Do not count the CRC */ skb_put(skb_ptr, descptr->len - 4); - pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), - MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf), + MAX_BUF_SIZE, DMA_FROM_DEVICE); skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev); /* Send to upper layer */ @@ -555,9 +556,10 @@ static int r6040_rx(struct net_device *dev, int limit) /* put new skb into descriptor */ descptr->skb_ptr = new_skb; - descptr->buf = cpu_to_le32(pci_map_single(priv->pdev, - descptr->skb_ptr->data, - MAX_BUF_SIZE, PCI_DMA_FROMDEVICE)); + descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev, + descptr->skb_ptr->data, + MAX_BUF_SIZE, + DMA_FROM_DEVICE)); next_descr: /* put the descriptor back to the MAC */ @@ -597,8 +599,8 @@ static void r6040_tx(struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb_ptr->len; - pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), - skb_ptr->len, PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf), + skb_ptr->len, DMA_TO_DEVICE); /* Free buffer */ dev_kfree_skb(skb_ptr); descptr->skb_ptr = NULL; @@ -750,14 +752,16 @@ static int r6040_open(struct net_device *dev) /* Allocate Descriptor memory */ lp->rx_ring = - pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma); + 
dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE, + &lp->rx_ring_dma, GFP_KERNEL); if (!lp->rx_ring) { ret = -ENOMEM; goto err_free_irq; } lp->tx_ring = - pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma); + dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE, + &lp->tx_ring_dma, GFP_KERNEL); if (!lp->tx_ring) { ret = -ENOMEM; goto err_free_rx_ring; @@ -773,11 +777,11 @@ static int r6040_open(struct net_device *dev) return 0; err_free_tx_ring: - pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring, - lp->tx_ring_dma); + dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring, + lp->tx_ring_dma); err_free_rx_ring: - pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring, - lp->rx_ring_dma); + dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring, + lp->rx_ring_dma); err_free_irq: free_irq(dev->irq, dev); out: @@ -811,8 +815,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, descptr = lp->tx_insert_ptr; descptr->len = skb->len; descptr->skb_ptr = skb; - descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, - skb->data, skb->len, PCI_DMA_TODEVICE)); + descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE)); descptr->status = DSC_OWNER_MAC; skb_tx_timestamp(skb); @@ -1029,12 +1033,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out; /* this should always be supported */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n"); goto err_out_disable_dev; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n"); goto err_out_disable_dev; From 256ca7449fbcc0351c349abf83d1869e31504725 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 20 Jul 2020 15:36:09 +0200 Subject: [PATCH 1290/2056] sis: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'epic_init_one()' (sis190.c), GFP_KERNEL can be used because this is a net_device_ops' 'ndo_open' function. This function is protected by the rtnl_lock() semaphore. So only a mutex is used and no spin_lock is acquired. When memory is allocated in 'sis900_probe()' (sis900.c), GFP_KERNEL can be used because it is a probe function and no spin_lock is acquired. 
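For completeness, a sketch of the DMA-mask part of the conversion that the rules below also apply; "my_init_one" is an illustrative name, not a function in sis190.c or sis900.c.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_init_one(struct pci_dev *pdev)
{
	int err;

	/* Old: err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		return err;

	/* Old: err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); */
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}

The coccinelle rules used for the conversion follow.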
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sis/sis190.c | 52 ++++++++++-------- drivers/net/ethernet/sis/sis900.c | 89 +++++++++++++++++-------------- 2 files changed, 80 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 5a4b6e3ab38f..676b193833c0 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -494,9 +494,9 @@ static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp, skb = netdev_alloc_skb(tp->dev, rx_buf_sz); if (unlikely(!skb)) goto skb_alloc_failed; - mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(tp->pci_dev, mapping)) + mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz, + DMA_FROM_DEVICE); + if (dma_mapping_error(&tp->pci_dev->dev, mapping)) goto out; sis190_map_to_asic(desc, mapping, rx_buf_sz); @@ -542,8 +542,8 @@ static bool sis190_try_rx_copy(struct sis190_private *tp, if (!skb) goto out; - pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz, + DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); *sk_buff = skb; done = true; @@ -612,12 +612,14 @@ static int sis190_rx_interrupt(struct net_device *dev, if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) { - pci_dma_sync_single_for_device(pdev, addr, - tp->rx_buf_sz, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&pdev->dev, addr, + tp->rx_buf_sz, + DMA_FROM_DEVICE); sis190_give_to_asic(desc, tp->rx_buf_sz); } else { - pci_unmap_single(pdev, addr, tp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, addr, + tp->rx_buf_sz, + DMA_FROM_DEVICE); tp->Rx_skbuff[entry] = NULL; sis190_make_unusable_by_asic(desc); } @@ -654,7 +656,8 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; - pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len, + DMA_TO_DEVICE); memset(desc, 0x00, sizeof(*desc)); } @@ -785,8 +788,8 @@ static void sis190_free_rx_skb(struct sis190_private *tp, { struct pci_dev *pdev = tp->pci_dev; - pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), tp->rx_buf_sz, + DMA_FROM_DEVICE); dev_kfree_skb(*sk_buff); *sk_buff = NULL; sis190_make_unusable_by_asic(desc); @@ -1069,11 +1072,13 @@ static int sis190_open(struct net_device *dev) * Rx and Tx descriptors need 256 bytes alignment. * pci_alloc_consistent() guarantees a stronger alignment. 
*/ - tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma); + tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES, + &tp->tx_dma, GFP_KERNEL); if (!tp->TxDescRing) goto out; - tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma); + tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES, + &tp->rx_dma, GFP_KERNEL); if (!tp->RxDescRing) goto err_free_tx_0; @@ -1095,9 +1100,11 @@ static int sis190_open(struct net_device *dev) sis190_delete_timer(dev); sis190_rx_clear(tp); err_free_rx_1: - pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); + dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing, + tp->rx_dma); err_free_tx_0: - pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); + dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing, + tp->tx_dma); goto out; } @@ -1159,8 +1166,10 @@ static int sis190_close(struct net_device *dev) free_irq(pdev->irq, dev); - pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); - pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); + dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing, + tp->tx_dma); + dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing, + tp->rx_dma); tp->TxDescRing = NULL; tp->RxDescRing = NULL; @@ -1197,8 +1206,9 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pci_dev, mapping)) { + mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pci_dev->dev, mapping)) { netif_err(tp, tx_err, dev, "PCI mapping failed, dropping packet"); return NETDEV_TX_BUSY; @@ -1498,7 +1508,7 @@ static struct net_device *sis190_init_board(struct pci_dev *pdev) goto err_pci_disable_2; } - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc < 0) { if (netif_msg_probe(tp)) pr_err("%s: DMA configuration failed\n", diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 81ed7589e33c..82e020add19f 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -446,7 +446,7 @@ static int sis900_probe(struct pci_dev *pci_dev, ret = pci_enable_device(pci_dev); if(ret) return ret; - i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); if(i){ printk(KERN_ERR "sis900.c: architecture does not support " "32bit PCI busmaster DMA\n"); @@ -481,7 +481,8 @@ static int sis900_probe(struct pci_dev *pci_dev, pci_set_drvdata(pci_dev, net_dev); - ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pci_dev->dev, TX_TOTAL_SIZE, + &ring_dma, GFP_KERNEL); if (!ring_space) { ret = -ENOMEM; goto err_out_unmap; @@ -489,7 +490,8 @@ static int sis900_probe(struct pci_dev *pci_dev, sis_priv->tx_ring = ring_space; sis_priv->tx_ring_dma = ring_dma; - ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pci_dev->dev, RX_TOTAL_SIZE, + &ring_dma, GFP_KERNEL); if (!ring_space) { ret = -ENOMEM; goto err_unmap_tx; @@ -572,11 +574,11 @@ static int sis900_probe(struct pci_dev *pci_dev, return 0; err_unmap_rx: - pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, - sis_priv->rx_ring_dma); + dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring, + sis_priv->rx_ring_dma); 
err_unmap_tx: - pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, - sis_priv->tx_ring_dma); + dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring, + sis_priv->tx_ring_dma); err_out_unmap: pci_iounmap(pci_dev, ioaddr); err_out_cleardev: @@ -1188,10 +1190,12 @@ sis900_init_rx_ring(struct net_device *net_dev) } sis_priv->rx_skbuff[i] = skb; sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE; - sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev, - skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, - sis_priv->rx_ring[i].bufptr))) { + sis_priv->rx_ring[i].bufptr = dma_map_single(&sis_priv->pci_dev->dev, + skb->data, + RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev, + sis_priv->rx_ring[i].bufptr))) { dev_kfree_skb(skb); sis_priv->rx_skbuff[i] = NULL; break; @@ -1561,9 +1565,9 @@ static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue) struct sk_buff *skb = sis_priv->tx_skbuff[i]; if (skb) { - pci_unmap_single(sis_priv->pci_dev, - sis_priv->tx_ring[i].bufptr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&sis_priv->pci_dev->dev, + sis_priv->tx_ring[i].bufptr, + skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq(skb); sis_priv->tx_skbuff[i] = NULL; sis_priv->tx_ring[i].cmdsts = 0; @@ -1612,10 +1616,11 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) sis_priv->tx_skbuff[entry] = skb; /* set the transmit buffer descriptor and enable Transmit State Machine */ - sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, - skb->data, skb->len, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, - sis_priv->tx_ring[entry].bufptr))) { + sis_priv->tx_ring[entry].bufptr = dma_map_single(&sis_priv->pci_dev->dev, + skb->data, skb->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev, + sis_priv->tx_ring[entry].bufptr))) { dev_kfree_skb_any(skb); sis_priv->tx_skbuff[entry] = NULL; net_dev->stats.tx_dropped++; @@ -1778,9 +1783,9 @@ static int sis900_rx(struct net_device *net_dev) struct sk_buff * skb; struct sk_buff * rx_skb; - pci_unmap_single(sis_priv->pci_dev, - sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&sis_priv->pci_dev->dev, + sis_priv->rx_ring[entry].bufptr, + RX_BUF_SIZE, DMA_FROM_DEVICE); /* refill the Rx buffer, what if there is not enough * memory for new socket buffer ?? 
*/ @@ -1826,10 +1831,11 @@ static int sis900_rx(struct net_device *net_dev) sis_priv->rx_skbuff[entry] = skb; sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; sis_priv->rx_ring[entry].bufptr = - pci_map_single(sis_priv->pci_dev, skb->data, - RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, - sis_priv->rx_ring[entry].bufptr))) { + dma_map_single(&sis_priv->pci_dev->dev, + skb->data, RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev, + sis_priv->rx_ring[entry].bufptr))) { dev_kfree_skb_irq(skb); sis_priv->rx_skbuff[entry] = NULL; break; @@ -1860,10 +1866,11 @@ static int sis900_rx(struct net_device *net_dev) sis_priv->rx_skbuff[entry] = skb; sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE; sis_priv->rx_ring[entry].bufptr = - pci_map_single(sis_priv->pci_dev, skb->data, - RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, - sis_priv->rx_ring[entry].bufptr))) { + dma_map_single(&sis_priv->pci_dev->dev, + skb->data, RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev, + sis_priv->rx_ring[entry].bufptr))) { dev_kfree_skb_irq(skb); sis_priv->rx_skbuff[entry] = NULL; break; @@ -1928,9 +1935,9 @@ static void sis900_finish_xmit (struct net_device *net_dev) } /* Free the original skb. */ skb = sis_priv->tx_skbuff[entry]; - pci_unmap_single(sis_priv->pci_dev, - sis_priv->tx_ring[entry].bufptr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&sis_priv->pci_dev->dev, + sis_priv->tx_ring[entry].bufptr, skb->len, + DMA_TO_DEVICE); dev_consume_skb_irq(skb); sis_priv->tx_skbuff[entry] = NULL; sis_priv->tx_ring[entry].bufptr = 0; @@ -1979,8 +1986,9 @@ static int sis900_close(struct net_device *net_dev) for (i = 0; i < NUM_RX_DESC; i++) { skb = sis_priv->rx_skbuff[i]; if (skb) { - pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr, - RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, + sis_priv->rx_ring[i].bufptr, + RX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(skb); sis_priv->rx_skbuff[i] = NULL; } @@ -1988,8 +1996,9 @@ static int sis900_close(struct net_device *net_dev) for (i = 0; i < NUM_TX_DESC; i++) { skb = sis_priv->tx_skbuff[i]; if (skb) { - pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, + sis_priv->tx_ring[i].bufptr, + skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); sis_priv->tx_skbuff[i] = NULL; } @@ -2484,10 +2493,10 @@ static void sis900_remove(struct pci_dev *pci_dev) kfree(phy); } - pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, - sis_priv->rx_ring_dma); - pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, - sis_priv->tx_ring_dma); + dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring, + sis_priv->rx_ring_dma); + dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring, + sis_priv->tx_ring_dma); pci_iounmap(pci_dev, sis_priv->ioaddr); free_netdev(net_dev); pci_release_regions(pci_dev); From e812916d3278baf03aabf10044661fc3b2848823 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:00 +0300 Subject: [PATCH 1291/2056] linkmode: introduce linkmode_intersects() Add a new helper to find intersections between Ethtool link modes, linkmode_intersects(), similar to the other linkmode helpers. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- include/linux/linkmode.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index c664c27a29a0..f8397f300fcd 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1, return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } +static inline int linkmode_intersects(const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + static inline int linkmode_subset(const unsigned long *src1, const unsigned long *src2) { From bdb5d8ec47611ca61e168349f233e1dd1ed063f4 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:01 +0300 Subject: [PATCH 1292/2056] qed, qede, qedf: convert link mode from u32 to ETHTOOL_LINK_MODE Currently qed driver already ran out of 32 bits to store link modes, and this doesn't allow to add and support more speeds. Convert custom link mode to generic Ethtool bitmap and definitions (convenient Phylink shorthands are used for elegance and readability). This allowed us to drop all conversions/mappings between the driver and Ethtool. This involves changes in qede and qedf as well, as they used definitions from shared "qed_if.h". Suggested-by: Andrew Lunn Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 288 ++++++++++-------- .../net/ethernet/qlogic/qede/qede_ethtool.c | 200 ++++-------- drivers/scsi/qedf/qedf_main.c | 78 +++-- include/linux/qed/qed_if.h | 47 +-- 4 files changed, 268 insertions(+), 345 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4c5f5bd91359..afc4fa3bdcaa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "qed.h" #include "qed_sriov.h" @@ -1456,10 +1457,11 @@ static bool qed_can_link_change(struct qed_dev *cdev) static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { - struct qed_hwfn *hwfn; + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qed_mcp_link_params *link_params; + struct qed_hwfn *hwfn; struct qed_ptt *ptt; - u32 sup_caps; + u32 as; int rc; if (!cdev) @@ -1482,57 +1484,79 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) return -EBUSY; link_params = qed_mcp_get_link_params(hwfn); + if (!link_params) + return -ENODATA; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) link_params->speed.autoneg = params->autoneg; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { - link_params->speed.advertised_speeds = 0; - sup_caps = QED_LM_1000baseT_Full_BIT | - QED_LM_1000baseKX_Full_BIT | - QED_LM_1000baseX_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - sup_caps = QED_LM_10000baseT_Full_BIT | - QED_LM_10000baseKR_Full_BIT | - QED_LM_10000baseKX4_Full_BIT | - QED_LM_10000baseR_FEC_BIT | - QED_LM_10000baseCR_Full_BIT | - QED_LM_10000baseSR_Full_BIT | - QED_LM_10000baseLR_Full_BIT | - QED_LM_10000baseLRM_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) - 
link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; - sup_caps = QED_LM_25000baseKR_Full_BIT | - QED_LM_25000baseCR_Full_BIT | - QED_LM_25000baseSR_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; - sup_caps = QED_LM_40000baseLR4_Full_BIT | - QED_LM_40000baseKR4_Full_BIT | - QED_LM_40000baseCR4_Full_BIT | - QED_LM_40000baseSR4_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - sup_caps = QED_LM_50000baseKR2_Full_BIT | - QED_LM_50000baseCR2_Full_BIT | - QED_LM_50000baseSR2_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - sup_caps = QED_LM_100000baseKR4_Full_BIT | - QED_LM_100000baseSR4_Full_BIT | - QED_LM_100000baseCR4_Full_BIT | - QED_LM_100000baseLR4_ER4_Full_BIT; - if (params->adv_speeds & sup_caps) - link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; + as = 0; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 1000baseT_Full); + phylink_set(sup_caps, 1000baseKX_Full); + phylink_set(sup_caps, 1000baseX_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKR_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 20000baseKR2_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + + phylink_zero(sup_caps); + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); + + if (linkmode_intersects(params->adv_speeds, sup_caps)) + as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; + + link_params->speed.advertised_speeds = as; } + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { @@ -1644,7 +1668,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn, static void 
qed_fill_link_capability(struct qed_hwfn *hwfn, struct qed_ptt *ptt, u32 capability, - u32 *if_capability) + unsigned long *if_caps) { u32 media_type, tcvr_state, tcvr_type; u32 speed_mask, board_cfg; @@ -1667,113 +1691,117 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, switch (media_type) { case MEDIA_DA_TWINAX: - *if_capability |= QED_LM_FIBRE_BIT; + phylink_set(if_caps, FIBRE); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; + phylink_set(if_caps, 20000baseKR2_Full); + /* For DAC media multiple speed capabilities are supported*/ capability = capability & speed_mask; if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - *if_capability |= QED_LM_1000baseKX_Full_BIT; + phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - *if_capability |= QED_LM_10000baseCR_Full_BIT; + phylink_set(if_caps, 10000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - *if_capability |= QED_LM_40000baseCR4_Full_BIT; + phylink_set(if_caps, 40000baseCR4_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - *if_capability |= QED_LM_25000baseCR_Full_BIT; + phylink_set(if_caps, 25000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseCR2_Full_BIT; + phylink_set(if_caps, 50000baseCR2_Full); if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - *if_capability |= QED_LM_100000baseCR4_Full_BIT; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + phylink_set(if_caps, 100000baseCR4_Full); + break; case MEDIA_BASE_T: - *if_capability |= QED_LM_TP_BIT; + phylink_set(if_caps, TP); + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { - *if_capability |= QED_LM_1000baseT_Full_BIT; - } + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseT_Full); if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { - *if_capability |= QED_LM_10000baseT_Full_BIT; - } + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseT_Full); } + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { - *if_capability |= QED_LM_FIBRE_BIT; + phylink_set(if_caps, FIBRE); + if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) - *if_capability |= QED_LM_1000baseT_Full_BIT; + phylink_set(if_caps, 1000baseT_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) - *if_capability |= QED_LM_10000baseT_Full_BIT; + phylink_set(if_caps, 10000baseT_Full); } + break; case MEDIA_SFP_1G_FIBER: case MEDIA_SFPP_10G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: - *if_capability |= QED_LM_FIBRE_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { + phylink_set(if_caps, FIBRE); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) - *if_capability |= QED_LM_1000baseKX_Full_BIT; + phylink_set(if_caps, 1000baseKX_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) - *if_capability |= QED_LM_10000baseSR_Full_BIT; + phylink_set(if_caps, 10000baseSR_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) - *if_capability |= QED_LM_10000baseLR_Full_BIT; + phylink_set(if_caps, 10000baseLR_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) - 
*if_capability |= QED_LM_10000baseLRM_Full_BIT; + phylink_set(if_caps, 10000baseLRM_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) - *if_capability |= QED_LM_10000baseR_FEC_BIT; + phylink_set(if_caps, 10000baseR_FEC); } + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { + phylink_set(if_caps, 20000baseKR2_Full); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) - *if_capability |= QED_LM_25000baseSR_Full_BIT; + phylink_set(if_caps, 25000baseSR_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) - *if_capability |= QED_LM_40000baseLR4_Full_BIT; + phylink_set(if_caps, 40000baseLR4_Full); if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) - *if_capability |= QED_LM_40000baseSR4_Full_BIT; + phylink_set(if_caps, 40000baseSR4_Full); } - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseKR2_Full_BIT; + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) - *if_capability |= QED_LM_100000baseSR4_Full_BIT; + phylink_set(if_caps, 100000baseSR4_Full); } break; case MEDIA_KR: - *if_capability |= QED_LM_Backplane_BIT; + phylink_set(if_caps, Backplane); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) - *if_capability |= QED_LM_20000baseKR2_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) - *if_capability |= QED_LM_1000baseKX_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) - *if_capability |= QED_LM_10000baseKR_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) - *if_capability |= QED_LM_25000baseKR_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - *if_capability |= QED_LM_40000baseKR4_Full_BIT; - if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) - *if_capability |= QED_LM_50000baseKR2_Full_BIT; + phylink_set(if_caps, 20000baseKR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseKX_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + phylink_set(if_caps, 25000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + phylink_set(if_caps, 40000baseKR4_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - *if_capability |= QED_LM_100000baseKR4_Full_BIT; + phylink_set(if_caps, 100000baseKR4_Full); + break; case MEDIA_UNSPECIFIED: case MEDIA_NOT_PRESENT: @@ -1806,26 +1834,27 @@ static void qed_fill_link(struct qed_hwfn *hwfn, /* TODO - at the moment assume supported and advertised speed equal */ if (link_caps.default_speed_autoneg) - if_link->supported_caps |= QED_LM_Autoneg_BIT; + phylink_set(if_link->supported_caps, Autoneg); if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) - if_link->supported_caps |= QED_LM_Asym_Pause_BIT; + phylink_set(if_link->supported_caps, 
Asym_Pause); if (params.pause.autoneg || params.pause.forced_rx || params.pause.forced_tx) - if_link->supported_caps |= QED_LM_Pause_BIT; + phylink_set(if_link->supported_caps, Pause); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); - if_link->advertised_caps = if_link->supported_caps; if (params.speed.autoneg) - if_link->advertised_caps |= QED_LM_Autoneg_BIT; + phylink_set(if_link->advertised_caps, Autoneg); else - if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; + phylink_clear(if_link->advertised_caps, Autoneg); /* Fill link advertised capability*/ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, - &if_link->advertised_caps); + if_link->advertised_caps); /* Fill link supported capability*/ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, - &if_link->supported_caps); + if_link->supported_caps); if (link.link_up) if_link->speed = link.speed; @@ -1845,30 +1874,29 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; /* Link partner capabilities */ - if (link.partner_adv_speed & - QED_LINK_PARTNER_SPEED_1G_FD) - if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; + + if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) + phylink_set(if_link->lp_caps, 1000baseT_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) - if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; + phylink_set(if_link->lp_caps, 10000baseKR_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) - if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; + phylink_set(if_link->lp_caps, 20000baseKR2_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) - if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; + phylink_set(if_link->lp_caps, 25000baseKR_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) - if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; + phylink_set(if_link->lp_caps, 40000baseLR4_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) - if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; + phylink_set(if_link->lp_caps, 50000baseKR2_Full); if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) - if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; + phylink_set(if_link->lp_caps, 100000baseKR4_Full); if (link.an_complete) - if_link->lp_caps |= QED_LM_Autoneg_BIT; - + phylink_set(if_link->lp_caps, Autoneg); if (link.partner_adv_pause) - if_link->lp_caps |= QED_LM_Pause_BIT; + phylink_set(if_link->lp_caps, Pause); if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) - if_link->lp_caps |= QED_LM_Asym_Pause_BIT; + phylink_set(if_link->lp_caps, Asym_Pause); if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { if_link->eee_supported = false; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index d3fc7403d095..0a564b06d697 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -13,6 +13,8 @@ #include #include #include +#include + #include "qede.h" #include "qede_ptp.h" @@ -418,76 +420,10 @@ static int qede_set_priv_flags(struct net_device *dev, u32 flags) return 0; } -struct qede_link_mode_mapping { - u32 qed_link_mode; - u32 ethtool_link_mode; -}; - -static const struct qede_link_mode_mapping qed_lm_map[] = { - {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT}, - 
{QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, - {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT}, - {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT}, - {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT}, - {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, - {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT}, - {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT}, - {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT}, - {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT}, - {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT}, - {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT}, - {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT}, - {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT}, - {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT}, - {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT}, - {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT}, - {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT}, - {QED_LM_100000baseKR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT}, - {QED_LM_100000baseSR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT}, - {QED_LM_100000baseCR4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT}, - {QED_LM_100000baseLR4_ER4_Full_BIT, - ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT}, - {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT}, - {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT}, - {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT}, - {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT}, - {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT}, - {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT}, -}; - -#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name) \ -{ \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ - if ((caps) & (qed_lm_map[i].qed_link_mode)) \ - __set_bit(qed_lm_map[i].ethtool_link_mode,\ - lk_ksettings->link_modes.name); \ - } \ -} - -#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name) \ -{ \ - int i; \ - \ - for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) { \ - if (test_bit(qed_lm_map[i].ethtool_link_mode, \ - lk_ksettings->link_modes.name)) \ - caps |= qed_lm_map[i].qed_link_mode; \ - } \ -} - static int qede_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { + typeof(cmd->link_modes) *link_modes = &cmd->link_modes; struct ethtool_link_settings *base = &cmd->base; struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; @@ -497,14 +433,9 @@ static int qede_get_link_ksettings(struct net_device *dev, memset(¤t_link, 0, sizeof(current_link)); edev->ops->common->get_link(edev->cdev, ¤t_link); - ethtool_link_ksettings_zero_link_mode(cmd, supported); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported) - - ethtool_link_ksettings_zero_link_mode(cmd, advertising); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising) - - ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); - QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, 
lp_advertising) + linkmode_copy(link_modes->supported, current_link.supported_caps); + linkmode_copy(link_modes->advertising, current_link.advertised_caps); + linkmode_copy(link_modes->lp_advertising, current_link.lp_caps); if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) { base->speed = current_link.speed; @@ -527,10 +458,10 @@ static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qede_dev *edev = netdev_priv(dev); struct qed_link_output current_link; struct qed_link_params params; - u32 sup_caps; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); @@ -542,105 +473,79 @@ static int qede_set_link_ksettings(struct net_device *dev, params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS; params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG; + if (base->autoneg == AUTONEG_ENABLE) { - if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { + if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "Auto negotiation is not supported\n"); return -EOPNOTSUPP; } params.autoneg = true; params.forced_speed = 0; - QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising) + + linkmode_copy(params.adv_speeds, cmd->link_modes.advertising); } else { /* forced speed */ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED; params.autoneg = false; params.forced_speed = base->speed; + + phylink_zero(sup_caps); + switch (base->speed) { case SPEED_1000: - sup_caps = QED_LM_1000baseT_Full_BIT | - QED_LM_1000baseKX_Full_BIT | - QED_LM_1000baseX_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "1G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 1000baseT_Full); + phylink_set(sup_caps, 1000baseKX_Full); + phylink_set(sup_caps, 1000baseX_Full); break; case SPEED_10000: - sup_caps = QED_LM_10000baseT_Full_BIT | - QED_LM_10000baseKR_Full_BIT | - QED_LM_10000baseKX4_Full_BIT | - QED_LM_10000baseR_FEC_BIT | - QED_LM_10000baseCR_Full_BIT | - QED_LM_10000baseSR_Full_BIT | - QED_LM_10000baseLR_Full_BIT | - QED_LM_10000baseLRM_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "10G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKR_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); break; case SPEED_20000: - if (!(current_link.supported_caps & - QED_LM_20000baseKR2_Full_BIT)) { - DP_INFO(edev, "20G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = QED_LM_20000baseKR2_Full_BIT; + phylink_set(sup_caps, 20000baseKR2_Full); break; case SPEED_25000: - sup_caps = QED_LM_25000baseKR_Full_BIT | - QED_LM_25000baseCR_Full_BIT | - QED_LM_25000baseSR_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "25G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + 
phylink_set(sup_caps, 25000baseSR_Full); break; case SPEED_40000: - sup_caps = QED_LM_40000baseLR4_Full_BIT | - QED_LM_40000baseKR4_Full_BIT | - QED_LM_40000baseCR4_Full_BIT | - QED_LM_40000baseSR4_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "40G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); break; case SPEED_50000: - sup_caps = QED_LM_50000baseKR2_Full_BIT | - QED_LM_50000baseCR2_Full_BIT | - QED_LM_50000baseSR2_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "50G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); break; case SPEED_100000: - sup_caps = QED_LM_100000baseKR4_Full_BIT | - QED_LM_100000baseSR4_Full_BIT | - QED_LM_100000baseCR4_Full_BIT | - QED_LM_100000baseLR4_ER4_Full_BIT; - if (!(current_link.supported_caps & sup_caps)) { - DP_INFO(edev, "100G speed not supported\n"); - return -EINVAL; - } - params.adv_speeds = current_link.supported_caps & - sup_caps; + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); break; default: DP_INFO(edev, "Unsupported speed %u\n", base->speed); return -EINVAL; } + + if (!linkmode_intersects(current_link.supported_caps, + sup_caps)) { + DP_INFO(edev, "%uG speed not supported\n", + base->speed / 1000); + return -EINVAL; + } + + linkmode_and(params.adv_speeds, current_link.supported_caps, + sup_caps); } params.link_up = true; @@ -1006,13 +911,16 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (epause->autoneg) { - if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { + if (!phylink_test(current_link.supported_caps, Autoneg)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; } + if (epause->rx_pause) params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; if (epause->tx_pause) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 36b1ca2dadbb..6e77e4908605 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -440,6 +441,7 @@ static void qedf_link_recovery(struct work_struct *work) static void qedf_update_link_speed(struct qedf_ctx *qedf, struct qed_link_output *link) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct fc_lport *lport = qedf->lport; lport->link_speed = FC_PORTSPEED_UNKNOWN; @@ -474,40 +476,60 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf, * Set supported link speed by querying the supported * capabilities of the link. 
*/ - if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) || - (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) || - (link->supported_caps & QED_LM_10000baseR_FEC_BIT) || - (link->supported_caps & QED_LM_10000baseCR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseSR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseLR_Full_BIT) || - (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) || - (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 10000baseT_Full); + phylink_set(sup_caps, 10000baseKX4_Full); + phylink_set(sup_caps, 10000baseR_FEC); + phylink_set(sup_caps, 10000baseCR_Full); + phylink_set(sup_caps, 10000baseSR_Full); + phylink_set(sup_caps, 10000baseLR_Full); + phylink_set(sup_caps, 10000baseLRM_Full); + phylink_set(sup_caps, 10000baseKR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; - } - if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) || - (link->supported_caps & QED_LM_25000baseCR_Full_BIT) || - (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 25000baseKR_Full); + phylink_set(sup_caps, 25000baseCR_Full); + phylink_set(sup_caps, 25000baseSR_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; - } - if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) || - (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 40000baseLR4_Full); + phylink_set(sup_caps, 40000baseKR4_Full); + phylink_set(sup_caps, 40000baseCR4_Full); + phylink_set(sup_caps, 40000baseSR4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; - } - if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) || - (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) || - (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 50000baseKR2_Full); + phylink_set(sup_caps, 50000baseCR2_Full); + phylink_set(sup_caps, 50000baseSR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; - } - if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) || - (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) { + + phylink_zero(sup_caps); + phylink_set(sup_caps, 100000baseKR4_Full); + phylink_set(sup_caps, 100000baseSR4_Full); + phylink_set(sup_caps, 100000baseCR4_Full); + phylink_set(sup_caps, 100000baseLR4_ER4_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; - } - if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT) + + phylink_zero(sup_caps); + phylink_set(sup_caps, 20000baseKR2_Full); + + if (linkmode_intersects(link->supported_caps, sup_caps)) lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; + fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; } diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8a6e3ad436d1..f82db1b92d45 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -661,42 +661,6 @@ enum qed_protocol { 
QED_PROTOCOL_FCOE, }; -enum qed_link_mode_bits { - QED_LM_FIBRE_BIT = BIT(0), - QED_LM_Autoneg_BIT = BIT(1), - QED_LM_Asym_Pause_BIT = BIT(2), - QED_LM_Pause_BIT = BIT(3), - QED_LM_1000baseT_Full_BIT = BIT(4), - QED_LM_10000baseT_Full_BIT = BIT(5), - QED_LM_10000baseKR_Full_BIT = BIT(6), - QED_LM_20000baseKR2_Full_BIT = BIT(7), - QED_LM_25000baseKR_Full_BIT = BIT(8), - QED_LM_40000baseLR4_Full_BIT = BIT(9), - QED_LM_50000baseKR2_Full_BIT = BIT(10), - QED_LM_100000baseKR4_Full_BIT = BIT(11), - QED_LM_TP_BIT = BIT(12), - QED_LM_Backplane_BIT = BIT(13), - QED_LM_1000baseKX_Full_BIT = BIT(14), - QED_LM_10000baseKX4_Full_BIT = BIT(15), - QED_LM_10000baseR_FEC_BIT = BIT(16), - QED_LM_40000baseKR4_Full_BIT = BIT(17), - QED_LM_40000baseCR4_Full_BIT = BIT(18), - QED_LM_40000baseSR4_Full_BIT = BIT(19), - QED_LM_25000baseCR_Full_BIT = BIT(20), - QED_LM_25000baseSR_Full_BIT = BIT(21), - QED_LM_50000baseCR2_Full_BIT = BIT(22), - QED_LM_100000baseSR4_Full_BIT = BIT(23), - QED_LM_100000baseCR4_Full_BIT = BIT(24), - QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25), - QED_LM_50000baseSR2_Full_BIT = BIT(26), - QED_LM_1000baseX_Full_BIT = BIT(27), - QED_LM_10000baseCR_Full_BIT = BIT(28), - QED_LM_10000baseSR_Full_BIT = BIT(29), - QED_LM_10000baseLR_Full_BIT = BIT(30), - QED_LM_10000baseLRM_Full_BIT = BIT(31), - QED_LM_COUNT = 32 -}; - struct qed_link_params { bool link_up; @@ -708,7 +672,9 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) u32 override_flags; bool autoneg; - u32 adv_speeds; + + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); + u32 forced_speed; #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) #define QED_LINK_PAUSE_RX_ENABLE BIT(1) @@ -726,10 +692,9 @@ struct qed_link_params { struct qed_link_output { bool link_up; - /* In QED_LM_* defs */ - u32 supported_caps; - u32 advertised_caps; - u32 lp_caps; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); u32 speed; /* In Mb/s */ u8 duplex; /* In DUPLEX defs */ From 1d4e4ecccb114449454592e24ad790e20f397c05 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:02 +0300 Subject: [PATCH 1293/2056] qede: populate supported link modes maps on module init Simplify and lighten qede_set_link_ksettings() by declaring static link modes maps and populating them on module init. This way we save plenty of text size at the low expense of __ro_after_init and __initconst data (the latter will be purged after module init is done). Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 2 + .../net/ethernet/qlogic/qede/qede_ethtool.c | 160 ++++++++++++------ drivers/net/ethernet/qlogic/qede/qede_main.c | 2 + 3 files changed, 109 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 8adda5dc9e88..f1d7f73de902 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -557,6 +557,8 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f); +void qede_forced_speed_maps_init(void); + #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) #define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 0a564b06d697..7a985307cdd5 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -196,6 +196,96 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { "Nvram (online)\t\t", }; +/* Forced speed capabilities maps */ + +struct qede_forced_speed_map { + u32 speed; + __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); + + const u32 *cap_arr; + u32 arr_size; +}; + +#define QEDE_FORCED_SPEED_MAP(value) \ +{ \ + .speed = SPEED_##value, \ + .cap_arr = qede_forced_speed_##value, \ + .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \ +} + +static const u32 qede_forced_speed_1000[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qede_forced_speed_10000[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qede_forced_speed_20000[] __initconst = { + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, +}; + +static const u32 qede_forced_speed_25000[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qede_forced_speed_40000[] __initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qede_forced_speed_50000[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qede_forced_speed_100000[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = { + QEDE_FORCED_SPEED_MAP(1000), + QEDE_FORCED_SPEED_MAP(10000), + QEDE_FORCED_SPEED_MAP(20000), + QEDE_FORCED_SPEED_MAP(25000), + QEDE_FORCED_SPEED_MAP(40000), + QEDE_FORCED_SPEED_MAP(50000), + QEDE_FORCED_SPEED_MAP(100000), +}; + +void __init qede_forced_speed_maps_init(void) +{ + struct qede_forced_speed_map *map; + u32 i; + + for (i = 0; i < 
ARRAY_SIZE(qede_forced_speed_maps); i++) { + map = qede_forced_speed_maps + i; + + linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); + map->cap_arr = NULL; + map->arr_size = 0; + } +} + +/* Ethtool callbacks */ + static void qede_get_strings_stats_txq(struct qede_dev *edev, struct qede_tx_queue *txq, u8 **buf) { @@ -458,10 +548,11 @@ static int qede_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { const struct ethtool_link_settings *base = &cmd->base; - __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qede_dev *edev = netdev_priv(dev); + const struct qede_forced_speed_map *map; struct qed_link_output current_link; struct qed_link_params params; + u32 i; if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, "Link settings are not allowed to be changed\n"); @@ -489,65 +580,24 @@ static int qede_set_link_ksettings(struct net_device *dev, params.autoneg = false; params.forced_speed = base->speed; - phylink_zero(sup_caps); + for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) { + map = qede_forced_speed_maps + i; - switch (base->speed) { - case SPEED_1000: - phylink_set(sup_caps, 1000baseT_Full); - phylink_set(sup_caps, 1000baseKX_Full); - phylink_set(sup_caps, 1000baseX_Full); - break; - case SPEED_10000: - phylink_set(sup_caps, 10000baseT_Full); - phylink_set(sup_caps, 10000baseKR_Full); - phylink_set(sup_caps, 10000baseKX4_Full); - phylink_set(sup_caps, 10000baseR_FEC); - phylink_set(sup_caps, 10000baseCR_Full); - phylink_set(sup_caps, 10000baseSR_Full); - phylink_set(sup_caps, 10000baseLR_Full); - phylink_set(sup_caps, 10000baseLRM_Full); - break; - case SPEED_20000: - phylink_set(sup_caps, 20000baseKR2_Full); - break; - case SPEED_25000: - phylink_set(sup_caps, 25000baseKR_Full); - phylink_set(sup_caps, 25000baseCR_Full); - phylink_set(sup_caps, 25000baseSR_Full); - break; - case SPEED_40000: - phylink_set(sup_caps, 40000baseLR4_Full); - phylink_set(sup_caps, 40000baseKR4_Full); - phylink_set(sup_caps, 40000baseCR4_Full); - phylink_set(sup_caps, 40000baseSR4_Full); - break; - case SPEED_50000: - phylink_set(sup_caps, 50000baseKR2_Full); - phylink_set(sup_caps, 50000baseCR2_Full); - phylink_set(sup_caps, 50000baseSR2_Full); - break; - case SPEED_100000: - phylink_set(sup_caps, 100000baseKR4_Full); - phylink_set(sup_caps, 100000baseSR4_Full); - phylink_set(sup_caps, 100000baseCR4_Full); - phylink_set(sup_caps, 100000baseLR4_ER4_Full); - break; - default: - DP_INFO(edev, "Unsupported speed %u\n", base->speed); - return -EINVAL; + if (base->speed != map->speed || + !linkmode_intersects(current_link.supported_caps, + map->caps)) + continue; + + linkmode_and(params.adv_speeds, + current_link.supported_caps, map->caps); + goto set_link; } - if (!linkmode_intersects(current_link.supported_caps, - sup_caps)) { - DP_INFO(edev, "%uG speed not supported\n", - base->speed / 1000); - return -EINVAL; - } - - linkmode_and(params.adv_speeds, current_link.supported_caps, - sup_caps); + DP_INFO(edev, "Unsupported speed %u\n", base->speed); + return -EINVAL; } +set_link: params.link_up = true; edev->ops->common->set_link(edev->cdev, ¶ms); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a653dd0e5c22..6f2171dc0dea 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -263,6 +263,8 @@ int __init qede_init(void) pr_info("qede_init: %s\n", version); + qede_forced_speed_maps_init(); + qed_ops = 
qed_get_eth_ops(); if (!qed_ops) { pr_notice("Failed to get qed ethtool operations\n"); From d47839f31e075ce5dd52c1bbbc3808ef9202671d Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:03 +0300 Subject: [PATCH 1294/2056] qed: reformat public_port::transceiver_data a bit Prior to adding new bitfields, reformat the existing ones from spaces to tabs, and unify all hex values to lowercase. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 108 +++++++++++----------- 1 file changed, 55 insertions(+), 53 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 6bb0bbc0013b..0d0a109d94b4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11973,59 +11973,61 @@ struct public_port { struct dcbx_mib operational_dcbx_mib; u32 reserved[2]; - u32 transceiver_data; -#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF -#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 -#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 -#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 -#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 -#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00 -#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 -#define ETH_TRANSCEIVER_TYPE_NONE 0x00 -#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xFF -#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 -#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 -#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 -#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 -#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 -#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 -#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 -#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 -#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 -#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a -#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b -#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c -#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d -#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e -#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f -#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 -#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 -#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 -#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 -#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 -#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 -#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 -#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 -#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 -#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a -#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b -#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c -#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d -#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e -#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f -#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 -#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 -#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 -#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 + + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 
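The MASK/OFFSET pairs retabulated in this hunk are what the driver uses to unpack the transceiver state and type from the single packed transceiver_data word. A minimal standalone C sketch of that extraction pattern follows; it reuses the same constant values as above, but the get_field() helper and the sample word are illustrative only and not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Same values as the ETH_TRANSCEIVER_* defines in the hunk above. */
#define XCVR_STATE_MASK		0x000000ffu
#define XCVR_STATE_OFFSET	0
#define XCVR_TYPE_MASK		0x0000ff00u
#define XCVR_TYPE_OFFSET	8

/* Illustrative helper: pull one packed field out of the 32-bit word. */
static unsigned int get_field(uint32_t word, uint32_t mask, unsigned int offset)
{
	return (unsigned int)((word & mask) >> offset);
}

int main(void)
{
	/* Sample value only: state PRESENT (0x01), type 10G SR (0x05). */
	uint32_t transceiver_data = 0x00000501;

	printf("state=0x%02x type=0x%02x\n",
	       get_field(transceiver_data, XCVR_STATE_MASK, XCVR_STATE_OFFSET),
	       get_field(transceiver_data, XCVR_TYPE_MASK, XCVR_TYPE_OFFSET));
	return 0;
}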
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 + u32 wol_info; u32 wol_pkt_len; u32 wol_pkt_details; From 9228b7c1f4ee8c2198a1b13e5a1fdb285ce2b555 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:04 +0300 Subject: [PATCH 1295/2056] qed: add support for multi-rate transceivers Set the corresponding advertised and supported link modes according to the detected transceiver type and device capabilities. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 4 + drivers/net/ethernet/qlogic/qed/qed_main.c | 120 ++++++++++++++++----- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 9 +- 3 files changed, 106 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 0d0a109d94b4..ebc25b34e491 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12027,6 +12027,10 @@ struct public_port { #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a u32 wol_info; u32 wol_pkt_len; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index afc4fa3bdcaa..172a107f9299 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1696,21 +1696,40 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) phylink_set(if_caps, 20000baseKR2_Full); - /* For DAC media multiple speed capabilities are supported*/ - capability = capability & speed_mask; + /* For DAC media multiple speed capabilities are supported */ + capability |= speed_mask; + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) phylink_set(if_caps, 1000baseKX_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) phylink_set(if_caps, 10000baseCR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) - phylink_set(if_caps, 40000baseCR4_Full); + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + phylink_set(if_caps, 40000baseCR4_Full); + break; + default: + break; + } + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) phylink_set(if_caps, 25000baseCR_Full); if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) phylink_set(if_caps, 50000baseCR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) - phylink_set(if_caps, 100000baseCR4_Full); + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + phylink_set(if_caps, 100000baseCR4_Full); + break; + default: + break; + } break; case MEDIA_BASE_T: @@ -1728,10 +1747,16 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { phylink_set(if_caps, FIBRE); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_1000BASET: phylink_set(if_caps, 1000baseT_Full); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) + break; + case ETH_TRANSCEIVER_TYPE_10G_BASET: phylink_set(if_caps, 10000baseT_Full); + break; + default: + break; + } } break; @@ -1740,47 +1765,89 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: phylink_set(if_caps, FIBRE); + capability |= speed_mask; - if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { - if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || - (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + 
switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: phylink_set(if_caps, 1000baseKX_Full); - } + break; + default: + break; + } - if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { - if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: phylink_set(if_caps, 10000baseSR_Full); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) + break; + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: phylink_set(if_caps, 10000baseLR_Full); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) + break; + case ETH_TRANSCEIVER_TYPE_10G_LRM: phylink_set(if_caps, 10000baseLRM_Full); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) + break; + case ETH_TRANSCEIVER_TYPE_10G_ER: phylink_set(if_caps, 10000baseR_FEC); - } + break; + default: + break; + } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) phylink_set(if_caps, 20000baseKR2_Full); - if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { - if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: phylink_set(if_caps, 25000baseSR_Full); - } + break; + default: + break; + } - if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { - if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: phylink_set(if_caps, 40000baseLR4_Full); - if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) + break; + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: phylink_set(if_caps, 40000baseSR4_Full); - } + break; + default: + break; + } if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) phylink_set(if_caps, 50000baseKR2_Full); if (capability & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { - if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: phylink_set(if_caps, 100000baseSR4_Full); - } + break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + phylink_set(if_caps, 100000baseLR4_ER4_Full); + break; + default: + break; + } break; case MEDIA_KR: @@ -1805,6 +1872,7 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, break; case MEDIA_UNSPECIFIED: case MEDIA_NOT_PRESENT: + default: DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, "Unknown media and transceiver type;\n"); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2d12bb27560c..b10a92488630 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2193,6 +2193,11 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn 
*p_hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | @@ -2223,8 +2228,10 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; break; case ETH_TRANSCEIVER_TYPE_10G_BASET: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; default: DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", From 3c41486e46405b84369bd19613326c0cc89d6ddc Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:05 +0300 Subject: [PATCH 1296/2056] qed: use transceiver data to fill link partner's advertising speeds Currently qed driver does not take into consideration transceiver's capabilities when generating link partner's speed advertisement. This leads to e.g. incorrect ethtool link info on 10GbaseT modules. Use transceiver info not only for advertisement and support arrays, but also for link partner's abilities to fix it. Misc: fix a couple of comments nearby. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 49 +++++++++++++--------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 172a107f9299..2be9ed39c450 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1879,6 +1879,27 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn, } } +static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) +{ + *speed_mask = 0; + + if (caps & + (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (caps & QED_LINK_PARTNER_SPEED_10G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (caps & QED_LINK_PARTNER_SPEED_20G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; + if (caps & QED_LINK_PARTNER_SPEED_25G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + if (caps & QED_LINK_PARTNER_SPEED_40G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (caps & QED_LINK_PARTNER_SPEED_50G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (caps & QED_LINK_PARTNER_SPEED_100G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; +} + static void qed_fill_link(struct qed_hwfn *hwfn, struct qed_ptt *ptt, struct qed_link_output *if_link) @@ -1886,7 +1907,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, struct qed_mcp_link_capabilities link_caps; struct qed_mcp_link_params params; struct qed_mcp_link_state link; - u32 media_type; + u32 media_type, speed_mask; memset(if_link, 0, sizeof(*if_link)); @@ -1917,13 +1938,18 @@ static void qed_fill_link(struct qed_hwfn *hwfn, else phylink_clear(if_link->advertised_caps, Autoneg); - /* Fill link advertised capability*/ + /* Fill link advertised capability */ 
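qed_lp_caps_to_speed_mask(), added just above, folds the QED_LINK_PARTNER_SPEED_* bits into the same NVM speed-mask format that qed_fill_link_capability() already consumes, so the link partner's modes go through the one translation path. Below is a rough standalone model of that bit-to-bit mapping; the table-driven form, names and bit values are illustrative assumptions only, the driver itself uses plain if statements and its own constants:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Partner-speed bits and NVM speed-mask bits; placeholder values only. */
#define LP_1G_HD	(1u << 0)
#define LP_1G_FD	(1u << 1)
#define LP_10G		(1u << 2)
#define LP_25G		(1u << 4)

#define NVM_SPEED_1G	0x1u
#define NVM_SPEED_10G	0x2u
#define NVM_SPEED_25G	0x8u

struct lp_speed_map {
	uint32_t lp_bits;	/* any of these partner bits ...   */
	uint32_t nvm_bit;	/* ... maps to this NVM speed bit. */
};

static const struct lp_speed_map lp_speed_maps[] = {
	{ LP_1G_HD | LP_1G_FD,	NVM_SPEED_1G },
	{ LP_10G,		NVM_SPEED_10G },
	{ LP_25G,		NVM_SPEED_25G },
};

static uint32_t lp_caps_to_speed_mask(uint32_t caps)
{
	uint32_t mask = 0;
	size_t i;

	for (i = 0; i < sizeof(lp_speed_maps) / sizeof(lp_speed_maps[0]); i++)
		if (caps & lp_speed_maps[i].lp_bits)
			mask |= lp_speed_maps[i].nvm_bit;

	return mask;
}

int main(void)
{
	/* Partner advertises 1G full duplex and 10G: expect mask 0x3. */
	printf("speed mask = 0x%x\n",
	       (unsigned int)lp_caps_to_speed_mask(LP_1G_FD | LP_10G));
	return 0;
}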
qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, if_link->advertised_caps); - /* Fill link supported capability*/ + + /* Fill link supported capability */ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, if_link->supported_caps); + /* Fill partner advertised capability */ + qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); + qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); + if (link.link_up) if_link->speed = link.speed; @@ -1941,23 +1967,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (params.pause.forced_tx) if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; - /* Link partner capabilities */ - - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) - phylink_set(if_link->lp_caps, 1000baseT_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) - phylink_set(if_link->lp_caps, 10000baseKR_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) - phylink_set(if_link->lp_caps, 20000baseKR2_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) - phylink_set(if_link->lp_caps, 25000baseKR_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) - phylink_set(if_link->lp_caps, 40000baseLR4_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) - phylink_set(if_link->lp_caps, 50000baseKR2_Full); - if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) - phylink_set(if_link->lp_caps, 100000baseKR4_Full); - if (link.an_complete) phylink_set(if_link->lp_caps, Autoneg); if (link.partner_adv_pause) From 37237b5b7104f91b519133d7862d1b5169a3ba8e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:06 +0300 Subject: [PATCH 1297/2056] qed: reformat several structures a bit Prior to adding new fields and bitfields, reformat the related structures according to the Linux style (spaces to tabs, lowercase hex, indentation etc.). Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 248 +++++++++++----------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 75 ++++--- include/linux/qed/qed_if.h | 64 +++--- 3 files changed, 196 insertions(+), 191 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index ebc25b34e491..93d33c9cf145 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11536,34 +11536,35 @@ typedef u32 offsize_t; /* In DWORDS !!! 
*/ /* PHY configuration */ struct eth_phy_cfg { - u32 speed; -#define ETH_SPEED_AUTONEG 0 -#define ETH_SPEED_SMARTLINQ 0x8 + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 - u32 pause; -#define ETH_PAUSE_NONE 0x0 -#define ETH_PAUSE_AUTONEG 0x1 -#define ETH_PAUSE_RX 0x2 -#define ETH_PAUSE_TX 0x4 + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 - u32 adv_speed; - u32 loopback_mode; -#define ETH_LOOPBACK_NONE (0) -#define ETH_LOOPBACK_INT_PHY (1) -#define ETH_LOOPBACK_EXT_PHY (2) -#define ETH_LOOPBACK_EXT (3) -#define ETH_LOOPBACK_MAC (4) + u32 adv_speed; - u32 eee_cfg; + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 + + u32 eee_cfg; #define EEE_CFG_EEE_ENABLED BIT(0) #define EEE_CFG_TX_LPI BIT(1) #define EEE_CFG_ADV_SPEED_1G BIT(2) #define EEE_CFG_ADV_SPEED_10G BIT(3) -#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 #define EEE_TX_TIMER_USEC_OFFSET 4 -#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) -#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) -#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 u32 feature_config_flags; #define ETH_EEE_MODE_ADV_LPI (1 << 0) @@ -11895,41 +11896,36 @@ struct public_path { }; struct public_port { - u32 validity_map; - - u32 link_status; -#define LINK_STATUS_LINK_UP 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) - -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + u32 validity_map; + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 #define LINK_STATUS_PFC_ENABLED 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 #define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 #define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 #define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 #define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 #define 
LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 #define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 - -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) - #define LINK_STATUS_SFP_TX_FAULT 0x00100000 #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 @@ -12630,49 +12626,49 @@ struct public_drv_mb { #define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 + u32 fw_mb_param; +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 - /* get pf rdma protocol command responce */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + /* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 -/* get MFW feature support response */ -#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 + /* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 
0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - union drv_union_data union_data; + union drv_union_data union_data; }; #define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff @@ -13048,24 +13044,27 @@ struct nvm_cfg1_path { }; struct nvm_cfg1_port { - u32 reserved__m_relocated_to_option_123; - u32 reserved__m_relocated_to_option_124; - u32 generic_cont0; -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 - u32 pcie_cfg; - u32 features; - u32 speed_cap_mask; -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 @@ -13074,8 +13073,9 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 - u32 link_settings; -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 @@ -13091,49 +13091,53 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 - u32 phy_cfg; - u32 mgmt_traffic; - u32 ext_phy; + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; /* EEE power saving mode */ -#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 - u32 mba_cfg1; - u32 mba_cfg2; - u32 vf_cfg; - struct nvm_cfg_mac_address lldp_mac_address; - u32 led_port_settings; - u32 transceiver_00; - u32 device_ids; - u32 board_cfg; -#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF -#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 -#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 -#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 -#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 - u32 mnm_10g_cap; - u32 mnm_10g_ctrl; - u32 mnm_10g_misc; 
- u32 mnm_25g_cap; - u32 mnm_25g_ctrl; - u32 mnm_25g_misc; - u32 mnm_40g_cap; - u32 mnm_40g_ctrl; - u32 mnm_40g_misc; - u32 mnm_50g_cap; - u32 mnm_50g_ctrl; - u32 mnm_50g_misc; - u32 mnm_100g_cap; - u32 mnm_100g_ctrl; - u32 mnm_100g_misc; - u32 reserved[116]; + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 reserved[116]; }; struct nvm_cfg1_func { diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 63a22a615e94..cf678b6966f8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -34,61 +34,60 @@ enum qed_mcp_eee_mode { }; struct qed_mcp_link_params { - struct qed_mcp_link_speed_params speed; - struct qed_mcp_link_pause_params pause; - u32 loopback_mode; - struct qed_link_eee_params eee; + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_mcp_link_capabilities { - u32 speed_capabilities; - bool default_speed_autoneg; - enum qed_mcp_eee_mode default_eee; - u32 eee_lpi_timer; - u8 eee_speed_caps; + u32 speed_capabilities; + bool default_speed_autoneg; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; }; struct qed_mcp_link_state { - bool link_up; - - u32 min_pf_rate; + bool link_up; + u32 min_pf_rate; /* Actual link speed in Mb/s */ - u32 line_speed; + u32 line_speed; /* PF max speed in Mb/s, deduced from line_speed * according to PF max bandwidth configuration. 
*/ - u32 speed; - bool full_duplex; + u32 speed; - bool an; - bool an_complete; - bool parallel_detection; - bool pfc_enabled; + bool full_duplex; + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; -#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) -#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) -#define QED_LINK_PARTNER_SPEED_10G BIT(2) -#define QED_LINK_PARTNER_SPEED_20G BIT(3) -#define QED_LINK_PARTNER_SPEED_25G BIT(4) -#define QED_LINK_PARTNER_SPEED_40G BIT(5) -#define QED_LINK_PARTNER_SPEED_50G BIT(6) -#define QED_LINK_PARTNER_SPEED_100G BIT(7) - u32 partner_adv_speed; + u32 partner_adv_speed; +#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) +#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) +#define QED_LINK_PARTNER_SPEED_10G BIT(2) +#define QED_LINK_PARTNER_SPEED_20G BIT(3) +#define QED_LINK_PARTNER_SPEED_25G BIT(4) +#define QED_LINK_PARTNER_SPEED_40G BIT(5) +#define QED_LINK_PARTNER_SPEED_50G BIT(6) +#define QED_LINK_PARTNER_SPEED_100G BIT(7) - bool partner_tx_flow_ctrl_en; - bool partner_rx_flow_ctrl_en; + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; -#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1) -#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2) -#define QED_LINK_PARTNER_BOTH_PAUSE (3) - u8 partner_adv_pause; + u8 partner_adv_pause; +#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1 +#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2 +#define QED_LINK_PARTNER_BOTH_PAUSE 0x3 - bool sfp_tx_fault; - bool eee_active; - u8 eee_adv_caps; - u8 eee_lp_adv_caps; + bool sfp_tx_fault; + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; }; struct qed_mcp_function_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f82db1b92d45..dde48f206d0d 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -662,51 +662,53 @@ enum qed_protocol { }; struct qed_link_params { - bool link_up; + bool link_up; -#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) -#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) -#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) -#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) -#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) -#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) - u32 override_flags; - bool autoneg; + u32 override_flags; +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) +#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) + bool autoneg; __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); + u32 forced_speed; - u32 forced_speed; -#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) -#define QED_LINK_PAUSE_RX_ENABLE BIT(1) -#define QED_LINK_PAUSE_TX_ENABLE BIT(2) - u32 pause_config; -#define QED_LINK_LOOPBACK_NONE BIT(0) -#define QED_LINK_LOOPBACK_INT_PHY BIT(1) -#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) -#define QED_LINK_LOOPBACK_EXT BIT(3) -#define QED_LINK_LOOPBACK_MAC BIT(4) - u32 loopback_mode; - struct qed_link_eee_params eee; + u32 pause_config; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + + u32 loopback_mode; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) + + struct qed_link_eee_params eee; }; struct qed_link_output { - bool link_up; + bool link_up; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); 
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); - u32 speed; /* In Mb/s */ - u8 duplex; /* In DUPLEX defs */ - u8 port; /* In PORT defs */ - bool autoneg; - u32 pause_config; + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; /* EEE - capability & param */ - bool eee_supported; - bool eee_active; - u8 sup_caps; - struct qed_link_eee_params eee; + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct qed_link_eee_params eee; }; struct qed_probe_params { From ae7e69379fd5a87141fd8c7f2efab8e73f2a9f7e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:07 +0300 Subject: [PATCH 1298/2056] qed: add support for Forward Error Correction Add all necessary routines for reading supported FEC modes from NVM and querying FEC control to the MFW (if the running version supports it). Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 54 +++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 24 +++++++++- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 +++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 47 +++++++++++++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 4 ++ include/linux/qed/qed_if.h | 13 ++++++ 6 files changed, 121 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 491a6dbb5d73..d929556247a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3968,7 +3968,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fc; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; @@ -4081,16 +4081,38 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = link->speed.autoneg; - link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; - link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; - link->pause.autoneg = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); - link->pause.forced_rx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); - link->pause.forced_tx = !!(link_temp & - NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + fc = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_FEC_FORCE_MODE)) { + case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: + p_caps->fec_default |= QED_FEC_MODE_NONE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: + p_caps->fec_default |= QED_FEC_MODE_FIRECODE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: + p_caps->fec_default |= QED_FEC_MODE_RS; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: + p_caps->fec_default |= QED_FEC_MODE_AUTO; + break; + default: + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "unknown FEC mode in 
0x%08x\n", link_temp); + } + } else { + p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; + } + + link->fec = p_caps->fec_default; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + offsetof(struct nvm_cfg1_port, ext_phy)); @@ -4122,14 +4144,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } - DP_VERBOSE(p_hwfn, - NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", - link->speed.forced_speed, - link->speed.advertised_speeds, - link->speed.autoneg, - link->pause.autoneg, - p_caps->default_eee, p_caps->eee_lpi_timer); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer, + p_caps->fec_default); if (IS_LEAD_HWFN(p_hwfn)) { struct qed_dev *cdev = p_hwfn->cdev; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 93d33c9cf145..7c1d4efffbff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11566,8 +11566,15 @@ struct eth_phy_cfg { #define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 #define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 - u32 feature_config_flags; -#define ETH_EEE_MODE_ADV_LPI (1 << 0) + u32 deprecated; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 0x07 }; struct port_mf_cfg { @@ -11934,6 +11941,11 @@ struct public_port { #define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 #define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) + u32 link_status1; u32 ext_phy_fw_version; u32 drv_phy_cfg_addr; @@ -12553,6 +12565,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ @@ -12641,6 +12654,7 @@ struct public_drv_mb { /* Get MFW feature support response */ #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL 0x00000020 #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) @@ -13091,6 +13105,12 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 u32 phy_cfg; u32 mgmt_traffic; diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 2be9ed39c450..91e7cfc544f0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1597,6 +1597,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) memcpy(&link_params->eee, &params->eee, sizeof(link_params->eee)); + if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) + link_params->fec = params->fec; + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); qed_ptt_release(hwfn, ptt); @@ -1938,6 +1941,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, else phylink_clear(if_link->advertised_caps, Autoneg); + if_link->sup_fec = link_caps.fec_default; + if_link->active_fec = params.fec; + /* Fill link advertised capability */ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, if_link->advertised_caps); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b10a92488630..78c0d3a2d164 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1446,6 +1446,25 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (status & LINK_STATUS_FEC_MODE_MASK) { + case LINK_STATUS_FEC_MODE_NONE: + p_link->fec_active = QED_FEC_MODE_NONE; + break; + case LINK_STATUS_FEC_MODE_FIRECODE_CL74: + p_link->fec_active = QED_FEC_MODE_FIRECODE; + break; + case LINK_STATUS_FEC_MODE_RS_CL91: + p_link->fec_active = QED_FEC_MODE_RS; + break; + default: + p_link->fec_active = QED_FEC_MODE_AUTO; + } + } else { + p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; + } + qed_link_update(p_hwfn, p_ptt); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1456,8 +1475,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; + u32 cmd, fec_bit = 0; int rc = 0; - u32 cmd; /* Set the shmem configuration according to params */ memset(&phy_cfg, 0, sizeof(phy_cfg)); @@ -1489,16 +1508,27 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) EEE_TX_TIMER_USEC_MASK; } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + if (params->fec & QED_FEC_MODE_NONE) + fec_bit |= FEC_FORCE_MODE_NONE; + else if (params->fec & QED_FEC_MODE_FIRECODE) + fec_bit |= FEC_FORCE_MODE_FIRECODE; + else if (params->fec & QED_FEC_MODE_RS) + fec_bit |= FEC_FORCE_MODE_RS; + else if (params->fec & QED_FEC_MODE_AUTO) + fec_bit |= FEC_FORCE_MODE_AUTO; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); + } + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg.speed, - phy_cfg.pause, - phy_cfg.adv_speed, - phy_cfg.loopback_mode, - phy_cfg.feature_config_flags); + "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, FEC 0x%08x\n", + phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, + phy_cfg.loopback_mode, phy_cfg.fec_mode); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); @@ -3805,7 +3835,8 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, 
struct qed_ptt *p_ptt) u32 mcp_resp, mcp_param, features; features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | - DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index cf678b6966f8..5e50405854e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -38,11 +38,13 @@ struct qed_mcp_link_params { struct qed_mcp_link_pause_params pause; u32 loopback_mode; struct qed_link_eee_params eee; + u32 fec; }; struct qed_mcp_link_capabilities { u32 speed_capabilities; bool default_speed_autoneg; + u32 fec_default; enum qed_mcp_eee_mode default_eee; u32 eee_lpi_timer; u8 eee_speed_caps; @@ -88,6 +90,8 @@ struct qed_mcp_link_state { bool eee_active; u8 eee_adv_caps; u8 eee_lp_adv_caps; + + u32 fec_active; }; struct qed_mcp_function_info { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index dde48f206d0d..f0b4cdc79299 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -661,6 +661,14 @@ enum qed_protocol { QED_PROTOCOL_FCOE, }; +enum qed_fec_mode { + QED_FEC_MODE_NONE = BIT(0), + QED_FEC_MODE_FIRECODE = BIT(1), + QED_FEC_MODE_RS = BIT(2), + QED_FEC_MODE_AUTO = BIT(3), + QED_FEC_MODE_UNSUPPORTED = BIT(4), +}; + struct qed_link_params { bool link_up; @@ -671,6 +679,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) +#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6) bool autoneg; __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); @@ -689,6 +698,7 @@ struct qed_link_params { #define QED_LINK_LOOPBACK_MAC BIT(4) struct qed_link_eee_params eee; + u32 fec; }; struct qed_link_output { @@ -709,6 +719,9 @@ struct qed_link_output { bool eee_active; u8 sup_caps; struct qed_link_eee_params eee; + + u32 sup_fec; + u32 active_fec; }; struct qed_probe_params { From 460761570ba3073b15c2903db9efc0142790defd Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:08 +0300 Subject: [PATCH 1299/2056] qede: format qede{,_vf}_ethtool_ops Prior to adding new callbacks, format qede ethtool_ops structs to make declarations more fancy and readable. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../net/ethernet/qlogic/qede/qede_ethtool.c | 137 +++++++++--------- 1 file changed, 68 insertions(+), 69 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 7a985307cdd5..fdccceb69632 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -2032,78 +2032,77 @@ static int qede_get_dump_data(struct net_device *dev, } static const struct ethtool_ops qede_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, - .get_link_ksettings = qede_get_link_ksettings, - .set_link_ksettings = qede_set_link_ksettings, - .get_drvinfo = qede_get_drvinfo, - .get_regs_len = qede_get_regs_len, - .get_regs = qede_get_regs, - .get_wol = qede_get_wol, - .set_wol = qede_set_wol, - .get_msglevel = qede_get_msglevel, - .set_msglevel = qede_set_msglevel, - .nway_reset = qede_nway_reset, - .get_link = qede_get_link, - .get_coalesce = qede_get_coalesce, - .set_coalesce = qede_set_coalesce, - .get_ringparam = qede_get_ringparam, - .set_ringparam = qede_set_ringparam, - .get_pauseparam = qede_get_pauseparam, - .set_pauseparam = qede_set_pauseparam, - .get_strings = qede_get_strings, - .set_phys_id = qede_set_phys_id, - .get_ethtool_stats = qede_get_ethtool_stats, - .get_priv_flags = qede_get_priv_flags, - .set_priv_flags = qede_set_priv_flags, - .get_sset_count = qede_get_sset_count, - .get_rxnfc = qede_get_rxnfc, - .set_rxnfc = qede_set_rxnfc, - .get_rxfh_indir_size = qede_get_rxfh_indir_size, - .get_rxfh_key_size = qede_get_rxfh_key_size, - .get_rxfh = qede_get_rxfh, - .set_rxfh = qede_set_rxfh, - .get_ts_info = qede_get_ts_info, - .get_channels = qede_get_channels, - .set_channels = qede_set_channels, - .self_test = qede_self_test, - .get_module_info = qede_get_module_info, - .get_module_eeprom = qede_get_module_eeprom, - .get_eee = qede_get_eee, - .set_eee = qede_set_eee, - - .get_tunable = qede_get_tunable, - .set_tunable = qede_set_tunable, - .flash_device = qede_flash_device, - .get_dump_flag = qede_get_dump_flag, - .get_dump_data = qede_get_dump_data, - .set_dump = qede_set_dump, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_link_ksettings = qede_get_link_ksettings, + .set_link_ksettings = qede_set_link_ksettings, + .get_drvinfo = qede_get_drvinfo, + .get_regs_len = qede_get_regs_len, + .get_regs = qede_get_regs, + .get_wol = qede_get_wol, + .set_wol = qede_set_wol, + .get_msglevel = qede_get_msglevel, + .set_msglevel = qede_set_msglevel, + .nway_reset = qede_nway_reset, + .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_pauseparam = qede_get_pauseparam, + .set_pauseparam = qede_set_pauseparam, + .get_strings = qede_get_strings, + .set_phys_id = qede_set_phys_id, + .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, + .set_priv_flags = qede_set_priv_flags, + .get_sset_count = qede_get_sset_count, + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, + .get_ts_info = qede_get_ts_info, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, + .self_test = qede_self_test, + .get_module_info = qede_get_module_info, + .get_module_eeprom = qede_get_module_eeprom, + .get_eee = qede_get_eee, + 
.set_eee = qede_set_eee, + .get_tunable = qede_get_tunable, + .set_tunable = qede_set_tunable, + .flash_device = qede_flash_device, + .get_dump_flag = qede_get_dump_flag, + .get_dump_data = qede_get_dump_data, + .set_dump = qede_set_dump, }; static const struct ethtool_ops qede_vf_ethtool_ops = { - .supported_coalesce_params = ETHTOOL_COALESCE_USECS, - .get_link_ksettings = qede_get_link_ksettings, - .get_drvinfo = qede_get_drvinfo, - .get_msglevel = qede_get_msglevel, - .set_msglevel = qede_set_msglevel, - .get_link = qede_get_link, - .get_coalesce = qede_get_coalesce, - .set_coalesce = qede_set_coalesce, - .get_ringparam = qede_get_ringparam, - .set_ringparam = qede_set_ringparam, - .get_strings = qede_get_strings, - .get_ethtool_stats = qede_get_ethtool_stats, - .get_priv_flags = qede_get_priv_flags, - .get_sset_count = qede_get_sset_count, - .get_rxnfc = qede_get_rxnfc, - .set_rxnfc = qede_set_rxnfc, - .get_rxfh_indir_size = qede_get_rxfh_indir_size, - .get_rxfh_key_size = qede_get_rxfh_key_size, - .get_rxfh = qede_get_rxfh, - .set_rxfh = qede_set_rxfh, - .get_channels = qede_get_channels, - .set_channels = qede_set_channels, - .get_tunable = qede_get_tunable, - .set_tunable = qede_set_tunable, + .supported_coalesce_params = ETHTOOL_COALESCE_USECS, + .get_link_ksettings = qede_get_link_ksettings, + .get_drvinfo = qede_get_drvinfo, + .get_msglevel = qede_get_msglevel, + .set_msglevel = qede_set_msglevel, + .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_strings = qede_get_strings, + .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, + .get_sset_count = qede_get_sset_count, + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, + .get_tunable = qede_get_tunable, + .set_tunable = qede_set_tunable, }; void qede_set_ethtool_ops(struct net_device *dev) From 9bdca14a0e844f298de3bcfd167b214666cc35aa Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:09 +0300 Subject: [PATCH 1300/2056] qede: introduce support for FEC control Add Ethtool callbacks for querying and setting FEC parameters if it's supported by the underlying qed module and MFW version running on the device. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
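Once the qede_get_fecparam()/qede_set_fecparam() callbacks added below are registered, the new FEC control should be reachable from userspace with stock ethtool, assuming a build recent enough to know about the FEC commands; the interface name here is only an example:

  ethtool --show-fec eth0
  ethtool --set-fec eth0 encoding rs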
Miller --- .../net/ethernet/qlogic/qede/qede_ethtool.c | 74 +++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index fdccceb69632..b9aa6384563b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1834,6 +1834,78 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) return 0; } +static u32 qede_link_to_ethtool_fec(u32 link_fec) +{ + u32 eth_fec = 0; + + if (link_fec & QED_FEC_MODE_NONE) + eth_fec |= ETHTOOL_FEC_OFF; + if (link_fec & QED_FEC_MODE_FIRECODE) + eth_fec |= ETHTOOL_FEC_BASER; + if (link_fec & QED_FEC_MODE_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (link_fec & QED_FEC_MODE_AUTO) + eth_fec |= ETHTOOL_FEC_AUTO; + if (link_fec & QED_FEC_MODE_UNSUPPORTED) + eth_fec |= ETHTOOL_FEC_NONE; + + return eth_fec; +} + +static u32 qede_ethtool_to_link_fec(u32 eth_fec) +{ + u32 link_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + link_fec |= QED_FEC_MODE_NONE; + if (eth_fec & ETHTOOL_FEC_BASER) + link_fec |= QED_FEC_MODE_FIRECODE; + if (eth_fec & ETHTOOL_FEC_RS) + link_fec |= QED_FEC_MODE_RS; + if (eth_fec & ETHTOOL_FEC_AUTO) + link_fec |= QED_FEC_MODE_AUTO; + if (eth_fec & ETHTOOL_FEC_NONE) + link_fec |= QED_FEC_MODE_UNSUPPORTED; + + return link_fec; +} + +static int qede_get_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output curr_link; + + memset(&curr_link, 0, sizeof(curr_link)); + edev->ops->common->get_link(edev->cdev, &curr_link); + + fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec); + fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec); + + return 0; +} + +static int qede_set_fecparam(struct net_device *dev, + struct ethtool_fecparam *fecparam) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_params params; + + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + + memset(&params, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG; + params.fec = qede_ethtool_to_link_fec(fecparam->fec); + params.link_up = true; + + edev->ops->common->set_link(edev->cdev, &params); + + return 0; +} + static int qede_get_module_info(struct net_device *dev, struct ethtool_modinfo *modinfo) { @@ -2070,6 +2142,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_module_eeprom = qede_get_module_eeprom, .get_eee = qede_get_eee, .set_eee = qede_set_eee, + .get_fecparam = qede_get_fecparam, + .set_fecparam = qede_set_fecparam, .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, .flash_device = qede_flash_device, From 5d4193c641dc4a2cd4ee31fae20efcbabaeb961f Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:10 +0300 Subject: [PATCH 1301/2056] qed: reformat several structures a bit Reformat a few nvm_cfg* structures (and partly qed_dev) prior to adding new fields and definitions. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 101 +++---- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 333 +++++++++++----------- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 12 +- 3 files changed, 227 insertions(+), 219 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ced805399e4f..e2d7a4bbab53 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -280,48 +280,49 @@ enum qed_db_rec_exec { struct qed_hw_info { /* PCI personality */ - enum qed_pci_personality personality; -#define QED_IS_RDMA_PERSONALITY(dev) \ - ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ - (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ + enum qed_pci_personality personality; +#define QED_IS_RDMA_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ (dev)->hw_info.personality == QED_PCI_ETH_RDMA) -#define QED_IS_ROCE_PERSONALITY(dev) \ - ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ +#define QED_IS_ROCE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ (dev)->hw_info.personality == QED_PCI_ETH_RDMA) -#define QED_IS_IWARP_PERSONALITY(dev) \ - ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ +#define QED_IS_IWARP_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ (dev)->hw_info.personality == QED_PCI_ETH_RDMA) -#define QED_IS_L2_PERSONALITY(dev) \ - ((dev)->hw_info.personality == QED_PCI_ETH || \ +#define QED_IS_L2_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH || \ QED_IS_RDMA_PERSONALITY(dev)) -#define QED_IS_FCOE_PERSONALITY(dev) \ +#define QED_IS_FCOE_PERSONALITY(dev) \ ((dev)->hw_info.personality == QED_PCI_FCOE) -#define QED_IS_ISCSI_PERSONALITY(dev) \ +#define QED_IS_ISCSI_PERSONALITY(dev) \ ((dev)->hw_info.personality == QED_PCI_ISCSI) /* Resource Allocation scheme results */ u32 resc_start[QED_MAX_RESC]; u32 resc_num[QED_MAX_RESC]; - u32 feat_num[QED_MAX_FEATURES]; +#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) +#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) -#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) -#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) -#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ - RESC_NUM(_p_hwfn, resc)) -#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) + u32 feat_num[QED_MAX_FEATURES]; +#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) /* Amount of traffic classes HW supports */ - u8 num_hw_tc; + u8 num_hw_tc; /* Amount of TCs which should be active according to DCBx or upper * layer driver configuration. 
*/ - u8 num_active_tc; + u8 num_active_tc; + u8 offload_tc; bool offload_tc_set; bool multi_tc_roce_en; -#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en)) +#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en) u32 concrete_fid; u16 opaque_fid; @@ -338,10 +339,10 @@ struct qed_hw_info { u32 port_mode; u32 hw_mode; - unsigned long device_capabilities; + unsigned long device_capabilities; u16 mtu; - enum qed_wol_support b_wol_support; + enum qed_wol_support b_wol_support; }; /* maximun size of read/write commands (HW limit) */ @@ -715,41 +716,41 @@ struct qed_dbg_feature { }; struct qed_dev { - u32 dp_module; - u8 dp_level; - char name[NAME_SIZE]; + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; - enum qed_dev_type type; -/* Translate type/revision combo into the proper conditions */ -#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) -#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \ - CHIP_REV_IS_B0(dev)) -#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) -#define QED_IS_K2(dev) QED_IS_AH(dev) + enum qed_dev_type type; + /* Translate type/revision combo into the proper conditions */ +#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) +#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev)) +#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) +#define QED_IS_K2(dev) QED_IS_AH(dev) +#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) - u16 vendor_id; - u16 device_id; -#define QED_DEV_ID_MASK 0xff00 -#define QED_DEV_ID_MASK_BB 0x1600 -#define QED_DEV_ID_MASK_AH 0x8000 -#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) + u16 vendor_id; - u16 chip_num; -#define CHIP_NUM_MASK 0xffff -#define CHIP_NUM_SHIFT 16 + u16 device_id; +#define QED_DEV_ID_MASK 0xff00 +#define QED_DEV_ID_MASK_BB 0x1600 +#define QED_DEV_ID_MASK_AH 0x8000 - u16 chip_rev; -#define CHIP_REV_MASK 0xf -#define CHIP_REV_SHIFT 12 -#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) + u16 chip_num; +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 16 + + u16 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) u16 chip_metal; -#define CHIP_METAL_MASK 0xff -#define CHIP_METAL_SHIFT 4 +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 4 u16 chip_bond_id; -#define CHIP_BOND_ID_MASK 0xf -#define CHIP_BOND_ID_SHIFT 0 +#define CHIP_BOND_ID_MASK 0xf +#define CHIP_BOND_ID_SHIFT 0 u8 num_engines; u8 num_ports; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 7c1d4efffbff..a4a845579fd2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12532,67 +12532,67 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 -#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 -#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 -#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 -#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 -#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define 
DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 /* Resource Allocation params - Driver version support */ -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF -#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 -#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 -#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 -#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 -#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 -#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 -#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xFF +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff /* Driver attributes params */ -#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 -#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF -#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 -#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000FFFF -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 -#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 -#define 
DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 -#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 -#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 @@ -12639,56 +12639,56 @@ struct public_drv_mb { #define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000 - u32 fw_mb_param; -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff -#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + u32 fw_mb_param; +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 /* Get PF RDMA protocol command response */ -#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 -#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 -#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 -#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 /* Get MFW feature support response */ -#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 -#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 -#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL 0x00000020 -#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) -#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 -#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 -#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define 
FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 -#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff -#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 - u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 - union drv_union_data union_data; + union drv_union_data union_data; }; -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 -#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, @@ -12975,86 +12975,93 @@ enum tlvs { }; struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - u32 mac_addr_lo; + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; }; struct nvm_cfg1_glob { - u32 generic_cont0; -#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 - u32 engineering_change[3]; - u32 manufacturing_id; - u32 serial_number[4]; - u32 pcie_cfg; - u32 mgmt_traffic; - u32 core_cfg; -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define 
NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 - u32 e_lane_cfg1; - u32 e_lane_cfg2; - u32 f_lane_cfg1; - u32 f_lane_cfg2; - u32 mps10_preemphasis; - u32 mps10_driver_current; - u32 mps25_preemphasis; - u32 mps25_driver_current; - u32 pci_id; - u32 pci_subsys_id; - u32 bar; - u32 mps10_txfir_main; - u32 mps10_txfir_post; - u32 mps25_txfir_main; - u32 mps25_txfir_post; - u32 manufacture_ver; - u32 manufacture_time; - u32 led_global_settings; - u32 generic_cont1; - u32 mbi_version; -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - u32 mbi_date; - u32 misc_sig; - u32 device_capabilities; -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 - u32 power_dissipated; - u32 power_consumed; - u32 efi_version; - u32 multi_network_modes_capability; - u32 reserved[41]; + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf + + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + + u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; + u32 misc_sig; + + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 + + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_net_modes_cap; + u32 reserved[41]; }; struct nvm_cfg1_path { - u32 reserved[30]; + u32 reserved[30]; }; struct nvm_cfg1_port { @@ -13082,7 +13089,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 #define 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 @@ -13094,7 +13101,7 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 5e50405854e6..ea956c43e596 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -16,15 +16,15 @@ #include "qed_dev_api.h" struct qed_mcp_link_speed_params { - bool autoneg; - u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */ - u32 forced_speed; /* In Mb/s */ + bool autoneg; + u32 advertised_speeds; + u32 forced_speed; /* In Mb/s */ }; struct qed_mcp_link_pause_params { - bool autoneg; - bool forced_rx; - bool forced_tx; + bool autoneg; + bool forced_rx; + bool forced_tx; }; enum qed_mcp_eee_mode { From e9a5eb856411b371d6dde2f5aa3de7258be0f17e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:11 +0300 Subject: [PATCH 1302/2056] qed: remove unused qed_hw_info::port_mode and QED_PORT_MODE Struct field qed_hw_info::port_mode isn't used anywhere in the code, so can be safely removed to prevent possible dead code addition. Also remove the enumeration QED_PORT_MODE orphaned after this deletion. Signed-off-by: Alexander Lobakin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 15 --------------- drivers/net/ethernet/qlogic/qed/qed_dev.c | 21 --------------------- 2 files changed, 36 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e2d7a4bbab53..47da4f7d3be2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -245,20 +245,6 @@ enum QED_FEATURE { QED_MAX_FEATURES, }; -enum QED_PORT_MODE { - QED_PORT_MODE_DE_2X40G, - QED_PORT_MODE_DE_2X50G, - QED_PORT_MODE_DE_1X100G, - QED_PORT_MODE_DE_4X10G_F, - QED_PORT_MODE_DE_4X10G_E, - QED_PORT_MODE_DE_4X20G, - QED_PORT_MODE_DE_1X40G, - QED_PORT_MODE_DE_2X25G, - QED_PORT_MODE_DE_1X25G, - QED_PORT_MODE_DE_4X25G, - QED_PORT_MODE_DE_2X10G, -}; - enum qed_dev_cap { QED_DEV_CAP_ETH, QED_DEV_CAP_FCOE, @@ -337,7 +323,6 @@ struct qed_hw_info { struct qed_igu_info *p_igu_info; - u32 port_mode; u32 hw_mode; unsigned long device_capabilities; u16 mtu; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d929556247a5..eaf37822fed7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3994,37 +3994,16 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; - break; case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: - p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G; break; default: DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); From a396818c080db0fb0be046225aeafad02459ccae Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:12 +0300 Subject: [PATCH 1303/2056] qed: add support for new port modes These ports ship on new boards revisions and are supported by newer firmware versions. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 5 +++++ drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index eaf37822fed7..66a520099c44 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4004,6 +4004,11 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4: break; default: DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a4a845579fd2..debc55923251 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -13015,6 +13015,11 @@ struct nvm_cfg1_glob { #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 u32 e_lane_cfg1; u32 e_lane_cfg2; From 98e675ec5a92a15f6f8ade41eda883cd39df5712 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:13 +0300 Subject: [PATCH 1304/2056] qed: add missing loopback modes These modes are relevant only for several boards, but may be reported by MFW as well as the others. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5 +++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 19 +++++++++++++++++++ include/linux/qed/qed_if.h | 5 +++++ 3 files changed, 29 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index debc55923251..5b81d5d42397 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11554,6 +11554,11 @@ struct eth_phy_cfg { #define ETH_LOOPBACK_EXT_PHY 0x2 #define ETH_LOOPBACK_EXT 0x3 #define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 u32 eee_cfg; #define EEE_CFG_EEE_ENABLED BIT(0) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 91e7cfc544f0..fea155c6ff11 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1587,6 +1587,25 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) case QED_LINK_LOOPBACK_MAC: link_params->loopback_mode = ETH_LOOPBACK_MAC; break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: + link_params->loopback_mode = + ETH_LOOPBACK_CNIG_AH_ONLY_0123; + break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: + link_params->loopback_mode = + ETH_LOOPBACK_CNIG_AH_ONLY_2301; + break; + case QED_LINK_LOOPBACK_PCS_AH_ONLY: + link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; + break; + case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; + break; + case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; + break; default: link_params->loopback_mode = ETH_LOOPBACK_NONE; break; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index f0b4cdc79299..2e780159a5fb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -696,6 +696,11 @@ struct qed_link_params { #define QED_LINK_LOOPBACK_EXT_PHY BIT(2) #define QED_LINK_LOOPBACK_EXT BIT(3) #define QED_LINK_LOOPBACK_MAC BIT(4) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6) +#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7) +#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8) +#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9) struct qed_link_eee_params eee; u32 fec; From 097818fcf81d672e32229fcc52a3370ccae8d3c5 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:14 +0300 Subject: [PATCH 1305/2056] qed: populate supported link modes maps on module init Simplify and lighten qed_set_link() by declaring static link modes maps and populating them on module init. This way we save plenty of text size at the low expense of __ro_after_init and __initconst data (the latter will be purged after module init is done). Misc: sanitize exit callback. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. 
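The data-driven approach described above replaces the long chain of phylink_set()/linkmode_intersects() calls in qed_set_link() with static tables built once at module init. A minimal standalone sketch of the same pattern in plain C, with invented bit numbers and MFW values and a 64-bit mask standing in for the ethtool linkmode bitmap (illustration only, not the driver code):

/* Standalone illustration of the "populate maps once, intersect at runtime"
 * pattern; not the kernel code. Bit numbers and MFW values are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct speed_map {
	uint32_t mfw_val;		/* capability bit reported to the MFW */
	uint64_t caps;			/* expanded bitmap, filled at init */
	const uint8_t *cap_bits;	/* compact init data */
	size_t n_bits;
};

static const uint8_t bits_10g[] = { 3, 4, 5 };
static const uint8_t bits_25g[] = { 8, 9 };

static struct speed_map maps[] = {
	{ .mfw_val = 0x2, .cap_bits = bits_10g, .n_bits = sizeof(bits_10g) },
	{ .mfw_val = 0x8, .cap_bits = bits_25g, .n_bits = sizeof(bits_25g) },
};

static void maps_init(void)	/* done once, like module init */
{
	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		for (size_t j = 0; j < maps[i].n_bits; j++)
			maps[i].caps |= 1ULL << maps[i].cap_bits[j];
}

int main(void)
{
	uint64_t advertised = (1ULL << 4) | (1ULL << 9);	/* requested modes */
	uint32_t mfw = 0;

	maps_init();
	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		if (advertised & maps[i].caps)
			mfw |= maps[i].mfw_val;

	printf("MFW speed mask: 0x%x\n", mfw);	/* prints 0xa */
	return 0;
}

The compact init arrays play the role of the __initconst data in the patch: once the bitmaps are built, only the populated maps are needed at runtime.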
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 193 +++++++++++++-------- 1 file changed, 120 insertions(+), 73 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index fea155c6ff11..4fe66cf60f24 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -65,20 +65,118 @@ MODULE_VERSION(DRV_MODULE_VERSION); MODULE_FIRMWARE(QED_FW_FILE_NAME); +/* MFW speed capabilities maps */ + +struct qed_mfw_speed_map { + u32 mfw_val; + __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); + + const u32 *cap_arr; + u32 arr_size; +}; + +#define QED_MFW_SPEED_MAP(type, arr) \ +{ \ + .mfw_val = (type), \ + .cap_arr = (arr), \ + .arr_size = ARRAY_SIZE(arr), \ +} + +static const u32 qed_mfw_legacy_1g[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qed_mfw_legacy_10g[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qed_mfw_legacy_20g[] __initconst = { + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, +}; + +static const u32 qed_mfw_legacy_25g[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qed_mfw_legacy_40g[] __initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qed_mfw_legacy_50g[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qed_mfw_legacy_bb_100g[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, + qed_mfw_legacy_1g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, + qed_mfw_legacy_10g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, + qed_mfw_legacy_20g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, + qed_mfw_legacy_25g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, + qed_mfw_legacy_40g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, + qed_mfw_legacy_50g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, + qed_mfw_legacy_bb_100g), +}; + +static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) +{ + linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); + + map->cap_arr = NULL; + map->arr_size = 0; +} + +static void __init qed_mfw_speed_maps_init(void) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) + qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); +} + static int __init qed_init(void) { pr_info("%s", version); + qed_mfw_speed_maps_init(); + return 0; } - -static void __exit qed_cleanup(void) -{ - pr_notice("qed_cleanup 
called\n"); -} - module_init(qed_init); -module_exit(qed_cleanup); + +static void __exit qed_exit(void) +{ + /* To prevent marking this module as "permanent" */ +} +module_exit(qed_exit); /* Check if the DMA controller on the machine can properly handle the DMA * addressing required by the device. @@ -1457,12 +1555,13 @@ static bool qed_can_link_change(struct qed_dev *cdev) static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps); struct qed_mcp_link_params *link_params; + struct qed_mcp_link_speed_params *speed; + const struct qed_mfw_speed_map *map; struct qed_hwfn *hwfn; struct qed_ptt *ptt; - u32 as; int rc; + u32 i; if (!cdev) return -ENODEV; @@ -1487,78 +1586,25 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (!link_params) return -ENODATA; + speed = &link_params->speed; + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) - link_params->speed.autoneg = params->autoneg; + speed->autoneg = !!params->autoneg; if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { - as = 0; + speed->advertised_speeds = 0; - phylink_zero(sup_caps); - phylink_set(sup_caps, 1000baseT_Full); - phylink_set(sup_caps, 1000baseKX_Full); - phylink_set(sup_caps, 1000baseX_Full); + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { + map = qed_mfw_legacy_maps + i; - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 10000baseT_Full); - phylink_set(sup_caps, 10000baseKR_Full); - phylink_set(sup_caps, 10000baseKX4_Full); - phylink_set(sup_caps, 10000baseR_FEC); - phylink_set(sup_caps, 10000baseCR_Full); - phylink_set(sup_caps, 10000baseSR_Full); - phylink_set(sup_caps, 10000baseLR_Full); - phylink_set(sup_caps, 10000baseLRM_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 20000baseKR2_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 25000baseKR_Full); - phylink_set(sup_caps, 25000baseCR_Full); - phylink_set(sup_caps, 25000baseSR_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 40000baseLR4_Full); - phylink_set(sup_caps, 40000baseKR4_Full); - phylink_set(sup_caps, 40000baseCR4_Full); - phylink_set(sup_caps, 40000baseSR4_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 50000baseKR2_Full); - phylink_set(sup_caps, 50000baseCR2_Full); - phylink_set(sup_caps, 50000baseSR2_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; - - phylink_zero(sup_caps); - phylink_set(sup_caps, 100000baseKR4_Full); - phylink_set(sup_caps, 100000baseSR4_Full); - phylink_set(sup_caps, 100000baseCR4_Full); - phylink_set(sup_caps, 100000baseLR4_ER4_Full); - - if (linkmode_intersects(params->adv_speeds, sup_caps)) - as |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; - - link_params->speed.advertised_speeds = as; + if (linkmode_intersects(params->adv_speeds, map->caps)) + speed->advertised_speeds |= map->mfw_val; + } } if (params->override_flags & 
QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) - link_params->speed.forced_speed = params->forced_speed; + speed->forced_speed = params->forced_speed; + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) link_params->pause.autoneg = true; @@ -1573,6 +1619,7 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) else link_params->pause.forced_tx = false; } + if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { switch (params->loopback_mode) { case QED_LINK_LOOPBACK_INT_PHY: From 99785a87fc7d27207c7dca0f0fe04386f1981690 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 20 Jul 2020 21:08:15 +0300 Subject: [PATCH 1306/2056] qed: add support for the extended speed and FEC modes Add all necessary code (NVM parsing, MFW and Ethtool reports etc.) to support extended speed and FEC modes. These new modes are supported by the new boards revisions and newer MFW versions. Misc: correct port type for MEDIA_KR. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 1 + drivers/net/ethernet/qlogic/qed/qed_dev.c | 102 +++++++- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 85 ++++++- drivers/net/ethernet/qlogic/qed/qed_main.c | 264 ++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 74 +++++- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 45 ++++ include/linux/qed/qed_if.h | 1 + 7 files changed, 547 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 47da4f7d3be2..b2a7b53ee760 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -712,6 +712,7 @@ struct qed_dev { #define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) #define QED_IS_K2(dev) QED_IS_AH(dev) #define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev)) +#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5) u16 vendor_id; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 66a520099c44..6516a1f921da 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3968,8 +3968,9 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fc; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_speed_params *ext_speed; struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; @@ -4026,8 +4027,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; link->speed.advertised_speeds = link_temp; - link_temp = link->speed.advertised_speeds; - p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; + p_caps->speed_capabilities = link->speed.advertised_speeds; link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + @@ -4062,13 +4062,12 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); } - p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = - link->speed.autoneg; + p_caps->default_speed_autoneg = link->speed.autoneg; - fc = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); - 
link->pause.autoneg = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); - link->pause.forced_rx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); - link->pause.forced_tx = !!(fc & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; if (p_hwfn->mcp_info->capabilities & @@ -4128,6 +4127,91 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = &link->ext_speed; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_speed)); + + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) + ext_speed->autoneg = true; + + ext_speed->forced_speed = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) + ext_speed->forced_speed |= QED_EXT_SPEED_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) + ext_speed->forced_speed |= QED_EXT_SPEED_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) + ext_speed->forced_speed |= QED_EXT_SPEED_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) + ext_speed->forced_speed |= QED_EXT_SPEED_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) + ext_speed->forced_speed |= QED_EXT_SPEED_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; + + fld = GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_EXTENDED_SPEED_CAP); + + ext_speed->advertised_speeds = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R4; + if (fld & 
NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_P4; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_fec_mode)); + link->ext_fec_mode = link_temp; + + p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; + p_caps->default_ext_speed = ext_speed->forced_speed; + p_caps->default_ext_autoneg = ext_speed->autoneg; + p_caps->default_ext_fec = link->ext_fec_mode; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n", + ext_speed->forced_speed, + ext_speed->advertised_speeds, ext_speed->autoneg, + p_caps->default_ext_fec); + } + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", link->speed.forced_speed, link->speed.advertised_speeds, diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 5b81d5d42397..1af3f65ab862 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -11580,6 +11580,54 @@ struct eth_phy_cfg { #define FEC_FORCE_MODE_FIRECODE 0x01 #define FEC_FORCE_MODE_RS 0x02 #define FEC_FORCE_MODE_AUTO 0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000100 +#define ETH_EXT_FEC_10G_NONE 0x00000200 +#define ETH_EXT_FEC_10G_BASE_R 0x00000400 +#define ETH_EXT_FEC_20G_NONE 0x00000800 +#define ETH_EXT_FEC_20G_BASE_R 0x00001000 +#define ETH_EXT_FEC_25G_NONE 0x00002000 +#define ETH_EXT_FEC_25G_BASE_R 0x00004000 +#define ETH_EXT_FEC_25G_RS528 0x00008000 +#define ETH_EXT_FEC_40G_NONE 0x00010000 +#define ETH_EXT_FEC_40G_BASE_R 0x00020000 +#define ETH_EXT_FEC_50G_NONE 0x00040000 +#define ETH_EXT_FEC_50G_BASE_R 0x00080000 +#define ETH_EXT_FEC_50G_RS528 0x00100000 +#define ETH_EXT_FEC_50G_RS544 0x00200000 +#define ETH_EXT_FEC_100G_NONE 0x00400000 +#define ETH_EXT_FEC_100G_BASE_R 0x00800000 +#define ETH_EXT_FEC_100G_RS528 0x01000000 +#define ETH_EXT_FEC_100G_RS544 0x02000000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_AN 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 +#define ETH_EXT_SPEED_20G 0x00000008 +#define ETH_EXT_SPEED_25G 0x00000010 +#define ETH_EXT_SPEED_40G 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000040 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400 +#define ETH_EXT_ADV_SPEED_MASK 0xffff0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000 +#define ETH_EXT_ADV_SPEED_1G 0x00020000 +#define ETH_EXT_ADV_SPEED_10G 0x00040000 +#define ETH_EXT_ADV_SPEED_20G 0x00080000 +#define ETH_EXT_ADV_SPEED_25G 0x00100000 +#define ETH_EXT_ADV_SPEED_40G 0x00200000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000 }; struct port_mf_cfg { @@ -12571,6 +12619,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 #define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 #define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ @@ -12660,6 +12709,7 @@ struct public_drv_mb { #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) #define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) #define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) @@ -13174,7 +13224,40 @@ struct nvm_cfg1_port { u32 mnm_100g_ctrl; u32 mnm_100g_misc; - u32 reserved[116]; + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + + u32 reserved[112]; }; struct nvm_cfg1_func { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 4fe66cf60f24..2558cb680db3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -82,6 +82,85 @@ struct qed_mfw_speed_map { .arr_size = ARRAY_SIZE(arr), \ } +static const u32 qed_mfw_ext_1g[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qed_mfw_ext_10g[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qed_mfw_ext_20g[] __initconst = { + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_25g[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qed_mfw_ext_40g[] 
__initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, + qed_mfw_ext_50g_base_r), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, + qed_mfw_ext_50g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, + qed_mfw_ext_100g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, + qed_mfw_ext_100g_base_r4), +}; + static const u32 qed_mfw_legacy_1g[] __initconst = { ETHTOOL_LINK_MODE_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, @@ -158,6 +237,9 @@ static void __init qed_mfw_speed_maps_init(void) { u32 i; + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) + qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); } @@ -1553,6 +1635,147 @@ static bool qed_can_link_change(struct qed_dev *cdev) return true; } +static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, + const struct qed_link_params *params) +{ + struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; + const struct qed_mfw_speed_map *map; + u32 i; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + ext_speed->autoneg = !!params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + ext_speed->advertised_speeds = 0; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { + map = qed_mfw_ext_maps + i; + + if (linkmode_intersects(params->adv_speeds, map->caps)) + ext_speed->advertised_speeds |= map->mfw_val; + } + } + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { + switch (params->forced_speed) { + case SPEED_1000: + ext_speed->forced_speed = QED_EXT_SPEED_1G; + break; + case SPEED_10000: + ext_speed->forced_speed = QED_EXT_SPEED_10G; + break; + case SPEED_20000: + ext_speed->forced_speed = QED_EXT_SPEED_20G; + break; + case SPEED_25000: + ext_speed->forced_speed = QED_EXT_SPEED_25G; + break; + case SPEED_40000: + ext_speed->forced_speed = 
QED_EXT_SPEED_40G; + break; + case SPEED_50000: + ext_speed->forced_speed = QED_EXT_SPEED_50G_R | + QED_EXT_SPEED_50G_R2; + break; + case SPEED_100000: + ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | + QED_EXT_SPEED_100G_R4 | + QED_EXT_SPEED_100G_P4; + break; + default: + break; + } + } + + if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) + return; + + switch (params->forced_speed) { + case SPEED_25000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | + ETH_EXT_FEC_25G_BASE_R | + ETH_EXT_FEC_25G_NONE; + break; + default: + break; + } + + break; + case SPEED_40000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | + ETH_EXT_FEC_40G_NONE; + break; + default: + break; + } + + break; + case SPEED_50000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | + ETH_EXT_FEC_50G_BASE_R | + ETH_EXT_FEC_50G_NONE; + break; + default: + break; + } + + break; + case SPEED_100000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | + ETH_EXT_FEC_100G_BASE_R | + ETH_EXT_FEC_100G_NONE; + break; + default: + break; + } + + break; + default: + break; + } +} + static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_mcp_link_params *link_params; @@ -1605,6 +1828,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) speed->forced_speed = params->forced_speed; + if (qed_mcp_is_ext_speed_supported(hwfn)) + qed_set_ext_speed_params(link_params, params); + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) link_params->pause.autoneg = true; @@ -1682,7 +1908,6 @@ static int qed_get_port_type(u32 media_type) case MEDIA_SFP_1G_FIBER: case MEDIA_XFP_FIBER: case MEDIA_MODULE_FIBER: - case MEDIA_KR: port_type = PORT_FIBRE; break; case MEDIA_DA_TWINAX: @@ -1691,6 +1916,7 @@ static int qed_get_port_type(u32 media_type) case MEDIA_BASE_T: port_type = PORT_TP; break; + case MEDIA_KR: case MEDIA_NOT_PRESENT: port_type = PORT_NONE; break; @@ -1990,9 +2216,32 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (link.link_up) if_link->link_up = true; - /* TODO - at the moment assume supported and advertised speed equal */ - if (link_caps.default_speed_autoneg) - phylink_set(if_link->supported_caps, Autoneg); 
+ if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { + if (link_caps.default_ext_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.ext_speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + phylink_clear(if_link->advertised_caps, Autoneg); + + qed_fill_link_capability(hwfn, ptt, + params.ext_speed.advertised_speeds, + if_link->advertised_caps); + } else { + if (link_caps.default_speed_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + phylink_clear(if_link->advertised_caps, Autoneg); + } + if (params.pause.autoneg || (params.pause.forced_rx && params.pause.forced_tx)) phylink_set(if_link->supported_caps, Asym_Pause); @@ -2000,13 +2249,6 @@ static void qed_fill_link(struct qed_hwfn *hwfn, params.pause.forced_tx) phylink_set(if_link->supported_caps, Pause); - linkmode_copy(if_link->advertised_caps, if_link->supported_caps); - - if (params.speed.autoneg) - phylink_set(if_link->advertised_caps, Autoneg); - else - phylink_clear(if_link->advertised_caps, Autoneg); - if_link->sup_fec = link_caps.fec_default; if_link->active_fec = params.fec; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 78c0d3a2d164..988d84564849 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1476,6 +1476,7 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; u32 cmd, fec_bit = 0; + u32 val, ext_speed; int rc = 0; /* Set the shmem configuration according to params */ @@ -1522,16 +1523,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = 0; + if (params->ext_speed.autoneg) + ext_speed |= ETH_EXT_SPEED_AN; + + val = params->ext_speed.forced_speed; + if (val & QED_EXT_SPEED_1G) + ext_speed |= ETH_EXT_SPEED_1G; + if (val & QED_EXT_SPEED_10G) + ext_speed |= ETH_EXT_SPEED_10G; + if (val & QED_EXT_SPEED_20G) + ext_speed |= ETH_EXT_SPEED_20G; + if (val & QED_EXT_SPEED_25G) + ext_speed |= ETH_EXT_SPEED_25G; + if (val & QED_EXT_SPEED_40G) + ext_speed |= ETH_EXT_SPEED_40G; + if (val & QED_EXT_SPEED_50G_R) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_50G_R2) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R2) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_100G_P4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; + + SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, + ext_speed); + + ext_speed = 0; + + val = params->ext_speed.advertised_speeds; + if (val & QED_EXT_SPEED_MASK_1G) + ext_speed |= ETH_EXT_ADV_SPEED_1G; + if (val & QED_EXT_SPEED_MASK_10G) + ext_speed |= ETH_EXT_ADV_SPEED_10G; + if (val & QED_EXT_SPEED_MASK_20G) + ext_speed |= ETH_EXT_ADV_SPEED_20G; + if (val & QED_EXT_SPEED_MASK_25G) + ext_speed |= ETH_EXT_ADV_SPEED_25G; + if (val & QED_EXT_SPEED_MASK_40G) + ext_speed |= ETH_EXT_ADV_SPEED_40G; + if (val & QED_EXT_SPEED_MASK_50G_R) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; + if (val & 
QED_EXT_SPEED_MASK_50G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_MASK_100G_P4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; + + phy_cfg.extended_speed |= ext_speed; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, + params->ext_fec_mode); + } + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, FEC 0x%08x\n", + "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n", phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, - phy_cfg.loopback_mode, phy_cfg.fec_mode); + phy_cfg.loopback_mode, phy_cfg.fec_mode, + phy_cfg.extended_speed); } else { - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Resetting link\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } memset(&mb_params, 0, sizeof(mb_params)); @@ -3838,6 +3900,10 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; + if (QED_IS_E5(p_hwfn->cdev)) + features |= + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL; + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index ea956c43e596..8edb450d0abf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -17,8 +17,31 @@ struct qed_mcp_link_speed_params { bool autoneg; + u32 advertised_speeds; +#define QED_EXT_SPEED_MASK_RES 0x1 +#define QED_EXT_SPEED_MASK_1G 0x2 +#define QED_EXT_SPEED_MASK_10G 0x4 +#define QED_EXT_SPEED_MASK_20G 0x8 +#define QED_EXT_SPEED_MASK_25G 0x10 +#define QED_EXT_SPEED_MASK_40G 0x20 +#define QED_EXT_SPEED_MASK_50G_R 0x40 +#define QED_EXT_SPEED_MASK_50G_R2 0x80 +#define QED_EXT_SPEED_MASK_100G_R2 0x100 +#define QED_EXT_SPEED_MASK_100G_R4 0x200 +#define QED_EXT_SPEED_MASK_100G_P4 0x400 + u32 forced_speed; /* In Mb/s */ +#define QED_EXT_SPEED_1G 0x1 +#define QED_EXT_SPEED_10G 0x2 +#define QED_EXT_SPEED_20G 0x4 +#define QED_EXT_SPEED_25G 0x8 +#define QED_EXT_SPEED_40G 0x10 +#define QED_EXT_SPEED_50G_R 0x20 +#define QED_EXT_SPEED_50G_R2 0x40 +#define QED_EXT_SPEED_100G_R2 0x80 +#define QED_EXT_SPEED_100G_R4 0x100 +#define QED_EXT_SPEED_100G_P4 0x200 }; struct qed_mcp_link_pause_params { @@ -39,6 +62,9 @@ struct qed_mcp_link_params { u32 loopback_mode; struct qed_link_eee_params eee; u32 fec; + + struct qed_mcp_link_speed_params ext_speed; + u32 ext_fec_mode; }; struct qed_mcp_link_capabilities { @@ -48,6 +74,11 @@ struct qed_mcp_link_capabilities { enum qed_mcp_eee_mode default_eee; u32 eee_lpi_timer; u8 eee_speed_caps; + + u32 default_ext_speed_caps; + u32 default_ext_autoneg; + u32 default_ext_speed; + u32 default_ext_fec; }; struct qed_mcp_link_state { @@ -750,6 +781,20 @@ struct qed_drv_tlv_hdr { u8 tlv_flags; }; +/** + * qed_mcp_is_ext_speed_supported() - Check if management firmware supports + * extended speeds. + * @p_hwfn: HW device data. + * + * Return: true if supported, false otherwise. 
+ */ +static inline bool +qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL); +} + /** * @brief Initialize the interface with the MCP * diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 2e780159a5fb..a5c6854343e6 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -594,6 +594,7 @@ enum qed_hw_err_type { enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, + QED_DEV_TYPE_E5, }; struct qed_dev_info { From 0c17ac5424daaa5fbf4842792f112fb6d7c81841 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 20 Jul 2020 21:49:31 +0530 Subject: [PATCH 1307/2056] ethernet: myri10ge: use generic power management Drivers using legacy PM have to manage PCI states and device's PM states themselves. They also need to take care of configuration registers. With improved and powerful support of generic PM, PCI Core takes care of above mentioned, device-independent, jobs. This driver makes use of PCI helper functions like pci_save/restore_state(), pci_enable/disable_device(), pci_set_power_state() and pci_set_master() to do required operations. In generic mode, they are no longer needed. Change function parameter in both .suspend() and .resume() to "struct device*" type. Use to_pci_dev() and dev_get_drvdata() to get "struct pci_dev*" variable and drv data. Compile-tested only. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- .../net/ethernet/myricom/myri10ge/myri10ge.c | 37 ++++--------------- 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index e1e1f4e3639e..4a5beafa0493 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3257,13 +3257,12 @@ static void myri10ge_mask_surprise_down(struct pci_dev *pdev) } } -#ifdef CONFIG_PM -static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused myri10ge_suspend(struct device *dev) { struct myri10ge_priv *mgp; struct net_device *netdev; - mgp = pci_get_drvdata(pdev); + mgp = dev_get_drvdata(dev); if (mgp == NULL) return -EINVAL; netdev = mgp->dev; @@ -3276,14 +3275,13 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) rtnl_unlock(); } myri10ge_dummy_rdma(mgp, 0); - pci_save_state(pdev); - pci_disable_device(pdev); - return pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; } -static int myri10ge_resume(struct pci_dev *pdev) +static int __maybe_unused myri10ge_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct myri10ge_priv *mgp; struct net_device *netdev; int status; @@ -3293,7 +3291,6 @@ static int myri10ge_resume(struct pci_dev *pdev) if (mgp == NULL) return -EINVAL; netdev = mgp->dev; - pci_set_power_state(pdev, PCI_D0); /* zeros conf space as a side effect */ msleep(5); /* give card time to respond */ pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor); if (vendor == 0xffff) { @@ -3301,23 +3298,9 @@ static int myri10ge_resume(struct pci_dev *pdev) return -EIO; } - pci_restore_state(pdev); - - status = pci_enable_device(pdev); - if (status) { - dev_err(&pdev->dev, "failed to enable device\n"); - return status; - } - - pci_set_master(pdev); - myri10ge_reset(mgp); myri10ge_dummy_rdma(mgp, 1); - /* Save configuration space to be restored if the - * nic resets due to a parity error */ - pci_save_state(pdev); - 
if (netif_running(netdev)) { rtnl_lock(); status = myri10ge_open(netdev); @@ -3331,11 +3314,8 @@ static int myri10ge_resume(struct pci_dev *pdev) return 0; abort_with_enabled: - pci_disable_device(pdev); return -EIO; - } -#endif /* CONFIG_PM */ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) { @@ -4017,15 +3997,14 @@ static const struct pci_device_id myri10ge_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl); +static SIMPLE_DEV_PM_OPS(myri10ge_pm_ops, myri10ge_suspend, myri10ge_resume); + static struct pci_driver myri10ge_driver = { .name = "myri10ge", .probe = myri10ge_probe, .remove = myri10ge_remove, .id_table = myri10ge_pci_tbl, -#ifdef CONFIG_PM - .suspend = myri10ge_suspend, - .resume = myri10ge_resume, -#endif + .driver.pm = &myri10ge_pm_ops, }; #ifdef CONFIG_MYRI10GE_DCA From 4a09a981002bafdac1decf4116c77bf8f5496326 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 20 Jul 2020 20:55:58 +0300 Subject: [PATCH 1308/2056] testptp: promote 'perout' variable to int64_t Since 'perout' holds the nanosecond value of the signal's period, it should be a 64-bit value. Current assumption is that it cannot be larger than 1 second. Signed-off-by: Vladimir Oltean Acked-by: Richard Cochran Signed-off-by: David S. Miller --- tools/testing/selftests/ptp/testptp.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c index da7a9dda9490..edc1e50768c2 100644 --- a/tools/testing/selftests/ptp/testptp.c +++ b/tools/testing/selftests/ptp/testptp.c @@ -35,6 +35,8 @@ #define CLOCK_INVALID -1 #endif +#define NSEC_PER_SEC 1000000000LL + /* clock_adjtime is not available in GLIBC < 2.14 */ #if !__GLIBC_PREREQ(2, 14) #include @@ -169,7 +171,6 @@ int main(int argc, char *argv[]) int list_pins = 0; int pct_offset = 0; int n_samples = 0; - int perout = -1; int pin_index = -1, pin_func; int pps = -1; int seconds = 0; @@ -177,6 +178,7 @@ int main(int argc, char *argv[]) int64_t t1, t2, tp; int64_t interval, offset; + int64_t perout = -1; progname = strrchr(argv[0], '/'); progname = progname ? 1+progname : argv[0]; @@ -215,7 +217,7 @@ int main(int argc, char *argv[]) } break; case 'p': - perout = atoi(optarg); + perout = atoll(optarg); break; case 'P': pps = atoi(optarg); @@ -400,8 +402,8 @@ int main(int argc, char *argv[]) perout_request.index = index; perout_request.start.sec = ts.tv_sec + 2; perout_request.start.nsec = 0; - perout_request.period.sec = 0; - perout_request.period.nsec = perout; + perout_request.period.sec = perout / NSEC_PER_SEC; + perout_request.period.nsec = perout % NSEC_PER_SEC; if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) { perror("PTP_PEROUT_REQUEST"); } else { From 7570ebe0410a1df1c9a44071c18685842d84a7e1 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 20 Jul 2020 20:55:59 +0300 Subject: [PATCH 1309/2056] testptp: add new options for perout phase and pulse width Extend the example program for PTP ancillary functionality with the ability to configure not only the periodic output's period (frequency), but also the phase and duty cycle (pulse width) which were newly introduced. The ioctl level also needs to be updated to the new PTP_PEROUT_REQUEST2, since the original PTP_PEROUT_REQUEST doesn't support this functionality. For an in-tree testing program, not having explicit backwards compatibility is fine, as it should always be tested with the current kernel headers and sources. 
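For reference, the ioctl-level usage that the patch switches testptp to looks roughly like the minimal user-space sketch below (this is illustration only, not part of the patch); the device path /dev/ptp1, the channel index and the period/width/phase values are assumptions, while PTP_PEROUT_REQUEST2 and the PTP_PEROUT_DUTY_CYCLE / PTP_PEROUT_PHASE flags are the newly introduced perout API that the commit message refers to.

/* Minimal sketch only -- not part of this patch. Device path, channel
 * index and the timing values are assumptions for illustration.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int request_perout_example(void)
{
	struct ptp_perout_request req;
	int fd, ret;

	fd = open("/dev/ptp1", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.index = 0;                          /* periodic output channel */
	req.period.sec = 1;                     /* 1 s period */
	req.period.nsec = 0;
	req.flags = PTP_PEROUT_DUTY_CYCLE | PTP_PEROUT_PHASE;
	req.on.sec = 0;                         /* 100 ms pulse width */
	req.on.nsec = 100000000;
	req.phase.sec = 0;                      /* 1 us phase offset */
	req.phase.nsec = 1000;

	ret = ioctl(fd, PTP_PEROUT_REQUEST2, &req);
	close(fd);
	return ret;
}

When neither flag is set, the request behaves like the original PTP_PEROUT_REQUEST and the start field is used instead of phase, which is the fallback the patch keeps in testptp.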
Tested with an oscilloscope on the felix switch PHC: echo '2 0' > /sys/class/ptp/ptp1/pins/switch_1588_dat0 ./testptp -d /dev/ptp1 -p 1000000000 -w 100000000 -H 1000 -i 0 Signed-off-by: Vladimir Oltean Acked-by: Richard Cochran Signed-off-by: David S. Miller --- tools/testing/selftests/ptp/testptp.c | 41 ++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/ptp/testptp.c b/tools/testing/selftests/ptp/testptp.c index edc1e50768c2..f7911aaeb007 100644 --- a/tools/testing/selftests/ptp/testptp.c +++ b/tools/testing/selftests/ptp/testptp.c @@ -134,6 +134,8 @@ static void usage(char *progname) " 1 - external time stamp\n" " 2 - periodic output\n" " -p val enable output with a period of 'val' nanoseconds\n" + " -H val set output phase to 'val' nanoseconds (requires -p)\n" + " -w val set output pulse width to 'val' nanoseconds (requires -p)\n" " -P val enable or disable (val=1|0) the system clock PPS\n" " -s set the ptp clock time from the system time\n" " -S set the system time from the ptp clock time\n" @@ -178,11 +180,13 @@ int main(int argc, char *argv[]) int64_t t1, t2, tp; int64_t interval, offset; + int64_t perout_phase = -1; + int64_t pulsewidth = -1; int64_t perout = -1; progname = strrchr(argv[0], '/'); progname = progname ? 1+progname : argv[0]; - while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) { + while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:p:P:sSt:T:w:z"))) { switch (c) { case 'c': capabilities = 1; @@ -199,6 +203,9 @@ int main(int argc, char *argv[]) case 'g': gettime = 1; break; + case 'H': + perout_phase = atoll(optarg); + break; case 'i': index = atoi(optarg); break; @@ -235,6 +242,9 @@ int main(int argc, char *argv[]) settime = 3; seconds = atoi(optarg); break; + case 'w': + pulsewidth = atoi(optarg); + break; case 'z': flagtest = 1; break; @@ -393,6 +403,16 @@ int main(int argc, char *argv[]) } } + if (pulsewidth >= 0 && perout < 0) { + puts("-w can only be specified together with -p"); + return -1; + } + + if (perout_phase >= 0 && perout < 0) { + puts("-H can only be specified together with -p"); + return -1; + } + if (perout >= 0) { if (clock_gettime(clkid, &ts)) { perror("clock_gettime"); @@ -400,11 +420,24 @@ int main(int argc, char *argv[]) } memset(&perout_request, 0, sizeof(perout_request)); perout_request.index = index; - perout_request.start.sec = ts.tv_sec + 2; - perout_request.start.nsec = 0; perout_request.period.sec = perout / NSEC_PER_SEC; perout_request.period.nsec = perout % NSEC_PER_SEC; - if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) { + perout_request.flags = 0; + if (pulsewidth >= 0) { + perout_request.flags |= PTP_PEROUT_DUTY_CYCLE; + perout_request.on.sec = pulsewidth / NSEC_PER_SEC; + perout_request.on.nsec = pulsewidth % NSEC_PER_SEC; + } + if (perout_phase >= 0) { + perout_request.flags |= PTP_PEROUT_PHASE; + perout_request.phase.sec = perout_phase / NSEC_PER_SEC; + perout_request.phase.nsec = perout_phase % NSEC_PER_SEC; + } else { + perout_request.start.sec = ts.tv_sec + 2; + perout_request.start.nsec = 0; + } + + if (ioctl(fd, PTP_PEROUT_REQUEST2, &perout_request)) { perror("PTP_PEROUT_REQUEST"); } else { puts("periodic output request okay"); From 519f0cefb4bcac8faf76b2a7b4042fb950eea23e Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:32 +0300 Subject: [PATCH 1310/2056] net: atlantic: move FRAC_PER_NS to aq_hw.h This patch moves FRAC_PER_NS to aq_hw.h so that it can be used in both hw_atl (A1) and hw_atl2 (A2) in the 
future. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_hw.h | 2 ++ .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 20 +++++++++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index f2663ad22209..284ea943e8cd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -36,6 +36,8 @@ enum aq_tc_mode { (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U) #define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU +#define AQ_FRAC_PER_NS 0x100000000LL + /* Used for rate to Mbps conversion */ #define AQ_MBPS_DIVISOR 125000 /* 1000000 / 8 */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index b023c3324a59..97672ff142a8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -54,8 +54,6 @@ .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U -#define FRAC_PER_NS 0x100000000LL - const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, @@ -1233,7 +1231,7 @@ static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns) if (base_ns != nsi * NSEC_PER_SEC) { s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC, base_ns - nsi * NSEC_PER_SEC); - nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor); + nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor); } *ns = (u32)nsi; @@ -1246,23 +1244,23 @@ hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq, { s64 adj_fns_val; s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy + - FRAC_PER_NS * ptp_adj_freq->ns_phy); + AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy); s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac + - FRAC_PER_NS * ptp_adj_freq->ns_mac); - s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy; - s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac; + AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac); + s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy; + s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac; /* MAC MCP counter freq is macfreq / 4 */ s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) * - 4 * FRAC_PER_NS; + 4 * AQ_FRAC_PER_NS; diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow, AQ_HW_MAC_COUNTER_HZ); - adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS * + adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac) + diff_in_mcp_overflow; - ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS); + ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS); ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj * - FRAC_PER_NS; + AQ_FRAC_PER_NS; } static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta) From 3624aa3c2582e4b9097e7648f6f03c82e474ceb8 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:33 +0300 Subject: [PATCH 1311/2056] net: atlantic: use simple assignment in _get_stats and _get_sw_stats This patch replaces addition assignment operator with a simple assignment in aq_vec_get_stats() and aq_vec_get_sw_stats(), because it is sufficient in both cases and this change simplifies the introduction of u64_stats_update_* in these functions. 
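As a rough illustration of where this is heading (not part of this patch), the u64_stats_update_* helpers from <linux/u64_stats_sync.h> wrap the writer and reader sides roughly as in the sketch below; the structure, field and function names here are made up for the example and do not come from the atlantic driver.

/* Illustrative sketch only; struct/field/function names are assumptions.
 * u64_stats_init(&st->syncp) must be called once at setup time.
 */
#include <linux/u64_stats_sync.h>

struct example_ring_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

static void example_stats_add(struct example_ring_stats *st,
			      u64 packets, u64 bytes)
{
	u64_stats_update_begin(&st->syncp);
	st->packets += packets;
	st->bytes += bytes;
	u64_stats_update_end(&st->syncp);
}

static void example_stats_read(struct example_ring_stats *st,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&st->syncp);
		*packets = st->packets;
		*bytes = st->bytes;
	} while (u64_stats_fetch_retry(&st->syncp, start));
}

With plain assignments in the readers, the read path can later be wrapped in such a fetch_begin/fetch_retry loop without having to reason about partially accumulated values.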
Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_vec.c | 47 ++++++++++--------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index d1d43c8ce400..2acdaee18ba0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings. @@ -349,7 +350,7 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) return &self->aq_ring_param.affinity_mask; } -static void aq_vec_add_stats(struct aq_vec_s *self, +static void aq_vec_get_stats(struct aq_vec_s *self, const unsigned int tc, struct aq_ring_stats_rx_s *stats_rx, struct aq_ring_stats_tx_s *stats_tx) @@ -359,23 +360,23 @@ static void aq_vec_add_stats(struct aq_vec_s *self, if (tc < self->rx_rings) { struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; - stats_rx->packets += rx->packets; - stats_rx->bytes += rx->bytes; - stats_rx->errors += rx->errors; - stats_rx->jumbo_packets += rx->jumbo_packets; - stats_rx->lro_packets += rx->lro_packets; - stats_rx->pg_losts += rx->pg_losts; - stats_rx->pg_flips += rx->pg_flips; - stats_rx->pg_reuses += rx->pg_reuses; + stats_rx->packets = rx->packets; + stats_rx->bytes = rx->bytes; + stats_rx->errors = rx->errors; + stats_rx->jumbo_packets = rx->jumbo_packets; + stats_rx->lro_packets = rx->lro_packets; + stats_rx->pg_losts = rx->pg_losts; + stats_rx->pg_flips = rx->pg_flips; + stats_rx->pg_reuses = rx->pg_reuses; } if (tc < self->tx_rings) { struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; - stats_tx->packets += tx->packets; - stats_tx->bytes += tx->bytes; - stats_tx->errors += tx->errors; - stats_tx->queue_restarts += tx->queue_restarts; + stats_tx->packets = tx->packets; + stats_tx->bytes = tx->bytes; + stats_tx->errors = tx->errors; + stats_tx->queue_restarts = tx->queue_restarts; } } @@ -389,16 +390,16 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); - aq_vec_add_stats(self, tc, &stats_rx, &stats_tx); + aq_vec_get_stats(self, tc, &stats_rx, &stats_tx); /* This data should mimic aq_ethtool_queue_stat_names structure */ - data[count] += stats_rx.packets; - data[++count] += stats_tx.packets; - data[++count] += stats_tx.queue_restarts; - data[++count] += stats_rx.jumbo_packets; - data[++count] += stats_rx.lro_packets; - data[++count] += stats_rx.errors; + data[count] = stats_rx.packets; + data[++count] = stats_tx.packets; + data[++count] = stats_tx.queue_restarts; + data[++count] = stats_rx.jumbo_packets; + data[++count] = stats_rx.lro_packets; + data[++count] = stats_rx.errors; if (p_count) *p_count = ++count; From b772112c5af0e45ddcacb144545f06bdc594c736 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:34 +0300 Subject: [PATCH 1312/2056] net: atlantic: make _get_sw_stats return count as return value This patch changes aq_vec_get_sw_stats() to return count as a return value (which 
was unused) instead of an out parameter. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 4 ++-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 8 ++------ drivers/net/ethernet/aquantia/atlantic/aq_vec.h | 10 +++++----- 3 files changed, 9 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 43b8914c3ef5..d72f40259715 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -907,13 +907,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data) aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) { data += count; - aq_vec_get_sw_stats(aq_vec, tc, data, &count); + count = aq_vec_get_sw_stats(aq_vec, tc, data); } } data += count; -err_exit:; +err_exit: return data; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 2acdaee18ba0..8f0a0d18e711 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -380,8 +380,7 @@ static void aq_vec_get_stats(struct aq_vec_s *self, } } -int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, - unsigned int *p_count) +unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data) { struct aq_ring_stats_rx_s stats_rx; struct aq_ring_stats_tx_s stats_tx; @@ -401,8 +400,5 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, data[++count] = stats_rx.lro_packets; data[++count] = stats_rx.errors; - if (p_count) - *p_count = ++count; - - return 0; + return ++count; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index 541af85e6510..c079fef80da8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings. @@ -35,7 +36,6 @@ void aq_vec_ring_free(struct aq_vec_s *self); int aq_vec_start(struct aq_vec_s *self); void aq_vec_stop(struct aq_vec_s *self); cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); -int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data, - unsigned int *p_count); +unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data); #endif /* AQ_VEC_H */ From 508f2e3dce454843ffd689bb2cf0739a954dd1e9 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:35 +0300 Subject: [PATCH 1313/2056] net: atlantic: split rx and tx per-queue stats This patch splits rx and tx per-queue stats. This change simplifies the follow-up introduction of PTP stats and u64_stats_update_* usage. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 28 ++++++--- .../net/ethernet/aquantia/atlantic/aq_ptp.c | 6 +- .../net/ethernet/aquantia/atlantic/aq_ring.c | 30 +++++++--- .../net/ethernet/aquantia/atlantic/aq_ring.h | 10 +++- .../net/ethernet/aquantia/atlantic/aq_vec.c | 58 +++++-------------- .../net/ethernet/aquantia/atlantic/aq_vec.h | 1 + 6 files changed, 69 insertions(+), 64 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a8f0fbbbd91a..98ba8355a0f0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -89,15 +89,18 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { "InDroppedDma", }; -static const char * const aq_ethtool_queue_stat_names[] = { +static const char * const aq_ethtool_queue_rx_stat_names[] = { "%sQueue[%d] InPackets", - "%sQueue[%d] OutPackets", - "%sQueue[%d] Restarts", "%sQueue[%d] InJumboPackets", "%sQueue[%d] InLroPackets", "%sQueue[%d] InErrors", }; +static const char * const aq_ethtool_queue_tx_stat_names[] = { + "%sQueue[%d] OutPackets", + "%sQueue[%d] Restarts", +}; + #if IS_ENABLED(CONFIG_MACSEC) static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = { "MACSec InCtlPackets", @@ -164,11 +167,12 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = { static u32 aq_ethtool_n_stats(struct net_device *ndev) { + const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names); + const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names); struct aq_nic_s *nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic); u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + - ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs * - cfg->tcs; + (rx_stat_cnt + tx_stat_cnt) * cfg->vecs * cfg->tcs; #if IS_ENABLED(CONFIG_MACSEC) if (nic->macsec_cfg) { @@ -237,7 +241,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev, switch (stringset) { case ETH_SS_STATS: { - const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names); + const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names); + const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names); char tc_string[8]; int tc; @@ -251,9 +256,16 @@ static void aq_ethtool_get_strings(struct net_device *ndev, snprintf(tc_string, 8, "TC%d ", tc); for (i = 0; i < cfg->vecs; i++) { - for (si = 0; si < stat_cnt; si++) { + for (si = 0; si < rx_stat_cnt; si++) { snprintf(p, ETH_GSTRING_LEN, - aq_ethtool_queue_stat_names[si], + aq_ethtool_queue_rx_stat_names[si], + tc_string, + AQ_NIC_CFG_TCVEC2RING(cfg, tc, i)); + p += ETH_GSTRING_LEN; + } + for (si = 0; si < tx_stat_cnt; si++) { + snprintf(p, ETH_GSTRING_LEN, + aq_ethtool_queue_tx_stat_names[si], tc_string, AQ_NIC_CFG_TCVEC2RING(cfg, tc, i)); p += ETH_GSTRING_LEN; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index cb9bf41470fd..116b8891c9c4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -844,7 +844,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic) if (!aq_ptp) return 0; - err = aq_ring_init(&aq_ptp->ptp_tx); + err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX); if (err < 0) goto err_exit; err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw, @@ -853,7 +853,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic) if (err < 0) goto err_exit; - err = aq_ring_init(&aq_ptp->ptp_rx); + err = aq_ring_init(&aq_ptp->ptp_rx, 
ATL_RING_RX); if (err < 0) goto err_exit; err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw, @@ -871,7 +871,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic) if (err < 0) goto err_rx_free; - err = aq_ring_init(&aq_ptp->hwts_rx); + err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX); if (err < 0) goto err_rx_free; err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 8dd59e9fc3aa..fc4e10b064fd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -206,11 +206,12 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, return self; } -int aq_ring_init(struct aq_ring_s *self) +int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) { self->hw_head = 0; self->sw_head = 0; self->sw_tail = 0; + self->ring_type = ring_type; return 0; } @@ -538,7 +539,7 @@ int aq_ring_rx_fill(struct aq_ring_s *self) void aq_ring_rx_deinit(struct aq_ring_s *self) { if (!self) - goto err_exit; + return; for (; self->sw_head != self->sw_tail; self->sw_head = aq_ring_next_dx(self, self->sw_head)) { @@ -546,14 +547,12 @@ void aq_ring_rx_deinit(struct aq_ring_s *self) aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); } - -err_exit:; } void aq_ring_free(struct aq_ring_s *self) { if (!self) - goto err_exit; + return; kfree(self->buff_ring); @@ -561,6 +560,23 @@ void aq_ring_free(struct aq_ring_s *self) dma_free_coherent(aq_nic_get_dev(self->aq_nic), self->size * self->dx_size, self->dx_ring, self->dx_ring_pa); - -err_exit:; +} + +unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) +{ + unsigned int count = 0U; + + if (self->ring_type == ATL_RING_RX) { + /* This data should mimic aq_ethtool_queue_rx_stat_names structure */ + data[count] = self->stats.rx.packets; + data[++count] = self->stats.rx.jumbo_packets; + data[++count] = self->stats.rx.lro_packets; + data[++count] = self->stats.rx.errors; + } else { + /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ + data[count] = self->stats.tx.packets; + data[++count] = self->stats.tx.queue_restarts; + } + + return ++count; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 2c96f20f6289..0cd761ba47a3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -110,6 +110,11 @@ union aq_ring_stats_s { struct aq_ring_stats_tx_s tx; }; +enum atl_ring_type { + ATL_RING_TX, + ATL_RING_RX, +}; + struct aq_ring_s { struct aq_ring_buff_s *buff_ring; u8 *dx_ring; /* descriptors ring, dma shared mem */ @@ -124,6 +129,7 @@ struct aq_ring_s { unsigned int page_order; union aq_ring_stats_s stats; dma_addr_t dx_ring_pa; + enum atl_ring_type ring_type; }; struct aq_ring_param_s { @@ -163,7 +169,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, unsigned int idx, struct aq_nic_cfg_s *aq_nic_cfg); -int aq_ring_init(struct aq_ring_s *self); +int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); void aq_ring_update_queue_state(struct aq_ring_s *ring); @@ -181,4 +187,6 @@ struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self, unsigned int size, unsigned int dx_size); void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s 
*aq_nic); +unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data); + #endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 8f0a0d18e711..b008d12e923a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -181,7 +181,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { - err = aq_ring_init(&ring[AQ_VEC_TX_ID]); + err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX); if (err < 0) goto err_exit; @@ -191,7 +191,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops, if (err < 0) goto err_exit; - err = aq_ring_init(&ring[AQ_VEC_RX_ID]); + err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX); if (err < 0) goto err_exit; @@ -350,55 +350,23 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self) return &self->aq_ring_param.affinity_mask; } -static void aq_vec_get_stats(struct aq_vec_s *self, - const unsigned int tc, - struct aq_ring_stats_rx_s *stats_rx, - struct aq_ring_stats_tx_s *stats_tx) +bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc) { - struct aq_ring_s *ring = self->ring[tc]; - - if (tc < self->rx_rings) { - struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx; - - stats_rx->packets = rx->packets; - stats_rx->bytes = rx->bytes; - stats_rx->errors = rx->errors; - stats_rx->jumbo_packets = rx->jumbo_packets; - stats_rx->lro_packets = rx->lro_packets; - stats_rx->pg_losts = rx->pg_losts; - stats_rx->pg_flips = rx->pg_flips; - stats_rx->pg_reuses = rx->pg_reuses; - } - - if (tc < self->tx_rings) { - struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx; - - stats_tx->packets = tx->packets; - stats_tx->bytes = tx->bytes; - stats_tx->errors = tx->errors; - stats_tx->queue_restarts = tx->queue_restarts; - } + return tc < self->rx_rings && tc < self->tx_rings; } unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data) { - struct aq_ring_stats_rx_s stats_rx; - struct aq_ring_stats_tx_s stats_tx; - unsigned int count = 0U; + unsigned int count; - memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); - memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); + WARN_ONCE(!aq_vec_is_valid_tc(self, tc), + "Invalid tc %u (#rx=%u, #tx=%u)\n", + tc, self->rx_rings, self->tx_rings); + if (!aq_vec_is_valid_tc(self, tc)) + return 0; - aq_vec_get_stats(self, tc, &stats_rx, &stats_tx); + count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data); + count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count); - /* This data should mimic aq_ethtool_queue_stat_names structure - */ - data[count] = stats_rx.packets; - data[++count] = stats_tx.packets; - data[++count] = stats_tx.queue_restarts; - data[++count] = stats_rx.jumbo_packets; - data[++count] = stats_rx.lro_packets; - data[++count] = stats_rx.errors; - - return ++count; + return count; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h index c079fef80da8..567f3d4b79a2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h @@ -36,6 +36,7 @@ void aq_vec_ring_free(struct aq_vec_s *self); int aq_vec_start(struct aq_vec_s *self); void aq_vec_stop(struct aq_vec_s *self); cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self); +bool 
aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc); unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data); #endif /* AQ_VEC_H */ From d7d8bb9286134bcc57941c38ed2d69c50fc59511 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:36 +0300 Subject: [PATCH 1314/2056] net: atlantic: use u64_stats_update_* to protect access to 64-bit stats This patch adds u64_stats_update_* usage to protect access to 64-bit stats, where necessary. This is necessary for per-ring stats, because they are updated by the driver directly, so there is a possibility for a partial read. Other stats require no additional protection, e.g.: * all MACSec stats are fetched directly from HW (under semaphore); * nic/ndev stats (aq_stats_s) are fetched directly from FW (under mutex). Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_ptp.c | 2 + .../net/ethernet/aquantia/atlantic/aq_ring.c | 46 ++++++++++++++++--- .../net/ethernet/aquantia/atlantic/aq_ring.h | 9 ++-- 3 files changed, 47 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index 116b8891c9c4..ec6aa9bb7dfc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -782,8 +782,10 @@ int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb) err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw, ring, frags); if (err >= 0) { + u64_stats_update_begin(&ring->stats.tx.syncp); ++ring->stats.tx.packets; ring->stats.tx.bytes += skb->len; + u64_stats_update_end(&ring->stats.tx.syncp); } } else { err = NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index fc4e10b064fd..b51ab2dbf6fe 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -70,18 +70,24 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= (PAGE_SIZE << order)) { + u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_flips++; + u64_stats_update_end(&self->stats.rx.syncp); } else { /* Buffer exhausted. 
We have other users and * should release this page and realloc */ aq_free_rxpage(&rxbuf->rxdata, aq_nic_get_dev(self->aq_nic)); + u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_losts++; + u64_stats_update_end(&self->stats.rx.syncp); } } else { rxbuf->rxdata.pg_off = 0; + u64_stats_update_begin(&self->stats.rx.syncp); self->stats.rx.pg_reuses++; + u64_stats_update_end(&self->stats.rx.syncp); } } @@ -213,6 +219,11 @@ int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) self->sw_tail = 0; self->ring_type = ring_type; + if (self->ring_type == ATL_RING_RX) + u64_stats_init(&self->stats.rx.syncp); + else + u64_stats_init(&self->stats.tx.syncp); + return 0; } @@ -239,7 +250,9 @@ void aq_ring_queue_wake(struct aq_ring_s *ring) ring->idx))) { netif_wake_subqueue(ndev, AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); + u64_stats_update_begin(&ring->stats.tx.syncp); ring->stats.tx.queue_restarts++; + u64_stats_update_end(&ring->stats.tx.syncp); } } @@ -281,8 +294,10 @@ bool aq_ring_tx_clean(struct aq_ring_s *self) } if (unlikely(buff->is_eop)) { + u64_stats_update_begin(&self->stats.tx.syncp); ++self->stats.tx.packets; self->stats.tx.bytes += buff->skb->len; + u64_stats_update_end(&self->stats.tx.syncp); dev_kfree_skb_any(buff->skb); } @@ -302,7 +317,9 @@ static void aq_rx_checksum(struct aq_ring_s *self, return; if (unlikely(buff->is_cso_err)) { + u64_stats_update_begin(&self->stats.rx.syncp); ++self->stats.rx.errors; + u64_stats_update_end(&self->stats.rx.syncp); skb->ip_summed = CHECKSUM_NONE; return; } @@ -372,13 +389,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff_->is_cleaned = true; } while (!buff_->is_eop); + u64_stats_update_begin(&self->stats.rx.syncp); ++self->stats.rx.errors; + u64_stats_update_end(&self->stats.rx.syncp); continue; } } if (buff->is_error) { + u64_stats_update_begin(&self->stats.rx.syncp); ++self->stats.rx.errors; + u64_stats_update_end(&self->stats.rx.syncp); continue; } @@ -479,8 +500,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, : AQ_NIC_RING2QMAP(self->aq_nic, self->idx)); + u64_stats_update_begin(&self->stats.rx.syncp); ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; + u64_stats_update_end(&self->stats.rx.syncp); napi_gro_receive(napi, skb); } @@ -564,18 +587,27 @@ void aq_ring_free(struct aq_ring_s *self) unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) { - unsigned int count = 0U; + unsigned int count; + unsigned int start; if (self->ring_type == ATL_RING_RX) { /* This data should mimic aq_ethtool_queue_rx_stat_names structure */ - data[count] = self->stats.rx.packets; - data[++count] = self->stats.rx.jumbo_packets; - data[++count] = self->stats.rx.lro_packets; - data[++count] = self->stats.rx.errors; + do { + count = 0; + start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp); + data[count] = self->stats.rx.packets; + data[++count] = self->stats.rx.jumbo_packets; + data[++count] = self->stats.rx.lro_packets; + data[++count] = self->stats.rx.errors; + } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ - data[count] = self->stats.tx.packets; - data[++count] = self->stats.tx.queue_restarts; + do { + count = 0; + start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp); + data[count] = self->stats.tx.packets; + data[++count] = self->stats.tx.queue_restarts; + } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start)); } return ++count; diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 0cd761ba47a3..c92c3a0651a9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_ring.h: Declaration of functions for Rx/Tx rings. */ @@ -88,6 +89,7 @@ struct __packed aq_ring_buff_s { }; struct aq_ring_stats_rx_s { + struct u64_stats_sync syncp; /* must be first */ u64 errors; u64 packets; u64 bytes; @@ -99,6 +101,7 @@ struct aq_ring_stats_rx_s { }; struct aq_ring_stats_tx_s { + struct u64_stats_sync syncp; /* must be first */ u64 errors; u64 packets; u64 bytes; From aa7e17a3e35a6e3fbf4ab2055a64097efcd09310 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Mon, 20 Jul 2020 21:32:37 +0300 Subject: [PATCH 1315/2056] net: atlantic: additional per-queue stats This patch adds additional per-queue stats, these could be useful for debugging and diagnostics. Signed-off-by: Dmitry Bogdanov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../net/ethernet/aquantia/atlantic/aq_ethtool.c | 3 +++ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 14 ++++++++++++++ drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 3 +++ drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3 +++ 4 files changed, 23 insertions(+) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 98ba8355a0f0..9e18d30d2e44 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -94,6 +94,9 @@ static const char * const aq_ethtool_queue_rx_stat_names[] = { "%sQueue[%d] InJumboPackets", "%sQueue[%d] InLroPackets", "%sQueue[%d] InErrors", + "%sQueue[%d] AllocFails", + "%sQueue[%d] SkbAllocFails", + "%sQueue[%d] Polls", }; static const char * const aq_ethtool_queue_tx_stat_names[] = { diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b51ab2dbf6fe..4f913658eea4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -94,6 +94,11 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, if (!rxbuf->rxdata.page) { ret = aq_get_rxpage(&rxbuf->rxdata, order, aq_nic_get_dev(self->aq_nic)); + if (ret) { + u64_stats_update_begin(&self->stats.rx.syncp); + self->stats.rx.alloc_fails++; + u64_stats_update_end(&self->stats.rx.syncp); + } return ret; } @@ -414,6 +419,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self, skb = build_skb(aq_buf_vaddr(&buff->rxdata), AQ_CFG_RX_FRAME_MAX); if (unlikely(!skb)) { + u64_stats_update_begin(&self->stats.rx.syncp); + self->stats.rx.skb_alloc_fails++; + u64_stats_update_end(&self->stats.rx.syncp); err = -ENOMEM; goto err_exit; } @@ -427,6 +435,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } else { skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE); if (unlikely(!skb)) { + u64_stats_update_begin(&self->stats.rx.syncp); + self->stats.rx.skb_alloc_fails++; + u64_stats_update_end(&self->stats.rx.syncp); err = -ENOMEM; goto err_exit; } @@ -599,6 +610,9 @@ unsigned int 
aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) data[++count] = self->stats.rx.jumbo_packets; data[++count] = self->stats.rx.lro_packets; data[++count] = self->stats.rx.errors; + data[++count] = self->stats.rx.alloc_fails; + data[++count] = self->stats.rx.skb_alloc_fails; + data[++count] = self->stats.rx.polls; } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); } else { /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index c92c3a0651a9..93659e58f1ce 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -95,6 +95,9 @@ struct aq_ring_stats_rx_s { u64 bytes; u64 lro_packets; u64 jumbo_packets; + u64 alloc_fails; + u64 skb_alloc_fails; + u64 polls; u64 pg_losts; u64 pg_flips; u64 pg_reuses; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index b008d12e923a..d281322d7dd2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -45,6 +45,9 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) } else { for (i = 0U, ring = self->ring[0]; self->tx_rings > i; ++i, ring = self->ring[i]) { + u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp); + ring[AQ_VEC_RX_ID].stats.rx.polls++; + u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp); if (self->aq_hw_ops->hw_ring_tx_head_update) { err = self->aq_hw_ops->hw_ring_tx_head_update( self->aq_hw, From 14b539a3490102750c86a63a8f27a69935e6a84e Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Jul 2020 21:32:38 +0300 Subject: [PATCH 1316/2056] net: atlantic: PTP statistics This patch adds PTP ring statistics. Previously these were missing from the overall stats, which made debugging and analysis harder. Signed-off-by: Pavel Belous Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S.
Miller --- .../ethernet/aquantia/atlantic/aq_ethtool.c | 37 ++++++++++++ .../net/ethernet/aquantia/atlantic/aq_ptp.c | 60 +++++++++++++------ .../net/ethernet/aquantia/atlantic/aq_ptp.h | 27 ++++++++- 3 files changed, 105 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 9e18d30d2e44..1ab5314c4c1b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -177,6 +177,11 @@ static u32 aq_ethtool_n_stats(struct net_device *ndev) u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + (rx_stat_cnt + tx_stat_cnt) * cfg->vecs * cfg->tcs; +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + n_stats += rx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_RX) + + tx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_TX); +#endif + #if IS_ENABLED(CONFIG_MACSEC) if (nic->macsec_cfg) { n_stats += ARRAY_SIZE(aq_macsec_stat_names) + @@ -199,6 +204,9 @@ static void aq_ethtool_stats(struct net_device *ndev, memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64)); data = aq_nic_get_stats(aq_nic, data); +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + data = aq_ptp_get_stats(aq_nic, data); +#endif #if IS_ENABLED(CONFIG_MACSEC) data = aq_macsec_get_stats(aq_nic, data); #endif @@ -275,6 +283,35 @@ static void aq_ethtool_get_strings(struct net_device *ndev, } } } +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) + if (nic->aq_ptp) { + const int rx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_RX); + const int tx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_TX); + unsigned int ptp_ring_idx = + aq_ptp_ring_idx(nic->aq_nic_cfg.tc_mode); + + snprintf(tc_string, 8, "PTP "); + + for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) { + for (si = 0; si < rx_stat_cnt; si++) { + snprintf(p, ETH_GSTRING_LEN, + aq_ethtool_queue_rx_stat_names[si], + tc_string, + i ? PTP_HWST_RING_IDX : ptp_ring_idx); + p += ETH_GSTRING_LEN; + } + if (i >= tx_ring_cnt) + continue; + for (si = 0; si < tx_stat_cnt; si++) { + snprintf(p, ETH_GSTRING_LEN, + aq_ethtool_queue_tx_stat_names[si], + tc_string, + i ? PTP_HWST_RING_IDX : ptp_ring_idx); + p += ETH_GSTRING_LEN; + } + } + } +#endif #if IS_ENABLED(CONFIG_MACSEC) if (!nic->macsec_cfg) break; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c index ec6aa9bb7dfc..06de19f63287 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c @@ -81,6 +81,8 @@ struct aq_ptp_s { bool extts_pin_enabled; u64 last_sync1588_ts; + + bool a1_ptp; }; struct ptp_tm_offset { @@ -947,21 +949,6 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic) aq_ring_rx_deinit(&aq_ptp->ptp_rx); } -#define PTP_8TC_RING_IDX 8 -#define PTP_4TC_RING_IDX 16 -#define PTP_HWST_RING_IDX 31 - -/* Index must be 8 (8 TCs) or 16 (4 TCs). - * It depends on Traffic Class mode. 
- */ -static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode) -{ - if (tc_mode == AQ_TC_MODE_8TCS) - return PTP_8TC_RING_IDX; - - return PTP_4TC_RING_IDX; -} - int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) { struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; @@ -973,7 +960,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) if (!aq_ptp) return 0; - tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); + tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic, tx_ring_idx, &aq_nic->aq_nic_cfg); @@ -982,7 +969,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic) goto err_exit; } - rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); + rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode); ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic, rx_ring_idx, &aq_nic->aq_nic_cfg); @@ -1174,11 +1161,17 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w); int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) { + bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC); struct hw_atl_utils_mbox mbox; struct ptp_clock *clock; struct aq_ptp_s *aq_ptp; int err = 0; + if (!a1_ptp) { + aq_nic->aq_ptp = NULL; + return 0; + } + if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) { aq_nic->aq_ptp = NULL; return 0; @@ -1205,6 +1198,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) } aq_ptp->aq_nic = aq_nic; + aq_ptp->a1_ptp = a1_ptp; spin_lock_init(&aq_ptp->ptp_lock); spin_lock_init(&aq_ptp->ptp_ring_lock); @@ -1395,4 +1389,36 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w) schedule_delayed_work(&aq_ptp->poll_sync, timeout); } } + +int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type) +{ + if (!aq_nic->aq_ptp) + return 0; + + /* Additional RX ring is allocated for PTP HWTS on A1 */ + return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1; +} + +u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data) +{ + struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp; + unsigned int count = 0U; + + if (!aq_ptp) + return data; + + count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data); + data += count; + count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data); + data += count; + + if (aq_ptp->a1_ptp) { + /* Only Receive ring for HWTS */ + count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data); + data += count; + } + + return data; +} + #endif diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h index 231906431a48..28ccb7ca2df9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Aquantia Corporation Network Driver - * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_ptp.h: Declaration of PTP functions. @@ -10,6 +12,23 @@ #include +#include "aq_ring.h" + +#define PTP_8TC_RING_IDX 8 +#define PTP_4TC_RING_IDX 16 +#define PTP_HWST_RING_IDX 31 + +/* Index must to be 8 (8 TCs) or 16 (4 TCs). + * It depends from Traffic Class mode. 
+ */ +static inline unsigned int aq_ptp_ring_idx(const enum aq_tc_mode tc_mode) +{ + if (tc_mode == AQ_TC_MODE_8TCS) + return PTP_8TC_RING_IDX; + + return PTP_4TC_RING_IDX; +} + #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) /* Common functions */ @@ -55,6 +74,10 @@ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp); int aq_ptp_link_change(struct aq_nic_s *aq_nic); +/* PTP ring statistics */ +int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type); +u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data); + #else static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec) From 8bd60710852fffe4c32b5b7f84de2a6c19d12971 Mon Sep 17 00:00:00 2001 From: Igor Russkikh Date: Mon, 20 Jul 2020 21:32:39 +0300 Subject: [PATCH 1317/2056] net: atlantic: enable ipv6 support for TCP LSO and UDP GSO This patch enables ipv6 support for TCP LSO and UDP GSO. The code itself (aq_nic_map_skb) was ready for this after the UDP GSO feature was added, but the corresponding NETIF_F_TSO6 flag wasn't enabled. We have now tested both TCP and UDP v6 GSO and are enabling them safely. Signed-off-by: Igor Russkikh Signed-off-by: Mark Starovoytov Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2 +- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index d72f40259715..c6b0981c8751 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -371,7 +371,7 @@ void aq_nic_ndev_init(struct aq_nic_s *self) self->ndev->features = aq_hw_caps->hw_features; self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_SG | - NETIF_F_LRO | NETIF_F_TSO; + NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6; self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4; self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 97672ff142a8..51c8962b7a0e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -40,6 +40,7 @@ NETIF_F_RXHASH | \ NETIF_F_SG | \ NETIF_F_TSO | \ + NETIF_F_TSO6 | \ NETIF_F_LRO | \ NETIF_F_NTUPLE | \ NETIF_F_HW_VLAN_CTAG_FILTER | \ From 1e41b3fee795faa8a67713749a21e60828ae21ad Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Mon, 20 Jul 2020 21:32:40 +0300 Subject: [PATCH 1318/2056] net: atlantic: add support for 64-bit reads/writes This patch adds support for 64-bit reads/writes where applicable, e.g. A2 supports them. Signed-off-by: Pavel Belous Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S.
Miller --- .../net/ethernet/aquantia/atlantic/aq_hw.h | 1 + .../ethernet/aquantia/atlantic/aq_hw_utils.c | 29 +++++++++++++++---- .../ethernet/aquantia/atlantic/aq_hw_utils.h | 8 +++-- .../aquantia/atlantic/hw_atl/hw_atl_a0.c | 1 + .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 1 + .../aquantia/atlantic/hw_atl2/hw_atl2.c | 1 + 6 files changed, 33 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 284ea943e8cd..96d59ee3eb70 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -67,6 +67,7 @@ struct aq_hw_caps_s { u8 rx_rings; bool flow_control; bool is_64_dma; + bool op64bit; u32 priv_data_len; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index 342c5179f846..ae85c0a7d238 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. */ /* File aq_hw_utils.c: Definitions of helper functions used across @@ -9,6 +10,9 @@ */ #include "aq_hw_utils.h" + +#include + #include "aq_hw.h" #include "aq_nic.h" @@ -56,13 +60,28 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value) */ u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg) { - u64 value = aq_hw_read_reg(hw, reg); + u64 value = U64_MAX; - value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32; + if (hw->aq_nic_cfg->aq_hw_caps->op64bit) + value = readq(hw->mmio + reg); + else + value = lo_hi_readq(hw->mmio + reg); + + if (value == U64_MAX && + readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX) + aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG); return value; } +void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value) +{ + if (hw->aq_nic_cfg->aq_hw_caps->op64bit) + writeq(value, hw->mmio + reg); + else + lo_hi_writeq(value, hw->mmio + reg); +} + int aq_hw_err_from_flags(struct aq_hw_s *hw) { int err = 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h index 32aa5f2fb840..ffa6e4067c21 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * aQuantia Corporation Network Driver - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
*/ /* File aq_hw_utils.h: Declaration of helper functions used across hardware @@ -33,6 +34,7 @@ u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift); u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg); void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value); u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg); +void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value); int aq_hw_err_from_flags(struct aq_hw_s *hw); int aq_hw_num_tcs(struct aq_hw_s *hw); int aq_hw_q_per_tc(struct aq_hw_s *hw); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index a312864969af..8f8b90436ced 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -18,6 +18,7 @@ #define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ .is_64_dma = true, \ + .op64bit = false, \ .msix_irqs = 4U, \ .irq_mask = ~0U, \ .vecs = HW_ATL_A0_RSS_MAX, \ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 51c8962b7a0e..ee74cad4a168 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -20,6 +20,7 @@ #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ .is_64_dma = true, \ + .op64bit = false, \ .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL_B0_RSS_MAX, \ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c index c65e6daad0e5..92f64048bf69 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -21,6 +21,7 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, #define DEFAULT_BOARD_BASIC_CAPABILITIES \ .is_64_dma = true, \ + .op64bit = true, \ .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL2_RSS_MAX, \ From 88bc9cf143a1f34978bcda21114cc81324c9e118 Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:41 +0300 Subject: [PATCH 1319/2056] net: atlantic: use U32_MAX in aq_hw_utils.c This patch replaces magic constant ~0U usage with U32_MAX in aq_hw_utils.c Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c index ae85c0a7d238..1921741f7311 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c @@ -41,9 +41,8 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg) { u32 value = readl(hw->mmio + reg); - if ((~0U) == value && - (~0U) == readl(hw->mmio + - hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr)) + if (value == U32_MAX && + readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX) aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG); return value; From b98ffe6fa415a6ea5cbc7e8d58dd016e83fe33ea Mon Sep 17 00:00:00 2001 From: Nikita Danilov Date: Mon, 20 Jul 2020 21:32:42 +0300 Subject: [PATCH 1320/2056] net: atlantic: use intermediate variable to improve readability a bit This patch syncs up hw_atl_a0.c with an out-of-tree driver, where an intermediate variable was introduced in a couple of functions to improve the code readability a bit. 
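A minimal, self-contained sketch of the same pattern follows (simplified, hypothetical structures, not the real driver types): a repeatedly dereferenced pointer is cached in a short-lived local so each access fits on one line.

#include <stdbool.h>

struct nic_cfg {
	bool is_mc_list_enabled;
	unsigned int mc_list_count;
};

struct nic {
	struct nic_cfg *aq_nic_cfg;
};

/* Before: the full chain self->aq_nic_cfg->... is spelled out on every access. */
static unsigned int mc_count_before(struct nic *self)
{
	return self->aq_nic_cfg->is_mc_list_enabled ?
	       self->aq_nic_cfg->mc_list_count : 0U;
}

/* After: one intermediate variable, shorter lines, identical behaviour. */
static unsigned int mc_count_after(struct nic *self)
{
	struct nic_cfg *cfg = self->aq_nic_cfg;

	return cfg->is_mc_list_enabled ? cfg->mc_list_count : 0U;
}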
Signed-off-by: Nikita Danilov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../aquantia/atlantic/hw_atl/hw_atl_a0.c | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 8f8b90436ced..e1877d520135 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -752,6 +752,7 @@ static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask) static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, unsigned int packet_filter) { + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; unsigned int i = 0U; hw_atl_rpfl2promiscuous_mode_en_set(self, @@ -760,14 +761,13 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, IS_FILTER_ENABLED(IFF_MULTICAST), 0); hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST)); - self->aq_nic_cfg->is_mc_list_enabled = - IS_FILTER_ENABLED(IFF_MULTICAST); + cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST); for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i) hw_atl_rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled && - (i <= self->aq_nic_cfg->mc_list_count)) ? - 1U : 0U, i); + (cfg->is_mc_list_enabled && + (i <= cfg->mc_list_count)) ? 1U : 0U, + i); return aq_hw_err_from_flags(self); } @@ -780,19 +780,18 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, [ETH_ALEN], u32 count) { + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { err = EBADRQC; goto err_exit; } - for (self->aq_nic_cfg->mc_list_count = 0U; - self->aq_nic_cfg->mc_list_count < count; - ++self->aq_nic_cfg->mc_list_count) { - u32 i = self->aq_nic_cfg->mc_list_count; + for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) { + u32 i = cfg->mc_list_count; u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | - (ar_mac[i][4] << 8) | ar_mac[i][5]; + (ar_mac[i][4] << 8) | ar_mac[i][5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i); @@ -805,7 +804,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, HW_ATL_A0_MAC_MIN + i); hw_atl_rpfl2_uc_flr_en_set(self, - (self->aq_nic_cfg->is_mc_list_enabled), + (cfg->is_mc_list_enabled), HW_ATL_A0_MAC_MIN + i); } From a89df867ce1a3368015ff6f85ab4a23d2a0aaba1 Mon Sep 17 00:00:00 2001 From: Dmitry Bogdanov Date: Mon, 20 Jul 2020 21:32:43 +0300 Subject: [PATCH 1321/2056] net: atlantic: A0 ntuple filters This patch adds support for ntuple filters on A0. Signed-off-by: Dmitry Bogdanov Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. 
Miller --- .../aquantia/atlantic/hw_atl/hw_atl_a0.c | 116 +++++++++++++----- 1 file changed, 88 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index e1877d520135..c38a4b8a14cb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -16,33 +16,35 @@ #include "hw_atl_llh.h" #include "hw_atl_a0_internal.h" -#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ - .is_64_dma = true, \ - .op64bit = false, \ - .msix_irqs = 4U, \ - .irq_mask = ~0U, \ - .vecs = HW_ATL_A0_RSS_MAX, \ - .tcs_max = HW_ATL_A0_TC_MAX, \ - .rxd_alignment = 1U, \ - .rxd_size = HW_ATL_A0_RXD_SIZE, \ - .rxds_max = HW_ATL_A0_MAX_RXD, \ - .rxds_min = HW_ATL_A0_MIN_RXD, \ - .txd_alignment = 1U, \ - .txd_size = HW_ATL_A0_TXD_SIZE, \ - .txds_max = HW_ATL_A0_MAX_TXD, \ - .txds_min = HW_ATL_A0_MIN_RXD, \ - .txhwb_alignment = 4096U, \ - .tx_rings = HW_ATL_A0_TX_RINGS, \ - .rx_rings = HW_ATL_A0_RX_RINGS, \ - .hw_features = NETIF_F_HW_CSUM | \ - NETIF_F_RXHASH | \ - NETIF_F_RXCSUM | \ - NETIF_F_SG | \ - NETIF_F_TSO, \ - .hw_priv_flags = IFF_UNICAST_FLT, \ - .flow_control = true, \ - .mtu = HW_ATL_A0_MTU_JUMBO, \ - .mac_regs_count = 88, \ +#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \ + .is_64_dma = true, \ + .op64bit = false, \ + .msix_irqs = 4U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL_A0_RSS_MAX, \ + .tcs_max = HW_ATL_A0_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL_A0_RXD_SIZE, \ + .rxds_max = HW_ATL_A0_MAX_RXD, \ + .rxds_min = HW_ATL_A0_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL_A0_TXD_SIZE, \ + .txds_max = HW_ATL_A0_MAX_TXD, \ + .txds_min = HW_ATL_A0_MIN_RXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL_A0_TX_RINGS, \ + .rx_rings = HW_ATL_A0_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_NTUPLE | \ + NETIF_F_HW_VLAN_CTAG_FILTER, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL_A0_MTU_JUMBO, \ + .mac_regs_count = 88, \ .hw_alive_check_addr = 0x10U const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { @@ -330,6 +332,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) err = -EINVAL; goto err_exit; } + h = (mac_addr[0] << 8) | (mac_addr[1]); l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | (mac_addr[4] << 8) | mac_addr[5]; @@ -356,7 +359,6 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr) struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; int err = 0; - hw_atl_a0_hw_init_tx_path(self); hw_atl_a0_hw_init_rx_path(self); @@ -885,6 +887,63 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, return aq_hw_err_from_flags(self); } +static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data) +{ + u8 location = data->location; + + if (!data->is_ipv6) { + hw_atl_rpfl3l4_cmd_clear(self, location); + hw_atl_rpf_l4_spd_set(self, 0U, location); + hw_atl_rpf_l4_dpd_set(self, 0U, location); + hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location); + hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location); + } else { + int i; + + for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) { + hw_atl_rpfl3l4_cmd_clear(self, location + i); + hw_atl_rpf_l4_spd_set(self, 0U, location + i); + hw_atl_rpf_l4_dpd_set(self, 0U, location + i); + } + hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location); + hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location); + } + 
+ return aq_hw_err_from_flags(self); +} + +static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data) +{ + u8 location = data->location; + + hw_atl_a0_hw_fl3l4_clear(self, data); + + if (data->cmd) { + if (!data->is_ipv6) { + hw_atl_rpfl3l4_ipv4_dest_addr_set(self, + location, + data->ip_dst[0]); + hw_atl_rpfl3l4_ipv4_src_addr_set(self, + location, + data->ip_src[0]); + } else { + hw_atl_rpfl3l4_ipv6_dest_addr_set(self, + location, + data->ip_dst); + hw_atl_rpfl3l4_ipv6_src_addr_set(self, + location, + data->ip_src); + } + } + hw_atl_rpf_l4_dpd_set(self, data->p_dst, location); + hw_atl_rpf_l4_spd_set(self, data->p_src, location); + hw_atl_rpfl3l4_cmd_set(self, location, data->cmd); + + return aq_hw_err_from_flags(self); +} + const struct aq_hw_ops hw_atl_ops_a0 = { .hw_soft_reset = hw_atl_utils_soft_reset, .hw_prepare = hw_atl_utils_initfw, @@ -911,6 +970,7 @@ const struct aq_hw_ops hw_atl_ops_a0 = { .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init, .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init, .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set, + .hw_filter_l3l4_set = hw_atl_a0_hw_fl3l4_set, .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set, .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set, .hw_rss_set = hw_atl_a0_hw_rss_set, From 8dcf2ad39fdb2d183b7bd4307c837713e3150b9a Mon Sep 17 00:00:00 2001 From: Mark Starovoytov Date: Mon, 20 Jul 2020 21:32:44 +0300 Subject: [PATCH 1322/2056] net: atlantic: add hwmon getter for MAC temperature This patch adds the possibility to obtain MAC temperature via hwmon. On A1 there are two separate temperature sensors. On A2 there's only one temperature sensor, which is used for reporting both MAC and PHY temperature. Signed-off-by: Mark Starovoytov Signed-off-by: Igor Russkikh Signed-off-by: David S. Miller --- .../ethernet/aquantia/atlantic/aq_drvinfo.c | 62 ++++++++++++++----- .../ethernet/aquantia/atlantic/aq_drvinfo.h | 10 +-- .../net/ethernet/aquantia/atlantic/aq_hw.h | 4 ++ .../aquantia/atlantic/hw_atl/hw_atl_b0.c | 44 +++++++++++++ .../aquantia/atlantic/hw_atl/hw_atl_llh.c | 44 +++++++++++++ .../aquantia/atlantic/hw_atl/hw_atl_llh.h | 18 ++++++ .../atlantic/hw_atl/hw_atl_llh_internal.h | 30 +++++++++ .../aquantia/atlantic/hw_atl/hw_atl_utils.c | 1 + .../atlantic/hw_atl/hw_atl_utils_fw2x.c | 3 +- .../atlantic/hw_atl2/hw_atl2_utils_fw.c | 21 +++++++ 10 files changed, 215 insertions(+), 22 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c index 6da65099047d..d3526cd38f3d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2014-2019 aQuantia Corporation. */ +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
+ */ /* File aq_drvinfo.c: Definition of common code for firmware info in sys.*/ @@ -12,32 +16,51 @@ #include #include "aq_drvinfo.h" +#include "aq_nic.h" #if IS_REACHABLE(CONFIG_HWMON) +static const char * const atl_temp_label[] = { + "PHY Temperature", + "MAC Temperature", +}; + static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *value) { struct aq_nic_s *aq_nic = dev_get_drvdata(dev); + int err = 0; int temp; - int err; if (!aq_nic) return -EIO; - if (type != hwmon_temp) + if (type != hwmon_temp || attr != hwmon_temp_input) return -EOPNOTSUPP; - if (!aq_nic->aq_fw_ops->get_phy_temp) - return -EOPNOTSUPP; + switch (channel) { + case 0: + if (!aq_nic->aq_fw_ops->get_phy_temp) + return -EOPNOTSUPP; - switch (attr) { - case hwmon_temp_input: err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp); *value = temp; - return err; + break; + case 1: + if (!aq_nic->aq_fw_ops->get_mac_temp && + !aq_nic->aq_hw_ops->hw_get_mac_temp) + return -EOPNOTSUPP; + + if (aq_nic->aq_fw_ops->get_mac_temp) + err = aq_nic->aq_fw_ops->get_mac_temp(aq_nic->aq_hw, &temp); + else + err = aq_nic->aq_hw_ops->hw_get_mac_temp(aq_nic->aq_hw, &temp); + *value = temp; + break; default: return -EOPNOTSUPP; } + + return err; } static int aq_hwmon_read_string(struct device *dev, @@ -49,28 +72,32 @@ static int aq_hwmon_read_string(struct device *dev, if (!aq_nic) return -EIO; - if (type != hwmon_temp) + if (type != hwmon_temp || attr != hwmon_temp_label) return -EOPNOTSUPP; - if (!aq_nic->aq_fw_ops->get_phy_temp) + if (channel < ARRAY_SIZE(atl_temp_label)) + *str = atl_temp_label[channel]; + else return -EOPNOTSUPP; - switch (attr) { - case hwmon_temp_label: - *str = "PHY Temperature"; - return 0; - default: - return -EOPNOTSUPP; - } + return 0; } static umode_t aq_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { + const struct aq_nic_s *nic = data; + if (type != hwmon_temp) return 0; + if (channel == 0 && !nic->aq_fw_ops->get_phy_temp) + return 0; + else if (channel == 1 && !nic->aq_fw_ops->get_mac_temp && + !nic->aq_hw_ops->hw_get_mac_temp) + return 0; + switch (attr) { case hwmon_temp_input: case hwmon_temp_label: @@ -87,6 +114,7 @@ static const struct hwmon_ops aq_hwmon_ops = { }; static u32 aq_hwmon_temp_config[] = { + HWMON_T_INPUT | HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL, 0, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h index 23a0487893a7..59113a20622a 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h @@ -1,14 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (C) 2014-2017 aQuantia Corporation. */ +/* Atlantic Network Driver + * + * Copyright (C) 2014-2019 aQuantia Corporation + * Copyright (C) 2019-2020 Marvell International Ltd. 
+ */ /* File aq_drvinfo.h: Declaration of common code for firmware info in sys.*/ #ifndef AQ_DRVINFO_H #define AQ_DRVINFO_H -#include "aq_nic.h" -#include "aq_hw.h" -#include "hw_atl/hw_atl_utils.h" +struct net_device; int aq_drvinfo_init(struct net_device *ndev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 96d59ee3eb70..95ee1336ac79 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -333,6 +333,8 @@ struct aq_hw_ops { int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc); int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable); + + int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp); }; struct aq_fw_ops { @@ -355,6 +357,8 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); + int (*get_mac_temp)(struct aq_hw_s *self, int *temp); + int (*get_phy_temp)(struct aq_hw_s *self, int *temp); u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ee74cad4a168..34626eef2909 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -1581,6 +1581,48 @@ int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable) return 0; } +static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self) +{ + if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self)) + return 1; + + return 0; +} + +static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp) +{ + bool ts_disabled; + int err; + u32 val; + u32 ts; + + ts_disabled = (hw_atl_ts_power_down_get(self) == 1U); + + if (ts_disabled) { + // Set AFE Temperature Sensor to on (off by default) + hw_atl_ts_power_down_set(self, 0U); + + // Reset internal capacitors, biasing, and counters + hw_atl_ts_reset_set(self, 1); + hw_atl_ts_reset_set(self, 0); + } + + err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get, + self, val, val == 1, 10000U, 500000U); + if (err) + return err; + + ts = hw_atl_ts_data_get(self); + *temp = ts * ts * 16 / 100000 + 60 * ts - 83410; + + if (ts_disabled) { + // Set AFE Temperature Sensor back to off + hw_atl_ts_power_down_set(self, 1U); + } + + return 0; +} + const struct aq_hw_ops hw_atl_ops_b0 = { .hw_soft_reset = hw_atl_utils_soft_reset, .hw_prepare = hw_atl_utils_initfw, @@ -1637,4 +1679,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_offload = hw_atl_b0_hw_offload_set, .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, + + .hw_get_mac_temp = hw_atl_b0_get_mac_temp, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 9c3debae425f..7b67bdd8a258 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -13,6 +13,50 @@ #include "hw_atl_llh_internal.h" #include "../aq_hw_utils.h" +void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_RESET_ADR, + HW_ATL_TS_RESET_MSK, + HW_ATL_TS_RESET_SHIFT, + val); +} + +void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR, + HW_ATL_TS_POWER_DOWN_MSK, + HW_ATL_TS_POWER_DOWN_SHIFT, + val); +} + +u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw) +{ + return 
aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR, + HW_ATL_TS_POWER_DOWN_MSK, + HW_ATL_TS_POWER_DOWN_SHIFT); +} + +u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_ADR, + HW_ATL_TS_READY_MSK, + HW_ATL_TS_READY_SHIFT); +} + +u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_LATCH_HIGH_ADR, + HW_ATL_TS_READY_LATCH_HIGH_MSK, + HW_ATL_TS_READY_LATCH_HIGH_SHIFT); +} + +u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_DATA_OUT_ADR, + HW_ATL_TS_DATA_OUT_MSK, + HW_ATL_TS_DATA_OUT_SHIFT); +} + /* global */ void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index f0954711df24..58f5ee0a6214 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -16,6 +16,24 @@ struct aq_hw_s; +/* set temperature sense reset */ +void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val); + +/* set temperature sense power down */ +void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val); + +/* get temperature sense power down */ +u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw); + +/* get temperature sense ready */ +u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw); + +/* get temperature sense ready latch high */ +u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw); + +/* get temperature sense data */ +u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw); + /* global */ /* set global microprocessor semaphore */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index ee11cb88325e..4a6467031b9e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -12,6 +12,36 @@ #ifndef HW_ATL_LLH_INTERNAL_H #define HW_ATL_LLH_INTERNAL_H +/* COM Temperature Sense Reset Bitfield Definitions */ +#define HW_ATL_TS_RESET_ADR 0x00003100 +#define HW_ATL_TS_RESET_MSK 0x00000004 +#define HW_ATL_TS_RESET_SHIFT 2 +#define HW_ATL_TS_RESET_WIDTH 1 + +/* COM Temperature Sense Power Down Bitfield Definitions */ +#define HW_ATL_TS_POWER_DOWN_ADR 0x00003100 +#define HW_ATL_TS_POWER_DOWN_MSK 0x00000001 +#define HW_ATL_TS_POWER_DOWN_SHIFT 0 +#define HW_ATL_TS_POWER_DOWN_WIDTH 1 + +/* COM Temperature Sense Ready Bitfield Definitions */ +#define HW_ATL_TS_READY_ADR 0x00003120 +#define HW_ATL_TS_READY_MSK 0x80000000 +#define HW_ATL_TS_READY_SHIFT 31 +#define HW_ATL_TS_READY_WIDTH 1 + +/* COM Temperature Sense Ready Latch High Bitfield Definitions */ +#define HW_ATL_TS_READY_LATCH_HIGH_ADR 0x00003120 +#define HW_ATL_TS_READY_LATCH_HIGH_MSK 0x40000000 +#define HW_ATL_TS_READY_LATCH_HIGH_SHIFT 30 +#define HW_ATL_TS_READY_LATCH_HIGH_WIDTH 1 + +/* COM Temperature Sense Data Out [B:0] Bitfield Definitions */ +#define HW_ATL_TS_DATA_OUT_ADR 0x00003120 +#define HW_ATL_TS_DATA_OUT_MSK 0x00000FFF +#define HW_ATL_TS_DATA_OUT_SHIFT 0 +#define HW_ATL_TS_DATA_OUT_WIDTH 12 + /* global microprocessor semaphore definitions * base address: 0x000003a0 * parameter: semaphore {s} | stride size 0x4 | range [0, 15] diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 
cacab3352cb8..404cbf60d3f2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -1066,6 +1066,7 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .get_mac_temp = NULL, .get_phy_temp = NULL, .set_power = aq_fw1x_set_power, .set_eee_rate = NULL, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index 013676cd38e4..93c06dfa6c55 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -353,7 +353,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp) /* Convert PHY temperature from 1/256 degree Celsius * to 1/1000 degree Celsius. */ - *temp = (temp_res & 0xFFFF) * 1000 / 256; + *temp = (int16_t)(temp_res & 0xFFFF) * 1000 / 256; return 0; } @@ -681,6 +681,7 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, + .get_mac_temp = NULL, .get_phy_temp = aq_fw2x_get_phy_temp, .set_power = aq_fw2x_set_power, .set_eee_rate = aq_fw2x_set_eee_rate, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c index a8ce9a2c1c51..85628acbcc1d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -379,6 +379,25 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self) return 0; } +static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp) +{ + struct phy_health_monitor_s phy_health_monitor; + + hw_atl2_shared_buffer_read_safe(self, phy_health_monitor, + &phy_health_monitor); + + *temp = (int8_t)phy_health_monitor.phy_temperature * 1000; + return 0; +} + +static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp) +{ + /* There's only one temperature sensor on A2, use it for + * both MAC and PHY. + */ + return aq_a2_fw_get_phy_temp(self, temp); +} + static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed) { struct link_options_s link_options; @@ -510,6 +529,8 @@ const struct aq_fw_ops aq_a2_fw_ops = { .set_state = aq_a2_fw_set_state, .update_link_status = aq_a2_fw_update_link_status, .update_stats = aq_a2_fw_update_stats, + .get_mac_temp = aq_a2_fw_get_mac_temp, + .get_phy_temp = aq_a2_fw_get_phy_temp, .set_eee_rate = aq_a2_fw_set_eee_rate, .get_eee_rate = aq_a2_fw_get_eee_rate, .set_flow_control = aq_a2_fw_set_flow_control, From c3466a768eaa8d08551dafd552bac2f2698f73cc Mon Sep 17 00:00:00 2001 From: Vinay Kumar Yadav Date: Sat, 18 Jul 2020 00:46:40 +0530 Subject: [PATCH 1323/2056] crypto/chtls: Enable tcp window scaling option Enable tcp window scaling option in hw based on sysctl settings and option in connection request. v1->v2: - Set window scale option based on option in connection request. Signed-off-by: Vinay Kumar Yadav Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_cm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index f200fae6f7cb..fd5c558c6c23 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1056,6 +1056,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb, opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO); opt2 |= T5_ISS_F; opt2 |= T5_OPT_2_VALID_F; + opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp)); rpl5->opt0 = cpu_to_be64(opt0); rpl5->opt2 = cpu_to_be32(opt2); rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1); From 2b9843fbe15276a98cae74d5818dd55600e19052 Mon Sep 17 00:00:00 2001 From: Briana Oursler Date: Fri, 17 Jul 2020 14:54:39 -0700 Subject: [PATCH 1324/2056] tc-testing: Add tdc to kselftests Add tdc to existing kselftest infrastructure so that it can be run with existing kselftests. TDC now generates objects in objdir/kselftest without cluttering main objdir, leaves source directory clean, and installs correctly in kselftest_install, properly adding itself to run_kselftest.sh script. Add tc-testing as a target of selftests/Makefile. Create tdc.sh to run tdc.py targets with correct arguments. To support single target from selftest/Makefile, combine tc-testing/bpf/Makefile and tc-testing/Makefile. Move action.c up a directory to tc-testing/. Tested with: make O=/tmp/{objdir} TARGETS="tc-testing" kselftest cd /tmp/{objdir} cd kselftest cd tc-testing ./tdc.sh make -C tools/testing/selftests/ TARGETS=tc-testing run_tests make TARGETS="tc-testing" kselftest cd tools/testing/selftests ./kselftest_install.sh /tmp/exampledir My VM doesn't run all the kselftests so I commented out all except my target and net/pmtu.sh then: cd /tmp/exampledir && ./run_kselftest.sh Co-developed-by: Davide Caratti Signed-off-by: Davide Caratti Signed-off-by: Briana Oursler Signed-off-by: David S. Miller --- tools/testing/selftests/Makefile | 1 + tools/testing/selftests/tc-testing/{bpf => }/Makefile | 9 ++++++--- tools/testing/selftests/tc-testing/{bpf => }/action.c | 0 tools/testing/selftests/tc-testing/tdc.sh | 5 +++++ tools/testing/selftests/tc-testing/tdc_config.py | 2 +- 5 files changed, 13 insertions(+), 4 deletions(-) rename tools/testing/selftests/tc-testing/{bpf => }/Makefile (79%) rename tools/testing/selftests/tc-testing/{bpf => }/action.c (100%) create mode 100755 tools/testing/selftests/tc-testing/tdc.sh diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 1195bd85af38..f4522e0a2cab 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -54,6 +54,7 @@ TARGETS += splice TARGETS += static_keys TARGETS += sync TARGETS += sysctl +TARGETS += tc-testing TARGETS += timens ifneq (1, $(quicktest)) TARGETS += timers diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/Makefile similarity index 79% rename from tools/testing/selftests/tc-testing/bpf/Makefile rename to tools/testing/selftests/tc-testing/Makefile index be5a5e542804..91fee5c43274 100644 --- a/tools/testing/selftests/tc-testing/bpf/Makefile +++ b/tools/testing/selftests/tc-testing/Makefile @@ -1,11 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 -APIDIR := ../../../../include/uapi +top_srcdir = $(abspath ../../../..) +APIDIR := $(top_scrdir)/include/uapi TEST_GEN_FILES = action.o -top_srcdir = ../../../../.. 
KSFT_KHDR_INSTALL := 1 -include ../../lib.mk +include ../lib.mk CLANG ?= clang LLC ?= llc @@ -28,3 +28,6 @@ $(OUTPUT)/%.o: %.c $(CLANG) $(CLANG_FLAGS) \ -O2 -target bpf -emit-llvm -c $< -o - | \ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@ + +TEST_PROGS += ./tdc.sh +TEST_FILES := tdc*.py Tdc*.py plugins plugin-lib tc-tests diff --git a/tools/testing/selftests/tc-testing/bpf/action.c b/tools/testing/selftests/tc-testing/action.c similarity index 100% rename from tools/testing/selftests/tc-testing/bpf/action.c rename to tools/testing/selftests/tc-testing/action.c diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh new file mode 100755 index 000000000000..7fe38c76db44 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tdc.sh @@ -0,0 +1,5 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +./tdc.py -c actions --nobuildebpf +./tdc.py -c qdisc diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py index 080709cc4297..cd4a27ee1466 100644 --- a/tools/testing/selftests/tc-testing/tdc_config.py +++ b/tools/testing/selftests/tc-testing/tdc_config.py @@ -24,7 +24,7 @@ NAMES = { # Name of the namespace to use 'NS': 'tcut', # Directory containing eBPF test programs - 'EBPFDIR': './bpf' + 'EBPFDIR': './' } From e85da794f658d405f24094b3ccb2ea295144a206 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 18 Jul 2020 12:30:33 +0200 Subject: [PATCH 1325/2056] mISDN: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'setup_hw()' (hfcpci.c) GFP_KERNEL can be used because it is called from the probe function and no lock is taken. The call chain is: hfc_probe() --> setup_card() --> setup_hw() When memory is allocated in 'inittiger()' (netjet.c) GFP_ATOMIC must be used because a spin_lock is taken by the caller (i.e. 'nj_init_card()') This is also consistent with the other allocations done in the function. 
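As a minimal sketch of the resulting pattern (hypothetical helper name, not code from hfcpci.c or netjet.c): the pci-dma-compat wrapper always passed GFP_ATOMIC internally, so the hand-edited conversion only has to pick the flag explicitly, GFP_KERNEL where sleeping is allowed (probe context) and GFP_ATOMIC where a spinlock is held.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper, not part of mISDN: shows the conversion shape. */
    static void *alloc_dma_buf(struct pci_dev *pdev, size_t size,
                               dma_addr_t *handle, bool may_sleep)
    {
            /* was: pci_alloc_consistent(pdev, size, handle),
             * which used GFP_ATOMIC unconditionally
             */
            return dma_alloc_coherent(&pdev->dev, size, handle,
                                      may_sleep ? GFP_KERNEL : GFP_ATOMIC);
    }

Freeing mirrors it: dma_free_coherent(&pdev->dev, size, cpu_addr, handle) replaces pci_free_consistent(pdev, size, cpu_addr, handle). The semantic patch that did the mechanical part of the conversion follows.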
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/mISDN/hfcpci.c | 12 +++++++----- drivers/isdn/hardware/mISDN/netjet.c | 8 ++++---- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index abdf787c1a71..904a4f4c5ff9 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -158,7 +158,8 @@ release_io_hfcpci(struct hfc_pci *hc) /* disable memory mapped ports + busmaster */ pci_write_config_word(hc->pdev, PCI_COMMAND, 0); del_timer(&hc->hw.timer); - pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle); + dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos, + hc->hw.dmahandle); iounmap(hc->hw.pci_io); } @@ -2004,8 +2005,9 @@ setup_hw(struct hfc_pci *hc) } /* Allocate memory for FIFOS */ /* the memory needs to be on a 32k boundary within the first 4G */ - pci_set_dma_mask(hc->pdev, 0xFFFF8000); - buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle); + dma_set_mask(&hc->pdev->dev, 0xFFFF8000); + buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle, + GFP_KERNEL); /* We silently assume the address is okay if nonzero */ if (!buffer) { printk(KERN_WARNING @@ -2018,8 +2020,8 @@ setup_hw(struct hfc_pci *hc) if (unlikely(!hc->hw.pci_io)) { printk(KERN_WARNING "HFC-PCI: Error in ioremap for PCI!\n"); - pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, - hc->hw.dmahandle); + dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos, + hc->hw.dmahandle); return 1; } diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 6aae97e827b7..ee925b58bbce 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -297,8 +297,8 @@ inittiger(struct tiger_hw *card) { int i; - card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE, - &card->dma); + card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE, + &card->dma, GFP_ATOMIC); if (!card->dma_p) { pr_info("%s: No DMA memory\n", card->name); return -ENOMEM; @@ -965,8 +965,8 @@ nj_release(struct tiger_hw *card) kfree(card->bc[i].hrbuf); } if (card->dma_p) - pci_free_consistent(card->pdev, NJ_DMA_SIZE, - card->dma_p, card->dma); + dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p, + card->dma); write_lock_irqsave(&card_lock, flags); list_del(&card->list); write_unlock_irqrestore(&card_lock, flags); From 405e30e23cb37c1a1cd521f41db89d2d93bd1e3b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 18 Jul 2020 13:03:38 +0200 Subject: [PATCH 1326/2056] net/fealnx: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated, GFP_KERNEL can be used because it is called from the probe function (i.e. 'fealnx_init_one()') and no lock is taken. 
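The streaming mappings in this driver convert the same way; below is a minimal, hypothetical sketch of the before/after shape (invented helper and names, not code from fealnx.c):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical helper: map one TX buffer with the raw DMA API. */
    static dma_addr_t map_tx_buf(struct pci_dev *pdev, void *buf, size_t len)
    {
            /* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
            dma_addr_t addr = dma_map_single(&pdev->dev, buf, len,
                                             DMA_TO_DEVICE);

            /* idiomatic: never use an address that failed to map */
            if (dma_mapping_error(&pdev->dev, addr))
                    return DMA_MAPPING_ERROR;

            return addr;
    }

The unmap side has the same shape (pci_unmap_single(pdev, ...) becomes dma_unmap_single(&pdev->dev, ...)), and the direction constants translate one-to-one, exactly as the semantic patch below spells out.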
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/fealnx.c | 91 ++++++++++++++++++++--------------- 1 file changed, 53 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 73e896a7d8fd..c696651dd735 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -543,7 +543,8 @@ static int fealnx_init_one(struct pci_dev *pdev, np->mii.phy_id_mask = 0x1f; np->mii.reg_num_mask = 0x1f; - ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) { err = -ENOMEM; goto err_out_free_dev; @@ -551,7 +552,8 @@ static int fealnx_init_one(struct pci_dev *pdev, np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; - ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); + ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, + GFP_KERNEL); if (!ring_space) { err = -ENOMEM; goto err_out_free_rx; @@ -656,9 +658,11 @@ static int fealnx_init_one(struct pci_dev *pdev, return 0; err_out_free_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); err_out_free_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); err_out_free_dev: free_netdev(dev); err_out_unmap: @@ -676,10 +680,10 @@ static void fealnx_remove_one(struct pci_dev *pdev) if (dev) { struct netdev_private *np = netdev_priv(dev); - pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, - np->tx_ring_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, - np->rx_ring_dma); + dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, + np->tx_ring_dma); + dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, + np->rx_ring_dma); unregister_netdev(dev); pci_iounmap(pdev, np->mem); free_netdev(dev); @@ -1056,8 +1060,10 @@ static void allocate_rx_buffers(struct net_device *dev) np->lack_rxbuf = np->lack_rxbuf->next_desc_logical; np->lack_rxbuf->skbuff = skb; - np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data, - np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev, + skb->data, + np->rx_buf_sz, + DMA_FROM_DEVICE); np->lack_rxbuf->status = RXOWN; ++np->really_rx_count; } @@ -1251,8 +1257,10 @@ static void init_ring(struct net_device *dev) ++np->really_rx_count; np->rx_ring[i].skbuff = skb; - np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data, - np->rx_buf_sz, PCI_DMA_FROMDEVICE); + np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev, + skb->data, + np->rx_buf_sz, + DMA_FROM_DEVICE); np->rx_ring[i].status = RXOWN; np->rx_ring[i].control |= RXIC; } @@ -1290,8 +1298,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) #define one_buffer #define BPT 1022 #if defined(one_buffer) - np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, - skb->len, PCI_DMA_TODEVICE); + np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data, + skb->len, DMA_TO_DEVICE); np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ @@ -1306,8 +1314,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) struct fealnx_desc *next; /* for the first descriptor */ - np->cur_tx_copy->buffer = 
pci_map_single(np->pci_dev, skb->data, - BPT, PCI_DMA_TODEVICE); + np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, + skb->data, BPT, + DMA_TO_DEVICE); np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (BPT << TBSShift); /* buffer size */ @@ -1321,8 +1330,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) // 89/12/29 add, if (np->pci_dev->device == 0x891) np->cur_tx_copy->control |= ETIControl | RetryTxLC; - next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT, - skb->len - BPT, PCI_DMA_TODEVICE); + next->buffer = dma_map_single(&ep->pci_dev->dev, + skb->data + BPT, skb->len - BPT, + DMA_TO_DEVICE); next->status = TXOWN; np->cur_tx_copy->status = TXOWN; @@ -1330,8 +1340,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) np->cur_tx_copy = next->next_desc_logical; np->free_tx_count -= 2; } else { - np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data, - skb->len, PCI_DMA_TODEVICE); + np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, + skb->data, skb->len, + DMA_TO_DEVICE); np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable; np->cur_tx_copy->control |= (skb->len << PKTSShift); /* pkt size */ np->cur_tx_copy->control |= (skb->len << TBSShift); /* buffer size */ @@ -1371,8 +1382,8 @@ static void reset_tx_descriptors(struct net_device *dev) for (i = 0; i < TX_RING_SIZE; i++) { cur = &np->tx_ring[i]; if (cur->skbuff) { - pci_unmap_single(np->pci_dev, cur->buffer, - cur->skbuff->len, PCI_DMA_TODEVICE); + dma_unmap_single(&np->pci_dev->dev, cur->buffer, + cur->skbuff->len, DMA_TO_DEVICE); dev_kfree_skb_any(cur->skbuff); cur->skbuff = NULL; } @@ -1515,8 +1526,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance) } /* Free the original skb. */ - pci_unmap_single(np->pci_dev, np->cur_tx->buffer, - np->cur_tx->skbuff->len, PCI_DMA_TODEVICE); + dma_unmap_single(&np->pci_dev->dev, + np->cur_tx->buffer, + np->cur_tx->skbuff->len, + DMA_TO_DEVICE); dev_consume_skb_irq(np->cur_tx->skbuff); np->cur_tx->skbuff = NULL; --np->really_tx_count; @@ -1682,10 +1695,10 @@ static int netdev_rx(struct net_device *dev) if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ - pci_dma_sync_single_for_cpu(np->pci_dev, - np->cur_rx->buffer, - np->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&np->pci_dev->dev, + np->cur_rx->buffer, + np->rx_buf_sz, + DMA_FROM_DEVICE); /* Call copy + cksum if available. */ #if ! 
defined(__alpha__) @@ -1696,15 +1709,15 @@ static int netdev_rx(struct net_device *dev) skb_put_data(skb, np->cur_rx->skbuff->data, pkt_len); #endif - pci_dma_sync_single_for_device(np->pci_dev, - np->cur_rx->buffer, - np->rx_buf_sz, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&np->pci_dev->dev, + np->cur_rx->buffer, + np->rx_buf_sz, + DMA_FROM_DEVICE); } else { - pci_unmap_single(np->pci_dev, + dma_unmap_single(&np->pci_dev->dev, np->cur_rx->buffer, np->rx_buf_sz, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb_put(skb = np->cur_rx->skbuff, pkt_len); np->cur_rx->skbuff = NULL; --np->really_rx_count; @@ -1896,8 +1909,9 @@ static int netdev_close(struct net_device *dev) np->rx_ring[i].status = 0; if (skb) { - pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer, - np->rx_buf_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&np->pci_dev->dev, + np->rx_ring[i].buffer, np->rx_buf_sz, + DMA_FROM_DEVICE); dev_kfree_skb(skb); np->rx_ring[i].skbuff = NULL; } @@ -1907,8 +1921,9 @@ static int netdev_close(struct net_device *dev) struct sk_buff *skb = np->tx_ring[i].skbuff; if (skb) { - pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer, - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&np->pci_dev->dev, + np->tx_ring[i].buffer, skb->len, + DMA_TO_DEVICE); dev_kfree_skb(skb); np->tx_ring[i].skbuff = NULL; } From 2b96692bcfcd2be7c9aa55dc13440213cd54c654 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Sat, 18 Jul 2020 20:53:38 +0800 Subject: [PATCH 1327/2056] net: hsr: remove redundant null check Because kfree_skb already checked NULL skb parameter, so the additional checks are unnecessary, just remove them. Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: David S. Miller --- net/hsr/hsr_forward.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ed13760463de..92c8ad75200b 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -367,10 +367,8 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) port->dev->stats.tx_bytes += skb->len; } - if (frame.skb_hsr) - kfree_skb(frame.skb_hsr); - if (frame.skb_std) - kfree_skb(frame.skb_std); + kfree_skb(frame.skb_hsr); + kfree_skb(frame.skb_std); return; out_drop: From f58d2598cf70d41f73e761b62a114d2e8f94a676 Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 18 Jul 2020 17:32:14 +0100 Subject: [PATCH 1328/2056] net: dsa: qca8k: implement the port MTU callbacks This switch has a single max frame size configuration register, so we track the requested MTU for each port and apply the largest. v2: - Address review feedback from Vladimir Oltean Signed-off-by: Jonathan McDowell Acked-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 31 +++++++++++++++++++++++++++++++ drivers/net/dsa/qca8k.h | 3 +++ 2 files changed, 34 insertions(+) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 4acad5fa0c84..a5566de82853 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -670,6 +670,11 @@ qca8k_setup(struct dsa_switch *ds) } } + /* Setup our port MTUs to match power on defaults */ + for (i = 0; i < QCA8K_NUM_PORTS; i++) + priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN; + qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN); + /* Flush the FDB table */ qca8k_fdb_flush(priv); @@ -1098,6 +1103,30 @@ qca8k_port_disable(struct dsa_switch *ds, int port) priv->port_sts[port].enabled = 0; } +static int +qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) +{ + struct qca8k_priv *priv = ds->priv; + int i, mtu = 0; + + priv->port_mtu[port] = new_mtu; + + for (i = 0; i < QCA8K_NUM_PORTS; i++) + if (priv->port_mtu[port] > mtu) + mtu = priv->port_mtu[port]; + + /* Include L2 header / FCS length */ + qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); + + return 0; +} + +static int +qca8k_port_max_mtu(struct dsa_switch *ds, int port) +{ + return QCA8K_MAX_MTU; +} + static int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, u16 port_mask, u16 vid) @@ -1174,6 +1203,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, .port_disable = qca8k_port_disable, + .port_change_mtu = qca8k_port_change_mtu, + .port_max_mtu = qca8k_port_max_mtu, .port_stp_state_set = qca8k_port_stp_state_set, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 10ef2bca2cde..31439396401c 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -13,6 +13,7 @@ #include #define QCA8K_NUM_PORTS 7 +#define QCA8K_MAX_MTU 9000 #define PHY_ID_QCA8337 0x004dd036 #define QCA8K_ID_QCA8337 0x13 @@ -58,6 +59,7 @@ #define QCA8K_MDIO_MASTER_MAX_REG 32 #define QCA8K_GOL_MAC_ADDR0 0x60 #define QCA8K_GOL_MAC_ADDR1 0x64 +#define QCA8K_MAX_FRAME_SIZE 0x78 #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) #define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) #define QCA8K_PORT_STATUS_SPEED_10 0 @@ -189,6 +191,7 @@ struct qca8k_priv { struct device *dev; struct dsa_switch_ops ops; struct gpio_desc *reset_gpio; + unsigned int port_mtu[QCA8K_NUM_PORTS]; }; struct qca8k_mib_desc { From 71d4364abdc50cb1f0ff5af0f932b110278f620c Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 18 Jul 2020 21:04:18 +0300 Subject: [PATCH 1329/2056] net: dsa: use the ETH_MIN_MTU and ETH_DATA_LEN default values Now that DSA supports MTU configuration, undo the effects of commit 8b1efc0f83f1 ("net: remove MTU limits on a few ether_setup callers") and let DSA interfaces use the default min_mtu and max_mtu specified by ether_setup(). This is more important for min_mtu: since DSA is Ethernet, the minimum MTU is the same as of any other Ethernet interface, and definitely not zero. For the max_mtu, we have a callback through which drivers can override that, if they want to. Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index e147e10b411c..41d60eeefdbd 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1754,11 +1754,8 @@ int dsa_slave_create(struct dsa_port *port) eth_hw_addr_inherit(slave_dev, master); slave_dev->priv_flags |= IFF_NO_QUEUE; slave_dev->netdev_ops = &dsa_slave_netdev_ops; - slave_dev->min_mtu = 0; if (ds->ops->port_max_mtu) slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); - else - slave_dev->max_mtu = ETH_MAX_MTU; SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one, From b328ecc468f8f92433c9ad82675c0ce9f99b10cf Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Fri, 17 Jul 2020 10:35:32 +0200 Subject: [PATCH 1330/2056] xfrm: Make the policy hold queue work with VTI. We forgot to support the xfrm policy hold queue when VTI was implemented. This patch adds everything we need so that we can use the policy hold queue together with VTI interfaces. Signed-off-by: Steffen Klassert --- net/ipv4/ip_vti.c | 6 +++++- net/ipv6/ip6_vti.c | 6 +++++- net/xfrm/xfrm_policy.c | 11 +++++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 3e5d54517145..8b962eac9ed8 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -218,12 +218,15 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, } dst_hold(dst); - dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0); + dst = xfrm_lookup_route(tunnel->net, dst, fl, NULL, 0); if (IS_ERR(dst)) { dev->stats.tx_carrier_errors++; goto tx_error_icmp; } + if (dst->flags & DST_XFRM_QUEUE) + goto queued; + if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) { dev->stats.tx_carrier_errors++; dst_release(dst); @@ -255,6 +258,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, goto tx_error; } +queued: skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev))); skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 53f12b40528e..f5a4c4a6492b 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -491,13 +491,16 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) } dst_hold(dst); - dst = xfrm_lookup(t->net, dst, fl, NULL, 0); + dst = xfrm_lookup_route(t->net, dst, fl, NULL, 0); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; goto tx_err_link_failure; } + if (dst->flags & DST_XFRM_QUEUE) + goto queued; + x = dst->xfrm; if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr)) goto tx_err_link_failure; @@ -533,6 +536,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_dst_release; } +queued: skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev))); skb_dst_set(skb, dst); skb->dev = skb_dst(skb)->dev; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 564aa6492e7c..be150475b28b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2758,6 +2758,7 @@ static void xfrm_policy_queue_process(struct timer_list *t) struct xfrm_policy_queue *pq = &pol->polq; struct flowi fl; struct sk_buff_head list; + __u32 skb_mark; spin_lock(&pq->hold_queue.lock); skb = skb_peek(&pq->hold_queue); @@ -2767,7 +2768,12 @@ static void xfrm_policy_queue_process(struct timer_list *t) } dst = skb_dst(skb); sk = skb->sk; + + /* Fixup the mark to support VTI. 
*/ + skb_mark = skb->mark; + skb->mark = pol->mark.v; xfrm_decode_session(skb, &fl, dst->ops->family); + skb->mark = skb_mark; spin_unlock(&pq->hold_queue.lock); dst_hold(xfrm_dst_path(dst)); @@ -2799,7 +2805,12 @@ static void xfrm_policy_queue_process(struct timer_list *t) while (!skb_queue_empty(&list)) { skb = __skb_dequeue(&list); + /* Fixup the mark to support VTI. */ + skb_mark = skb->mark; + skb->mark = pol->mark.v; xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family); + skb->mark = skb_mark; + dst_hold(xfrm_dst_path(skb_dst(skb))); dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0); if (IS_ERR(dst)) { From 343ead287dde0064c430ee1db477726f52e0269c Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Tue, 21 Jul 2020 12:07:16 +0200 Subject: [PATCH 1331/2056] bpf, netns: Fix build without CONFIG_INET When CONFIG_NET is set but CONFIG_INET isn't, build fails with: ld: kernel/bpf/net_namespace.o: in function `netns_bpf_attach_type_unneed': kernel/bpf/net_namespace.c:32: undefined reference to `bpf_sk_lookup_enabled' ld: kernel/bpf/net_namespace.o: in function `netns_bpf_attach_type_need': kernel/bpf/net_namespace.c:43: undefined reference to `bpf_sk_lookup_enabled' This is because without CONFIG_INET bpf_sk_lookup_enabled symbol is not available. Wrap references to bpf_sk_lookup_enabled with preprocessor conditionals. Fixes: 1559b4aa1db4 ("inet: Run SK_LOOKUP BPF program on socket lookup") Reported-by: Randy Dunlap Reported-by: Stephen Rothwell Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Randy Dunlap # build-tested Link: https://lore.kernel.org/bpf/20200721100716.720477-1-jakub@cloudflare.com --- kernel/bpf/net_namespace.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 4e1bcaa2c3cb..71405edd667c 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -28,9 +28,11 @@ DEFINE_MUTEX(netns_bpf_mutex); static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type) { switch (type) { +#ifdef CONFIG_INET case NETNS_BPF_SK_LOOKUP: static_branch_dec(&bpf_sk_lookup_enabled); break; +#endif default: break; } @@ -39,9 +41,11 @@ static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type) static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type) { switch (type) { +#ifdef CONFIG_INET case NETNS_BPF_SK_LOOKUP: static_branch_inc(&bpf_sk_lookup_enabled); break; +#endif default: break; } From c576b9c77bea9a8ad11ddc277af3edbf15733ddd Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sun, 19 Jul 2020 17:52:41 +0200 Subject: [PATCH 1332/2056] bpf: cpumap: Fix possible rcpu kthread hung Fix the following cpumap kthread hung. The issue is currently occurring when __cpu_map_load_bpf_program fails (e.g if the bpf prog has not BPF_XDP_CPUMAP as expected_attach_type) $./test_progs -n 101 101/1 cpumap_with_progs:OK 101 xdp_cpumap_attach:OK Summary: 1/1 PASSED, 0 SKIPPED, 0 FAILED [ 369.996478] INFO: task cpumap/0/map:7:205 blocked for more than 122 seconds. [ 369.998463] Not tainted 5.8.0-rc4-01472-ge57892f50a07 #212 [ 370.000102] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [ 370.001918] cpumap/0/map:7 D 0 205 2 0x00004000 [ 370.003228] Call Trace: [ 370.003930] __schedule+0x5c7/0xf50 [ 370.004901] ? io_schedule_timeout+0xb0/0xb0 [ 370.005934] ? static_obj+0x31/0x80 [ 370.006788] ? mark_held_locks+0x24/0x90 [ 370.007752] ? 
cpu_map_bpf_prog_run_xdp+0x6c0/0x6c0 [ 370.008930] schedule+0x6f/0x160 [ 370.009728] schedule_preempt_disabled+0x14/0x20 [ 370.010829] kthread+0x17b/0x240 [ 370.011433] ? kthread_create_worker_on_cpu+0xd0/0xd0 [ 370.011944] ret_from_fork+0x1f/0x30 [ 370.012348] Showing all locks held in the system: [ 370.013025] 1 lock held by khungtaskd/33: [ 370.013432] #0: ffffffff82b24720 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x28/0x1c3 [ 370.014461] ============================================= Fixes: 9216477449f3 ("bpf: cpumap: Add the possibility to attach an eBPF program to cpumap") Reported-by: Jakub Sitnicki Signed-off-by: Lorenzo Bianconi Signed-off-by: Alexei Starovoitov Tested-by: Jakub Sitnicki Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/e54f2aabf959f298939e5507b09c48f8c2e380be.1595170625.git.lorenzo@kernel.org --- kernel/bpf/cpumap.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 4c95d0615ca2..f1c46529929b 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -453,24 +453,27 @@ __cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id) rcpu->map_id = map_id; rcpu->value.qsize = value->qsize; + if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) + goto free_ptr_ring; + /* Setup kthread */ rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, "cpumap/%d/map:%d", cpu, map_id); if (IS_ERR(rcpu->kthread)) - goto free_ptr_ring; + goto free_prog; get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */ get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */ - if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd)) - goto free_ptr_ring; - /* Make sure kthread runs on a single CPU */ kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); return rcpu; +free_prog: + if (rcpu->prog) + bpf_prog_put(rcpu->prog); free_ptr_ring: ptr_ring_cleanup(rcpu->queue, NULL); free_queue: From 2cb002e3c29434b0159a3a4d343d460264dae2ac Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 2 Jun 2020 22:26:38 +0200 Subject: [PATCH 1333/2056] mt76: add missing lock configuring coverage class Coverage class callback can potentially run in parallel with other routines (e.g. mt7615_set_channel) that configures timing registers. 
Run coverage class callback holding mt76 mutex Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7603/main.c | 2 ++ drivers/net/wireless/mediatek/mt76/mt7615/main.c | 3 +++ drivers/net/wireless/mediatek/mt76/mt7915/main.c | 3 +++ 3 files changed, 8 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 26cb711b465f..83dfa6da4761 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7603_dev *dev = hw->priv; + mutex_lock(&dev->mt76.mutex); dev->coverage_class = max_t(s16, coverage_class, 0); mt7603_mac_set_timing(dev); + mutex_unlock(&dev->mt76.mutex); } static void mt7603_tx(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c26f99b368d9..52d6544698fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -735,9 +735,12 @@ static void mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt7615_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7615_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 0575c259f245..05b5650c56c8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -716,9 +716,12 @@ static void mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { struct mt7915_phy *phy = mt7915_hw_phy(hw); + struct mt7915_dev *dev = phy->dev; + mutex_lock(&dev->mt76.mutex); phy->coverage_class = max_t(s16, coverage_class, 0); mt7915_mac_set_timing(phy); + mutex_unlock(&dev->mt76.mutex); } static int From 8fc49625a33ad070013773b923fb945340000e37 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 30 May 2020 23:51:27 +0200 Subject: [PATCH 1334/2056] mt76: mt7615: fix lmac queue debugsfs entry acs and wmm index are swapped in mt7615_queues_acq respect to the hw design Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index fd3ef483a87c..d06afcf46d67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) int i; for (i = 0; i < 16; i++) { - int j, acs = i / 4, index = i % 4; + int j, wmm_idx = i % MT7615_MAX_WMM_SETS; + int acs = i / MT7615_MAX_WMM_SETS; u32 ctrl, val, qlen = 0; - val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index)); + val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx)); ctrl = BIT(31) | BIT(15) | (acs << 8); for (j = 0; j < 32; j++) { @@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data) continue; mt76_wr(dev, MT_PLE_FL_Q0_CTRL, - ctrl | (j + (index << 5))); + ctrl | (j + (wmm_idx << 5))); qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL, GENMASK(11, 0)); } - seq_printf(s, "AC%d%d: 
queued=%d\n", acs, index, qlen); + seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); } return 0; From 1fec635bcc9e14604f06d0947725fd3c613e1af0 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sun, 31 May 2020 00:32:04 +0200 Subject: [PATCH 1335/2056] mt76: mt7615: fix hw queue mapping mt7622/mt7663 chipsets rely on a fixed reverse queue map order respect to mac80211 one: - q(0): IEEE80211_AC_BK - q(1): IEEE80211_AC_BE - q(2): IEEE80211_AC_VI - q(3): IEEE80211_AC_VO Fixes: cdad4874057d ("mt76: mt7615: add dma and tx queue initialization for MT7622") Fixes: f40ac0f3d3c0 ("mt76: mt7615: introduce mt7663e support") Co-developed-by: Sean Wang Signed-off-by: Sean Wang Co-developed-by: Ryder Lee Signed-off-by: Ryder Lee Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/dma.c | 8 ++--- .../net/wireless/mediatek/mt76/mt7615/mac.c | 2 +- .../net/wireless/mediatek/mt76/mt7615/mac.h | 15 ---------- .../net/wireless/mediatek/mt76/mt7615/main.c | 1 + .../wireless/mediatek/mt76/mt7615/mt7615.h | 30 +++++++++++++++++++ drivers/net/wireless/mediatek/mt76/usb.c | 17 ++++++----- 6 files changed, 45 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 5a124610d4af..83fdcf5db3c7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -36,10 +36,10 @@ static int mt7622_init_tx_queues_multi(struct mt7615_dev *dev) { static const u8 wmm_queue_map[] = { - MT7622_TXQ_AC0, - MT7622_TXQ_AC1, - MT7622_TXQ_AC2, - MT7622_TXQ_AC3, + [IEEE80211_AC_BK] = MT7622_TXQ_AC0, + [IEEE80211_AC_BE] = MT7622_TXQ_AC1, + [IEEE80211_AC_VI] = MT7622_TXQ_AC2, + [IEEE80211_AC_VO] = MT7622_TXQ_AC3, }; int ret; int i; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 9f1c6ca7a665..5990355bc731 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -528,7 +528,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { q_idx = wmm_idx * MT7615_MAX_WMM_SETS + - skb_get_queue_mapping(skb); + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); p_fmt = is_usb ? 
MT_TX_TYPE_SF : MT_TX_TYPE_CT; } else if (beacon) { if (ext_phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index f0d4b29a52a2..81608ab656b8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -124,21 +124,6 @@ enum tx_pkt_type { MT_TX_TYPE_FW, }; -enum tx_pkt_queue_idx { - MT_LMAC_AC00, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0, - MT_LMAC_BCN0, - MT_LMAC_PSMP0, - MT_LMAC_ALTX1, - MT_LMAC_BMC1, - MT_LMAC_BCN1, - MT_LMAC_PSMP1, -}; - enum tx_port_idx { MT_TX_PORT_IDX_LMAC, MT_TX_PORT_IDX_MCU diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 52d6544698fe..beaca8127680 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); + queue = mt7615_lmac_mapping(dev, queue); queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; return mt7615_mcu_set_wmm(dev, queue, params); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index d6176d316bee..3e7d51bf42a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -282,6 +282,21 @@ struct mt7615_dev { struct list_head wd_head; }; +enum tx_pkt_queue_idx { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, + MT_LMAC_PSMP0, + MT_LMAC_ALTX1, + MT_LMAC_BMC1, + MT_LMAC_BCN1, + MT_LMAC_PSMP1, +}; + enum { HW_BSSID_0 = 0x0, HW_BSSID_1, @@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) return MT7615_WTBL_SIZE; } +static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + void mt7615_dma_reset(struct mt7615_dev *dev); void mt7615_scan_work(struct work_struct *work); void mt7615_roc_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index fb97ea25b4d4..27abcdb61a06 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac) { if (mt76_chip(dev) == 0x7663) { - static const u8 wmm_queue_map[] = { - [IEEE80211_AC_VO] = 0, - [IEEE80211_AC_VI] = 1, - [IEEE80211_AC_BE] = 2, - [IEEE80211_AC_BK] = 4, + static const u8 lmac_queue_map[] = { + /* ac to lmac mapping */ + [IEEE80211_AC_BK] = 0, + [IEEE80211_AC_BE] = 1, + [IEEE80211_AC_VI] = 2, + [IEEE80211_AC_VO] = 4, }; - if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map))) - return 2; /* BE */ + if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map))) + return 1; /* BE */ - return wmm_queue_map[ac]; + return lmac_queue_map[ac]; } return mt76_ac_to_hwq(ac); From d3c829985234450c0278e4fad994b70c8376c3b6 Mon Sep 17 00:00:00 2001 From: 
Lorenzo Bianconi Date: Mon, 1 Jun 2020 09:45:33 +0200 Subject: [PATCH 1336/2056] mt76: overwrite qid for non-bufferable mgmt frames Overwrite hw queue id for non-bufferable management frames if the hw support always txq (altxq) in order to be in sync with mac txwi code Fixes: cdad4874057d ("mt76: mt7615: add dma and tx queue initialization for MT7622") Fixes: f40ac0f3d3c0 ("mt76: mt7615: introduce mt7663e support") Suggested-by: Felix Fietkau Tested-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 1 + .../net/wireless/mediatek/mt76/mt7615/dma.c | 1 + .../net/wireless/mediatek/mt76/mt7615/mac.c | 22 +++++++------------ .../net/wireless/mediatek/mt76/mt7615/mmio.c | 2 +- .../net/wireless/mediatek/mt76/mt7615/usb.c | 2 +- drivers/net/wireless/mediatek/mt76/tx.c | 7 ++++++ 6 files changed, 19 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dfe625a53c63..3d7db6ffb599 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -301,6 +301,7 @@ struct mt76_hw_cap { #define MT_DRV_TX_ALIGNED4_SKBS BIT(1) #define MT_DRV_SW_RX_AIRTIME BIT(2) #define MT_DRV_RX_DMA_HDR BIT(3) +#define MT_DRV_HW_MGMT_TXQ BIT(4) struct mt76_driver_ops { u32 drv_flags; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index 83fdcf5db3c7..e5a965df899a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev) int i; mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); if (is_mt7615(&dev->mt76)) { mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 5990355bc731..d97315ec7265 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { + if (beacon) { + p_fmt = MT_TX_TYPE_FW; + q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; + } else { + p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; q_idx = wmm_idx * MT7615_MAX_WMM_SETS + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; - } else if (beacon) { - if (ext_phy) - q_idx = MT_LMAC_BCN1; - else - q_idx = MT_LMAC_BCN0; - p_fmt = MT_TX_TYPE_FW; - } else { - if (ext_phy) - q_idx = MT_LMAC_ALTX1; - else - q_idx = MT_LMAC_ALTX0; - p_fmt = is_usb ? 
MT_TX_TYPE_SF : MT_TX_TYPE_CT; } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) | diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index e670393506f0..2e99845b9c96 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common), - .drv_flags = MT_DRV_TXWI_NO_FREE, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index a50077eb24d7..f5dc1b828518 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, { static const struct mt76_driver_ops drv_ops = { .txwi_size = MT_USB_TXD_SIZE, - .drv_flags = MT_DRV_RX_DMA_HDR, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, .tx_prepare_skb = mt7663u_tx_prepare_skb, .tx_complete_skb = mt7663u_tx_complete_skb, .tx_status_data = mt7663u_tx_status_data, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index fca38ea2441f..f10c98aa883c 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, skb_set_queue_mapping(skb, qid); } + if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) && + !ieee80211_is_data(hdr->frame_control) && + !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) { + qid = MT_TXQ_PSD; + skb_set_queue_mapping(skb, qid); + } + if (!(wcid->tx_info & MT_WCID_TX_INFO_SET)) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); From a28bef561a5c144e7c6237626d748767fef7fc79 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 30 May 2020 17:19:45 +0200 Subject: [PATCH 1337/2056] mt76: mt7615: re-enable offloading of sequence number assignment Preparation for supporting more offload features Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 42 +++++++++++++++---- .../net/wireless/mediatek/mt76/mt7615/main.c | 5 +++ .../wireless/mediatek/mt76/mt7615/mt7615.h | 1 + 3 files changed, 40 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index d97315ec7265..d150fac50c00 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -617,16 +617,19 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, } val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); - if (ieee80211_is_data_qos(hdr->frame_control)) { - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - val |= MT_TXD3_SN_VALID; - } else if (ieee80211_is_back_req(hdr->frame_control)) { - struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; + if (info->flags & IEEE80211_TX_CTL_INJECTED) { + seqno = le16_to_cpu(hdr->seq_ctrl); - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); - val |= MT_TXD3_SN_VALID; + if (ieee80211_is_back_req(hdr->frame_control)) { + struct ieee80211_bar *bar; + + bar = (struct ieee80211_bar *)skb->data; 
+ seqno = le16_to_cpu(bar->start_seq_num); + } + + val |= MT_TXD3_SN_VALID | + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); } - val |= FIELD_PREP(MT_TXD3_SEQ, seqno); txwi[3] |= cpu_to_le32(val); @@ -893,6 +896,29 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, return 0; } +u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid) +{ + u32 addr, val, val2; + u8 offset; + + addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; + + offset = tid * 12; + addr += 4 * (offset / 32); + offset %= 32; + + val = mt76_rr(dev, addr); + val >>= (tid % 32); + + if (offset > 20) { + addr += 4; + val2 = mt76_rr(dev, addr); + val |= val2 << (32 - offset); + } + + return val & GENMASK(11, 0); +} + void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct ieee80211_tx_rate *probe_rate, struct ieee80211_tx_rate *rates) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index beaca8127680..dfebf86b86d6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -660,6 +660,9 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq->aggr = true; mtxq->send_bar = false; mt7615_mcu_add_tx_ba(dev, params, true); + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); + ieee80211_send_bar(vif, sta->addr, tid, + IEEE80211_SN_TO_SEQ(ssn)); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: @@ -667,6 +670,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt7615_mcu_add_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); + params->ssn = ssn; mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 3e7d51bf42a4..640ff8b9b266 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -516,6 +516,7 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev, enum mt7615_cipher_type cipher, enum set_key_cmd cmd); void mt7615_mac_reset_work(struct work_struct *work); +u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid); int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, From be49c5356f3923015ef7ace59c71be3013d18608 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 30 May 2020 23:48:56 +0200 Subject: [PATCH 1338/2056] mt76: usb: rely on mt76_for_each_q_rx Rely on mt76_for_each_q_rx whenever possible in order to simply the code Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/debugfs.c | 3 -- drivers/net/wireless/mediatek/mt76/usb.c | 41 ++++++-------------- 2 files changed, 12 insertions(+), 32 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index 3a5de1d1b121..9fda65db8cac 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -54,9 +54,6 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data) mt76_for_each_q_rx(dev, i) { struct mt76_queue *q = &dev->q_rx[i]; - if (!q->ndesc) - continue; - queued = mt76_is_usb(dev) ? 
q->ndesc - q->queued : q->queued; seq_printf(s, "%d: queued=%d head=%d tail=%d\n", i, queued, q->head, q->tail); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 27abcdb61a06..0ff3096f7455 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -672,17 +672,11 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) static void mt76u_rx_tasklet(unsigned long data) { struct mt76_dev *dev = (struct mt76_dev *)data; - struct mt76_queue *q; int i; rcu_read_lock(); - for (i = 0; i < __MT_RXQ_MAX; i++) { - q = &dev->q_rx[i]; - if (!q->ndesc) - continue; - - mt76u_process_rx_queue(dev, q); - } + mt76_for_each_q_rx(dev, i) + mt76u_process_rx_queue(dev, &dev->q_rx[i]); rcu_read_unlock(); } @@ -756,27 +750,19 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) static void mt76u_free_rx(struct mt76_dev *dev) { - struct mt76_queue *q; int i; - for (i = 0; i < __MT_RXQ_MAX; i++) { - q = &dev->q_rx[i]; - if (!q->ndesc) - continue; - - mt76u_free_rx_queue(dev, q); - } + mt76_for_each_q_rx(dev, i) + mt76u_free_rx_queue(dev, &dev->q_rx[i]); } void mt76u_stop_rx(struct mt76_dev *dev) { - struct mt76_queue *q; - int i, j; + int i; - for (i = 0; i < __MT_RXQ_MAX; i++) { - q = &dev->q_rx[i]; - if (!q->ndesc) - continue; + mt76_for_each_q_rx(dev, i) { + struct mt76_queue *q = &dev->q_rx[i]; + int j; for (j = 0; j < q->ndesc; j++) usb_poison_urb(q->entry[j].urb); @@ -788,14 +774,11 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx); int mt76u_resume_rx(struct mt76_dev *dev) { - struct mt76_queue *q; - int i, j, err; + int i; - for (i = 0; i < __MT_RXQ_MAX; i++) { - q = &dev->q_rx[i]; - - if (!q->ndesc) - continue; + mt76_for_each_q_rx(dev, i) { + struct mt76_queue *q = &dev->q_rx[i]; + int err, j; for (j = 0; j < q->ndesc; j++) usb_unpoison_urb(q->entry[j].urb); From 73741b9bee690ffd7c22e25c419b9f7979afadeb Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Sun, 7 Jun 2020 11:34:40 +0200 Subject: [PATCH 1339/2056] mt76: mt7663: introduce ARP filter offload Introduce ARP filter offload Co-developed-by: Wan-Feng Jiang Signed-off-by: Wan-Feng Jiang Co-developed-by: Soul Huang Signed-off-by: Soul Huang Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/main.c | 3 + .../net/wireless/mediatek/mt76/mt7615/mcu.c | 74 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/mcu.h | 13 +++- .../wireless/mediatek/mt76/mt7615/mt7615.h | 4 +- 4 files changed, 91 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index dfebf86b86d6..e6dbd7034bd7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -491,6 +491,9 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_PS) mt7615_mcu_set_vif_ps(dev, vif); + if (changed & BSS_CHANGED_ARP_FILTER) + mt7615_mcu_update_arp_filter(hw, vif, info); + mutex_unlock(&dev->mt76.mutex); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 6e869b8c5e26..b76ecc24f333 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -3542,6 +3542,32 @@ mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev, &req, sizeof(req), true); } +static int 
+mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool suspend) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7615_arpns_tlv arpns; + } req = { + .hdr = { + .bss_idx = mvif->idx, + }, + .arpns = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)), + .mode = suspend, + }, + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, + &req, sizeof(req), true); +} + void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { @@ -3554,6 +3580,7 @@ void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, mt7615_mcu_set_bss_pm(phy->dev, vif, suspend); mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend); + mt7615_mcu_set_arp_filter(phy->dev, vif, suspend); mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); @@ -3653,6 +3680,53 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif, sizeof(req), false); } +int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info) +{ + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct sk_buff *skb; + int i, len = min_t(int, info->arp_addr_cnt, + IEEE80211_BSS_ARP_ADDR_LIST_LEN); + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt7615_arpns_tlv arp; + } req_hdr = { + .hdr = { + .bss_idx = mvif->idx, + }, + .arp = { + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), + .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)), + .ips_num = len, + .mode = 2, /* update */ + .option = 1, + }, + }; + + if (!mt7615_firmware_offload(dev)) + return 0; + + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, + sizeof(req_hdr) + len * sizeof(__be32)); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + for (i = 0; i < len; i++) { + u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); + + memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); + } + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD_OFFLOAD, true); +} + int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 2314d0b23af1..64f7471a57bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -545,6 +545,15 @@ struct mt7615_roc_tlv { u8 rsv1[8]; } __packed; +struct mt7615_arpns_tlv { + __le16 tag; + __le16 len; + u8 mode; + u8 ips_num; + u8 option; + u8 pad[1]; +} __packed; + /* offload mcu commands */ enum { MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03, @@ -580,8 +589,8 @@ enum { }; enum { - UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4, - UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6, + UNI_OFFLOAD_OFFLOAD_ARP, + UNI_OFFLOAD_OFFLOAD_ND, UNI_OFFLOAD_OFFLOAD_GTK_REKEY, UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 640ff8b9b266..913dac5c3006 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -586,7 +586,9 @@ void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *key); - +int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, + struct 
ieee80211_vif *vif, + struct ieee80211_bss_conf *info); int __mt7663_load_firmware(struct mt7615_dev *dev); /* usb */ From 642023d04335458a057b549da329ee4e30c8a838 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Tue, 9 Jun 2020 23:42:21 +0800 Subject: [PATCH 1340/2056] mt76: mt7615: fix up typo in Kconfig for MT7663U Fix up typo in Kconfig with indicating MT7663U is an 802.11ac device Fixes: eb99cc95c3b6 ("mt76: mt7615: introduce mt7663u support") Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index e25db1135eda..c094260b0f89 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -35,6 +35,6 @@ config MT7663U depends on MAC80211 depends on USB help - This adds support for MT7663U 802.11ax 2x2:2 wireless devices. + This adds support for MT7663U 802.11ac 2x2:2 wireless devices. To compile this driver as a module, choose M here. From 9da82fb76d6cea157e31bf0c247d2b4c6883cead Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Thu, 11 Jun 2020 14:40:22 +0200 Subject: [PATCH 1341/2056] mt76: allow more channels, allowed in ETSI domain While looking at the ETSI regulatory domain definitions and a patch, which allows more channels for ath10k, I also checked the channels allowed for mt76. ETSI regulations would possibly allow to add channels 32, 68, 96, 144, 169 and 173. IEEE 802.11-2016 defines no operating class for channels 32, 68 and 96. This leaves us channels 144, 169 and 173, which are included in this patch. I tested 169 and 173 with a mt76 based USB dongle (AVM AC 860) and they worked fine. If I saw that right, these channels are also covered by register definitions inside the driver. Channel 144 should also work, but gets disabled by the kernel as of now. Signed-off-by: Markus Theil Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 907098101898..4c10c8164aee 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -58,12 +58,15 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = { CHAN5G(132, 5660), CHAN5G(136, 5680), CHAN5G(140, 5700), + CHAN5G(144, 5720), CHAN5G(149, 5745), CHAN5G(153, 5765), CHAN5G(157, 5785), CHAN5G(161, 5805), CHAN5G(165, 5825), + CHAN5G(169, 5845), + CHAN5G(173, 5865), }; static const struct ieee80211_tpt_blink mt76_tpt_blink[] = { From e39da597e5782a5fcdad0c23dee61dbe05d327b8 Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Fri, 12 Jun 2020 10:05:23 +0200 Subject: [PATCH 1342/2056] mt76: fix include in pci.h kernel test robot found the following issue and Kalle Valo forwarded it to Linux wireless. drivers/net/wireless/mediatek/mt76/pci.c:8:6: warning: no previous prototype for 'mt76_pci_disable_aspm' Fix this by adding the missing include of mt76.h as Kalle suggested. 
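For context, the warning comes from building with -Wmissing-prototypes: pci.c defines a function with external linkage without a prior declaration in scope. A minimal sketch of the pattern and of the fix, assuming only the declaration that already lives in mt76.h (the second include below merely stands in for whatever pci.c already pulls in and is not taken verbatim from the tree):

/* mt76.h: shared declaration used by callers of the helper */
void mt76_pci_disable_aspm(struct pci_dev *pdev);

/* pci.c: including mt76.h first brings that prototype into scope,
 * so the definition below no longer triggers -Wmissing-prototypes.
 */
#include "mt76.h"
#include <linux/pci.h>

void mt76_pci_disable_aspm(struct pci_dev *pdev)
{
        /* body elided */
}
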
Signed-off-by: Markus Theil Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c index 04c5a692bc85..4c1c159fbb62 100644 --- a/drivers/net/wireless/mediatek/mt76/pci.c +++ b/drivers/net/wireless/mediatek/mt76/pci.c @@ -3,6 +3,7 @@ * Copyright (C) 2019 Lorenzo Bianconi */ +#include "mt76.h" #include void mt76_pci_disable_aspm(struct pci_dev *pdev) From 4a58d5d1097a6b1918c4e307a8d30211ba721932 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 12 Jun 2020 12:08:59 +0200 Subject: [PATCH 1343/2056] mt76: rely on register macros For consistency with the rest of the code always rely on defined macros for register access Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/debugfs.c | 4 ++-- drivers/net/wireless/mediatek/mt76/util.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index 9fda65db8cac..5d58b16bfe9f 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -9,7 +9,7 @@ mt76_reg_set(void *data, u64 val) { struct mt76_dev *dev = data; - dev->bus->wr(dev, dev->debugfs_reg, val); + __mt76_wr(dev, dev->debugfs_reg, val); return 0; } @@ -18,7 +18,7 @@ mt76_reg_get(void *data, u64 *val) { struct mt76_dev *dev = data; - *val = dev->bus->rr(dev, dev->debugfs_reg); + *val = __mt76_rr(dev, dev->debugfs_reg); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c index ecde87465bf6..f53bb4ae5001 100644 --- a/drivers/net/wireless/mediatek/mt76/util.c +++ b/drivers/net/wireless/mediatek/mt76/util.c @@ -13,7 +13,7 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, timeout /= 10; do { - cur = dev->bus->rr(dev, offset) & mask; + cur = __mt76_rr(dev, offset) & mask; if (cur == val) return true; @@ -31,7 +31,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, timeout /= 10; do { - cur = dev->bus->rr(dev, offset) & mask; + cur = __mt76_rr(dev, offset) & mask; if (cur == val) return true; From b807b368c4f9bbdb8410dcc6241d7903094f0bef Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 12 Jun 2020 13:46:31 +0200 Subject: [PATCH 1344/2056] mt76: add U-APSD support on AP side Introduce U-APSD support in mt76 driver for AP interface Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 4 +- .../net/wireless/mediatek/mt76/mt7615/mcu.c | 46 ++++++++++++++++++- .../net/wireless/mediatek/mt76/mt7615/mcu.h | 21 +++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 4c10c8164aee..d8533d0c6e81 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -282,7 +282,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | - WIPHY_FLAG_SUPPORTS_TDLS; + WIPHY_FLAG_SUPPORTS_TDLS | + WIPHY_FLAG_AP_UAPSD; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); @@ -292,6 +293,7 @@ mt76_phy_init(struct mt76_dev *dev, struct 
ieee80211_hw *hw) wiphy->available_antennas_rx = dev->phy.antenna_mask; hw->txq_data_size = sizeof(struct mt76_txq); + hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; if (!hw->max_tx_fragments) hw->max_tx_fragments = 16; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index b76ecc24f333..d3a8ada3b779 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -926,6 +926,38 @@ mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) } } +static void +mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct sta_rec_uapsd *uapsd; + struct tlv *tlv; + + if (vif->type != NL80211_IFTYPE_AP || !sta->wme) + return; + + tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd)); + uapsd = (struct sta_rec_uapsd *)tlv; + + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) { + uapsd->dac_map |= BIT(3); + uapsd->tac_map |= BIT(3); + } + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) { + uapsd->dac_map |= BIT(2); + uapsd->tac_map |= BIT(2); + } + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) { + uapsd->dac_map |= BIT(1); + uapsd->tac_map |= BIT(1); + } + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) { + uapsd->dac_map |= BIT(0); + uapsd->tac_map |= BIT(0); + } + uapsd->max_sp = sta->max_sp; +} + static void mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb, struct ieee80211_ampdu_params *params, @@ -1188,8 +1220,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(sskb); mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable); - if (enable && sta) + if (enable && sta) { mt7615_mcu_sta_ht_tlv(sskb, sta); + mt7615_mcu_sta_uapsd(sskb, vif, sta); + } wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, NULL, &wskb); @@ -1285,8 +1319,10 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable); - if (enable && sta) + if (enable && sta) { mt7615_mcu_sta_ht_tlv(skb, sta); + mt7615_mcu_sta_uapsd(skb, vif, sta); + } sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); @@ -1429,6 +1465,7 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, u8 pad[3]; } __packed hdr; struct mt7615_bss_basic_tlv basic; + struct mt7615_bss_qos_tlv qos; } basic_req = { .hdr = { .bss_idx = mvif->idx, @@ -1444,6 +1481,11 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif, .active = true, /* keep bss deactivated */ .phymode = 0x38, }, + .qos = { + .tag = cpu_to_le16(UNI_BSS_INFO_QBSS), + .len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)), + .qos = vif->bss_conf.qos, + }, }; struct { struct { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 64f7471a57bb..4f70c4de69a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -454,6 +454,13 @@ struct mt7615_bss_basic_tlv { u8 pad[3]; } __packed; +struct mt7615_bss_qos_tlv { + __le16 tag; + __le16 len; + u8 qos; + u8 pad[3]; +} __packed; + struct mt7615_wow_ctrl_tlv { __le16 tag; __le16 len; @@ -578,6 +585,8 @@ enum { UNI_BSS_INFO_BASIC = 0, UNI_BSS_INFO_RLM = 2, UNI_BSS_INFO_BCN_CONTENT = 7, + UNI_BSS_INFO_QBSS = 15, + UNI_BSS_INFO_UAPSD = 19, }; enum { @@ -891,6 +900,7 @@ struct wtbl_raw { sizeof(struct sta_rec_basic) 
+ \ sizeof(struct sta_rec_ht) + \ sizeof(struct sta_rec_vht) + \ + sizeof(struct sta_rec_uapsd) + \ sizeof(struct tlv) + \ MT7615_WTBL_UPDATE_MAX_SIZE) @@ -980,6 +990,17 @@ struct sta_rec_ba { __le16 winsize; } __packed; +struct sta_rec_uapsd { + __le16 tag; + __le16 len; + u8 dac_map; + u8 tac_map; + u8 max_sp; + u8 rsv0; + __le16 listen_interval; + u8 rsv1[2]; +} __packed; + enum { STA_REC_BASIC, STA_REC_RA, From b876658b5e554c6bd251720b7cd40de5b5f6c366 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Thu, 18 Jun 2020 12:41:48 +0800 Subject: [PATCH 1345/2056] mt76: mt7615: add .set_tsf callback It is useful for IBSS Mesh to adjust t_clockdrift. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/main.c | 21 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/regs.h | 1 + 2 files changed, 22 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index e6dbd7034bd7..f8ce485c8d19 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -740,6 +740,26 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) return tsf.t64; } +static void +mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u64 timestamp) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + union { + u64 t64; + u32 t32[2]; + } tsf = { .t64 = timestamp, }; + + mutex_lock(&dev->mt76.mutex); + + mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); + mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); + /* TSF software overwrite */ + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE); + + mutex_unlock(&dev->mt76.mutex); +} + static void mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) { @@ -1038,6 +1058,7 @@ const struct ieee80211_ops mt7615_ops = { .channel_switch_beacon = mt7615_channel_switch_beacon, .get_stats = mt7615_get_stats, .get_tsf = mt7615_get_tsf, + .set_tsf = mt7615_set_tsf, .get_survey = mt76_get_survey, .get_antenna = mt76_get_antenna, .set_antenna = mt7615_set_antenna, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index aee433a9eff6..f0b36b162bf3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -417,6 +417,7 @@ enum mt7615_reg_base { #define MT_LPON_T0CR MT_LPON(0x010) #define MT_LPON_T0CR_MODE GENMASK(1, 0) +#define MT_LPON_T0CR_WRITE BIT(0) #define MT_LPON_UTTR0 MT_LPON(0x018) #define MT_LPON_UTTR1 MT_LPON(0x01c) From dcdecb128b2e0856e3131d93102d80bd748f2d9b Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Mon, 15 Jun 2020 02:23:34 +0800 Subject: [PATCH 1346/2056] mt76: mt7915: add a fixed AC queue mapping In MT7915, hardware queue map is flexible. However, certain firmware modules like MU and U-APSD presume a fixed queue order to adapt some devices that have DMA scheduler with a strict order, so this patch can help in the long run. 
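The remap is needed because mac80211 numbers its access categories VO=0, VI=1, BE=2, BK=3, while the LMAC queue enum added by this patch starts at MT_LMAC_AC00 for BK and ends at MT_LMAC_AC03 for VO. A stand-alone sketch of the same table, using local copies of the four AC values instead of the kernel headers (illustrative only; the fallback to BE mirrors mt7915_lmac_mapping() in the diff below):

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };                    /* mac80211 order */
enum { LMAC_AC00, LMAC_AC01, LMAC_AC02, LMAC_AC03 };    /* LMAC order     */

static int lmac_mapping(int ac)
{
        static const int map[] = {
                [AC_BK] = LMAC_AC00,
                [AC_BE] = LMAC_AC01,
                [AC_VI] = LMAC_AC02,
                [AC_VO] = LMAC_AC03,
        };

        if (ac < 0 || ac >= 4)
                return LMAC_AC01;       /* default to BE, as the driver does */

        return map[ac];
}

int main(void)
{
        static const char * const names[] = { "VO", "VI", "BE", "BK" };
        int ac;

        for (ac = 0; ac < 4; ac++)
                printf("mac80211 AC_%s (%d) -> LMAC queue %d\n",
                       names[ac], ac, lmac_mapping(ac));

        return 0;
}
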
Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/mac.c | 2 +- .../net/wireless/mediatek/mt76/mt7915/mac.h | 10 -------- .../net/wireless/mediatek/mt76/mt7915/main.c | 3 +++ .../wireless/mediatek/mt76/mt7915/mt7915.h | 25 +++++++++++++++++++ 4 files changed, 29 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index a264e304a3df..660e1820cccf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -593,7 +593,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { q_idx = wmm_idx * MT7915_MAX_WMM_SETS + - skb_get_queue_mapping(skb); + mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); p_fmt = MT_TX_TYPE_CT; } else if (beacon) { q_idx = MT_LMAC_BCN0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index b9bc8b25b031..4b0871ab2414 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -149,16 +149,6 @@ enum tx_pkt_type { MT_TX_TYPE_FW, }; -enum tx_pkt_queue_idx { - MT_LMAC_AC00, - MT_LMAC_AC01, - MT_LMAC_AC02, - MT_LMAC_AC03, - MT_LMAC_ALTX0 = 0x10, - MT_LMAC_BMC0 = 0x10, - MT_LMAC_BCN0 = 0x12, -}; - enum tx_port_idx { MT_TX_PORT_IDX_LMAC, MT_TX_PORT_IDX_MCU diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 05b5650c56c8..e09899748c9b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -350,9 +350,12 @@ static int mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, const struct ieee80211_tx_queue_params *params) { + struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; /* no need to update right away, we'll get BSS_CHANGED_QOS */ + queue = mt7915_lmac_mapping(dev, queue); + mvif->wmm[queue].cw_min = params->cw_min; mvif->wmm[queue].cw_max = params->cw_max; mvif->wmm[queue].aifs = params->aifs; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 85d74ecd0351..b3b00d099236 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -199,6 +199,16 @@ enum { EXT_BSSID_END }; +enum { + MT_LMAC_AC00, + MT_LMAC_AC01, + MT_LMAC_AC02, + MT_LMAC_AC03, + MT_LMAC_ALTX0 = 0x10, + MT_LMAC_BMC0, + MT_LMAC_BCN0, +}; + enum { MT_RX_SEL0, MT_RX_SEL1, @@ -254,6 +264,21 @@ mt7915_ext_phy(struct mt7915_dev *dev) return phy->priv; } +static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac) +{ + static const u8 lmac_queue_map[] = { + [IEEE80211_AC_BK] = MT_LMAC_AC00, + [IEEE80211_AC_BE] = MT_LMAC_AC01, + [IEEE80211_AC_VI] = MT_LMAC_AC02, + [IEEE80211_AC_VO] = MT_LMAC_AC03, + }; + + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) + return MT_LMAC_AC01; /* BE */ + + return lmac_queue_map[ac]; +} + static inline void mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid, enum mt7915_ampdu_state state) From f68e6a1f85c148f0b238bdaef22f31b01d5cf51e Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Mon, 15 Jun 2020 02:23:35 +0800 Subject: [PATCH 1347/2056] mt76: mt7915: add MU-MIMO support Enable MU-MIMO DL/UL and add relative counters in debugfs. 
Currently MU modules read WTBL first to notify BA changes to other cross modules, so adjust mt7915_mcu_sta_ba() accordingly. Tested-by: Evelyn Tsai Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7915/debugfs.c | 9 +- .../net/wireless/mediatek/mt76/mt7915/mcu.c | 85 ++++++++++++++++--- .../net/wireless/mediatek/mt76/mt7915/mcu.h | 6 +- .../net/wireless/mediatek/mt76/mt7915/regs.h | 5 ++ 4 files changed, 91 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 5278bee812f1..95e073c02ce6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -178,7 +178,14 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s) seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n", FIELD_GET(MT_ETBF_TX_FB_TRI, cnt)); - /* Tx SU counters */ + /* Tx SU & MU counters */ + cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy)); + seq_printf(s, "Tx multi-user Beamforming counts: %ld\n", + FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt)); + cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy)); + seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt); + cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy)); + seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt); cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index c8c12c740c1a..ca3e700425b6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1166,6 +1166,23 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; struct sk_buff *skb; + int ret; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, + MT7915_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); + + wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, + &skb); + mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); + + ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); + if (ret) + return ret; skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, MT7915_STA_UPDATE_MAX_SIZE); @@ -1173,11 +1190,6 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev, return PTR_ERR(skb); mt7915_mcu_sta_ba_tlv(skb, params, enable, tx); - sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); - - wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, - &skb); - mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); return __mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD_STA_REC_UPDATE, true); @@ -1466,16 +1478,39 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); } +static int +mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct sk_buff *skb; + int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru); + + if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he) + return 0; + + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* starec muru */ + 
mt7915_mcu_sta_muru_tlv(skb, sta); + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_STA_REC_UPDATE, true); +} + static void mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, struct ieee80211_sta *sta) { struct tlv *tlv; + /* starec ht */ if (sta->ht_cap.ht_supported) { struct sta_rec_ht *ht; - /* starec ht */ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht *)tlv; ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); @@ -1495,10 +1530,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, /* starec he */ if (sta->he_cap.has_he) mt7915_mcu_sta_he_tlv(skb, sta); - - /* starec muru */ - if (sta->he_cap.has_he || sta->vht_cap.vht_supported) - mt7915_mcu_sta_muru_tlv(skb, sta); } static void @@ -2064,6 +2095,32 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, MCU_EXT_CMD_STA_REC_UPDATE, true); } +static int +mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ +#define MT_STA_BSS_GROUP 1 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; + struct { + __le32 action; + u8 wlan_idx_lo; + u8 status; + u8 wlan_idx_hi; + u8 rsv0[5]; + __le32 val; + u8 rsv1[8]; + } __packed req = { + .action = cpu_to_le32(MT_STA_BSS_GROUP), + .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), + .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), + .val = cpu_to_le32(mvif->idx), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL, + &req, sizeof(req), true); +} + int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { @@ -2073,10 +2130,18 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, return 0; /* must keep the order */ + ret = mt7915_mcu_add_group(dev, vif, sta); + if (ret) + return ret; + ret = mt7915_mcu_add_txbf(dev, vif, sta, enable); if (ret) return ret; + ret = mt7915_mcu_add_mu(dev, vif, sta); + if (ret) + return ret; + if (enable) return mt7915_mcu_add_rate_ctrl(dev, vif, sta); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h index c241dd7c4c36..cb35e718409a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h @@ -201,6 +201,7 @@ enum { MCU_EXT_CMD_EDCA_UPDATE = 0x27, MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, MCU_EXT_CMD_THERMAL_CTRL = 0x2c, + MCU_EXT_CMD_SET_DRR_CTRL = 0x36, MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, MCU_EXT_CMD_PROTECT_CTRL = 0x3e, MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, @@ -653,7 +654,7 @@ struct sta_rec_muru { bool ofdma_ul_en; bool mimo_dl_en; bool mimo_ul_en; - bool rsv[4]; + u8 rsv[4]; } cfg; struct { @@ -664,7 +665,7 @@ struct sta_rec_muru { bool lt16_sigb; bool rx_su_comp_sigb; bool rx_su_non_comp_sigb; - bool rsv; + u8 rsv; } ofdma_dl; struct { @@ -951,7 +952,6 @@ enum { sizeof(struct sta_rec_ba) + \ sizeof(struct sta_rec_vht) + \ sizeof(struct tlv) + \ - sizeof(struct sta_rec_muru) + \ MT7915_WTBL_UPDATE_MAX_SIZE) #define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \ diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h index c121715f8bff..e0989141d9da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h @@ -117,11 +117,16 @@ #define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) #define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) 
+#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) +#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) + #define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) #define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) #define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) #define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) +#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) +#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4) #define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) #define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4)) From 80c9934004046a99f29ec89140556a2733641783 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Mon, 15 Jun 2020 02:23:36 +0800 Subject: [PATCH 1348/2056] mt76: mt7915: use ieee80211_tx_queue_params to avoid open coded This is easy to add MU EDCA parameters in the future. This patch also fixes a wrong cw_min assignment. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 6 +----- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 14 +++++++------- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 9 ++------- 3 files changed, 10 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index e09899748c9b..0c1217d3acd8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -355,11 +355,7 @@ mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, /* no need to update right away, we'll get BSS_CHANGED_QOS */ queue = mt7915_lmac_mapping(dev, queue); - - mvif->wmm[queue].cw_min = params->cw_min; - mvif->wmm[queue].cw_max = params->cw_max; - mvif->wmm[queue].aifs = params->aifs; - mvif->wmm[queue].txop = params->txop; + mvif->queue_params[queue] = *params; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index ca3e700425b6..fa9f32fa9f2e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -2888,23 +2888,23 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) int ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; struct edca *e = &req.edca[ac]; e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS; - e->aifs = mvif->wmm[ac].aifs; - e->txop = cpu_to_le16(mvif->wmm[ac].txop); + e->aifs = q->aifs; + e->txop = cpu_to_le16(q->txop); - if (mvif->wmm[ac].cw_min) - e->cw_min = fls(mvif->wmm[ac].cw_max); + if (q->cw_min) + e->cw_min = fls(q->cw_min); else e->cw_min = 5; - if (mvif->wmm[ac].cw_max) - e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max)); + if (q->cw_max) + e->cw_max = cpu_to_le16(fls(q->cw_max)); else e->cw_max = cpu_to_le16(10); } - return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req, sizeof(req), true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index b3b00d099236..4a063c1e5ea2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -99,15 +99,10 @@ struct mt7915_vif { u8 band_idx; u8 wmm_idx; - struct { - u16 cw_min; - u16 cw_max; - u16 txop; - u8 aifs; - } wmm[IEEE80211_NUM_ACS]; - struct mt7915_sta sta; struct mt7915_dev *dev; + + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; }; struct mib_stats { From dc076af55b86b4992114f6b3b889fbc96c5d4fd4 Mon Sep 17 
00:00:00 2001 From: Ryder Lee Date: Mon, 22 Jun 2020 10:16:55 +0800 Subject: [PATCH 1349/2056] mt76: mt7915: overwrite qid for non-bufferable mgmt frames Overwrite hw queue id for non-bufferable management frames if the hw/fw support always txq (altxq) in order to be in sync with mac txwi code Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/dma.c | 21 ++++++++++--------- .../net/wireless/mediatek/mt76/mt7915/mac.c | 17 ++++++++------- .../net/wireless/mediatek/mt76/mt7915/pci.c | 2 +- 3 files changed, 21 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index 766185d1aa21..a8832c5e6004 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -79,26 +79,27 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, } } +static void +mt7915_tx_cleanup(struct mt7915_dev *dev) +{ + mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); + mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); +} + static int mt7915_poll_tx(struct napi_struct *napi, int budget) { - static const u8 queue_map[] = { - MT_TXQ_MCU, - MT_TXQ_MCU_WA, - MT_TXQ_BE - }; struct mt7915_dev *dev; - int i; dev = container_of(napi, struct mt7915_dev, mt76.tx_napi); - for (i = 0; i < ARRAY_SIZE(queue_map); i++) - mt76_queue_tx_cleanup(dev, queue_map[i], false); + mt7915_tx_cleanup(dev); if (napi_complete_done(napi, 0)) mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL); - for (i = 0; i < ARRAY_SIZE(queue_map); i++) - mt76_queue_tx_cleanup(dev, queue_map[i], false); + mt7915_tx_cleanup(dev); mt7915_mac_sta_poll(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 660e1820cccf..822540e5df6b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -591,16 +591,16 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { + if (beacon) { + p_fmt = MT_TX_TYPE_FW; + q_idx = MT_LMAC_BCN0; + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { + p_fmt = MT_TX_TYPE_CT; + q_idx = MT_LMAC_ALTX0; + } else { + p_fmt = MT_TX_TYPE_CT; q_idx = wmm_idx * MT7915_MAX_WMM_SETS + mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); - p_fmt = MT_TX_TYPE_CT; - } else if (beacon) { - q_idx = MT_LMAC_BCN0; - p_fmt = MT_TX_TYPE_FW; - } else { - q_idx = MT_LMAC_ALTX0; - p_fmt = MT_TX_TYPE_CT; } val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | @@ -616,6 +616,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi, FIELD_PREP(MT_TXD1_TID, skb->priority & IEEE80211_QOS_CTL_TID_MASK) | FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); + if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) val |= MT_TXD1_TGID; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c index 7937c6965f59..0ec4e184b889 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c @@ -103,7 +103,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev, static const struct mt76_driver_ops drv_ops = { /* txwi_size = txd size + txp size */ .txwi_size = 
MT_TXD_SIZE + sizeof(struct mt7915_txp), - .drv_flags = MT_DRV_TXWI_NO_FREE, + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, .survey_flags = SURVEY_INFO_TIME_TX | SURVEY_INFO_TIME_RX | SURVEY_INFO_TIME_BSS_RX, From 6ed942af4f68173eb511bc342d53f2593182d449 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Thu, 18 Jun 2020 19:00:23 +0200 Subject: [PATCH 1350/2056] mt76: mt76x2e: rename routines in pci.c Rely on mt76x2e prefix in mt76x2/pci.c and align to the rest of mt76 code Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76x2/pci.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index 53ca0cedf026..f36c71c1bc58 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -9,7 +9,7 @@ #include "mt76x2.h" -static const struct pci_device_id mt76pci_device_table[] = { +static const struct pci_device_id mt76x2e_device_table[] = { { PCI_DEVICE(0x14c3, 0x7662) }, { PCI_DEVICE(0x14c3, 0x7612) }, { PCI_DEVICE(0x14c3, 0x7602) }, @@ -17,7 +17,7 @@ static const struct pci_device_id mt76pci_device_table[] = { }; static int -mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), @@ -93,7 +93,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } static void -mt76pci_remove(struct pci_dev *pdev) +mt76x2e_remove(struct pci_dev *pdev) { struct mt76_dev *mdev = pci_get_drvdata(pdev); struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); @@ -103,16 +103,16 @@ mt76pci_remove(struct pci_dev *pdev) mt76_free_device(mdev); } -MODULE_DEVICE_TABLE(pci, mt76pci_device_table); +MODULE_DEVICE_TABLE(pci, mt76x2e_device_table); MODULE_FIRMWARE(MT7662_FIRMWARE); MODULE_FIRMWARE(MT7662_ROM_PATCH); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76pci_driver = { .name = KBUILD_MODNAME, - .id_table = mt76pci_device_table, - .probe = mt76pci_probe, - .remove = mt76pci_remove, + .id_table = mt76x2e_device_table, + .probe = mt76x2e_probe, + .remove = mt76x2e_remove, }; module_pci_driver(mt76pci_driver); From 5bb923c767a4d2b4c20e7d4f437e35754ccd6554 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 17 Jun 2020 15:21:39 +0200 Subject: [PATCH 1351/2056] mt76: mt7615: schedule tx tasklet and sta poll on mac tx free Unlike on earlier chips, DMA completion on MT7615 does not imply actually having sent out any packets. Since AQL will prevent filling the hardware queues and will only allow more packets to be passed to the driver after tx completion, it makes much more sense to schedule the tx tasklet there. 
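In code terms the change amounts to the pattern below, a simplified restatement of the diff that follows rather than additional driver code (tx_free_done is an illustrative name; in the driver this runs at the end of mt7615_mac_tx_free()):

static void tx_free_done(struct mt7615_dev *dev)
{
        /* per-skb tx status has been reported and the skbs freed above */

        rcu_read_lock();
        mt7615_mac_sta_poll(dev);               /* refresh per-sta tx stats */
        rcu_read_unlock();

        /* AQL releases more frames only after completions are seen, so
         * this is the natural point to schedule further transmissions.
         */
        tasklet_schedule(&dev->mt76.tx_tasklet);
}
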
This is also needed for scheduling tx in testmode support Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/dma.c | 4 ---- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index e5a965df899a..1231a5ddf9ea 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -122,10 +122,6 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget) mt7615_tx_cleanup(dev); - rcu_read_lock(); - mt7615_mac_sta_poll(dev); - rcu_read_unlock(); - tasklet_schedule(&dev->mt76.tx_tasklet); return 0; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index d150fac50c00..1dc291e8b766 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1399,6 +1399,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb) } dev_kfree_skb(skb); + + rcu_read_lock(); + mt7615_mac_sta_poll(dev); + rcu_read_unlock(); + + tasklet_schedule(&dev->mt76.tx_tasklet); } void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, From 557b5a17476893e2f40fd8e66eb46362804df2f5 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 17 Jun 2020 20:44:23 +0200 Subject: [PATCH 1352/2056] mt76: mt7615: add support for accessing mapped registers via bus ops Makes it possible to read/write them via debugfs, similar to mt7603 Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mmio.c | 46 +++++++++++++++++++ .../wireless/mediatek/mt76/mt7615/mt7615.h | 1 + 2 files changed, 47 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index 2e99845b9c96..07ad64e4a77a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -140,6 +140,38 @@ static void mt7615_irq_tasklet(unsigned long data) mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); } +static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) +{ + if (addr < 0x100000) + return addr; + + return mt7615_reg_map(dev, addr); +} + +static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + return dev->bus_ops->rr(mdev, addr); +} + +static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + dev->bus_ops->wr(mdev, addr, val); +} + +static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + u32 addr = __mt7615_reg_addr(dev, offset); + + return dev->bus_ops->rmw(mdev, addr, mask, val); +} + int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, int irq, const u32 *map) { @@ -159,6 +191,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, .sta_remove = mt7615_mac_sta_remove, .update_survey = mt7615_update_channel, }; + struct mt76_bus_ops *bus_ops; struct ieee80211_ops *ops; struct mt7615_dev *dev; struct mt76_dev *mdev; @@ -182,6 +215,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, (mt76_rr(dev, MT_HW_REV) & 0xff); dev_dbg(mdev->dev, "ASIC 
revision: %04x\n", mdev->rev); + dev->bus_ops = dev->mt76.bus; + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), + GFP_KERNEL); + if (!bus_ops) { + ret = -ENOMEM; + goto error; + } + + bus_ops->rr = mt7615_rr; + bus_ops->wr = mt7615_wr; + bus_ops->rmw = mt7615_rmw; + dev->mt76.bus = bus_ops; + ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (ret) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 913dac5c3006..78cc2a2c31f7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -240,6 +240,7 @@ struct mt7615_dev { struct mt76_phy mphy; }; + const struct mt76_bus_ops *bus_ops; struct tasklet_struct irq_tasklet; struct mt7615_phy phy; From dc80405868e4d8ac94209cbdeb1e4de5b4991a01 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 18 Jun 2020 20:33:32 +0200 Subject: [PATCH 1353/2056] mt76: mt7615: add support for accessing RF registers via MCU Includes debugfs files for testing it. Will be used for testmode support. Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7615/debugfs.c | 28 ++++++++++++ .../net/wireless/mediatek/mt76/mt7615/mcu.c | 43 ++++++++++++++++++- .../net/wireless/mediatek/mt76/mt7615/mcu.h | 4 +- .../wireless/mediatek/mt76/mt7615/mt7615.h | 5 +++ 4 files changed, 77 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index d06afcf46d67..8bb7c64db738 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -285,6 +285,29 @@ mt7615_queues_read(struct seq_file *s, void *data) return 0; } +static int +mt7615_rf_reg_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val); + + return 0; +} + +static int +mt7615_rf_reg_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set, + "0x%08llx\n"); + int mt7615_init_debugfs(struct mt7615_dev *dev) { struct dentry *dir; @@ -324,6 +347,11 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, mt7615_read_temperature); + debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf); + debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg); + debugfs_create_file_unsafe("rf_regval", 0600, dir, dev, + &fops_rf_reg); + return 0; } EXPORT_SYMBOL_GPL(mt7615_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index d3a8ada3b779..b4440f131925 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -151,8 +151,11 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, break; default: mcu_txd->cid = MCU_CMD_EXT_CID; - mcu_txd->set_query = MCU_Q_SET; - mcu_txd->ext_cid = cmd; + if (cmd & MCU_QUERY_PREFIX) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; + mcu_txd->ext_cid = mcu_cmd; mcu_txd->ext_cid_ack = 1; break; } @@ -192,6 +195,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, skb_pull(skb, sizeof(*rxd)); ret = le32_to_cpu(*(__le32 *)skb->data); break; + case 
MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX: + skb_pull(skb, sizeof(*rxd)); + ret = le32_to_cpu(*(__le32 *)&skb->data[8]); + break; case MCU_UNI_CMD_DEV_INFO_UPDATE: case MCU_UNI_CMD_BSS_INFO_UPDATE: case MCU_UNI_CMD_STA_REC_UPDATE: @@ -271,6 +278,38 @@ int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, } EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send); +u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg) +{ + struct { + __le32 wifi_stream; + __le32 address; + __le32 data; + } req = { + .wifi_stream = cpu_to_le32(wf), + .address = cpu_to_le32(reg), + }; + + return __mt76_mcu_send_msg(&dev->mt76, + MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX, + &req, sizeof(req), true); +} + +int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) +{ + struct { + __le32 wifi_stream; + __le32 address; + __le32 data; + } req = { + .wifi_stream = cpu_to_le32(wf), + .address = cpu_to_le32(reg), + .data = cpu_to_le32(val), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req, + sizeof(req), false); +} + static void mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 4f70c4de69a4..8e2be150a556 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -238,8 +238,9 @@ enum { #define MCU_FW_PREFIX BIT(31) #define MCU_UNI_PREFIX BIT(30) #define MCU_CE_PREFIX BIT(29) +#define MCU_QUERY_PREFIX BIT(28) #define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \ - MCU_CE_PREFIX) + MCU_CE_PREFIX | MCU_QUERY_PREFIX) enum { MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01, @@ -254,6 +255,7 @@ enum { }; enum { + MCU_EXT_CMD_RF_REG_ACCESS = 0x02, MCU_EXT_CMD_PM_STATE_CTRL = 0x07, MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 78cc2a2c31f7..6e423f45b63f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -281,6 +281,9 @@ struct mt7615_dev { struct work_struct wtbl_work; struct list_head wd_head; + + u32 debugfs_rf_wf; + u32 debugfs_rf_reg; }; enum tx_pkt_queue_idx { @@ -522,6 +525,8 @@ u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid); int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, int len, bool wait_resp); +u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg); +int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val); int mt7615_mcu_set_dbdc(struct mt7615_dev *dev); int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable); From b8c978663efb3991369b4944f79b098fc7ebb4e4 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 18 Jun 2020 20:38:45 +0200 Subject: [PATCH 1354/2056] mt76: mt7615: use full on-chip memory address for WF_PHY registers Now that the bus access functions can use mapping for accessing full register addresses, use it for WF_PHY registers to keep them constant. 
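What makes the constant base address safe is the address translation added earlier in this series: every register access now goes through a helper that passes small bus-relative offsets straight through and maps anything else as a full on-chip address. A condensed sketch, where resolve_addr() and read_wf_phy_reg() are illustrative wrappers around the helpers that actually exist in the driver, and the chip-specific remap window programmed by mt7615_reg_map() is not shown:

static u32 resolve_addr(struct mt7615_dev *dev, u32 addr)
{
        if (addr < 0x100000)                    /* legacy bus-relative offset */
                return addr;

        return mt7615_reg_map(dev, addr);       /* full on-chip address */
}

static u32 read_wf_phy_reg(struct mt7615_dev *dev, u32 ofs)
{
        /* MT_WF_PHY_BASE is now the constant 0x82070000 on every bus */
        return dev->bus_ops->rr(&dev->mt76, resolve_addr(dev, 0x82070000 + ofs));
}
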
Needed for follow-up work on testmode support Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mmio.c | 2 -- drivers/net/wireless/mediatek/mt76/mt7615/regs.h | 3 +-- drivers/net/wireless/mediatek/mt76/mt7615/usb.c | 1 - 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index 07ad64e4a77a..461ac6c42271 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -17,7 +17,6 @@ const u32 mt7615e_reg_map[] = { [MT_CSR_BASE] = 0x07000, [MT_PLE_BASE] = 0x08000, [MT_PSE_BASE] = 0x0c000, - [MT_PHY_BASE] = 0x10000, [MT_CFG_BASE] = 0x20200, [MT_AGG_BASE] = 0x20a00, [MT_TMAC_BASE] = 0x21000, @@ -44,7 +43,6 @@ const u32 mt7663e_reg_map[] = { [MT_CSR_BASE] = 0x07000, [MT_PLE_BASE] = 0x08000, [MT_PSE_BASE] = 0x0c000, - [MT_PHY_BASE] = 0x10000, [MT_CFG_BASE] = 0x20000, [MT_AGG_BASE] = 0x22000, [MT_TMAC_BASE] = 0x24000, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index f0b36b162bf3..4743c12005a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -14,7 +14,6 @@ enum mt7615_reg_base { MT_CSR_BASE, MT_PLE_BASE, MT_PSE_BASE, - MT_PHY_BASE, MT_CFG_BASE, MT_AGG_BASE, MT_TMAC_BASE, @@ -169,7 +168,7 @@ enum mt7615_reg_base { #define MT_PSE_PG_INFO MT_PSE(0x194) #define MT_PSE_SRC_CNT GENMASK(27, 16) -#define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE]) +#define MT_WF_PHY_BASE 0x82070000 #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) #define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index f5dc1b828518..0ba28d37c414 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -25,7 +25,6 @@ static const u32 mt7663u_reg_map[] = { [MT_TOP_MISC_BASE] = 0x81020000, [MT_PLE_BASE] = 0x82060000, [MT_PSE_BASE] = 0x82068000, - [MT_PHY_BASE] = 0x82070000, [MT_WTBL_BASE_ADDR] = 0x820e0000, [MT_CFG_BASE] = 0x820f0000, [MT_AGG_BASE] = 0x820f2000, From beffe070b1880e7c53e3fc76f2e4e8f31d18f7c5 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 11 Jun 2020 17:45:58 +0200 Subject: [PATCH 1355/2056] mt76: vif_mask to struct mt76_phy All drivers use this in pretty much the same way. 
Moving it to core helps with some checks for the upcoming testmode support Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 2 ++ drivers/net/wireless/mediatek/mt76/mt7603/main.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | 2 -- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h | 1 - drivers/net/wireless/mediatek/mt76/mt76x02.h | 1 - drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c | 2 +- drivers/net/wireless/mediatek/mt76/mt76x02_util.c | 8 ++++---- drivers/net/wireless/mediatek/mt76/mt7915/main.c | 6 +++--- drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h | 1 - 10 files changed, 16 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 3d7db6ffb599..ece462e9c9ab 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -491,6 +491,8 @@ struct mt76_phy { struct mt76_sband sband_2g; struct mt76_sband sband_5g; + u32 vif_mask; + int txpower_cur; u8 antenna_mask; }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 83dfa6da4761..447f2c63ef38 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mutex_lock(&dev->mt76.mutex); - mvif->idx = ffs(~dev->vif_mask) - 1; + mvif->idx = ffs(~dev->mphy.vif_mask) - 1; if (mvif->idx >= MT7603_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) } idx = MT7603_WTBL_RESERVED - 1 - mvif->idx; - dev->vif_mask |= BIT(mvif->idx); + dev->mphy.vif_mask |= BIT(mvif->idx); INIT_LIST_HEAD(&mvif->sta.poll_list); mvif->sta.wcid.idx = idx; mvif->sta.wcid.hw_key_idx = -1; @@ -107,7 +107,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) spin_unlock_bh(&dev->sta_poll_lock); mutex_lock(&dev->mt76.mutex); - dev->vif_mask &= ~BIT(mvif->idx); + dev->mphy.vif_mask &= ~BIT(mvif->idx); mutex_unlock(&dev->mt76.mutex); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h index 7fadf094e9be..c86305241e66 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h +++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h @@ -108,8 +108,6 @@ struct mt7603_dev { u32 rxfilter; - u8 vif_mask; - struct list_head sta_poll_list; spinlock_t sta_poll_lock; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index f8ce485c8d19..abb9853eaa8b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -137,7 +137,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mutex_lock(&dev->mt76.mutex); - mvif->idx = ffs(~dev->vif_mask) - 1; + mvif->idx = ffs(~dev->mphy.vif_mask) - 1; if (mvif->idx >= MT7615_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -157,7 +157,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, else mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS; - dev->vif_mask |= BIT(mvif->idx); + dev->mphy.vif_mask |= BIT(mvif->idx); dev->omac_mask |= BIT(mvif->omac_idx); phy->omac_mask |= BIT(mvif->omac_idx); @@ -204,7 +204,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, mt76_txq_remove(&dev->mt76, 
vif->txq); mutex_lock(&dev->mt76.mutex); - dev->vif_mask &= ~BIT(mvif->idx); + dev->mphy.vif_mask &= ~BIT(mvif->idx); dev->omac_mask &= ~BIT(mvif->omac_idx); phy->omac_mask &= ~BIT(mvif->omac_idx); mutex_unlock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 6e423f45b63f..af86cf6925dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -244,7 +244,6 @@ struct mt7615_dev { struct tasklet_struct irq_tasklet; struct mt7615_phy phy; - u32 vif_mask; u32 omac_mask; u16 chainmask; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 4c9bbc7ce023..4660b9691ec3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -80,7 +80,6 @@ struct mt76x02_dev { struct mutex phy_mutex; - u16 vif_mask; u16 chainmask; u8 txdone_seq; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index cbbe986655fe..99611515a093 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -439,7 +439,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev) memset(msta, 0, sizeof(*msta)); } - dev->vif_mask = 0; + dev->mphy.vif_mask = 0; dev->mt76.beacon_mask = 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index 44822a849eb1..dbd4077ea283 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -305,7 +305,7 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) unsigned int idx = 0; /* Allow to change address in HW if we create first interface. */ - if (!dev->vif_mask && + if (!dev->mphy.vif_mask && (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) mt76x02_mac_setaddr(dev, vif->addr); @@ -330,11 +330,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) idx += 8; /* vif is already set or idx is 8 for AP/Mesh/... 
*/ - if (dev->vif_mask & BIT(idx) || + if (dev->mphy.vif_mask & BIT(idx) || (vif->type != NL80211_IFTYPE_STATION && idx > 7)) return -EBUSY; - dev->vif_mask |= BIT(idx); + dev->mphy.vif_mask |= BIT(idx); mt76x02_vif_init(dev, vif, idx); return 0; @@ -348,7 +348,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw, struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; mt76_txq_remove(&dev->mt76, vif->txq); - dev->vif_mask &= ~BIT(mvif->idx); + dev->mphy.vif_mask &= ~BIT(mvif->idx); } EXPORT_SYMBOL_GPL(mt76x02_remove_interface); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 0c1217d3acd8..f95a0b55c4a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -125,7 +125,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, mutex_lock(&dev->mt76.mutex); - mvif->idx = ffs(~phy->vif_mask) - 1; + mvif->idx = ffs(~phy->mt76->vif_mask) - 1; if (mvif->idx >= MT7915_MAX_INTERFACES) { ret = -ENOSPC; goto out; @@ -150,7 +150,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw, if (ret) goto out; - phy->vif_mask |= BIT(mvif->idx); + phy->mt76->vif_mask |= BIT(mvif->idx); phy->omac_mask |= BIT(mvif->omac_idx); idx = MT7915_WTBL_RESERVED - mvif->idx; @@ -194,7 +194,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw, mt76_txq_remove(&dev->mt76, vif->txq); mutex_lock(&dev->mt76.mutex); - phy->vif_mask &= ~BIT(mvif->idx); + phy->mt76->vif_mask &= ~BIT(mvif->idx); phy->omac_mask &= ~BIT(mvif->omac_idx); mutex_unlock(&dev->mt76.mutex); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h index 4a063c1e5ea2..d8a13b4a2359 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h @@ -120,7 +120,6 @@ struct mt7915_phy { struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES]; u32 rxfilter; - u32 vif_mask; u32 omac_mask; u16 noise; From f0efa8621550e77492719072d056f30569242b6b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 18 Jun 2020 21:12:24 +0200 Subject: [PATCH 1356/2056] mt76: add API for testmode support This can be used for calibration in the manufacturing process. It supports sending a configurable number of packets with a specific rate and configurable tx power levels / antenna settings. It also supports receiving packets and showing some statistics, including packet counters and detailed RSSI information. 
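For a driver, wiring into the new API boils down to filling a struct mt76_testmode_ops and pointing the new mt76_dev test_ops field at it; the exported mt76_testmode_cmd()/mt76_testmode_dump() helpers match the mac80211 .testmode_cmd/.testmode_dump signatures, so they can be plugged in there directly. A hypothetical hook-up, shown only to illustrate the API added by this patch; the mt76xx_tm_* names are stand-ins, not real driver code:

#include "mt76.h"       /* struct mt76_dev, mt76_testmode_ops (this patch) */

static int mt76xx_tm_set_state(struct mt76_dev *dev,
                               enum mt76_testmode_state state)
{
        /* start or stop tx/rx test frames in hardware */
        return 0;
}

static int mt76xx_tm_set_params(struct mt76_dev *dev, struct nlattr **tb,
                                enum mt76_testmode_state new_state)
{
        /* apply dev->test.tx_rate_*, tx_power[], tx_antenna_mask, ... */
        return 0;
}

static int mt76xx_tm_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
{
        /* append extra driver counters to the netlink reply */
        return 0;
}

static const struct mt76_testmode_ops mt76xx_testmode_ops = {
        .set_state  = mt76xx_tm_set_state,
        .set_params = mt76xx_tm_set_params,
        .dump_stats = mt76xx_tm_dump_stats,
};

/* at probe time, guarded by CONFIG_NL80211_TESTMODE:
 *      dev->mt76.test_ops = &mt76xx_testmode_ops;
 */
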
It will only be compiled in if CONFIG_NL80211_TESTMODE is enabled Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/Makefile | 1 + drivers/net/wireless/mediatek/mt76/dma.c | 6 + drivers/net/wireless/mediatek/mt76/eeprom.c | 5 + drivers/net/wireless/mediatek/mt76/mac80211.c | 7 + drivers/net/wireless/mediatek/mt76/mt76.h | 75 +++ drivers/net/wireless/mediatek/mt76/testmode.c | 497 ++++++++++++++++++ drivers/net/wireless/mediatek/mt76/testmode.h | 156 ++++++ drivers/net/wireless/mediatek/mt76/tx.c | 18 + 8 files changed, 765 insertions(+) create mode 100644 drivers/net/wireless/mediatek/mt76/testmode.c create mode 100644 drivers/net/wireless/mediatek/mt76/testmode.h diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index ef663b873b0b..949b3c8ffa1b 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -9,6 +9,7 @@ mt76-y := \ tx.o agg-rx.o mcu.o mt76-$(CONFIG_PCI) += pci.o +mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o mt76-usb-y := usb.o usb_trace.o diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index f4d6074fe32a..6c25859dd386 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -370,6 +370,12 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, tx_info.buf[n].len, DMA_TO_DEVICE); free: +#ifdef CONFIG_NL80211_TESTMODE + /* fix tx_done accounting on queue overflow */ + if (tx_info.skb == dev->test.tx_skb) + dev->test.tx_done--; +#endif + e.skb = tx_info.skb; e.txwi = t; dev->drv->tx_complete_skb(dev, qid, &e); diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index c236e303ccfd..3044e0069991 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -74,6 +74,11 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len) &data[i]); } +#ifdef CONFIG_NL80211_TESTMODE + dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL); + dev->test.mtd_offset = offset; +#endif + out_put_node: of_node_put(np); return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index d8533d0c6e81..10e6dbb64996 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -505,6 +505,13 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb) return; } +#ifdef CONFIG_NL80211_TESTMODE + if (dev->test.state == MT76_TM_STATE_RX_FRAMES) { + dev->test.rx_stats.packets[q]++; + if (status->flag & RX_FLAG_FAILED_FCS_CRC) + dev->test.rx_stats.fcs_error[q]++; + } +#endif __skb_queue_tail(&dev->rx_skb[q], skb); } EXPORT_SYMBOL_GPL(mt76_rx); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index ece462e9c9ab..14b3a39cd55a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -15,6 +15,7 @@ #include #include #include "util.h" +#include "testmode.h" #define MT_TX_RING_SIZE 256 #define MT_MCU_RING_SIZE 32 @@ -475,6 +476,47 @@ struct mt76_rx_status { s8 chain_signal[IEEE80211_MAX_CHAINS]; }; +struct mt76_testmode_ops { + int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state); + int (*set_params)(struct mt76_dev *dev, struct nlattr **tb, + enum mt76_testmode_state new_state); + int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg); +}; + 
+struct mt76_testmode_data { + enum mt76_testmode_state state; + + u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)]; + struct sk_buff *tx_skb; + + u32 tx_count; + u16 tx_msdu_len; + + u8 tx_rate_mode; + u8 tx_rate_idx; + u8 tx_rate_nss; + u8 tx_rate_sgi; + u8 tx_rate_ldpc; + + u8 tx_antenna_mask; + + u32 freq_offset; + + u8 tx_power[4]; + u8 tx_power_control; + + const char *mtd_name; + u32 mtd_offset; + + u32 tx_pending; + u32 tx_queued; + u32 tx_done; + struct { + u64 packets[__MT_RXQ_MAX]; + u64 fcs_error[__MT_RXQ_MAX]; + } rx_stats; +}; + struct mt76_phy { struct ieee80211_hw *hw; struct mt76_dev *dev; @@ -574,6 +616,11 @@ struct mt76_dev { u32 rxfilter; +#ifdef CONFIG_NL80211_TESTMODE + const struct mt76_testmode_ops *test_ops; + struct mt76_testmode_data test; +#endif + union { struct mt76_mmio mmio; struct mt76_usb usb; @@ -807,6 +854,15 @@ static inline u8 mt76_tx_power_nss_delta(u8 nss) return nss_delta[nss - 1]; } +static inline bool mt76_testmode_enabled(struct mt76_dev *dev) +{ +#ifdef CONFIG_NL80211_TESTMODE + return dev->test.state != MT76_TM_STATE_OFF; +#else + return false; +#endif +} + void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb); @@ -879,6 +935,24 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac); void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif); +int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len); +int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, + struct netlink_callback *cb, void *data, int len); +int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state); + +static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable) +{ +#ifdef CONFIG_NL80211_TESTMODE + enum mt76_testmode_state state = MT76_TM_STATE_IDLE; + + if (disable || dev->test.state == MT76_TM_STATE_OFF) + state = MT76_TM_STATE_OFF; + + mt76_testmode_set_state(dev, state); +#endif +} + /* internal */ static inline struct ieee80211_hw * @@ -903,6 +977,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames, void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, struct napi_struct *napi); void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); +void mt76_testmode_tx_pending(struct mt76_dev *dev); /* usb */ static inline bool mt76u_urb_error(struct urb *urb) diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c new file mode 100644 index 000000000000..75bb02cdfdae --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/testmode.c @@ -0,0 +1,497 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 Felix Fietkau */ +#include "mt76.h" + +static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { + [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG }, + [MT76_TM_ATTR_STATE] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 }, + [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 }, + [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED }, + [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 
}, +}; + +void mt76_testmode_tx_pending(struct mt76_dev *dev) +{ + struct mt76_testmode_data *td = &dev->test; + struct mt76_wcid *wcid = &dev->global_wcid; + struct sk_buff *skb = td->tx_skb; + struct mt76_queue *q; + int qid; + + if (!skb || !td->tx_pending) + return; + + qid = skb_get_queue_mapping(skb); + q = dev->q_tx[qid].q; + + spin_lock_bh(&q->lock); + + while (td->tx_pending > 0 && q->queued < q->ndesc / 2) { + int ret; + + ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL); + if (ret < 0) + break; + + td->tx_pending--; + td->tx_queued++; + } + + dev->queue_ops->kick(dev, q); + + spin_unlock_bh(&q->lock); +} + + +static int +mt76_testmode_tx_init(struct mt76_dev *dev) +{ + struct mt76_testmode_data *td = &dev->test; + struct ieee80211_tx_info *info; + struct ieee80211_hdr *hdr; + struct sk_buff *skb; + u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | + IEEE80211_FCTL_FROMDS; + struct ieee80211_tx_rate *rate; + u8 max_nss = hweight8(dev->phy.antenna_mask); + + if (td->tx_antenna_mask) + max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); + + skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + dev_kfree_skb(td->tx_skb); + td->tx_skb = skb; + hdr = __skb_put_zero(skb, td->tx_msdu_len); + hdr->frame_control = cpu_to_le16(fc); + memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr)); + memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr)); + memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr)); + + info = IEEE80211_SKB_CB(skb); + info->flags = IEEE80211_TX_CTL_INJECTED | + IEEE80211_TX_CTL_NO_ACK | + IEEE80211_TX_CTL_NO_PS_BUFFER; + rate = &info->control.rates[0]; + rate->count = 1; + rate->idx = td->tx_rate_idx; + + switch (td->tx_rate_mode) { + case MT76_TM_TX_MODE_CCK: + if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) + return -EINVAL; + + if (rate->idx > 4) + return -EINVAL; + break; + case MT76_TM_TX_MODE_OFDM: + if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) + break; + + if (rate->idx > 8) + return -EINVAL; + + rate->idx += 4; + break; + case MT76_TM_TX_MODE_HT: + if (rate->idx > 8 * max_nss && + !(rate->idx == 32 && + dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40)) + return -EINVAL; + + rate->flags |= IEEE80211_TX_RC_MCS; + break; + case MT76_TM_TX_MODE_VHT: + if (rate->idx > 9) + return -EINVAL; + + if (td->tx_rate_nss > max_nss) + return -EINVAL; + + ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss); + rate->flags |= IEEE80211_TX_RC_VHT_MCS; + break; + default: + break; + } + + if (td->tx_rate_sgi) + rate->flags |= IEEE80211_TX_RC_SHORT_GI; + + if (td->tx_rate_ldpc) + info->flags |= IEEE80211_TX_CTL_LDPC; + + if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) { + switch (dev->phy.chandef.width) { + case NL80211_CHAN_WIDTH_40: + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case NL80211_CHAN_WIDTH_80: + rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; + break; + default: + break; + } + } + + skb_set_queue_mapping(skb, IEEE80211_AC_BE); + + return 0; +} + +static void +mt76_testmode_tx_start(struct mt76_dev *dev) +{ + struct mt76_testmode_data *td = &dev->test; + + td->tx_queued = 0; + td->tx_done = 0; + td->tx_pending = td->tx_count; + tasklet_schedule(&dev->tx_tasklet); +} + +static void +mt76_testmode_tx_stop(struct mt76_dev *dev) +{ + struct mt76_testmode_data *td = &dev->test; + + tasklet_disable(&dev->tx_tasklet); + + td->tx_pending = 0; + + 
tasklet_enable(&dev->tx_tasklet); + + wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ); + + dev_kfree_skb(td->tx_skb); + td->tx_skb = NULL; +} + +static inline void +mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx) +{ + td->param_set[idx / 32] |= BIT(idx % 32); +} + +static inline bool +mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx) +{ + return td->param_set[idx / 32] & BIT(idx % 32); +} + +static void +mt76_testmode_init_defaults(struct mt76_dev *dev) +{ + struct mt76_testmode_data *td = &dev->test; + + if (td->tx_msdu_len > 0) + return; + + td->tx_msdu_len = 1024; + td->tx_count = 1; + td->tx_rate_mode = MT76_TM_TX_MODE_OFDM; + td->tx_rate_nss = 1; +} + +static int +__mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state) +{ + enum mt76_testmode_state prev_state = dev->test.state; + int err; + + if (prev_state == MT76_TM_STATE_TX_FRAMES) + mt76_testmode_tx_stop(dev); + + if (state == MT76_TM_STATE_TX_FRAMES) { + err = mt76_testmode_tx_init(dev); + if (err) + return err; + } + + err = dev->test_ops->set_state(dev, state); + if (err) { + if (state == MT76_TM_STATE_TX_FRAMES) + mt76_testmode_tx_stop(dev); + + return err; + } + + if (state == MT76_TM_STATE_TX_FRAMES) + mt76_testmode_tx_start(dev); + else if (state == MT76_TM_STATE_RX_FRAMES) { + memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats)); + } + + dev->test.state = state; + + return 0; +} + +int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state) +{ + struct mt76_testmode_data *td = &dev->test; + struct ieee80211_hw *hw = dev->phy.hw; + + if (state == td->state && state == MT76_TM_STATE_OFF) + return 0; + + if (state > MT76_TM_STATE_OFF && + (!test_bit(MT76_STATE_RUNNING, &dev->phy.state) || + !(hw->conf.flags & IEEE80211_CONF_MONITOR))) + return -ENOTCONN; + + if (state != MT76_TM_STATE_IDLE && + td->state != MT76_TM_STATE_IDLE) { + int ret; + + ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE); + if (ret) + return ret; + } + + return __mt76_testmode_set_state(dev, state); + +} +EXPORT_SYMBOL(mt76_testmode_set_state); + +static int +mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max) +{ + u8 val; + + if (!attr) + return 0; + + val = nla_get_u8(attr); + if (val < min || val > max) + return -EINVAL; + + *dest = val; + return 0; +} + +int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + void *data, int len) +{ + struct mt76_phy *phy = hw->priv; + struct mt76_dev *dev = phy->dev; + struct mt76_testmode_data *td = &dev->test; + struct nlattr *tb[NUM_MT76_TM_ATTRS]; + u32 state; + int err; + int i; + + if (!dev->test_ops) + return -EOPNOTSUPP; + + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + + err = -EINVAL; + + mutex_lock(&dev->mutex); + + if (tb[MT76_TM_ATTR_RESET]) { + mt76_testmode_set_state(dev, MT76_TM_STATE_OFF); + memset(td, 0, sizeof(*td)); + } + + mt76_testmode_init_defaults(dev); + + if (tb[MT76_TM_ATTR_TX_COUNT]) + td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]); + + if (tb[MT76_TM_ATTR_TX_LENGTH]) { + u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); + + if (val > IEEE80211_MAX_FRAME_LEN || + val < sizeof(struct ieee80211_hdr)) + goto out; + + td->tx_msdu_len = val; + } + + if (tb[MT76_TM_ATTR_TX_RATE_IDX]) + td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]); + + if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode, + 0, MT76_TM_TX_MODE_MAX) || + 
mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss, + 1, hweight8(phy->antenna_mask)) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1, + phy->antenna_mask) || + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL], + &td->tx_power_control, 0, 1)) + goto out; + + if (tb[MT76_TM_ATTR_FREQ_OFFSET]) + td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]); + + if (tb[MT76_TM_ATTR_STATE]) { + state = nla_get_u32(tb[MT76_TM_ATTR_STATE]); + if (state > MT76_TM_STATE_MAX) + goto out; + } else { + state = td->state; + } + + if (tb[MT76_TM_ATTR_TX_POWER]) { + struct nlattr *cur; + int idx = 0; + int rem; + + nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) { + if (nla_len(cur) != 1 || + idx >= ARRAY_SIZE(td->tx_power)) + goto out; + + td->tx_power[idx++] = nla_get_u8(cur); + } + } + + if (dev->test_ops->set_params) { + err = dev->test_ops->set_params(dev, tb, state); + if (err) + goto out; + } + + for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++) + if (tb[i]) + mt76_testmode_param_set(td, i); + + err = 0; + if (tb[MT76_TM_ATTR_STATE]) + err = mt76_testmode_set_state(dev, state); + +out: + mutex_unlock(&dev->mutex); + + return err; +} +EXPORT_SYMBOL(mt76_testmode_cmd); + +static int +mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg) +{ + struct mt76_testmode_data *td = &dev->test; + u64 rx_packets = 0; + u64 rx_fcs_error = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) { + rx_packets += td->rx_stats.packets[i]; + rx_fcs_error += td->rx_stats.fcs_error[i]; + } + + if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) || + nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) || + nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) || + nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets, + MT76_TM_STATS_ATTR_PAD) || + nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error, + MT76_TM_STATS_ATTR_PAD)) + return -EMSGSIZE; + + if (dev->test_ops->dump_stats) + return dev->test_ops->dump_stats(dev, msg); + + return 0; +} + +int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, + struct netlink_callback *cb, void *data, int len) +{ + struct mt76_phy *phy = hw->priv; + struct mt76_dev *dev = phy->dev; + struct mt76_testmode_data *td = &dev->test; + struct nlattr *tb[NUM_MT76_TM_ATTRS] = {}; + int err = 0; + void *a; + int i; + + if (!dev->test_ops) + return -EOPNOTSUPP; + + if (cb->args[2]++ > 0) + return -ENOENT; + + if (data) { + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, + mt76_tm_policy, NULL); + if (err) + return err; + } + + mutex_lock(&dev->mutex); + + if (tb[MT76_TM_ATTR_STATS]) { + a = nla_nest_start(msg, MT76_TM_ATTR_STATS); + err = mt76_testmode_dump_stats(dev, msg); + nla_nest_end(msg, a); + + goto out; + } + + mt76_testmode_init_defaults(dev); + + err = -EMSGSIZE; + if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state)) + goto out; + + if (td->mtd_name && + (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) || + nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset))) + goto out; + + if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) || + nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) || + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) || + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) || + nla_put_u8(msg, 
MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) || + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) || + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) || + (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) && + nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) || + (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) && + nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) || + (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) && + nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset))) + goto out; + + if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) { + a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER); + if (!a) + goto out; + + for (i = 0; i < ARRAY_SIZE(td->tx_power); i++) + if (nla_put_u8(msg, i, td->tx_power[i])) + goto out; + + nla_nest_end(msg, a); + } + + err = 0; + +out: + mutex_unlock(&dev->mutex); + + return err; +} +EXPORT_SYMBOL(mt76_testmode_dump); diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h new file mode 100644 index 000000000000..691fe5773244 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/testmode.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (C) 2020 Felix Fietkau + */ +#ifndef __MT76_TESTMODE_H +#define __MT76_TESTMODE_H + +/** + * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA + * + * @MT76_TM_ATTR_UNSPEC: (invalid attribute) + * + * @MT76_TM_ATTR_RESET: reset parameters to default (flag) + * @MT76_TM_ATTR_STATE: test state (u32), see &enum mt76_testmode_state + * + * @MT76_TM_ATTR_MTD_PART: mtd partition used for eeprom data (string) + * @MT76_TM_ATTR_MTD_OFFSET: offset of eeprom data within the partition (u32) + * + * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting + * state to MT76_TM_STATE_TX_FRAMES (u32) + * @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32) + * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32) + * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode) + * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8) + * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8) + * @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8) + * @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8) + * + * @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8) + * @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8) + * @MT76_TM_ATTR_TX_POWER: per-antenna tx power array (nested, u8 attrs) + * + * @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32) + * + * @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr) + */ +enum mt76_testmode_attr { + MT76_TM_ATTR_UNSPEC, + + MT76_TM_ATTR_RESET, + MT76_TM_ATTR_STATE, + + MT76_TM_ATTR_MTD_PART, + MT76_TM_ATTR_MTD_OFFSET, + + MT76_TM_ATTR_TX_COUNT, + MT76_TM_ATTR_TX_LENGTH, + MT76_TM_ATTR_TX_RATE_MODE, + MT76_TM_ATTR_TX_RATE_NSS, + MT76_TM_ATTR_TX_RATE_IDX, + MT76_TM_ATTR_TX_RATE_SGI, + MT76_TM_ATTR_TX_RATE_LDPC, + + MT76_TM_ATTR_TX_ANTENNA, + MT76_TM_ATTR_TX_POWER_CONTROL, + MT76_TM_ATTR_TX_POWER, + + MT76_TM_ATTR_FREQ_OFFSET, + + MT76_TM_ATTR_STATS, + + /* keep last */ + NUM_MT76_TM_ATTRS, + MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1, +}; + +/** + * enum mt76_testmode_state - statistics attributes + * + * @MT76_TM_STATS_ATTR_TX_PENDING: pending tx frames (u32) + * @MT76_TM_STATS_ATTR_TX_QUEUED: queued tx frames (u32) + * @MT76_TM_STATS_ATTR_TX_QUEUED: completed tx frames (u32) + 
* + * @MT76_TM_STATS_ATTR_RX_PACKETS: number of rx packets (u64) + * @MT76_TM_STATS_ATTR_RX_FCS_ERROR: number of rx packets with FCS error (u64) + * @MT76_TM_STATS_ATTR_LAST_RX: information about the last received packet + * see &enum mt76_testmode_rx_attr + */ +enum mt76_testmode_stats_attr { + MT76_TM_STATS_ATTR_UNSPEC, + MT76_TM_STATS_ATTR_PAD, + + MT76_TM_STATS_ATTR_TX_PENDING, + MT76_TM_STATS_ATTR_TX_QUEUED, + MT76_TM_STATS_ATTR_TX_DONE, + + MT76_TM_STATS_ATTR_RX_PACKETS, + MT76_TM_STATS_ATTR_RX_FCS_ERROR, + MT76_TM_STATS_ATTR_LAST_RX, + + /* keep last */ + NUM_MT76_TM_STATS_ATTRS, + MT76_TM_STATS_ATTR_MAX = NUM_MT76_TM_STATS_ATTRS - 1, +}; + + +/** + * enum mt76_testmode_rx_attr - packet rx information + * + * @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32) + * @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8) + * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8) + * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8) + */ +enum mt76_testmode_rx_attr { + MT76_TM_RX_ATTR_UNSPEC, + + MT76_TM_RX_ATTR_FREQ_OFFSET, + MT76_TM_RX_ATTR_RCPI, + MT76_TM_RX_ATTR_IB_RSSI, + MT76_TM_RX_ATTR_WB_RSSI, + + /* keep last */ + NUM_MT76_TM_RX_ATTRS, + MT76_TM_RX_ATTR_MAX = NUM_MT76_TM_RX_ATTRS - 1, +}; + +/** + * enum mt76_testmode_state - phy test state + * + * @MT76_TM_STATE_OFF: test mode disabled (normal operation) + * @MT76_TM_STATE_IDLE: test mode enabled, but idle + * @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames + * @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics + */ +enum mt76_testmode_state { + MT76_TM_STATE_OFF, + MT76_TM_STATE_IDLE, + MT76_TM_STATE_TX_FRAMES, + MT76_TM_STATE_RX_FRAMES, + + /* keep last */ + NUM_MT76_TM_STATES, + MT76_TM_STATE_MAX = NUM_MT76_TM_STATES - 1, +}; + +/** + * enum mt76_testmode_tx_mode - packet tx phy mode + * + * @MT76_TM_TX_MODE_CCK: legacy CCK mode + * @MT76_TM_TX_MODE_OFDM: legacy OFDM mode + * @MT76_TM_TX_MODE_HT: 802.11n MCS + * @MT76_TM_TX_MODE_VHT: 802.11ac MCS + */ +enum mt76_testmode_tx_mode { + MT76_TM_TX_MODE_CCK, + MT76_TM_TX_MODE_OFDM, + MT76_TM_TX_MODE_HT, + MT76_TM_TX_MODE_VHT, + + /* keep last */ + NUM_MT76_TM_TX_MODES, + MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1, +}; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index f10c98aa883c..5adf92d7a9f3 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -236,6 +236,14 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb) struct ieee80211_hw *hw; struct sk_buff_head list; +#ifdef CONFIG_NL80211_TESTMODE + if (skb == dev->test.tx_skb) { + dev->test.tx_done++; + if (dev->test.tx_queued == dev->test.tx_done) + wake_up(&dev->tx_wait); + } +#endif + if (!skb->prev) { hw = mt76_tx_status_get_hw(dev, skb); ieee80211_free_txskb(hw, skb); @@ -259,6 +267,11 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta, int qid = skb_get_queue_mapping(skb); bool ext_phy = phy != &dev->phy; + if (mt76_testmode_enabled(dev)) { + ieee80211_free_txskb(phy->hw, skb); + return; + } + if (WARN_ON(qid >= MT_TXQ_PSD)) { qid = MT_TXQ_BE; skb_set_queue_mapping(skb, qid); @@ -579,6 +592,11 @@ void mt76_tx_tasklet(unsigned long data) mt76_txq_schedule_all(&dev->phy); if (dev->phy2) mt76_txq_schedule_all(dev->phy2); + +#ifdef CONFIG_NL80211_TESTMODE + if (dev->test.tx_pending) + mt76_testmode_tx_pending(dev); +#endif } void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, From 4f0bce1c8888245e006f8c88f44b7419b47a1b4b Mon Sep 17 
00:00:00 2001 From: Felix Fietkau Date: Thu, 18 Jun 2020 21:14:43 +0200 Subject: [PATCH 1357/2056] mt76: mt7615: implement testmode support Supports sending a configurable number of packets with a specific rate and configurable tx power levels / antenna settings, as well as displaying rx statistics. Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7615/Makefile | 1 + .../net/wireless/mediatek/mt76/mt7615/init.c | 8 +- .../net/wireless/mediatek/mt76/mt7615/mac.c | 36 ++ .../net/wireless/mediatek/mt76/mt7615/mac.h | 5 + .../net/wireless/mediatek/mt76/mt7615/main.c | 44 ++- .../net/wireless/mediatek/mt76/mt7615/mcu.c | 34 +- .../net/wireless/mediatek/mt76/mt7615/mcu.h | 6 + .../wireless/mediatek/mt76/mt7615/mt7615.h | 18 + .../net/wireless/mediatek/mt76/mt7615/regs.h | 17 + .../wireless/mediatek/mt76/mt7615/testmode.c | 363 ++++++++++++++++++ 10 files changed, 523 insertions(+), 9 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/testmode.c diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index 99f353b8b9aa..d2ad84cb5244 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -8,6 +8,7 @@ CFLAGS_trace.o := -I$(src) mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \ debugfs.o trace.o +mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o mt7615e-$(CONFIG_MT7622_WMAC) += soc.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index e2d80518e5af..2e0840b0e902 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -321,6 +321,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw) ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); + ieee80211_hw_set(hw, WANT_MONITOR_VIF); if (is_mt7615(&phy->dev->mt76)) hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM; @@ -405,9 +406,6 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev) mphy->sband_2g.sband.n_channels = 0; mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; - /* The second interface does not get any packets unless it has a vif */ - ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); - ret = mt76_register_phy(mphy); if (ret) ieee80211_free_hw(mphy->hw); @@ -457,5 +455,9 @@ void mt7615_init_device(struct mt7615_dev *dev) IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; mt7615_cap_dbdc_disable(dev); dev->phy.dfs_state = -1; + +#ifdef CONFIG_NL80211_TESTMODE + dev->mt76.test_ops = &mt7615_testmode_ops; +#endif } EXPORT_SYMBOL_GPL(mt7615_init_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1dc291e8b766..2d432d052780 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -186,6 +186,40 @@ mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy, status->freq = ieee80211_channel_to_frequency(chfreq, status->band); } +static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv) +{ +#ifdef CONFIG_NL80211_TESTMODE + u32 rxv1 = le32_to_cpu(rxv[0]); + u32 rxv3 = le32_to_cpu(rxv[2]); + u32 rxv4 = le32_to_cpu(rxv[3]); + u32 rxv5 = le32_to_cpu(rxv[4]); + u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1); + u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1); + s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5); + u32 foe_const = (BIT(cbw + 1) & 
0xf) * 10000; + + if (!mode) { + /* CCK */ + foe &= ~BIT(11); + foe *= 1000; + foe >>= 11; + } else { + if (foe > 2048) + foe -= 4096; + + foe = (foe * foe_const) >> 15; + } + + dev->test.last_freq_offset = foe; + dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4); + dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4); + dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4); + dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4); + dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3); + dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3); +#endif +} + static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) { struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; @@ -401,6 +435,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) status->chain_signal[i]); } + mt7615_mac_fill_tm_rx(dev, rxd); + rxd += 6; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h index 81608ab656b8..169f4e17b5b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h @@ -100,11 +100,16 @@ enum rx_pkt_type { #define MT_RXV2_GROUP_ID GENMASK(26, 21) #define MT_RXV2_LENGTH GENMASK(20, 0) +#define MT_RXV3_WB_RSSI GENMASK(31, 24) +#define MT_RXV3_IB_RSSI GENMASK(23, 16) + #define MT_RXV4_RCPI3 GENMASK(31, 24) #define MT_RXV4_RCPI2 GENMASK(23, 16) #define MT_RXV4_RCPI1 GENMASK(15, 8) #define MT_RXV4_RCPI0 GENMASK(7, 0) +#define MT_RXV5_FOE GENMASK(11, 0) + #define MT_RXV6_NF3 GENMASK(31, 24) #define MT_RXV6_NF2 GENMASK(23, 16) #define MT_RXV6_NF1 GENMASK(15, 8) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index abb9853eaa8b..3abb33ddab7d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -76,6 +76,8 @@ static void mt7615_stop(struct ieee80211_hw *hw) mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); cancel_delayed_work_sync(&phy->scan_work); @@ -137,6 +139,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + + if (vif->type == NL80211_IFTYPE_MONITOR && + is_zero_ether_addr(vif->addr)) + phy->monitor_vif = vif; + mvif->idx = ffs(~dev->mphy.vif_mask) - 1; if (mvif->idx >= MT7615_MAX_INTERFACES) { ret = -ENOSPC; @@ -197,6 +205,13 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, /* TODO: disable beacon for the bss */ + mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, true); + mutex_unlock(&dev->mt76.mutex); + + if (vif == phy->monitor_vif) + phy->monitor_vif = NULL; + mt7615_mcu_add_dev_info(dev, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); @@ -234,7 +249,7 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy) phy->dfs_state = -1; } -static int mt7615_set_channel(struct mt7615_phy *phy) +int mt7615_set_channel(struct mt7615_phy *phy) { struct mt7615_dev *dev = phy->dev; bool ext_phy = phy != &dev->phy; @@ -260,7 +275,7 @@ static int mt7615_set_channel(struct mt7615_phy *phy) mt7615_mac_set_timing(phy); ret = mt7615_dfs_init_radar_detector(phy); mt7615_mac_cca_stats_reset(phy); - mt7615_mcu_set_sku_en(phy, true); + mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76)); mt7615_mac_reset_counters(dev); phy->noise = 0; @@ -271,8 
+286,11 @@ static int mt7615_set_channel(struct mt7615_phy *phy) mutex_unlock(&dev->mt76.mutex); mt76_txq_schedule_all(phy->mt76); - ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, - MT7615_WATCHDOG_TIME); + + if (!mt76_testmode_enabled(&dev->mt76)) + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, + MT7615_WATCHDOG_TIME); + return ret; } @@ -369,6 +387,13 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | IEEE80211_CONF_CHANGE_POWER)) { +#ifdef CONFIG_NL80211_TESTMODE + if (dev->mt76.test.state != MT76_TM_STATE_OFF) { + mutex_lock(&dev->mt76.mutex); + mt76_testmode_reset(&dev->mt76, false); + mutex_unlock(&dev->mt76.mutex); + } +#endif ieee80211_stop_queues(hw); ret = mt7615_set_channel(phy); ieee80211_wake_queues(hw); @@ -377,6 +402,8 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) mutex_lock(&dev->mt76.mutex); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { + mt76_testmode_reset(&dev->mt76, true); + if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; else @@ -419,10 +446,13 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR1_DROP_CFACK; u32 flags = 0; + mutex_lock(&dev->mt76.mutex); + #define MT76_FILTER(_flag, _hw) do { \ flags |= *total_flags & FIF_##_flag; \ phy->rxfilter &= ~(_hw); \ - phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ + if (!mt76_testmode_enabled(&dev->mt76)) \ + phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\ } while (0) phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | @@ -455,6 +485,8 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw, mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); else mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); + + mutex_unlock(&dev->mt76.mutex); } static void mt7615_bss_info_changed(struct ieee80211_hw *hw, @@ -1069,6 +1101,8 @@ const struct ieee80211_ops mt7615_ops = { .sched_scan_stop = mt7615_stop_sched_scan, .remain_on_channel = mt7615_remain_on_channel, .cancel_remain_on_channel = mt7615_cancel_remain_on_channel, + CFG80211_TESTMODE_CMD(mt76_testmode_cmd) + CFG80211_TESTMODE_DUMP(mt76_testmode_dump) #ifdef CONFIG_PM .suspend = mt7615_suspend, .resume = mt7615_resume, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index b4440f131925..445eb81267ef 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -2806,6 +2806,14 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) .center_chan2 = ieee80211_frequency_to_channel(freq2), }; +#ifdef CONFIG_NL80211_TESTMODE + if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES && + dev->mt76.test.tx_antenna_mask) { + req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask); + req.rx_streams_mask = dev->mt76.test.tx_antenna_mask; + } +#endif + if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && @@ -2817,7 +2825,10 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd) req.band_idx = phy != &dev->phy; req.bw = mt7615_mcu_chan_bw(chandef); - mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); + if (mt76_testmode_enabled(&dev->mt76)) + memset(req.txpower_sku, 0x3f, 49); + else + mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); } @@ -2835,6 +2846,27 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int 
index) sizeof(req), true); } +int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, + u32 val) +{ + struct { + u8 test_mode_en; + u8 param_idx; + u8 _rsv[2]; + + __le32 value; + + u8 pad[8]; + } req = { + .test_mode_en = test_mode, + .param_idx = param, + .value = cpu_to_le32(val), + }; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, + sizeof(req), false); +} + int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) { struct mt7615_dev *dev = phy->dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index 8e2be150a556..e8c7aa5c35ce 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -268,6 +268,7 @@ enum { MCU_EXT_CMD_GET_TEMP = 0x2c, MCU_EXT_CMD_WTBL_UPDATE = 0x32, MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, + MCU_EXT_CMD_ATE_CTRL = 0x3d, MCU_EXT_CMD_PROTECT_CTRL = 0x3e, MCU_EXT_CMD_DBDC_CTRL = 0x45, MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, @@ -289,6 +290,11 @@ enum { MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07, }; +enum { + MCU_ATE_SET_FREQ_OFFSET = 0xa, + MCU_ATE_SET_TX_POWER_CONTROL = 0x15, +}; + struct mt7615_mcu_uni_event { u8 cid; u8 pad[3]; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index af86cf6925dd..a995d41fb9c1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -169,6 +169,8 @@ struct mt7615_phy { struct mt76_phy *mt76; struct mt7615_dev *dev; + struct ieee80211_vif *monitor_vif; + u32 rxfilter; u32 omac_mask; @@ -283,6 +285,17 @@ struct mt7615_dev { u32 debugfs_rf_wf; u32 debugfs_rf_reg; + +#ifdef CONFIG_NL80211_TESTMODE + struct { + u32 *reg_backup; + + s16 last_freq_offset; + u8 last_rcpi[4]; + s8 last_ib_rssi; + s8 last_wb_rssi; + } test; +#endif }; enum tx_pkt_queue_idx { @@ -377,6 +390,7 @@ extern const u32 mt7615e_reg_map[__MT_BASE_MAX]; extern const u32 mt7663e_reg_map[__MT_BASE_MAX]; extern struct pci_driver mt7615_pci_driver; extern struct platform_driver mt7622_wmac_driver; +extern const struct mt76_testmode_ops mt7615_testmode_ops; #ifdef CONFIG_MT7622_WMAC int mt7622_wmac_init(struct mt7615_dev *dev); @@ -488,6 +502,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev, struct ieee80211_supported_band *sband); void mt7615_phy_init(struct mt7615_dev *dev); void mt7615_mac_init(struct mt7615_dev *dev); +int mt7615_set_channel(struct mt7615_phy *phy); int mt7615_mcu_restart(struct mt76_dev *dev); void mt7615_update_channel(struct mt76_dev *mdev); @@ -531,6 +546,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable); int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val); int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index); +int mt7615_mcu_set_tx_power(struct mt7615_phy *phy); void mt7615_mcu_exit(struct mt7615_dev *dev); void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, int cmd, int *wait_seq); @@ -569,6 +585,8 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev, const struct mt7615_dfs_pulse *pulse); int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, const struct mt7615_dfs_pattern *pattern); +int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, + u32 val); int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable); int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); int 
mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 4743c12005a4..32216bfef4b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -212,6 +212,9 @@ enum mt7615_reg_base { #define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00) #define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2)) +#define MT_WF_PHY_RFINTF3_0(_n) MT_WF_PHY(0x1100 + (_n) * 0x400) +#define MT_WF_PHY_RFINTF3_0_ANT GENMASK(7, 4) + #define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE]) #define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs)) @@ -255,6 +258,13 @@ enum mt7615_reg_base { #define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE]) #define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs)) +#define MT_ARB_RQCR MT_WF_ARB(0x070) +#define MT_ARB_RQCR_RX_START BIT(0) +#define MT_ARB_RQCR_RXV_START BIT(4) +#define MT_ARB_RQCR_RXV_R_EN BIT(7) +#define MT_ARB_RQCR_RXV_T_EN BIT(8) +#define MT_ARB_RQCR_BAND_SHIFT 16 + #define MT_ARB_SCR MT_WF_ARB(0x080) #define MT_ARB_SCR_TX0_DISABLE BIT(8) #define MT_ARB_SCR_RX0_DISABLE BIT(9) @@ -550,4 +560,11 @@ enum mt7615_reg_base { #define MT_WL_RX_BUSY BIT(30) #define MT_WL_TX_BUSY BIT(31) +#define MT_MCU_PTA_BASE 0x81060000 +#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n)) + +#define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8) +#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8)) +#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8)) + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c new file mode 100644 index 000000000000..1730751133aa --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 Felix Fietkau */ + +#include "mt7615.h" +#include "eeprom.h" +#include "mcu.h" + +enum { + TM_CHANGED_TXPOWER_CTRL, + TM_CHANGED_TXPOWER, + TM_CHANGED_FREQ_OFFSET, + + /* must be last */ + NUM_TM_CHANGED +}; + + +static const u8 tm_change_map[] = { + [TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL, + [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER, + [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET, +}; + +static const u32 reg_backup_list[] = { + MT_WF_PHY_RFINTF3_0(0), + MT_WF_PHY_RFINTF3_0(1), + MT_WF_PHY_RFINTF3_0(2), + MT_WF_PHY_RFINTF3_0(3), + MT_ANT_SWITCH_CON(2), + MT_ANT_SWITCH_CON(3), + MT_ANT_SWITCH_CON(4), + MT_ANT_SWITCH_CON(6), + MT_ANT_SWITCH_CON(7), + MT_ANT_SWITCH_CON(8), +}; + +static const struct { + u16 wf; + u16 reg; +} rf_backup_list[] = { + { 0, 0x48 }, + { 1, 0x48 }, + { 2, 0x48 }, + { 3, 0x48 }, +}; + +static int +mt7615_tm_set_tx_power(struct mt7615_phy *phy) +{ + struct mt7615_dev *dev = phy->dev; + struct mt76_phy *mphy = phy->mt76; + int i, ret, n_chains = hweight8(mphy->antenna_mask); + struct cfg80211_chan_def *chandef = &mphy->chandef; + int freq = chandef->center_freq1, len, target_chains; + u8 *data, *eep = (u8 *)dev->mt76.eeprom.data; + enum nl80211_band band = chandef->chan->band; + struct sk_buff *skb; + struct { + u8 center_chan; + u8 dbdc_idx; + u8 band; + u8 rsv; + } __packed req_hdr = { + .center_chan = ieee80211_frequency_to_channel(freq), + .band = band, + .dbdc_idx = phy != &dev->phy, + }; + u8 *tx_power = NULL; + + if (dev->mt76.test.state != MT76_TM_STATE_OFF) + tx_power = dev->mt76.test.tx_power; + + len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0; + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 
sizeof(req_hdr) + len); + if (!skb) + return -ENOMEM; + + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); + data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len); + + target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains; + for (i = 0; i < target_chains; i++) { + int index; + + ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i); + if (ret < 0) + return -EINVAL; + + index = ret - MT_EE_NIC_CONF_0; + if (tx_power && tx_power[i]) + data[ret - MT_EE_NIC_CONF_0] = tx_power[i]; + } + + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_EXT_CMD_SET_TX_POWER_CTRL, false); +} + +static void +mt7615_tm_reg_backup_restore(struct mt7615_dev *dev) +{ + u32 *b = dev->test.reg_backup; + int n_regs = ARRAY_SIZE(reg_backup_list); + int n_rf_regs = ARRAY_SIZE(rf_backup_list); + int i; + + if (dev->mt76.test.state == MT76_TM_STATE_OFF) { + for (i = 0; i < n_regs; i++) + mt76_wr(dev, reg_backup_list[i], b[i]); + + for (i = 0; i < n_rf_regs; i++) + mt7615_rf_wr(dev, rf_backup_list[i].wf, + rf_backup_list[i].reg, b[n_regs + i]); + return; + } + + if (b) + return; + + b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs), + GFP_KERNEL); + if (!b) + return; + + dev->test.reg_backup = b; + for (i = 0; i < n_regs; i++) + b[i] = mt76_rr(dev, reg_backup_list[i]); + for (i = 0; i < n_rf_regs; i++) + b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf, + rf_backup_list[i].reg); +} + + +static void +mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy) +{ + unsigned int total_flags = ~0; + + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) + return; + + mutex_unlock(&dev->mt76.mutex); + mt7615_set_channel(phy); + mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0); + mutex_lock(&dev->mt76.mutex); + + mt7615_tm_reg_backup_restore(dev); +} + +static void +mt7615_tm_init(struct mt7615_dev *dev) +{ + mt7615_tm_init_phy(dev, &dev->phy); + + if (dev->mt76.phy2) + mt7615_tm_init_phy(dev, dev->mt76.phy2->priv); +} + +static void +mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en) +{ + u32 rqcr_mask = (MT_ARB_RQCR_RX_START | + MT_ARB_RQCR_RXV_START | + MT_ARB_RQCR_RXV_R_EN | + MT_ARB_RQCR_RXV_T_EN) * + (BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT)); + + if (en) { + mt76_clear(dev, MT_ARB_SCR, + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); + mt76_set(dev, MT_ARB_RQCR, rqcr_mask); + } else { + mt76_set(dev, MT_ARB_SCR, + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); + mt76_clear(dev, MT_ARB_RQCR, rqcr_mask); + } +} + +static void +mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en) +{ + struct mt76_testmode_data *td = &dev->mt76.test; + u8 mask = td->tx_antenna_mask; + int i; + + if (!mask) + return; + + if (!en) + mask = dev->phy.chainmask; + + for (i = 0; i < 4; i++) { + mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i), + MT_WF_PHY_RFINTF3_0_ANT, + td->tx_antenna_mask & BIT(i) ? 0 : 0xa); + + } + + /* 2.4 GHz band */ + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0), + (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2), + (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0), + (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2), + (td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf); + + /* 5 GHz band */ + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1), + (td->tx_antenna_mask & BIT(0)) ? 
0xd : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3), + (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1), + (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf); + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3), + (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf); + + for (i = 0; i < 4; i++) { + u32 val; + + val = mt7615_rf_rr(dev, i, 0x48); + val &= ~(0x3ff << 20); + if (td->tx_antenna_mask & BIT(i)) + val |= 3 << 20; + else + val |= (2 << 28) | (2 << 26) | (8 << 20); + mt7615_rf_wr(dev, i, 0x48, val); + } +} + +static void +mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en) +{ + struct ieee80211_tx_info *info; + struct sk_buff *skb = dev->mt76.test.tx_skb; + + mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH); + mt7615_tm_set_tx_antenna(dev, en); + mt7615_tm_set_rx_enable(dev, !en); + if (!en || !skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = dev->phy.monitor_vif; +} + +static void +mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed) +{ + struct mt76_testmode_data *td = &dev->mt76.test; + bool en = dev->mt76.test.state != MT76_TM_STATE_OFF; + + if (changed & BIT(TM_CHANGED_TXPOWER_CTRL)) + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL, + en, en && td->tx_power_control); + if (changed & BIT(TM_CHANGED_FREQ_OFFSET)) + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET, + en, en ? td->freq_offset : 0); + if (changed & BIT(TM_CHANGED_TXPOWER)) + mt7615_tm_set_tx_power(&dev->phy); +} + +static int +mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_testmode_data *td = &mdev->test; + enum mt76_testmode_state prev_state = td->state; + + mdev->test.state = state; + + if (prev_state == MT76_TM_STATE_TX_FRAMES) + mt7615_tm_set_tx_frames(dev, false); + else if (state == MT76_TM_STATE_TX_FRAMES) + mt7615_tm_set_tx_frames(dev, true); + + if (state <= MT76_TM_STATE_IDLE) + mt7615_tm_init(dev); + + if ((state == MT76_TM_STATE_IDLE && + prev_state == MT76_TM_STATE_OFF) || + (state == MT76_TM_STATE_OFF && + prev_state == MT76_TM_STATE_IDLE)) { + u32 changed = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + u16 cur = tm_change_map[i]; + + if (td->param_set[cur / 32] & BIT(cur % 32)) + changed |= BIT(i); + } + + mt7615_tm_update_params(dev, changed); + } + + return 0; +} + +static int +mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb, + enum mt76_testmode_state new_state) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_testmode_data *td = &dev->mt76.test; + u32 changed = 0; + int i; + + BUILD_BUG_ON(NUM_TM_CHANGED >= 32); + + if (new_state == MT76_TM_STATE_OFF || + td->state == MT76_TM_STATE_OFF) + return 0; + + if (td->tx_antenna_mask & ~dev->phy.chainmask) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { + if (tb[tm_change_map[i]]) + changed |= BIT(i); + } + + mt7615_tm_update_params(dev, changed); + + return 0; +} + +static int +mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + void *rx, *rssi; + int i; + + rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX); + if (!rx) + return -ENOMEM; + + if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) || + nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, 
dev->test.last_ib_rssi) || + nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi)) + return -ENOMEM; + + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI); + if (!rssi) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++) + if (nla_put_u8(msg, i, dev->test.last_rcpi[i])) + return -ENOMEM; + + nla_nest_end(msg, rssi); + + nla_nest_end(msg, rx); + + return 0; +} + +const struct mt76_testmode_ops mt7615_testmode_ops = { + .set_state = mt7615_tm_set_state, + .set_params = mt7615_tm_set_params, + .dump_stats = mt7615_tm_dump_stats, +}; From d9ea74c4131626396c783ad0d2162b13a0c4490c Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Thu, 25 Jun 2020 01:23:17 +0800 Subject: [PATCH 1358/2056] mt76: mt7915: update HE capabilities Sync from SDK to update HE capabilities. Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/init.c | 44 +++++++++++-------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index aadf56e80bae..e90d0087e377 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -417,11 +417,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE; - he_cap_elem->mac_cap_info[1] = - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1; - he_cap_elem->mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_BSR; he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED; @@ -443,27 +438,27 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; - /* TODO: OFDMA */ - switch (i) { case NL80211_IFTYPE_AP: he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES; + he_cap_elem->mac_cap_info[2] |= + IEEE80211_HE_MAC_CAP2_BSR; he_cap_elem->mac_cap_info[4] |= IEEE80211_HE_MAC_CAP4_BQR; + he_cap_elem->mac_cap_info[5] |= + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX; he_cap_elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; he_cap_elem->phy_cap_info[6] |= IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; - he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; break; case NL80211_IFTYPE_STATION: he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; - he_cap_elem->mac_cap_info[3] |= - IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED; + he_cap_elem->mac_cap_info[1] |= + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; if (band == NL80211_BAND_2GHZ) he_cap_elem->phy_cap_info[0] |= @@ -473,18 +468,31 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band, IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; he_cap_elem->phy_cap_info[1] |= - IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A; + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; + he_cap_elem->phy_cap_info[3] |= + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; + he_cap_elem->phy_cap_info[6] |= + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; + he_cap_elem->phy_cap_info[7] |= + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + 
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; he_cap_elem->phy_cap_info[8] |= IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484; he_cap_elem->phy_cap_info[9] |= - IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; + IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; break; -#ifdef CONFIG_MAC80211_MESH - case NL80211_IFTYPE_MESH_POINT: - break; -#endif } he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map); From 40ba9a938fb40e69d14c8e102d2c675ccc6868cc Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 29 Jun 2020 17:12:16 +0200 Subject: [PATCH 1359/2056] mt76: mt76x2: fix pci suspend/resume on mt7612e Fix the following mt7612e hw hangs during suspend/resume reported on Dell Vostro 3360 mt76x2e 0000:01:00.0: MCU message 2 (seq 11) timed out mt76x2e 0000:01:00.0: MCU message 30 (seq 12) timed out mt76x2e 0000:01:00.0: MCU message 30 (seq 13) timed out mt76x2e 0000:01:00.0: Firmware Version: 0.0.00 mt76x2e 0000:01:00.0: Build: 1 mt76x2e 0000:01:00.0: Build Time: 201507311614____ mt76x2e 0000:01:00.0: Firmware running! ieee80211 phy0: Hardware restart was requested mt76x2e 0000:01:00.0: MCU message 2 (seq 1) timed out mt76x2e 0000:01:00.0: MCU message 30 (seq 2) timed out mt76x2e 0000:01:00.0: MCU message 30 (seq 3) timed out mt76x2e 0000:01:00.0: Firmware Version: 0.0.00 mt76x2e 0000:01:00.0: Build: 1 mt76x2e 0000:01:00.0: Build Time: 201507311614____ mt76x2e 0000:01:00.0: Firmware running! ieee80211 phy0: Hardware restart was requested mt76x2e 0000:01:00.0: MCU message 31 (seq 5) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 6) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 7) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 8) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 9) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 10) timed out mt76x2e 0000:01:00.0: MCU message 31 (seq 11) timed out mt76x2e 0000:01:00.0: Firmware Version: 0.0.00 mt76x2e 0000:01:00.0: Build: 1 mt76x2e 0000:01:00.0: Build Time: 201507311614____ mt76x2e 0000:01:00.0: Firmware running! ieee80211 phy0: Hardware restart was requested ------------[ cut here ]----------- CPU: 3 PID: 11956 Comm: kworker/3:1 Not tainted 5.7.0-pf2 #1 Hardware name: Dell Inc. Vostro 3360/0F5DWF, BIOS A18 09/25/2013 Workqueue: events_freezable ieee80211_restart_work [mac80211] RIP: 0010:ieee80211_reconfig+0x234/0x1700 [mac80211] RSP: 0018:ffffb803c23ffdf0 EFLAGS: 00010286 RAX: 00000000fffffff0 RBX: ffff9595a7564900 RCX: 0000000000000008 RDX: 0000000000000000 RSI: 0000000000000100 RDI: 0000000000000100 RBP: ffff9595a7ec07e0 R08: 0000000000000000 R09: 0000000000000001 R10: 0000000000000001 R11: 0000000000000000 R12: ffff9595a7ec18d0 R13: 00000000ffffffff R14: 0000000000000000 R15: 00000000fffffff0 FS: 0000000000000000(0000) GS:ffff9595af2c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055e56d7de000 CR3: 000000042200a001 CR4: 00000000001706e0 Call Trace: ieee80211_restart_work+0xb7/0xe0 [mac80211] process_one_work+0x1d4/0x3c0 worker_thread+0x228/0x470 ? 
process_one_work+0x3c0/0x3c0 kthread+0x19c/0x1c0 ? __kthread_init_worker+0x30/0x30 ret_from_fork+0x35/0x40 wlp1s0: Failed check-sdata-in-driver check, flags: 0x0 CPU: 3 PID: 11956 Comm: kworker/3:1 Tainted: G W 5.7.0-pf2 #1 Hardware name: Dell Inc. Vostro 3360/0F5DWF, BIOS A18 09/25/2013 Workqueue: events_freezable ieee80211_restart_work [mac80211] RIP: 0010:drv_remove_interface+0x11f/0x130 [mac80211] RSP: 0018:ffffb803c23ffc80 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff9595a7564900 RCX: 0000000000000000 RDX: 0000000000000001 RSI: 0000000000000082 RDI: 00000000ffffffff RBP: ffff9595a7ec1930 R08: 00000000000004b6 R09: 0000000000000001 R10: 0000000000000001 R11: 0000000000006f08 R12: ffff9595a7ec1000 R13: ffff9595a75654b8 R14: ffff9595a7ec0ca0 R15: ffff9595a7ec07e0 FS: 0000000000000000(0000) GS:ffff9595af2c0000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055e56d7de000 CR3: 000000042200a001 CR4: 00000000001706e0 Call Trace: ieee80211_do_stop+0x5af/0x8c0 [mac80211] ieee80211_stop+0x16/0x20 [mac80211] __dev_close_many+0xaa/0x120 dev_close_many+0xa1/0x2b0 dev_close+0x6d/0x90 cfg80211_shutdown_all_interfaces+0x71/0xd0 [cfg80211] ieee80211_reconfig+0xa2/0x1700 [mac80211] ieee80211_restart_work+0xb7/0xe0 [mac80211] process_one_work+0x1d4/0x3c0 worker_thread+0x228/0x470 ? process_one_work+0x3c0/0x3c0 kthread+0x19c/0x1c0 ? __kthread_init_worker+0x30/0x30 ret_from_fork+0x35/0x40 Fixes: 7bc04215a66b ("mt76: add driver code for MT76x2e") Tested-by: Oleksandr Natalenko Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt76x2/mt76x2.h | 1 + .../net/wireless/mediatek/mt76/mt76x2/pci.c | 56 +++++++++++++++++++ .../wireless/mediatek/mt76/mt76x2/pci_init.c | 17 ++++++ 3 files changed, 74 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index eca95b7f64d2..d01f47c83eb1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -39,6 +39,7 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev) extern const struct ieee80211_ops mt76x2_ops; int mt76x2_register_device(struct mt76x02_dev *dev); +int mt76x2_resume_device(struct mt76x02_dev *dev); void mt76x2_phy_power_on(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c index f36c71c1bc58..6dfb0df8ec8a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c @@ -103,6 +103,58 @@ mt76x2e_remove(struct pci_dev *pdev) mt76_free_device(mdev); } +static int __maybe_unused +mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + int i, err; + + napi_disable(&mdev->tx_napi); + tasklet_kill(&mdev->pre_tbtt_tasklet); + tasklet_kill(&mdev->tx_tasklet); + + mt76_for_each_q_rx(mdev, i) + napi_disable(&mdev->napi[i]); + + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); + pci_save_state(pdev); + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); + if (err) + goto restore; + + return 0; + +restore: + mt76_for_each_q_rx(mdev, i) + napi_enable(&mdev->napi[i]); + napi_enable(&mdev->tx_napi); + + return err; +} + +static int __maybe_unused +mt76x2e_resume(struct pci_dev *pdev) +{ + struct mt76_dev *mdev = pci_get_drvdata(pdev); + struct mt76x02_dev *dev = 
container_of(mdev, struct mt76x02_dev, mt76); + int i, err; + + err = pci_set_power_state(pdev, PCI_D0); + if (err) + return err; + + pci_restore_state(pdev); + + mt76_for_each_q_rx(mdev, i) { + napi_enable(&mdev->napi[i]); + napi_schedule(&mdev->napi[i]); + } + napi_enable(&mdev->tx_napi); + napi_schedule(&mdev->tx_napi); + + return mt76x2_resume_device(dev); +} + MODULE_DEVICE_TABLE(pci, mt76x2e_device_table); MODULE_FIRMWARE(MT7662_FIRMWARE); MODULE_FIRMWARE(MT7662_ROM_PATCH); @@ -113,6 +165,10 @@ static struct pci_driver mt76pci_driver = { .id_table = mt76x2e_device_table, .probe = mt76x2e_probe, .remove = mt76x2e_remove, +#ifdef CONFIG_PM + .suspend = mt76x2e_suspend, + .resume = mt76x2e_resume, +#endif /* CONFIG_PM */ }; module_pci_driver(mt76pci_driver); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index f27774f57438..101a0fe00ef3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -217,6 +217,23 @@ mt76x2_power_on(struct mt76x02_dev *dev) mt76x2_power_on_rf(dev, 1); } +int mt76x2_resume_device(struct mt76x02_dev *dev) +{ + int err; + + mt76x02_dma_disable(dev); + mt76x2_reset_wlan(dev, true); + mt76x2_power_on(dev); + + err = mt76x2_mac_reset(dev, true); + if (err) + return err; + + mt76x02_mac_start(dev); + + return mt76x2_mcu_init(dev); +} + static int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; From 05b5a339a7b2308ec38563a7da6649ff1f9cc28b Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Mon, 29 Jun 2020 19:05:33 +0200 Subject: [PATCH 1360/2056] mt76: mt76x2u: enable HC-M7662BU1 Enable support for HC-M7662BU1 module on mt76x2u driver Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76x2/usb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 3a4e41724af1..b27acf1bbc03 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -16,6 +16,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */ { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ + { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */ { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */ { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ From 0d4b6909987723f11c23b3a996499cb8f7a0e2b7 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Wed, 1 Jul 2020 16:13:07 +0800 Subject: [PATCH 1361/2056] mt76: mt7915: avoid memcpy in rxv operation Avoid memcpy in Rx hot path to slightly improve performance. 
Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/mac.c | 78 +++++++++---------- .../net/wireless/mediatek/mt76/mt7915/mac.h | 7 -- 2 files changed, 37 insertions(+), 48 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 822540e5df6b..6825afca1efb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -178,14 +178,14 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev) static void mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, - struct mt7915_rxv *rxv, - struct ieee80211_radiotap_he *he) + struct ieee80211_radiotap_he *he, + __le32 *rxv) { u32 ru_h, ru_l; u8 ru, offs = 0; - ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0])); - ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1])); + ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0])); + ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1])); ru = (u8)(ru_l | ru_h << 4); status->bw = RATE_INFO_BW_HE_RU; @@ -228,7 +228,7 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, static void mt7915_mac_decode_he_radiotap(struct sk_buff *skb, struct mt76_rx_status *status, - struct mt7915_rxv *rxv) + __le32 *rxv, u32 phy) { /* TODO: struct ieee80211_radiotap_he_mu */ static const struct ieee80211_radiotap_he known = { @@ -245,48 +245,45 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, HE_BITS(DATA2_TXOP_KNOWN), }; struct ieee80211_radiotap_he *he = NULL; - __le32 v2 = rxv->v[2]; - __le32 v11 = rxv->v[11]; - __le32 v14 = rxv->v[14]; - u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1; + u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1; he = skb_push(skb, sizeof(known)); memcpy(he, &known, sizeof(known)); - he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) | - HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2); - he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) | + he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) | + HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]); + he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) | le16_encode_bits(ltf_size, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); - he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) | - HE_PREP(DATA6_DOPPLER, DOPPLER, v14); + he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | + HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); - switch (rxv->phy) { + switch (phy) { case MT_PHY_TYPE_HE_SU: he->data1 |= HE_BITS(DATA1_FORMAT_SU) | HE_BITS(DATA1_UL_DL_KNOWN) | HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | HE_BITS(DATA1_SPTL_REUSE_KNOWN); - he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) | - HE_PREP(DATA3_UL_DL, UPLINK, v2); - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); + he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) | + HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); break; case MT_PHY_TYPE_HE_EXT_SU: he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | HE_BITS(DATA1_UL_DL_KNOWN); - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); break; case MT_PHY_TYPE_HE_MU: he->data1 |= HE_BITS(DATA1_FORMAT_MU) | HE_BITS(DATA1_UL_DL_KNOWN) | HE_BITS(DATA1_SPTL_REUSE_KNOWN); - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); - 
mt7915_mac_decode_he_radiotap_ru(status, rxv, he); + mt7915_mac_decode_he_radiotap_ru(status, he, rxv); break; case MT_PHY_TYPE_HE_TB: he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | @@ -295,12 +292,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb, HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | HE_BITS(DATA1_SPTL_REUSE4_KNOWN); - he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) | - HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) | - HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) | - HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11); + he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) | + HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) | + HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) | + HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]); - mt7915_mac_decode_he_radiotap_ru(status, rxv, he); + mt7915_mac_decode_he_radiotap_ru(status, he, rxv); break; default: break; @@ -314,8 +311,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) struct mt7915_phy *phy = &dev->phy; struct ieee80211_supported_band *sband; struct ieee80211_hdr *hdr; - struct mt7915_rxv rxv = {}; __le32 *rxd = (__le32 *)skb->data; + __le32 *rxv = NULL; + u32 mode = 0; u32 rxd1 = le32_to_cpu(rxd[1]); u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); @@ -427,15 +425,14 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { u32 v0, v1, v2; - memcpy(rxv.v, rxd, sizeof(rxv.v)); - + rxv = rxd; rxd += 2; if ((u8 *)rxd - skb->data >= skb->len) return -EINVAL; - v0 = le32_to_cpu(rxv.v[0]); - v1 = le32_to_cpu(rxv.v[1]); - v2 = le32_to_cpu(rxv.v[2]); + v0 = le32_to_cpu(rxv[0]); + v1 = le32_to_cpu(rxv[1]); + v2 = le32_to_cpu(rxv[2]); if (v0 & MT_PRXV_HT_AD_CODE) status->enc_flags |= RX_ENC_FLAG_LDPC; @@ -466,9 +463,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) return -EINVAL; idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); - rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2); + mode = FIELD_GET(MT_CRXV_TX_MODE, v2); - switch (rxv.phy) { + switch (mode) { case MT_PHY_TYPE_CCK: cck = true; /* fall through */ @@ -503,8 +500,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) if (gi <= NL80211_RATE_INFO_HE_GI_3_2) status->he_gi = gi; - if (idx & MT_PRXV_TX_DCM) - status->he_dcm = true; + status->he_dcm = !!(idx & MT_PRXV_TX_DCM); break; default: return -EINVAL; @@ -515,7 +511,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) case IEEE80211_STA_RX_BW_20: break; case IEEE80211_STA_RX_BW_40: - if (rxv.phy & MT_PHY_TYPE_HE_EXT_SU && + if (mode & MT_PHY_TYPE_HE_EXT_SU && (idx & MT_PRXV_TX_ER_SU_106T)) { status->bw = RATE_INFO_BW_HE_RU; status->he_ru = @@ -535,7 +531,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) } status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; - if (rxv.phy < MT_PHY_TYPE_HE_SU && gi) + if (mode < MT_PHY_TYPE_HE_SU && gi) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; } } @@ -548,8 +544,8 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb) mt76_insert_ccmp_hdr(skb, key_id); } - if (status->flag & RX_FLAG_RADIOTAP_HE) - mt7915_mac_decode_he_radiotap(skb, status, &rxv); + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) + mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); hdr = mt76_skb_get_hdr(skb); if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h index 4b0871ab2414..c8bb5ea96c60 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h @@ -128,13 +128,6 @@ enum rx_pkt_type { #define MT_CRXV_HE_BEAM_CHNG BIT(13) #define MT_CRXV_HE_DOPPLER BIT(16) -struct mt7915_rxv { - u32 phy; - - /* P-RXV: bit 0~1, C-RXV: bit 2~19 */ - __le32 v[20]; -}; - enum tx_header_format { MT_HDR_FORMAT_802_3, MT_HDR_FORMAT_CMD, From 757b0e7fd6f4f2e558fac602ea34a9be20143c95 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:40 +0200 Subject: [PATCH 1362/2056] mt76: mt7615: avoid polling in fw_own for mt7663 According to the vendor sdk, mt7663 does not need to poll register after firmware own. Since just mt7622 polls status register, set proper timeout value according to the vendor sdk Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 445eb81267ef..0fcb379a8c06 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1918,9 +1918,9 @@ int mt7615_firmware_own(struct mt7615_dev *dev) mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); - if (!is_mt7615(&dev->mt76) && + if (is_mt7622(&dev->mt76) && !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, - MT_CFG_LPCR_HOST_FW_OWN, 3000)) { + MT_CFG_LPCR_HOST_FW_OWN, 300)) { dev_err(dev->mt76.dev, "Timeout for firmware own\n"); return -EIO; } From a86f1d01f5ce5c5e6c685168747822e82ba5bf83 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:41 +0200 Subject: [PATCH 1363/2056] mt76: move mt76 workqueue in common code Move mt76 workqueue from usb to common code in order to be reused adding low-power support for mt7663 chipset Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 13 +++++++++++- drivers/net/wireless/mediatek/mt76/mt76.h | 4 ++-- .../net/wireless/mediatek/mt76/mt7615/mac.c | 2 +- .../net/wireless/mediatek/mt76/mt7615/main.c | 2 +- .../net/wireless/mediatek/mt76/mt7615/usb.c | 6 ++---- .../net/wireless/mediatek/mt76/mt76x0/usb.c | 6 ++---- .../net/wireless/mediatek/mt76/mt76x2/usb.c | 7 ++----- drivers/net/wireless/mediatek/mt76/usb.c | 20 ++++--------------- 8 files changed, 26 insertions(+), 34 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 10e6dbb64996..6aa3efa8f55e 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -437,6 +437,12 @@ mt76_alloc_device(struct device *pdev, unsigned int size, tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev); + dev->wq = alloc_ordered_workqueue("mt76", 0); + if (!dev->wq) { + ieee80211_free_hw(hw); + return NULL; + } + return dev; } EXPORT_SYMBOL_GPL(mt76_alloc_device); @@ -490,7 +496,12 @@ EXPORT_SYMBOL_GPL(mt76_unregister_device); void mt76_free_device(struct mt76_dev *dev) { - mt76_tx_free(dev); + if (dev->wq) { + destroy_workqueue(dev->wq); + dev->wq = NULL; + } + if (mt76_is_mmio(dev)) + mt76_tx_free(dev); ieee80211_free_hw(dev->hw); } EXPORT_SYMBOL_GPL(mt76_free_device); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 14b3a39cd55a..dea63d6579f5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ 
-423,7 +423,6 @@ struct mt76_usb { u16 data_len; struct tasklet_struct rx_tasklet; - struct workqueue_struct *wq; struct work_struct stat_work; u8 out_ep[__MT_EP_OUT_MAX]; @@ -621,6 +620,8 @@ struct mt76_dev { struct mt76_testmode_data test; #endif + struct workqueue_struct *wq; + union { struct mt76_mmio mmio; struct mt76_usb usb; @@ -1018,7 +1019,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req, void *buf, size_t len); void mt76u_single_wr(struct mt76_dev *dev, const u8 req, const u16 offset, const u32 val); -void mt76u_deinit(struct mt76_dev *dev); int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf, bool ext); int mt76u_alloc_mcu_queue(struct mt76_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 2d432d052780..73b994a410b0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -927,7 +927,7 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, &wd->rate); list_add_tail(&wd->node, &dev->wd_head); - queue_work(dev->mt76.usb.wq, &dev->wtbl_work); + queue_work(dev->mt76.wq, &dev->wtbl_work); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 3abb33ddab7d..3bcbd13c26d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -319,7 +319,7 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd, wd->key.cmd = cmd; list_add_tail(&wd->node, &dev->wd_head); - queue_work(dev->mt76.usb.wq, &dev->wtbl_work); + queue_work(dev->mt76.wq, &dev->wtbl_work); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 0ba28d37c414..f70a7d9d65e2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -349,11 +349,10 @@ static int mt7663u_probe(struct usb_interface *usb_intf, error_freeq: mt76u_queues_deinit(&dev->mt76); error: - mt76u_deinit(&dev->mt76); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - ieee80211_free_hw(mdev->hw); + mt76_free_device(&dev->mt76); return ret; } @@ -371,8 +370,7 @@ static void mt7663u_disconnect(struct usb_interface *usb_intf) usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - mt76u_deinit(&dev->mt76); - ieee80211_free_hw(dev->mt76.hw); + mt76_free_device(&dev->mt76); } #ifdef CONFIG_PM diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 5535b9c0632f..ce6b286a8152 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -277,9 +277,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - mt76u_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); - ieee80211_free_hw(mdev->hw); return ret; } @@ -297,8 +296,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf) usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); - mt76u_deinit(&dev->mt76); - ieee80211_free_hw(dev->mt76.hw); + mt76_free_device(&dev->mt76); } static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c 
b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index b27acf1bbc03..4e003c7b62cf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -75,8 +75,7 @@ static int mt76x2u_probe(struct usb_interface *intf, return 0; err: - ieee80211_free_hw(mt76_hw(dev)); - mt76u_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); usb_set_intfdata(intf, NULL); usb_put_dev(udev); @@ -92,9 +91,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf) set_bit(MT76_REMOVED, &dev->mphy.state); ieee80211_unregister_hw(hw); mt76x2u_cleanup(dev); - mt76u_deinit(&dev->mt76); - - ieee80211_free_hw(hw); + mt76_free_device(&dev->mt76); usb_set_intfdata(intf, NULL); usb_put_dev(udev); } diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 0ff3096f7455..84e2fd0a4fc1 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -842,7 +842,7 @@ static void mt76u_tx_tasklet(unsigned long data) if (dev->drv->tx_status_data && !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) - queue_work(dev->usb.wq, &dev->usb.stat_work); + queue_work(dev->wq, &dev->usb.stat_work); if (wake) ieee80211_wake_queue(dev->hw, i); } @@ -868,7 +868,7 @@ static void mt76u_tx_status_data(struct work_struct *work) } if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) - queue_work(usb->wq, &usb->stat_work); + queue_work(dev->wq, &usb->stat_work); else clear_bit(MT76_READING_STATS, &dev->phy.state); } @@ -1132,15 +1132,6 @@ static const struct mt76_queue_ops usb_queue_ops = { .kick = mt76u_tx_kick, }; -void mt76u_deinit(struct mt76_dev *dev) -{ - if (dev->usb.wq) { - destroy_workqueue(dev->usb.wq); - dev->usb.wq = NULL; - } -} -EXPORT_SYMBOL_GPL(mt76u_deinit); - int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf, bool ext) { @@ -1163,10 +1154,6 @@ int mt76u_init(struct mt76_dev *dev, tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev); INIT_WORK(&usb->stat_work, mt76u_tx_status_data); - usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0); - if (!usb->wq) - return -ENOMEM; - usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1); if (usb->data_len < 32) usb->data_len = 32; @@ -1190,7 +1177,8 @@ int mt76u_init(struct mt76_dev *dev, return 0; error: - mt76u_deinit(dev); + destroy_workqueue(dev->wq); + return err; } EXPORT_SYMBOL_GPL(mt76u_init); From 08523a2a1db5eba808de0ba0a7aa29cb993771dd Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:42 +0200 Subject: [PATCH 1364/2056] mt76: mt7615: add mt7615_pm_wake utility routine Introduce mt7615_pm_wake utility routine to wake the device from runtime low-power state (lower-power state is currently supported by offload firmware for pcie devices). 
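The flow added here is: a caller that is about to touch the register map checks the MT76_STATE_PM bit, queues a work item that performs the blocking driver-own handshake with the firmware, and sleeps on a completion until the device is back in full power. A condensed sketch of that flow under invented names (struct pm_ctrl, PM_SLEEPING and pm_wake() are illustrative, not mt76 symbols):

#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define PM_SLEEPING     0       /* set while the device is in low power */

struct pm_ctrl {
        unsigned long state;
        struct workqueue_struct *wq;
        struct work_struct wake_work;   /* runs the driver-own handshake */
        struct completion wake_cmpl;    /* completed by the wake work */
};

/* Called before any register access; returns 0 once in full power. */
static int pm_wake(struct pm_ctrl *pm)
{
        if (!test_bit(PM_SLEEPING, &pm->state))
                return 0;

        /* rearm the completion only if the work was not already pending */
        if (queue_work(pm->wq, &pm->wake_work))
                reinit_completion(&pm->wake_cmpl);

        if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ))
                return -ETIMEDOUT;

        return 0;
}

In the diff below the same handshake is mt7615_pm_wake()/mt7615_pm_wake_work(): the work calls mt7615_driver_own(), which clears MT76_STATE_PM once the poll on the FW_OWN bit succeeds, and then completes wake_cmpl for all waiters.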
Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 1 + .../net/wireless/mediatek/mt76/mt7615/init.c | 3 ++ .../net/wireless/mediatek/mt76/mt7615/mac.c | 42 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/main.c | 2 + .../net/wireless/mediatek/mt76/mt7615/mcu.c | 25 ++++++++--- .../wireless/mediatek/mt76/mt7615/mt7615.h | 8 ++++ 6 files changed, 75 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index dea63d6579f5..7e4adb9be273 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -291,6 +291,7 @@ enum { MT76_STATE_POWER_OFF, MT76_STATE_SUSPEND, MT76_STATE_ROC, + MT76_STATE_PM, }; struct mt76_hw_cap { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 2e0840b0e902..e1bf45466ad6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -435,6 +435,9 @@ void mt7615_init_device(struct mt7615_dev *dev) dev->phy.dev = dev; dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; + + INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); + init_completion(&dev->pm.wake_cmpl); INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); skb_queue_head_init(&dev->phy.scan_event_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 73b994a410b0..d4dbaaa0b279 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1808,6 +1808,48 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy) } } +void mt7615_pm_wake_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + struct mt76_phy *mphy; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + pm.wake_work); + mphy = dev->phy.mt76; + + if (mt7615_driver_own(dev)) + dev_err(mphy->dev->dev, "failed to wake device\n"); + + complete_all(&dev->pm.wake_cmpl); +} + +int mt7615_pm_wake(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = dev->phy.mt76; + + if (!mt7615_firmware_offload(dev)) + return 0; + + if (!mt76_is_mmio(mphy->dev)) + return 0; + + if (!test_bit(MT76_STATE_PM, &mphy->state)) + return 0; + + if (test_bit(MT76_HW_SCANNING, &mphy->state) || + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state)) + return 0; + + if (queue_work(dev->mt76.wq, &dev->pm.wake_work)) + reinit_completion(&dev->pm.wake_cmpl); + + if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) + return -ETIMEDOUT; + + return 0; +} +EXPORT_SYMBOL_GPL(mt7615_pm_wake); + void mt7615_mac_work(struct work_struct *work) { struct mt7615_phy *phy; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 3bcbd13c26d7..1e7f7b9b1388 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -74,6 +74,8 @@ static void mt7615_stop(struct ieee80211_hw *hw) del_timer_sync(&phy->roc_timer); cancel_work_sync(&phy->roc_work); + cancel_work_sync(&dev->pm.wake_work); + mutex_lock(&dev->mt76.mutex); mt76_testmode_reset(&dev->mt76, true); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 0fcb379a8c06..2f9f079bad0b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ 
b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1889,28 +1889,36 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en) int mt7615_driver_own(struct mt7615_dev *dev) { + struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_dev *mdev = &dev->mt76; + int err = 0; u32 addr; + mt7622_trigger_hif_int(dev, true); + addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); - mt7622_trigger_hif_int(dev, true); - addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) { dev_err(dev->mt76.dev, "Timeout for driver own\n"); - return -EIO; + err = -EIO; + goto out; } + clear_bit(MT76_STATE_PM, &mphy->state); + +out: mt7622_trigger_hif_int(dev, false); - return 0; + return err; } EXPORT_SYMBOL_GPL(mt7615_driver_own); int mt7615_firmware_own(struct mt7615_dev *dev) { + struct mt76_phy *mphy = &dev->mt76.phy; + int err = 0; u32 addr; addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; @@ -1922,11 +1930,16 @@ int mt7615_firmware_own(struct mt7615_dev *dev) !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, MT_CFG_LPCR_HOST_FW_OWN, 300)) { dev_err(dev->mt76.dev, "Timeout for firmware own\n"); - return -EIO; + err = -EIO; + goto out; } + + set_bit(MT76_STATE_PM, &mphy->state); + +out: mt7622_trigger_hif_int(dev, false); - return 0; + return err; } EXPORT_SYMBOL_GPL(mt7615_firmware_own); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index a995d41fb9c1..27d4dfd81dfd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -4,6 +4,7 @@ #ifndef __MT7615_H #define __MT7615_H +#include #include #include #include @@ -296,6 +297,11 @@ struct mt7615_dev { s8 last_wb_rssi; } test; #endif + + struct { + struct work_struct wake_work; + struct completion wake_cmpl; + } pm; }; enum tx_pkt_queue_idx { @@ -425,6 +431,8 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev); void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct ieee80211_tx_rate *probe_rate, struct ieee80211_tx_rate *rates); +void mt7615_pm_wake_work(struct work_struct *work); +int mt7615_pm_wake(struct mt7615_dev *dev); int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev); int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd); int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, From adfd5112c81b31cc4c9ac481bd0ad372149771ea Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:43 +0200 Subject: [PATCH 1365/2056] mt76: mt7615: introduce mt7615_mutex_{acquire,release} utilities Introduce mt7615_mutex_{acquire,release} utility routines in order to switch in full-power/low-power before/after accessing device register-map Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 14 ++-- .../net/wireless/mediatek/mt76/mt7615/main.c | 82 ++++++++++--------- .../net/wireless/mediatek/mt76/mt7615/mcu.c | 1 + .../wireless/mediatek/mt76/mt7615/mt7615.h | 16 ++++ .../net/wireless/mediatek/mt76/mt7615/usb.c | 11 +-- 5 files changed, 72 insertions(+), 52 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index d4dbaaa0b279..3f470388fe01 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1530,7 +1530,7 @@ void 
mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) bool ext_phy = phy != &dev->phy; u32 reg, mask; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); if (phy->scs_en == enable) goto out; @@ -1557,7 +1557,7 @@ void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable) phy->scs_en = enable; out: - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) @@ -1859,7 +1859,7 @@ void mt7615_mac_work(struct work_struct *work) mac_work.work); mdev = &phy->dev->mt76; - mutex_lock(&mdev->mutex); + mt7615_mutex_acquire(phy->dev); mt76_update_survey(mdev); if (++phy->mac_work_count == 5) { @@ -1869,7 +1869,7 @@ void mt7615_mac_work(struct work_struct *work) mt7615_mac_scs_check(phy); } - mutex_unlock(&mdev->mutex); + mt7615_mutex_release(phy->dev); mt76_tx_status_check(mdev, NULL, false); ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, @@ -1973,7 +1973,7 @@ void mt7615_mac_reset_work(struct work_struct *work) napi_disable(&dev->mt76.napi[1]); napi_disable(&dev->mt76.tx_napi); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED); @@ -2006,10 +2006,10 @@ void mt7615_mac_reset_work(struct work_struct *work) mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); - mutex_unlock(&dev->mt76.mutex); - mt7615_update_beacons(dev); + mt7615_mutex_release(dev); + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work, MT7615_WATCHDOG_TIME); if (phy2) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 1e7f7b9b1388..5f2c50a2449b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -33,7 +33,7 @@ static int mt7615_start(struct ieee80211_hw *hw) if (!mt7615_wait_for_mcu_init(dev)) return -EIO; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); running = mt7615_dev_running(dev); @@ -60,7 +60,7 @@ static int mt7615_start(struct ieee80211_hw *hw) if (!running) mt7615_mac_reset_counters(dev); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return 0; } @@ -76,7 +76,7 @@ static void mt7615_stop(struct ieee80211_hw *hw) cancel_work_sync(&dev->pm.wake_work); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_testmode_reset(&dev->mt76, true); @@ -93,7 +93,7 @@ static void mt7615_stop(struct ieee80211_hw *hw) mt7615_mcu_set_mac_enable(dev, 0, false); } - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } static int get_omac_idx(enum nl80211_iftype type, u32 mask) @@ -139,7 +139,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, bool ext_phy = phy != &dev->phy; int idx, ret = 0; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_testmode_reset(&dev->mt76, true); @@ -191,7 +191,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, ret = mt7615_mcu_add_dev_info(dev, vif, true); out: - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return ret; } @@ -207,10 +207,9 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, /* TODO: disable beacon for the bss */ - mutex_lock(&dev->mt76.mutex); - mt76_testmode_reset(&dev->mt76, true); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); + mt76_testmode_reset(&dev->mt76, true); if (vif == phy->monitor_vif) phy->monitor_vif = NULL; @@ -220,11 +219,11 @@ static void mt7615_remove_interface(struct ieee80211_hw 
*hw, if (vif->txq) mt76_txq_remove(&dev->mt76, vif->txq); - mutex_lock(&dev->mt76.mutex); dev->mphy.vif_mask &= ~BIT(mvif->idx); dev->omac_mask &= ~BIT(mvif->omac_idx); phy->omac_mask &= ~BIT(mvif->omac_idx); - mutex_unlock(&dev->mt76.mutex); + + mt7615_mutex_release(dev); spin_lock_bh(&dev->sta_poll_lock); if (!list_empty(&msta->poll_list)) @@ -259,7 +258,8 @@ int mt7615_set_channel(struct mt7615_phy *phy) cancel_delayed_work_sync(&phy->mac_work); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); + set_bit(MT76_RESET, &phy->mt76->state); mt7615_init_dfs_state(phy); @@ -285,7 +285,8 @@ int mt7615_set_channel(struct mt7615_phy *phy) out: clear_bit(MT76_RESET, &phy->mt76->state); - mutex_unlock(&dev->mt76.mutex); + + mt7615_mutex_release(dev); mt76_txq_schedule_all(phy->mt76); @@ -391,9 +392,9 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) IEEE80211_CONF_CHANGE_POWER)) { #ifdef CONFIG_NL80211_TESTMODE if (dev->mt76.test.state != MT76_TM_STATE_OFF) { - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_testmode_reset(&dev->mt76, false); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } #endif ieee80211_stop_queues(hw); @@ -401,7 +402,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) ieee80211_wake_queues(hw); } - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); if (changed & IEEE80211_CONF_CHANGE_MONITOR) { mt76_testmode_reset(&dev->mt76, true); @@ -414,7 +415,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed) mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); } - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return ret; } @@ -448,7 +449,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw, MT_WF_RFCR1_DROP_CFACK; u32 flags = 0; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); #define MT76_FILTER(_flag, _hw) do { \ flags |= *total_flags & FIF_##_flag; \ @@ -488,7 +489,7 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw, else mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } static void mt7615_bss_info_changed(struct ieee80211_hw *hw, @@ -499,7 +500,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); if (changed & BSS_CHANGED_ERP_SLOT) { int slottime = info->use_short_slot ? 
9 : 20; @@ -528,7 +529,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) mt7615_mcu_update_arp_filter(hw, vif, info); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } static void @@ -538,9 +539,9 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw, { struct mt7615_dev *dev = mt7615_hw_dev(hw); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt7615_mcu_add_beacon(dev, hw, vif, true); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, @@ -656,9 +657,9 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt7615_mcu_set_rts_thresh(phy, val); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return 0; } @@ -682,7 +683,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mtxq = (struct mt76_txq *)txq->drv_priv; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); + switch (action) { case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, @@ -718,7 +720,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return ret; } @@ -763,13 +765,13 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) u32 t32[2]; } tsf; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0); tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return tsf.t64; } @@ -784,14 +786,14 @@ mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 t32[2]; } tsf = { .t64 = timestamp, }; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); /* TSF software overwrite */ mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } static void @@ -800,10 +802,10 @@ mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class) struct mt7615_phy *phy = mt7615_hw_phy(hw); struct mt7615_dev *dev = phy->dev; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); phy->coverage_class = max_t(s16, coverage_class, 0); mt7615_mac_set_timing(phy); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); } static int @@ -820,7 +822,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) tx_ant = BIT(ffs(tx_ant) - 1) - 1; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); phy->mt76->antenna_mask = tx_ant; if (ext_phy) { @@ -833,7 +835,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) mt76_set_stream_caps(phy->mt76, true); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return 0; } @@ -995,7 +997,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw, bool ext_phy = phy != &dev->phy; int err = 0; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); cancel_delayed_work_sync(&phy->scan_work); @@ -1011,7 +1013,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw, if 
(!mt7615_dev_running(dev)) err = mt7615_mcu_set_hif_suspend(dev, true); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return err; } @@ -1022,7 +1024,7 @@ static int mt7615_resume(struct ieee80211_hw *hw) struct mt7615_phy *phy = mt7615_hw_phy(hw); bool running, ext_phy = phy != &dev->phy; - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); running = mt7615_dev_running(dev); set_bit(MT76_STATE_RUNNING, &phy->mt76->state); @@ -1032,7 +1034,7 @@ static int mt7615_resume(struct ieee80211_hw *hw) err = mt7615_mcu_set_hif_suspend(dev, false); if (err < 0) { - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return err; } } @@ -1046,7 +1048,7 @@ static int mt7615_resume(struct ieee80211_hw *hw) MT7615_WATCHDOG_TIME); mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 2f9f079bad0b..72cfc197b936 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1910,6 +1910,7 @@ int mt7615_driver_own(struct mt7615_dev *dev) out: mt7622_trigger_hif_int(dev, false); + dev->pm.last_activity = jiffies; return err; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 27d4dfd81dfd..ec91b729a25c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -301,6 +301,8 @@ struct mt7615_dev { struct { struct work_struct wake_work; struct completion wake_cmpl; + + unsigned long last_activity; } pm; }; @@ -487,6 +489,20 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev) return MT7615_WTBL_SIZE; } +static inline void mt7615_mutex_acquire(struct mt7615_dev *dev) + __acquires(&dev->mt76.mutex) +{ + mutex_lock(&dev->mt76.mutex); + mt7615_pm_wake(dev); +} + +static inline void mt7615_mutex_release(struct mt7615_dev *dev) + __releases(&dev->mt76.mutex) +{ + dev->pm.last_activity = jiffies; + mutex_unlock(&dev->mt76.mutex); +} + static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) { static const u8 lmac_queue_map[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index f70a7d9d65e2..a780127377bf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -201,12 +201,13 @@ void mt7663u_wtbl_work(struct work_struct *work) dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, wtbl_work); + mt7615_mutex_acquire(dev); + list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) { spin_lock_bh(&dev->mt76.lock); list_del(&wd->node); spin_unlock_bh(&dev->mt76.lock); - mutex_lock(&dev->mt76.mutex); switch (wd->type) { case MT7615_WTBL_RATE_DESC: __mt7663u_mac_set_rates(dev, wd); @@ -215,10 +216,10 @@ void mt7663u_wtbl_work(struct work_struct *work) __mt7663u_mac_set_key(dev, wd); break; } - mutex_unlock(&dev->mt76.mutex); - kfree(wd); } + + mt7615_mutex_release(dev); } static void @@ -257,9 +258,9 @@ static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update) { struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - mutex_lock(&dev->mt76.mutex); + mt7615_mutex_acquire(dev); mt7615_mac_sta_poll(dev); - mutex_unlock(&dev->mt76.mutex); + mt7615_mutex_release(dev); return 0; } From 
ea4906c4be4944f8341d500f83267d1ba3e8e838 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:44 +0200 Subject: [PATCH 1366/2056] mt76: mt7615: wake device before accessing regmap in debugfs Make sure the device is in full-power before reading regs in debugfs Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7615/debugfs.c | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 8bb7c64db738..8894a0ab407b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -6,11 +6,16 @@ static int mt7615_radar_pattern_set(void *data, u64 val) { struct mt7615_dev *dev = data; + int err; if (!mt7615_wait_for_mcu_init(dev)) return 0; - return mt7615_mcu_rdd_send_pattern(dev); + mt7615_mutex_acquire(dev); + err = mt7615_mcu_rdd_send_pattern(dev); + mt7615_mutex_release(dev); + + return err; } DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL, @@ -84,7 +89,10 @@ mt7615_fw_debug_set(void *data, u64 val) return 0; dev->fw_debug = val; + + mt7615_mutex_acquire(dev); mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0); + mt7615_mutex_release(dev); return 0; } @@ -111,6 +119,8 @@ mt7615_reset_test_set(void *data, u64 val) if (!mt7615_wait_for_mcu_init(dev)) return 0; + mt7615_mutex_acquire(dev); + skb = alloc_skb(1, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -118,6 +128,8 @@ mt7615_reset_test_set(void *data, u64 val) skb_put(skb, 1); mt76_tx_queue_skb_raw(dev, 0, skb, 0); + mt7615_mutex_release(dev); + return 0; } @@ -167,9 +179,13 @@ mt7615_ampdu_stat_read(struct seq_file *file, void *data) { struct mt7615_dev *dev = file->private; + mt7615_mutex_acquire(dev); + mt7615_ampdu_stat_read_phy(&dev->phy, file); mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file); + mt7615_mutex_release(dev); + return 0; } @@ -221,7 +237,10 @@ static int mt7615_read_temperature(struct seq_file *s, void *data) return 0; /* cpu */ + mt7615_mutex_acquire(dev); temp = mt7615_mcu_get_temperature(dev, 0); + mt7615_mutex_release(dev); + seq_printf(s, "Temperature: %d\n", temp); return 0; @@ -233,6 +252,8 @@ mt7615_queues_acq(struct seq_file *s, void *data) struct mt7615_dev *dev = dev_get_drvdata(s->private); int i; + mt7615_mutex_acquire(dev); + for (i = 0; i < 16; i++) { int j, wmm_idx = i % MT7615_MAX_WMM_SETS; int acs = i / MT7615_MAX_WMM_SETS; @@ -253,6 +274,8 @@ mt7615_queues_acq(struct seq_file *s, void *data) seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); } + mt7615_mutex_release(dev); + return 0; } From 940a0c63e0c3df5881ff4a034501638de01a63a9 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:45 +0200 Subject: [PATCH 1367/2056] mt76: mt7615: wake device before configuring hw keys Make sure the device is in full-power before uploading keys to the hw Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 5f2c50a2449b..3c230479b36c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -336,7 +336,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt7615_sta *msta = sta ? 
(struct mt7615_sta *)sta->drv_priv : &mvif->sta; struct mt76_wcid *wcid = &msta->wcid; - int idx = key->keyidx; + int idx = key->keyidx, err; /* The hardware does not support per-STA RX GTK, fallback * to software mode for these. @@ -366,6 +366,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } + mt7615_mutex_acquire(dev); + if (cmd == SET_KEY) { key->hw_key_idx = wcid->idx; wcid->hw_key_idx = idx; @@ -376,9 +378,13 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, cmd == SET_KEY ? key : NULL); if (mt76_is_usb(&dev->mt76)) - return mt7615_queue_key_update(dev, cmd, msta, key); + err = mt7615_queue_key_update(dev, cmd, msta, key); + else + err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); - return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); + mt7615_mutex_release(dev); + + return err; } static int mt7615_config(struct ieee80211_hw *hw, u32 changed) From de5ff3c9d1a2d6ecbe4b8d29565430e7ce29bac2 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:46 +0200 Subject: [PATCH 1368/2056] mt76: mt7615: introduce pm_power_save delayed work Introduce runtime-pm power_save delayed work used to enable low-power after an inactivity period Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/init.c | 1 + .../net/wireless/mediatek/mt76/mt7615/mac.c | 28 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/main.c | 3 ++ .../wireless/mediatek/mt76/mt7615/mt7615.h | 8 +++++- 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index e1bf45466ad6..5bb87dd21279 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -436,6 +436,7 @@ void mt7615_init_device(struct mt7615_dev *dev) dev->phy.mt76 = &dev->mt76.phy; dev->mt76.phy.priv = &dev->phy; + INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); init_completion(&dev->pm.wake_cmpl); INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 3f470388fe01..b1148654771a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1850,6 +1850,34 @@ int mt7615_pm_wake(struct mt7615_dev *dev) } EXPORT_SYMBOL_GPL(mt7615_pm_wake); +void mt7615_pm_power_save_sched(struct mt7615_dev *dev) +{ + struct mt76_phy *mphy = dev->phy.mt76; + + if (!mt7615_firmware_offload(dev) || + !dev->pm.enable || !mt76_is_mmio(mphy->dev) || + !test_bit(MT76_STATE_RUNNING, &mphy->state)) + return; + + dev->pm.last_activity = jiffies; + if (!test_bit(MT76_STATE_PM, &mphy->state)) + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, + MT7615_PM_TIMEOUT); +} +EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched); + +void mt7615_pm_power_save_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + pm.ps_work.work); + + if (mt7615_firmware_own(dev)) + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, + MT7615_PM_TIMEOUT); +} + void mt7615_mac_work(struct work_struct *work) { struct mt7615_phy *phy; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 3c230479b36c..d1ddd5b15955 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -74,6 +74,7 @@ static void mt7615_stop(struct ieee80211_hw *hw) del_timer_sync(&phy->roc_timer); cancel_work_sync(&phy->roc_work); + cancel_delayed_work_sync(&dev->pm.ps_work); cancel_work_sync(&dev->pm.wake_work); mt7615_mutex_acquire(dev); @@ -1003,6 +1004,8 @@ static int mt7615_suspend(struct ieee80211_hw *hw, bool ext_phy = phy != &dev->phy; int err = 0; + cancel_delayed_work_sync(&dev->pm.ps_work); + mt7615_mutex_acquire(dev); clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index ec91b729a25c..3c5cd7f0ea1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -19,6 +19,7 @@ #define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \ MT7615_MAX_INTERFACES) +#define MT7615_PM_TIMEOUT (HZ / 12) #define MT7615_WATCHDOG_TIME (HZ / 10) #define MT7615_HW_SCAN_TIMEOUT (HZ / 10) #define MT7615_RESET_TIMEOUT (30 * HZ) @@ -299,9 +300,12 @@ struct mt7615_dev { #endif struct { + bool enable; + struct work_struct wake_work; struct completion wake_cmpl; + struct delayed_work ps_work; unsigned long last_activity; } pm; }; @@ -435,6 +439,8 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct ieee80211_tx_rate *rates); void mt7615_pm_wake_work(struct work_struct *work); int mt7615_pm_wake(struct mt7615_dev *dev); +void mt7615_pm_power_save_sched(struct mt7615_dev *dev); +void mt7615_pm_power_save_work(struct work_struct *work); int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev); int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd); int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, @@ -499,7 +505,7 @@ static inline void mt7615_mutex_acquire(struct mt7615_dev *dev) static inline void mt7615_mutex_release(struct mt7615_dev *dev) __releases(&dev->mt76.mutex) { - dev->pm.last_activity = jiffies; + mt7615_pm_power_save_sched(dev); mutex_unlock(&dev->mt76.mutex); } From 04414240adb7092a03a2ede3cd3e24d461adb756 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:47 +0200 Subject: [PATCH 1369/2056] mt76: mt7615: wake device in mt7615_update_channel before access regmap Introduce mt7615_update_survey utility routine in order to compute survey stats without waking up the device since it runs holding mt76 lock. 
Run mt7615_pm_wake directly in mt7615_update_channel since it can run with mt76.mutex held if called by mac80211 Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mac80211.c | 4 +-- drivers/net/wireless/mediatek/mt76/mt76.h | 1 + .../net/wireless/mediatek/mt76/mt7615/mac.c | 30 +++++++++++++++++-- 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 6aa3efa8f55e..f340af40f8f4 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -560,8 +560,7 @@ mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c) return &msband->chan[idx]; } -static void -mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) +void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) { struct mt76_channel_state *state = phy->chan_state; @@ -569,6 +568,7 @@ mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) phy->survey_time)); phy->survey_time = time; } +EXPORT_SYMBOL_GPL(mt76_update_survey_active_time); void mt76_update_survey(struct mt76_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 7e4adb9be273..532267056ff3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -884,6 +884,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw, bool mt76_has_tx_pending(struct mt76_phy *phy); void mt76_set_channel(struct mt76_phy *phy); void mt76_update_survey(struct mt76_dev *dev); +void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time); int mt76_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void mt76_set_stream_caps(struct mt76_phy *phy, bool vht); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index b1148654771a..e17cba62252f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1747,9 +1747,9 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx) state->noise = -(phy->noise >> 4); } -void mt7615_update_channel(struct mt76_dev *mdev) +static void __mt7615_update_channel(struct mt7615_dev *dev) { - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_dev *mdev = &dev->mt76; mt7615_phy_update_channel(&mdev->phy, 0); if (mdev->phy2) @@ -1758,8 +1758,32 @@ void mt7615_update_channel(struct mt76_dev *mdev) /* reset obss airtime */ mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); } + +void mt7615_update_channel(struct mt76_dev *mdev) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + if (mt7615_pm_wake(dev)) + return; + + __mt7615_update_channel(dev); + mt7615_pm_power_save_sched(dev); +} EXPORT_SYMBOL_GPL(mt7615_update_channel); +static void mt7615_update_survey(struct mt7615_dev *dev) +{ + struct mt76_dev *mdev = &dev->mt76; + ktime_t cur_time; + + __mt7615_update_channel(dev); + cur_time = ktime_get_boottime(); + + mt76_update_survey_active_time(&mdev->phy, cur_time); + if (mdev->phy2) + mt76_update_survey_active_time(mdev->phy2, cur_time); +} + static void mt7615_mac_update_mib_stats(struct mt7615_phy *phy) { @@ -1889,7 +1913,7 @@ void mt7615_mac_work(struct work_struct *work) mt7615_mutex_acquire(phy->dev); - mt76_update_survey(mdev); + mt7615_update_survey(phy->dev); if 
(++phy->mac_work_count == 5) { phy->mac_work_count = 0; From 3d0558c82200f86715f0db59e636144551c7c0ba Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:48 +0200 Subject: [PATCH 1370/2056] mt76: mt7615: acquire driver_own before configuring device for suspend Make sure to wake the device in mt7615_pci_suspend in order to properly configure device registers before suspend Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c index ba12f199bce0..2328d78e06a1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c @@ -75,6 +75,10 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state) bool hif_suspend; int i, err; + err = mt7615_pm_wake(dev); + if (err < 0) + return err; + hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && mt7615_firmware_offload(dev); if (hif_suspend) { From 888a678a64e08c4c0478daf773bf8f7a937895ed Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:49 +0200 Subject: [PATCH 1371/2056] mt76: mt7615: wake device before performing freq scan Set device in full power before performing hw scan or hw scheduled scan Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/main.c | 34 ++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index d1ddd5b15955..eb56cd7cb429 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -915,17 +915,26 @@ static int mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *req) { + struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt76_phy *mphy = hw->priv; + int err; - return mt7615_mcu_hw_scan(mphy->priv, vif, req); + mt7615_mutex_acquire(dev); + err = mt7615_mcu_hw_scan(mphy->priv, vif, req); + mt7615_mutex_release(dev); + + return err; } static void mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt76_phy *mphy = hw->priv; + mt7615_mutex_acquire(dev); mt7615_mcu_cancel_hw_scan(mphy->priv, vif); + mt7615_mutex_release(dev); } static int @@ -933,22 +942,35 @@ mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { + struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt76_phy *mphy = hw->priv; int err; + mt7615_mutex_acquire(dev); + err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req); if (err < 0) - return err; + goto out; - return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true); + err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true); +out: + mt7615_mutex_release(dev); + + return err; } static int mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { + struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt76_phy *mphy = hw->priv; + int err; - return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false); + mt7615_mutex_acquire(dev); + err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, false); + mt7615_mutex_release(dev); + + return err; } static int mt7615_remain_on_channel(struct ieee80211_hw *hw, @@ -1074,7 +1096,11 @@ 
static void mt7615_set_rekey_data(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_gtk_rekey_data *data) { + struct mt7615_dev *dev = mt7615_hw_dev(hw); + + mt7615_mutex_acquire(dev); mt7615_mcu_update_gtk_rekey(hw, vif, data); + mt7615_mutex_release(dev); } #endif /* CONFIG_PM */ From 030aaeddbab161403aa4e4f34b84971bb5db7a9f Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:50 +0200 Subject: [PATCH 1372/2056] mt76: mt7615: add missing lock in mt7615_regd_notifier Make sure to run mt7615_dfs_init_radar_detector in mt7615_regd_notifier holding mt76 mutex in order to avoid races and set the device in full power Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/init.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 5bb87dd21279..fdc40a1c84c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -285,7 +285,9 @@ mt7615_regd_notifier(struct wiphy *wiphy, if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) return; + mt7615_mutex_acquire(dev); mt7615_dfs_init_radar_detector(phy); + mt7615_mutex_release(dev); } static void From de1f66bab9b7d9c1c80ae18bb1d85b699bbf8f44 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:51 +0200 Subject: [PATCH 1373/2056] mt76: mt7615: run mt7615_mcu_set_wmm holding mt76 mutex Make sure to run mt7615_mcu_set_wmm() holding mt76 mutex in order to wake the device from low power state and avoid races Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index eb56cd7cb429..c52b5c68ba97 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -433,11 +433,17 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct mt7615_dev *dev = mt7615_hw_dev(hw); + int err; + + mt7615_mutex_acquire(dev); queue = mt7615_lmac_mapping(dev, queue); queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS; + err = mt7615_mcu_set_wmm(dev, queue, params); - return mt7615_mcu_set_wmm(dev, queue, params); + mt7615_mutex_release(dev); + + return err; } static void mt7615_configure_filter(struct ieee80211_hw *hw, From 46dadc3104415ad834db1be3208311fd5f02e3ab Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:52 +0200 Subject: [PATCH 1374/2056] mt76: mt7615: run mt7615_mcu_set_roc holding mt76 mutex Make sure to run mt7615_mcu_set_roc() holding mt76 mutex in order to wake the device from low power state and avoid races Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c52b5c68ba97..47dca270150d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -871,9 +871,11 @@ void mt7615_roc_work(struct work_struct *work) if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) return; + 
mt7615_mutex_acquire(phy->dev); ieee80211_iterate_active_interfaces(phy->mt76->hw, IEEE80211_IFACE_ITER_RESUME_ALL, mt7615_roc_iter, phy); + mt7615_mutex_release(phy->dev); ieee80211_remain_on_channel_expired(phy->mt76->hw); } @@ -991,20 +993,24 @@ static int mt7615_remain_on_channel(struct ieee80211_hw *hw, if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) return 0; + mt7615_mutex_acquire(phy->dev); + err = mt7615_mcu_set_roc(phy, vif, chan, duration); if (err < 0) { clear_bit(MT76_STATE_ROC, &phy->mt76->state); - return err; + goto out; } if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) { mt7615_mcu_set_roc(phy, vif, NULL, 0); clear_bit(MT76_STATE_ROC, &phy->mt76->state); - - return -ETIMEDOUT; + err = -ETIMEDOUT; } - return 0; +out: + mt7615_mutex_release(phy->dev); + + return err; } static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, @@ -1018,7 +1024,9 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, del_timer_sync(&phy->roc_timer); cancel_work_sync(&phy->roc_work); + mt7615_mutex_acquire(phy->dev); mt7615_mcu_set_roc(phy, vif, NULL, 0); + mt7615_mutex_release(phy->dev); return 0; } From 5cf8f7794d32e3ea7af6f04139a77157c536d4b5 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:53 +0200 Subject: [PATCH 1375/2056] mt76: mt7615: wake device before pulling packets from mac80211 queues Make sure the device is in full-power before pulling frames from mac80211 queues Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 6 +++++- .../net/wireless/mediatek/mt76/mt7615/main.c | 20 ++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index e17cba62252f..f340e74c1a71 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1841,9 +1841,13 @@ void mt7615_pm_wake_work(struct work_struct *work) pm.wake_work); mphy = dev->phy.mt76; - if (mt7615_driver_own(dev)) + if (mt7615_driver_own(dev)) { dev_err(mphy->dev->dev, "failed to wake device\n"); + goto out; + } + tasklet_schedule(&dev->mt76.tx_tasklet); +out: complete_all(&dev->pm.wake_cmpl); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 47dca270150d..3ff387f31659 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -638,6 +638,24 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, spin_unlock_bh(&dev->mt76.lock); } +static void +mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +{ + struct mt7615_dev *dev = mt7615_hw_dev(hw); + struct mt7615_phy *phy = mt7615_hw_phy(hw); + struct mt76_phy *mphy = phy->mt76; + + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) + return; + + if (test_bit(MT76_STATE_PM, &mphy->state)) { + queue_work(dev->mt76.wq, &dev->pm.wake_work); + return; + } + + tasklet_schedule(&dev->mt76.tx_tasklet); +} + static void mt7615_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -1134,7 +1152,7 @@ const struct ieee80211_ops mt7615_ops = { .set_key = mt7615_set_key, .ampdu_action = mt7615_ampdu_action, .set_rts_threshold = mt7615_set_rts_threshold, - .wake_tx_queue = mt76_wake_tx_queue, + .wake_tx_queue = mt7615_wake_tx_queue, .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, .sw_scan_start = 
mt76_sw_scan, .sw_scan_complete = mt76_sw_scan_complete, From 2b8cdfb28d3402b25fab47ffe70b6d858d26a2c1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:54 +0200 Subject: [PATCH 1376/2056] mt76: mt7615: wake device before pushing frames in mt7615_tx Queue frames pushed by mac80211 running mt7615_tx if the device is low-power state. Run wake workqueue in order to swicth to full-power before transmitting pending frames Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/init.c | 1 + .../net/wireless/mediatek/mt76/mt7615/mac.c | 25 ++++++++- .../net/wireless/mediatek/mt76/mt7615/main.c | 56 +++++++++++++++++-- .../wireless/mediatek/mt76/mt7615/mt7615.h | 6 ++ 4 files changed, 81 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index fdc40a1c84c4..02a82cd848c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -441,6 +441,7 @@ void mt7615_init_device(struct mt7615_dev *dev) INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); init_completion(&dev->pm.wake_cmpl); + spin_lock_init(&dev->pm.txq_lock); INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); skb_queue_head_init(&dev->phy.scan_event_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index f340e74c1a71..09885ab30092 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1836,6 +1836,7 @@ void mt7615_pm_wake_work(struct work_struct *work) { struct mt7615_dev *dev; struct mt76_phy *mphy; + int i; dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, pm.wake_work); @@ -1846,8 +1847,28 @@ void mt7615_pm_wake_work(struct work_struct *work) goto out; } + spin_lock_bh(&dev->pm.txq_lock); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + struct mt7615_sta *msta = dev->pm.tx_q[i].msta; + struct mt76_wcid *wcid = msta ? 
&msta->wcid : NULL; + struct ieee80211_sta *sta = NULL; + + if (!dev->pm.tx_q[i].skb) + continue; + + if (msta && wcid->sta) + sta = container_of((void *)msta, struct ieee80211_sta, + drv_priv); + + mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb); + dev->pm.tx_q[i].skb = NULL; + } + spin_unlock_bh(&dev->pm.txq_lock); + tasklet_schedule(&dev->mt76.tx_tasklet); + out: + ieee80211_wake_queues(mphy->hw); complete_all(&dev->pm.wake_cmpl); } @@ -1871,8 +1892,10 @@ int mt7615_pm_wake(struct mt7615_dev *dev) if (queue_work(dev->mt76.wq, &dev->pm.wake_work)) reinit_completion(&dev->pm.wake_cmpl); - if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) + if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) { + ieee80211_wake_queues(mphy->hw); return -ETIMEDOUT; + } return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 3ff387f31659..57d5e3aa9439 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -24,6 +24,22 @@ static bool mt7615_dev_running(struct mt7615_dev *dev) return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state); } +static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev, + struct mt7615_sta *msta) +{ + int i; + + spin_lock_bh(&dev->pm.txq_lock); + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + if (msta && dev->pm.tx_q[i].msta != msta) + continue; + + dev_kfree_skb(dev->pm.tx_q[i].skb); + dev->pm.tx_q[i].skb = NULL; + } + spin_unlock_bh(&dev->pm.txq_lock); +} + static int mt7615_start(struct ieee80211_hw *hw) { struct mt7615_dev *dev = mt7615_hw_dev(hw); @@ -77,6 +93,8 @@ static void mt7615_stop(struct ieee80211_hw *hw) cancel_delayed_work_sync(&dev->pm.ps_work); cancel_work_sync(&dev->pm.wake_work); + mt7615_free_pending_tx_skbs(dev, NULL); + mt7615_mutex_acquire(dev); mt76_testmode_reset(&dev->mt76, true); @@ -214,6 +232,8 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, if (vif == phy->monitor_vif) phy->monitor_vif = NULL; + mt7615_free_pending_tx_skbs(dev, msta); + mt7615_mcu_add_dev_info(dev, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); @@ -595,6 +615,8 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; + mt7615_free_pending_tx_skbs(dev, msta); + mt7615_mcu_sta_add(dev, vif, sta, false); mt7615_mac_wtbl_update(dev, msta->wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); @@ -665,22 +687,43 @@ static void mt7615_tx(struct ieee80211_hw *hw, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct mt76_wcid *wcid = &dev->mt76.global_wcid; + struct mt7615_sta *msta = NULL; + int qid; if (control->sta) { - struct mt7615_sta *sta; - - sta = (struct mt7615_sta *)control->sta->drv_priv; - wcid = &sta->wcid; + msta = (struct mt7615_sta *)control->sta->drv_priv; + wcid = &msta->wcid; } if (vif && !control->sta) { struct mt7615_vif *mvif; mvif = (struct mt7615_vif *)vif->drv_priv; - wcid = &mvif->sta.wcid; + msta = &mvif->sta; + wcid = &msta->wcid; } - mt76_tx(mphy, control->sta, wcid, skb); + if (!test_bit(MT76_STATE_PM, &mphy->state)) { + mt76_tx(mphy, control->sta, wcid, skb); + return; + } + + qid = skb_get_queue_mapping(skb); + if (qid >= MT_TXQ_PSD) { + qid = IEEE80211_AC_BE; + skb_set_queue_mapping(skb, qid); + } + + spin_lock_bh(&dev->pm.txq_lock); + if (!dev->pm.tx_q[qid].skb) { + 
ieee80211_stop_queues(hw); + dev->pm.tx_q[qid].msta = msta; + dev->pm.tx_q[qid].skb = skb; + queue_work(dev->mt76.wq, &dev->pm.wake_work); + } else { + dev_kfree_skb(skb); + } + spin_unlock_bh(&dev->pm.txq_lock); } static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) @@ -1059,6 +1102,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw, int err = 0; cancel_delayed_work_sync(&dev->pm.ps_work); + mt7615_free_pending_tx_skbs(dev, NULL); mt7615_mutex_acquire(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 3c5cd7f0ea1e..cc1f76adc975 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -302,6 +302,12 @@ struct mt7615_dev { struct { bool enable; + spinlock_t txq_lock; + struct { + struct mt7615_sta *msta; + struct sk_buff *skb; + } tx_q[IEEE80211_NUM_ACS]; + struct work_struct wake_work; struct completion wake_cmpl; From 1eae3fb949c146c10476692fb4625290c7060676 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:55 +0200 Subject: [PATCH 1377/2056] mt76: mt7615: run mt7615_pm_wake in mt7615_mac_sta_{add,remove} Run mt7615_pm_wake()/mt7615_pm_power_save_sched() directly in order to wake the device from low power state in mt7615_mac_sta_{add,remove} since they run holding mt76 mutex in common mt76 code Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 57d5e3aa9439..de4230b2d7bb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -583,7 +583,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - int idx; + int idx, err; idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); if (idx < 0) @@ -595,6 +595,10 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, msta->wcid.idx = idx; msta->wcid.ext_phy = mvif->band_idx; + err = mt7615_pm_wake(dev); + if (err) + return err; + if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { struct mt7615_phy *phy; @@ -605,6 +609,8 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); mt7615_mcu_sta_add(dev, vif, sta, true); + mt7615_pm_power_save_sched(dev); + return 0; } EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); @@ -616,6 +622,7 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; mt7615_free_pending_tx_skbs(dev, msta); + mt7615_pm_wake(dev); mt7615_mcu_sta_add(dev, vif, sta, false); mt7615_mac_wtbl_update(dev, msta->wcid.idx, @@ -632,6 +639,8 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (!list_empty(&msta->poll_list)) list_del_init(&msta->poll_list); spin_unlock_bh(&dev->sta_poll_lock); + + mt7615_pm_power_save_sched(dev); } EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove); From a2b30bd4096e5c95a7fed72102de6fff2b138ff7 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:56 +0200 Subject: [PATCH 1378/2056] mt76: mt7615: check 
MT76_STATE_PM flag before accessing the device Double-check if the device is in low-power state before accessing registermap in mt7615_sta_rate_tbl_update() and in mt7615_led_set_config() Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 6 ++++-- drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index de4230b2d7bb..0ce76472df2c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -664,8 +664,10 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, break; } msta->n_rates = i; - mt7615_mac_set_rates(phy, msta, NULL, msta->rates); - msta->rate_probe = false; + if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) { + mt7615_mac_set_rates(phy, msta, NULL, msta->rates); + msta->rate_probe = false; + } spin_unlock_bh(&dev->mt76.lock); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c index 69cba8609edf..7224a0078211 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c @@ -70,6 +70,10 @@ mt7615_led_set_config(struct led_classdev *led_cdev, mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); dev = container_of(mt76, struct mt7615_dev, mt76); + + if (test_bit(MT76_STATE_PM, &mt76->phy.state)) + return; + val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | FIELD_PREP(MT_LED_STATUS_OFF, delay_off) | FIELD_PREP(MT_LED_STATUS_ON, delay_on); From 1f549009b5b2f26db01e334dc00db160ee12c0e1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:57 +0200 Subject: [PATCH 1379/2056] mt76: mt7615: do not request {driver,fw}_own if already granted Check MT76_STATE_PM in mt7615_driver_own/mt7615_firmware_own in order to not requested power ownership if it is already granted Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/init.c | 1 + .../net/wireless/mediatek/mt76/mt7615/mcu.c | 21 ++++++++++--------- .../wireless/mediatek/mt76/mt7615/usb_mcu.c | 2 ++ 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index 02a82cd848c4..b3f0e2cae040 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -442,6 +442,7 @@ void mt7615_init_device(struct mt7615_dev *dev) INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); init_completion(&dev->pm.wake_cmpl); spin_lock_init(&dev->pm.txq_lock); + set_bit(MT76_STATE_PM, &dev->mphy.state); INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); skb_queue_head_init(&dev->phy.scan_event_list); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 72cfc197b936..6118c2eeece8 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1894,6 +1894,9 @@ int mt7615_driver_own(struct mt7615_dev *dev) int err = 0; u32 addr; + if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) + goto out; + mt7622_trigger_hif_int(dev, true); addr = is_mt7663(mdev) ? 
MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; @@ -1901,15 +1904,13 @@ int mt7615_driver_own(struct mt7615_dev *dev) addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) { - dev_err(dev->mt76.dev, "Timeout for driver own\n"); + dev_err(mdev->dev, "Timeout for driver own\n"); + set_bit(MT76_STATE_PM, &mphy->state); err = -EIO; - goto out; } - clear_bit(MT76_STATE_PM, &mphy->state); - -out: mt7622_trigger_hif_int(dev, false); +out: dev->pm.last_activity = jiffies; return err; @@ -1922,22 +1923,22 @@ int mt7615_firmware_own(struct mt7615_dev *dev) int err = 0; u32 addr; - addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; + if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) + return 0; + mt7622_trigger_hif_int(dev, true); + addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); if (is_mt7622(&dev->mt76) && !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, MT_CFG_LPCR_HOST_FW_OWN, 300)) { dev_err(dev->mt76.dev, "Timeout for firmware own\n"); + clear_bit(MT76_STATE_PM, &mphy->state); err = -EIO; - goto out; } - set_bit(MT76_STATE_PM, &mphy->state); - -out: mt7622_trigger_hif_int(dev, false); return err; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c index cd709fd617db..54885ad97891 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -60,6 +60,8 @@ int mt7663u_mcu_init(struct mt7615_dev *dev) dev->mt76.mcu_ops = &mt7663u_mcu_ops, + /* usb does not support runtime-pm */ + clear_bit(MT76_STATE_PM, &dev->mphy.state); mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) { From b5b4c7ddf11063dbc231a1ac955eff117d93fd70 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:58 +0200 Subject: [PATCH 1380/2056] mt76: mt7615: add runtime-pm knob in mt7615 debugfs Introduce runtime-pm knob in mt7615 debugfs in order to enable/disable runtime pm available in offload firmware Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7615/debugfs.c | 24 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/mac.c | 17 +++++++++++++ .../wireless/mediatek/mt76/mt7615/mt7615.h | 1 + 3 files changed, 42 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 8894a0ab407b..77889becf22e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -51,6 +51,29 @@ mt7615_scs_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get, mt7615_scs_set, "%lld\n"); +static int +mt7615_pm_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + if (!mt7615_wait_for_mcu_init(dev)) + return 0; + + return mt7615_pm_set_enable(dev, val); +} + +static int +mt7615_pm_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = dev->pm.enable; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); + static int mt7615_dbdc_set(void *data, u64 val) { @@ -351,6 +374,7 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) debugfs_create_file("scs", 0600, dir, dev, &fops_scs); debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); + 
debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, mt7615_radio_read); debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 09885ab30092..59f788be7268 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1929,6 +1929,23 @@ void mt7615_pm_power_save_work(struct work_struct *work) MT7615_PM_TIMEOUT); } +int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable) +{ + if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) + return -EOPNOTSUPP; + + mt7615_mutex_acquire(dev); + + if (dev->pm.enable == enable) + goto out; + + dev->pm.enable = enable; +out: + mt7615_mutex_release(dev); + + return 0; +} + void mt7615_mac_work(struct work_struct *work) { struct mt7615_phy *phy; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index cc1f76adc975..ad7b206ab043 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -443,6 +443,7 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev); void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct ieee80211_tx_rate *probe_rate, struct ieee80211_tx_rate *rates); +int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable); void mt7615_pm_wake_work(struct work_struct *work); int mt7615_pm_wake(struct mt7615_dev *dev); void mt7615_pm_power_save_sched(struct mt7615_dev *dev); From 83b9f42aea988274195ae31df5af5b4a5328886d Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:15:59 +0200 Subject: [PATCH 1381/2056] mt76: mt7615: enable beacon hw filter for runtime-pm In order to reduce number of received interrupts and power consumption, enable hw beacon filter if runtime-pm is enabled Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 26 +++++++ .../net/wireless/mediatek/mt76/mt7615/main.c | 19 +++++ .../net/wireless/mediatek/mt76/mt7615/mcu.c | 73 +++++++++---------- .../wireless/mediatek/mt76/mt7615/mt7615.h | 2 + 4 files changed, 83 insertions(+), 37 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 59f788be7268..1c86ecf8fcd9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1929,8 +1929,31 @@ void mt7615_pm_power_save_work(struct work_struct *work) MT7615_PM_TIMEOUT); } +static void +mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt7615_phy *phy = priv; + struct mt7615_dev *dev = phy->dev; + bool ext_phy = phy != &dev->phy; + + if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable)) + return; + + if (dev->pm.enable) { + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + mt76_set(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } else { + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; + mt76_clear(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } +} + int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable) { + struct mt76_phy *mphy = dev->phy.mt76; + if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) return -EOPNOTSUPP; @@ -1940,6 +1963,9 @@ int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable) goto out; 
dev->pm.enable = enable; + ieee80211_iterate_active_interfaces(mphy->hw, + IEEE80211_IFACE_ITER_RESUME_ALL, + mt7615_pm_interface_iter, mphy->priv); out: mt7615_mutex_release(dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 0ce76472df2c..b1cfae7c4b55 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -209,6 +209,18 @@ static int mt7615_add_interface(struct ieee80211_hw *hw, } ret = mt7615_mcu_add_dev_info(dev, vif, true); + if (ret) + goto out; + + if (dev->pm.enable) { + ret = mt7615_mcu_set_bss_pm(dev, vif, true); + if (ret) + goto out; + + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; + mt76_set(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } out: mt7615_mutex_release(dev); @@ -234,6 +246,13 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw, mt7615_free_pending_tx_skbs(dev, msta); + if (dev->pm.enable) { + bool ext_phy = phy != &dev->phy; + + mt7615_mcu_set_bss_pm(dev, vif, false); + mt76_clear(dev, MT_WF_RFCR(ext_phy), + MT_WF_RFCR_DROP_OTHER_BEACON); + } mt7615_mcu_add_dev_info(dev, vif, false); rcu_assign_pointer(dev->mt76.wcid[idx], NULL); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 6118c2eeece8..2d1015451699 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -3460,43 +3460,8 @@ int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy) return ret; } -#ifdef CONFIG_PM -int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend) -{ - struct { - struct { - u8 hif_type; /* 0x0: HIF_SDIO - * 0x1: HIF_USB - * 0x2: HIF_PCIE - */ - u8 pad[3]; - } __packed hdr; - struct hif_suspend_tlv { - __le16 tag; - __le16 len; - u8 suspend; - } __packed hif_suspend; - } req = { - .hif_suspend = { - .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */ - .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), - .suspend = suspend, - }, - }; - - if (mt76_is_mmio(&dev->mt76)) - req.hdr.hif_type = 2; - else if (mt76_is_usb(&dev->mt76)) - req.hdr.hif_type = 1; - - return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, - &req, sizeof(req), true); -} -EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend); - -static int -mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, - bool enable) +int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool enable) { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct { @@ -3536,6 +3501,40 @@ mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, &req, sizeof(req), false); } +#ifdef CONFIG_PM +int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend) +{ + struct { + struct { + u8 hif_type; /* 0x0: HIF_SDIO + * 0x1: HIF_USB + * 0x2: HIF_PCIE + */ + u8 pad[3]; + } __packed hdr; + struct hif_suspend_tlv { + __le16 tag; + __le16 len; + u8 suspend; + } __packed hif_suspend; + } req = { + .hif_suspend = { + .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */ + .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), + .suspend = suspend, + }, + }; + + if (mt76_is_mmio(&dev->mt76)) + req.hdr.hif_type = 2; + else if (mt76_is_usb(&dev->mt76)) + req.hdr.hif_type = 1; + + return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, + &req, sizeof(req), true); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend); + static int mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif 
*vif, bool suspend, struct cfg80211_wowlan *wowlan) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index ad7b206ab043..9bbfe1168208 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -640,6 +640,8 @@ int mt7615_driver_own(struct mt7615_dev *dev); int mt7615_init_debugfs(struct mt7615_dev *dev); int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); +int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, + bool enable); int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend); void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); From ec4b9f380c15933e3dea72bb4723ea5e21d35b13 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:16:00 +0200 Subject: [PATCH 1382/2056] mt76: mt7615: add idle-timeout knob in mt7615 debugfs Introduce idle-timeout knob in mt7615 debugfs in order to configure the idle time to switch to low-power state Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../wireless/mediatek/mt76/mt7615/debugfs.c | 25 +++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/init.c | 1 + .../net/wireless/mediatek/mt76/mt7615/mac.c | 4 +-- .../wireless/mediatek/mt76/mt7615/mt7615.h | 1 + 4 files changed, 29 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c index 77889becf22e..88931658a9fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c @@ -74,6 +74,29 @@ mt7615_pm_get(void *data, u64 *val) DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); +static int +mt7615_pm_idle_timeout_set(void *data, u64 val) +{ + struct mt7615_dev *dev = data; + + dev->pm.idle_timeout = msecs_to_jiffies(val); + + return 0; +} + +static int +mt7615_pm_idle_timeout_get(void *data, u64 *val) +{ + struct mt7615_dev *dev = data; + + *val = jiffies_to_msecs(dev->pm.idle_timeout); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get, + mt7615_pm_idle_timeout_set, "%lld\n"); + static int mt7615_dbdc_set(void *data, u64 val) { @@ -375,6 +398,8 @@ int mt7615_init_debugfs(struct mt7615_dev *dev) debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); + debugfs_create_file("idle-timeout", 0600, dir, dev, + &fops_pm_idle_timeout); debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, mt7615_radio_read); debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c index b3f0e2cae040..fc1ebabfebac 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c @@ -456,6 +456,7 @@ void mt7615_init_device(struct mt7615_dev *dev) timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); mt7615_init_wiphy(hw); + dev->pm.idle_timeout = MT7615_PM_TIMEOUT; dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; dev->mphy.sband_5g.sband.vht_cap.cap |= diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 
1c86ecf8fcd9..abb81a56e908 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1913,7 +1913,7 @@ void mt7615_pm_power_save_sched(struct mt7615_dev *dev) dev->pm.last_activity = jiffies; if (!test_bit(MT76_STATE_PM, &mphy->state)) queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, - MT7615_PM_TIMEOUT); + dev->pm.idle_timeout); } EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched); @@ -1926,7 +1926,7 @@ void mt7615_pm_power_save_work(struct work_struct *work) if (mt7615_firmware_own(dev)) queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, - MT7615_PM_TIMEOUT); + dev->pm.idle_timeout); } static void diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 9bbfe1168208..9f258270c435 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -313,6 +313,7 @@ struct mt7615_dev { struct delayed_work ps_work; unsigned long last_activity; + unsigned long idle_timeout; } pm; }; From 894b7767ec2fc21574775c354ab5350e51c2171c Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:16:01 +0200 Subject: [PATCH 1383/2056] mt76: mt7615: improve mt7615_driver_own reliability mt7615_driver_own can fail if it runs too close to mt7615_fw_own. In order to improve mt7615_driver_own reliability, retry to get runtime-pm ownership if mt7615_driver_own fails Co-developed-by: Sean Wang Signed-off-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mcu.c | 27 ++++++++++++------- .../wireless/mediatek/mt76/mt7615/mt7615.h | 2 ++ 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 2d1015451699..2e9e0002331e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1891,29 +1891,36 @@ int mt7615_driver_own(struct mt7615_dev *dev) { struct mt76_phy *mphy = &dev->mt76.phy; struct mt76_dev *mdev = &dev->mt76; - int err = 0; - u32 addr; + int i; if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) goto out; mt7622_trigger_hif_int(dev, true); - addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; - mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); + for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { + u32 addr; - addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; - if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) { - dev_err(mdev->dev, "Timeout for driver own\n"); - set_bit(MT76_STATE_PM, &mphy->state); - err = -EIO; + addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); + + addr = is_mt7663(mdev) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; + if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50)) + break; } mt7622_trigger_hif_int(dev, false); + + if (i == MT7615_DRV_OWN_RETRY_COUNT) { + dev_err(mdev->dev, "driver own failed\n"); + set_bit(MT76_STATE_PM, &mphy->state); + return -EIO; + } + out: dev->pm.last_activity = jiffies; - return err; + return 0; } EXPORT_SYMBOL_GPL(mt7615_driver_own); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 9f258270c435..688bd7b68668 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -33,6 +33,8 @@ #define MT7615_RX_RING_SIZE 1024 #define MT7615_RX_MCU_RING_SIZE 512 +#define MT7615_DRV_OWN_RETRY_COUNT 10 + #define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin" #define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin" #define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin" From 4bb586bc33b9845d76fc8d4efc2910e37876c399 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 3 Jul 2020 10:17:35 +0200 Subject: [PATCH 1384/2056] mt76: mt7663u: sync probe sampling with rate configuration On usb device rate configuration for sampling is performed relying on a workqueue since it is not possible to access the device in the interrupt context. Move the configuration of the probe_rate flag in the workqueue in order to keep probe sampling in sync with actual rate configuration Fixes: eb99cc95c3b6 ("mt76: mt7615: introduce mt7663u support") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 5 +++- .../net/wireless/mediatek/mt76/mt7615/main.c | 4 +-- .../wireless/mediatek/mt76/mt7615/pci_mac.c | 1 - .../net/wireless/mediatek/mt76/mt7615/usb.c | 28 +++++++++++-------- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index abb81a56e908..af5716797475 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -917,6 +917,9 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta, struct mt7615_dev *dev = phy->dev; struct mt7615_wtbl_desc *wd; + if (work_pending(&dev->wtbl_work)) + return -EBUSY; + wd = kzalloc(sizeof(*wd), GFP_ATOMIC); if (!wd) return -ENOMEM; @@ -1023,6 +1026,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + sta->rate_probe = !!probe_rate; } EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); @@ -1231,7 +1235,6 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta, phy = dev->mt76.phy2->priv; mt7615_mac_set_rates(phy, sta, NULL, sta->rates); - sta->rate_probe = false; } spin_unlock_bh(&dev->mt76.lock); } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index b1cfae7c4b55..79453c890f20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -683,10 +683,8 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw, break; } msta->n_rates = i; - if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) { + if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) mt7615_mac_set_rates(phy, msta, NULL, msta->rates); - msta->rate_probe = false; - } spin_unlock_bh(&dev->mt76.lock); } diff --git 
a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c index 7ec91c0856f5..2d67f9a148cd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c @@ -155,7 +155,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, spin_lock_bh(&dev->mt76.lock); mt7615_mac_set_rates(phy, msta, &info->control.rates[0], msta->rates); - msta->rate_probe = true; spin_unlock_bh(&dev->mt76.lock); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index a780127377bf..feda5d0c171e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -140,6 +140,8 @@ __mt7663u_mac_set_rates(struct mt7615_dev *dev, mt76_wr(dev, addr + 27 * 4, w27); + sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ val = mt76_rr(dev, MT_LPON_UTTR0); sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; @@ -196,17 +198,21 @@ __mt7663u_mac_set_key(struct mt7615_dev *dev, void mt7663u_wtbl_work(struct work_struct *work) { struct mt7615_wtbl_desc *wd, *wd_next; + struct list_head wd_list; struct mt7615_dev *dev; dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, wtbl_work); - mt7615_mutex_acquire(dev); + INIT_LIST_HEAD(&wd_list); + spin_lock_bh(&dev->mt76.lock); + list_splice_init(&dev->wd_head, &wd_list); + spin_unlock_bh(&dev->mt76.lock); - list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) { - spin_lock_bh(&dev->mt76.lock); + list_for_each_entry_safe(wd, wd_next, &wd_list, node) { list_del(&wd->node); - spin_unlock_bh(&dev->mt76.lock); + + mt7615_mutex_acquire(dev); switch (wd->type) { case MT7615_WTBL_RATE_DESC: @@ -216,10 +222,11 @@ void mt7663u_wtbl_work(struct work_struct *work) __mt7663u_mac_set_key(dev, wd); break; } + + mt7615_mutex_release(dev); + kfree(wd); } - - mt7615_mutex_release(dev); } static void @@ -236,17 +243,16 @@ mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct ieee80211_sta *sta, struct mt76_tx_info *tx_info) { + struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { - struct mt7615_sta *msta; - - msta = container_of(wcid, struct mt7615_sta, wcid); + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && + !msta->rate_probe) { + /* request to configure sampling rate */ spin_lock_bh(&dev->mt76.lock); mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], msta->rates); - msta->rate_probe = true; spin_unlock_bh(&dev->mt76.lock); } mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb); From e9eb59c60d21ba3433868c89eb2460e70a2101fd Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 7 Jul 2020 23:23:11 +0200 Subject: [PATCH 1385/2056] mt76: mt7615: avoid scheduling runtime-pm during hw scan Do not schedule ps_work during hw scanning or hw scheduled frequency scan Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index af5716797475..1c440ed183ee 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1908,12 +1908,21 @@ void mt7615_pm_power_save_sched(struct mt7615_dev *dev) { struct mt76_phy *mphy = dev->phy.mt76; - if (!mt7615_firmware_offload(dev) || - !dev->pm.enable || !mt76_is_mmio(mphy->dev) || - !test_bit(MT76_STATE_RUNNING, &mphy->state)) + if (!mt7615_firmware_offload(dev)) + return; + + if (!mt76_is_mmio(mphy->dev)) + return; + + if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state)) return; dev->pm.last_activity = jiffies; + + if (test_bit(MT76_HW_SCANNING, &mphy->state) || + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state)) + return; + if (!test_bit(MT76_STATE_PM, &mphy->state)) queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, dev->pm.idle_timeout); From d71d67a706352b0111a17fc8d005aeceb9a5ffd1 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 7 Jul 2020 23:27:20 +0200 Subject: [PATCH 1386/2056] mt76: mt7615: reschedule ps work according to last activity Reschedule runtime-pm delayed work if there is a new activity when ps delayed work is already scheduled Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 1c440ed183ee..126c54afefd4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -1932,13 +1932,21 @@ EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched); void mt7615_pm_power_save_work(struct work_struct *work) { struct mt7615_dev *dev; + unsigned long delta; dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, pm.ps_work.work); - if (mt7615_firmware_own(dev)) - queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, - dev->pm.idle_timeout); + delta = dev->pm.idle_timeout; + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { + delta = dev->pm.last_activity + delta - jiffies; + goto out; + } + + if (!mt7615_firmware_own(dev)) + return; +out: + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); } static void From 4a850f8dc68b8c4a20333521b31600c9d31ccb5d Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Jul 2020 03:16:46 +0800 Subject: [PATCH 1387/2056] mt76: mt7663u: fix memory leak in set key Fix memory leak in set key. 
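The deferred key path owns a copy of the key material by the time the worker runs, so it has to be released on every exit of the update helper, not only on success. A minimal sketch of that single-exit cleanup, with hypothetical names (key_update_desc, hw_program_key) in place of the driver's real work descriptor:

/* Illustrative sketch only: key_update_desc and hw_program_key() are
 * hypothetical stand-ins for the driver's deferred key work item, not
 * the real mt7615 structures.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct key_update_desc {
	u8 *key;	/* duplicated key material owned by this descriptor */
	int keylen;
	int cmd;
};

int hw_program_key(const u8 *key, int keylen, int cmd);	/* hypothetical */

static int process_key_update(struct key_update_desc *kd)
{
	int err;

	if (!kd->key) {
		err = -EINVAL;
		goto out;
	}

	err = hw_program_key(kd->key, kd->keylen, kd->cmd);
out:
	/* free the duplicated key on success and on every error path */
	kfree(kd->key);
	kd->key = NULL;

	return err;
}
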
Fixes: eb99cc95c3b6 ("mt76: mt7615: introduce mt7663u support") Signed-off-by: Sean Wang Acked-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/usb.c | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index feda5d0c171e..1f1578895ed5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -167,12 +167,16 @@ __mt7663u_mac_set_key(struct mt7615_dev *dev, lockdep_assert_held(&dev->mt76.mutex); - if (!sta) - return -EINVAL; + if (!sta) { + err = -EINVAL; + goto out; + } cipher = mt7615_mac_get_cipher(key->cipher); - if (cipher == MT_CIPHER_NONE) - return -EOPNOTSUPP; + if (cipher == MT_CIPHER_NONE) { + err = -EOPNOTSUPP; + goto out; + } wcid = &wd->sta->wcid; @@ -180,19 +184,22 @@ __mt7663u_mac_set_key(struct mt7615_dev *dev, err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, cipher, key->cmd); if (err < 0) - return err; + goto out; err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, key->cmd); if (err < 0) - return err; + goto out; if (key->cmd == SET_KEY) wcid->cipher |= BIT(cipher); else wcid->cipher &= ~BIT(cipher); - return 0; +out: + kfree(key->key); + + return err; } void mt7663u_wtbl_work(struct work_struct *work) From c876039e95559350ee101d4b6f6084d9803f2995 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Jul 2020 03:16:47 +0800 Subject: [PATCH 1388/2056] mt76: mt7663u: fix potential memory leak in mcu message handler Fix potential memory leak in mcu message handler on error condition. Fixes: eb99cc95c3b6 ("mt76: mt7615: introduce mt7663u support") Acked-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c index 54885ad97891..39cd7dd95c9c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -34,7 +34,6 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, 1000, ep); - dev_kfree_skb(skb); if (ret < 0) goto out; @@ -43,6 +42,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, out: mutex_unlock(&mdev->mcu.mutex); + dev_kfree_skb(skb); return ret; } From 9248c08c3fc4ef816c82aa49d01123f4746d349f Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 8 Jul 2020 03:16:48 +0800 Subject: [PATCH 1389/2056] mt76: mt7615: fix potential memory leak in mcu message handler Fix potential memory leak in mcu message handler on error condition. 
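The handler owns the received skb, so it must be consumed on every exit path, including the early return taken on a sequence mismatch. A hedged sketch of that shape, with a hypothetical mcu_rxd layout and parse helper rather than the driver's real one:

/* Illustrative sketch only: mcu_rxd and parse_mcu_response() are
 * hypothetical stand-ins, not the driver's real rxd layout.
 */
#include <linux/errno.h>
#include <linux/skbuff.h>

struct mcu_rxd {
	u8 seq;
	u8 cmd;
};

static int parse_mcu_response(struct sk_buff *skb, int cmd, int seq)
{
	struct mcu_rxd *rxd = (struct mcu_rxd *)skb->data;
	int ret = 0;

	if (seq != rxd->seq) {
		ret = -EAGAIN;
		goto out;
	}

	switch (cmd) {
	/* ... per-command handling elided ... */
	default:
		break;
	}
out:
	/* the received skb is consumed on every path, including the
	 * sequence-mismatch early exit
	 */
	dev_kfree_skb(skb);

	return ret;
}
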
Fixes: 0e6a29e477f3 ("mt76: mt7615: add support to read temperature from mcu") Acked-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 2e9e0002331e..83e29ee7b08b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -183,8 +183,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; int ret = 0; - if (seq != rxd->seq) - return -EAGAIN; + if (seq != rxd->seq) { + ret = -EAGAIN; + goto out; + } switch (cmd) { case MCU_CMD_PATCH_SEM_CONTROL: @@ -215,6 +217,7 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, default: break; } +out: dev_kfree_skb(skb); return ret; From eb744e5df86cf7e377d0acc4e686101b0fd9663a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 9 Jul 2020 14:04:35 +0300 Subject: [PATCH 1390/2056] mt76: mt7915: potential array overflow in mt7915_mcu_tx_rate_report() Smatch complains that "wcidx" value comes from the network and thus cannot be trusted. In this case, it actually seems to come from the firmware. If your wireless firmware is malicious then probably no amount of carefulness can protect you. On the other hand, these days we still try to check the firmware as much as possible. Verifying that the index is within bounds will silence a static checker warning. And it's harmless and a good exercise in kernel hardening. So I suggest that we do add a bounds check. Fixes: e57b7901469f ("mt76: add mac80211 driver for MT7915 PCIe-based chipsets") Signed-off-by: Dan Carpenter Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7915/mcu.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index fa9f32fa9f2e..93b1439e147b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -505,15 +505,22 @@ static void mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) { struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data; - u16 wcidx = le16_to_cpu(ra->wlan_idx); - struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]); - struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid); - struct mt7915_sta_stats *stats = &msta->stats; - struct mt76_phy *mphy = &dev->mphy; struct rate_info rate = {}, prob_rate = {}; + u16 probe = le16_to_cpu(ra->prob_up_rate); u16 attempts = le16_to_cpu(ra->attempts); u16 curr = le16_to_cpu(ra->curr_rate); - u16 probe = le16_to_cpu(ra->prob_up_rate); + u16 wcidx = le16_to_cpu(ra->wlan_idx); + struct mt76_phy *mphy = &dev->mphy; + struct mt7915_sta_stats *stats; + struct mt7915_sta *msta; + struct mt76_wcid *wcid; + + if (wcidx >= MT76_N_WCIDS) + return; + + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); + msta = container_of(wcid, struct mt7915_sta, wcid); + stats = &msta->stats; if (msta->wcid.ext_phy && dev->mt76.phy2) mphy = dev->mt76.phy2; From a6e29d8ecd3d4eea8748d81d7b577083b4a7c441 Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Mon, 15 Jun 2020 02:23:33 +0800 Subject: [PATCH 1391/2056] mt76: mt7915: add missing CONFIG_MAC80211_DEBUGFS Add CONFIG_MAC80211_DEBUGFS to fix a reported warning. 
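mac80211 declares the .sta_add_debugfs callback only when CONFIG_MAC80211_DEBUGFS is enabled, so the per-station debugfs helpers need to be compiled under the same guard. A minimal sketch of the pattern, with a hypothetical example_sta_add_debugfs() standing in for the real hook:

/* Illustrative sketch only: example_sta_add_debugfs() and example_ops
 * are hypothetical; the real driver guards its own helpers this way.
 */
#include <linux/debugfs.h>
#include <net/mac80211.h>

#ifdef CONFIG_MAC80211_DEBUGFS
static u32 example_stat;

static void example_sta_add_debugfs(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct dentry *dir)
{
	debugfs_create_u32("example_stat", 0400, dir, &example_stat);
}
#endif

static const struct ieee80211_ops example_ops = {
	/* ... regular callbacks elided ... */
#ifdef CONFIG_MAC80211_DEBUGFS
	.sta_add_debugfs = example_sta_add_debugfs,
#endif
};
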
Fixes: ec9742a8f38e ("mt76: mt7915: add .sta_add_debugfs support") Reported-by: kernel test robot Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c index 95e073c02ce6..38f473d587c9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c @@ -391,6 +391,7 @@ int mt7915_init_debugfs(struct mt7915_dev *dev) return 0; } +#ifdef CONFIG_MAC80211_DEBUGFS /** per-station debugfs **/ /* usage: */ @@ -468,3 +469,4 @@ void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate); debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats); } +#endif From cdcba424814d79e3dbffa06668f72099288a23f6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 15 Jul 2020 14:46:26 +0200 Subject: [PATCH 1392/2056] mt76: mt7615: take into account sdio bus configuring txwi usb and sdio relies on SF architecture. This is a preliminary patch to add SDIO support to mt76 driver Co-developed-by: Sean Wang Signed-off-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/mac.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index 126c54afefd4..afac8d81d317 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -529,18 +529,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, struct ieee80211_sta *sta, int pid, struct ieee80211_key_conf *key, bool beacon) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; bool multicast = is_multicast_ether_addr(hdr->addr1); struct ieee80211_vif *vif = info->control.vif; + bool is_mmio = mt76_is_mmio(&dev->mt76); + u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE; struct mt76_phy *mphy = &dev->mphy; - bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; - bool is_usb = mt76_is_usb(&dev->mt76); - int tx_count = 8; - u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; __le16 fc = hdr->frame_control; - u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE; + int tx_count = 8; u16 seqno = 0; if (vif) { @@ -566,10 +566,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, p_fmt = MT_TX_TYPE_FW; q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; } else { - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; + p_fmt = is_mmio ? 
MT_TX_TYPE_CT : MT_TX_TYPE_SF; q_idx = wmm_idx * MT7615_MAX_WMM_SETS + mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); } @@ -675,7 +675,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi, txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); - if (is_usb) + if (!is_mmio) txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); From 75b10f0cbd0b9ff5d6a8f0c3ee5ce4193cd0450a Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 15 Jul 2020 14:46:27 +0200 Subject: [PATCH 1393/2056] mt76: mt76u: add mt76_skb_adjust_pad utility routine Introduce mt76_skb_adjust_pad to reuse the code adding sdio support to mt7615 driver and remove code duplication. Move 4B header configuration for usb devices out of mt76_skb_adjust_pad Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76.h | 2 +- .../net/wireless/mediatek/mt76/mt7615/usb.c | 8 +++-- .../wireless/mediatek/mt76/mt7615/usb_mcu.c | 3 +- .../wireless/mediatek/mt76/mt76x02_usb_core.c | 3 +- drivers/net/wireless/mediatek/mt76/tx.c | 29 +++++++++++++++++++ drivers/net/wireless/mediatek/mt76/usb.c | 29 ------------------- 6 files changed, 39 insertions(+), 35 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 532267056ff3..8ee159d614ae 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -1015,7 +1015,7 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); } -int mt76u_skb_dma_info(struct sk_buff *skb, u32 info); +int mt76_skb_adjust_pad(struct sk_buff *skb); int mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, u16 val, u16 offset, void *buf, size_t len); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index 1f1578895ed5..aba926f1eeb5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -252,7 +252,8 @@ mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, { struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); + struct sk_buff *skb = tx_info->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && !msta->rate_probe) { @@ -262,9 +263,10 @@ mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, msta->rates); spin_unlock_bh(&dev->mt76.lock); } - mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb); + mt7663u_mac_write_txwi(dev, wcid, qid, sta, skb); - return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len); + put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); + return mt76_skb_adjust_pad(skb); } static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c index 39cd7dd95c9c..0b33df3e3bfe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c @@ -28,7 +28,8 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, else ep = MT_EP_OUT_AC_BE; - ret = mt76u_skb_dma_info(skb, skb->len); 
+ put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); + ret = mt76_skb_adjust_pad(skb); if (ret < 0) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 0180b6200b17..37321e656776 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -56,8 +56,9 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) */ info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; + put_unaligned_le32(info, skb_push(skb, sizeof(info))); - return mt76u_skb_dma_info(skb, info); + return mt76_skb_adjust_pad(skb); } int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index 5adf92d7a9f3..3afd89ecd6c9 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -677,3 +677,32 @@ u8 mt76_ac_to_hwq(u8 ac) return wmm_queue_map[ac]; } EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); + +int mt76_skb_adjust_pad(struct sk_buff *skb) +{ + struct sk_buff *iter, *last = skb; + u32 pad; + + /* Add zero pad of 4 - 7 bytes */ + pad = round_up(skb->len, 4) + 4 - skb->len; + + /* First packet of a A-MSDU burst keeps track of the whole burst + * length, need to update length of it and the last packet. + */ + skb_walk_frags(skb, iter) { + last = iter; + if (!iter->next) { + skb->data_len += pad; + skb->len += pad; + break; + } + } + + if (skb_pad(last, pad)) + return -ENOMEM; + + __skb_put(last, pad); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 84e2fd0a4fc1..5f19f9e51d9c 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -904,35 +904,6 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb, return urb->num_sgs; } -int mt76u_skb_dma_info(struct sk_buff *skb, u32 info) -{ - struct sk_buff *iter, *last = skb; - u32 pad; - - put_unaligned_le32(info, skb_push(skb, sizeof(info))); - /* Add zero pad of 4 - 7 bytes */ - pad = round_up(skb->len, 4) + 4 - skb->len; - - /* First packet of a A-MSDU burst keeps track of the whole burst - * length, need to update length of it and the last packet. - */ - skb_walk_frags(skb, iter) { - last = iter; - if (!iter->next) { - skb->data_len += pad; - skb->len += pad; - break; - } - } - - if (skb_pad(last, pad)) - return -ENOMEM; - __skb_put(last, pad); - - return 0; -} -EXPORT_SYMBOL_GPL(mt76u_skb_dma_info); - static int mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, struct sk_buff *skb, struct mt76_wcid *wcid, From 0fa407c320695f03847872b7a7d96b732901504d Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 15 Jul 2020 14:46:28 +0200 Subject: [PATCH 1394/2056] mt76: mt7615: sdio code must access rate/key regs in preocess context As usb, sdio relies on mt76 workqueue to configure tx rate or upload keys to the hw. 
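Only the mmio back end can program these registers inline; usb and sdio register accesses can sleep, so the updates are deferred to the mt76 workqueue and run in process context. A hedged sketch of that bus dispatch, with hypothetical helpers (write_rates_inline, queue_rate_work) in place of the real routines:

/* Illustrative sketch only: write_rates_inline() and queue_rate_work()
 * are hypothetical stand-ins for the driver's mmio and deferred paths.
 */
#include "mt76.h"	/* struct mt76_dev, mt76_is_mmio() */

void write_rates_inline(struct mt76_dev *mdev, void *sta_ctx);	/* hypothetical */
void queue_rate_work(struct mt76_dev *mdev, void *sta_ctx);	/* hypothetical */

static void set_sta_rates(struct mt76_dev *mdev, void *sta_ctx)
{
	if (mt76_is_mmio(mdev)) {
		/* mmio: registers can be programmed inline */
		write_rates_inline(mdev, sta_ctx);
		return;
	}

	/* usb/sdio: register access may sleep, so hand the update to the
	 * driver workqueue and perform it in process context
	 */
	queue_rate_work(mdev, sta_ctx);
}

The key update path follows the same split, as the diff below shows.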
This is a preliminary patch to add SDIO support to mt76 driver Co-developed-by: Sean Wang Signed-off-by: Sean Wang Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 2 +- drivers/net/wireless/mediatek/mt76/mt7615/main.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c index afac8d81d317..3dd8dd28690e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c @@ -967,7 +967,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, struct mt7615_rate_desc rd; u32 w5, w27, addr; - if (mt76_is_usb(&dev->mt76)) { + if (!mt76_is_mmio(&dev->mt76)) { mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); return; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index 79453c890f20..2d0b1f49fdbc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -417,10 +417,10 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); - if (mt76_is_usb(&dev->mt76)) - err = mt7615_queue_key_update(dev, cmd, msta, key); - else + if (mt76_is_mmio(&dev->mt76)) err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); + else + err = mt7615_queue_key_update(dev, cmd, msta, key); mt7615_mutex_release(dev); From 90520afbae5f01c67f6b7ad580e0de90adab21a6 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Wed, 15 Jul 2020 14:46:29 +0200 Subject: [PATCH 1395/2056] mt76: mt7615: introduce mt7663-usb-sdio-common module Introduce mt7663-usb-sdio-common module as container for shared code between usb and sdio driver. Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/Kconfig | 6 +- .../wireless/mediatek/mt76/mt7615/Makefile | 4 +- .../wireless/mediatek/mt76/mt7615/mt7615.h | 13 +- .../net/wireless/mediatek/mt76/mt7615/usb.c | 255 +----------- .../wireless/mediatek/mt76/mt7615/usb_init.c | 145 ------- .../wireless/mediatek/mt76/mt7615/usb_sdio.c | 390 ++++++++++++++++++ 6 files changed, 424 insertions(+), 389 deletions(-) delete mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index c094260b0f89..10a4f1ed9194 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -28,10 +28,14 @@ config MT7622_WMAC which has the same feature set as a MT7615, but limited to 2.4 GHz only. 
+config MT7663_USB_SDIO_COMMON + tristate + select MT7615_COMMON + config MT7663U tristate "MediaTek MT7663U (USB) support" select MT76_USB - select MT7615_COMMON + select MT7663_USB_SDIO_COMMON depends on MAC80211 depends on USB help diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index d2ad84cb5244..a1cdaadb5442 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o obj-$(CONFIG_MT7615E) += mt7615e.o +obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o obj-$(CONFIG_MT7663U) += mt7663u.o CFLAGS_trace.o := -I$(src) @@ -13,4 +14,5 @@ mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o mt7615e-$(CONFIG_MT7622_WMAC) += soc.o -mt7663u-y := usb.o usb_mcu.o usb_init.o +mt7663-usb-sdio-common-y := usb_sdio.o +mt7663u-y := usb.o usb_mcu.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 688bd7b68668..38628d28b3dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -409,6 +409,7 @@ extern struct ieee80211_rate mt7615_rates[12]; extern const struct ieee80211_ops mt7615_ops; extern const u32 mt7615e_reg_map[__MT_BASE_MAX]; extern const u32 mt7663e_reg_map[__MT_BASE_MAX]; +extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX]; extern struct pci_driver mt7615_pci_driver; extern struct platform_driver mt7622_wmac_driver; extern const struct mt76_testmode_ops mt7615_testmode_ops; @@ -657,8 +658,16 @@ int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, int __mt7663_load_firmware(struct mt7615_dev *dev); /* usb */ -void mt7663u_wtbl_work(struct work_struct *work); +int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info); +bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); +void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + enum mt76_txq_id qid, + struct mt76_queue_entry *e); +void mt7663_usb_sdio_wtbl_work(struct work_struct *work); +int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); int mt7663u_mcu_init(struct mt7615_dev *dev); -int mt7663u_register_device(struct mt7615_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index aba926f1eeb5..f1b4a6316db3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -15,30 +15,6 @@ #include "mcu.h" #include "regs.h" -static const u32 mt7663u_reg_map[] = { - [MT_TOP_CFG_BASE] = 0x80020000, - [MT_HW_BASE] = 0x80000000, - [MT_DMA_SHDL_BASE] = 0x5000a000, - [MT_HIF_BASE] = 0x50000000, - [MT_CSR_BASE] = 0x40000000, - [MT_EFUSE_ADDR_BASE] = 0x78011000, - [MT_TOP_MISC_BASE] = 0x81020000, - [MT_PLE_BASE] = 0x82060000, - [MT_PSE_BASE] = 0x82068000, - [MT_WTBL_BASE_ADDR] = 0x820e0000, - [MT_CFG_BASE] = 0x820f0000, - [MT_AGG_BASE] = 0x820f2000, - [MT_ARB_BASE] = 0x820f3000, - [MT_TMAC_BASE] = 0x820f4000, - [MT_RMAC_BASE] = 0x820f5000, - [MT_DMA_BASE] = 0x820f7000, - [MT_PF_BASE] = 0x820f8000, - [MT_WTBL_BASE_ON] = 0x820f9000, - [MT_WTBL_BASE_OFF] = 0x820f9800, - [MT_LPON_BASE] = 0x820fb000, - [MT_MIB_BASE] = 0x820fd000, -}; - static const struct usb_device_id 
mt7615_device_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) }, { }, @@ -63,221 +39,19 @@ static void mt7663u_cleanup(struct mt7615_dev *dev) mt76u_queues_deinit(&dev->mt76); } -static void -mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, - enum mt76_txq_id qid, struct ieee80211_sta *sta, - struct sk_buff *skb) +static void mt7663u_init_work(struct work_struct *work) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_key_conf *key = info->control.hw_key; - __le32 *txwi; - int pid; - - if (!wcid) - wcid = &dev->mt76.global_wcid; - - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); - - txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); - memset(txwi, 0, MT_USB_TXD_SIZE); - mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); - skb_push(skb, MT_USB_TXD_SIZE); -} - -static int -__mt7663u_mac_set_rates(struct mt7615_dev *dev, - struct mt7615_wtbl_desc *wd) -{ - struct mt7615_rate_desc *rate = &wd->rate; - struct mt7615_sta *sta = wd->sta; - u32 w5, w27, addr, val; - - lockdep_assert_held(&dev->mt76.mutex); - - if (!sta) - return -EINVAL; - - if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) - return -ETIMEDOUT; - - addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); - - w27 = mt76_rr(dev, addr + 27 * 4); - w27 &= ~MT_WTBL_W27_CC_BW_SEL; - w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); - - w5 = mt76_rr(dev, addr + 5 * 4); - w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | - MT_WTBL_W5_MPDU_OK_COUNT | - MT_WTBL_W5_MPDU_FAIL_COUNT | - MT_WTBL_W5_RATE_IDX); - w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | - FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, - rate->bw_idx ? rate->bw_idx - 1 : 7); - - mt76_wr(dev, MT_WTBL_RIUCR0, w5); - - mt76_wr(dev, MT_WTBL_RIUCR1, - FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | - FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); - - mt76_wr(dev, MT_WTBL_RIUCR2, - FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | - FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); - - mt76_wr(dev, MT_WTBL_RIUCR3, - FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | - FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); - - mt76_wr(dev, MT_WTBL_UPDATE, - FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | - MT_WTBL_UPDATE_RATE_UPDATE | - MT_WTBL_UPDATE_TX_COUNT_CLEAR); - - mt76_wr(dev, addr + 27 * 4, w27); - - sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; - - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ - val = mt76_rr(dev, MT_LPON_UTTR0); - sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; - - if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) - mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); - - sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; - sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - - return 0; -} - -static int -__mt7663u_mac_set_key(struct mt7615_dev *dev, - struct mt7615_wtbl_desc *wd) -{ - struct mt7615_key_desc *key = &wd->key; - struct mt7615_sta *sta = wd->sta; - enum mt7615_cipher_type cipher; - struct mt76_wcid *wcid; - int err; - - lockdep_assert_held(&dev->mt76.mutex); - - if (!sta) { - err = -EINVAL; - goto out; - } - - cipher = mt7615_mac_get_cipher(key->cipher); - if (cipher == MT_CIPHER_NONE) { - err = -EOPNOTSUPP; - goto out; - } - - wcid = &wd->sta->wcid; - - 
mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); - err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, - cipher, key->cmd); - if (err < 0) - goto out; - - err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, - key->cmd); - if (err < 0) - goto out; - - if (key->cmd == SET_KEY) - wcid->cipher |= BIT(cipher); - else - wcid->cipher &= ~BIT(cipher); - -out: - kfree(key->key); - - return err; -} - -void mt7663u_wtbl_work(struct work_struct *work) -{ - struct mt7615_wtbl_desc *wd, *wd_next; - struct list_head wd_list; struct mt7615_dev *dev; - dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, - wtbl_work); + dev = container_of(work, struct mt7615_dev, mcu_work); + if (mt7663u_mcu_init(dev)) + return; - INIT_LIST_HEAD(&wd_list); - spin_lock_bh(&dev->mt76.lock); - list_splice_init(&dev->wd_head, &wd_list); - spin_unlock_bh(&dev->mt76.lock); - - list_for_each_entry_safe(wd, wd_next, &wd_list, node) { - list_del(&wd->node); - - mt7615_mutex_acquire(dev); - - switch (wd->type) { - case MT7615_WTBL_RATE_DESC: - __mt7663u_mac_set_rates(dev, wd); - break; - case MT7615_WTBL_KEY_DESC: - __mt7663u_mac_set_key(dev, wd); - break; - } - - mt7615_mutex_release(dev); - - kfree(wd); - } -} - -static void -mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, - struct mt76_queue_entry *e) -{ - skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE); - mt76_tx_complete_skb(mdev, e->skb); -} - -static int -mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, - enum mt76_txq_id qid, struct mt76_wcid *wcid, - struct ieee80211_sta *sta, - struct mt76_tx_info *tx_info) -{ - struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - struct sk_buff *skb = tx_info->skb; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && - !msta->rate_probe) { - /* request to configure sampling rate */ - spin_lock_bh(&dev->mt76.lock); - mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], - msta->rates); - spin_unlock_bh(&dev->mt76.lock); - } - mt7663u_mac_write_txwi(dev, wcid, qid, sta, skb); - - put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); - return mt76_skb_adjust_pad(skb); -} - -static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update) -{ - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - - mt7615_mutex_acquire(dev); - mt7615_mac_sta_poll(dev); - mt7615_mutex_release(dev); - - return 0; + mt7615_mcu_set_eeprom(dev); + mt7615_mac_init(dev); + mt7615_phy_init(dev); + mt7615_mcu_del_wtbl_all(dev); + mt7615_check_offload_capability(dev); } static int mt7663u_probe(struct usb_interface *usb_intf, @@ -286,9 +60,9 @@ static int mt7663u_probe(struct usb_interface *usb_intf, static const struct mt76_driver_ops drv_ops = { .txwi_size = MT_USB_TXD_SIZE, .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, - .tx_prepare_skb = mt7663u_tx_prepare_skb, - .tx_complete_skb = mt7663u_tx_complete_skb, - .tx_status_data = mt7663u_tx_status_data, + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, + .tx_status_data = mt7663_usb_sdio_tx_status_data, .rx_skb = mt7615_queue_rx_skb, .sta_ps = mt7615_sta_ps, .sta_add = mt7615_mac_sta_add, @@ -318,7 +92,8 @@ static int mt7663u_probe(struct usb_interface *usb_intf, usb_set_intfdata(usb_intf, dev); - dev->reg_map = mt7663u_reg_map; + INIT_WORK(&dev->mcu_work, mt7663u_init_work); 
+ dev->reg_map = mt7663_usb_sdio_reg_map; dev->ops = ops; ret = mt76u_init(mdev, usb_intf, true); if (ret < 0) @@ -356,7 +131,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf, if (ret) goto error; - ret = mt7663u_register_device(dev); + ret = mt7663_usb_sdio_register_device(dev); if (ret) goto error_freeq; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c deleted file mode 100644 index 1fbc9601391d..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c +++ /dev/null @@ -1,145 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright (C) 2019 MediaTek Inc. - * - * Author: Felix Fietkau - * Lorenzo Bianconi - * Sean Wang - */ - -#include -#include - -#include "mt7615.h" -#include "mac.h" -#include "regs.h" - -static int mt7663u_dma_sched_init(struct mt7615_dev *dev) -{ - int i; - - mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE), - MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, - FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | - FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); - - /* disable refill group 5 - group 15 and raise group 2 - * and 3 as high priority. - */ - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006); - mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16)); - - for (i = 0; i < 5; i++) - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)), - FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) | - FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff)); - - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210); - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210); - - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444); - - /* group pririority from high to low: - * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0. - */ - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); - - mt76_wr(dev, MT_UDMA_WLCFG_1, - FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | - FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); - - /* setup UDMA Rx Flush */ - mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); - /* hif reset */ - mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); - - mt76_set(dev, MT_UDMA_WLCFG_0, - MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | - MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | - MT_WL_TX_TMOUT_FUNC_EN); - mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, - FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | - FIELD_PREP(MT_WL_RX_AGG_TO, 100)); - - return 0; -} - -static int mt7663u_init_hardware(struct mt7615_dev *dev) -{ - int ret, idx; - - ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); - if (ret < 0) - return ret; - - ret = mt7663u_dma_sched_init(dev); - if (ret) - return ret; - - set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); - - /* Beacon and mgmt frames should occupy wcid 0 */ - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); - if (idx) - return -ENOSPC; - - dev->mt76.global_wcid.idx = idx; - dev->mt76.global_wcid.hw_key_idx = -1; - rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); - - return 0; -} - -static void mt7663u_init_work(struct work_struct *work) -{ - struct mt7615_dev *dev; - - dev = container_of(work, struct mt7615_dev, mcu_work); - if (mt7663u_mcu_init(dev)) - return; - - mt7615_mcu_set_eeprom(dev); - mt7615_mac_init(dev); - mt7615_phy_init(dev); - mt7615_mcu_del_wtbl_all(dev); - mt7615_check_offload_capability(dev); -} - -int mt7663u_register_device(struct mt7615_dev *dev) -{ - struct ieee80211_hw *hw = 
mt76_hw(dev); - int err; - - INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work); - INIT_WORK(&dev->mcu_work, mt7663u_init_work); - INIT_LIST_HEAD(&dev->wd_head); - mt7615_init_device(dev); - - err = mt7663u_init_hardware(dev); - if (err) - return err; - - hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE; - /* check hw sg support in order to enable AMSDU */ - hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; - - err = mt76_register_device(&dev->mt76, true, mt7615_rates, - ARRAY_SIZE(mt7615_rates)); - if (err < 0) - return err; - - if (!dev->mt76.usb.sg_en) { - struct ieee80211_sta_vht_cap *vht_cap; - - /* decrease max A-MSDU size if SG is not supported */ - vht_cap = &dev->mphy.sband_5g.sband.vht_cap; - vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; - } - - ieee80211_queue_work(hw, &dev->mcu_work); - mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); - mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); - - return mt7615_init_debugfs(dev); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c new file mode 100644 index 000000000000..610cc1fbec5b --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Lorenzo Bianconi + * Sean Wang + */ + +#include +#include +#include + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" + +const u32 mt7663_usb_sdio_reg_map[] = { + [MT_TOP_CFG_BASE] = 0x80020000, + [MT_HW_BASE] = 0x80000000, + [MT_DMA_SHDL_BASE] = 0x5000a000, + [MT_HIF_BASE] = 0x50000000, + [MT_CSR_BASE] = 0x40000000, + [MT_EFUSE_ADDR_BASE] = 0x78011000, + [MT_TOP_MISC_BASE] = 0x81020000, + [MT_PLE_BASE] = 0x82060000, + [MT_PSE_BASE] = 0x82068000, + [MT_WTBL_BASE_ADDR] = 0x820e0000, + [MT_CFG_BASE] = 0x820f0000, + [MT_AGG_BASE] = 0x820f2000, + [MT_ARB_BASE] = 0x820f3000, + [MT_TMAC_BASE] = 0x820f4000, + [MT_RMAC_BASE] = 0x820f5000, + [MT_DMA_BASE] = 0x820f7000, + [MT_PF_BASE] = 0x820f8000, + [MT_WTBL_BASE_ON] = 0x820f9000, + [MT_WTBL_BASE_OFF] = 0x820f9800, + [MT_LPON_BASE] = 0x820fb000, + [MT_MIB_BASE] = 0x820fd000, +}; +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); + +static void +mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, + enum mt76_txq_id qid, struct ieee80211_sta *sta, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_key_conf *key = info->control.hw_key; + __le32 *txwi; + int pid; + + if (!wcid) + wcid = &dev->mt76.global_wcid; + + pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); + + txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); + memset(txwi, 0, MT_USB_TXD_SIZE); + mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); + skb_push(skb, MT_USB_TXD_SIZE); +} + +static int +mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, + struct mt7615_wtbl_desc *wd) +{ + struct mt7615_rate_desc *rate = &wd->rate; + struct mt7615_sta *sta = wd->sta; + u32 w5, w27, addr, val; + + lockdep_assert_held(&dev->mt76.mutex); + + if (!sta) + return -EINVAL; + + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) + return -ETIMEDOUT; + + addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); + + w27 = mt76_rr(dev, addr + 27 * 4); + w27 &= ~MT_WTBL_W27_CC_BW_SEL; + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); + + w5 = mt76_rr(dev, addr + 5 * 4); + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | + MT_WTBL_W5_MPDU_OK_COUNT | + 
MT_WTBL_W5_MPDU_FAIL_COUNT | + MT_WTBL_W5_RATE_IDX); + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, + rate->bw_idx ? rate->bw_idx - 1 : 7); + + mt76_wr(dev, MT_WTBL_RIUCR0, w5); + + mt76_wr(dev, MT_WTBL_RIUCR1, + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); + + mt76_wr(dev, MT_WTBL_RIUCR2, + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); + + mt76_wr(dev, MT_WTBL_RIUCR3, + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); + + mt76_wr(dev, MT_WTBL_UPDATE, + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | + MT_WTBL_UPDATE_RATE_UPDATE | + MT_WTBL_UPDATE_TX_COUNT_CLEAR); + + mt76_wr(dev, addr + 27 * 4, w27); + + sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; + + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ + val = mt76_rr(dev, MT_LPON_UTTR0); + sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; + + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); + + sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; + + return 0; +} + +static int +mt7663_usb_sdio_set_key(struct mt7615_dev *dev, + struct mt7615_wtbl_desc *wd) +{ + struct mt7615_key_desc *key = &wd->key; + struct mt7615_sta *sta = wd->sta; + enum mt7615_cipher_type cipher; + struct mt76_wcid *wcid; + int err; + + lockdep_assert_held(&dev->mt76.mutex); + + if (!sta) { + err = -EINVAL; + goto out; + } + + cipher = mt7615_mac_get_cipher(key->cipher); + if (cipher == MT_CIPHER_NONE) { + err = -EOPNOTSUPP; + goto out; + } + + wcid = &wd->sta->wcid; + + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); + err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, + cipher, key->cmd); + if (err < 0) + goto out; + + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, + key->cmd); + if (err < 0) + goto out; + + if (key->cmd == SET_KEY) + wcid->cipher |= BIT(cipher); + else + wcid->cipher &= ~BIT(cipher); +out: + kfree(key->key); + + return err; +} + +void mt7663_usb_sdio_wtbl_work(struct work_struct *work) +{ + struct mt7615_wtbl_desc *wd, *wd_next; + struct list_head wd_list; + struct mt7615_dev *dev; + + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, + wtbl_work); + + INIT_LIST_HEAD(&wd_list); + spin_lock_bh(&dev->mt76.lock); + list_splice_init(&dev->wd_head, &wd_list); + spin_unlock_bh(&dev->mt76.lock); + + list_for_each_entry_safe(wd, wd_next, &wd_list, node) { + list_del(&wd->node); + + mt7615_mutex_acquire(dev); + + switch (wd->type) { + case MT7615_WTBL_RATE_DESC: + mt7663_usb_sdio_set_rates(dev, wd); + break; + case MT7615_WTBL_KEY_DESC: + mt7663_usb_sdio_set_key(dev, wd); + break; + } + + mt7615_mutex_release(dev); + + kfree(wd); + } +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work); + +bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + + mt7615_mutex_acquire(dev); + mt7615_mac_sta_poll(dev); + mt7615_mutex_release(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data); + +void 
mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, + enum mt76_txq_id qid, + struct mt76_queue_entry *e) +{ + unsigned int headroom = MT_USB_TXD_SIZE; + + if (mt76_is_usb(mdev)) + headroom += MT_USB_HDR_SIZE; + skb_pull(e->skb, headroom); + + mt76_tx_complete_skb(mdev, e->skb); +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb); + +int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, + enum mt76_txq_id qid, struct mt76_wcid *wcid, + struct ieee80211_sta *sta, + struct mt76_tx_info *tx_info) +{ + struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct sk_buff *skb = tx_info->skb; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && + !msta->rate_probe) { + /* request to configure sampling rate */ + spin_lock_bh(&dev->mt76.lock); + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], + msta->rates); + spin_unlock_bh(&dev->mt76.lock); + } + + mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); + if (mt76_is_usb(mdev)) + put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); + + return mt76_skb_adjust_pad(skb); +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); + +static int mt7663u_dma_sched_init(struct mt7615_dev *dev) +{ + int i; + + mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE), + MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); + + /* disable refill group 5 - group 15 and raise group 2 + * and 3 as high priority. + */ + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006); + mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16)); + + for (i = 0; i < 5; i++) + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)), + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) | + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff)); + + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210); + + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444); + + /* group pririority from high to low: + * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0. 
+ */ + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); + + mt76_wr(dev, MT_UDMA_WLCFG_1, + FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | + FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); + + /* setup UDMA Rx Flush */ + mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); + /* hif reset */ + mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); + + mt76_set(dev, MT_UDMA_WLCFG_0, + MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | + MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | + MT_WL_TX_TMOUT_FUNC_EN); + mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, + FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | + FIELD_PREP(MT_WL_RX_AGG_TO, 100)); + + return 0; +} + +static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev) +{ + int ret, idx; + + ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); + if (ret < 0) + return ret; + + if (mt76_is_usb(&dev->mt76)) { + ret = mt7663u_dma_sched_init(dev); + if (ret) + return ret; + } + + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); + + /* Beacon and mgmt frames should occupy wcid 0 */ + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); + if (idx) + return -ENOSPC; + + dev->mt76.global_wcid.idx = idx; + dev->mt76.global_wcid.hw_key_idx = -1; + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); + + return 0; +} + +int mt7663_usb_sdio_register_device(struct mt7615_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + int err; + + INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work); + INIT_LIST_HEAD(&dev->wd_head); + mt7615_init_device(dev); + + err = mt7663_usb_sdio_init_hardware(dev); + if (err) + return err; + + /* check hw sg support in order to enable AMSDU */ + hw->max_tx_fragments = dev->mt76.usb.sg_en ? 
MT_HW_TXP_MAX_BUF_NUM : 1; + hw->extra_tx_headroom += MT_USB_TXD_SIZE; + if (mt76_is_usb(&dev->mt76)) + hw->extra_tx_headroom += MT_USB_HDR_SIZE; + + err = mt76_register_device(&dev->mt76, true, mt7615_rates, + ARRAY_SIZE(mt7615_rates)); + if (err < 0) + return err; + + if (!dev->mt76.usb.sg_en) { + struct ieee80211_sta_vht_cap *vht_cap; + + /* decrease max A-MSDU size if SG is not supported */ + vht_cap = &dev->mphy.sband_5g.sband.vht_cap; + vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + } + + ieee80211_queue_work(hw, &dev->mcu_work); + mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); + + return mt7615_init_debugfs(dev); +} +EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device); + +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_AUTHOR("Sean Wang "); +MODULE_LICENSE("Dual BSD/GPL"); From d39b52e31aa641c79d0a1177f9d02e71d1b7fd64 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 15 Jul 2020 14:46:30 +0200 Subject: [PATCH 1396/2056] mt76: introduce mt76_sdio module Introduce mt76_sdio module as common layer to add mt7663s support Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/Kconfig | 4 + drivers/net/wireless/mediatek/mt76/Makefile | 2 + drivers/net/wireless/mediatek/mt76/mac80211.c | 6 +- drivers/net/wireless/mediatek/mt76/mt76.h | 30 ++ .../wireless/mediatek/mt76/mt7615/usb_sdio.c | 5 +- drivers/net/wireless/mediatek/mt76/sdio.c | 363 ++++++++++++++++++ 6 files changed, 408 insertions(+), 2 deletions(-) create mode 100644 drivers/net/wireless/mediatek/mt76/sdio.c diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 41533a0e1720..31015d2a8e7d 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig @@ -12,6 +12,10 @@ config MT76_USB tristate depends on MT76_CORE +config MT76_SDIO + tristate + depends on MT76_CORE + config MT76x02_LIB tristate select MT76_CORE diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 949b3c8ffa1b..e53584db0756 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_MT76_CORE) += mt76.o obj-$(CONFIG_MT76_USB) += mt76-usb.o +obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o @@ -12,6 +13,7 @@ mt76-$(CONFIG_PCI) += pci.o mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o mt76-usb-y := usb.o usb_trace.o +mt76-sdio-y := sdio.o CFLAGS_trace.o := -I$(src) CFLAGS_usb_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index f340af40f8f4..3d4bf72700a5 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -305,7 +305,11 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw) ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, TX_AMSDU); - ieee80211_hw_set(hw, TX_FRAG_LIST); + + /* TODO: avoid linearization for SDIO */ + if (!mt76_is_sdio(dev)) + ieee80211_hw_set(hw, TX_FRAG_LIST); + ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h 
b/drivers/net/wireless/mediatek/mt76/mt76.h index 8ee159d614ae..af35bc388ae2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -34,6 +34,7 @@ struct mt76_reg_pair { enum mt76_bus_type { MT76_BUS_MMIO, MT76_BUS_USB, + MT76_BUS_SDIO, }; struct mt76_bus_ops { @@ -53,6 +54,7 @@ struct mt76_bus_ops { #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB) #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO) +#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO) enum mt76_txq_id { MT_TXQ_VO = IEEE80211_AC_VO, @@ -95,6 +97,7 @@ struct mt76_queue_entry { union { struct mt76_txwi_cache *txwi; struct urb *urb; + int buf_sz; }; enum mt76_txq_id qid; bool skip_buf0:1; @@ -147,6 +150,8 @@ struct mt76_mcu_ops { int len, bool wait_resp); int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, int cmd, bool wait_resp); + u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset); + void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val); int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *rp, int len); int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, @@ -440,6 +445,24 @@ struct mt76_usb { } mcu; }; +struct mt76_sdio { + struct task_struct *tx_kthread; + struct task_struct *kthread; + struct work_struct stat_work; + + unsigned long state; + + struct sdio_func *func; + + struct { + struct mutex lock; + int pse_data_quota; + int ple_data_quota; + int pse_mcu_quota; + int deficit; + } sched; +}; + struct mt76_mmio { void __iomem *regs; spinlock_t irq_lock; @@ -626,6 +649,7 @@ struct mt76_dev { union { struct mt76_mmio mmio; struct mt76_usb usb; + struct mt76_sdio sdio; }; }; @@ -1030,6 +1054,12 @@ void mt76u_stop_rx(struct mt76_dev *dev); int mt76u_resume_rx(struct mt76_dev *dev); void mt76u_queues_deinit(struct mt76_dev *dev); +int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, + const struct mt76_bus_ops *bus_ops); +int mt76s_alloc_queues(struct mt76_dev *dev); +void mt76s_stop_txrx(struct mt76_dev *dev); +void mt76s_deinit(struct mt76_dev *dev); + struct sk_buff * mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data, int data_len); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 610cc1fbec5b..50009bc5ee8d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -359,7 +359,10 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev) return err; /* check hw sg support in order to enable AMSDU */ - hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; + if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76)) + hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; + else + hw->max_tx_fragments = 1; hw->extra_tx_headroom += MT_USB_TXD_SIZE; if (mt76_is_usb(&dev->mt76)) hw->extra_tx_headroom += MT_USB_HDR_SIZE; diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c new file mode 100644 index 000000000000..a6f3255c4d84 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -0,0 +1,363 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * This file is written based on mt76/usb.c. 
+ * + * Author: Felix Fietkau + * Lorenzo Bianconi + * Sean Wang + */ + +#include +#include +#include +#include +#include +#include + +#include "mt76.h" + +static int +mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) +{ + struct mt76_queue *q = &dev->q_rx[qid]; + + spin_lock_init(&q->lock); + q->entry = devm_kcalloc(dev->dev, + MT_NUM_RX_ENTRIES, sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->ndesc = MT_NUM_RX_ENTRIES; + q->head = q->tail = 0; + q->queued = 0; + + return 0; +} + +static int mt76s_alloc_tx(struct mt76_dev *dev) +{ + struct mt76_queue *q; + int i; + + for (i = 0; i < MT_TXQ_MCU_WA; i++) { + INIT_LIST_HEAD(&dev->q_tx[i].swq); + + q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); + if (!q) + return -ENOMEM; + + spin_lock_init(&q->lock); + q->hw_idx = i; + dev->q_tx[i].q = q; + + q->entry = devm_kcalloc(dev->dev, + MT_NUM_TX_ENTRIES, sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->ndesc = MT_NUM_TX_ENTRIES; + } + + return 0; +} + +void mt76s_stop_txrx(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + + cancel_work_sync(&sdio->stat_work); + clear_bit(MT76_READING_STATS, &dev->phy.state); + + mt76_tx_status_check(dev, NULL, true); +} +EXPORT_SYMBOL_GPL(mt76s_stop_txrx); + +int mt76s_alloc_queues(struct mt76_dev *dev) +{ + int err; + + err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN); + if (err < 0) + return err; + + return mt76s_alloc_tx(dev); +} +EXPORT_SYMBOL_GPL(mt76s_alloc_queues); + +static struct mt76_queue_entry * +mt76s_get_next_rx_entry(struct mt76_queue *q) +{ + struct mt76_queue_entry *e = NULL; + + spin_lock_bh(&q->lock); + if (q->queued > 0) { + e = &q->entry[q->head]; + q->head = (q->head + 1) % q->ndesc; + q->queued--; + } + spin_unlock_bh(&q->lock); + + return e; +} + +static int +mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) +{ + int qid = q - &dev->q_rx[MT_RXQ_MAIN]; + int nframes = 0; + + while (true) { + struct mt76_queue_entry *e; + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state)) + break; + + e = mt76s_get_next_rx_entry(q); + if (!e || !e->skb) + break; + + dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb); + e->skb = NULL; + nframes++; + } + if (qid == MT_RXQ_MAIN) + mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); + + return nframes; +} + +static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) +{ + struct mt76_sw_queue *sq = &dev->q_tx[qid]; + u32 n_dequeued = 0, n_sw_dequeued = 0; + struct mt76_queue_entry entry; + struct mt76_queue *q = sq->q; + bool wake; + + while (q->queued > n_dequeued) { + if (q->entry[q->head].schedule) { + q->entry[q->head].schedule = false; + n_sw_dequeued++; + } + + entry = q->entry[q->head]; + q->entry[q->head].done = false; + q->head = (q->head + 1) % q->ndesc; + n_dequeued++; + + if (qid == MT_TXQ_MCU) + dev_kfree_skb(entry.skb); + else + dev->drv->tx_complete_skb(dev, qid, &entry); + } + + spin_lock_bh(&q->lock); + + sq->swq_queued -= n_sw_dequeued; + q->queued -= n_dequeued; + + wake = q->stopped && q->queued < q->ndesc - 8; + if (wake) + q->stopped = false; + + if (!q->queued) + wake_up(&dev->tx_wait); + + spin_unlock_bh(&q->lock); + + if (qid == MT_TXQ_MCU) + goto out; + + mt76_txq_schedule(&dev->phy, qid); + + if (wake) + ieee80211_wake_queue(dev->hw, qid); + + wake_up_process(dev->sdio.tx_kthread); +out: + return n_dequeued; +} + +static void mt76s_tx_status_data(struct work_struct *work) +{ + struct mt76_sdio *sdio; + struct mt76_dev *dev; + u8 update = 1; + u16 count = 0; + + sdio = 
container_of(work, struct mt76_sdio, stat_work); + dev = container_of(sdio, struct mt76_dev, sdio); + + while (true) { + if (test_bit(MT76_REMOVED, &dev->phy.state)) + break; + + if (!dev->drv->tx_status_data(dev, &update)) + break; + count++; + } + + if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) + queue_work(dev->wq, &sdio->stat_work); + else + clear_bit(MT76_READING_STATS, &dev->phy.state); +} + +static int +mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, struct mt76_wcid *wcid, + struct ieee80211_sta *sta) +{ + struct mt76_queue *q = dev->q_tx[qid].q; + struct mt76_tx_info tx_info = { + .skb = skb, + }; + int err, len = skb->len; + u16 idx = q->tail; + + if (q->queued == q->ndesc) + return -ENOSPC; + + skb->prev = skb->next = NULL; + err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); + if (err < 0) + return err; + + q->entry[q->tail].skb = tx_info.skb; + q->entry[q->tail].buf_sz = len; + q->tail = (q->tail + 1) % q->ndesc; + + return idx; +} + +static int +mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, + struct sk_buff *skb, u32 tx_info) +{ + struct mt76_queue *q = dev->q_tx[qid].q; + int ret = -ENOSPC, len = skb->len; + + spin_lock_bh(&q->lock); + if (q->queued == q->ndesc) + goto out; + + ret = mt76_skb_adjust_pad(skb); + if (ret) + goto out; + + q->entry[q->tail].buf_sz = len; + q->entry[q->tail].skb = skb; + q->tail = (q->tail + 1) % q->ndesc; + +out: + spin_unlock_bh(&q->lock); + + return ret; +} + +static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) +{ + struct mt76_sdio *sdio = &dev->sdio; + + wake_up_process(sdio->tx_kthread); +} + +static const struct mt76_queue_ops sdio_queue_ops = { + .tx_queue_skb = mt76s_tx_queue_skb, + .kick = mt76s_tx_kick, + .tx_queue_skb_raw = mt76s_tx_queue_skb_raw, +}; + +static int mt76s_kthread_run(void *data) +{ + struct mt76_dev *dev = data; + struct mt76_phy *mphy = &dev->phy; + + while (!kthread_should_stop()) { + int i, nframes = 0; + + cond_resched(); + + /* rx processing */ + local_bh_disable(); + rcu_read_lock(); + + mt76_for_each_q_rx(dev, i) + nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]); + + rcu_read_unlock(); + local_bh_enable(); + + /* tx processing */ + for (i = 0; i < MT_TXQ_MCU_WA; i++) + nframes += mt76s_process_tx_queue(dev, i); + + if (dev->drv->tx_status_data && + !test_and_set_bit(MT76_READING_STATS, &mphy->state)) + queue_work(dev->wq, &dev->sdio.stat_work); + + if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } + } + + return 0; +} + +void mt76s_deinit(struct mt76_dev *dev) +{ + struct mt76_sdio *sdio = &dev->sdio; + int i; + + kthread_stop(sdio->kthread); + kthread_stop(sdio->tx_kthread); + mt76s_stop_txrx(dev); + + sdio_claim_host(sdio->func); + sdio_release_irq(sdio->func); + sdio_release_host(sdio->func); + + mt76_for_each_q_rx(dev, i) { + struct mt76_queue *q = &dev->q_rx[i]; + int j; + + for (j = 0; j < q->ndesc; j++) { + struct mt76_queue_entry *e = &q->entry[j]; + + if (!e->skb) + continue; + + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } +} +EXPORT_SYMBOL_GPL(mt76s_deinit); + +int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, + const struct mt76_bus_ops *bus_ops) +{ + struct mt76_sdio *sdio = &dev->sdio; + + sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s"); + if (IS_ERR(sdio->kthread)) + return PTR_ERR(sdio->kthread); + + INIT_WORK(&sdio->stat_work, mt76s_tx_status_data); + + 
mutex_init(&sdio->sched.lock); + dev->queue_ops = &sdio_queue_ops; + dev->bus = bus_ops; + dev->sdio.func = func; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76s_init); + +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); From a66cbdd6573db8515735d37793c22605432c346d Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Wed, 15 Jul 2020 14:46:31 +0200 Subject: [PATCH 1397/2056] mt76: mt7615: introduce mt7663s support Introduce support for mt7663s 802.11ac 2x2:2 chipset to mt7615 driver. Co-developed-by: Lorenzo Bianconi Signed-off-by: Lorenzo Bianconi Signed-off-by: Sean Wang Signed-off-by: Felix Fietkau --- .../net/wireless/mediatek/mt76/mt7615/Kconfig | 11 + .../wireless/mediatek/mt76/mt7615/Makefile | 2 + .../net/wireless/mediatek/mt76/mt7615/mcu.c | 42 +- .../net/wireless/mediatek/mt76/mt7615/mcu.h | 10 + .../net/wireless/mediatek/mt76/mt7615/mmio.c | 1 + .../wireless/mediatek/mt76/mt7615/mt7615.h | 10 + .../net/wireless/mediatek/mt76/mt7615/regs.h | 12 + .../net/wireless/mediatek/mt76/mt7615/sdio.c | 478 ++++++++++++++++++ .../net/wireless/mediatek/mt76/mt7615/sdio.h | 115 +++++ .../wireless/mediatek/mt76/mt7615/sdio_mcu.c | 162 ++++++ .../wireless/mediatek/mt76/mt7615/sdio_txrx.c | 271 ++++++++++ .../wireless/mediatek/mt76/mt7615/usb_sdio.c | 1 + 12 files changed, 1114 insertions(+), 1 deletion(-) create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio.h create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c create mode 100644 drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig index 10a4f1ed9194..f372fb629caf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig @@ -42,3 +42,14 @@ config MT7663U This adds support for MT7663U 802.11ac 2x2:2 wireless devices. To compile this driver as a module, choose M here. + +config MT7663S + tristate "MediaTek MT7663S (SDIO) support" + select MT76_SDIO + select MT7663_USB_SDIO_COMMON + depends on MAC80211 + depends on MMC + help + This adds support for MT7663S 802.11ac 2x2:2 wireless devices. + + To compile this driver as a module, choose M here. 
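The mt76-sdio core added above keeps its TX/RX queues as plain software rings: a fixed array of entries, head and tail indices that advance modulo ndesc, and a queued counter for occupancy. The following standalone sketch shows only that bookkeeping under those assumptions; the types and names are illustrative, not the real mt76 structures.

#include <stdio.h>

#define NDESC 8

struct ring {
	int entry[NDESC];
	int head;	/* next entry to consume */
	int tail;	/* next entry to fill */
	int queued;
};

static int ring_enqueue(struct ring *q, int val)
{
	if (q->queued == NDESC)
		return -1;	/* the driver returns -ENOSPC here */

	q->entry[q->tail] = val;
	q->tail = (q->tail + 1) % NDESC;
	q->queued++;

	return 0;
}

static int ring_dequeue(struct ring *q, int *val)
{
	if (!q->queued)
		return -1;

	*val = q->entry[q->head];
	q->head = (q->head + 1) % NDESC;
	q->queued--;

	return 0;
}

int main(void)
{
	struct ring q = {};
	int v;

	ring_enqueue(&q, 42);
	ring_enqueue(&q, 43);
	while (!ring_dequeue(&q, &v))
		printf("dequeued %d\n", v);

	return 0;
}
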
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile index a1cdaadb5442..e8fc4a7ae9bc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o obj-$(CONFIG_MT7615E) += mt7615e.o obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o obj-$(CONFIG_MT7663U) += mt7663u.o +obj-$(CONFIG_MT7663S) += mt7663s.o CFLAGS_trace.o := -I$(src) @@ -16,3 +17,4 @@ mt7615e-$(CONFIG_MT7622_WMAC) += soc.o mt7663-usb-sdio-common-y := usb_sdio.o mt7663u-y := usb.o usb_mcu.o +mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 83e29ee7b08b..2e0928db0aad 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -146,7 +146,10 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, mcu_txd->cid = mcu_cmd; break; case MCU_CE_PREFIX: - mcu_txd->set_query = MCU_Q_SET; + if (cmd & MCU_QUERY_MASK) + mcu_txd->set_query = MCU_Q_QUERY; + else + mcu_txd->set_query = MCU_Q_SET; mcu_txd->cid = mcu_cmd; break; default: @@ -214,6 +217,14 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd, ret = le32_to_cpu(event->status); break; } + case MCU_CMD_REG_READ: { + struct mt7615_mcu_reg_event *event; + + skb_pull(skb, sizeof(*rxd)); + event = (struct mt7615_mcu_reg_event *)skb->data; + ret = (int)le32_to_cpu(event->val); + break; + } default: break; } @@ -3885,3 +3896,32 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, &req, sizeof(req), false); } + +u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset) +{ + struct { + __le32 addr; + __le32 val; + } __packed req = { + .addr = cpu_to_le32(offset), + }; + + return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, + &req, sizeof(req), true); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr); + +void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + struct { + __le32 addr; + __le32 val; + } __packed req = { + .addr = cpu_to_le32(offset), + .val = cpu_to_le32(val), + }; + + __mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, + &req, sizeof(req), false); +} +EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h index e8c7aa5c35ce..7b856e9eee1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h @@ -81,6 +81,7 @@ enum { MCU_EVENT_GENERIC = 0x01, MCU_EVENT_ACCESS_REG = 0x02, MCU_EVENT_MT_PATCH_SEM = 0x04, + MCU_EVENT_REG_ACCESS = 0x05, MCU_EVENT_SCAN_DONE = 0x0d, MCU_EVENT_ROC = 0x10, MCU_EVENT_BSS_ABSENCE = 0x11, @@ -242,6 +243,8 @@ enum { #define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \ MCU_CE_PREFIX | MCU_QUERY_PREFIX) +#define MCU_QUERY_MASK BIT(16) + enum { MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01, MCU_CMD_FW_START_REQ = MCU_FW_PREFIX | 0x02, @@ -429,6 +432,11 @@ struct nt7615_sched_scan_done { __le16 pad; } __packed; +struct mt7615_mcu_reg_event { + __le32 reg; + __le32 val; +} __packed; + struct mt7615_mcu_bss_event { u8 bss_idx; u8 is_absent; @@ -581,6 +589,8 @@ enum { MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33, MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61, MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62, + MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0, + 
MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0, }; #define MCU_CMD_ACK BIT(0) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c index 461ac6c42271..133f93a6ed1b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c @@ -43,6 +43,7 @@ const u32 mt7663e_reg_map[] = { [MT_CSR_BASE] = 0x07000, [MT_PLE_BASE] = 0x08000, [MT_PSE_BASE] = 0x0c000, + [MT_PP_BASE] = 0x0e000, [MT_CFG_BASE] = 0x20000, [MT_AGG_BASE] = 0x22000, [MT_TMAC_BASE] = 0x24000, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h index 38628d28b3dd..571eadc033a3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h @@ -656,6 +656,8 @@ int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info); int __mt7663_load_firmware(struct mt7615_dev *dev); +u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); +void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); /* usb */ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, @@ -670,4 +672,12 @@ void mt7663_usb_sdio_wtbl_work(struct work_struct *work); int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); int mt7663u_mcu_init(struct mt7615_dev *dev); +/* sdio */ +u32 mt7663s_read_pcr(struct mt7615_dev *dev); +int mt7663s_mcu_init(struct mt7615_dev *dev); +int mt7663s_driver_own(struct mt7615_dev *dev); +int mt7663s_firmware_own(struct mt7615_dev *dev); +int mt7663s_kthread_run(void *data); +void mt7663s_sdio_irq(struct sdio_func *func); + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h index 32216bfef4b9..9137d9e6b51d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h @@ -28,6 +28,7 @@ enum mt7615_reg_base { MT_PCIE_REMAP_BASE2, MT_TOP_MISC_BASE, MT_EFUSE_ADDR_BASE, + MT_PP_BASE, __MT_BASE_MAX, }; @@ -152,6 +153,8 @@ enum mt7615_reg_base { #define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs)) +#define MT_PLE_PG_HIF0_GROUP MT_PLE(0x110) +#define MT_HIF0_MIN_QUOTA GENMASK(11, 0) #define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) #define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) #define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) @@ -161,6 +164,10 @@ enum mt7615_reg_base { ((n) << 2)) #define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs)) +#define MT_PSE_PG_HIF0_GROUP MT_PSE(0x110) +#define MT_HIF0_MIN_QUOTA GENMASK(11, 0) +#define MT_PSE_PG_HIF1_GROUP MT_PSE(0x118) +#define MT_HIF1_MIN_QUOTA GENMASK(11, 0) #define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4) #define MT_HIF_0_EMPTY_MASK BIT(16) #define MT_HIF_1_EMPTY_MASK BIT(17) @@ -168,6 +175,11 @@ enum mt7615_reg_base { #define MT_PSE_PG_INFO MT_PSE(0x194) #define MT_PSE_SRC_CNT GENMASK(27, 16) +#define MT_PP(ofs) ((dev)->reg_map[MT_PP_BASE] + (ofs)) +#define MT_PP_TXDWCNT MT_PP(0x0) +#define MT_PP_TXDWCNT_TX0_ADD_DW_CNT GENMASK(7, 0) +#define MT_PP_TXDWCNT_TX1_ADD_DW_CNT GENMASK(15, 8) + #define MT_WF_PHY_BASE 0x82070000 #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c new file mode 100644 index 000000000000..dabce51117b0 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c @@ -0,0 +1,478 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 
MediaTek Inc. + * + * Author: Felix Fietkau + * Lorenzo Bianconi + * Sean Wang + */ + +#include +#include +#include + +#include +#include +#include + +#include "mt7615.h" +#include "sdio.h" +#include "mac.h" + +static const struct sdio_device_id mt7663s_table[] = { + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) }, + { } /* Terminating entry */ +}; + +static u32 mt7663s_read_whisr(struct mt76_dev *dev) +{ + return sdio_readl(dev->sdio.func, MCR_WHISR, NULL); +} + +u32 mt7663s_read_pcr(struct mt7615_dev *dev) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + + return sdio_readl(sdio->func, MCR_WHLPCR, NULL); +} + +static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset) +{ + struct sdio_func *func = dev->sdio.func; + u32 val = ~0, status; + int err; + + sdio_claim_host(func); + + sdio_writel(func, offset, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting address [err=%d]\n", err); + goto out; + } + + sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); + goto out; + } + + err = readx_poll_timeout(mt7663s_read_whisr, dev, status, + status & H2D_SW_INT_READ, 0, 1000000); + if (err < 0) { + dev_err(dev->dev, "query whisr timeout\n"); + goto out; + } + + sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); + goto out; + } + + val = sdio_readl(func, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); + goto out; + } + + if (val != offset) { + dev_err(dev->dev, "register mismatch\n"); + val = ~0; + goto out; + } + + val = sdio_readl(func, MCR_D2HRM1R, &err); + if (err < 0) + dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err); + +out: + sdio_release_host(func); + + return val; +} + +static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val) +{ + struct sdio_func *func = dev->sdio.func; + u32 status; + int err; + + sdio_claim_host(func); + + sdio_writel(func, offset, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting address [err=%d]\n", err); + goto out; + } + + sdio_writel(func, val, MCR_H2DSM1R, &err); + if (err < 0) { + dev_err(dev->dev, + "failed setting write value [err=%d]\n", err); + goto out; + } + + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); + goto out; + } + + err = readx_poll_timeout(mt7663s_read_whisr, dev, status, + status & H2D_SW_INT_WRITE, 0, 1000000); + if (err < 0) { + dev_err(dev->dev, "query whisr timeout\n"); + goto out; + } + + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err); + if (err < 0) { + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); + goto out; + } + + val = sdio_readl(func, MCR_H2DSM0R, &err); + if (err < 0) { + dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); + goto out; + } + + if (val != offset) + dev_err(dev->dev, "register mismatch\n"); + +out: + sdio_release_host(func); +} + +static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) + return dev->mcu_ops->mcu_rr(dev, offset); + else + return mt7663s_read_mailbox(dev, offset); +} + +static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val) +{ + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) + dev->mcu_ops->mcu_wr(dev, offset, val); + else + mt7663s_write_mailbox(dev, offset, val); +} + +static u32 mt7663s_rmw(struct mt76_dev 
*dev, u32 offset, u32 mask, u32 val) +{ + val |= mt7663s_rr(dev, offset) & ~mask; + mt7663s_wr(dev, offset, val); + + return val; +} + +static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset, + const void *data, int len) +{ + const u32 *val = data; + int i; + + for (i = 0; i < len / sizeof(u32); i++) { + mt7663s_wr(dev, offset, val[i]); + offset += sizeof(u32); + } +} + +static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset, + void *data, int len) +{ + u32 *val = data; + int i; + + for (i = 0; i < len / sizeof(u32); i++) { + val[i] = mt7663s_rr(dev, offset); + offset += sizeof(u32); + } +} + +static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base, + const struct mt76_reg_pair *data, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + mt7663s_wr(dev, data->reg, data->value); + data++; + } + + return 0; +} + +static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base, + struct mt76_reg_pair *data, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + data->value = mt7663s_rr(dev, data->reg); + data++; + } + + return 0; +} + +static void mt7663s_init_work(struct work_struct *work) +{ + struct mt7615_dev *dev; + + dev = container_of(work, struct mt7615_dev, mcu_work); + if (mt7663s_mcu_init(dev)) + return; + + mt7615_mcu_set_eeprom(dev); + mt7615_mac_init(dev); + mt7615_phy_init(dev); + mt7615_mcu_del_wtbl_all(dev); + mt7615_check_offload_capability(dev); +} + +static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func) +{ + u32 status, ctrl; + int ret; + + sdio_claim_host(func); + + ret = sdio_enable_func(func); + if (ret < 0) + goto release; + + /* Get ownership from the device */ + sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR, + MCR_WHLPCR, &ret); + if (ret < 0) + goto disable_func; + + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); + if (ret < 0) { + dev_err(dev->mt76.dev, "Cannot get ownership from device"); + goto disable_func; + } + + ret = sdio_set_block_size(func, 512); + if (ret < 0) + goto disable_func; + + /* Enable interrupt */ + sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret); + if (ret < 0) + goto disable_func; + + ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN; + sdio_writel(func, ctrl, MCR_WHIER, &ret); + if (ret < 0) + goto disable_func; + + /* set WHISR as read clear and Rx aggregation number as 16 */ + ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16); + sdio_writel(func, ctrl, MCR_WHCR, &ret); + if (ret < 0) + goto disable_func; + + ret = sdio_claim_irq(func, mt7663s_sdio_irq); + if (ret < 0) + goto disable_func; + + sdio_release_host(func); + + return 0; + +disable_func: + sdio_disable_func(func); +release: + sdio_release_host(func); + + return ret; +} + +static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + struct mt76_sdio *sdio = &mdev->sdio; + u32 pse, ple; + int err; + + err = mt7615_mac_sta_add(mdev, vif, sta); + if (err < 0) + return err; + + /* init sched data quota */ + pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + + mutex_lock(&sdio->sched.lock); + sdio->sched.pse_data_quota = pse; + sdio->sched.ple_data_quota = ple; + mutex_unlock(&sdio->sched.lock); + + return 0; +} + +static int mt7663s_probe(struct sdio_func *func, + const struct sdio_device_id *id) +{ + static const struct mt76_driver_ops drv_ops = { + .txwi_size = 
MT_USB_TXD_SIZE, + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, + .tx_status_data = mt7663_usb_sdio_tx_status_data, + .rx_skb = mt7615_queue_rx_skb, + .sta_ps = mt7615_sta_ps, + .sta_add = mt7663s_sta_add, + .sta_remove = mt7615_mac_sta_remove, + .update_survey = mt7615_update_channel, + }; + static const struct mt76_bus_ops mt7663s_ops = { + .rr = mt7663s_rr, + .rmw = mt7663s_rmw, + .wr = mt7663s_wr, + .write_copy = mt7663s_write_copy, + .read_copy = mt7663s_read_copy, + .wr_rp = mt7663s_wr_rp, + .rd_rp = mt7663s_rd_rp, + .type = MT76_BUS_SDIO, + }; + struct ieee80211_ops *ops; + struct mt7615_dev *dev; + struct mt76_dev *mdev; + int ret; + + ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops), + GFP_KERNEL); + if (!ops) + return -ENOMEM; + + mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops); + if (!mdev) + return -ENOMEM; + + dev = container_of(mdev, struct mt7615_dev, mt76); + + INIT_WORK(&dev->mcu_work, mt7663s_init_work); + dev->reg_map = mt7663_usb_sdio_reg_map; + dev->ops = ops; + sdio_set_drvdata(func, dev); + + mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev, + "mt7663s_tx"); + if (IS_ERR(mdev->sdio.tx_kthread)) + return PTR_ERR(mdev->sdio.tx_kthread); + + ret = mt76s_init(mdev, func, &mt7663s_ops); + if (ret < 0) + goto err_free; + + ret = mt7663s_hw_init(dev, func); + if (ret) + goto err_free; + + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | + (mt76_rr(dev, MT_HW_REV) & 0xff); + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); + + ret = mt76s_alloc_queues(&dev->mt76); + if (ret) + goto err_deinit; + + ret = mt7663_usb_sdio_register_device(dev); + if (ret) + goto err_deinit; + + return 0; + +err_deinit: + mt76s_deinit(&dev->mt76); +err_free: + mt76_free_device(&dev->mt76); + + return ret; +} + +static void mt7663s_remove(struct sdio_func *func) +{ + struct mt7615_dev *dev = sdio_get_drvdata(func); + + if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) + return; + + ieee80211_unregister_hw(dev->mt76.hw); + mt76s_deinit(&dev->mt76); + mt76_free_device(&dev->mt76); +} + +#ifdef CONFIG_PM +static int mt7663s_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct mt7615_dev *mdev = sdio_get_drvdata(func); + + if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && + mt7615_firmware_offload(mdev)) { + int err; + + err = mt7615_mcu_set_hif_suspend(mdev, true); + if (err < 0) + return err; + } + + mt76s_stop_txrx(&mdev->mt76); + + return mt7663s_firmware_own(mdev); +} + +static int mt7663s_resume(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct mt7615_dev *mdev = sdio_get_drvdata(func); + int err; + + err = mt7663s_driver_own(mdev); + if (err) + return err; + + if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && + mt7615_firmware_offload(mdev)) + err = mt7615_mcu_set_hif_suspend(mdev, false); + + return err; +} + +static const struct dev_pm_ops mt7663s_pm_ops = { + .suspend = mt7663s_suspend, + .resume = mt7663s_resume, +}; +#endif + +MODULE_DEVICE_TABLE(sdio, mt7663s_table); +MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); +MODULE_FIRMWARE(MT7663_FIRMWARE_N9); +MODULE_FIRMWARE(MT7663_ROM_PATCH); + +static struct sdio_driver mt7663s_driver = { + .name = KBUILD_MODNAME, + .probe = mt7663s_probe, + .remove = mt7663s_remove, + .id_table = mt7663s_table, +#ifdef CONFIG_PM + .drv = { + .pm = 
&mt7663s_pm_ops, + } +#endif +}; +module_sdio_driver(mt7663s_driver); + +MODULE_AUTHOR("Sean Wang "); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h new file mode 100644 index 000000000000..05180971de84 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Sean Wang + */ + +#ifndef __MT76S_H +#define __MT76S_H + +#define MT_PSE_PAGE_SZ 128 + +#define MCR_WCIR 0x0000 +#define MCR_WHLPCR 0x0004 +#define WHLPCR_FW_OWN_REQ_CLR BIT(9) +#define WHLPCR_FW_OWN_REQ_SET BIT(8) +#define WHLPCR_IS_DRIVER_OWN BIT(8) +#define WHLPCR_INT_EN_CLR BIT(1) +#define WHLPCR_INT_EN_SET BIT(0) + +#define MCR_WSDIOCSR 0x0008 +#define MCR_WHCR 0x000C +#define W_INT_CLR_CTRL BIT(1) +#define RECV_MAILBOX_RD_CLR_EN BIT(2) +#define MAX_HIF_RX_LEN_NUM GENMASK(13, 8) +#define RX_ENHANCE_MODE BIT(16) + +#define MCR_WHISR 0x0010 +#define MCR_WHIER 0x0014 +#define WHIER_D2H_SW_INT GENMASK(31, 8) +#define WHIER_FW_OWN_BACK_INT_EN BIT(7) +#define WHIER_ABNORMAL_INT_EN BIT(6) +#define WHIER_RX1_DONE_INT_EN BIT(2) +#define WHIER_RX0_DONE_INT_EN BIT(1) +#define WHIER_TX_DONE_INT_EN BIT(0) +#define WHIER_DEFAULT (WHIER_RX0_DONE_INT_EN | \ + WHIER_RX1_DONE_INT_EN | \ + WHIER_TX_DONE_INT_EN | \ + WHIER_ABNORMAL_INT_EN | \ + WHIER_D2H_SW_INT) + +#define MCR_WASR 0x0020 +#define MCR_WSICR 0x0024 +#define MCR_WTSR0 0x0028 +#define TQ0_CNT GENMASK(7, 0) +#define TQ1_CNT GENMASK(15, 8) +#define TQ2_CNT GENMASK(23, 16) +#define TQ3_CNT GENMASK(31, 24) + +#define MCR_WTSR1 0x002c +#define TQ4_CNT GENMASK(7, 0) +#define TQ5_CNT GENMASK(15, 8) +#define TQ6_CNT GENMASK(23, 16) +#define TQ7_CNT GENMASK(31, 24) + +#define MCR_WTDR1 0x0034 +#define MCR_WRDR0 0x0050 +#define MCR_WRDR1 0x0054 +#define MCR_WRDR(p) (0x0050 + 4 * (p)) +#define MCR_H2DSM0R 0x0070 +#define H2D_SW_INT_READ BIT(16) +#define H2D_SW_INT_WRITE BIT(17) + +#define MCR_H2DSM1R 0x0074 +#define MCR_D2HRM0R 0x0078 +#define MCR_D2HRM1R 0x007c +#define MCR_D2HRM2R 0x0080 +#define MCR_WRPLR 0x0090 +#define RX0_PACKET_LENGTH GENMASK(15, 0) +#define RX1_PACKET_LENGTH GENMASK(31, 16) + +#define MCR_WTMDR 0x00b0 +#define MCR_WTMCR 0x00b4 +#define MCR_WTMDPCR0 0x00b8 +#define MCR_WTMDPCR1 0x00bc +#define MCR_WPLRCR 0x00d4 +#define MCR_WSR 0x00D8 +#define MCR_CLKIOCR 0x0100 +#define MCR_CMDIOCR 0x0104 +#define MCR_DAT0IOCR 0x0108 +#define MCR_DAT1IOCR 0x010C +#define MCR_DAT2IOCR 0x0110 +#define MCR_DAT3IOCR 0x0114 +#define MCR_CLKDLYCR 0x0118 +#define MCR_CMDDLYCR 0x011C +#define MCR_ODATDLYCR 0x0120 +#define MCR_IDATDLYCR1 0x0124 +#define MCR_IDATDLYCR2 0x0128 +#define MCR_ILCHCR 0x012C +#define MCR_WTQCR0 0x0130 +#define MCR_WTQCR1 0x0134 +#define MCR_WTQCR2 0x0138 +#define MCR_WTQCR3 0x013C +#define MCR_WTQCR4 0x0140 +#define MCR_WTQCR5 0x0144 +#define MCR_WTQCR6 0x0148 +#define MCR_WTQCR7 0x014C +#define MCR_WTQCR(x) (0x130 + 4 * (x)) +#define TXQ_CNT_L GENMASK(15, 0) +#define TXQ_CNT_H GENMASK(31, 16) + +#define MCR_SWPCDBGR 0x0154 + +struct mt76s_intr { + u32 isr; + struct { + u32 wtqcr[8]; + } tx; + struct { + u16 num[2]; + u16 len[2][16]; + } rx; + u32 rec_mb[2]; +} __packed; + +#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c new file mode 100644 index 000000000000..28b86bec7fc2 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c 
@@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Felix Fietkau + * Lorenzo Bianconi + * Sean Wang + */ +#include +#include +#include +#include + +#include "mt7615.h" +#include "mac.h" +#include "mcu.h" +#include "regs.h" +#include "sdio.h" + +static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + u32 pse0, ple, pse1, txdwcnt; + + pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA); + ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); + txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT, + MT_PP_TXDWCNT_TX1_ADD_DW_CNT); + + mutex_lock(&sdio->sched.lock); + + sdio->sched.pse_data_quota = pse0; + sdio->sched.ple_data_quota = ple; + sdio->sched.pse_mcu_quota = pse1; + sdio->sched.deficit = txdwcnt << 2; + + mutex_unlock(&sdio->sched.lock); + + return 0; +} + +static int +mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, + int cmd, bool wait_resp) +{ + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); + int ret, seq; + + mutex_lock(&mdev->mcu.mutex); + + mt7615_mcu_fill_msg(dev, skb, cmd, &seq); + ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); + if (ret) + goto out; + + mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q); + if (wait_resp) + ret = mt7615_mcu_wait_response(dev, cmd, seq); + +out: + mutex_unlock(&mdev->mcu.mutex); + + return ret; +} + +int mt7663s_driver_own(struct mt7615_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + u32 status; + int ret; + + if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) + goto out; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0); + + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); + if (ret < 0) { + dev_err(dev->mt76.dev, "Cannot get ownership from device"); + set_bit(MT76_STATE_PM, &mphy->state); + sdio_release_host(func); + + return ret; + } + + sdio_release_host(func); + +out: + dev->pm.last_activity = jiffies; + + return 0; +} + +int mt7663s_firmware_own(struct mt7615_dev *dev) +{ + struct sdio_func *func = dev->mt76.sdio.func; + struct mt76_phy *mphy = &dev->mt76.phy; + u32 status; + int ret; + + if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) + return 0; + + sdio_claim_host(func); + + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0); + + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, + !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); + if (ret < 0) { + dev_err(dev->mt76.dev, "Cannot set ownership to device"); + clear_bit(MT76_STATE_PM, &mphy->state); + } + + sdio_release_host(func); + + return ret; +} + +int mt7663s_mcu_init(struct mt7615_dev *dev) +{ + static const struct mt76_mcu_ops mt7663s_mcu_ops = { + .headroom = sizeof(struct mt7615_mcu_txd), + .tailroom = MT_USB_TAIL_SIZE, + .mcu_skb_send_msg = mt7663s_mcu_send_message, + .mcu_send_msg = mt7615_mcu_msg_send, + .mcu_restart = mt7615_mcu_restart, + .mcu_rr = mt7615_mcu_reg_rr, + .mcu_wr = mt7615_mcu_reg_wr, + }; + int ret; + + ret = mt7663s_driver_own(dev); + if (ret) + return ret; + + dev->mt76.mcu_ops = &mt7663s_mcu_ops, + + ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); + if (ret) { + mt7615_mcu_restart(&dev->mt76); + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, + MT_TOP_MISC2_FW_N9_RDY, 0, 500)) + return -EIO; + } + + ret = __mt7663_load_firmware(dev); + 
if (ret) + return ret; + + ret = mt7663s_mcu_init_sched(dev); + if (ret) + return ret; + + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); + + return 0; +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c new file mode 100644 index 000000000000..156666a94eb7 --- /dev/null +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: ISC +/* Copyright (C) 2020 MediaTek Inc. + * + * Author: Felix Fietkau + * Lorenzo Bianconi + * Sean Wang + */ + +#include +#include +#include + +#include +#include +#include + +#include "../trace.h" +#include "mt7615.h" +#include "sdio.h" +#include "mac.h" + +static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + + mutex_lock(&sdio->sched.lock); + sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */ + FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */ + FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */ + FIELD_GET(TXQ_CNT_H, data[1]); /* VO */ + sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */ + FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */ + FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */ + FIELD_GET(TXQ_CNT_L, data[4]); /* VO */ + sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]); + mutex_unlock(&sdio->sched.lock); +} + +static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len, + int buf_len) +{ + int len = min_t(int, data_len, MT_SKB_HEAD_LEN); + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) + return NULL; + + skb_put_data(skb, data, len); + if (data_len > len) { + struct page *page; + + data += len; + page = virt_to_head_page(data); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, data - page_address(page), + data_len - len, buf_len); + get_page(page); + } + + return skb; +} + +static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid, + struct mt76s_intr *intr) +{ + struct mt76_queue *q = &dev->mt76.q_rx[qid]; + struct mt76_sdio *sdio = &dev->mt76.sdio; + int len = 0, err, i, order; + struct page *page; + u8 *buf; + + for (i = 0; i < intr->rx.num[qid]; i++) + len += round_up(intr->rx.len[qid][i] + 4, 4); + + if (!len) + return 0; + + if (len > sdio->func->cur_blksize) + len = roundup(len, sdio->func->cur_blksize); + + order = get_order(len); + page = __dev_alloc_pages(GFP_KERNEL, order); + if (!page) + return -ENOMEM; + + buf = page_address(page); + + err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); + if (err < 0) { + dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err); + __free_pages(page, order); + return err; + } + + for (i = 0; i < intr->rx.num[qid]; i++) { + int index = (q->tail + i) % q->ndesc; + struct mt76_queue_entry *e = &q->entry[index]; + + len = intr->rx.len[qid][i]; + e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4)); + if (!e->skb) + break; + + buf += round_up(len + 4, 4); + if (q->queued + i + 1 == q->ndesc) + break; + } + __free_pages(page, order); + + spin_lock_bh(&q->lock); + q->tail = (q->tail + i) % q->ndesc; + q->queued += i; + spin_unlock_bh(&q->lock); + + return err; +} + +static int mt7663s_tx_update_sched(struct mt7615_dev *dev, + struct mt76_queue_entry *e, + bool mcu) +{ + struct mt76_sdio *sdio = &dev->mt76.sdio; + struct mt76_phy *mphy = &dev->mt76.phy; + struct ieee80211_hdr *hdr; + int size, ret = -EBUSY; + + size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); + + if (mcu) { + if (!test_bit(MT76_STATE_MCU_RUNNING, 
&mphy->state)) + return 0; + + mutex_lock(&sdio->sched.lock); + if (sdio->sched.pse_mcu_quota > size) { + sdio->sched.pse_mcu_quota -= size; + ret = 0; + } + mutex_unlock(&sdio->sched.lock); + + return ret; + } + + hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE); + if (ieee80211_is_ctl(hdr->frame_control)) + return 0; + + mutex_lock(&sdio->sched.lock); + if (sdio->sched.pse_data_quota > size && + sdio->sched.ple_data_quota > 0) { + sdio->sched.pse_data_quota -= size; + sdio->sched.ple_data_quota--; + ret = 0; + } + mutex_unlock(&sdio->sched.lock); + + return ret; +} + +static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q) +{ + bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q; + struct mt76_sdio *sdio = &dev->mt76.sdio; + int nframes = 0; + + while (q->first != q->tail) { + struct mt76_queue_entry *e = &q->entry[q->first]; + int err, len = e->skb->len; + + if (mt7663s_tx_update_sched(dev, e, mcu)) + break; + + if (len > sdio->func->cur_blksize) + len = roundup(len, sdio->func->cur_blksize); + + /* TODO: skb_walk_frags and then write to SDIO port */ + err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len); + if (err) { + dev_err(dev->mt76.dev, "sdio write failed: %d\n", err); + return -EIO; + } + + q->first = (q->first + 1) % q->ndesc; + nframes++; + } + + spin_lock_bh(&q->lock); + q->queued += nframes; + spin_unlock_bh(&q->lock); + + return nframes; +} + +static int mt7663s_tx_run_queues(struct mt7615_dev *dev) +{ + int i, nframes = 0; + + for (i = 0; i < MT_TXQ_MCU_WA; i++) { + int ret; + + ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q); + if (ret < 0) + return ret; + + nframes += ret; + } + + return nframes; +} + +int mt7663s_kthread_run(void *data) +{ + struct mt7615_dev *dev = data; + struct mt76_phy *mphy = &dev->mt76.phy; + + while (!kthread_should_stop()) { + int ret; + + cond_resched(); + + sdio_claim_host(dev->mt76.sdio.func); + ret = mt7663s_tx_run_queues(dev); + sdio_release_host(dev->mt76.sdio.func); + + if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } else { + wake_up_process(dev->mt76.sdio.kthread); + } + } + + return 0; +} + +void mt7663s_sdio_irq(struct sdio_func *func) +{ + struct mt7615_dev *dev = sdio_get_drvdata(func); + struct mt76_sdio *sdio = &dev->mt76.sdio; + struct mt76s_intr intr; + + /* disable interrupt */ + sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0); + + do { + sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr)); + trace_dev_irq(&dev->mt76, intr.isr, 0); + + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) + goto out; + + if (intr.isr & WHIER_RX0_DONE_INT_EN) { + mt7663s_rx_run_queue(dev, 0, &intr); + wake_up_process(sdio->kthread); + } + + if (intr.isr & WHIER_RX1_DONE_INT_EN) { + mt7663s_rx_run_queue(dev, 1, &intr); + wake_up_process(sdio->kthread); + } + + if (intr.isr & WHIER_TX_DONE_INT_EN) { + mt7663s_refill_sched_quota(dev, intr.tx.wtqcr); + mt7663s_tx_run_queues(dev); + wake_up_process(sdio->kthread); + } + } while (intr.isr); +out: + /* enable interrupt */ + sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c index 50009bc5ee8d..6dffdaaa9ad5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c @@ -24,6 +24,7 @@ const u32 mt7663_usb_sdio_reg_map[] = { [MT_TOP_MISC_BASE] = 0x81020000, [MT_PLE_BASE] = 0x82060000, 
[MT_PSE_BASE] = 0x82068000, + [MT_PP_BASE] = 0x8206c000, [MT_WTBL_BASE_ADDR] = 0x820e0000, [MT_CFG_BASE] = 0x820f0000, [MT_AGG_BASE] = 0x820f2000, From 1f8284150dbc6a6995d8647be025944ebc29ad26 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 17 Jul 2020 10:57:39 +0200 Subject: [PATCH 1398/2056] mt76: mt76s: move queue accounting in mt76s_tx_queue_skb In order to avoid possible overflows, move tx queue accounting from mt7663s_tx_run_queue() to mt76s_tx_queue_skb_raw()/mt76s_tx_queue_skb() Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c | 5 +---- drivers/net/wireless/mediatek/mt76/sdio.c | 5 +++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c index 156666a94eb7..443a4ecdad3a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -180,14 +180,11 @@ static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q) return -EIO; } + e->done = true; q->first = (q->first + 1) % q->ndesc; nframes++; } - spin_lock_bh(&q->lock); - q->queued += nframes; - spin_unlock_bh(&q->lock); - return nframes; } diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c index a6f3255c4d84..d2b38ed7f3b4 100644 --- a/drivers/net/wireless/mediatek/mt76/sdio.c +++ b/drivers/net/wireless/mediatek/mt76/sdio.c @@ -138,6 +138,9 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) bool wake; while (q->queued > n_dequeued) { + if (!q->entry[q->head].done) + break; + if (q->entry[q->head].schedule) { q->entry[q->head].schedule = false; n_sw_dequeued++; @@ -229,6 +232,7 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, q->entry[q->tail].skb = tx_info.skb; q->entry[q->tail].buf_sz = len; q->tail = (q->tail + 1) % q->ndesc; + q->queued++; return idx; } @@ -251,6 +255,7 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, q->entry[q->tail].buf_sz = len; q->entry[q->tail].skb = skb; q->tail = (q->tail + 1) % q->ndesc; + q->queued++; out: spin_unlock_bh(&q->lock); From 4c7e1711cf4cdaf7124d3794c42c1b29af72740b Mon Sep 17 00:00:00 2001 From: Ryder Lee Date: Sun, 12 Jul 2020 01:25:51 +0800 Subject: [PATCH 1399/2056] mt76: mt7915: fix potential memory leak in mcu message handler Fix potential memory leak in mcu message handler on error condition. 
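In other words, the early return on a sequence-number mismatch skipped the dev_kfree_skb() at the end of the handler. A minimal standalone sketch of the single-exit idiom the fix applies (the function and parameter names here are invented, not the driver's actual handler):

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    /* Sketch only: route every exit through one label so the skb is
     * freed on the error path as well as on success.
     */
    static int parse_response_sketch(struct sk_buff *skb, int seq, int rxd_seq)
    {
            int ret = 0;

            if (seq != rxd_seq) {
                    ret = -EAGAIN;  /* previously: return -EAGAIN; -> skb leaked */
                    goto out;
            }

            /* ... command-specific handling, may also set ret ... */

    out:
            dev_kfree_skb(skb);     /* reached on every path */
            return ret;
    }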
Fixes: c6b002bcdfa6 ("mt76: add mac80211 driver for MT7915 PCIe-based chipsets") Signed-off-by: Ryder Lee Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 93b1439e147b..eaed5ef05401 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -312,8 +312,10 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd, struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; int ret = 0; - if (seq != rxd->seq) - return -EAGAIN; + if (seq != rxd->seq) { + ret = -EAGAIN; + goto out; + } switch (cmd) { case -MCU_CMD_PATCH_SEM_CONTROL: @@ -330,6 +332,7 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd, default: break; } +out: dev_kfree_skb(skb); return ret; From 2bccc8415883c1cd5ae8836548d9783dbbd84999 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 21 Jul 2020 10:19:22 +0200 Subject: [PATCH 1400/2056] mt76: mt7615: fix possible memory leak in mt7615_mcu_wtbl_sta_add Free the second mcu skb if __mt76_mcu_skb_send_msg() fails to transmit the first one in mt7615_mcu_wtbl_sta_add(). Fixes: 99c457d902cf9 ("mt76: mt7615: move mt7615_mcu_set_bmc to mt7615_mcu_ops") Signed-off-by: Lorenzo Bianconi Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt7615/mcu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index 2e0928db0aad..d0cbb283982f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -1293,8 +1293,12 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif, skb = enable ? wskb : sskb; err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); - if (err < 0) + if (err < 0) { + skb = enable ? sskb : wskb; + dev_kfree_skb(skb); + return err; + } cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE; skb = enable ? sskb : wskb; From 5648d1c9cadb0a6afb29dd8891159906dadf5c77 Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sat, 18 Jul 2020 00:47:26 -0500 Subject: [PATCH 1401/2056] mt76: mt76u: add missing release on skb in __mt76x02u_mcu_send_msg In the implementation of __mt76x02u_mcu_send_msg() the skb is consumed in all execution paths except one. Release the skb before returning if test_bit() fails.
Signed-off-by: Navid Emamdoost Signed-off-by: Felix Fietkau --- drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index a30bb536fc8a..e43d13d7c988 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -87,8 +87,10 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, u32 info; int ret; - if (test_bit(MT76_REMOVED, &dev->phy.state)) - return 0; + if (test_bit(MT76_REMOVED, &dev->phy.state)) { + ret = 0; + goto out; + } if (wait_resp) { seq = ++dev->mcu.msg_seq & 0xf; @@ -111,6 +113,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, if (wait_resp) ret = mt76x02u_mcu_wait_resp(dev, seq); +out: consume_skb(skb); return ret; From 2ea485980734500d41e4530f038d94a21e2b3b34 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 17 Jul 2020 18:53:22 +0200 Subject: [PATCH 1402/2056] selftests: bpf: test_kmod.sh: Fix running out of srctree When running out of srctree, relative path to lib/test_bpf.ko is different than when running in srctree. Check $building_out_of_srctree environment variable and use a different relative path if needed. Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717165326.6786-2-iii@linux.ibm.com --- tools/testing/selftests/bpf/test_kmod.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh index 9df0d2ac45f8..4f6444bcd53f 100755 --- a/tools/testing/selftests/bpf/test_kmod.sh +++ b/tools/testing/selftests/bpf/test_kmod.sh @@ -10,7 +10,13 @@ if [ "$(id -u)" != "0" ]; then exit $ksft_skip fi -SRC_TREE=../../../../ +if [ "$building_out_of_srctree" ]; then + # We are in linux-build/kselftest/bpf + OUTPUT=../../ +else + # We are in linux/tools/testing/selftests/bpf + OUTPUT=../../../../ +fi test_run() { @@ -19,8 +25,8 @@ test_run() echo "[ JIT enabled:$1 hardened:$2 ]" dmesg -C - if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then - insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null + if [ -f ${OUTPUT}/lib/test_bpf.ko ]; then + insmod ${OUTPUT}/lib/test_bpf.ko 2> /dev/null if [ $? -ne 0 ]; then rc=1 fi From 7477d43be5b1448bc0d4c85cb185a0144cc080e1 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 17 Jul 2020 18:53:23 +0200 Subject: [PATCH 1403/2056] s390/bpf: Fix sign extension in branch_ku Both signed and unsigned variants of BPF_JMP | BPF_K require sign-extending the immediate. JIT emits cgfi for the signed case, which is correct, and clgfi for the unsigned case, which is not correct: clgfi zero-extends the immediate. s390 does not provide an instruction that does sign-extension and unsigned comparison at the same time. Therefore, fix by first loading the sign-extended immediate into work register REG_1 and proceeding as if it's BPF_X. 
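For a concrete feel of the failure mode: with imm = -1, the sign-extended and zero-extended 64-bit values differ, and an unsigned 64-bit compare against them can take opposite branches. A small userspace illustration in plain C (not JIT code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t imm = -1;               /* 32-bit BPF immediate */
            uint64_t dst = 0x100000000ULL;  /* value in the dst register */

            uint64_t sign_ext = (uint64_t)(int64_t)imm;  /* 0xffffffffffffffff */
            uint64_t zero_ext = (uint64_t)(uint32_t)imm; /* 0x00000000ffffffff */

            /* BPF_JGT | BPF_K is an unsigned compare of dst against the
             * sign-extended immediate.
             */
            printf("sign-extended operand: %d\n", dst > sign_ext); /* 0: not taken */
            printf("zero-extended (clgfi): %d\n", dst > zero_ext); /* 1: wrongly taken */
            return 0;
    }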
Fixes: 4e9b4a6883dd ("s390/bpf: Use relative long branches") Reported-by: Seth Forshee Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Tested-by: Seth Forshee Link: https://lore.kernel.org/bpf/20200717165326.6786-3-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index f4242b894cf2..6b3d612948fb 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1417,21 +1417,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, } break; branch_ku: - is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - /* clfi or clgfi %dst,imm */ - EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000, - dst_reg, imm); - if (!is_first_pass(jit) && - can_use_rel(jit, addrs[i + off + 1])) { - /* brc mask,off */ - EMIT4_PCREL_RIC(0xa7040000, - mask >> 12, addrs[i + off + 1]); - } else { - /* brcl mask,off */ - EMIT6_PCREL_RILC(0xc0040000, - mask >> 12, addrs[i + off + 1]); - } - break; + /* lgfi %w1,imm (load sign extend imm) */ + src_reg = REG_1; + EMIT6_IMM(0xc0010000, src_reg, imm); + goto branch_xu; branch_xs: is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; if (!is_first_pass(jit) && From 5fa6974471c5518a50bdd814067508dbcb477251 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 17 Jul 2020 18:53:24 +0200 Subject: [PATCH 1404/2056] s390/bpf: Use brcl for jumping to exit_ip if necessary "BPF_MAXINSNS: Maximum possible literals" test causes panic with bpf_jit_harden = 2. The reason is that BPF_JMP | BPF_EXIT is always emitted as brc, however, after removal of JITed image size limitations, brcl might be required. Fix by using brcl when necessary. Fixes: 4e9b4a6883dd ("s390/bpf: Use relative long branches") Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717165326.6786-4-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6b3d612948fb..6b8968f6e207 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1268,8 +1268,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, last = (i == fp->len - 1) ? 1 : 0; if (last) break; - /* j */ - EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); + if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip)) + /* brc 0xf, */ + EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip); + else + /* brcl 0xf, */ + EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip); break; /* * Branch relative (number of skipped instructions) to offset on From 1491b73311a15bb5beeab5d30e03bff761ef6c18 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 17 Jul 2020 18:53:25 +0200 Subject: [PATCH 1405/2056] s390/bpf: Tolerate not converging code shrinking "BPF_MAXINSNS: Maximum possible literals" unnecessarily falls back to the interpreter because of failing sanity check in bpf_set_addr. The problem is that there are a lot of branches that can be shrunk, and doing so opens up the possibility to shrink even more. This process does not converge after 3 passes, causing code offsets to change during the codegen pass, which must never happen. Fix by inserting nops during the codegen pass in order to preserve code offsets.
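The cascade is easy to reproduce: shrinking one branch can be exactly what brings the next branch's target into short range, so each pass may enable only the next shrink. A toy model of that behaviour (all sizes, ranges and the layout are invented; nothing here is JIT code):

    #include <stdio.h>

    #define N       8
    #define LONG    6   /* bytes, think brcl */
    #define SHORT   4   /* bytes, think brc  */
    #define FILLER  10  /* bytes each branch jumps over, besides the next branch */
    #define RANGE   14  /* toy limit for the short encoding */

    int main(void)
    {
            int size[N], passes = 0, changed = 1, i;

            for (i = 0; i < N; i++)
                    size[i] = LONG;

            while (changed) {
                    changed = 0;
                    passes++;
                    for (i = 0; i < N; i++) {
                            /* branch i spans FILLER bytes plus the encoding
                             * of branch i + 1, if there is one
                             */
                            int span = FILLER + (i + 1 < N ? size[i + 1] : 0);

                            if (size[i] == LONG && span <= RANGE) {
                                    size[i] = SHORT;
                                    changed = 1;
                            }
                    }
            }
            printf("fixed point after %d passes\n", passes); /* 9 for N = 8 */
            return 0;
    }

With a fixed budget of three shrinking passes, layouts like this are still in flux when codegen starts, which is why padding with nops, rather than shrinking further, is the safe way to keep the recorded offsets valid.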
Fixes: 4e9b4a6883dd ("s390/bpf: Use relative long branches") Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717165326.6786-5-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 6b8968f6e207..a78c5b59e1ab 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -489,6 +489,24 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) } while (re <= last); } +static void bpf_skip(struct bpf_jit *jit, int size) +{ + if (size >= 6 && !is_valid_rel(size)) { + /* brcl 0xf,size */ + EMIT6_PCREL_RIL(0xc0f4000000, size); + size -= 6; + } else if (size >= 4 && is_valid_rel(size)) { + /* brc 0xf,size */ + EMIT4_PCREL(0xa7f40000, size); + size -= 4; + } + while (size >= 2) { + /* bcr 0,%0 */ + _EMIT2(0x0700); + size -= 2; + } +} + /* * Emit function prologue * @@ -1503,7 +1521,14 @@ static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i) */ static int bpf_set_addr(struct bpf_jit *jit, int i) { - if (!bpf_is_new_addr_sane(jit, i)) + int delta; + + if (is_codegen_pass(jit)) { + delta = jit->prg - jit->addrs[i]; + if (delta < 0) + bpf_skip(jit, -delta); + } + if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i))) return -1; jit->addrs[i] = jit->prg; return 0; From bfabff3cb0fef366086c64f24be8ab316a355b99 Mon Sep 17 00:00:00 2001 From: Luke Nelson Date: Mon, 20 Jul 2020 19:52:38 -0700 Subject: [PATCH 1406/2056] bpf, riscv: Modify JIT ctx to support compressed instructions This patch makes the necessary changes to struct rv_jit_context and to bpf_int_jit_compile to support compressed riscv (RVC) instructions in the BPF JIT. It changes the JIT image to be u16 instead of u32, since RVC instructions are 2 bytes as opposed to 4. It also changes ctx->offset and ctx->ninsns to refer to 2-byte instructions rather than 4-byte ones. The riscv PC is required to be 16-bit aligned with or without RVC, so this is sufficient to refer to any valid riscv offset. The code for computing jump offsets in bytes is updated accordingly, and factored into a new "ninsns_rvoff" function to simplify the code. Signed-off-by: Luke Nelson Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200721025241.8077-2-luke.r.nels@gmail.com --- arch/riscv/net/bpf_jit.h | 31 ++++++++++++++++++++++++++++--- arch/riscv/net/bpf_jit_comp32.c | 14 +++++++------- arch/riscv/net/bpf_jit_comp64.c | 12 ++++++------ arch/riscv/net/bpf_jit_core.c | 6 +++--- 4 files changed, 44 insertions(+), 19 deletions(-) diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index 20e235d06f66..e90d336a9e5f 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -13,6 +13,11 @@ #include #include +static inline bool rvc_enabled(void) +{ + return IS_ENABLED(CONFIG_RISCV_ISA_C); +} + enum { RV_REG_ZERO = 0, /* The constant value 0 */ RV_REG_RA = 1, /* Return address */ @@ -50,7 +55,7 @@ enum { struct rv_jit_context { struct bpf_prog *prog; - u32 *insns; /* RV insns */ + u16 *insns; /* RV insns */ int ninsns; int epilogue_offset; int *offset; /* BPF to RV */ @@ -58,6 +63,12 @@ struct rv_jit_context { int stack_size; }; +/* Convert from ninsns to bytes. 
*/ +static inline int ninsns_rvoff(int ninsns) +{ + return ninsns << 1; +} + struct rv_jit_data { struct bpf_binary_header *header; u8 *image; @@ -74,8 +85,22 @@ static inline void bpf_flush_icache(void *start, void *end) flush_icache_range((unsigned long)start, (unsigned long)end); } +/* Emit a 4-byte riscv instruction. */ static inline void emit(const u32 insn, struct rv_jit_context *ctx) { + if (ctx->insns) { + ctx->insns[ctx->ninsns] = insn; + ctx->insns[ctx->ninsns + 1] = (insn >> 16); + } + + ctx->ninsns += 2; +} + +/* Emit a 2-byte riscv compressed instruction. */ +static inline void emitc(const u16 insn, struct rv_jit_context *ctx) +{ + BUILD_BUG_ON(!rvc_enabled()); + if (ctx->insns) ctx->insns[ctx->ninsns] = insn; @@ -86,7 +111,7 @@ static inline int epilogue_offset(struct rv_jit_context *ctx) { int to = ctx->epilogue_offset, from = ctx->ninsns; - return (to - from) << 2; + return ninsns_rvoff(to - from); } /* Return -1 or inverted cond. */ @@ -149,7 +174,7 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx) off++; /* BPF branch is from PC+1, RV is from PC */ from = (insn > 0) ? ctx->offset[insn - 1] : 0; to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0; - return (to - from) << 2; + return ninsns_rvoff(to - from); } /* Instruction formats. */ diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index b198eaa74456..bc5f2204693f 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -644,7 +644,7 @@ static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff, e = ctx->ninsns; /* Adjust for extra insns. */ - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); return 0; } @@ -713,7 +713,7 @@ static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx) if (far) { e = ctx->ninsns; /* Adjust for extra insns. */ - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx); } return 0; @@ -731,7 +731,7 @@ static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff, e = ctx->ninsns; /* Adjust for extra insns. 
*/ - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx)) return -1; @@ -795,7 +795,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) * if (index >= max_entries) * goto out; */ - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx); /* @@ -804,7 +804,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) * goto out; */ emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx); - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx); /* @@ -818,7 +818,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) if (is_12b_check(off, insn)) return -1; emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx); - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx); /* @@ -1214,7 +1214,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit_imm32(tmp2, imm, ctx); src = tmp2; e = ctx->ninsns; - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); } if (is64) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 6cfd164cbe88..55861269da2a 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -304,14 +304,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) if (is_12b_check(off, insn)) return -1; emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx); - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx); /* if (TCC-- < 0) * goto out; */ emit(rv_addi(RV_REG_T1, tcc, -1), ctx); - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx); /* prog = array->ptrs[index]; @@ -324,7 +324,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) if (is_12b_check(off, insn)) return -1; emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx); - off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; + off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx); /* goto *(prog->bpf_func + 4); */ @@ -757,7 +757,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, e = ctx->ninsns; /* Adjust for extra insns */ - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); } if (BPF_OP(code) == BPF_JSET) { @@ -810,7 +810,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, e = ctx->ninsns; /* Adjust for extra insns */ - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); emit_branch(BPF_OP(code), rd, rs, rvoff, ctx); break; @@ -831,7 +831,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, if (!is64 && imm < 0) emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx); e = ctx->ninsns; - rvoff -= (e - s) << 2; + rvoff -= ninsns_rvoff(e - s); emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); break; diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 709b94ece3ed..3630d447352c 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -73,7 +73,7 @@ struct bpf_prog *bpf_int_jit_compile(struct 
bpf_prog *prog) if (ctx->offset) { extra_pass = true; - image_size = sizeof(u32) * ctx->ninsns; + image_size = sizeof(*ctx->insns) * ctx->ninsns; goto skip_init_ctx; } @@ -103,7 +103,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) if (jit_data->header) break; - image_size = sizeof(u32) * ctx->ninsns; + image_size = sizeof(*ctx->insns) * ctx->ninsns; jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image, @@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) goto out_offset; } - ctx->insns = (u32 *)jit_data->image; + ctx->insns = (u16 *)jit_data->image; /* * Now, when the image is allocated, the image can * potentially shrink more (auipc/jalr -> jal). From 94ad428df53688a1f80ab55f2b1186101510a778 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 17 Jul 2020 18:53:26 +0200 Subject: [PATCH 1407/2056] s390/bpf: Use bpf_skip() in bpf_jit_prologue() Now that we have bpf_skip() for emitting nops, use it in bpf_jit_prologue() in order to reduce code duplication. Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200717165326.6786-6-iii@linux.ibm.com --- arch/s390/net/bpf_jit_comp.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index a78c5b59e1ab..26f97a10e793 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -519,10 +519,11 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth) /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */ _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT); } else { - /* j tail_call_start: NOP if no tail calls are used */ - EMIT4_PCREL(0xa7f40000, 6); - /* bcr 0,%0 */ - EMIT2(0x0700, 0, REG_0); + /* + * There are no tail calls. Insert nops in order to have + * tail_call_start at a predictable offset. + */ + bpf_skip(jit, 6); } /* Tail calls have to skip above initialization */ jit->tail_call_start = jit->prg; From 804ec72c68c8477b8713a1e8f8eda120d3471031 Mon Sep 17 00:00:00 2001 From: Luke Nelson Date: Mon, 20 Jul 2020 19:52:39 -0700 Subject: [PATCH 1408/2056] bpf, riscv: Add encodings for compressed instructions This patch adds functions for encoding and emitting compressed riscv (RVC) instructions to the BPF JIT. Some regular riscv instructions can be compressed into an RVC instruction if the instruction fields meet some requirements. For example, "add rd, rs1, rs2" can be compressed into "c.add rd, rs2" when rd == rs1. To make using RVC encodings simpler, this patch also adds helper functions that selectively emit either a regular instruction or a compressed instruction if possible. For example, emit_add will produce a "c.add" if possible and regular "add" otherwise. 
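For a sense of the size win, the same addition can be encoded both ways when rd == rs1 and neither register is x0. The sketch below builds the two encodings by hand for "add a0, a0, a1" (a0 = x10, a1 = x11, field layouts per the RISC-V base and C extensions); it is a standalone illustration, not the JIT's emit path:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t rv_add(uint8_t rd, uint8_t rs1, uint8_t rs2)
    {
            /* R-type: funct7 = 0, funct3 = 0, opcode = 0x33 */
            return ((uint32_t)rs2 << 20) | ((uint32_t)rs1 << 15) |
                   ((uint32_t)rd << 7) | 0x33;
    }

    static uint16_t rvc_add(uint8_t rd, uint8_t rs2)
    {
            /* CR-type: funct4 = 0x9, op = 0x2; valid when rd == rs1,
             * rd != x0, rs2 != x0
             */
            return (0x9 << 12) | ((uint16_t)rd << 7) | ((uint16_t)rs2 << 2) | 0x2;
    }

    int main(void)
    {
            printf("add   a0, a0, a1 -> 0x%08x (4 bytes)\n", rv_add(10, 10, 11));
            printf("c.add a0, a1     -> 0x%04x     (2 bytes)\n", rvc_add(10, 11));
            return 0;
    }

This prints 0x00b50533 for the regular encoding and 0x952e for the compressed one, which is the two-byte saving the emit_X helpers harvest whenever the operands allow it.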
Signed-off-by: Luke Nelson Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200721025241.8077-3-luke.r.nels@gmail.com --- arch/riscv/net/bpf_jit.h | 452 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 452 insertions(+) diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index e90d336a9e5f..75c1e9996867 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -53,6 +53,18 @@ enum { RV_REG_T6 = 31, }; +static inline bool is_creg(u8 reg) +{ + return (1 << reg) & (BIT(RV_REG_FP) | + BIT(RV_REG_S1) | + BIT(RV_REG_A0) | + BIT(RV_REG_A1) | + BIT(RV_REG_A2) | + BIT(RV_REG_A3) | + BIT(RV_REG_A4) | + BIT(RV_REG_A5)); +} + struct rv_jit_context { struct bpf_prog *prog; u16 *insns; /* RV insns */ @@ -142,6 +154,36 @@ static inline int invert_bpf_cond(u8 cond) return -1; } +static inline bool is_6b_int(long val) +{ + return -(1L << 5) <= val && val < (1L << 5); +} + +static inline bool is_7b_uint(unsigned long val) +{ + return val < (1UL << 7); +} + +static inline bool is_8b_uint(unsigned long val) +{ + return val < (1UL << 8); +} + +static inline bool is_9b_uint(unsigned long val) +{ + return val < (1UL << 9); +} + +static inline bool is_10b_int(long val) +{ + return -(1L << 9) <= val && val < (1L << 9); +} + +static inline bool is_10b_uint(unsigned long val) +{ + return val < (1UL << 10); +} + static inline bool is_12b_int(long val) { return -(1L << 11) <= val && val < (1L << 11); @@ -232,6 +274,59 @@ static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode); } +/* RISC-V compressed instruction formats. */ + +static inline u16 rv_cr_insn(u8 funct4, u8 rd, u8 rs2, u8 op) +{ + return (funct4 << 12) | (rd << 7) | (rs2 << 2) | op; +} + +static inline u16 rv_ci_insn(u8 funct3, u32 imm6, u8 rd, u8 op) +{ + u32 imm; + + imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2); + return (funct3 << 13) | (rd << 7) | op | imm; +} + +static inline u16 rv_css_insn(u8 funct3, u32 uimm, u8 rs2, u8 op) +{ + return (funct3 << 13) | (uimm << 7) | (rs2 << 2) | op; +} + +static inline u16 rv_ciw_insn(u8 funct3, u32 uimm, u8 rd, u8 op) +{ + return (funct3 << 13) | (uimm << 5) | ((rd & 0x7) << 2) | op; +} + +static inline u16 rv_cl_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rd, + u8 op) +{ + return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) | + (imm_lo << 5) | ((rd & 0x7) << 2) | op; +} + +static inline u16 rv_cs_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rs2, + u8 op) +{ + return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) | + (imm_lo << 5) | ((rs2 & 0x7) << 2) | op; +} + +static inline u16 rv_ca_insn(u8 funct6, u8 rd, u8 funct2, u8 rs2, u8 op) +{ + return (funct6 << 10) | ((rd & 0x7) << 7) | (funct2 << 5) | + ((rs2 & 0x7) << 2) | op; +} + +static inline u16 rv_cb_insn(u8 funct3, u32 imm6, u8 funct2, u8 rd, u8 op) +{ + u32 imm; + + imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2); + return (funct3 << 13) | (funct2 << 10) | ((rd & 0x7) << 7) | op | imm; +} + /* Instructions shared by both RV32 and RV64. */ static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0) @@ -439,6 +534,135 @@ static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); } +/* RVC instrutions. 
*/ + +static inline u16 rvc_addi4spn(u8 rd, u32 imm10) +{ + u32 imm; + + imm = ((imm10 & 0x30) << 2) | ((imm10 & 0x3c0) >> 4) | + ((imm10 & 0x4) >> 1) | ((imm10 & 0x8) >> 3); + return rv_ciw_insn(0x0, imm, rd, 0x0); +} + +static inline u16 rvc_lw(u8 rd, u32 imm7, u8 rs1) +{ + u32 imm_hi, imm_lo; + + imm_hi = (imm7 & 0x38) >> 3; + imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6); + return rv_cl_insn(0x2, imm_hi, rs1, imm_lo, rd, 0x0); +} + +static inline u16 rvc_sw(u8 rs1, u32 imm7, u8 rs2) +{ + u32 imm_hi, imm_lo; + + imm_hi = (imm7 & 0x38) >> 3; + imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6); + return rv_cs_insn(0x6, imm_hi, rs1, imm_lo, rs2, 0x0); +} + +static inline u16 rvc_addi(u8 rd, u32 imm6) +{ + return rv_ci_insn(0, imm6, rd, 0x1); +} + +static inline u16 rvc_li(u8 rd, u32 imm6) +{ + return rv_ci_insn(0x2, imm6, rd, 0x1); +} + +static inline u16 rvc_addi16sp(u32 imm10) +{ + u32 imm; + + imm = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | ((imm10 & 0x40) >> 3) | + ((imm10 & 0x180) >> 6) | ((imm10 & 0x20) >> 5); + return rv_ci_insn(0x3, imm, RV_REG_SP, 0x1); +} + +static inline u16 rvc_lui(u8 rd, u32 imm6) +{ + return rv_ci_insn(0x3, imm6, rd, 0x1); +} + +static inline u16 rvc_srli(u8 rd, u32 imm6) +{ + return rv_cb_insn(0x4, imm6, 0, rd, 0x1); +} + +static inline u16 rvc_srai(u8 rd, u32 imm6) +{ + return rv_cb_insn(0x4, imm6, 0x1, rd, 0x1); +} + +static inline u16 rvc_andi(u8 rd, u32 imm6) +{ + return rv_cb_insn(0x4, imm6, 0x2, rd, 0x1); +} + +static inline u16 rvc_sub(u8 rd, u8 rs) +{ + return rv_ca_insn(0x23, rd, 0, rs, 0x1); +} + +static inline u16 rvc_xor(u8 rd, u8 rs) +{ + return rv_ca_insn(0x23, rd, 0x1, rs, 0x1); +} + +static inline u16 rvc_or(u8 rd, u8 rs) +{ + return rv_ca_insn(0x23, rd, 0x2, rs, 0x1); +} + +static inline u16 rvc_and(u8 rd, u8 rs) +{ + return rv_ca_insn(0x23, rd, 0x3, rs, 0x1); +} + +static inline u16 rvc_slli(u8 rd, u32 imm6) +{ + return rv_ci_insn(0, imm6, rd, 0x2); +} + +static inline u16 rvc_lwsp(u8 rd, u32 imm8) +{ + u32 imm; + + imm = ((imm8 & 0xc0) >> 6) | (imm8 & 0x3c); + return rv_ci_insn(0x2, imm, rd, 0x2); +} + +static inline u16 rvc_jr(u8 rs1) +{ + return rv_cr_insn(0x8, rs1, RV_REG_ZERO, 0x2); +} + +static inline u16 rvc_mv(u8 rd, u8 rs) +{ + return rv_cr_insn(0x8, rd, rs, 0x2); +} + +static inline u16 rvc_jalr(u8 rs1) +{ + return rv_cr_insn(0x9, rs1, RV_REG_ZERO, 0x2); +} + +static inline u16 rvc_add(u8 rd, u8 rs) +{ + return rv_cr_insn(0x9, rd, rs, 0x2); +} + +static inline u16 rvc_swsp(u32 imm8, u8 rs2) +{ + u32 imm; + + imm = (imm8 & 0x3c) | ((imm8 & 0xc0) >> 6); + return rv_css_insn(0x6, imm, rs2, 0x2); +} + /* * RV64-only instructions. * @@ -528,6 +752,234 @@ static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f); } +/* RV64-only RVC instructions. 
*/ + +static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1) +{ + u32 imm_hi, imm_lo; + + imm_hi = (imm8 & 0x38) >> 3; + imm_lo = (imm8 & 0xc0) >> 6; + return rv_cl_insn(0x3, imm_hi, rs1, imm_lo, rd, 0x0); +} + +static inline u16 rvc_sd(u8 rs1, u32 imm8, u8 rs2) +{ + u32 imm_hi, imm_lo; + + imm_hi = (imm8 & 0x38) >> 3; + imm_lo = (imm8 & 0xc0) >> 6; + return rv_cs_insn(0x7, imm_hi, rs1, imm_lo, rs2, 0x0); +} + +static inline u16 rvc_subw(u8 rd, u8 rs) +{ + return rv_ca_insn(0x27, rd, 0, rs, 0x1); +} + +static inline u16 rvc_addiw(u8 rd, u32 imm6) +{ + return rv_ci_insn(0x1, imm6, rd, 0x1); +} + +static inline u16 rvc_ldsp(u8 rd, u32 imm9) +{ + u32 imm; + + imm = ((imm9 & 0x1c0) >> 6) | (imm9 & 0x38); + return rv_ci_insn(0x3, imm, rd, 0x2); +} + +static inline u16 rvc_sdsp(u32 imm9, u8 rs2) +{ + u32 imm; + + imm = (imm9 & 0x38) | ((imm9 & 0x1c0) >> 6); + return rv_css_insn(0x7, imm, rs2, 0x2); +} + +#endif /* __riscv_xlen == 64 */ + +/* Helper functions that emit RVC instructions when possible. */ + +static inline void emit_jalr(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd == RV_REG_RA && rs && !imm) + emitc(rvc_jalr(rs), ctx); + else if (rvc_enabled() && !rd && rs && !imm) + emitc(rvc_jr(rs), ctx); + else + emit(rv_jalr(rd, rs, imm), ctx); +} + +static inline void emit_mv(u8 rd, u8 rs, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && rs) + emitc(rvc_mv(rd, rs), ctx); + else + emit(rv_addi(rd, rs, 0), ctx); +} + +static inline void emit_add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && rd == rs1 && rs2) + emitc(rvc_add(rd, rs2), ctx); + else + emit(rv_add(rd, rs1, rs2), ctx); +} + +static inline void emit_addi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd == RV_REG_SP && rd == rs && is_10b_int(imm) && imm && !(imm & 0xf)) + emitc(rvc_addi16sp(imm), ctx); + else if (rvc_enabled() && is_creg(rd) && rs == RV_REG_SP && is_10b_uint(imm) && + !(imm & 0x3) && imm) + emitc(rvc_addi4spn(rd, imm), ctx); + else if (rvc_enabled() && rd && rd == rs && imm && is_6b_int(imm)) + emitc(rvc_addi(rd, imm), ctx); + else + emit(rv_addi(rd, rs, imm), ctx); +} + +static inline void emit_li(u8 rd, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && is_6b_int(imm)) + emitc(rvc_li(rd, imm), ctx); + else + emit(rv_addi(rd, RV_REG_ZERO, imm), ctx); +} + +static inline void emit_lui(u8 rd, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && rd != RV_REG_SP && is_6b_int(imm) && imm) + emitc(rvc_lui(rd, imm), ctx); + else + emit(rv_lui(rd, imm), ctx); +} + +static inline void emit_slli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && rd == rs && imm && (u32)imm < __riscv_xlen) + emitc(rvc_slli(rd, imm), ctx); + else + emit(rv_slli(rd, rs, imm), ctx); +} + +static inline void emit_andi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs && is_6b_int(imm)) + emitc(rvc_andi(rd, imm), ctx); + else + emit(rv_andi(rd, rs, imm), ctx); +} + +static inline void emit_srli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen) + emitc(rvc_srli(rd, imm), ctx); + else + emit(rv_srli(rd, rs, imm), ctx); +} + +static inline void emit_srai(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen) + emitc(rvc_srai(rd, imm), ctx); + else + emit(rv_srai(rd, 
rs, imm), ctx); +} + +static inline void emit_sub(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2)) + emitc(rvc_sub(rd, rs2), ctx); + else + emit(rv_sub(rd, rs1, rs2), ctx); +} + +static inline void emit_or(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2)) + emitc(rvc_or(rd, rs2), ctx); + else + emit(rv_or(rd, rs1, rs2), ctx); +} + +static inline void emit_and(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2)) + emitc(rvc_and(rd, rs2), ctx); + else + emit(rv_and(rd, rs1, rs2), ctx); +} + +static inline void emit_xor(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2)) + emitc(rvc_xor(rd, rs2), ctx); + else + emit(rv_xor(rd, rs1, rs2), ctx); +} + +static inline void emit_lw(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_8b_uint(off) && !(off & 0x3)) + emitc(rvc_lwsp(rd, off), ctx); + else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_7b_uint(off) && !(off & 0x3)) + emitc(rvc_lw(rd, off, rs1), ctx); + else + emit(rv_lw(rd, off, rs1), ctx); +} + +static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rs1 == RV_REG_SP && is_8b_uint(off) && !(off & 0x3)) + emitc(rvc_swsp(off, rs2), ctx); + else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_7b_uint(off) && !(off & 0x3)) + emitc(rvc_sw(rs1, off, rs2), ctx); + else + emit(rv_sw(rs1, off, rs2), ctx); +} + +/* RV64-only helper functions. */ +#if __riscv_xlen == 64 + +static inline void emit_addiw(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rd && rd == rs && is_6b_int(imm)) + emitc(rvc_addiw(rd, imm), ctx); + else + emit(rv_addiw(rd, rs, imm), ctx); +} + +static inline void emit_ld(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_9b_uint(off) && !(off & 0x7)) + emitc(rvc_ldsp(rd, off), ctx); + else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_8b_uint(off) && !(off & 0x7)) + emitc(rvc_ld(rd, off, rs1), ctx); + else + emit(rv_ld(rd, off, rs1), ctx); +} + +static inline void emit_sd(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && rs1 == RV_REG_SP && is_9b_uint(off) && !(off & 0x7)) + emitc(rvc_sdsp(off, rs2), ctx); + else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_8b_uint(off) && !(off & 0x7)) + emitc(rvc_sd(rs1, off, rs2), ctx); + else + emit(rv_sd(rs1, off, rs2), ctx); +} + +static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2)) + emitc(rvc_subw(rd, rs2), ctx); + else + emit(rv_subw(rd, rs1, rs2), ctx); +} + #endif /* __riscv_xlen == 64 */ void bpf_jit_build_prologue(struct rv_jit_context *ctx); From da7a35062bcccd3f67779a357065ba707b02ff2b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Sun, 19 Jul 2020 23:17:41 -0700 Subject: [PATCH 1409/2056] libbpf bpf_helpers: Use __builtin_offsetof for offsetof The non-builtin route for offsetof has a dependency on size_t from stdlib.h/stdint.h that is undeclared and may break targets. The offsetof macro in bpf_helpers may disable the same macro in other headers that have a #ifdef offsetof guard. 
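A compressed sketch of both problems (the struct and the include order are hypothetical): once bpf_helpers.h has defined offsetof, a later header's guarded definition is skipped, and the surviving definition quietly relies on size_t being declared somewhere:

    /* no stddef.h/stdint.h included, as in a typical BPF program */

    #ifndef offsetof        /* bpf_helpers.h variant, taken first */
    #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER)
    #endif

    #ifndef offsetof        /* another header's variant, now skipped */
    #define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER)
    #endif

    struct hdr { int a; long b; };

    int off = offsetof(struct hdr, b);  /* fails to build: size_t is undeclared */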
Rather than add additional dependencies improve the offsetof macro declared here to use the builtin that is available since llvm 3.7 (the first with a BPF backend). Signed-off-by: Ian Rogers Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200720061741.1514673-1-irogers@google.com --- tools/lib/bpf/bpf_helpers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index a510d8ed716f..bc14db706b88 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -40,7 +40,7 @@ * Helper macro to manipulate data structures */ #ifndef offsetof -#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) +#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) \ From 18a4d8c97b841632920c16a6fa9216d1214f3db7 Mon Sep 17 00:00:00 2001 From: Luke Nelson Date: Mon, 20 Jul 2020 19:52:40 -0700 Subject: [PATCH 1410/2056] bpf, riscv: Use compressed instructions in the rv64 JIT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch uses the RVC support and encodings from bpf_jit.h to optimize the rv64 jit. The optimizations work by replacing emit(rv_X(...)) with a call to a helper function emit_X, which will emit a compressed version of the instruction when possible, and when RVC is enabled. The JIT continues to pass all tests in lib/test_bpf.c, and introduces no new failures to test_verifier; both with and without RVC being enabled. Most changes are straightforward replacements of emit(rv_X(...), ctx) with emit_X(..., ctx), with the following exceptions bearing mention; * Change emit_imm to sign-extend the value in "lower", since the checks for RVC (and the instructions themselves) treat the value as signed. Otherwise, small negative immediates will not be recognized as encodable using an RVC instruction. For example, without this change, emit_imm(rd, -1, ctx) would cause lower to become 4095, which is not a 6b int even though a "c.li rd, -1" instruction suffices. * For {BPF_MOV,BPF_ADD} BPF_X, drop using addiw,addw in the 32-bit cases since the values are zero-extended into the upper 32 bits in the following instructions anyways, and the addition commutes with zero-extension. (BPF_SUB BPF_X must still use subw since subtraction does not commute with zero-extension.) This patch avoids optimizing branches and jumps to use RVC instructions since surrounding code often makes assumptions about the sizes of emitted instructions. Optimizing these will require changing these functions (e.g., emit_branch) to dynamically compute jump offsets. The following are examples of the JITed code for the verifier selftest "direct packet read test#3 for CGROUP_SKB OK", without and with RVC enabled, respectively. The former uses 178 bytes, and the latter uses 112, for a ~37% reduction in code size for this example. 
Without RVC: 0: 02000813 addi a6,zero,32 4: fd010113 addi sp,sp,-48 8: 02813423 sd s0,40(sp) c: 02913023 sd s1,32(sp) 10: 01213c23 sd s2,24(sp) 14: 01313823 sd s3,16(sp) 18: 01413423 sd s4,8(sp) 1c: 03010413 addi s0,sp,48 20: 03056683 lwu a3,48(a0) 24: 02069693 slli a3,a3,0x20 28: 0206d693 srli a3,a3,0x20 2c: 03456703 lwu a4,52(a0) 30: 02071713 slli a4,a4,0x20 34: 02075713 srli a4,a4,0x20 38: 03856483 lwu s1,56(a0) 3c: 02049493 slli s1,s1,0x20 40: 0204d493 srli s1,s1,0x20 44: 03c56903 lwu s2,60(a0) 48: 02091913 slli s2,s2,0x20 4c: 02095913 srli s2,s2,0x20 50: 04056983 lwu s3,64(a0) 54: 02099993 slli s3,s3,0x20 58: 0209d993 srli s3,s3,0x20 5c: 09056a03 lwu s4,144(a0) 60: 020a1a13 slli s4,s4,0x20 64: 020a5a13 srli s4,s4,0x20 68: 00900313 addi t1,zero,9 6c: 006a7463 bgeu s4,t1,0x74 70: 00000a13 addi s4,zero,0 74: 02d52823 sw a3,48(a0) 78: 02e52a23 sw a4,52(a0) 7c: 02952c23 sw s1,56(a0) 80: 03252e23 sw s2,60(a0) 84: 05352023 sw s3,64(a0) 88: 00000793 addi a5,zero,0 8c: 02813403 ld s0,40(sp) 90: 02013483 ld s1,32(sp) 94: 01813903 ld s2,24(sp) 98: 01013983 ld s3,16(sp) 9c: 00813a03 ld s4,8(sp) a0: 03010113 addi sp,sp,48 a4: 00078513 addi a0,a5,0 a8: 00008067 jalr zero,0(ra) With RVC: 0: 02000813 addi a6,zero,32 4: 7179 c.addi16sp sp,-48 6: f422 c.sdsp s0,40(sp) 8: f026 c.sdsp s1,32(sp) a: ec4a c.sdsp s2,24(sp) c: e84e c.sdsp s3,16(sp) e: e452 c.sdsp s4,8(sp) 10: 1800 c.addi4spn s0,sp,48 12: 03056683 lwu a3,48(a0) 16: 1682 c.slli a3,0x20 18: 9281 c.srli a3,0x20 1a: 03456703 lwu a4,52(a0) 1e: 1702 c.slli a4,0x20 20: 9301 c.srli a4,0x20 22: 03856483 lwu s1,56(a0) 26: 1482 c.slli s1,0x20 28: 9081 c.srli s1,0x20 2a: 03c56903 lwu s2,60(a0) 2e: 1902 c.slli s2,0x20 30: 02095913 srli s2,s2,0x20 34: 04056983 lwu s3,64(a0) 38: 1982 c.slli s3,0x20 3a: 0209d993 srli s3,s3,0x20 3e: 09056a03 lwu s4,144(a0) 42: 1a02 c.slli s4,0x20 44: 020a5a13 srli s4,s4,0x20 48: 4325 c.li t1,9 4a: 006a7363 bgeu s4,t1,0x50 4e: 4a01 c.li s4,0 50: d914 c.sw a3,48(a0) 52: d958 c.sw a4,52(a0) 54: dd04 c.sw s1,56(a0) 56: 03252e23 sw s2,60(a0) 5a: 05352023 sw s3,64(a0) 5e: 4781 c.li a5,0 60: 7422 c.ldsp s0,40(sp) 62: 7482 c.ldsp s1,32(sp) 64: 6962 c.ldsp s2,24(sp) 66: 69c2 c.ldsp s3,16(sp) 68: 6a22 c.ldsp s4,8(sp) 6a: 6145 c.addi16sp sp,48 6c: 853e c.mv a0,a5 6e: 8082 c.jr ra Signed-off-by: Luke Nelson Signed-off-by: Alexei Starovoitov Cc: Björn Töpel Link: https://lore.kernel.org/bpf/20200721025241.8077-4-luke.r.nels@gmail.com --- arch/riscv/net/bpf_jit_comp64.c | 275 +++++++++++++++++--------------- 1 file changed, 144 insertions(+), 131 deletions(-) diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 55861269da2a..8a56b5293117 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -132,19 +132,23 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) * * This also means that we need to process LSB to MSB. */ - s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff; + s64 upper = (val + (1 << 11)) >> 12; + /* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw, + * and addi are signed and RVC checks will perform signed comparisons. 
+ */ + s64 lower = ((val & 0xfff) << 52) >> 52; int shift; if (is_32b_int(val)) { if (upper) - emit(rv_lui(rd, upper), ctx); + emit_lui(rd, upper, ctx); if (!upper) { - emit(rv_addi(rd, RV_REG_ZERO, lower), ctx); + emit_li(rd, lower, ctx); return; } - emit(rv_addiw(rd, rd, lower), ctx); + emit_addiw(rd, rd, lower, ctx); return; } @@ -154,9 +158,9 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) emit_imm(rd, upper, ctx); - emit(rv_slli(rd, rd, shift), ctx); + emit_slli(rd, rd, shift, ctx); if (lower) - emit(rv_addi(rd, rd, lower), ctx); + emit_addi(rd, rd, lower, ctx); } static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) @@ -164,43 +168,43 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx) int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8; if (seen_reg(RV_REG_RA, ctx)) { - emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx); store_offset -= 8; } - emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx); store_offset -= 8; if (seen_reg(RV_REG_S1, ctx)) { - emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S2, ctx)) { - emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S3, ctx)) { - emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S4, ctx)) { - emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S5, ctx)) { - emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S6, ctx)) { - emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx); + emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx); store_offset -= 8; } - emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); + emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx); /* Set return value. */ if (!is_tail_call) - emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); - emit(rv_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, - is_tail_call ? 4 : 0), /* skip TCC init */ - ctx); + emit_mv(RV_REG_A0, RV_REG_A5, ctx); + emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA, + is_tail_call ? 
4 : 0, /* skip TCC init */ + ctx); } static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff, @@ -280,8 +284,8 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff, static void emit_zext_32(u8 reg, struct rv_jit_context *ctx) { - emit(rv_slli(reg, reg, 32), ctx); - emit(rv_srli(reg, reg, 32), ctx); + emit_slli(reg, reg, 32, ctx); + emit_srli(reg, reg, 32, ctx); } static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) @@ -310,7 +314,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) /* if (TCC-- < 0) * goto out; */ - emit(rv_addi(RV_REG_T1, tcc, -1), ctx); + emit_addi(RV_REG_T1, tcc, -1, ctx); off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx); @@ -318,12 +322,12 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) * if (!prog) * goto out; */ - emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx); + emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx); off = offsetof(struct bpf_array, ptrs); if (is_12b_check(off, insn)) return -1; - emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx); + emit_ld(RV_REG_T2, off, RV_REG_T2, ctx); off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn)); emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx); @@ -331,8 +335,8 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) off = offsetof(struct bpf_prog, bpf_func); if (is_12b_check(off, insn)) return -1; - emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx); - emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); + emit_ld(RV_REG_T3, off, RV_REG_T2, ctx); + emit_mv(RV_REG_TCC, RV_REG_T1, ctx); __build_epilogue(true, ctx); return 0; } @@ -360,9 +364,9 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) { - emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_mv(RV_REG_T2, *rd, ctx); emit_zext_32(RV_REG_T2, ctx); - emit(rv_addi(RV_REG_T1, *rs, 0), ctx); + emit_mv(RV_REG_T1, *rs, ctx); emit_zext_32(RV_REG_T1, ctx); *rd = RV_REG_T2; *rs = RV_REG_T1; @@ -370,15 +374,15 @@ static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) { - emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); - emit(rv_addiw(RV_REG_T1, *rs, 0), ctx); + emit_addiw(RV_REG_T2, *rd, 0, ctx); + emit_addiw(RV_REG_T1, *rs, 0, ctx); *rd = RV_REG_T2; *rs = RV_REG_T1; } static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) { - emit(rv_addi(RV_REG_T2, *rd, 0), ctx); + emit_mv(RV_REG_T2, *rd, ctx); emit_zext_32(RV_REG_T2, ctx); emit_zext_32(RV_REG_T1, ctx); *rd = RV_REG_T2; @@ -386,7 +390,7 @@ static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) { - emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); + emit_addiw(RV_REG_T2, *rd, 0, ctx); *rd = RV_REG_T2; } @@ -432,7 +436,7 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx) if (ret) return ret; rd = bpf_to_rv_reg(BPF_REG_0, ctx); - emit(rv_addi(rd, RV_REG_A0, 0), ctx); + emit_mv(rd, RV_REG_A0, ctx); return 0; } @@ -458,7 +462,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit_zext_32(rd, ctx); break; } - emit(is64 ? 
rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx); + emit_mv(rd, rs, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -466,31 +470,35 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* dst = dst OP src */ case BPF_ALU | BPF_ADD | BPF_X: case BPF_ALU64 | BPF_ADD | BPF_X: - emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx); + emit_add(rd, rd, rs, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_SUB | BPF_X: case BPF_ALU64 | BPF_SUB | BPF_X: - emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx); + if (is64) + emit_sub(rd, rd, rs, ctx); + else + emit_subw(rd, rd, rs, ctx); + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_AND | BPF_X: case BPF_ALU64 | BPF_AND | BPF_X: - emit(rv_and(rd, rd, rs), ctx); + emit_and(rd, rd, rs, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_OR | BPF_X: case BPF_ALU64 | BPF_OR | BPF_X: - emit(rv_or(rd, rd, rs), ctx); + emit_or(rd, rd, rs, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_XOR | BPF_X: case BPF_ALU64 | BPF_XOR | BPF_X: - emit(rv_xor(rd, rd, rs), ctx); + emit_xor(rd, rd, rs, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -534,8 +542,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* dst = -dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: - emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) : - rv_subw(rd, RV_REG_ZERO, rd), ctx); + emit_sub(rd, RV_REG_ZERO, rd, ctx); if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -544,8 +551,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_END | BPF_FROM_LE: switch (imm) { case 16: - emit(rv_slli(rd, rd, 48), ctx); - emit(rv_srli(rd, rd, 48), ctx); + emit_slli(rd, rd, 48, ctx); + emit_srli(rd, rd, 48, ctx); break; case 32: if (!aux->verifier_zext) @@ -558,51 +565,51 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, break; case BPF_ALU | BPF_END | BPF_FROM_BE: - emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx); + emit_li(RV_REG_T2, 0, ctx); - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); if (imm == 16) goto out_be; - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); if (imm == 32) goto out_be; - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, 
RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); - emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); - emit(rv_srli(rd, rd, 8), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); + emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); + emit_srli(rd, rd, 8, ctx); out_be: - emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); + emit_andi(RV_REG_T1, rd, 0xff, ctx); + emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); - emit(rv_addi(rd, RV_REG_T2, 0), ctx); + emit_mv(rd, RV_REG_T2, ctx); break; /* dst = imm */ @@ -617,12 +624,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_ADD | BPF_K: case BPF_ALU64 | BPF_ADD | BPF_K: if (is_12b_int(imm)) { - emit(is64 ? rv_addi(rd, rd, imm) : - rv_addiw(rd, rd, imm), ctx); + emit_addi(rd, rd, imm, ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(is64 ? rv_add(rd, rd, RV_REG_T1) : - rv_addw(rd, rd, RV_REG_T1), ctx); + emit_add(rd, rd, RV_REG_T1, ctx); } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); @@ -630,12 +635,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_SUB | BPF_K: case BPF_ALU64 | BPF_SUB | BPF_K: if (is_12b_int(-imm)) { - emit(is64 ? rv_addi(rd, rd, -imm) : - rv_addiw(rd, rd, -imm), ctx); + emit_addi(rd, rd, -imm, ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(is64 ? 
rv_sub(rd, rd, RV_REG_T1) : - rv_subw(rd, rd, RV_REG_T1), ctx); + emit_sub(rd, rd, RV_REG_T1, ctx); } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); @@ -643,10 +646,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_ALU | BPF_AND | BPF_K: case BPF_ALU64 | BPF_AND | BPF_K: if (is_12b_int(imm)) { - emit(rv_andi(rd, rd, imm), ctx); + emit_andi(rd, rd, imm, ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(rv_and(rd, rd, RV_REG_T1), ctx); + emit_and(rd, rd, RV_REG_T1, ctx); } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); @@ -657,7 +660,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit(rv_ori(rd, rd, imm), ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(rv_or(rd, rd, RV_REG_T1), ctx); + emit_or(rd, rd, RV_REG_T1, ctx); } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); @@ -668,7 +671,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, emit(rv_xori(rd, rd, imm), ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(rv_xor(rd, rd, RV_REG_T1), ctx); + emit_xor(rd, rd, RV_REG_T1, ctx); } if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); @@ -699,19 +702,28 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, break; case BPF_ALU | BPF_LSH | BPF_K: case BPF_ALU64 | BPF_LSH | BPF_K: - emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx); + emit_slli(rd, rd, imm, ctx); + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_RSH | BPF_K: - emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx); + if (is64) + emit_srli(rd, rd, imm, ctx); + else + emit(rv_srliw(rd, rd, imm), ctx); + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; case BPF_ALU | BPF_ARSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: - emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx); + if (is64) + emit_srai(rd, rd, imm, ctx); + else + emit(rv_sraiw(rd, rd, imm), ctx); + if (!is64 && !aux->verifier_zext) emit_zext_32(rd, ctx); break; @@ -763,7 +775,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, if (BPF_OP(code) == BPF_JSET) { /* Adjust for and */ rvoff -= 4; - emit(rv_and(RV_REG_T1, rd, rs), ctx); + emit_and(RV_REG_T1, rd, rs, ctx); emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); } else { @@ -819,17 +831,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, rvoff = rv_offset(i, off, ctx); s = ctx->ninsns; if (is_12b_int(imm)) { - emit(rv_andi(RV_REG_T1, rd, imm), ctx); + emit_andi(RV_REG_T1, rd, imm, ctx); } else { emit_imm(RV_REG_T1, imm, ctx); - emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx); + emit_and(RV_REG_T1, rd, RV_REG_T1, ctx); } /* For jset32, we should clear the upper 32 bits of t1, but * sign-extension is sufficient here and saves one instruction, * as t1 is used only in comparison against zero. 
*/ if (!is64 && imm < 0) - emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx); + emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx); e = ctx->ninsns; rvoff -= ninsns_rvoff(e - s); emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); @@ -887,7 +899,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); emit(rv_lbu(rd, 0, RV_REG_T1), ctx); if (insn_is_zext(&insn[1])) return 1; @@ -899,7 +911,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); emit(rv_lhu(rd, 0, RV_REG_T1), ctx); if (insn_is_zext(&insn[1])) return 1; @@ -911,20 +923,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); emit(rv_lwu(rd, 0, RV_REG_T1), ctx); if (insn_is_zext(&insn[1])) return 1; break; case BPF_LDX | BPF_MEM | BPF_DW: if (is_12b_int(off)) { - emit(rv_ld(rd, off, rs), ctx); + emit_ld(rd, off, rs, ctx); break; } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); - emit(rv_ld(rd, 0, RV_REG_T1), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rs, ctx); + emit_ld(rd, 0, RV_REG_T1, ctx); break; /* ST: *(size *)(dst + off) = imm */ @@ -936,7 +948,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T2, off, ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit_add(RV_REG_T2, RV_REG_T2, rd, ctx); emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx); break; @@ -948,30 +960,30 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T2, off, ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); + emit_add(RV_REG_T2, RV_REG_T2, rd, ctx); emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx); break; case BPF_ST | BPF_MEM | BPF_W: emit_imm(RV_REG_T1, imm, ctx); if (is_12b_int(off)) { - emit(rv_sw(rd, off, RV_REG_T1), ctx); + emit_sw(rd, off, RV_REG_T1, ctx); break; } emit_imm(RV_REG_T2, off, ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); - emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx); + emit_add(RV_REG_T2, RV_REG_T2, rd, ctx); + emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx); break; case BPF_ST | BPF_MEM | BPF_DW: emit_imm(RV_REG_T1, imm, ctx); if (is_12b_int(off)) { - emit(rv_sd(rd, off, RV_REG_T1), ctx); + emit_sd(rd, off, RV_REG_T1, ctx); break; } emit_imm(RV_REG_T2, off, ctx); - emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); - emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx); + emit_add(RV_REG_T2, RV_REG_T2, rd, ctx); + emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx); break; /* STX: *(size *)(dst + off) = src */ @@ -982,7 +994,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); emit(rv_sb(RV_REG_T1, 0, rs), ctx); break; case BPF_STX | BPF_MEM | BPF_H: @@ -992,28 +1004,28 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); emit(rv_sh(RV_REG_T1, 0, rs), ctx); break; case BPF_STX | BPF_MEM | BPF_W: if (is_12b_int(off)) { - emit(rv_sw(rd, off, rs), ctx); + emit_sw(rd, off, rs, ctx); break; } emit_imm(RV_REG_T1, off, 
ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); - emit(rv_sw(RV_REG_T1, 0, rs), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sw(RV_REG_T1, 0, rs, ctx); break; case BPF_STX | BPF_MEM | BPF_DW: if (is_12b_int(off)) { - emit(rv_sd(rd, off, rs), ctx); + emit_sd(rd, off, rs, ctx); break; } emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); - emit(rv_sd(RV_REG_T1, 0, rs), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); + emit_sd(RV_REG_T1, 0, rs, ctx); break; /* STX XADD: lock *(u32 *)(dst + off) += src */ case BPF_STX | BPF_XADD | BPF_W: @@ -1021,10 +1033,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, case BPF_STX | BPF_XADD | BPF_DW: if (off) { if (is_12b_int(off)) { - emit(rv_addi(RV_REG_T1, rd, off), ctx); + emit_addi(RV_REG_T1, rd, off, ctx); } else { emit_imm(RV_REG_T1, off, ctx); - emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); + emit_add(RV_REG_T1, RV_REG_T1, rd, ctx); } rd = RV_REG_T1; @@ -1073,52 +1085,53 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx) /* First instruction is always setting the tail-call-counter * (TCC) register. This instruction is skipped for tail calls. + * Force using a 4-byte (non-compressed) instruction. */ emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx); - emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx); + emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx); if (seen_reg(RV_REG_RA, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx); store_offset -= 8; } - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx); store_offset -= 8; if (seen_reg(RV_REG_S1, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S2, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S3, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S4, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S5, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx); store_offset -= 8; } if (seen_reg(RV_REG_S6, ctx)) { - emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx); + emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx); store_offset -= 8; } - emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx); + emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx); if (bpf_stack_adjust) - emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx); + emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx); /* Program contains calls and tail calls, so RV_REG_TCC need * to be saved across calls. */ if (seen_tail_call(ctx) && seen_call(ctx)) - emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx); + emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx); ctx->stack_size = stack_adjust; } From 956fcfcd359512f15b19bcd157fa8206ed26605b Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Fri, 17 Jul 2020 20:30:59 +0800 Subject: [PATCH 1411/2056] tools/bpftool: Fix error handling in do_skeleton() The error path passed 0 to PTR_ERR() because obj was set to NULL before the message was formatted; fix that, and dump more detailed error info using libbpf_strerror().
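For context, a minimal user-space sketch of the underlying pitfall (using a simplified stand-in for PTR_ERR(), not bpftool code): once the error pointer has been overwritten with NULL, the encoded errno is gone, which is why the old message always printed 0.

#include <stdio.h>

/* simplified stand-in for PTR_ERR() from the tools copy of linux/err.h */
#define PTR_ERR(ptr) ((long)(ptr))

int main(void)
{
        void *obj = (void *)(long)-22;  /* e.g. ERR_PTR(-EINVAL) */

        printf("before clearing: %ld\n", PTR_ERR(obj)); /* prints -22 */
        obj = NULL;                     /* the old code cleared obj first... */
        printf("after clearing: %ld\n", PTR_ERR(obj));  /* ...so this prints 0 */
        return 0;
}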
Fixes: 5dc7a8b21144 ("bpftool, selftests/bpf: Embed object file inside skeleton") Signed-off-by: YueHaibing Signed-off-by: Alexei Starovoitov Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200717123059.29624-1-yuehaibing@huawei.com --- tools/bpf/bpftool/gen.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index b59d26e89367..8a4c2b3b0cd6 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -302,8 +302,11 @@ static int do_skeleton(int argc, char **argv) opts.object_name = obj_name; obj = bpf_object__open_mem(obj_data, file_sz, &opts); if (IS_ERR(obj)) { + char err_buf[256]; + + libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf)); + p_err("failed to open BPF object file: %s", err_buf); obj = NULL; - p_err("failed to open BPF object file: %ld", PTR_ERR(obj)); goto out; } From bc4f0548f683a3d53359cef15f088d2d5bb4bc39 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:33:58 -0700 Subject: [PATCH 1412/2056] bpf: Compute bpf_skc_to_*() helper socket btf ids at build time Currently, socket types (struct tcp_sock, udp_sock, etc.) used by bpf_skc_to_*() helpers are computed when vmlinux_btf is first built in the kernel. Commit 5a2798ab32ba ("bpf: Add BTF_ID_LIST/BTF_ID/BTF_ID_UNUSED macros") implemented a mechanism to compute btf_ids at kernel build time which can simplify kernel implementation and reduce runtime overhead by removing in-kernel btf_id calculation. This patch did exactly this, removing in-kernel btf_id computation and utilizing build-time btf_id computation. If CONFIG_DEBUG_INFO_BTF is not defined, BTF_ID_LIST will define an array with size of 5, which is not enough for btf_sock_ids. So define its own static array if CONFIG_DEBUG_INFO_BTF is not defined. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163358.1393023-1-yhs@fb.com --- include/linux/bpf.h | 4 ---- kernel/bpf/btf.c | 1 - net/core/filter.c | 49 +++++++++++++++++---------------------------- 3 files changed, 18 insertions(+), 36 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index adb16bdc5f0a..1df1c0fd3f28 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1541,7 +1541,6 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map) struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); void bpf_map_offload_map_free(struct bpf_map *map); -void init_btf_sock_ids(struct btf *btf); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1567,9 +1566,6 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) static inline void bpf_map_offload_map_free(struct bpf_map *map) { } -static inline void init_btf_sock_ids(struct btf *btf) -{ -} #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 03d6d43bb1d6..315cde73421b 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3672,7 +3672,6 @@ struct btf *btf_parse_vmlinux(void) goto errout; bpf_struct_ops_init(btf, log); - init_btf_sock_ids(btf); btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); diff --git a/net/core/filter.c b/net/core/filter.c index 2bd129b5ae74..5a65fb4b95ff 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9426,19 +9426,19 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) * sock_common as the first argument in its memory layout. 
*/ #define BTF_SOCK_TYPE_xxx \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, "inet_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, "inet_connection_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, "inet_request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, "inet_timewait_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, "request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, "sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, "sock_common") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, "tcp_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, "tcp_request_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, "tcp_timewait_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, "tcp6_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, "udp_sock") \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, "udp6_sock") + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) enum { #define BTF_SOCK_TYPE(name, str) name, @@ -9447,26 +9447,13 @@ BTF_SOCK_TYPE_xxx MAX_BTF_SOCK_TYPE, }; -static int btf_sock_ids[MAX_BTF_SOCK_TYPE]; - -#ifdef CONFIG_BPF_SYSCALL -static const char *bpf_sock_types[] = { -#define BTF_SOCK_TYPE(name, str) str, +#ifdef CONFIG_DEBUG_INFO_BTF +BTF_ID_LIST(btf_sock_ids) +#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE -}; - -void init_btf_sock_ids(struct btf *btf) -{ - int i, btf_id; - - for (i = 0; i < MAX_BTF_SOCK_TYPE; i++) { - btf_id = btf_find_by_name_kind(btf, bpf_sock_types[i], - BTF_KIND_STRUCT); - if (btf_id > 0) - btf_sock_ids[i] = btf_id; - } -} +#else +static u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; #endif static bool check_arg_btf_id(u32 btf_id, u32 arg) From d8dfe5bfe856c0c72b1750322dbfcad402e73373 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:33:59 -0700 Subject: [PATCH 1413/2056] tools/bpf: Sync btf_ids.h to tools Sync kernel header btf_ids.h to tools directory. Also define macro CONFIG_DEBUG_INFO_BTF before including btf_ids.h in prog_tests/resolve_btfids.c since non-stub definitions for BTF_ID_LIST etc. macros are defined under CONFIG_DEBUG_INFO_BTF. This prevented test_progs from failing. 
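For illustration, the usage pattern this sync enables in the tools tree looks roughly like the sketch below (the list name is made up; each BTF_ID() reserves a 4-byte slot in the .BTF_ids section that the resolve_btfids tool fills with the real BTF ID at build time, and only the non-stub macros emit those slots):

/* must come before the include so the non-stub macros are selected */
#define CONFIG_DEBUG_INFO_BTF
#include <linux/types.h>
#include <linux/btf_ids.h>

BTF_ID_LIST(example_ids)        /* starts a named list in .BTF_ids */
BTF_ID(struct, task_struct)     /* example_ids[0], patched by resolve_btfids */
BTF_ID(struct, file)            /* example_ids[1] */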
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163359.1393079-1-yhs@fb.com --- tools/include/linux/btf_ids.h | 11 ++++++++++- .../testing/selftests/bpf/prog_tests/resolve_btfids.c | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h index fe019774f8a7..1cdb56950ffe 100644 --- a/tools/include/linux/btf_ids.h +++ b/tools/include/linux/btf_ids.h @@ -3,6 +3,8 @@ #ifndef _LINUX_BTF_IDS_H #define _LINUX_BTF_IDS_H +#ifdef CONFIG_DEBUG_INFO_BTF + #include /* for __PASTE */ /* @@ -21,7 +23,7 @@ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ ".local " #symbol " ; \n" \ -".type " #symbol ", @object; \n" \ +".type " #symbol ", STT_OBJECT; \n" \ ".size " #symbol ", 4; \n" \ #symbol ": \n" \ ".zero 4 \n" \ @@ -83,5 +85,12 @@ asm( \ ".zero 4 \n" \ ".popsection; \n"); +#else + +#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID(prefix, name) +#define BTF_ID_UNUSED + +#endif /* CONFIG_DEBUG_INFO_BTF */ #endif diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index 403be6f36cba..22d83bba4e91 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -6,6 +6,7 @@ #include #include #include +#define CONFIG_DEBUG_INFO_BTF #include #include "test_progs.h" From 0f12e584b241285cf60a6227f3771fa444cfcf76 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:01 -0700 Subject: [PATCH 1414/2056] bpf: Add BTF_ID_LIST_GLOBAL in btf_ids.h Existing BTF_ID_LIST used a local static variable to store btf_ids. This patch provided a new macro BTF_ID_LIST_GLOBAL to store btf_ids in a global variable which can be shared among multiple files. The existing BTF_ID_LIST is still retained. Two reasons. First, BTF_ID_LIST is also used to build btf_ids for helper arguments which typically is an array of 5. Since typically different helpers have different signature, it makes little sense to share them. Second, some current computed btf_ids are indeed local. If later those btf_ids are shared between different files, they can use BTF_ID_LIST_GLOBAL then. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20200720163401.1393159-1-yhs@fb.com --- include/linux/btf_ids.h | 10 ++++-- tools/include/linux/btf_ids.h | 10 ++++-- .../selftests/bpf/prog_tests/resolve_btfids.c | 33 ++++++++++++++----- 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 1cdb56950ffe..77ab45baa095 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -57,17 +57,20 @@ asm( \ * .zero 4 * */ -#define __BTF_ID_LIST(name) \ +#define __BTF_ID_LIST(name, scope) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ -".local " #name "; \n" \ +"." #scope " " #name "; \n" \ #name ":; \n" \ ".popsection; \n"); \ #define BTF_ID_LIST(name) \ -__BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ extern u32 name[]; +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + /* * The BTF_ID_UNUSED macro defines 4 zero bytes. 
* It's used when we want to define 'unused' entry @@ -90,6 +93,7 @@ asm( \ #define BTF_ID_LIST(name) static u32 name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h index 1cdb56950ffe..77ab45baa095 100644 --- a/tools/include/linux/btf_ids.h +++ b/tools/include/linux/btf_ids.h @@ -57,17 +57,20 @@ asm( \ * .zero 4 * */ -#define __BTF_ID_LIST(name) \ +#define __BTF_ID_LIST(name, scope) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ -".local " #name "; \n" \ +"." #scope " " #name "; \n" \ #name ":; \n" \ ".popsection; \n"); \ #define BTF_ID_LIST(name) \ -__BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ extern u32 name[]; +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + /* * The BTF_ID_UNUSED macro defines 4 zero bytes. * It's used when we want to define 'unused' entry @@ -90,6 +93,7 @@ asm( \ #define BTF_ID_LIST(name) static u32 name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c index 22d83bba4e91..3b127cab4864 100644 --- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c +++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c @@ -28,7 +28,17 @@ struct symbol test_symbols[] = { { "func", BTF_KIND_FUNC, -1 }, }; -BTF_ID_LIST(test_list) +BTF_ID_LIST(test_list_local) +BTF_ID_UNUSED +BTF_ID(typedef, S) +BTF_ID(typedef, T) +BTF_ID(typedef, U) +BTF_ID(struct, S) +BTF_ID(union, U) +BTF_ID(func, func) + +extern __u32 test_list_global[]; +BTF_ID_LIST_GLOBAL(test_list_global) BTF_ID_UNUSED BTF_ID(typedef, S) BTF_ID(typedef, T) @@ -94,18 +104,25 @@ static int resolve_symbols(void) int test_resolve_btfids(void) { - unsigned int i; + __u32 *test_list, *test_lists[] = { test_list_local, test_list_global }; + unsigned int i, j; int ret = 0; if (resolve_symbols()) return -1; - /* Check BTF_ID_LIST(test_list) IDs */ - for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { - ret = CHECK(test_list[i] != test_symbols[i].id, - "id_check", - "wrong ID for %s (%d != %d)\n", test_symbols[i].name, - test_list[i], test_symbols[i].id); + /* Check BTF_ID_LIST(test_list_local) and + * BTF_ID_LIST_GLOBAL(test_list_global) IDs + */ + for (j = 0; j < ARRAY_SIZE(test_lists); j++) { + test_list = test_lists[j]; + for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) { + ret = CHECK(test_list[i] != test_symbols[i].id, + "id_check", + "wrong ID for %s (%d != %d)\n", + test_symbols[i].name, + test_list[i], test_symbols[i].id); + } } return ret; From fce557bcef119a1bc5ab3cb02678cf454bcaf424 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:02 -0700 Subject: [PATCH 1415/2056] bpf: Make btf_sock_ids global tcp and udp bpf_iter can reuse some socket ids in btf_sock_ids, so make it global. I put the extern definition in btf_ids.h as a central place so it can be easily discovered by developers. 
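A hypothetical consumer (not part of this patch, assuming CONFIG_NET is set) then only needs the header to index the shared array:

#include <linux/types.h>
#include <linux/btf_ids.h>

static u32 get_tcp_sock_btf_id(void)
{
        /* filled in at build time by resolve_btfids when
         * CONFIG_DEBUG_INFO_BTF is set, zero otherwise
         */
        return btf_sock_ids[BTF_SOCK_TYPE_TCP];
}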
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163402.1393427-1-yhs@fb.com --- include/linux/btf_ids.h | 30 ++++++++++++++++++++++++++++++ net/core/filter.c | 30 ++---------------------------- tools/include/linux/btf_ids.h | 30 ++++++++++++++++++++++++++++++ 3 files changed, 62 insertions(+), 28 deletions(-) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 77ab45baa095..4867d549e3c1 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -97,4 +97,34 @@ asm( \ #endif /* CONFIG_DEBUG_INFO_BTF */ +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. + */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + #endif diff --git a/net/core/filter.c b/net/core/filter.c index 5a65fb4b95ff..654c346b7d91 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -9421,39 +9421,13 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog) bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog); } -/* Define a list of socket types which can be the argument for - * skc_to_*_sock() helpers. All these sockets should have - * sock_common as the first argument in its memory layout. 
- */ -#define BTF_SOCK_TYPE_xxx \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) - -enum { -#define BTF_SOCK_TYPE(name, str) name, -BTF_SOCK_TYPE_xxx -#undef BTF_SOCK_TYPE -MAX_BTF_SOCK_TYPE, -}; - #ifdef CONFIG_DEBUG_INFO_BTF -BTF_ID_LIST(btf_sock_ids) +BTF_ID_LIST_GLOBAL(btf_sock_ids) #define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type) BTF_SOCK_TYPE_xxx #undef BTF_SOCK_TYPE #else -static u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; +u32 btf_sock_ids[MAX_BTF_SOCK_TYPE]; #endif static bool check_arg_btf_id(u32 btf_id, u32 arg) diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h index 77ab45baa095..4867d549e3c1 100644 --- a/tools/include/linux/btf_ids.h +++ b/tools/include/linux/btf_ids.h @@ -97,4 +97,34 @@ asm( \ #endif /* CONFIG_DEBUG_INFO_BTF */ +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. + */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + #endif From 6bd557275ad5a1adaff5082f4e24218ba344e0c0 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 20 Jul 2020 12:18:10 +0200 Subject: [PATCH 1416/2056] selftests/bpf: Fix test_lwt_seg6local.sh hangs OpenBSD netcat (Debian patchlevel 1.195-2) does not seem to react to SIGINT for whatever reason, causing prefix.pl to hang after test_lwt_seg6local.sh exits due to netcat inheriting test_lwt_seg6local.sh's file descriptors. Fix by using SIGTERM instead. 
Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720101810.84299-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/test_lwt_seg6local.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh index 785eabf2a593..5620919fde9e 100755 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh @@ -140,7 +140,7 @@ ip netns exec ns6 sysctl net.ipv6.conf.veth10.seg6_enabled=1 > /dev/null ip netns exec ns6 nc -l -6 -u -d 7330 > $TMP_FILE & ip netns exec ns1 bash -c "echo 'foobar' | nc -w0 -6 -u -p 2121 -s fb00::1 fb00::6 7330" sleep 5 # wait enough time to ensure the UDP datagram arrived to the last segment -kill -INT $! +kill -TERM $! if [[ $(< $TMP_FILE) != "foobar" ]]; then exit 1 From 951cf368bcb11d6f817709660cf5cd914072c36f Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 20 Jul 2020 09:34:03 -0700 Subject: [PATCH 1417/2056] bpf: net: Use precomputed btf_id for bpf iterators One additional field btf_id is added to struct bpf_ctx_arg_aux to store the precomputed btf_ids. The btf_id is computed at build time with BTF_ID_LIST or BTF_ID_LIST_GLOBAL macro definitions. All existing bpf iterators are changed to use precomputed btf_ids. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720163403.1393551-1-yhs@fb.com --- include/linux/bpf.h | 1 + kernel/bpf/btf.c | 5 +++-- kernel/bpf/map_iter.c | 7 ++++++- kernel/bpf/task_iter.c | 12 ++++++++++-- net/ipv4/tcp_ipv4.c | 4 +++- net/ipv4/udp.c | 4 +++- net/ipv6/route.c | 7 ++++++- net/netlink/af_netlink.c | 7 ++++++- 8 files changed, 38 insertions(+), 9 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1df1c0fd3f28..bae557ff2da8 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -668,6 +668,7 @@ struct bpf_jit_poke_descriptor { struct bpf_ctx_arg_aux { u32 offset; enum bpf_reg_type reg_type; + u32 btf_id; }; struct bpf_prog_aux { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 315cde73421b..ee36b7f60936 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3817,16 +3817,17 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, return true; /* this is a pointer to another type */ - info->reg_type = PTR_TO_BTF_ID; for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; if (ctx_arg_info->offset == off) { info->reg_type = ctx_arg_info->reg_type; - break; + info->btf_id = ctx_arg_info->btf_id; + return true; } } + info->reg_type = PTR_TO_BTF_ID; if (tgt_prog) { ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg); if (ret > 0) { diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index c69071e334bf..8a7af11b411f 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -4,6 +4,7 @@ #include #include #include +#include struct bpf_iter_seq_map_info { u32 mid; @@ -81,7 +82,10 @@ static const struct seq_operations bpf_map_seq_ops = { .show = bpf_map_seq_show, }; -static const struct bpf_iter_reg bpf_map_reg_info = { +BTF_ID_LIST(btf_bpf_map_id) +BTF_ID(struct, bpf_map) + +static struct bpf_iter_reg bpf_map_reg_info = { .target = "bpf_map", .seq_ops = &bpf_map_seq_ops, .init_seq_private = NULL, @@ -96,6 +100,7 @@ static const struct bpf_iter_reg bpf_map_reg_info = { static int __init bpf_map_iter_init(void) { + 
bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id; return bpf_iter_reg_target(&bpf_map_reg_info); } diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 4dbf2b6035f8..2feecf095609 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -7,6 +7,7 @@ #include #include #include +#include struct bpf_iter_seq_task_common { struct pid_namespace *ns; @@ -312,7 +313,11 @@ static const struct seq_operations task_file_seq_ops = { .show = task_file_seq_show, }; -static const struct bpf_iter_reg task_reg_info = { +BTF_ID_LIST(btf_task_file_ids) +BTF_ID(struct, task_struct) +BTF_ID(struct, file) + +static struct bpf_iter_reg task_reg_info = { .target = "task", .seq_ops = &task_seq_ops, .init_seq_private = init_seq_pidns, @@ -325,7 +330,7 @@ static const struct bpf_iter_reg task_reg_info = { }, }; -static const struct bpf_iter_reg task_file_reg_info = { +static struct bpf_iter_reg task_file_reg_info = { .target = "task_file", .seq_ops = &task_file_seq_ops, .init_seq_private = init_seq_pidns, @@ -344,10 +349,13 @@ static int __init task_iter_init(void) { int ret; + task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; ret = bpf_iter_reg_target(&task_reg_info); if (ret) return ret; + task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0]; + task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1]; return bpf_iter_reg_target(&task_file_reg_info); } late_initcall(task_iter_init); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 116c11a0aaed..a7f1b41482f8 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -76,6 +76,7 @@ #include #include #include +#include #include #include @@ -2954,7 +2955,7 @@ static void bpf_iter_fini_tcp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static const struct bpf_iter_reg tcp_reg_info = { +static struct bpf_iter_reg tcp_reg_info = { .target = "tcp", .seq_ops = &bpf_iter_tcp_seq_ops, .init_seq_private = bpf_iter_init_tcp, @@ -2969,6 +2970,7 @@ static const struct bpf_iter_reg tcp_reg_info = { static void __init bpf_iter_register(void) { + tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON]; if (bpf_iter_reg_target(&tcp_reg_info)) pr_warn("Warning: could not register bpf iterator tcp\n"); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b738c63d7a77..b5231ab350e0 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -106,6 +106,7 @@ #include #include #include +#include #include #include #include "udp_impl.h" @@ -3232,7 +3233,7 @@ static void bpf_iter_fini_udp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static const struct bpf_iter_reg udp_reg_info = { +static struct bpf_iter_reg udp_reg_info = { .target = "udp", .seq_ops = &bpf_iter_udp_seq_ops, .init_seq_private = bpf_iter_init_udp, @@ -3247,6 +3248,7 @@ static const struct bpf_iter_reg udp_reg_info = { static void __init bpf_iter_register(void) { + udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP]; if (bpf_iter_reg_target(&udp_reg_info)) pr_warn("Warning: could not register bpf iterator udp\n"); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 427b81cbc164..33f5efbad0a9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -61,6 +61,7 @@ #include #include #include +#include #ifdef CONFIG_SYSCTL #include @@ -6423,7 +6424,10 @@ void __init ip6_route_init_special_entries(void) #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt) -static const struct bpf_iter_reg ipv6_route_reg_info = { 
+BTF_ID_LIST(btf_fib6_info_id) +BTF_ID(struct, fib6_info) + +static struct bpf_iter_reg ipv6_route_reg_info = { .target = "ipv6_route", .seq_ops = &ipv6_route_seq_ops, .init_seq_private = bpf_iter_init_seq_net, @@ -6438,6 +6442,7 @@ static const struct bpf_iter_reg ipv6_route_reg_info = { static int __init bpf_iter_register(void) { + ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id; return bpf_iter_reg_target(&ipv6_route_reg_info); } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 4f2c3b14ddbf..3cd58f0c2de4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -60,6 +60,7 @@ #include #include #include +#include #include #include @@ -2803,7 +2804,10 @@ static const struct rhashtable_params netlink_rhashtable_params = { }; #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS) -static const struct bpf_iter_reg netlink_reg_info = { +BTF_ID_LIST(btf_netlink_sock_id) +BTF_ID(struct, netlink_sock) + +static struct bpf_iter_reg netlink_reg_info = { .target = "netlink", .seq_ops = &netlink_seq_ops, .init_seq_private = bpf_iter_init_seq_net, @@ -2818,6 +2822,7 @@ static const struct bpf_iter_reg netlink_reg_info = { static int __init bpf_iter_register(void) { + netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id; return bpf_iter_reg_target(&netlink_reg_info); } #endif From e4d9c2320716ea0e9ef59f503ddd8f253a642ddd Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 20 Jul 2020 13:48:06 +0200 Subject: [PATCH 1418/2056] samples/bpf, selftests/bpf: Use bpf_probe_read_kernel A handful of samples and selftests fail to build on s390, because after commit 0ebeea8ca8a4 ("bpf: Restrict bpf_probe_read{, str}() only to archs where they work") bpf_probe_read is not available anymore. Fix by using bpf_probe_read_kernel. 
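A self-contained sketch of the replacement pattern, modelled on samples/bpf/test_overhead_kprobe_kern.c and assuming the in-tree samples/bpf build environment for the includes (the program name here is made up):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/__set_task_comm")
int probe_set_comm(struct pt_regs *ctx)
{
        char newcomm[16] = {};

        /* bpf_probe_read_kernel() is available on every arch, unlike
         * bpf_probe_read() after commit 0ebeea8ca8a4
         */
        bpf_probe_read_kernel(newcomm, sizeof(newcomm),
                              (void *)PT_REGS_PARM2(ctx));
        return 0;
}

char _license[] SEC("license") = "GPL";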
Signed-off-by: Ilya Leoshkevich Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200720114806.88823-1-iii@linux.ibm.com --- samples/bpf/offwaketime_kern.c | 7 ++++++- samples/bpf/test_overhead_kprobe_kern.c | 12 +++++++++--- samples/bpf/tracex1_kern.c | 9 +++++++-- samples/bpf/tracex5_kern.c | 4 ++-- tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 3 ++- tools/testing/selftests/bpf/progs/bpf_iter_netlink.c | 6 +++--- tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c | 2 +- tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c | 2 +- tools/testing/selftests/bpf/progs/bpf_iter_udp4.c | 2 +- tools/testing/selftests/bpf/progs/bpf_iter_udp6.c | 2 +- 10 files changed, 33 insertions(+), 16 deletions(-) diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c index d459f73412a4..e74ee1cd4b9c 100644 --- a/samples/bpf/offwaketime_kern.c +++ b/samples/bpf/offwaketime_kern.c @@ -12,7 +12,12 @@ #include #include -#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) \ + ({ \ + typeof(P) val; \ + bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ + val; \ + }) #define MINBLOCK_US 1 diff --git a/samples/bpf/test_overhead_kprobe_kern.c b/samples/bpf/test_overhead_kprobe_kern.c index 8b811c29dc79..f6d593e47037 100644 --- a/samples/bpf/test_overhead_kprobe_kern.c +++ b/samples/bpf/test_overhead_kprobe_kern.c @@ -10,7 +10,12 @@ #include #include -#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) \ + ({ \ + typeof(P) val = 0; \ + bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ + val; \ + }) SEC("kprobe/__set_task_comm") int prog(struct pt_regs *ctx) @@ -25,8 +30,9 @@ int prog(struct pt_regs *ctx) tsk = (void *)PT_REGS_PARM1(ctx); pid = _(tsk->pid); - bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm); - bpf_probe_read(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx)); + bpf_probe_read_kernel(oldcomm, sizeof(oldcomm), &tsk->comm); + bpf_probe_read_kernel(newcomm, sizeof(newcomm), + (void *)PT_REGS_PARM2(ctx)); signal = _(tsk->signal); oom_score_adj = _(signal->oom_score_adj); return 0; diff --git a/samples/bpf/tracex1_kern.c b/samples/bpf/tracex1_kern.c index 8e2610e14475..3f4599c9a202 100644 --- a/samples/bpf/tracex1_kern.c +++ b/samples/bpf/tracex1_kern.c @@ -11,7 +11,12 @@ #include #include -#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) \ + ({ \ + typeof(P) val = 0; \ + bpf_probe_read_kernel(&val, sizeof(val), &(P)); \ + val; \ + }) /* kprobe is NOT a stable ABI * kernel functions can be removed, renamed or completely change semantics. 
@@ -34,7 +39,7 @@ int bpf_prog1(struct pt_regs *ctx) dev = _(skb->dev); len = _(skb->len); - bpf_probe_read(devname, sizeof(devname), dev->name); + bpf_probe_read_kernel(devname, sizeof(devname), dev->name); if (devname[0] == 'l' && devname[1] == 'o') { char fmt[] = "skb %p len %d\n"; diff --git a/samples/bpf/tracex5_kern.c b/samples/bpf/tracex5_kern.c index 32b49e8ab6bd..64a1f7550d7e 100644 --- a/samples/bpf/tracex5_kern.c +++ b/samples/bpf/tracex5_kern.c @@ -47,7 +47,7 @@ PROG(SYS__NR_write)(struct pt_regs *ctx) { struct seccomp_data sd; - bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); + bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] == 512) { char fmt[] = "write(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), @@ -60,7 +60,7 @@ PROG(SYS__NR_read)(struct pt_regs *ctx) { struct seccomp_data sd; - bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); + bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx)); if (sd.args[2] > 128 && sd.args[2] <= 1024) { char fmt[] = "read(fd=%d, buf=%p, size=%d)\n"; bpf_trace_printk(fmt, sizeof(fmt), diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c index 8468a608911e..d9b420972934 100644 --- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c @@ -71,7 +71,8 @@ int iter(struct bpf_iter__task_file *ctx) e.pid = task->tgid; e.id = get_obj_id(file->private_data, obj_type); - bpf_probe_read(&e.comm, sizeof(e.comm), task->group_leader->comm); + bpf_probe_read_kernel(&e.comm, sizeof(e.comm), + task->group_leader->comm); bpf_seq_write(ctx->meta->seq, &e, sizeof(e)); return 0; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c index 7de98a68599a..95989f4c99b5 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_netlink.c @@ -36,10 +36,10 @@ int dump_netlink(struct bpf_iter__netlink *ctx) if (!nlk->groups) { group = 0; } else { - /* FIXME: temporary use bpf_probe_read here, needs + /* FIXME: temporary use bpf_probe_read_kernel here, needs * verifier support to do direct access. */ - bpf_probe_read(&group, sizeof(group), &nlk->groups[0]); + bpf_probe_read_kernel(&group, sizeof(group), &nlk->groups[0]); } BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ", nlk->portid, (u32)group, @@ -56,7 +56,7 @@ int dump_netlink(struct bpf_iter__netlink *ctx) * with current verifier. 
*/ inode = SOCK_INODE(sk); - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); } BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c index 30fd587cb325..54380c5e1069 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c @@ -57,7 +57,7 @@ static long sock_i_ino(const struct sock *sk) return 0; inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); return ino; } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c index 10dec4392031..b4fbddfa4e10 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c @@ -57,7 +57,7 @@ static long sock_i_ino(const struct sock *sk) return 0; inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); return ino; } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c index 7053784575e4..f258583afbbd 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c @@ -18,7 +18,7 @@ static long sock_i_ino(const struct sock *sk) return 0; inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); return ino; } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c index c1175a6ecf43..65f93bb03f0f 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c @@ -25,7 +25,7 @@ static long sock_i_ino(const struct sock *sk) return 0; inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode; - bpf_probe_read(&ino, sizeof(ino), &inode->i_ino); + bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino); return ino; } From 9165e1d70fb34ce438e78aad90408cfa86e4c2d0 Mon Sep 17 00:00:00 2001 From: Tony Ambardar Date: Mon, 20 Jul 2020 19:48:16 -0700 Subject: [PATCH 1419/2056] bpftool: Use only nftw for file tree parsing The bpftool sources include code to walk file trees, but use multiple frameworks to do so: nftw and fts. While nftw conforms to POSIX/SUSv3 and is widely available, fts is not conformant and less common, especially on non-glibc systems. The inconsistent framework usage hampers maintenance and portability of bpftool, in particular for embedded systems. Standardize code usage by rewriting one fts-based function to use nftw and clean up some related function warnings by extending use of "const char *" arguments. This change helps in building bpftool against musl for OpenWrt. Also fix an unsafe call to dirname() by duplicating the string to pass, since some implementations may directly alter it. The same approach is used in libbpf.c. 
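Outside of bpftool, the general shape of the nftw()-based walk plus the dirname() hardening described above can be sketched as standalone C (illustrative only, not the bpftool code itself):

#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <libgen.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int walk_cb(const char *fpath, const struct stat *sb,
                   int typeflag, struct FTW *ftwbuf)
{
        if (typeflag == FTW_F) {
                /* dirname() may modify its argument, so hand it a copy */
                char *copy = strdup(fpath);

                if (copy) {
                        printf("%s (dir %s)\n", fpath, dirname(copy));
                        free(copy);
                }
        }
        return 0;       /* returning non-zero stops the walk */
}

int main(int argc, char **argv)
{
        /* FTW_PHYS: do not follow symlinks; at most 16 open directory fds */
        return nftw(argc > 1 ? argv[1] : ".", walk_cb, 16, FTW_PHYS) ? 1 : 0;
}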
Signed-off-by: Tony Ambardar Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200721024817.13701-1-Tony.Ambardar@gmail.com --- tools/bpf/bpftool/common.c | 143 +++++++++++++++++++++---------------- tools/bpf/bpftool/main.h | 4 +- 2 files changed, 85 insertions(+), 62 deletions(-) diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 9b28c69dd8e4..65303664417e 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ +#define _GNU_SOURCE #include #include #include -#include +#include #include #include #include @@ -161,24 +162,35 @@ int mount_tracefs(const char *target) return err; } -int open_obj_pinned(char *path, bool quiet) +int open_obj_pinned(const char *path, bool quiet) { - int fd; + char *pname; + int fd = -1; - fd = bpf_obj_get(path); - if (fd < 0) { + pname = strdup(path); + if (!pname) { if (!quiet) - p_err("bpf obj get (%s): %s", path, - errno == EACCES && !is_bpffs(dirname(path)) ? - "directory not in bpf file system (bpffs)" : - strerror(errno)); - return -1; + p_err("mem alloc failed"); + goto out_ret; } + fd = bpf_obj_get(pname); + if (fd < 0) { + if (!quiet) + p_err("bpf obj get (%s): %s", pname, + errno == EACCES && !is_bpffs(dirname(pname)) ? + "directory not in bpf file system (bpffs)" : + strerror(errno)); + goto out_free; + } + +out_free: + free(pname); +out_ret: return fd; } -int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type) +int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type) { enum bpf_obj_type type; int fd; @@ -368,71 +380,82 @@ void print_hex_data_json(uint8_t *data, size_t len) jsonw_end_array(json_wtr); } +/* extra params for nftw cb */ +static struct pinned_obj_table *build_fn_table; +static enum bpf_obj_type build_fn_type; + +static int do_build_table_cb(const char *fpath, const struct stat *sb, + int typeflag, struct FTW *ftwbuf) +{ + struct bpf_prog_info pinned_info; + __u32 len = sizeof(pinned_info); + struct pinned_obj *obj_node; + enum bpf_obj_type objtype; + int fd, err = 0; + + if (typeflag != FTW_F) + goto out_ret; + + fd = open_obj_pinned(fpath, true); + if (fd < 0) + goto out_ret; + + objtype = get_fd_type(fd); + if (objtype != build_fn_type) + goto out_close; + + memset(&pinned_info, 0, sizeof(pinned_info)); + if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len)) + goto out_close; + + obj_node = calloc(1, sizeof(*obj_node)); + if (!obj_node) { + err = -1; + goto out_close; + } + + obj_node->id = pinned_info.id; + obj_node->path = strdup(fpath); + if (!obj_node->path) { + err = -1; + free(obj_node); + goto out_close; + } + + hash_add(build_fn_table->table, &obj_node->hash, obj_node->id); +out_close: + close(fd); +out_ret: + return err; +} + int build_pinned_obj_table(struct pinned_obj_table *tab, enum bpf_obj_type type) { - struct bpf_prog_info pinned_info = {}; - struct pinned_obj *obj_node = NULL; - __u32 len = sizeof(pinned_info); struct mntent *mntent = NULL; - enum bpf_obj_type objtype; FILE *mntfile = NULL; - FTSENT *ftse = NULL; - FTS *fts = NULL; - int fd, err; + int flags = FTW_PHYS; + int nopenfd = 16; + int err = 0; mntfile = setmntent("/proc/mounts", "r"); if (!mntfile) return -1; + build_fn_table = tab; + build_fn_type = type; + while ((mntent = getmntent(mntfile))) { - char *path[] = { mntent->mnt_dir, NULL }; + char *path = mntent->mnt_dir; if (strncmp(mntent->mnt_type, "bpf", 3) != 0) continue; - 
- fts = fts_open(path, 0, NULL); - if (!fts) - continue; - - while ((ftse = fts_read(fts))) { - if (!(ftse->fts_info & FTS_F)) - continue; - fd = open_obj_pinned(ftse->fts_path, true); - if (fd < 0) - continue; - - objtype = get_fd_type(fd); - if (objtype != type) { - close(fd); - continue; - } - memset(&pinned_info, 0, sizeof(pinned_info)); - err = bpf_obj_get_info_by_fd(fd, &pinned_info, &len); - if (err) { - close(fd); - continue; - } - - obj_node = malloc(sizeof(*obj_node)); - if (!obj_node) { - close(fd); - fts_close(fts); - fclose(mntfile); - return -1; - } - - memset(obj_node, 0, sizeof(*obj_node)); - obj_node->id = pinned_info.id; - obj_node->path = strdup(ftse->fts_path); - hash_add(tab->table, &obj_node->hash, obj_node->id); - - close(fd); - } - fts_close(fts); + err = nftw(path, do_build_table_cb, nopenfd, flags); + if (err) + break; } fclose(mntfile); - return 0; + return err; } void delete_pinned_obj_table(struct pinned_obj_table *tab) diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 78d34e860713..e3a79b5a9960 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -152,8 +152,8 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv, int get_fd_type(int fd); const char *get_fd_type_name(enum bpf_obj_type type); char *get_fdinfo(int fd, const char *key); -int open_obj_pinned(char *path, bool quiet); -int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type); +int open_obj_pinned(const char *path, bool quiet); +int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type); int mount_bpffs_for_pin(const char *name); int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***)); int do_pin_fd(int fd, const char *name); From c17e317802d8d407cbb6b0d2cd3aaf2b3de068b1 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 21 Jul 2020 15:01:57 +0800 Subject: [PATCH 1420/2056] net: mdio-mux-gpio: use devm_gpiod_get_array() Use devm_gpiod_get_array() to simplify the error handling and exit code path. Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/phy/mdio-mux-gpio.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 6c8960df43b0..10a758fdc9e6 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -42,25 +42,21 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev) struct gpio_descs *gpios; int r; - gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); + gpios = devm_gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); if (IS_ERR(gpios)) return PTR_ERR(gpios); s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); - if (!s) { - gpiod_put_array(gpios); + if (!s) return -ENOMEM; - } s->gpios = gpios; r = mdio_mux_init(&pdev->dev, pdev->dev.of_node, mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); - if (r != 0) { - gpiod_put_array(s->gpios); + if (r != 0) return r; - } pdev->dev.platform_data = s; return 0; @@ -70,7 +66,6 @@ static int mdio_mux_gpio_remove(struct platform_device *pdev) { struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); mdio_mux_uninit(s->mux_handle); - gpiod_put_array(s->gpios); return 0; } From 02293dd4b79ec16ae0f680222768fb1582f03b42 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 21 Jul 2020 10:55:17 +0300 Subject: [PATCH 1421/2056] enetc: Refine buffer descriptor ring sizes It's time to differentiate between Rx and Tx ring sizes. 
Not only are Tx rings processed differently from Rx rings, but their default number also differs - i.e. up to 8 Tx rings per device (8 traffic classes) vs. 2 Rx rings (one per CPU). So let's set Tx ring sizes to half the size of the Rx rings for now, to be conservative. The default ring sizes were decreased as well (to the next lower power of 2), to reduce the memory footprint, buffering etc., since the measurements I've made so far show that the rings are very unlikely to get full. This change also anticipates the introduction of the dynamic interrupt moderation (dim) algorithm which operates on maximum packet thresholds of 256 packets for Rx and 128 packets for Tx. Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 4 ++-- drivers/net/ethernet/freescale/enetc/enetc.h | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 3f32b85ba2cf..d91e52618681 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1064,8 +1064,8 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) struct enetc_si *si = priv->si; int cpus = num_online_cpus(); - priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE; - priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE; + priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE; + priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE; /* Enable all available TX rings in order to configure as many * priorities as possible, when needed. diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index b705464f6882..0dd8ee179753 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -44,8 +44,9 @@ struct enetc_ring_stats { unsigned int rx_alloc_errs; }; -#define ENETC_BDR_DEFAULT_SIZE 1024 -#define ENETC_DEFAULT_TX_WORK 256 +#define ENETC_RX_RING_DEFAULT_SIZE 512 +#define ENETC_TX_RING_DEFAULT_SIZE 256 +#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) struct enetc_bdr { struct device *dev; /* for DMA mapping */ From bbb96dc7fa1a82cd176994e8af4d7b1079b22a52 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 21 Jul 2020 10:55:18 +0300 Subject: [PATCH 1422/2056] enetc: Factor out the traffic start/stop procedures A reliable traffic pause (and reconfiguration) procedure is needed to be able to safely make h/w configuration changes during run-time, like changing the mode in which the interrupts are operating (i.e. with or without coalescing), as opposed to making on-the-fly register updates that may be subject to h/w or s/w concurrency issues. To this end, the code responsible for the run-time device configuration that starts and stops, respectively, the traffic flow through the device has been extracted from the enetc_open/_close procedures into the separate standalone enetc_start/_stop procedures. Traffic stop should be as graceful as possible; it lets the executing napi threads finish while the interrupts stay disabled. But since the napi thread will try to re-enable interrupts by clearing the device's unmask register, the enable_irq/disable_irq API has been used to avoid this potential concurrency issue and make the traffic pause procedure more reliable. Signed-off-by: Claudiu Manoil Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 74 +++++++++++++------- 1 file changed, 49 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index d91e52618681..51a1c97aedac 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1264,6 +1264,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv) dev_err(priv->dev, "request_irq() failed!\n"); goto irq_err; } + disable_irq(irq); v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); @@ -1306,7 +1307,7 @@ static void enetc_free_irqs(struct enetc_ndev_priv *priv) } } -static void enetc_enable_interrupts(struct enetc_ndev_priv *priv) +static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) { int i; @@ -1322,7 +1323,7 @@ static void enetc_enable_interrupts(struct enetc_ndev_priv *priv) } } -static void enetc_disable_interrupts(struct enetc_ndev_priv *priv) +static void enetc_clear_interrupts(struct enetc_ndev_priv *priv) { int i; @@ -1369,10 +1370,33 @@ static int enetc_phy_connect(struct net_device *ndev) return 0; } +static void enetc_start(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + int i; + + enetc_setup_interrupts(priv); + + for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + napi_enable(&priv->int_vector[i]->napi); + enable_irq(irq); + } + + if (ndev->phydev) + phy_start(ndev->phydev); + else + netif_carrier_on(ndev); + + netif_tx_start_all_queues(ndev); +} + int enetc_open(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); - int i, err; + int err; err = enetc_setup_irqs(priv); if (err) @@ -1390,8 +1414,6 @@ int enetc_open(struct net_device *ndev) if (err) goto err_alloc_rx; - enetc_setup_bdrs(priv); - err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings); if (err) goto err_set_queues; @@ -1400,17 +1422,8 @@ int enetc_open(struct net_device *ndev) if (err) goto err_set_queues; - for (i = 0; i < priv->bdr_int_num; i++) - napi_enable(&priv->int_vector[i]->napi); - - enetc_enable_interrupts(priv); - - if (ndev->phydev) - phy_start(ndev->phydev); - else - netif_carrier_on(ndev); - - netif_tx_start_all_queues(ndev); + enetc_setup_bdrs(priv); + enetc_start(ndev); return 0; @@ -1427,28 +1440,39 @@ int enetc_open(struct net_device *ndev) return err; } -int enetc_close(struct net_device *ndev) +static void enetc_stop(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); int i; netif_tx_stop_all_queues(ndev); - if (ndev->phydev) { - phy_stop(ndev->phydev); - phy_disconnect(ndev->phydev); - } else { - netif_carrier_off(ndev); - } - for (i = 0; i < priv->bdr_int_num; i++) { + int irq = pci_irq_vector(priv->si->pdev, + ENETC_BDR_INT_BASE_IDX + i); + + disable_irq(irq); napi_synchronize(&priv->int_vector[i]->napi); napi_disable(&priv->int_vector[i]->napi); } - enetc_disable_interrupts(priv); + if (ndev->phydev) + phy_stop(ndev->phydev); + else + netif_carrier_off(ndev); + + enetc_clear_interrupts(priv); +} + +int enetc_close(struct net_device *ndev) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + + enetc_stop(ndev); enetc_clear_bdrs(priv); + if (ndev->phydev) + phy_disconnect(ndev->phydev); enetc_free_rxtx_rings(priv); enetc_free_rx_resources(priv); enetc_free_tx_resources(priv); From 12460a0abe53d2134a88386a857fca350d239620 Mon Sep 17 00:00:00 2001 From: Claudiu 
Manoil Date: Tue, 21 Jul 2020 10:55:19 +0300 Subject: [PATCH 1423/2056] enetc: Fix interrupt coalescing register naming Interrupt coalescing registers naming in the current revision of the Ref Man (RM) is ICR, deprecating the ICIR name used in earlier (draft) versions of the RM. Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 4 ++-- drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 2 +- drivers/net/ethernet/freescale/enetc/enetc_hw.h | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 51a1c97aedac..be594c7af538 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -1140,7 +1140,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR); /* enable Tx ints by setting pkt thr to 1 */ - enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1); + enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); tbmr = ENETC_TBMR_EN; if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) @@ -1174,7 +1174,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); /* enable Rx ints by setting pkt thr to 1 */ - enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1); + enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); rbmr = ENETC_RBMR_EN; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 34bd1f3fb415..8aeaa3de0012 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -19,7 +19,7 @@ static const u32 enetc_txbdr_regs[] = { static const u32 enetc_rxbdr_regs[] = { ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, - ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICR0, ENETC_RBIER }; static const u32 enetc_port_regs[] = { diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index 135bf46354ea..d70ee3722844 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -121,8 +121,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_RBIER 0xa0 #define ENETC_RBIER_RXTIE BIT(0) #define ENETC_RBIDR 0xa4 -#define ENETC_RBICIR0 0xa8 -#define ENETC_RBICIR0_ICEN BIT(31) +#define ENETC_RBICR0 0xa8 +#define ENETC_RBICR0_ICEN BIT(31) /* TX BDR reg offsets */ #define ENETC_TBMR 0 @@ -141,8 +141,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_TBIER 0xa0 #define ENETC_TBIER_TXTIE BIT(0) #define ENETC_TBIDR 0xa4 -#define ENETC_TBICIR0 0xa8 -#define ENETC_TBICIR0_ICEN BIT(31) +#define ENETC_TBICR0 0xa8 +#define ENETC_TBICR0_ICEN BIT(31) #define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) From 058d9cfa6075463396e42fb7be64c015cbdda80b Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 21 Jul 2020 10:55:20 +0300 Subject: [PATCH 1424/2056] enetc: Drop redundant ____cacheline_aligned_in_smp 'struct enetc_bdr' is already '____cacheline_aligned_in_smp'. Signed-off-by: Claudiu Manoil Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/enetc/enetc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 0dd8ee179753..81e9072e10d4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -195,7 +195,7 @@ struct enetc_int_vector { struct napi_struct napi; char name[ENETC_INT_NAME_MAX]; - struct enetc_bdr rx_ring ____cacheline_aligned_in_smp; + struct enetc_bdr rx_ring; struct enetc_bdr tx_ring[]; }; From 915710812ba0ff11f49aa810025f91bbad8da20a Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 21 Jul 2020 10:55:21 +0300 Subject: [PATCH 1425/2056] enetc: Add interrupt coalescing support Enable programming of the interrupt coalescing registers and allow manual configuration of the coalescing time thresholds via ethtool. Packet thresholds have been fixed to predetermined values as there's no point in making them run-time configurable, also anticipating the dynamic interrupt moderation (DIM) algorithm which uses fixed packet thresholds as well. If the interface is up when the operation mode of traffic interrupt events is changed by the user (i.e. switching from default per-packet interrupts to coalesced interrupts), the traffic needs to be paused in the process. This patch also prepares the ground for introducing DIM on Rx. Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 34 +++++++-- drivers/net/ethernet/freescale/enetc/enetc.h | 18 +++++ .../ethernet/freescale/enetc/enetc_ethtool.c | 73 ++++++++++++++++++- .../net/ethernet/freescale/enetc/enetc_hw.h | 19 ++++- 4 files changed, 133 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index be594c7af538..f4593c044043 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -265,6 +265,7 @@ static irqreturn_t enetc_msix(int irq, void *data) /* disable interrupts */ enetc_wr_reg(v->rbier, 0); + enetc_wr_reg(v->ricr1, v->rx_ictt); for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); @@ -1268,6 +1269,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv) v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER); v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER); + v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1); enetc_wr(hw, ENETC_SIMSIRRV(i), entry); @@ -1309,17 +1311,35 @@ static void enetc_free_irqs(struct enetc_ndev_priv *priv) static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) { + struct enetc_hw *hw = &priv->si->hw; + u32 icpt, ictt; int i; /* enable Tx & Rx event indication */ - for (i = 0; i < priv->num_rx_rings; i++) { - enetc_rxbdr_wr(&priv->si->hw, i, - ENETC_RBIER, ENETC_RBIER_RXTIE); + if (priv->ic_mode & ENETC_IC_RX_MANUAL) { + icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); + /* init to non-0 minimum, will be adjusted later */ + ictt = 0x1; + } else { + icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */ + ictt = 0; } + for (i = 0; i < priv->num_rx_rings; i++) { + enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt); + enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt); + enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE); + } + + if (priv->ic_mode & ENETC_IC_TX_MANUAL) + icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR); + else + icpt = 0x1; /* enable Tx ints by setting pkt 
thr to 1 */ + for (i = 0; i < priv->num_tx_rings; i++) { - enetc_txbdr_wr(&priv->si->hw, i, - ENETC_TBIER, ENETC_TBIER_TXTIE); + enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt); + enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt); + enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE); } } @@ -1370,7 +1390,7 @@ static int enetc_phy_connect(struct net_device *ndev) return 0; } -static void enetc_start(struct net_device *ndev) +void enetc_start(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); int i; @@ -1440,7 +1460,7 @@ int enetc_open(struct net_device *ndev) return err; } -static void enetc_stop(struct net_device *ndev) +void enetc_stop(struct net_device *ndev) { struct enetc_ndev_priv *priv = netdev_priv(ndev); int i; diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 81e9072e10d4..4e3af7f07892 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -190,8 +190,10 @@ static inline bool enetc_si_is_pf(struct enetc_si *si) struct enetc_int_vector { void __iomem *rbier; void __iomem *tbier_base; + void __iomem *ricr1; unsigned long tx_rings_map; int count_tx_rings; + u32 rx_ictt; struct napi_struct napi; char name[ENETC_INT_NAME_MAX]; @@ -221,6 +223,18 @@ enum enetc_active_offloads { ENETC_F_QCI = BIT(3), }; +/* interrupt coalescing modes */ +enum enetc_ic_mode { + /* one interrupt per frame */ + ENETC_IC_NONE = 0, + /* activated when int coalescing time is set to a non-0 value */ + ENETC_IC_RX_MANUAL = BIT(0), + ENETC_IC_TX_MANUAL = BIT(1), +}; + +#define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2) + struct enetc_ndev_priv { struct net_device *ndev; struct device *dev; /* dma-mapping device */ @@ -245,6 +259,8 @@ struct enetc_ndev_priv { struct device_node *phy_node; phy_interface_t if_mode; + int ic_mode; + u32 tx_ictt; }; /* Messaging */ @@ -274,6 +290,8 @@ void enetc_free_si_resources(struct enetc_ndev_priv *priv); int enetc_open(struct net_device *ndev); int enetc_close(struct net_device *ndev); +void enetc_start(struct net_device *ndev); +void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); int enetc_set_features(struct net_device *ndev, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 8aeaa3de0012..521f3b4cd250 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -14,12 +14,14 @@ static const u32 enetc_si_regs[] = { static const u32 enetc_txbdr_regs[] = { ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1, - ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER + ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0, + ENETC_TBICR1 }; static const u32 enetc_rxbdr_regs[] = { ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0, - ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICR0, ENETC_RBIER + ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0, + ENETC_RBICR1 }; static const u32 enetc_port_regs[] = { @@ -561,6 +563,65 @@ static void enetc_get_ringparam(struct net_device *ndev, } } +static int enetc_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); 
+ struct enetc_int_vector *v = priv->int_vector[0]; + + ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt); + ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt); + + ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; + ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; + + return 0; +} + +static int enetc_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ic) +{ + struct enetc_ndev_priv *priv = netdev_priv(ndev); + u32 rx_ictt, tx_ictt; + int i, ic_mode; + bool changed; + + tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs); + rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs); + + if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR) + return -EOPNOTSUPP; + + if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR) + return -EOPNOTSUPP; + + ic_mode = ENETC_IC_NONE; + ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0; + ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0; + + /* commit the settings */ + changed = (ic_mode != priv->ic_mode); + + priv->ic_mode = ic_mode; + priv->tx_ictt = tx_ictt; + + for (i = 0; i < priv->bdr_int_num; i++) { + struct enetc_int_vector *v = priv->int_vector[i]; + + v->rx_ictt = rx_ictt; + } + + if (netif_running(ndev) && changed) { + /* reconfigure the operation mode of h/w interrupts, + * traffic needs to be paused in the process + */ + enetc_stop(ndev); + enetc_start(ndev); + } + + return 0; +} + static int enetc_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) { @@ -617,6 +678,8 @@ static int enetc_set_wol(struct net_device *dev, } static const struct ethtool_ops enetc_pf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_regs_len = enetc_get_reglen, .get_regs = enetc_get_regs, .get_sset_count = enetc_get_sset_count, @@ -629,6 +692,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { .get_rxfh = enetc_get_rxfh, .set_rxfh = enetc_set_rxfh, .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .get_link = ethtool_op_get_link, @@ -638,6 +703,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { }; static const struct ethtool_ops enetc_vf_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_MAX_FRAMES, .get_regs_len = enetc_get_reglen, .get_regs = enetc_get_regs, .get_sset_count = enetc_get_sset_count, @@ -649,6 +716,8 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = { .get_rxfh = enetc_get_rxfh, .set_rxfh = enetc_set_rxfh, .get_ringparam = enetc_get_ringparam, + .get_coalesce = enetc_get_coalesce, + .set_coalesce = enetc_set_coalesce, .get_link = ethtool_op_get_link, .get_ts_info = enetc_get_ts_info, }; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index d70ee3722844..17cf7c94fdb5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -122,7 +122,10 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_RBIER_RXTIE BIT(0) #define ENETC_RBIDR 0xa4 #define ENETC_RBICR0 0xa8 -#define ENETC_RBICR0_ICEN BIT(31) +#define ENETC_RBICR0_ICEN BIT(31) +#define ENETC_RBICR0_ICPT_MASK 0x1ff +#define ENETC_RBICR0_SET_ICPT(n) ((n) & ENETC_RBICR0_ICPT_MASK) +#define ENETC_RBICR1 0xac /* TX BDR reg offsets */ #define ENETC_TBMR 0 @@ -142,7 +145,10 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_TBIER_TXTIE BIT(0) #define ENETC_TBIDR 0xa4 
#define ENETC_TBICR0 0xa8 -#define ENETC_TBICR0_ICEN BIT(31) +#define ENETC_TBICR0_ICEN BIT(31) +#define ENETC_TBICR0_ICPT_MASK 0xf +#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK) +#define ENETC_TBICR1 0xac #define ENETC_RTBLENR_LEN(n) ((n) & ~0x7) @@ -787,6 +793,15 @@ struct enetc_cbd { }; #define ENETC_CLK 400000000ULL +static inline u32 enetc_cycles_to_usecs(u32 cycles) +{ + return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK); +} + +static inline u32 enetc_usecs_to_cycles(u32 usecs) +{ + return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL); +} /* port time gating control register */ #define ENETC_QBV_PTGCR_OFFSET 0x11a00 From ae0e6a5d16271f3a773bded360fa0eb0bdd25b10 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 21 Jul 2020 10:55:22 +0300 Subject: [PATCH 1426/2056] enetc: Add adaptive interrupt coalescing Use the generic dynamic interrupt moderation (dim) framework to implement adaptive interrupt coalescing on Rx. With the per-packet interrupt scheme, a high interrupt rate has been noted for moderate traffic flows, leading to high CPU utilization. The 'dim' scheme implemented by the current patch addresses this issue, improving CPU utilization while using minimal coalescing time thresholds in order to preserve a good latency. On the Tx side, use an optimal time threshold value by default. This value has been optimized for Tx TCP streams at a rate of around 85kpps on a 1G link, at which rate half of the Tx ring size (128) gets filled in 1500 usecs. Scaling this down to 2.5G links yields the current value of 600 usecs, which is conservative and gives good enough results for 1G links too (see next). Below are some measurement results for before and after this patch (and related dependencies), for a system with two ARM Cortex-A72 CPUs @1.3GHz (32 KB L1 data cache), using 60-second-long netperf TCP stream tests @ 1Gbit link (maximum throughput):
1) 1 Rx TCP flow, both Rx and Tx processed by the same NAPI thread on the same CPU:
         CPU utilization       int rate (ints/sec)
Before:  50%-60% (over 50%)    92k
After:   13%-22%               3.5k-12k
Comment: Major CPU utilization improvement for a single Rx TCP flow (i.e. netperf -t TCP_MAERTS) on a single CPU. Usually settles under 16% for longer tests.
2) 4 Rx TCP flows + 4 Tx TCP flows (+ pings to check the latency):
         Total CPU utilization   Total int rate (ints/sec)
Before:  ~80% (spikes to 90%)    ~100k
After:   60% (more steady)       ~4k
Comment: Important improvement for this load test, while the ping test outcome does not show any notable difference compared to before.
Signed-off-by: Claudiu Manoil Signed-off-by: David S.
Miller --- drivers/net/ethernet/freescale/enetc/Kconfig | 2 + drivers/net/ethernet/freescale/enetc/enetc.c | 48 ++++++++++++++++++- drivers/net/ethernet/freescale/enetc/enetc.h | 11 ++++- .../ethernet/freescale/enetc/enetc_ethtool.c | 19 ++++++-- 4 files changed, 73 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig index 2b43848e1363..37b804f8bd76 100644 --- a/drivers/net/ethernet/freescale/enetc/Kconfig +++ b/drivers/net/ethernet/freescale/enetc/Kconfig @@ -4,6 +4,7 @@ config FSL_ENETC depends on PCI && PCI_MSI select FSL_ENETC_MDIO select PHYLIB + select DIMLIB help This driver supports NXP ENETC gigabit ethernet controller PCIe physical function (PF) devices, managing ENETC Ports at a privileged @@ -15,6 +16,7 @@ config FSL_ENETC_VF tristate "ENETC VF driver" depends on PCI && PCI_MSI select PHYLIB + select DIMLIB help This driver supports NXP ENETC gigabit ethernet controller PCIe virtual function (VF) devices enabled by the ENETC PF driver. diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index f4593c044043..f50353cbb4db 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -279,6 +279,34 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget); static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit); +static void enetc_rx_dim_work(struct work_struct *w) +{ + struct dim *dim = container_of(w, struct dim, work); + struct dim_cq_moder moder = + net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + struct enetc_int_vector *v = + container_of(dim, struct enetc_int_vector, rx_dim); + + v->rx_ictt = enetc_usecs_to_cycles(moder.usec); + dim->state = DIM_START_MEASURE; +} + +static void enetc_rx_net_dim(struct enetc_int_vector *v) +{ + struct dim_sample dim_sample; + + v->comp_cnt++; + + if (!v->rx_napi_work) + return; + + dim_update_sample(v->comp_cnt, + v->rx_ring.stats.packets, + v->rx_ring.stats.bytes, + &dim_sample); + net_dim(&v->rx_dim, dim_sample); +} + static int enetc_poll(struct napi_struct *napi, int budget) { struct enetc_int_vector @@ -294,12 +322,19 @@ static int enetc_poll(struct napi_struct *napi, int budget) work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget); if (work_done == budget) complete = false; + if (work_done) + v->rx_napi_work = true; if (!complete) return budget; napi_complete_done(napi, work_done); + if (likely(v->rx_dim_en)) + enetc_rx_net_dim(v); + + v->rx_napi_work = false; + /* enable interrupts */ enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE); @@ -1075,6 +1110,8 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv) priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings); priv->num_tx_rings = si->num_tx_rings; priv->bdr_int_num = cpus; + priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL; + priv->tx_ictt = ENETC_TXIC_TIMETHR; /* SI specific */ si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE; @@ -1316,7 +1353,8 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv) int i; /* enable Tx & Rx event indication */ - if (priv->ic_mode & ENETC_IC_RX_MANUAL) { + if (priv->ic_mode & + (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) { icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR); /* init to non-0 minimum, will be adjusted later */ ictt = 0x1; @@ -1786,6 +1824,12 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) priv->int_vector[i] = v; + /* init defaults for adaptive IC 
*/ + if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) { + v->rx_ictt = 0x1; + v->rx_dim_en = true; + } + INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work); netif_napi_add(priv->ndev, &v->napi, enetc_poll, NAPI_POLL_WEIGHT); v->count_tx_rings = v_tx_rings; @@ -1821,6 +1865,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv) fail: while (i--) { netif_napi_del(&priv->int_vector[i]->napi); + cancel_work_sync(&priv->int_vector[i]->rx_dim.work); kfree(priv->int_vector[i]); } @@ -1837,6 +1882,7 @@ void enetc_free_msix(struct enetc_ndev_priv *priv) struct enetc_int_vector *v = priv->int_vector[i]; netif_napi_del(&v->napi); + cancel_work_sync(&v->rx_dim.work); } for (i = 0; i < priv->num_rx_rings; i++) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index 4e3af7f07892..d309803cfeb6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "enetc_hw.h" @@ -194,12 +195,15 @@ struct enetc_int_vector { unsigned long tx_rings_map; int count_tx_rings; u32 rx_ictt; - struct napi_struct napi; + u16 comp_cnt; + bool rx_dim_en, rx_napi_work; + struct napi_struct napi ____cacheline_aligned_in_smp; + struct dim rx_dim ____cacheline_aligned_in_smp; char name[ENETC_INT_NAME_MAX]; struct enetc_bdr rx_ring; struct enetc_bdr tx_ring[]; -}; +} ____cacheline_aligned_in_smp; struct enetc_cls_rule { struct ethtool_rx_flow_spec fs; @@ -230,10 +234,13 @@ enum enetc_ic_mode { /* activated when int coalescing time is set to a non-0 value */ ENETC_IC_RX_MANUAL = BIT(0), ENETC_IC_TX_MANUAL = BIT(1), + /* use dynamic interrupt moderation */ + ENETC_IC_RX_ADAPTIVE = BIT(2), }; #define ENETC_RXIC_PKTTHR min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2) #define ENETC_TXIC_PKTTHR min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2) +#define ENETC_TXIC_TIMETHR enetc_usecs_to_cycles(600) struct enetc_ndev_priv { struct net_device *ndev; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index 521f3b4cd250..1dab83fbca77 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -575,6 +575,8 @@ static int enetc_get_coalesce(struct net_device *ndev, ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR; ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR; + ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE; + return 0; } @@ -596,11 +598,17 @@ static int enetc_set_coalesce(struct net_device *ndev, return -EOPNOTSUPP; ic_mode = ENETC_IC_NONE; + if (ic->use_adaptive_rx_coalesce) { + ic_mode |= ENETC_IC_RX_ADAPTIVE; + rx_ictt = 0x1; + } else { + ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0; + } + ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0; - ic_mode |= rx_ictt ? 
ENETC_IC_RX_MANUAL : 0; /* commit the settings */ - changed = (ic_mode != priv->ic_mode); + changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt); priv->ic_mode = ic_mode; priv->tx_ictt = tx_ictt; @@ -609,6 +617,7 @@ static int enetc_set_coalesce(struct net_device *ndev, struct enetc_int_vector *v = priv->int_vector[i]; v->rx_ictt = rx_ictt; + v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE); } if (netif_running(ndev) && changed) { @@ -679,7 +688,8 @@ static int enetc_set_wol(struct net_device *dev, static const struct ethtool_ops enetc_pf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_regs_len = enetc_get_reglen, .get_regs = enetc_get_regs, .get_sset_count = enetc_get_sset_count, @@ -704,7 +714,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = { static const struct ethtool_ops enetc_vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, .get_regs_len = enetc_get_reglen, .get_regs = enetc_get_regs, .get_sset_count = enetc_get_sset_count, From 1ceb7ee7a6e7a8195fd1fbc3c7d554305bc26607 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:03:45 +0100 Subject: [PATCH 1427/2056] net: phylink: update ethtool reporting for fixed-link modes Comparing the ethtool output from phylink and non-phylink fixed-link setups shows that we have some differences: - The "auto-negotiation" fields are different; phylink reports these as "No", non-phylink reports these as "Yes" for the supported and advertising masks. - The link partner advertisement is set to the link speed with non- phylink, but phylink leaves this unset, causing all link partner fields to be omitted. The phylink ethtool output also disagrees with the software emulated PHY dump via the MII registers. Update the phylink fixed-link parsing code so that we better reflect the behaviour of the non-phylink code that this facility replaces, and bring the ethtool interface more into line with the report from via the MII interface. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index dae6c8b51d7f..0fd5a11966aa 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -241,8 +241,10 @@ static int phylink_parse_fixedlink(struct phylink *pl, phylink_set(pl->supported, MII); phylink_set(pl->supported, Pause); phylink_set(pl->supported, Asym_Pause); + phylink_set(pl->supported, Autoneg); if (s) { __set_bit(s->bit, pl->supported); + __set_bit(s->bit, pl->link_config.lp_advertising); } else { phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n", pl->link_config.duplex == DUPLEX_FULL ? "full" : "half", From b06e5cac213cffbbf642d7e46e06719e02c75a4b Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:03:50 +0100 Subject: [PATCH 1428/2056] net: phylink: rejig link state tracking Rejig the link state tracking, so that we can use the current state in a future patch. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 0fd5a11966aa..b36e0315f0b1 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -578,9 +578,14 @@ static void phylink_resolve(struct work_struct *w) struct phylink *pl = container_of(w, struct phylink, resolve); struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; - int link_changed; + bool cur_link_state; mutex_lock(&pl->state_mutex); + if (pl->netdev) + cur_link_state = netif_carrier_ok(ndev); + else + cur_link_state = pl->old_link_state; + if (pl->phylink_disable_state) { pl->mac_link_dropped = false; link_state.link = false; @@ -623,12 +628,7 @@ static void phylink_resolve(struct work_struct *w) } } - if (pl->netdev) - link_changed = (link_state.link != netif_carrier_ok(ndev)); - else - link_changed = (link_state.link != pl->old_link_state); - - if (link_changed) { + if (link_state.link != cur_link_state) { pl->old_link_state = link_state.link; if (!link_state.link) phylink_link_down(pl); From 319bfafe3494b6fd6cdf48dcdc9d17cf2a77d405 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:03:55 +0100 Subject: [PATCH 1429/2056] net: phylink: rearrange resolve mac_config() call Use a boolean to indicate whether mac_config() should be called during a resolution. This allows resolution to have a single location where mac_config() will be called, which will allow us to make decisions about how and what we do. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b36e0315f0b1..8ffe5df5c296 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -421,13 +421,6 @@ static void phylink_mac_config(struct phylink *pl, pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state); } -static void phylink_mac_config_up(struct phylink *pl, - const struct phylink_link_state *state) -{ - if (state->link) - phylink_mac_config(pl, state); -} - static void phylink_mac_pcs_an_restart(struct phylink *pl) { if (pl->link_config.an_enabled && @@ -578,6 +571,7 @@ static void phylink_resolve(struct work_struct *w) struct phylink *pl = container_of(w, struct phylink, resolve); struct phylink_link_state link_state; struct net_device *ndev = pl->netdev; + bool mac_config = false; bool cur_link_state; mutex_lock(&pl->state_mutex); @@ -596,12 +590,12 @@ static void phylink_resolve(struct work_struct *w) case MLO_AN_PHY: link_state = pl->phy_state; phylink_apply_manual_flow(pl, &link_state); - phylink_mac_config_up(pl, &link_state); + mac_config = link_state.link; break; case MLO_AN_FIXED: phylink_get_fixed_state(pl, &link_state); - phylink_mac_config_up(pl, &link_state); + mac_config = link_state.link; break; case MLO_AN_INBAND: @@ -619,15 +613,16 @@ static void phylink_resolve(struct work_struct *w) /* If we have a PHY, we need to update with * the PHY flow control bits. 
*/ link_state.pause = pl->phy_state.pause; - phylink_apply_manual_flow(pl, &link_state); - phylink_mac_config(pl, &link_state); - } else { - phylink_apply_manual_flow(pl, &link_state); + mac_config = true; } + phylink_apply_manual_flow(pl, &link_state); break; } } + if (mac_config) + phylink_mac_config(pl, &link_state); + if (link_state.link != cur_link_state) { pl->old_link_state = link_state.link; if (!link_state.link) From 16319a7d31b5df881ab61c9a9e8b4265355d157f Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:00 +0100 Subject: [PATCH 1430/2056] net: phylink: ensure link is down when changing interface The only PHYs that are used with phylink which change their interface are the BCM84881 and MV88X3310 family, both of which only change their interface modes on link-up events. However, rather than relying upon this behaviour by the PHY, we should give a stronger guarantee when resolving that the link will be down whenever we change the interface mode. This patch implements that stronger guarantee for resolve. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 8ffe5df5c296..1507ea8a9385 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -620,8 +620,18 @@ static void phylink_resolve(struct work_struct *w) } } - if (mac_config) + if (mac_config) { + if (link_state.interface != pl->link_config.interface) { + /* The interface has changed, force the link down and + * then reconfigure. + */ + if (cur_link_state) { + phylink_link_down(pl); + cur_link_state = false; + } + } phylink_mac_config(pl, &link_state); + } if (link_state.link != cur_link_state) { pl->old_link_state = link_state.link; From 5005b163440f3fe18b434a20e8944dad508a15e8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:05 +0100 Subject: [PATCH 1431/2056] net: phylink: update PCS when changing interface during resolution The only PHYs that are used with phylink which change their interface are the BCM84881 and MV88X3310 family, both of which only change their interface modes on link-up events. This will break when drivers are converted to split-PCS. Fix this. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 1507ea8a9385..f1693ec63366 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -629,8 +629,15 @@ static void phylink_resolve(struct work_struct *w) phylink_link_down(pl); cur_link_state = false; } + phylink_pcs_config(pl, false, &link_state); + pl->link_config.interface = link_state.interface; + } else { + /* The interface remains unchanged, only the speed, + * duplex or pause settings have changed. Call the + * old mac_config() method to configure the MAC/PCS. + */ + phylink_mac_config(pl, &link_state); } - phylink_mac_config(pl, &link_state); } if (link_state.link != cur_link_state) { From 7cceb599d15d6639b1298ae400c26d822148e86f Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:10 +0100 Subject: [PATCH 1432/2056] net: phylink: avoid mac_config calls Avoid calling mac_config() when using split PCS, and the interface remains the same. 
Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index f1693ec63366..424a927d7889 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -631,10 +631,12 @@ static void phylink_resolve(struct work_struct *w) } phylink_pcs_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; - } else { + } else if (!pl->pcs_ops) { /* The interface remains unchanged, only the speed, * duplex or pause settings have changed. Call the - * old mac_config() method to configure the MAC/PCS. + * old mac_config() method to configure the MAC/PCS + * only if we do not have a PCS installed (an + * unconverted user.) */ phylink_mac_config(pl, &link_state); } From c8cab719cc64f8999503aff584a7bef7edb28e24 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:16 +0100 Subject: [PATCH 1433/2056] net: phylink: simplify ksettings_set() implementation Simplify the ksettings_set() implementation to look more like phylib's implementation; use a switch() for validating the autoneg setting, and use the linkmode_modify() helper to set the autoneg bit in the advertisement mask. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 424a927d7889..103d2a550415 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1314,25 +1314,24 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, __ETHTOOL_DECLARE_LINK_MODE_MASK(support); struct ethtool_link_ksettings our_kset; struct phylink_link_state config; + const struct phy_setting *s; int ret; ASSERT_RTNL(); - if (kset->base.autoneg != AUTONEG_DISABLE && - kset->base.autoneg != AUTONEG_ENABLE) - return -EINVAL; - linkmode_copy(support, pl->supported); config = pl->link_config; + config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE; - /* Mask out unsupported advertisements */ + /* Mask out unsupported advertisements, and force the autoneg bit */ linkmode_and(config.advertising, kset->link_modes.advertising, support); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising, + config.an_enabled); /* FIXME: should we reject autoneg if phy/mac does not support it? */ - if (kset->base.autoneg == AUTONEG_DISABLE) { - const struct phy_setting *s; - + switch (kset->base.autoneg) { + case AUTONEG_DISABLE: /* Autonegotiation disabled, select a suitable speed and * duplex. 
*/ @@ -1351,19 +1350,19 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, config.speed = s->speed; config.duplex = s->duplex; - config.an_enabled = false; + break; - __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); - } else { + case AUTONEG_ENABLE: /* If we have a fixed link, refuse to enable autonegotiation */ if (pl->cur_link_an_mode == MLO_AN_FIXED) return -EINVAL; config.speed = SPEED_UNKNOWN; config.duplex = DUPLEX_UNKNOWN; - config.an_enabled = true; + break; - __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + default: + return -EINVAL; } if (pl->phydev) { From cbc1bb1e4689ce1e6654485b6865f10b98f6ddb4 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:21 +0100 Subject: [PATCH 1434/2056] net: phylink: simplify phy case for ksettings_set method When we have a PHY attached, an ethtool ksettings_set() call only really needs to call through to the phylib equivalent; phylib will call back to us when the link changes so we can update our state. Therefore, we can bypass most of our ksettings_set() call for this case. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 108 +++++++++++++++++--------------------- 1 file changed, 49 insertions(+), 59 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 103d2a550415..967c068d16c8 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1312,13 +1312,33 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, const struct ethtool_link_ksettings *kset) { __ETHTOOL_DECLARE_LINK_MODE_MASK(support); - struct ethtool_link_ksettings our_kset; struct phylink_link_state config; const struct phy_setting *s; - int ret; ASSERT_RTNL(); + if (pl->phydev) { + /* We can rely on phylib for this update; we also do not need + * to update the pl->link_config settings: + * - the configuration returned via ksettings_get() will come + * from phylib whenever a PHY is present. + * - link_config.interface will be updated by the PHY calling + * back via phylink_phy_change() and a subsequent resolve. + * - initial link configuration for PHY mode comes from the + * last phy state updated via phylink_phy_change(). + * - other configuration changes (e.g. pause modes) are + * performed directly via phylib. + * - if in in-band mode with a PHY, the link configuration + * is passed on the link from the PHY, and all of + * link_config.{speed,duplex,an_enabled,pause} are not used. + * - the only possible use would be link_config.advertising + * pause modes when in 1000base-X mode with a PHY, but in + * the presence of a PHY, this should not be changed as that + * should be determined from the media side advertisement. + */ + return phy_ethtool_ksettings_set(pl->phydev, kset); + } + linkmode_copy(support, pl->supported); config = pl->link_config; config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE; @@ -1365,65 +1385,35 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, return -EINVAL; } - if (pl->phydev) { - /* If we have a PHY, we process the kset change via phylib. - * phylib will call our link state function if the PHY - * parameters have changed, which will trigger a resolve - * and update the MAC configuration. + /* For a fixed link, this isn't able to change any parameters, + * which just leaves inband mode. 
+ */ + if (phylink_validate(pl, support, &config)) + return -EINVAL; + + /* If autonegotiation is enabled, we must have an advertisement */ + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) + return -EINVAL; + + mutex_lock(&pl->state_mutex); + linkmode_copy(pl->link_config.advertising, config.advertising); + pl->link_config.interface = config.interface; + pl->link_config.speed = config.speed; + pl->link_config.duplex = config.duplex; + pl->link_config.an_enabled = kset->base.autoneg != + AUTONEG_DISABLE; + + if (pl->cur_link_an_mode == MLO_AN_INBAND && + !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + /* If in 802.3z mode, this updates the advertisement. + * + * If we are in SGMII mode without a PHY, there is no + * advertisement; the only thing we have is the pause + * modes which can only come from a PHY. */ - our_kset = *kset; - linkmode_copy(our_kset.link_modes.advertising, - config.advertising); - our_kset.base.speed = config.speed; - our_kset.base.duplex = config.duplex; - - ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset); - if (ret) - return ret; - - mutex_lock(&pl->state_mutex); - /* Save the new configuration */ - linkmode_copy(pl->link_config.advertising, - our_kset.link_modes.advertising); - pl->link_config.interface = config.interface; - pl->link_config.speed = our_kset.base.speed; - pl->link_config.duplex = our_kset.base.duplex; - pl->link_config.an_enabled = our_kset.base.autoneg != - AUTONEG_DISABLE; - mutex_unlock(&pl->state_mutex); - } else { - /* For a fixed link, this isn't able to change any parameters, - * which just leaves inband mode. - */ - if (phylink_validate(pl, support, &config)) - return -EINVAL; - - /* If autonegotiation is enabled, we must have an advertisement */ - if (config.an_enabled && - phylink_is_empty_linkmode(config.advertising)) - return -EINVAL; - - mutex_lock(&pl->state_mutex); - linkmode_copy(pl->link_config.advertising, config.advertising); - pl->link_config.interface = config.interface; - pl->link_config.speed = config.speed; - pl->link_config.duplex = config.duplex; - pl->link_config.an_enabled = kset->base.autoneg != - AUTONEG_DISABLE; - - if (pl->cur_link_an_mode == MLO_AN_INBAND && - !test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) { - /* If in 802.3z mode, this updates the advertisement. - * - * If we are in SGMII mode without a PHY, there is no - * advertisement; the only thing we have is the pause - * modes which can only come from a PHY. - */ - phylink_pcs_config(pl, true, &pl->link_config); - } - mutex_unlock(&pl->state_mutex); + phylink_pcs_config(pl, true, &pl->link_config); } + mutex_unlock(&pl->state_mutex); return 0; } From a83c8829d18d47939980db16a0ec79fa365ce6b0 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:26 +0100 Subject: [PATCH 1435/2056] net: phylink: use config.an_enabled in ksettings_set method Rather than recomputing whether AN is enabled, use config.an_enabled. Suggested-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 967c068d16c8..6cb9ca74341b 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1400,8 +1400,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, pl->link_config.interface = config.interface; pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; - pl->link_config.an_enabled = kset->base.autoneg != - AUTONEG_DISABLE; + pl->link_config.an_enabled = config.an_enabled; if (pl->cur_link_an_mode == MLO_AN_INBAND && !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { From 1e1bf14a89c0f5d11b62a8974dc53862e214b131 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:31 +0100 Subject: [PATCH 1436/2056] net: phylink: simplify fixed-link case for ksettings_set method For fixed links, we only allow the current settings, so this should be a matter of merely rejecting an attempt to change the settings. If the settings agree, then there is nothing more we need to do. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 6cb9ca74341b..452d509803ca 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1360,22 +1360,31 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, if (!s) return -EINVAL; - /* If we have a fixed link (as specified by firmware), refuse - * to change link parameters. + /* If we have a fixed link, refuse to change link parameters. + * If the link parameters match, accept them but do nothing. */ - if (pl->cur_link_an_mode == MLO_AN_FIXED && - (s->speed != pl->link_config.speed || - s->duplex != pl->link_config.duplex)) - return -EINVAL; + if (pl->cur_link_an_mode == MLO_AN_FIXED) { + if (s->speed != pl->link_config.speed || + s->duplex != pl->link_config.duplex) + return -EINVAL; + return 0; + } config.speed = s->speed; config.duplex = s->duplex; break; case AUTONEG_ENABLE: - /* If we have a fixed link, refuse to enable autonegotiation */ - if (pl->cur_link_an_mode == MLO_AN_FIXED) - return -EINVAL; + /* If we have a fixed link, allow autonegotiation (since that + * is our default case) but do not allow the advertisement to + * be changed. If the advertisement matches, simply return. + */ + if (pl->cur_link_an_mode == MLO_AN_FIXED) { + if (!linkmode_equal(config.advertising, + pl->link_config.advertising)) + return -EINVAL; + return 0; + } config.speed = SPEED_UNKNOWN; config.duplex = DUPLEX_UNKNOWN; @@ -1385,8 +1394,8 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, return -EINVAL; } - /* For a fixed link, this isn't able to change any parameters, - * which just leaves inband mode. + /* We have ruled out the case with a PHY attached, and the + * fixed-link cases. All that is left are in-band links. 
*/ if (phylink_validate(pl, support, &config)) return -EINVAL; From 1571e700fd610c39e8b50b0110b1ee9badb2fe6a Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:36 +0100 Subject: [PATCH 1437/2056] net: phylink: in-band pause mode advertisement update for PCS Re-code the pause in-band advertisement update in light of the addition of PCS support, so that we perform the minimum required; only the PCS configuration function needs to be called in this case, followed by the request to trigger a restart of negotiation if the programmed advertisement changed. We need to change the pcs_config() signature to pass whether resolved pause should be passed to the MAC for setups such as mvneta and mvpp2 where doing so overrides the MAC manual flow controls. Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 55 ++++++++++++++++++++++++++++++++++++--- include/linux/phylink.h | 7 +++-- 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 452d509803ca..84a426401102 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -441,7 +441,9 @@ static void phylink_pcs_config(struct phylink *pl, bool force_restart, if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, state->interface, - state->advertising)) + state->advertising, + !!(pl->link_config.pause & + MLO_PAUSE_AN))) restart = true; phylink_mac_config(pl, state); @@ -450,6 +452,49 @@ static void phylink_pcs_config(struct phylink *pl, bool force_restart, phylink_mac_pcs_an_restart(pl); } +/* + * Reconfigure for a change of inband advertisement. + * If we have a separate PCS, we only need to call its pcs_config() method, + * and then restart AN if it indicates something changed. Otherwise, we do + * the full MAC reconfiguration. + */ +static int phylink_change_inband_advert(struct phylink *pl) +{ + int ret; + + if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) + return 0; + + if (!pl->pcs_ops) { + /* Legacy method */ + phylink_mac_config(pl, &pl->link_config); + phylink_mac_pcs_an_restart(pl); + return 0; + } + + phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__, + phylink_an_mode_str(pl->cur_link_an_mode), + phy_modes(pl->link_config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising, + pl->link_config.pause); + + /* Modern PCS-based method; update the advert at the PCS, and + * restart negotiation if the pcs_config() helper indicates that + * the programmed advertisement has changed. + */ + ret = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + pl->link_config.interface, + pl->link_config.advertising, + !!(pl->link_config.pause & MLO_PAUSE_AN)); + if (ret < 0) + return ret; + + if (ret > 0) + phylink_mac_pcs_an_restart(pl); + + return 0; +} + static void phylink_mac_pcs_get_state(struct phylink *pl, struct phylink_link_state *state) { @@ -1524,9 +1569,11 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, config->pause = pause_state; - if (!pl->phydev && !test_bit(PHYLINK_DISABLE_STOPPED, - &pl->phylink_disable_state)) - phylink_pcs_config(pl, true, &pl->link_config); + /* Update our in-band advertisement, triggering a renegotiation if + * the advertisement changed. 
+ */ + if (!pl->phydev) + phylink_change_inband_advert(pl); mutex_unlock(&pl->state_mutex); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index b32b8b45421b..d9913d8e6b91 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -286,7 +286,8 @@ struct phylink_pcs_ops { struct phylink_link_state *state); int (*pcs_config)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, - const unsigned long *advertising); + const unsigned long *advertising, + bool permit_pause_to_mac); void (*pcs_an_restart)(struct phylink_config *config); void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, phy_interface_t interface, int speed, int duplex); @@ -317,9 +318,11 @@ void pcs_get_state(struct phylink_config *config, * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @interface: interface mode to be used * @advertising: adertisement ethtool link mode mask + * @permit_pause_to_mac: permit forwarding pause resolution to MAC * * Configure the PCS for the operating mode, the interface mode, and set - * the advertisement mask. + * the advertisement mask. @permit_pause_to_mac indicates whether the + * hardware may forward the pause mode resolution to the MAC. * * When operating in %MLO_AN_INBAND, inband should always be enabled, * otherwise inband should be disabled. From b7ad14c2fe2d4b2abee491e3adfa3d0123aa2d8c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:41 +0100 Subject: [PATCH 1438/2056] net: phylink: re-implement interface configuration with PCS With PCS support, how we implement interface reconfiguration (or other major reconfiguration) is not up to the job; we end up reconfiguring the PCS for an interface change while the link could potentially be up. In order to solve this, add two additional MAC methods for major configuration, one to prepare for the change, and one to finish the change. This allows mvneta and mvpp2 to shutdown what they require prior to the MAC and PCS configuration calls, and then restart as appropriate. This impacts ksettings_set(), which now needs to identify whether the change is a minor tweak to the advertisement masks or whether the interface mode has changed, and call the appropriate function for that update. Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 75 +++++++++++++++++++++++++++------------ include/linux/phylink.h | 48 +++++++++++++++++++++++++ 2 files changed, 100 insertions(+), 23 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 84a426401102..d554a0fbb4f3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -433,23 +433,47 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) } } -static void phylink_pcs_config(struct phylink *pl, bool force_restart, - const struct phylink_link_state *state) +static void phylink_major_config(struct phylink *pl, bool restart, + const struct phylink_link_state *state) { - bool restart = force_restart; + int err; - if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config, - pl->cur_link_an_mode, - state->interface, - state->advertising, - !!(pl->link_config.pause & - MLO_PAUSE_AN))) - restart = true; + phylink_dbg(pl, "major config %s\n", phy_modes(state->interface)); + + if (pl->mac_ops->mac_prepare) { + err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode, + state->interface); + if (err < 0) { + phylink_err(pl, "mac_prepare failed: %pe\n", + ERR_PTR(err)); + return; + } + } phylink_mac_config(pl, state); + if (pl->pcs_ops) { + err = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + state->interface, + state->advertising, + !!(pl->link_config.pause & + MLO_PAUSE_AN)); + if (err < 0) + phylink_err(pl, "pcs_config failed: %pe\n", + ERR_PTR(err)); + if (err > 0) + restart = true; + } if (restart) phylink_mac_pcs_an_restart(pl); + + if (pl->mac_ops->mac_finish) { + err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode, + state->interface); + if (err < 0) + phylink_err(pl, "mac_prepare failed: %pe\n", + ERR_PTR(err)); + } } /* @@ -555,7 +579,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart) link_state.link = false; phylink_apply_manual_flow(pl, &link_state); - phylink_pcs_config(pl, force_restart, &link_state); + phylink_major_config(pl, force_restart, &link_state); } static const char *phylink_pause_to_str(int pause) @@ -674,7 +698,7 @@ static void phylink_resolve(struct work_struct *w) phylink_link_down(pl); cur_link_state = false; } - phylink_pcs_config(pl, false, &link_state); + phylink_major_config(pl, false, &link_state); pl->link_config.interface = link_state.interface; } else if (!pl->pcs_ops) { /* The interface remains unchanged, only the speed, @@ -1450,21 +1474,26 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, return -EINVAL; mutex_lock(&pl->state_mutex); - linkmode_copy(pl->link_config.advertising, config.advertising); - pl->link_config.interface = config.interface; pl->link_config.speed = config.speed; pl->link_config.duplex = config.duplex; pl->link_config.an_enabled = config.an_enabled; - if (pl->cur_link_an_mode == MLO_AN_INBAND && - !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { - /* If in 802.3z mode, this updates the advertisement. - * - * If we are in SGMII mode without a PHY, there is no - * advertisement; the only thing we have is the pause - * modes which can only come from a PHY. - */ - phylink_pcs_config(pl, true, &pl->link_config); + if (pl->link_config.interface != config.interface) { + /* The interface changed, e.g. 
1000base-X <-> 2500base-X */ + /* We need to force the link down, then change the interface */ + if (pl->old_link_state) { + phylink_link_down(pl); + pl->old_link_state = false; + } + if (!test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_major_config(pl, false, &config); + pl->link_config.interface = config.interface; + linkmode_copy(pl->link_config.advertising, config.advertising); + } else if (!linkmode_equal(pl->link_config.advertising, + config.advertising)) { + linkmode_copy(pl->link_config.advertising, config.advertising); + phylink_change_inband_advert(pl); } mutex_unlock(&pl->state_mutex); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index d9913d8e6b91..2f1315f32113 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -76,7 +76,9 @@ struct phylink_config { * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. * @mac_pcs_get_state: Read the current link state from the hardware. + * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. + * @mac_finish: finish a major reconfiguration of the interface. * @mac_an_restart: restart 802.3z BaseX autonegotiation. * @mac_link_down: take the link down. * @mac_link_up: allow the link to come up. @@ -89,8 +91,12 @@ struct phylink_mac_ops { struct phylink_link_state *state); void (*mac_pcs_get_state)(struct phylink_config *config, struct phylink_link_state *state); + int (*mac_prepare)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); + int (*mac_finish)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_an_restart)(struct phylink_config *config); void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); @@ -145,6 +151,31 @@ void validate(struct phylink_config *config, unsigned long *supported, void mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state); +/** + * mac_prepare() - prepare to change the PHY interface mode + * @config: a pointer to a &struct phylink_config. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @iface: interface mode to switch to + * + * phylink will call this method at the beginning of a full initialisation + * of the link, which includes changing the interface mode or at initial + * startup time. It may be called for the current mode. The MAC driver + * should perform whatever actions are required, e.g. disabling the + * Serdes PHY. + * + * This will be the first call in the sequence: + * - mac_prepare() + * - mac_config() + * - pcs_config() + * - possible pcs_an_restart() + * - mac_finish() + * + * Returns zero on success, or negative errno on failure which will be + * reported to the kernel log. + */ +int mac_prepare(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); + /** * mac_config() - configure the MAC for the selected mode and state * @config: a pointer to a &struct phylink_config. @@ -220,6 +251,23 @@ void mac_pcs_get_state(struct phylink_config *config, void mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); +/** + * mac_finish() - finish a to change the PHY interface mode + * @config: a pointer to a &struct phylink_config. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. 
+ * @iface: interface mode to switch to + * + * phylink will call this if it called mac_prepare() to allow the MAC to + * complete any necessary steps after the MAC and PCS have been configured + * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the + * Serdes PHY here if it was previously disabled by mac_prepare(). + * + * Returns zero on success, or negative errno on failure which will be + * reported to the kernel log. + */ +int mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); + /** * mac_an_restart() - restart 802.3z BaseX autonegotiation * @config: a pointer to a &struct phylink_config. From 7137e18f6f889a67046d5004e1690a32d7d2108d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:46 +0100 Subject: [PATCH 1439/2056] net: phylink: add struct phylink_pcs Add a way for MAC PCS to have private data while keeping independence from struct phylink_config, which is used for the MAC itself. We need this independence as we will have stand-alone code for PCS that is independent of the MAC. Introduce struct phylink_pcs, which is designed to be embedded in a driver private data structure. This structure does not include a mdio_device as there are PCS implementations such as the Marvell DSA and network drivers where this is not necessary. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 34 ++++++++++++++++++++++------- include/linux/phylink.h | 45 ++++++++++++++++++++++++++------------- 2 files changed, 56 insertions(+), 23 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index d554a0fbb4f3..b57cd2142786 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -43,6 +43,7 @@ struct phylink { const struct phylink_mac_ops *mac_ops; const struct phylink_pcs_ops *pcs_ops; struct phylink_config *config; + struct phylink_pcs *pcs; struct device *dev; unsigned int old_link_state:1; @@ -427,7 +428,7 @@ static void phylink_mac_pcs_an_restart(struct phylink *pl) phy_interface_mode_is_8023z(pl->link_config.interface) && phylink_autoneg_inband(pl->cur_link_an_mode)) { if (pl->pcs_ops) - pl->pcs_ops->pcs_an_restart(pl->config); + pl->pcs_ops->pcs_an_restart(pl->pcs); else pl->mac_ops->mac_an_restart(pl->config); } @@ -453,7 +454,7 @@ static void phylink_major_config(struct phylink *pl, bool restart, phylink_mac_config(pl, state); if (pl->pcs_ops) { - err = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode, state->interface, state->advertising, !!(pl->link_config.pause & @@ -506,7 +507,7 @@ static int phylink_change_inband_advert(struct phylink *pl) * restart negotiation if the pcs_config() helper indicates that * the programmed advertisement has changed. 
*/ - ret = pl->pcs_ops->pcs_config(pl->config, pl->cur_link_an_mode, + ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode, pl->link_config.interface, pl->link_config.advertising, !!(pl->link_config.pause & MLO_PAUSE_AN)); @@ -533,7 +534,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl, state->link = 1; if (pl->pcs_ops) - pl->pcs_ops->pcs_get_state(pl->config, state); + pl->pcs_ops->pcs_get_state(pl->pcs, state); else pl->mac_ops->mac_pcs_get_state(pl->config, state); } @@ -604,7 +605,7 @@ static void phylink_link_up(struct phylink *pl, pl->cur_interface = link_state.interface; if (pl->pcs_ops && pl->pcs_ops->pcs_link_up) - pl->pcs_ops->pcs_link_up(pl->config, pl->cur_link_an_mode, + pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode, pl->cur_interface, link_state.speed, link_state.duplex); @@ -863,11 +864,26 @@ struct phylink *phylink_create(struct phylink_config *config, } EXPORT_SYMBOL_GPL(phylink_create); -void phylink_add_pcs(struct phylink *pl, const struct phylink_pcs_ops *ops) +/** + * phylink_set_pcs() - set the current PCS for phylink to use + * @pl: a pointer to a &struct phylink returned from phylink_create() + * @pcs: a pointer to the &struct phylink_pcs + * + * Bind the MAC PCS to phylink. This may be called after phylink_create(), + * in mac_prepare() or mac_config() methods if it is desired to dynamically + * change the PCS. + * + * Please note that there are behavioural changes with the mac_config() + * callback if a PCS is present (denoting a newer setup) so removing a PCS + * is not supported, and if a PCS is going to be used, it must be registered + * by calling phylink_set_pcs() at the latest in the first mac_config() call. + */ +void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs) { - pl->pcs_ops = ops; + pl->pcs = pcs; + pl->pcs_ops = pcs->ops; } -EXPORT_SYMBOL_GPL(phylink_add_pcs); +EXPORT_SYMBOL_GPL(phylink_set_pcs); /** * phylink_destroy() - cleanup and destroy the phylink instance @@ -1212,6 +1228,8 @@ void phylink_start(struct phylink *pl) break; case MLO_AN_INBAND: poll |= pl->config->pcs_poll; + if (pl->pcs) + poll |= pl->pcs->poll; break; } if (poll) diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2f1315f32113..057f78263a46 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -321,6 +321,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy, int speed, int duplex, bool tx_pause, bool rx_pause); #endif +struct phylink_pcs_ops; + +/** + * struct phylink_pcs - PHYLINK PCS instance + * @ops: a pointer to the &struct phylink_pcs_ops structure + * @poll: poll the PCS for link changes + * + * This structure is designed to be embedded within the PCS private data, + * and will be passed between phylink and the PCS. + */ +struct phylink_pcs { + const struct phylink_pcs_ops *ops; + bool poll; +}; + /** * struct phylink_pcs_ops - MAC PCS operations structure. * @pcs_get_state: read the current MAC PCS link state from the hardware. @@ -330,21 +345,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy, * (where necessary). 
*/ struct phylink_pcs_ops { - void (*pcs_get_state)(struct phylink_config *config, + void (*pcs_get_state)(struct phylink_pcs *pcs, struct phylink_link_state *state); - int (*pcs_config)(struct phylink_config *config, unsigned int mode, + int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, const unsigned long *advertising, bool permit_pause_to_mac); - void (*pcs_an_restart)(struct phylink_config *config); - void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, + void (*pcs_an_restart)(struct phylink_pcs *pcs); + void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); }; #if 0 /* For kernel-doc purposes only. */ /** * pcs_get_state() - Read the current inband link state from the hardware - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @state: a pointer to a &struct phylink_link_state. * * Read the current inband link state from the MAC PCS, reporting the @@ -357,12 +372,12 @@ struct phylink_pcs_ops { * When present, this overrides mac_pcs_get_state() in &struct * phylink_mac_ops. */ -void pcs_get_state(struct phylink_config *config, +void pcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state); /** * pcs_config() - Configure the PCS mode and advertisement - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. * @interface: interface mode to be used * @advertising: adertisement ethtool link mode mask @@ -382,21 +397,21 @@ void pcs_get_state(struct phylink_config *config, * * For most 10GBASE-R, there is no advertisement. */ -int (*pcs_config)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, const unsigned long *advertising); +int pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, const unsigned long *advertising); /** * pcs_an_restart() - restart 802.3z BaseX autonegotiation - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * * When PCS ops are present, this overrides mac_an_restart() in &struct * phylink_mac_ops. */ -void (*pcs_an_restart)(struct phylink_config *config); +void pcs_an_restart(struct phylink_pcs *pcs); /** * pcs_link_up() - program the PCS for the resolved link configuration - * @config: a pointer to a &struct phylink_config. + * @pcs: a pointer to a &struct phylink_pcs. * @mode: link autonegotiation mode * @interface: link &typedef phy_interface_t mode * @speed: link speed @@ -407,14 +422,14 @@ void (*pcs_an_restart)(struct phylink_config *config); * mode without in-band AN needs to be manually configured for the link * and duplex setting. Otherwise, this should be a no-op. 
*/ -void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, int speed, int duplex); +void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex); #endif struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); -void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops); +void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); From 93eaceb0fcf87b7f70924f9da3ec27e3c73be53d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 21 Jul 2020 12:04:51 +0100 Subject: [PATCH 1440/2056] net: phylink: add interface to configure clause 22 PCS PHY Add an interface to configure the advertisement for a clause 22 PCS PHY, and set the AN enable flag in the BMCR appropriately. Reviewed-by: Ioana Ciornei Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 37 +++++++++++++++++++++++++++++++++++++ include/linux/phylink.h | 3 +++ 2 files changed, 40 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index b57cd2142786..32b4bd6a5b55 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -2442,6 +2442,43 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, } EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement); +/** + * phylink_mii_c22_pcs_config() - configure clause 22 PCS + * @pcs: a pointer to a &struct mdio_device. + * @mode: link autonegotiation mode + * @interface: the PHY interface mode being configured + * @advertising: the ethtool advertisement mask + * + * Configure a Clause 22 PCS PHY with the appropriate negotiation + * parameters for the @mode, @interface and @advertising parameters. + * Returns negative error number on failure, zero if the advertisement + * has not changed, or positive if there is a change. + */ +int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising) +{ + bool changed; + u16 bmcr; + int ret; + + ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface, + advertising); + if (ret < 0) + return ret; + + changed = ret > 0; + + bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0; + ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR, + BMCR_ANENABLE, bmcr); + if (ret < 0) + return ret; + + return changed ? 1 : 0; +} +EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config); + /** * phylink_mii_c22_pcs_an_restart() - restart 802.3z autonegotiation * @pcs: a pointer to a &struct mdio_device. 
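For illustration only (not part of this patch series): a minimal, hypothetical sketch of how a driver might provide a clause 22 PCS to phylink using the struct phylink_pcs / phylink_set_pcs() interfaces added above, with phylink_mii_c22_pcs_config() and phylink_mii_c22_pcs_an_restart() as helpers. All foo_* names are invented for the example, and a real driver would also have to implement pcs_get_state().

#include <linux/kernel.h>
#include <linux/mdio.h>
#include <linux/phylink.h>

struct foo_pcs {
	struct phylink_pcs pcs;		/* embedded; handed to phylink_set_pcs() */
	struct mdio_device *mdiodev;	/* the clause 22 PCS PHY */
};

#define to_foo_pcs(p)	container_of((p), struct foo_pcs, pcs)

static int foo_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			  phy_interface_t interface,
			  const unsigned long *advertising,
			  bool permit_pause_to_mac)
{
	struct foo_pcs *fp = to_foo_pcs(pcs);

	/* Returns <0 on error, 0 if the advertisement is unchanged, or >0
	 * if it changed, in which case phylink restarts negotiation.
	 */
	return phylink_mii_c22_pcs_config(fp->mdiodev, mode, interface,
					  advertising);
}

static void foo_pcs_an_restart(struct phylink_pcs *pcs)
{
	phylink_mii_c22_pcs_an_restart(to_foo_pcs(pcs)->mdiodev);
}

static const struct phylink_pcs_ops foo_pcs_ops = {
	/* .pcs_get_state would be filled in by a real driver */
	.pcs_config	= foo_pcs_config,
	.pcs_an_restart	= foo_pcs_an_restart,
};

/* e.g. from the MAC driver's mac_prepare(), or right after phylink_create():
 *	fp->pcs.ops = &foo_pcs_ops;
 *	fp->pcs.poll = true;
 *	phylink_set_pcs(priv->phylink, &fp->pcs);
 */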
diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 057f78263a46..1aad2aea4610 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -478,6 +478,9 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, phy_interface_t interface, const unsigned long *advertising); +int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising); void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs); void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, From d4eae993fc45398526aed683e225d6fa713f8ddf Mon Sep 17 00:00:00 2001 From: Yuval Basson Date: Tue, 21 Jul 2020 14:34:26 +0300 Subject: [PATCH 1441/2056] qed: Fix ILT and XRCD bitmap memory leaks - Free ILT lines used for XRC-SRQ's contexts. - Free XRCD bitmap Fixes: b8204ad878ce7 ("qed: changes to ILT to support XRC") Fixes: 7bfb399eca460 ("qed: Add XRC to RoCE") Signed-off-by: Michal Kalderon Signed-off-by: Yuval Basson Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5 +++++ drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1 + 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 5362dc18b6c2..6c221e9c8799 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2335,6 +2335,11 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn, elem_size = SRQ_CXT_SIZE; p_blk = &p_cli->pf_blks[SRQ_BLK]; break; + case QED_ELEM_XRC_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = XRC_SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; case QED_ELEM_TASK: p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index e5648ca2838b..a4bcde522cdf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -379,6 +379,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1); kfree(p_rdma_info->port); kfree(p_rdma_info->dev); From 1e5ae3507225f5560e45817155e77f496473850c Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:04 +0300 Subject: [PATCH 1442/2056] net: ena: avoid unnecessary rearming of interrupt vector when busy-polling For an overview of the race created by this patch goto synchronization label. In napi busy-poll mode, the kernel invokes the napi handler of the device repeatedly to poll the NIC's receive queues. This process repeats until a timeout, specific for each connection, is up. By polling packets in busy-poll mode the user may gain lower latency and higher throughput (since the kernel no longer waits for interrupts to poll the queues) in expense of CPU usage. Upon completing a napi routine, the driver checks whether the routine was called by an interrupt handler. If so, the driver re-enables interrupts for the device. This is needed since an interrupt routine invocation disables future invocations until explicitly re-enabled. 
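Condensed for reference, the handshake this message goes on to describe (abridged from the diff further below; identifiers are those used by this patch, and the surrounding driver code is omitted) is:

/* ena_intr_msix_io(): the vector is masked as a side effect of the IRQ firing */
WRITE_ONCE(ena_napi->interrupts_masked, true);
smp_wmb();	/* publish interrupts_masked before scheduling napi */
napi_schedule_irqoff(&ena_napi->napi);

/* ena_io_poll(): unmask only if an interrupt actually masked the vector */
if (napi_complete_done(napi, rx_work_done) &&
    READ_ONCE(ena_napi->interrupts_masked)) {
	smp_rmb();	/* make sure interrupts_masked is read before clearing */
	WRITE_ONCE(ena_napi->interrupts_masked, false);
	ena_unmask_interrupt(tx_ring, rx_ring);
}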
The driver avoids re-enabling the interrupts if they were not disabled in the first place (e.g. if the driver is in busy-poll mode). Originally, the driver checked whether interrupt re-enabling is needed by reading the 'ena_napi->unmask_interrupt' variable. This atomic variable was set upon interrupt and cleared after re-enabling it. In the 4.10 Linux version, the 'napi_complete_done' call was changed so that it returns 'false' when the device should not re-enable interrupts, and 'true' otherwise. The change includes reading the "NAPIF_STATE_IN_BUSY_POLL" flag to check if the napi call is in busy-poll mode, and, if so, returning 'false'. The driver was changed to re-enable interrupts according to this routine's return value. The Linux community rejected the use of the 'ena_napi->unmask_interrupt' variable to determine whether unmasking is needed, and urged to rely solely on the napi_complete_done() return value. See https://lore.kernel.org/patchwork/patch/741149/ for more details. As explained, a busy-poll session exists for a specified timeout value, after which it exits the busy-poll mode and re-enters it later. This leads to many invocations of the napi handler where napi_complete_done() falsely indicates that interrupts should be re-enabled. This creates a bug in which the interrupts are re-enabled unnecessarily. To reproduce this bug:
1) echo 50 | sudo tee /proc/sys/net/core/busy_poll
2) echo 50 | sudo tee /proc/sys/net/core/busy_read
3) Add counters that check whether 'ena_unmask_interrupt(tx_ring, rx_ring);' is called without disabling the interrupts in the first place (i.e. without the interrupt routine ena_intr_msix_io() having been called)
Steps 1+2 enable busy-poll as the default mode for new connections. The busy poll routine rearms the interrupts after every session by design, and so we need to add an extra check that the interrupts were masked in the first place. synchronization: This patch introduces a race between the interrupt handler ena_intr_msix_io() and the napi routine ena_io_poll(). Some macros and instructions were added to prevent this race from leaving the interrupts masked. The following specifies the different race scenarios in this patch:
1) interrupt handler and napi routine run sequentially
i) interrupt handler is called, sets the 'interrupts_masked' flag and successfully schedules the napi handler via softirq. In this scenario the napi routine might not see the flag change for several reasons:
a) The flag is stored in a register by the compiler. For this case the WRITE_ONCE macro is used, which prevents this.
b) The compiler might reorder the instructions. For this the smp_wmb() instruction was used, which implies a compiler memory barrier.
c) On archs with a weak consistency model (like ARM64) the napi routine might be scheduled and start running before the flag STORE instruction is committed to cache/memory. To ensure this doesn't happen, the smp_wmb() instruction was added. It ensures that the flag set instruction is committed before scheduling napi.
ii) The compiler reorders the flag check in the 'if' with the store to the flag in the napi routine. This scenario is prevented by the smp_rmb() call after the flag check.
2) interrupt handler and napi routine run in parallel (can happen when the busy poll routine invokes the napi handler)
i) interrupt handler sets the flag in one core, while the napi routine reads it in another core.
This scenario also is divided into two cases: a) napi_complete_done() doesn't finish running, in which case napi_sched() would just set NAPIF_STATE_MISSED and the napi routine would reschedule itself without changing the flag's value. b) napi_complete_done() finishes running. In this case the napi routine might override the flag's value. This doesn't present any rise since it later unmasks the interrupt vector. Signed-off-by: Shay Agroskin Signed-off-by: Arthur Kiyanovski Cc: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 8 +++++++- drivers/net/ethernet/amazon/ena/ena_netdev.h | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 3eb63b12dd68..e453542c9e94 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -1913,7 +1913,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget) /* Update numa and unmask the interrupt only when schedule * from the interrupt context (vs from sk_busy_loop) */ - if (napi_complete_done(napi, rx_work_done)) { + if (napi_complete_done(napi, rx_work_done) && + READ_ONCE(ena_napi->interrupts_masked)) { + smp_rmb(); /* make sure interrupts_masked is read */ + WRITE_ONCE(ena_napi->interrupts_masked, false); /* We apply adaptive moderation on Rx path only. * Tx uses static interrupt moderation. */ @@ -1961,6 +1964,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data) ena_napi->first_interrupt = true; + WRITE_ONCE(ena_napi->interrupts_masked, true); + smp_wmb(); /* write interrupts_masked before calling napi */ + napi_schedule_irqoff(&ena_napi->napi); return IRQ_HANDLED; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index ba030d260940..89304b403995 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -167,6 +167,7 @@ struct ena_napi { struct ena_ring *rx_ring; struct ena_ring *xdp_ring; bool first_interrupt; + bool interrupts_masked; u32 qid; struct dim dim; }; From 866032ab4d16cffa3ad1a6a3fb89d374656161db Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:05 +0300 Subject: [PATCH 1443/2056] net: ena: add reserved PCI device ID Add a reserved PCI device ID to the driver's table Used for internal testing purposes. Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h index f80d2a47fa94..426e57e10a7f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h +++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h @@ -53,10 +53,15 @@ #define PCI_DEV_ID_ENA_LLQ_VF 0xec21 #endif +#ifndef PCI_DEV_ID_ENA_RESRV0 +#define PCI_DEV_ID_ENA_RESRV0 0x0051 +#endif + #define ENA_PCI_ID_TABLE_ENTRY(devid) \ {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)}, static const struct pci_device_id ena_pci_tbl[] = { + ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_RESRV0) ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF) ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF) ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF) From 79890d3f3cde559e0fc5a50cf77176dfcf64b6c9 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:06 +0300 Subject: [PATCH 1444/2056] net: ena: cosmetic: satisfy gcc warning gcc 4.8 reports a warning when initializing with = {0}. Dropping the "0" from the braces fixes the issue. This fix is not ANSI compatible but is allowed by gcc. Signed-off-by: Sameeh Jubran Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index e453542c9e94..d3d2e3934237 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -307,7 +307,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev, struct ena_rx_buffer *rx_info) { struct ena_adapter *adapter = netdev_priv(dev); - struct ena_com_tx_ctx ena_tx_ctx = {0}; + struct ena_com_tx_ctx ena_tx_ctx = {}; struct ena_tx_buffer *tx_info; struct ena_ring *xdp_ring; u16 next_to_use, req_id; From 0dcec68651f2162290750aae48fd7a0fdace0c66 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:07 +0300 Subject: [PATCH 1445/2056] net: ena: cosmetic: change ena_com_stats_admin stats to u64 The size of the admin statistics in ena_com_stats_admin is changed from 32bit to 64bit so to align with the sizes of the other statistics in the driver (i.e. rx_stats, tx_stats and ena_stats_dev). This is done as part of an effort to create a unified API to read statistics. Signed-off-by: Shay Agroskin Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_com.h | 10 +++++----- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index bc187adf54e4..4c98f6f07882 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -230,11 +230,11 @@ struct ena_com_admin_sq { }; struct ena_com_stats_admin { - u32 aborted_cmd; - u32 submitted_cmd; - u32 completed_cmd; - u32 out_of_space; - u32 no_completion; + u64 aborted_cmd; + u64 submitted_cmd; + u64 completed_cmd; + u64 out_of_space; + u64 no_completion; }; struct ena_com_admin_queue { diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index e340b65af08c..430275bc0d04 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -164,13 +164,13 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data) static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data) { const struct ena_stats *ena_stats; - u32 *ptr; + u64 *ptr; int i; for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) { ena_stats = &ena_stats_ena_com_strings[i]; - ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats + + ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats + (uintptr_t)ena_stats->stat_offset); *(*data)++ = *ptr; From 0f505c604e4ffd2ddc7cf06583ac169c443b8f68 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:08 +0300 Subject: [PATCH 1446/2056] net: ena: add support for traffic mirroring Add support for traffic mirroring, where the hardware reads the buffer from the instance memory directly. Traffic Mirroring needs access to the rx buffers in the instance. To have this access, this patch: 1. Changes the code to map and unmap the rx buffers bidirectionally. 2. Enables the relevant bit in driver_supported_features to indicate to the FW that this driver supports traffic mirroring. Rx completion is not generated until mirroring is done to avoid the situation where the driver changes the buffer before it is mirrored. Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 5 ++++- drivers/net/ethernet/amazon/ena/ena_netdev.c | 15 +++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 336742f6e3c3..afe87ff09b20 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -816,7 +816,8 @@ struct ena_admin_host_info { /* 0 : reserved * 1 : rx_offset * 2 : interrupt_moderation - * 31:3 : reserved + * 3 : rx_buf_mirroring + * 31:4 : reserved */ u32 driver_supported_features; }; @@ -1129,6 +1130,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1) #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) +#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3 +#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3) /* aenq_common_desc */ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d3d2e3934237..0563cb3125db 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -959,8 +959,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring, return -ENOMEM; } + /* To enable NIC-side port-mirroring, AKA SPAN port, + * we make the buffer readable from the nic as well + */ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, - DMA_FROM_DEVICE); + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(rx_ring->dev, dma))) { u64_stats_update_begin(&rx_ring->syncp); rx_ring->rx_stats.dma_mapping_err++; @@ -993,10 +996,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, return; } - dma_unmap_page(rx_ring->dev, - ena_buf->paddr - rx_ring->rx_headroom, + dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom, ENA_PAGE_SIZE, - DMA_FROM_DEVICE); + DMA_BIDIRECTIONAL); __free_page(page); rx_info->page = NULL; @@ -1431,7 +1433,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, do { dma_unmap_page(rx_ring->dev, dma_unmap_addr(&rx_info->ena_buf, paddr), - ENA_PAGE_SIZE, DMA_FROM_DEVICE); + ENA_PAGE_SIZE, DMA_BIDIRECTIONAL); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, rx_info->page_offset, len, ENA_PAGE_SIZE); @@ -3123,7 +3125,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd host_info->driver_supported_features = ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | - ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK; + ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | + ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK; rc = ena_com_set_host_attributes(ena_dev); if (rc) { From 0ee60edf4669cd8c466a532c233fcd6281da9892 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:09 +0300 Subject: [PATCH 1447/2056] net: ena: enable support of rss hash key and function changes Add the rss_configurable_function_key bit to driver_supported_feature. This bit tells the device that the driver in question supports the retrieving and updating of RSS function and hash key, and therefore the device should allow RSS function and key manipulation. This commit turns on device support for hash key and RSS function management. Without this commit this feature is turned off at the device and appears to the user as unsupported. 
This commit concludes the following series of already merged commits: commit 0af3c4e2eab8 ("net: ena: changes to RSS hash key allocation") commit c1bd17e51c71 ("net: ena: change default RSS hash function to Toeplitz") commit f66c2ea3b18a ("net: ena: allow setting the hash function without changing the key") commit e9a1de378dd4 ("net: ena: fix error returning in ena_com_get_hash_function()") commit 80f8443fcdaa ("net: ena: avoid unnecessary admin command when RSS function set fails") commit 6a4f7dc82d1e ("net: ena: rss: do not allocate key when not supported") commit 0d1c3de7b8c7 ("net: ena: fix incorrect default RSS key") The above commits represent the last part of the implementation of this feature, and with them merged the feature can be enabled in the device. Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 5 ++++- drivers/net/ethernet/amazon/ena/ena_netdev.c | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index afe87ff09b20..7f7978b135a9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -817,7 +817,8 @@ struct ena_admin_host_info { * 1 : rx_offset * 2 : interrupt_moderation * 3 : rx_buf_mirroring - * 31:4 : reserved + * 4 : rss_configurable_function_key + * 31:5 : reserved */ u32 driver_supported_features; }; @@ -1132,6 +1133,8 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2) #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT 3 #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3) +#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4 +#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4) /* aenq_common_desc */ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 0563cb3125db..491ea013868b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3126,7 +3126,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd host_info->driver_supported_features = ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK | ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK | - ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK; + ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK | + ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK; rc = ena_com_set_host_attributes(ena_dev); if (rc) { From c29efeae37077f4bcb2ecd710fc66ca72c43ec79 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:10 +0300 Subject: [PATCH 1448/2056] net: ena: move llq configuration from ena_probe to ena_device_init() When the ENA device resets to recover from some error state, all LLQ configuration values are reset to their defaults, because LLQ is initialized only once during ena_probe(). Changes in this commit: 1. Move the LLQ configuration process into ena_init_device() which is called from both ena_probe() and ena_restore_device(). This way, LLQ setup configurations that are different from the default values will survive resets. 2. Extract the LLQ bar mapping to ena_map_llq_bar(), and call once in the lifetime of the driver from ena_probe(), since there is no need to unmap and map the LLQ bar again every reset. 3. 
Map the LLQ bar if it exists, regardless if initialization of LLQ placement policy (ENA_ADMIN_PLACEMENT_POLICY_DEV) succeeded or not. Initialization might fail the first time, falling back to the ENA_ADMIN_PLACEMENT_POLICY_HOST placement policy, but later succeed after device reset, in which case the LLQ bar needs to be mapped already. Signed-off-by: Sameeh Jubran Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 136 ++++++++++--------- 1 file changed, 73 insertions(+), 63 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 491ea013868b..913d698b9834 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3280,10 +3280,71 @@ static int ena_device_validate_params(struct ena_adapter *adapter, return 0; } +static void set_default_llq_configurations(struct ena_llq_configurations *llq_config) +{ + llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; + llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; + llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; + llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; + llq_config->llq_ring_entry_size_value = 128; +} + +static int ena_set_queues_placement_policy(struct pci_dev *pdev, + struct ena_com_dev *ena_dev, + struct ena_admin_feature_llq_desc *llq, + struct ena_llq_configurations *llq_default_configurations) +{ + int rc; + u32 llq_feature_mask; + + llq_feature_mask = 1 << ENA_ADMIN_LLQ; + if (!(ena_dev->supported_features & llq_feature_mask)) { + dev_err(&pdev->dev, + "LLQ is not supported Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + return 0; + } + + rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); + if (unlikely(rc)) { + dev_err(&pdev->dev, + "Failed to configure the device mode. Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + } + + return 0; +} + +static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev, + int bars) +{ + bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR)); + + if (!has_mem_bar) { + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { + dev_err(&pdev->dev, + "ENA device does not expose LLQ bar. 
Fallback to host mode policy.\n"); + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + } + + return 0; + } + + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); + + if (!ena_dev->mem_bar) + return -EFAULT; + + return 0; +} + static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, struct ena_com_dev_get_features_ctx *get_feat_ctx, bool *wd_state) { + struct ena_llq_configurations llq_config; struct device *dev = &pdev->dev; bool readless_supported; u32 aenq_groups; @@ -3374,6 +3435,15 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE)); + set_default_llq_configurations(&llq_config); + + rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, + &llq_config); + if (rc) { + dev_err(&pdev->dev, "ena device init failed\n"); + goto err_admin_init; + } + return 0; err_admin_init: @@ -3880,54 +3950,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev, return max_num_io_queues; } -static int ena_set_queues_placement_policy(struct pci_dev *pdev, - struct ena_com_dev *ena_dev, - struct ena_admin_feature_llq_desc *llq, - struct ena_llq_configurations *llq_default_configurations) -{ - bool has_mem_bar; - int rc; - u32 llq_feature_mask; - - llq_feature_mask = 1 << ENA_ADMIN_LLQ; - if (!(ena_dev->supported_features & llq_feature_mask)) { - dev_err(&pdev->dev, - "LLQ is not supported Fallback to host mode policy.\n"); - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; - return 0; - } - - has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR); - - rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations); - if (unlikely(rc)) { - dev_err(&pdev->dev, - "Failed to configure the device mode. Fallback to host mode policy.\n"); - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; - return 0; - } - - /* Nothing to config, exit */ - if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - return 0; - - if (!has_mem_bar) { - dev_err(&pdev->dev, - "ENA device does not expose LLQ bar. 
Fallback to host mode policy.\n"); - ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; - return 0; - } - - ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, - pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); - - if (!ena_dev->mem_bar) - return -EFAULT; - - return 0; -} - static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat, struct net_device *netdev) { @@ -4043,14 +4065,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) pci_release_selected_regions(pdev, release_bars); } -static void set_default_llq_configurations(struct ena_llq_configurations *llq_config) -{ - llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; - llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; - llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; - llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; - llq_config->llq_ring_entry_size_value = 128; -} static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx) { @@ -4132,7 +4146,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 }; struct ena_com_dev_get_features_ctx get_feat_ctx; - struct ena_llq_configurations llq_config; struct ena_com_dev *ena_dev = NULL; struct ena_adapter *adapter; struct net_device *netdev; @@ -4187,13 +4200,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_region; } - set_default_llq_configurations(&llq_config); - - rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq, - &llq_config); + rc = ena_map_llq_mem_bar(pdev, ena_dev, bars); if (rc) { - dev_err(&pdev->dev, "ena device init failed\n"); - goto err_device_destroy; + dev_err(&pdev->dev, "ena llq bar mapping failed\n"); + goto err_free_ena_dev; } calc_queue_ctx.ena_dev = ena_dev; From 0e3a3f6dacf01cbad9ecc26149ffe8cda3137540 Mon Sep 17 00:00:00 2001 From: Arthur Kiyanovski Date: Tue, 21 Jul 2020 16:38:11 +0300 Subject: [PATCH 1449/2056] net: ena: support new LLQ acceleration mode New devices add a new hardware acceleration engine, which adds some restrictions to the driver. Metadata descriptor must be present for each packet and the maximum burst size between two doorbells is now limited to a number advertised by the device. This patch adds: 1. A handshake protocol between the driver and the device, so the device will enable the accelerated queues only when both sides support it. 2. The driver support for the new acceleration engine: 2.1. Send metadata descriptor for each Tx packet. 2.2. Limit the number of packets sent between doorbells.(*) (*) A previous driver implementation of this feature was comitted in commit 05d62ca218f8 ("net: ena: add handling of llq max tx burst size") however the design of the interface between the driver and device changed since then. This change is reflected in this commit. Signed-off-by: Netanel Belgazal Signed-off-by: Arthur Kiyanovski Signed-off-by: David S. 
Miller --- .../net/ethernet/amazon/ena/ena_admin_defs.h | 39 ++++++++++++-- drivers/net/ethernet/amazon/ena/ena_com.c | 19 ++++++- drivers/net/ethernet/amazon/ena/ena_com.h | 3 ++ drivers/net/ethernet/amazon/ena/ena_eth_com.c | 53 +++++++++++++------ drivers/net/ethernet/amazon/ena/ena_eth_com.h | 3 +- drivers/net/ethernet/amazon/ena/ena_netdev.c | 16 ++++-- drivers/net/ethernet/amazon/ena/ena_netdev.h | 2 + 7 files changed, 110 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 7f7978b135a9..b818a169c193 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -491,6 +491,36 @@ enum ena_admin_llq_stride_ctrl { ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2, }; +enum ena_admin_accel_mode_feat { + ENA_ADMIN_DISABLE_META_CACHING = 0, + ENA_ADMIN_LIMIT_TX_BURST = 1, +}; + +struct ena_admin_accel_mode_get { + /* bit field of enum ena_admin_accel_mode_feat */ + u16 supported_flags; + + /* maximum burst size between two doorbells. The size is in bytes */ + u16 max_tx_burst_size; +}; + +struct ena_admin_accel_mode_set { + /* bit field of enum ena_admin_accel_mode_feat */ + u16 enabled_flags; + + u16 reserved; +}; + +struct ena_admin_accel_mode_req { + union { + u32 raw[2]; + + struct ena_admin_accel_mode_get get; + + struct ena_admin_accel_mode_set set; + } u; +}; + struct ena_admin_feature_llq_desc { u32 max_llq_num; @@ -536,10 +566,13 @@ struct ena_admin_feature_llq_desc { /* the stride control the driver selected to use */ u16 descriptors_stride_ctrl_enabled; - /* Maximum size in bytes taken by llq entries in a single tx burst. - * Set to 0 when there is no such limit. + /* reserved */ + u32 reserved1; + + /* accelerated low latency queues requirement. 
driver needs to + * support those requirements in order to use accelerated llq */ - u32 max_tx_burst_size; + struct ena_admin_accel_mode_req accel_mode; }; struct ena_admin_queue_ext_feature_fields { diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 432f143559a1..435bf05a853c 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -403,6 +403,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, 0x0, io_sq->llq_info.desc_list_entry_size); io_sq->llq_buf_ctrl.descs_left_in_line = io_sq->llq_info.descs_num_before_header; + io_sq->disable_meta_caching = + io_sq->llq_info.disable_meta_caching; if (io_sq->llq_info.max_entries_in_tx_burst > 0) io_sq->entries_in_tx_burst_left = @@ -626,6 +628,10 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev) cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header; cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl; + cmd.u.llq.accel_mode.u.set.enabled_flags = + BIT(ENA_ADMIN_DISABLE_META_CACHING) | + BIT(ENA_ADMIN_LIMIT_TX_BURST); + ret = ena_com_execute_admin_command(admin_queue, (struct ena_admin_aq_entry *)&cmd, sizeof(cmd), @@ -643,6 +649,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, struct ena_llq_configurations *llq_default_cfg) { struct ena_com_llq_info *llq_info = &ena_dev->llq_info; + struct ena_admin_accel_mode_get llq_accel_mode_get; u16 supported_feat; int rc; @@ -742,9 +749,17 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev, llq_default_cfg->llq_num_decs_before_header, supported_feat, llq_info->descs_num_before_header); } + /* Check for accelerated queue supported */ + llq_accel_mode_get = llq_features->accel_mode.u.get; - llq_info->max_entries_in_tx_burst = - (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value); + llq_info->disable_meta_caching = + !!(llq_accel_mode_get.supported_flags & + BIT(ENA_ADMIN_DISABLE_META_CACHING)); + + if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST)) + llq_info->max_entries_in_tx_burst = + llq_accel_mode_get.max_tx_burst_size / + llq_default_cfg->llq_ring_entry_size_value; rc = ena_com_set_llq(ena_dev); if (rc) diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h index 4c98f6f07882..4287d47b2b0b 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_com.h @@ -127,6 +127,7 @@ struct ena_com_llq_info { u16 descs_num_before_header; u16 descs_per_entry; u16 max_entries_in_tx_burst; + bool disable_meta_caching; }; struct ena_com_io_cq { @@ -189,6 +190,8 @@ struct ena_com_io_sq { enum queue_direction direction; enum ena_admin_placement_policy_type mem_queue_type; + bool disable_meta_caching; + u32 msix_vector; struct ena_com_tx_meta cached_tx_meta; struct ena_com_llq_info llq_info; diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c index ec8ea25e988d..ccd440589565 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c @@ -285,11 +285,10 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, return count; } -static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, - struct ena_com_tx_ctx *ena_tx_ctx) +static int ena_com_create_meta(struct ena_com_io_sq *io_sq, + struct ena_com_tx_meta *ena_meta) { struct ena_eth_io_tx_meta_desc *meta_desc = 
NULL; - struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; meta_desc = get_sq_desc(io_sq); memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc)); @@ -309,12 +308,13 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, /* Extended meta desc */ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; - meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; meta_desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK; + meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; + meta_desc->word2 |= ena_meta->l3_hdr_len & ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK; meta_desc->word2 |= (ena_meta->l3_hdr_offset << @@ -325,15 +325,38 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) & ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK; - meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; - - /* Cached the meta desc */ - memcpy(&io_sq->cached_tx_meta, ena_meta, - sizeof(struct ena_com_tx_meta)); - return ena_com_sq_update_tail(io_sq); } +static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq, + struct ena_com_tx_ctx *ena_tx_ctx, + bool *have_meta) +{ + struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; + + /* When disable meta caching is set, don't bother to save the meta and + * compare it to the stored version, just create the meta + */ + if (io_sq->disable_meta_caching) { + if (unlikely(!ena_tx_ctx->meta_valid)) + return -EINVAL; + + *have_meta = true; + return ena_com_create_meta(io_sq, ena_meta); + } + + if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) { + *have_meta = true; + /* Cache the meta desc */ + memcpy(&io_sq->cached_tx_meta, ena_meta, + sizeof(struct ena_com_tx_meta)); + return ena_com_create_meta(io_sq, ena_meta); + } + + *have_meta = false; + return 0; +} + static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx, struct ena_eth_io_rx_cdesc_base *cdesc) { @@ -402,12 +425,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq, if (unlikely(rc)) return rc; - have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq, - ena_tx_ctx); - if (have_meta) { - rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx); - if (unlikely(rc)) - return rc; + rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta); + if (unlikely(rc)) { + pr_err("failed to create and store tx meta desc\n"); + return rc; } /* If the caller doesn't want to send packets */ diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h index 8b1afd3b32f2..b6592cb93b04 100644 --- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h +++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h @@ -157,7 +157,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq, llq_info = &io_sq->llq_info; num_descs = ena_tx_ctx->num_bufs; - if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx))) + if (llq_info->disable_meta_caching || + unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx))) ++num_descs; if (num_descs > llq_info->descs_num_before_header) { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 913d698b9834..6478c1e0d137 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -655,6 +655,7 @@ static void 
ena_init_io_rings(struct ena_adapter *adapter, txr->sgl_size = adapter->max_tx_sgl_size; txr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_tx(ena_dev); + txr->disable_meta_caching = adapter->disable_meta_caching; /* Don't init RX queues for xdp queues */ if (!ENA_IS_XDP_INDEX(adapter, i)) { @@ -2783,7 +2784,9 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count) return dev_was_up ? ena_open(adapter->netdev) : 0; } -static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) +static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, + struct sk_buff *skb, + bool disable_meta_caching) { u32 mss = skb_shinfo(skb)->gso_size; struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; @@ -2827,7 +2830,9 @@ static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb) ena_meta->l3_hdr_len = skb_network_header_len(skb); ena_meta->l3_hdr_offset = skb_network_offset(skb); ena_tx_ctx->meta_valid = 1; - + } else if (disable_meta_caching) { + memset(ena_meta, 0, sizeof(*ena_meta)); + ena_tx_ctx->meta_valid = 1; } else { ena_tx_ctx->meta_valid = 0; } @@ -3011,7 +3016,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) ena_tx_ctx.header_len = header_len; /* set flags and meta data */ - ena_tx_csum(&ena_tx_ctx, skb); + ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); rc = ena_xmit_common(dev, tx_ring, @@ -4260,6 +4265,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->xdp_num_queues = 0; adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; + if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) + adapter->disable_meta_caching = + !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags & + BIT(ENA_ADMIN_DISABLE_META_CACHING)); + adapter->wd_state = wd_state; snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 89304b403995..0c8504006247 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -298,6 +298,7 @@ struct ena_ring { u8 tx_max_header_size; bool first_interrupt; + bool disable_meta_caching; u16 no_interrupt_event_cnt; /* cpu for TPH */ @@ -399,6 +400,7 @@ struct ena_adapter { bool wd_state; bool dev_up_before_reset; + bool disable_meta_caching; unsigned long last_keep_alive_jiffies; struct u64_stats_sync syncp; From 3fc364c0527494f322ad9504a39d39a6197edfdc Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Tue, 21 Jul 2020 18:22:24 +0200 Subject: [PATCH 1450/2056] r8169: allow to enable ASPM on RTL8125A For most chip versions this has been added already. Allow also for RTL8125A to enable ASPM. Signed-off-by: Heiner Kallweit Signed-off-by: David S. 
Miller --- drivers/net/ethernet/realtek/r8169_main.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 975f5c10ba47..d1da92ac7fbe 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -3622,6 +3622,7 @@ static void rtl_hw_start_8125a_1(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8125a_1); rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) @@ -3649,6 +3650,7 @@ static void rtl_hw_start_8125a_2(struct rtl8169_private *tp) rtl_ephy_init(tp, e_info_8125a_2); rtl_hw_start_8125_common(tp); + rtl_hw_aspm_clkreq_enable(tp, true); } static void rtl_hw_start_8125b(struct rtl8169_private *tp) From 6553e561cadc0ec966ce2f039965a99a7502e19b Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 21 Jul 2020 19:53:51 +0300 Subject: [PATCH 1451/2056] devlink: Do not hold devlink mutex when initializing devlink fields There is no need to hold a device global lock when initializing devlink device fields of a devlink instance which is not yet part of the devices list. Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/devlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 6335e1851088..7df918a5899e 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -7421,9 +7421,9 @@ EXPORT_SYMBOL_GPL(devlink_alloc); */ int devlink_register(struct devlink *devlink, struct device *dev) { - mutex_lock(&devlink_mutex); devlink->dev = dev; devlink->registered = true; + mutex_lock(&devlink_mutex); list_add_tail(&devlink->list, &devlink_list); devlink_notify(devlink, DEVLINK_CMD_NEW); mutex_unlock(&devlink_mutex); From 9232a3e67b212f9ef924786f8dde23080acd321a Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 21 Jul 2020 19:53:52 +0300 Subject: [PATCH 1452/2056] devlink: Avoid duplicate check for reload enabled flag Reload operation is enabled or not is already checked by devlink_reload(). Hence, remove the duplicate check from devlink_nl_cmd_reload(). Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/devlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 7df918a5899e..5c74e67f358c 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2967,7 +2967,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) struct net *dest_net = NULL; int err; - if (!devlink_reload_supported(devlink) || !devlink->reload_enabled) + if (!devlink_reload_supported(devlink)) return -EOPNOTSUPP; err = devlink_resources_validate(devlink, NULL, info); From 336ce1c93293e1e606fbc557587b1a1f8630cd5c Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 21 Jul 2020 19:53:53 +0300 Subject: [PATCH 1453/2056] devlink: Add comment for devlink instance lock Add comment to describe the purpose of devlink instance lock. Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/devlink.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/net/devlink.h b/include/net/devlink.h index 913e8679ae35..19d990c8edcc 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -40,7 +40,9 @@ struct devlink { struct xarray snapshot_ids; struct device *dev; possible_net_t _net; - struct mutex lock; + struct mutex lock; /* Serializes access to devlink instance specific objects such as + * port, sb, dpipe, resource, params, region, traps and more. + */ u8 reload_failed:1, reload_enabled:1, registered:1; From eac5f8a95ae39dd94af818f0f9fad5d46207ee33 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Tue, 21 Jul 2020 19:53:54 +0300 Subject: [PATCH 1454/2056] devlink: Constify devlink instance pointer Constify devlink instance pointer while checking if reload operation is supported or not. This helps to review the scope of checks done in reload. Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/devlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 5c74e67f358c..8b7bb4bfb6d0 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2921,7 +2921,7 @@ static void devlink_reload_netns_change(struct devlink *devlink, DEVLINK_CMD_PARAM_NEW); } -static bool devlink_reload_supported(struct devlink *devlink) +static bool devlink_reload_supported(const struct devlink *devlink) { return devlink->ops->reload_down && devlink->ops->reload_up; } From 35dfb013149f74c2be1ff9c78f14e6a3cd1539d1 Mon Sep 17 00:00:00 2001 From: Andrew Sy Kim Date: Wed, 8 Jul 2020 12:16:38 -0400 Subject: [PATCH 1455/2056] ipvs: queue delayed work to expire no destination connections if expire_nodest_conn=1 When expire_nodest_conn=1 and a destination is deleted, IPVS does not expire the existing connections until the next matching incoming packet. If there are many connection entries from a single client to a single destination, many packets may get dropped before all the connections are expired (more likely with lots of UDP traffic). An optimization can be made where upon deletion of a destination, IPVS queues up delayed work to immediately expire any connections with a deleted destination. This ensures any reused source ports from a client (within the IPVS timeouts) are scheduled to new real servers instead of silently dropped. 
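As an illustration of the new behaviour (the sysctl knob and ipvsadm syntax below are standard IPVS tooling, and the addresses are made-up example values, not part of this patch):

  # expire connections whose real server has been removed
  sysctl -w net.ipv4.vs.expire_nodest_conn=1

  # deleting a real server now queues the delayed expiry work immediately,
  # instead of waiting for further packets from the client
  ipvsadm -d -t 10.0.0.1:80 -r 192.168.1.10:80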
Signed-off-by: Andrew Sy Kim Signed-off-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- include/net/ip_vs.h | 29 ++++++++++++++++++++ net/netfilter/ipvs/ip_vs_conn.c | 39 +++++++++++++++++++++++++++ net/netfilter/ipvs/ip_vs_core.c | 47 ++++++++++++++------------------- net/netfilter/ipvs/ip_vs_ctl.c | 22 +++++++++++++++ 4 files changed, 110 insertions(+), 27 deletions(-) diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 011f407b76fe..9a59a33787cb 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -14,6 +14,7 @@ #include /* for struct rwlock_t */ #include /* for struct atomic_t */ #include /* for struct refcount_t */ +#include #include #include @@ -886,6 +887,8 @@ struct netns_ipvs { atomic_t conn_out_counter; #ifdef CONFIG_SYSCTL + /* delayed work for expiring no dest connections */ + struct delayed_work expire_nodest_conn_work; /* 1/rate drop and drop-entry variables */ struct delayed_work defense_work; /* Work handler */ int drop_rate; @@ -1051,6 +1054,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return ipvs->sysctl_conn_reuse_mode; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_expire_nodest_conn; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return ipvs->sysctl_schedule_icmp; @@ -1138,6 +1146,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return 0; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return 0; @@ -1507,6 +1520,22 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif +#ifdef CONFIG_SYSCTL +/* Enqueue delayed work for expiring no dest connections + * Only run when sysctl_expire_nodest=1 + */ +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) +{ + if (sysctl_expire_nodest_conn(ipvs)) + queue_delayed_work(system_long_wq, + &ipvs->expire_nodest_conn_work, 1); +} + +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs); +#else +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {} +#endif + #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ IP_VS_CONN_F_FWD_MASK) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index b3921ae92740..a90b8eac16ac 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1389,6 +1389,45 @@ static void ip_vs_conn_flush(struct netns_ipvs *ipvs) goto flush_again; } } + +#ifdef CONFIG_SYSCTL +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs) +{ + int idx; + struct ip_vs_conn *cp, *cp_c; + struct ip_vs_dest *dest; + + rcu_read_lock(); + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { + hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { + if (cp->ipvs != ipvs) + continue; + + dest = cp->dest; + if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE)) + continue; + + if (atomic_read(&cp->n_control)) + continue; + + cp_c = cp->control; + IP_VS_DBG(4, "del connection\n"); + ip_vs_conn_del(cp); + if (cp_c && !atomic_read(&cp_c->n_control)) { + IP_VS_DBG(4, "del controlling connection\n"); + ip_vs_conn_del(cp_c); + } + } + cond_resched_rcu(); + + /* netns clean up started, abort delayed work */ + if (!ipvs->enable) + break; + } + rcu_read_unlock(); +} +#endif + /* * per netns init and exit */ diff --git a/net/netfilter/ipvs/ip_vs_core.c 
b/net/netfilter/ipvs/ip_vs_core.c index b4a6b7662f3f..e3668a6e54e4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -694,16 +694,10 @@ static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) return ipvs->sysctl_nat_icmp_send; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) -{ - return ipvs->sysctl_expire_nodest_conn; -} - #else static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; } static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; } #endif @@ -2097,6 +2091,26 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int } } + /* Check the server status */ + if (cp && cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { + /* the destination server is not available */ + if (sysctl_expire_nodest_conn(ipvs)) { + bool old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); + + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; + + ip_vs_conn_expire_now(cp); + __ip_vs_conn_put(cp); + if (old_ct) + return NF_DROP; + cp = NULL; + } else { + __ip_vs_conn_put(cp); + return NF_DROP; + } + } + if (unlikely(!cp)) { int v; @@ -2106,27 +2120,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet"); - /* Check the server status */ - if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { - /* the destination server is not available */ - - __u32 flags = cp->flags; - - /* when timer already started, silently drop the packet.*/ - if (timer_pending(&cp->timer)) - __ip_vs_conn_put(cp); - else - ip_vs_conn_put(cp); - - if (sysctl_expire_nodest_conn(ipvs) && - !(flags & IP_VS_CONN_F_ONE_PACKET)) { - /* try to expire the connection immediately */ - ip_vs_conn_expire_now(cp); - } - - return NF_DROP; - } - ip_vs_in_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); if (cp->packet_xmit) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 4af83f466dfc..f984d2c881ff 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -210,6 +210,17 @@ static void update_defense_level(struct netns_ipvs *ipvs) local_bh_enable(); } +/* Handler for delayed work for expiring no + * destination connections + */ +static void expire_nodest_conn_handler(struct work_struct *work) +{ + struct netns_ipvs *ipvs; + + ipvs = container_of(work, struct netns_ipvs, + expire_nodest_conn_work.work); + ip_vs_expire_nodest_conn_flush(ipvs); +} /* * Timer for checking the defense @@ -1164,6 +1175,12 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, list_add(&dest->t_list, &ipvs->dest_trash); dest->idle_start = 0; spin_unlock_bh(&ipvs->dest_trash_lock); + + /* Queue up delayed work to expire all no destination connections. + * No-op when CONFIG_SYSCTL is disabled. 
+ */ + if (!cleanup) + ip_vs_enqueue_expire_nodest_conns(ipvs); } @@ -4086,6 +4103,10 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) queue_delayed_work(system_long_wq, &ipvs->defense_work, DEFENSE_TIMER_PERIOD); + /* Init delayed work for expiring no dest conn */ + INIT_DELAYED_WORK(&ipvs->expire_nodest_conn_work, + expire_nodest_conn_handler); + return 0; } @@ -4093,6 +4114,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { struct net *net = ipvs->net; + cancel_delayed_work_sync(&ipvs->expire_nodest_conn_work); cancel_delayed_work_sync(&ipvs->defense_work); cancel_work_sync(&ipvs->defense_work.work); unregister_net_sysctl_table(ipvs->sysctl_hdr); From 954d82979b2f9dd4c20b895226799650d4841b94 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Jul 2020 15:09:39 -0500 Subject: [PATCH 1456/2056] netfilter: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebtables.c | 2 +- net/netfilter/ipset/ip_set_core.c | 2 +- net/netfilter/nf_conntrack_h323_asn1.c | 6 +++--- net/netfilter/nf_conntrack_proto.c | 2 +- net/netfilter/nf_conntrack_proto_tcp.c | 2 +- net/netfilter/nf_conntrack_standalone.c | 2 +- net/netfilter/nf_nat_core.c | 12 ++++++------ net/netfilter/nf_synproxy_core.c | 6 ++---- net/netfilter/nf_tables_api.c | 8 ++++---- net/netfilter/nf_tables_core.c | 2 +- net/netfilter/nfnetlink_cttimeout.c | 2 +- net/netfilter/nft_cmp.c | 4 ++-- net/netfilter/nft_ct.c | 6 +++--- net/netfilter/nft_fib.c | 2 +- net/netfilter/nft_payload.c | 2 +- net/netfilter/utils.c | 8 ++++---- net/netfilter/x_tables.c | 2 +- 17 files changed, 34 insertions(+), 36 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index c83ffe912163..38e946fdd041 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1935,7 +1935,7 @@ static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, size_kern = match_size; module_put(match->me); break; - case EBT_COMPAT_WATCHER: /* fallthrough */ + case EBT_COMPAT_WATCHER: case EBT_COMPAT_TARGET: wt = xt_request_find_target(NFPROTO_BRIDGE, name, mwt->u.revision); diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 56621d6bfd29..920b7c4331f0 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1644,7 +1644,7 @@ ip_set_dump_do(struct sk_buff *skb, struct netlink_callback *cb) goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); - /* fall through */ + fallthrough; default: ret = set->variant->list(set, skb, cb); if (!cb->args[IPSET_CB_ARG0]) diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 573cb4481481..e697a824b001 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -257,15 +257,15 @@ static unsigned int get_uint(struct bitstr *bs, int b) case 4: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 3: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 2: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 1: v |= 
*bs->cur++; break; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index a0560d175a7f..95f79980348c 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -610,7 +610,7 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto) switch (nfproto) { case NFPROTO_BRIDGE: nf_ct_netns_do_put(net, NFPROTO_BRIDGE); - /* fall through */ + fallthrough; case NFPROTO_INET: nf_ct_netns_do_put(net, NFPROTO_IPV4); nf_ct_netns_do_put(net, NFPROTO_IPV6); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 1926fd56df56..6892e497781c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -900,7 +900,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, return -NF_REPEAT; return NF_DROP; } - /* Fall through */ + fallthrough; case TCP_CONNTRACK_IGNORE: /* Ignored packets: * diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6a26299cb064..a604f43e3e6b 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -60,7 +60,7 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, ntohs(tuple->src.u.tcp.port), ntohs(tuple->dst.u.tcp.port)); break; - case IPPROTO_UDPLITE: /* fallthrough */ + case IPPROTO_UDPLITE: case IPPROTO_UDP: seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.udp.port), diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index bfc555fcbc72..ea923f8cf9c4 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -408,7 +408,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, static const unsigned int max_attempts = 128; switch (tuple->dst.protonum) { - case IPPROTO_ICMP: /* fallthrough */ + case IPPROTO_ICMP: case IPPROTO_ICMPV6: /* id is same for either direction... 
*/ keyptr = &tuple->src.u.icmp.id; @@ -442,11 +442,11 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, } goto find_free_id; #endif - case IPPROTO_UDP: /* fallthrough */ - case IPPROTO_UDPLITE: /* fallthrough */ - case IPPROTO_TCP: /* fallthrough */ - case IPPROTO_SCTP: /* fallthrough */ - case IPPROTO_DCCP: /* fallthrough */ + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_DCCP: if (maniptype == NF_NAT_MANIP_SRC) keyptr = &tuple->src.u.all; else diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index ebcdc8e54476..9cca35d22927 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -704,8 +704,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb, nf_ct_seqadj_init(ct, ctinfo, 0); synproxy->tsoff = 0; this_cpu_inc(snet->stats->conn_reopened); - - /* fall through */ + fallthrough; case TCP_CONNTRACK_SYN_SENT: if (!synproxy_parse_options(skb, thoff, th, &opts)) return NF_DROP; @@ -1128,8 +1127,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb, nf_ct_seqadj_init(ct, ctinfo, 0); synproxy->tsoff = 0; this_cpu_inc(snet->stats->conn_reopened); - - /* fall through */ + fallthrough; case TCP_CONNTRACK_SYN_SENT: if (!synproxy_parse_options(skb, thoff, th, &opts)) return NF_DROP; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 6708a4f2eec8..0d96e4eb754d 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4375,7 +4375,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: set->use--; - /* fall through */ + fallthrough; default: nf_tables_unbind_set(ctx, set, binding, phase == NFT_TRANS_COMMIT); @@ -6256,7 +6256,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: flowtable->use--; - /* fall through */ + fallthrough; default: return; } @@ -7262,7 +7262,7 @@ static int nf_tables_validate(struct net *net) break; case NFT_VALIDATE_NEED: nft_validate_state_update(net, NFT_VALIDATE_DO); - /* fall through */ + fallthrough; case NFT_VALIDATE_DO: list_for_each_entry(table, &net->nft.tables, list) { if (nft_table_validate(net, table) < 0) @@ -8336,7 +8336,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, default: return -EINVAL; } - /* fall through */ + fallthrough; case NFT_CONTINUE: case NFT_BREAK: case NFT_RETURN: diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 96c74c4c7176..587897a2498b 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -213,7 +213,7 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv) jumpstack[stackptr].chain = chain; jumpstack[stackptr].rules = rules + 1; stackptr++; - /* fall through */ + fallthrough; case NFT_GOTO: nft_trace_packet(&info, chain, rule, NFT_TRACETYPE_RULE); diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index da915c224a82..89a381f7f945 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -451,7 +451,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl, case IPPROTO_TCP: timeouts = nf_tcp_pernet(net)->timeouts; break; - case IPPROTO_UDP: /* fallthrough */ + case IPPROTO_UDP: case IPPROTO_UDPLITE: timeouts = nf_udp_pernet(net)->timeouts; break; diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index 
8a28c127effc..16f4d84599ac 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -43,7 +43,7 @@ void nft_cmp_eval(const struct nft_expr *expr, case NFT_CMP_LT: if (d == 0) goto mismatch; - /* fall through */ + fallthrough; case NFT_CMP_LTE: if (d > 0) goto mismatch; @@ -51,7 +51,7 @@ void nft_cmp_eval(const struct nft_expr *expr, case NFT_CMP_GT: if (d == 0) goto mismatch; - /* fall through */ + fallthrough; case NFT_CMP_GTE: if (d < 0) goto mismatch; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 77258af1fce0..322bd674963e 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -129,7 +129,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, return; } #endif - case NFT_CT_BYTES: /* fallthrough */ + case NFT_CT_BYTES: case NFT_CT_PKTS: { const struct nf_conn_acct *acct = nf_conn_acct_find(ct); u64 count = 0; @@ -1013,8 +1013,8 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, help6 = nf_conntrack_helper_try_module_get(name, family, priv->l4proto); break; - case NFPROTO_NETDEV: /* fallthrough */ - case NFPROTO_BRIDGE: /* same */ + case NFPROTO_NETDEV: + case NFPROTO_BRIDGE: case NFPROTO_INET: help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4, priv->l4proto); diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c index cfac0964f48d..4dfdaeaf09a5 100644 --- a/net/netfilter/nft_fib.c +++ b/net/netfilter/nft_fib.c @@ -32,7 +32,7 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, unsigned int hooks; switch (priv->result) { - case NFT_FIB_RESULT_OIF: /* fallthrough */ + case NFT_FIB_RESULT_OIF: case NFT_FIB_RESULT_OIFNAME: hooks = (1 << NF_INET_PRE_ROUTING); break; diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c index a7de3a58f553..ed7cb9f747f6 100644 --- a/net/netfilter/nft_payload.c +++ b/net/netfilter/nft_payload.c @@ -467,7 +467,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt, case IPPROTO_UDP: if (!nft_payload_udp_checksum(skb, pkt->xt.thoff)) return -1; - /* Fall through. 
*/ + fallthrough; case IPPROTO_UDPLITE: *l4csum_offset = offsetof(struct udphdr, check); break; diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c index 51b454d8fa9c..cedf47ab3c6f 100644 --- a/net/netfilter/utils.c +++ b/net/netfilter/utils.c @@ -25,7 +25,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, skb->ip_summed = CHECKSUM_UNNECESSARY; break; } - /* fall through */ + fallthrough; case CHECKSUM_NONE: if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) skb->csum = 0; @@ -51,7 +51,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip_checksum(skb, hook, dataoff, protocol); - /* fall through */ + fallthrough; case CHECKSUM_NONE: skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, skb->len - dataoff, 0); @@ -79,7 +79,7 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, skb->ip_summed = CHECKSUM_UNNECESSARY; break; } - /* fall through */ + fallthrough; case CHECKSUM_NONE: skb->csum = ~csum_unfold( csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, @@ -106,7 +106,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip6_checksum(skb, hook, dataoff, protocol); - /* fall through */ + fallthrough; case CHECKSUM_NONE: hsum = skb_checksum(skb, 0, dataoff, 0); skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 99a468be4a59..8b2daccaf8df 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1571,7 +1571,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos, trav->curr = trav->curr->next; if (trav->curr != trav->head) break; - /* fall through */ + fallthrough; default: return NULL; } From c1d069e3bfc9e4021f087016578fbff209f493fd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 21 Jul 2020 21:08:54 +0200 Subject: [PATCH 1457/2056] mptcp: move helper to where its used Only used in token.c. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/mptcp/protocol.h | 11 ----------- net/mptcp/token.c | 12 ++++++++++++ 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index e5baaef5ec89..6e114c09e5b4 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -396,17 +396,6 @@ struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, void mptcp_token_destroy(struct mptcp_sock *msk); void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn); -static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn) -{ - /* we might consider a faster version that computes the key as a - * hash of some information available in the MPTCP socket. Use - * random data at the moment, as it's probably the safest option - * in case multiple sockets are opened in different namespaces at - * the same time. 
- */ - get_random_bytes(key, sizeof(u64)); - mptcp_crypto_key_sha(*key, token, idsn); -} void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac); diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 7d8106026081..b25b390dbbff 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -83,6 +83,18 @@ static bool __token_bucket_busy(struct token_bucket *t, u32 token) __token_lookup_req(t, token) || __token_lookup_msk(t, token); } +static void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn) +{ + /* we might consider a faster version that computes the key as a + * hash of some information available in the MPTCP socket. Use + * random data at the moment, as it's probably the safest option + * in case multiple sockets are opened in different namespaces at + * the same time. + */ + get_random_bytes(key, sizeof(u64)); + mptcp_crypto_key_sha(*key, token, idsn); +} + /** * mptcp_token_new_request - create new key/idsn/token for subflow_request * @req: the request socket From e3ec13be571b9da61011bf41ef097fe9bbbc3041 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Tue, 21 Jul 2020 19:38:23 +0300 Subject: [PATCH 1458/2056] dpaa2-eth: move the mqprio setup into a separate function Move the setup done for MQPRIO into a separate function so that with the addition of another offload we do not crowd dpaa2_eth_setup_tc(). After this restructuring it's easier to see what is supported in terms of Qdisc offloading. Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- .../net/ethernet/freescale/dpaa2/dpaa2-eth.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index d0cc1dc49aaa..18e0c00074ff 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2213,17 +2213,13 @@ static int update_xps(struct dpaa2_eth_priv *priv) return err; } -static int dpaa2_eth_setup_tc(struct net_device *net_dev, - enum tc_setup_type type, void *type_data) +static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, + struct tc_mqprio_qopt *mqprio) { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc, num_queues; int i; - if (type != TC_SETUP_QDISC_MQPRIO) - return -EOPNOTSUPP; - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_queues = dpaa2_eth_queue_count(priv); num_tc = mqprio->num_tc; @@ -2255,6 +2251,17 @@ static int dpaa2_eth_setup_tc(struct net_device *net_dev, return 0; } +static int dpaa2_eth_setup_tc(struct net_device *net_dev, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_QDISC_MQPRIO: + return dpaa2_eth_setup_mqprio(net_dev, type_data); + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops dpaa2_eth_ops = { .ndo_open = dpaa2_eth_open, .ndo_start_xmit = dpaa2_eth_tx, From 39344a89623d77ffa4cf9354d85fd6e58467bb87 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Tue, 21 Jul 2020 19:38:24 +0300 Subject: [PATCH 1459/2056] dpaa2-eth: add API for Tx shaping Add the necessary API (dpni_set_tx_shaping) for configuring the rate and burst size of a per port shaper in DPAA2. Signed-off-by: Ioana Ciornei Signed-off-by: David S. 
Miller --- .../net/ethernet/freescale/dpaa2/dpni-cmd.h | 13 +++++++ drivers/net/ethernet/freescale/dpaa2/dpni.c | 36 +++++++++++++++++++ drivers/net/ethernet/freescale/dpaa2/dpni.h | 16 +++++++++ 3 files changed, 65 insertions(+) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h index fd069f67be9b..593e3812af93 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -626,4 +626,17 @@ struct dpni_cmd_set_congestion_notification { __le32 threshold_exit; }; +#define DPNI_COUPLED_SHIFT 0 +#define DPNI_COUPLED_SIZE 1 + +struct dpni_cmd_set_tx_shaping { + __le16 tx_cr_max_burst_size; + __le16 tx_er_max_burst_size; + __le32 pad; + __le32 tx_cr_rate_limit; + __le32 tx_er_rate_limit; + /* from LSB: coupled:1 */ + u8 coupled; +}; + #endif /* _FSL_DPNI_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c index 426100607854..68ed4c41b282 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -1963,3 +1963,39 @@ int dpni_clear_qos_table(struct fsl_mc_io *mc_io, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } + +/** + * dpni_set_tx_shaping() - Set the transmit shaping + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tx_cr_shaper: TX committed rate shaping configuration + * @tx_er_shaper: TX excess rate shaping configuration + * @coupled: Committed and excess rate shapers are coupled + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_tx_shaping_cfg *tx_cr_shaper, + const struct dpni_tx_shaping_cfg *tx_er_shaper, + int coupled) +{ + struct dpni_cmd_set_tx_shaping *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params; + cmd_params->tx_cr_max_burst_size = cpu_to_le16(tx_cr_shaper->max_burst_size); + cmd_params->tx_er_max_burst_size = cpu_to_le16(tx_er_shaper->max_burst_size); + cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit); + cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit); + dpni_set_field(cmd_params->coupled, COUPLED, coupled); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h index e874d8084142..39387991a1f9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpni.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -1062,5 +1062,21 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 *major_ver, u16 *minor_ver); +/** + * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration + * @rate_limit: Rate in Mbps + * @max_burst_size: Burst size in bytes (up to 64KB) + */ +struct dpni_tx_shaping_cfg { + u32 rate_limit; + u16 max_burst_size; +}; + +int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_tx_shaping_cfg *tx_cr_shaper, + const struct dpni_tx_shaping_cfg *tx_er_shaper, + int coupled); #endif /* __FSL_DPNI_H */ From 3657cdaf03a6f751b6f6c09381b1a9e016678069 Mon Sep 17 00:00:00 2001 From: Ioana Ciornei Date: Tue, 21 Jul 2020 19:38:25 +0300 
Subject: [PATCH 1460/2056] dpaa2-eth: add support for TBF offload React to TC_SETUP_QDISC_TBF and configure the egress shaper as appropriate with the maximum rate and burst size requested by the user. TBF can only be offloaded on DPAA2 when it's the root qdisc, ie it's a per port shaper. Signed-off-by: Ioana Ciornei Signed-off-by: David S. Miller --- .../net/ethernet/freescale/dpaa2/dpaa2-eth.c | 46 ++++++++++++++++++- .../net/ethernet/freescale/dpaa2/dpaa2-eth.h | 3 ++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 18e0c00074ff..c1bea9132f50 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "dpaa2-eth.h" @@ -2251,12 +2252,55 @@ static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, return 0; } +#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8) + +static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p) +{ + struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 }; + struct dpni_tx_shaping_cfg tx_er_shaper = { 0 }; + int err; + + if (p->command == TC_TBF_STATS) + return -EOPNOTSUPP; + + /* Only per port Tx shaping */ + if (p->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + if (p->command == TC_TBF_REPLACE) { + if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { + netdev_err(net_dev, "burst size cannot be greater than %d\n", + DPAA2_ETH_MAX_BURST_SIZE); + return -EINVAL; + } + + tx_cr_shaper.max_burst_size = cfg->max_size; + /* The TBF interface is in bytes/s, whereas DPAA2 expects the + * rate in Mbits/s + */ + tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); + } + + err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper, + &tx_er_shaper, 0); + if (err) { + netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err); + return err; + } + + return 0; +} + static int dpaa2_eth_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data) { switch (type) { case TC_SETUP_QDISC_MQPRIO: return dpaa2_eth_setup_mqprio(net_dev, type_data); + case TC_SETUP_QDISC_TBF: + return dpaa2_eth_setup_tbf(net_dev, type_data); default: return -EOPNOTSUPP; } @@ -3726,7 +3770,7 @@ static int netdev_init(struct net_device *net_dev) net_dev->features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | - NETIF_F_LLTX; + NETIF_F_LLTX | NETIF_F_HW_TC; net_dev->hw_features = net_dev->features; return 0; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 9138a35a68f9..7f3c41dc98f2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -43,6 +43,9 @@ */ #define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024) +/* Maximum burst size value for Tx shaping */ +#define DPAA2_ETH_MAX_BURST_SIZE 0xF7FF + /* Maximum number of Tx confirmation frames to be processed * in a single NAPI call */ From 4787dd582dbde0b7f29eb3dbe59df3da1b350925 Mon Sep 17 00:00:00 2001 From: Martin Varghese Date: Fri, 17 Jul 2020 08:05:12 +0530 Subject: [PATCH 1461/2056] bareudp: Reverted support to enable & disable rx metadata collection The commit fe80536acf83 ("bareudp: Added attribute to enable & disable rx metadata collection") 
breaks the original (5.7) default behavior of the bareudp module, which is to collect RX metadata on receive. It was added to avoid a crash in the kernel neighbour subsystem when a packet with metadata from bareudp is processed. But it is no longer needed, as commit 394de110a733 ("net: Added pointer check for dst->ops->neigh_lookup in dst_neigh_lookup_skb") solves this crash. Fixes: fe80536acf83 ("bareudp: Added attribute to enable & disable rx metadata collection") Signed-off-by: Martin Varghese Acked-by: Guillaume Nault Signed-off-by: David S. Miller --- Documentation/networking/bareudp.rst | 6 ++---- drivers/net/bareudp.c | 21 +++++---------------- include/net/bareudp.h | 1 - include/uapi/linux/if_link.h | 1 - 4 files changed, 7 insertions(+), 22 deletions(-) diff --git a/Documentation/networking/bareudp.rst b/Documentation/networking/bareudp.rst index 0e00636d8d74..465a8b251bfe 100644 --- a/Documentation/networking/bareudp.rst +++ b/Documentation/networking/bareudp.rst @@ -48,7 +48,5 @@ enabled. The bareudp device could be used along with OVS or flower filter in TC. The OVS or TC flower layer must set the tunnel information in SKB dst field before sending packet buffer to the bareudp device for transmission. On reception the -bareudp device decapsulates the udp header and passes the inner packet to the -network stack. If RX_COLLECT_METADATA flag is enabled in the device the tunnel -information will be stored in the SKB dst field before the packet buffer is -passed to the network stack. +bareudp device extracts and stores the tunnel information in SKB dst field before +passing the packet buffer to the network stack. diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 108a8cafc4f8..44eb2b1d0416 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -46,7 +46,6 @@ struct bareudp_dev { __be16 port; u16 sport_min; bool multi_proto_mode; - bool rx_collect_metadata; struct socket __rcu *sock; struct list_head next; /* bareudp node on namespace list */ struct gro_cells gro_cells; @@ -126,14 +125,12 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) bareudp->dev->stats.rx_dropped++; goto drop; } - if (bareudp->rx_collect_metadata) { - tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); - if (!tun_dst) { - bareudp->dev->stats.rx_dropped++; - goto drop; - } - skb_dst_set(skb, &tun_dst->dst); + tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0); + if (!tun_dst) { + bareudp->dev->stats.rx_dropped++; + goto drop; } + skb_dst_set(skb, &tun_dst->dst); skb->dev = bareudp->dev; oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -577,9 +574,6 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf, if (data[IFLA_BAREUDP_MULTIPROTO_MODE]) conf->multi_proto_mode = true; - if (data[IFLA_BAREUDP_RX_COLLECT_METADATA]) - conf->rx_collect_metadata = true; - return 0; } @@ -617,7 +611,6 @@ static int bareudp_configure(struct net *net, struct net_device *dev, bareudp->ethertype = conf->ethertype; bareudp->sport_min = conf->sport_min; bareudp->multi_proto_mode = conf->multi_proto_mode; - bareudp->rx_collect_metadata = conf->rx_collect_metadata; err = register_netdevice(dev); if (err) @@ -676,7 +669,6 @@ static size_t bareudp_get_size(const struct net_device *dev) nla_total_size(sizeof(__be16)) + /* IFLA_BAREUDP_ETHERTYPE */ nla_total_size(sizeof(__u16)) + /* IFLA_BAREUDP_SRCPORT_MIN */ nla_total_size(0) + /* IFLA_BAREUDP_MULTIPROTO_MODE */ - nla_total_size(0) + /* IFLA_BAREUDP_RX_COLLECT_METADATA */ 0; } @@ -693,9 +685,6 @@
static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev) if (bareudp->multi_proto_mode && nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE)) goto nla_put_failure; - if (bareudp->rx_collect_metadata && - nla_put_flag(skb, IFLA_BAREUDP_RX_COLLECT_METADATA)) - goto nla_put_failure; return 0; diff --git a/include/net/bareudp.h b/include/net/bareudp.h index 3dd5f9a8d01c..dc65a0d71d9b 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -12,7 +12,6 @@ struct bareudp_conf { __be16 port; u16 sport_min; bool multi_proto_mode; - bool rx_collect_metadata; }; struct net_device *bareudp_dev_create(struct net *net, const char *name, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 26842ffd0501..af8f31987526 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -601,7 +601,6 @@ enum { IFLA_BAREUDP_ETHERTYPE, IFLA_BAREUDP_SRCPORT_MIN, IFLA_BAREUDP_MULTIPROTO_MODE, - IFLA_BAREUDP_RX_COLLECT_METADATA, __IFLA_BAREUDP_MAX }; From 4b03b27349c03b72a084b2484623a744565bc399 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:04 -0700 Subject: [PATCH 1462/2056] ionic: get MTU from lif identity Change from using hardcoded MTU limits and instead use the firmware defined limits. The value from the LIF attributes is the frame size, so we take off the header size to convert to MTU size. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 2 -- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 17 ++++++++++++++--- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 1 + 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index 525434f10025..d5cba502abca 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -10,8 +10,6 @@ #include "ionic_if.h" #include "ionic_regs.h" -#define IONIC_MIN_MTU ETH_MIN_MTU -#define IONIC_MAX_MTU 9194 #define IONIC_MAX_TX_DESC 8192 #define IONIC_MAX_RX_DESC 16384 #define IONIC_MIN_TXRX_DESC 16 diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index f49486b6d04d..cfcef41b7b23 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -2022,11 +2023,16 @@ int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg) static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index) { struct device *dev = ionic->dev; + union ionic_lif_identity *lid; struct net_device *netdev; struct ionic_lif *lif; int tbl_sz; int err; + lid = kzalloc(sizeof(*lid), GFP_KERNEL); + if (!lid) + return ERR_PTR(-ENOMEM); + netdev = alloc_etherdev_mqs(sizeof(*lif), ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); if (!netdev) { @@ -2045,8 +2051,12 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index netdev->watchdog_timeo = 2 * HZ; netif_carrier_off(netdev); - netdev->min_mtu = IONIC_MIN_MTU; - netdev->max_mtu = IONIC_MAX_MTU; + lif->identity = lid; + lif->lif_type = IONIC_LIF_TYPE_CLASSIC; + ionic_lif_identify(ionic, lif->lif_type, lif->identity); + lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size); + lif->netdev->max_mtu = + le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN; lif->neqs = 
ionic->neqs_per_lif; lif->nxqs = ionic->ntxqs_per_lif; @@ -2113,6 +2123,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index err_out_free_netdev: free_netdev(lif->netdev); lif = NULL; + kfree(lid); return ERR_PTR(err); } @@ -2132,7 +2143,6 @@ int ionic_lifs_alloc(struct ionic *ionic) return -ENOMEM; } - lif->lif_type = IONIC_LIF_TYPE_CLASSIC; ionic_lif_queue_identify(lif); return 0; @@ -2243,6 +2253,7 @@ static void ionic_lif_free(struct ionic_lif *lif) ionic_lif_reset(lif); /* free lif info */ + kfree(lif->identity); dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); lif->info = NULL; lif->info_pa = 0; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index ed126dd74e01..949f96dc9cd8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -184,6 +184,7 @@ struct ionic_lif { u16 lif_type; unsigned int nucast; + union ionic_lif_identity *identity; struct ionic_lif_info *info; dma_addr_t info_pa; u32 info_sz; From c8768e7321d2f2c69c3467928a2e63630e682e22 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:05 -0700 Subject: [PATCH 1463/2056] ionic: set netdev default name If the host system's udev fails to set a new name for the network port, there is no NETDEV_CHANGENAME event to trigger the driver to send the name down to the firmware. It is safe to set the lif name multiple times, so we add a call early on to set the default netdev name to be sure the FW has something to use in its internal debug logging. Then when udev gets around to changing it we can update it to the actual name the system will be using. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index cfcef41b7b23..bbfa25cd3294 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2631,6 +2631,7 @@ int ionic_lifs_register(struct ionic *ionic) return err; } ionic->master_lif->registered = true; + ionic_lif_set_netdev_info(ionic->master_lif); return 0; } From 4471b1c13ae7e3c8be64a9e015c6bc31468e1b16 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:06 -0700 Subject: [PATCH 1464/2056] ionic: remove unused ionic_coal_hw_to_usec Clean up some unused code. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/pensando/ionic/ionic_lif.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 949f96dc9cd8..f1e7d3ef1c58 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -236,19 +236,6 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs) return (usecs * mult) / div; } -static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units) -{ - u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult); - u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div); - - /* Div-by-zero should never be an issue, but check anyway */ - if (!div || !mult) - return 0; - - /* Convert from device units to usec */ - return (units * div) / mult; -} - typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg); void ionic_link_status_check_request(struct ionic_lif *lif); From 3fbc9bb6ca32d12d4d32a7ae32abef67ac95f889 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:07 -0700 Subject: [PATCH 1465/2056] ionic: update eid test for overflow Fix up our comparison to better handle a potential (but largely unlikely) wrap around. Signed-off-by: Shannon Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index bbfa25cd3294..db60c5405a58 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -719,7 +719,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq, eid = le64_to_cpu(comp->event.eid); /* Have we run out of new completions to process? */ - if (eid <= lif->last_eid) + if ((s64)(eid - lif->last_eid) <= 0) return false; lif->last_eid = eid; From 6a6014e2fb276753d4dc9b803370e7af7f57e30b Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:08 -0700 Subject: [PATCH 1466/2056] ionic: rearrange reset and bus-master control We can prevent potential incorrect DMA access attempts from the NIC by enabling bus-master after the reset, and by disabling bus-master earlier in cleanup. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 2924cde440aa..85c686c16741 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -247,12 +247,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_pci_disable_device; } - pci_set_master(pdev); pcie_print_link_status(pdev); err = ionic_map_bars(ionic); if (err) - goto err_out_pci_clear_master; + goto err_out_pci_disable_device; /* Configure the device */ err = ionic_setup(ionic); @@ -260,6 +259,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(dev, "Cannot setup device: %d, aborting\n", err); goto err_out_unmap_bars; } + pci_set_master(pdev); err = ionic_identify(ionic); if (err) { @@ -350,6 +350,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ionic_reset(ionic); err_out_teardown: ionic_dev_teardown(ionic); + pci_clear_master(pdev); /* Don't fail the probe for these errors, keep * the hw interface around for inspection */ @@ -358,8 +359,6 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_out_unmap_bars: ionic_unmap_bars(ionic); pci_release_regions(pdev); -err_out_pci_clear_master: - pci_clear_master(pdev); err_out_pci_disable_device: pci_disable_device(pdev); err_out_debugfs_del_dev: @@ -389,9 +388,9 @@ static void ionic_remove(struct pci_dev *pdev) ionic_port_reset(ionic); ionic_reset(ionic); ionic_dev_teardown(ionic); + pci_clear_master(pdev); ionic_unmap_bars(ionic); pci_release_regions(pdev); - pci_clear_master(pdev); pci_disable_device(pdev); ionic_debugfs_del_dev(ionic); mutex_destroy(&ionic->dev_cmd_lock); From 1b897e7d8d4496e81c1e896ab516536a6f509a95 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 21 Jul 2020 13:34:09 -0700 Subject: [PATCH 1467/2056] ionic: interface file updates Add some new interface values and update a few more descriptions. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- .../net/ethernet/pensando/ionic/ionic_if.h | 88 ++++++++++++++----- 1 file changed, 68 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 7e22ba4ed915..acc94b244cf3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -59,6 +59,8 @@ enum ionic_cmd_opcode { IONIC_CMD_QOS_CLASS_INIT = 241, IONIC_CMD_QOS_CLASS_RESET = 242, IONIC_CMD_QOS_CLASS_UPDATE = 243, + IONIC_CMD_QOS_CLEAR_STATS = 244, + IONIC_CMD_QOS_RESET = 245, /* Firmware commands */ IONIC_CMD_FW_DOWNLOAD = 254, @@ -90,8 +92,8 @@ enum ionic_status_code { IONIC_RC_DEV_CMD = 18, /* Device cmd attempted on AdminQ */ IONIC_RC_ENOSUPP = 19, /* Operation not supported */ IONIC_RC_ERROR = 29, /* Generic error */ - IONIC_RC_ERDMA = 30, /* Generic RDMA error */ + IONIC_RC_EVFID = 31, /* VF ID does not exist */ }; enum ionic_notifyq_opcode { @@ -103,7 +105,7 @@ enum ionic_notifyq_opcode { }; /** - * struct cmd - General admin command format + * struct ionic_admin_cmd - General admin command format * @opcode: Opcode for the command * @lif_index: LIF index * @cmd_data: Opcode-specific command bytes @@ -167,7 +169,7 @@ struct ionic_dev_init_cmd { }; /** - * struct init_comp - Device init command completion + * struct ionic_dev_init_comp - Device init command completion * @status: Status of the command (enum ionic_status_code) */ struct ionic_dev_init_comp { @@ -185,7 +187,7 @@ struct ionic_dev_reset_cmd { }; /** - * struct reset_comp - Reset command completion + * struct ionic_dev_reset_comp - Reset command completion * @status: Status of the command (enum ionic_status_code) */ struct ionic_dev_reset_comp { @@ -357,12 +359,12 @@ struct ionic_lif_logical_qtype { * enum ionic_lif_state - LIF state * @IONIC_LIF_DISABLE: LIF disabled * @IONIC_LIF_ENABLE: LIF enabled - * @IONIC_LIF_HANG_RESET: LIF hung, being reset + * @IONIC_LIF_QUIESCE: LIF Quiesced */ enum ionic_lif_state { - IONIC_LIF_DISABLE = 0, + IONIC_LIF_QUIESCE = 0, IONIC_LIF_ENABLE = 1, - IONIC_LIF_HANG_RESET = 2, + IONIC_LIF_DISABLE = 2, }; /** @@ -371,6 +373,7 @@ enum ionic_lif_state { * @name: LIF name * @mtu: MTU * @mac: Station MAC address + * @vlan: Default Vlan ID * @features: Features (enum ionic_eth_hw_features) * @queue_count: Queue counts per queue-type */ @@ -381,7 +384,7 @@ union ionic_lif_config { char name[IONIC_IFNAMSIZ]; __le32 mtu; u8 mac[6]; - u8 rsvd2[2]; + __le16 vlan; __le64 features; __le32 queue_count[IONIC_QTYPE_MAX]; } __packed; @@ -489,13 +492,13 @@ struct ionic_lif_init_comp { u8 rsvd2[12]; }; - /** - * struct ionic_q_identify_cmd - queue identify command - * @opcode: opcode - * @lif_type: LIF type (enum ionic_lif_type) - * @type: Logical queue type (enum ionic_logical_qtype) - * @ver: Highest queue type version that the driver supports - */ +/** + * struct ionic_q_identify_cmd - queue identify command + * @opcode: opcode + * @lif_type: LIF type (enum ionic_lif_type) + * @type: Logical queue type (enum ionic_logical_qtype) + * @ver: Highest queue type version that the driver supports + */ struct ionic_q_identify_cmd { u8 opcode; u8 rsvd; @@ -983,6 +986,14 @@ enum ionic_pkt_type { IONIC_PKT_TYPE_IPV6 = 0x008, IONIC_PKT_TYPE_IPV6_TCP = 0x018, IONIC_PKT_TYPE_IPV6_UDP = 0x028, + /* below types are only used if encap offloads are enabled on lif */ + IONIC_PKT_TYPE_ENCAP_NON_IP = 0x40, + IONIC_PKT_TYPE_ENCAP_IPV4 = 0x41, + IONIC_PKT_TYPE_ENCAP_IPV4_TCP = 0x43, + IONIC_PKT_TYPE_ENCAP_IPV4_UDP = 0x45, + 
IONIC_PKT_TYPE_ENCAP_IPV6 = 0x48, + IONIC_PKT_TYPE_ENCAP_IPV6_TCP = 0x58, + IONIC_PKT_TYPE_ENCAP_IPV6_UDP = 0x68, }; enum ionic_eth_hw_features { @@ -1003,6 +1014,9 @@ enum ionic_eth_hw_features { IONIC_ETH_HW_TSO_IPXIP6 = BIT(14), IONIC_ETH_HW_TSO_UDP = BIT(15), IONIC_ETH_HW_TSO_UDP_CSUM = BIT(16), + IONIC_ETH_HW_RX_CSUM_GENEVE = BIT(17), + IONIC_ETH_HW_TX_CSUM_GENEVE = BIT(18), + IONIC_ETH_HW_TSO_GENEVE = BIT(19) }; /** @@ -1011,7 +1025,7 @@ enum ionic_eth_hw_features { * @type: Queue type * @lif_index: LIF index * @index: Queue index - * @oper: Operation (enum q_control_oper) + * @oper: Operation (enum ionic_q_control_oper) */ struct ionic_q_control_cmd { u8 opcode; @@ -1172,7 +1186,7 @@ enum ionic_port_loopback_mode { * struct ionic_xcvr_status - Transceiver Status information * @state: Transceiver status (enum ionic_xcvr_state) * @phy: Physical connection type (enum ionic_phy_type) - * @pid: Transceiver link mode (enum pid) + * @pid: Transceiver link mode (enum ionic_xcvr_pid) * @sprom: Transceiver sprom contents */ struct ionic_xcvr_status { @@ -1186,7 +1200,7 @@ struct ionic_xcvr_status { * union ionic_port_config - Port configuration * @speed: port speed (in Mbps) * @mtu: mtu - * @state: port admin state (enum port_admin_state) + * @state: port admin state (enum ionic_port_admin_state) * @an_enable: autoneg enable * @fec_type: fec type (enum ionic_port_fec_type) * @pause_type: pause type (enum ionic_port_pause_type) @@ -1874,12 +1888,14 @@ struct ionic_qos_identify_comp { }; #define IONIC_QOS_TC_MAX 8 +#define IONIC_QOS_ALL_TC 0xFF /* Capri max supported, should be renamed. */ #define IONIC_QOS_CLASS_MAX 7 #define IONIC_QOS_PCP_MAX 8 #define IONIC_QOS_CLASS_NAME_SZ 32 #define IONIC_QOS_DSCP_MAX 64 #define IONIC_QOS_ALL_PCP 0xFF +#define IONIC_DSCP_BLOCK_SIZE 8 /** * enum ionic_qos_class @@ -1923,6 +1939,7 @@ enum ionic_qos_sched_type { * IONIC_QOS_CONFIG_F_NO_DROP drop/nodrop * IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP enable dot1q pcp rewrite * IONIC_QOS_CONFIG_F_RW_IP_DSCP enable ip dscp rewrite + * IONIC_QOS_CONFIG_F_NON_DISRUPTIVE Non-disruptive TC update * @sched_type: QoS class scheduling type (enum ionic_qos_sched_type) * @class_type: QoS class type (enum ionic_qos_class_type) * @pause_type: QoS pause type (enum ionic_qos_pause_type) @@ -1944,6 +1961,8 @@ union ionic_qos_config { /* Used to rewrite PCP or DSCP value. 
*/ #define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP BIT(2) #define IONIC_QOS_CONFIG_F_RW_IP_DSCP BIT(3) +/* Non-disruptive TC update */ +#define IONIC_QOS_CONFIG_F_NON_DISRUPTIVE BIT(4) u8 flags; u8 sched_type; u8 class_type; @@ -2019,6 +2038,16 @@ struct ionic_qos_reset_cmd { u8 rsvd[62]; }; +/** + * struct ionic_qos_clear_port_stats_cmd - Qos config reset command + * @opcode: Opcode + */ +struct ionic_qos_clear_stats_cmd { + u8 opcode; + u8 group_bitmap; + u8 rsvd[62]; +}; + typedef struct ionic_admin_comp ionic_qos_reset_comp; /** @@ -2164,7 +2193,7 @@ struct ionic_notifyq_event { * struct ionic_link_change_event - Link change event notification * @eid: event number * @ecode: event code = IONIC_EVENT_LINK_CHANGE - * @link_status: link up or down, with error bits (enum port_status) + * @link_status: link up/down, with error bits (enum ionic_port_status) * @link_speed: speed of the network link * * Sent when the network link state changes between UP and DOWN @@ -2377,6 +2406,16 @@ enum ionic_pb_buffer_drop_stats { IONIC_BUFFER_DROP_MAX, }; +enum ionic_oflow_drop_stats { + IONIC_OFLOW_OCCUPANCY_DROP, + IONIC_OFLOW_EMERGENCY_STOP_DROP, + IONIC_OFLOW_WRITE_BUFFER_ACK_FILL_UP_DROP, + IONIC_OFLOW_WRITE_BUFFER_ACK_FULL_DROP, + IONIC_OFLOW_WRITE_BUFFER_FULL_DROP, + IONIC_OFLOW_CONTROL_FIFO_FULL_DROP, + IONIC_OFLOW_DROP_MAX, +}; + /** * struct port_pb_stats - packet buffers system stats * uses ionic_pb_buffer_drop_stats for drop_counts[] @@ -2390,12 +2429,20 @@ struct ionic_port_pb_stats { __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX]; __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX]; __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX]; + __le64 oflow_drop_counts[IONIC_OFLOW_DROP_MAX]; + __le64 input_queue_good_pkts_in[IONIC_QOS_TC_MAX]; + __le64 input_queue_good_pkts_out[IONIC_QOS_TC_MAX]; + __le64 input_queue_err_pkts_in[IONIC_QOS_TC_MAX]; + __le64 input_queue_fifo_depth[IONIC_QOS_TC_MAX]; + __le64 input_queue_max_fifo_depth[IONIC_QOS_TC_MAX]; + __le64 input_queue_peak_occupancy[IONIC_QOS_TC_MAX]; + __le64 output_queue_buffer_occupancy[IONIC_QOS_TC_MAX]; }; /** * struct ionic_port_identity - port identity structure * @version: identity structure version - * @type: type of port (enum port_type) + * @type: type of port (enum ionic_port_type) * @num_lanes: number of lanes for the port * @autoneg: autoneg supported * @min_frame_size: minimum frame size supported @@ -2637,6 +2684,7 @@ union ionic_dev_cmd { struct ionic_qos_identify_cmd qos_identify; struct ionic_qos_init_cmd qos_init; struct ionic_qos_reset_cmd qos_reset; + struct ionic_qos_clear_stats_cmd qos_clear_stats; struct ionic_q_identify_cmd q_identify; struct ionic_q_init_cmd q_init; From a6c0d0934f0d8d4e32a3e3bca4b503a3e7237470 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 22 Jul 2020 09:40:27 +0200 Subject: [PATCH 1468/2056] net: explicitly include in net/core/sock.c The buildbot found a config where the header isn't already implicitly pulled in, so add an explicit include as well. Fixes: 8c918ffbbad4 ("net: remove compat_sock_common_{get,set}sockopt") Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/core/sock.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/core/sock.c b/net/core/sock.c index d828bfe1c47d..6da54eac2b34 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -113,6 +113,7 @@ #include #include #include +#include #include From 8bb849d67f44b1a1280cda900ea02fdbc36d4363 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 22 Jul 2020 11:08:57 +0300 Subject: [PATCH 1469/2056] net: mscc: ocelot: fix non-initialized CPU port on VSC7514 The VSC7514 is marketed as a 10-port switch, however it has 11 physical ports (0->10) in the block diagram: https://www.microsemi.com/product-directory/ethernet-switches/3992-vsc7514 (also in the device tree at arch/mips/boot/dts/mscc/ocelot.dtsi) Additionally, by architecture it has one more entry in the analyzer block, situated right after the physical ports, for the CPU port module. This is not a physical port, it only represents a channel for frame injection and extraction. That entry for the CPU port is at index 11 in the analyzer. When the register groups for QSYS_SWITCH_PORT_MODE, SYS_PORT_MODE and SYS_PAUSE_CFG are declared to be replicated 11 times, the 11th entry in the array of regfields is not initialized, so the CPU port module is not initialized either. The documentation of QSYS_SWITCH_PORT_MODE for VSC7514 also says that this register group is replicated 12 times, so this patch is simply reflecting that and not introducing any further inconsistency. Fixes: 886e1387c73d ("net: mscc: ocelot: convert QSYS_SWITCH_PORT_MODE and SYS_PORT_MODE to regfields") Fixes: 541132f0961a ("net: mscc: ocelot: convert SYS_PAUSE_CFG register access to regfield") Reported-by: Bryan Whitehead Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/mscc/ocelot_vsc7514.c | 28 +++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 0ead1ef11c6c..65408bc994c4 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -358,20 +358,20 @@ static const struct reg_field ocelot_regfields[REGFIELD_MAX] = { [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2), [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1), [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0), - /* Replicated per number of ports (11), register size 4 per port */ - [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 11, 4), - [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 11, 4), - [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4), - [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4), - [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4), - [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4), - [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 11, 4), - [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 11, 4), - [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 11, 4), - [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4), - [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 11, 4), - [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 11, 4), - [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4), + /* Replicated per number of ports (12), 
register size 4 per port */ + [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4), + [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4), + [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4), + [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4), + [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4), + [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4), + [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4), + [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4), + [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4), + [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4), + [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4), + [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4), }; static const struct ocelot_stat_layout ocelot_stats_layout[] = { From bce58590d1bd29ae27d6a7b3e7639d36feba3ced Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 20 Jul 2020 14:49:37 +0200 Subject: [PATCH 1470/2056] dt-bindings: net: dsa: Add DSA yaml binding For future DSA drivers it makes sense to add a generic DSA yaml binding which can be used then. This was created using the properties from dsa.txt. It includes the ports and the dsa,member property. Suggested-by: Florian Fainelli Signed-off-by: Kurt Kanzenbach Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/dsa.yaml | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/dsa/dsa.yaml diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml new file mode 100644 index 000000000000..faea214339ca --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/dsa.yaml @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/dsa/dsa.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Ethernet Switch Device Tree Bindings + +maintainers: + - Andrew Lunn + - Florian Fainelli + - Vivien Didelot + +description: + This binding represents Ethernet Switches which have a dedicated CPU + port. That port is usually connected to an Ethernet Controller of the + SoC. Such setups are typical for embedded devices. + +select: false + +properties: + $nodename: + pattern: "^switch(@.*)?$" + + dsa,member: + minItems: 2 + maxItems: 2 + description: + A two element list indicates which DSA cluster, and position within the + cluster a switch takes. <0 0> is cluster 0, switch 0. <0 1> is cluster 0, + switch 1. <1 0> is cluster 1, switch 0. 
A switch not part of any cluster + (single device hanging off a CPU port) must not specify this property + $ref: /schemas/types.yaml#/definitions/uint32-array + +patternProperties: + "^(ethernet-)?ports$": + type: object + properties: + '#address-cells': + const: 1 + '#size-cells': + const: 0 + + patternProperties: + "^(ethernet-)?port@[0-9]+$": + type: object + description: Ethernet switch ports + + properties: + reg: + description: Port number + + label: + description: + Describes the label associated with this port, which will become + the netdev name + $ref: /schemas/types.yaml#definitions/string + + link: + description: + Should be a list of phandles to other switch's DSA port. This + port is used as the outgoing port towards the phandle ports. The + full routing information must be given, not just the one hop + routes to neighbouring switches + $ref: /schemas/types.yaml#definitions/phandle-array + + ethernet: + description: + Should be a phandle to a valid Ethernet device node. This host + device is what the switch port is connected to + $ref: /schemas/types.yaml#definitions/phandle + + phy-handle: true + + phy-mode: true + + fixed-link: true + + mac-address: true + + required: + - reg + + additionalProperties: false + +oneOf: + - required: + - ports + - required: + - ethernet-ports + +... From 5a18bb14c0f7ccd2c004ea2ca5ea106ec38675f8 Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 20 Jul 2020 14:49:38 +0200 Subject: [PATCH 1471/2056] dt-bindings: net: dsa: Let dsa.txt refer to dsa.yaml The DSA bindings have been converted to YAML. Therefore, the old text style documentation should refer to that one. The text file can be removed completely once all the existing DSA switch bindings have been converted as well. Signed-off-by: Kurt Kanzenbach Signed-off-by: David S. Miller --- .../devicetree/bindings/net/dsa/dsa.txt | 255 +----------------- 1 file changed, 1 insertion(+), 254 deletions(-) diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index f66bb7ecdb82..bf7328aba330 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt @@ -1,257 +1,4 @@ Distributed Switch Architecture Device Tree Bindings ---------------------------------------------------- -Switches are true Linux devices and can be probed by any means. Once -probed, they register to the DSA framework, passing a node -pointer. This node is expected to fulfil the following binding, and -may contain additional properties as required by the device it is -embedded within. - -Required properties: - -- ports : A container for child nodes representing switch ports. - -Optional properties: - -- dsa,member : A two element list indicates which DSA cluster, and position - within the cluster a switch takes. <0 0> is cluster 0, - switch 0. <0 1> is cluster 0, switch 1. <1 0> is cluster 1, - switch 0. A switch not part of any cluster (single device - hanging off a CPU port) must not specify this property - -The ports container has the following properties - -Required properties: - -- #address-cells : Must be 1 -- #size-cells : Must be 0 - -Each port children node must have the following mandatory properties: -- reg : Describes the port address in the switch - -An uplink/downlink port between switches in the cluster has the following -mandatory property: - -- link : Should be a list of phandles to other switch's DSA - port. This port is used as the outgoing port - towards the phandle ports. 
The full routing - information must be given, not just the one hop - routes to neighbouring switches. - -A CPU port has the following mandatory property: - -- ethernet : Should be a phandle to a valid Ethernet device node. - This host device is what the switch port is - connected to. - -A user port has the following optional property: - -- label : Describes the label associated with this port, which - will become the netdev name. - -Port child nodes may also contain the following optional standardised -properties, described in binding documents: - -- phy-handle : Phandle to a PHY on an MDIO bus. See - Documentation/devicetree/bindings/net/ethernet.txt - for details. - -- phy-mode : See - Documentation/devicetree/bindings/net/ethernet.txt - for details. - -- fixed-link : Fixed-link subnode describing a link to a non-MDIO - managed entity. See - Documentation/devicetree/bindings/net/fixed-link.txt - for details. - -The MAC address will be determined using the optional properties -defined in ethernet.txt. - -Example - -The following example shows three switches on three MDIO busses, -linked into one DSA cluster. - -&mdio1 { - #address-cells = <1>; - #size-cells = <0>; - - switch0: switch0@0 { - compatible = "marvell,mv88e6085"; - reg = <0>; - - dsa,member = <0 0>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - label = "lan0"; - }; - - port@1 { - reg = <1>; - label = "lan1"; - local-mac-address = [00 00 00 00 00 00]; - }; - - port@2 { - reg = <2>; - label = "lan2"; - }; - - switch0port5: port@5 { - reg = <5>; - phy-mode = "rgmii-txid"; - link = <&switch1port6 - &switch2port9>; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - - port@6 { - reg = <6>; - ethernet = <&fec1>; - fixed-link { - speed = <100>; - full-duplex; - }; - }; - }; - }; -}; - -&mdio2 { - #address-cells = <1>; - #size-cells = <0>; - - switch1: switch1@0 { - compatible = "marvell,mv88e6085"; - reg = <0>; - - dsa,member = <0 1>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - label = "lan3"; - phy-handle = <&switch1phy0>; - }; - - port@1 { - reg = <1>; - label = "lan4"; - phy-handle = <&switch1phy1>; - }; - - port@2 { - reg = <2>; - label = "lan5"; - phy-handle = <&switch1phy2>; - }; - - switch1port5: port@5 { - reg = <5>; - link = <&switch2port9>; - phy-mode = "rgmii-txid"; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - - switch1port6: port@6 { - reg = <6>; - phy-mode = "rgmii-txid"; - link = <&switch0port5>; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - }; - mdio-bus { - #address-cells = <1>; - #size-cells = <0>; - switch1phy0: switch1phy0@0 { - reg = <0>; - }; - switch1phy1: switch1phy0@1 { - reg = <1>; - }; - switch1phy2: switch1phy0@2 { - reg = <2>; - }; - }; - }; -}; - -&mdio4 { - #address-cells = <1>; - #size-cells = <0>; - - switch2: switch2@0 { - compatible = "marvell,mv88e6085"; - reg = <0>; - - dsa,member = <0 2>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - label = "lan6"; - }; - - port@1 { - reg = <1>; - label = "lan7"; - }; - - port@2 { - reg = <2>; - label = "lan8"; - }; - - port@3 { - reg = <3>; - label = "optical3"; - fixed-link { - speed = <1000>; - full-duplex; - link-gpios = <&gpio6 2 - GPIO_ACTIVE_HIGH>; - }; - }; - - port@4 { - reg = <4>; - label = "optical4"; - fixed-link { - speed = <1000>; - full-duplex; - link-gpios = <&gpio6 3 - GPIO_ACTIVE_HIGH>; - }; - }; - - switch2port9: port@9 { - reg = <9>; - phy-mode = "rgmii-txid"; - link = <&switch1port5 - 
&switch0port5>; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - }; - }; -}; +See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documenation. From 85e05d263ed2d50e3be86c73175effe780ec1d9a Mon Sep 17 00:00:00 2001 From: Kurt Kanzenbach Date: Mon, 20 Jul 2020 14:49:39 +0200 Subject: [PATCH 1472/2056] net: dsa: of: Allow ethernet-ports as encapsulating node Due to unified Ethernet Switch Device Tree Bindings allow for ethernet-ports as encapsulating node as well. Signed-off-by: Kurt Kanzenbach Signed-off-by: David S. Miller --- net/dsa/dsa2.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index e055efff390b..c0ffc7a2b65f 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -727,8 +727,12 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds, ports = of_get_child_by_name(dn, "ports"); if (!ports) { - dev_err(ds->dev, "no ports child node found\n"); - return -EINVAL; + /* The second possibility is "ethernet-ports" */ + ports = of_get_child_by_name(dn, "ethernet-ports"); + if (!ports) { + dev_err(ds->dev, "no ports child node found\n"); + return -EINVAL; + } } for_each_available_child_of_node(ports, port) { From 749c08f8206cdf5cad15d557912898ce22aa55da Mon Sep 17 00:00:00 2001 From: Richard Sailer Date: Mon, 20 Jul 2020 18:06:14 +0200 Subject: [PATCH 1473/2056] net: dccp: Add SIOCOUTQ IOCTL support (send buffer fill) This adds support for the SIOCOUTQ IOCTL to get the send buffer fill of a DCCP socket, like UDP and TCP sockets already have. Regarding the used data field: DCCP uses per packet sequence numbers, not per byte, so sequence numbers can't be used like in TCP. sk_wmem_queued is not used by DCCP and always 0, even in test on highly congested paths. Therefore this uses sk_wmem_alloc like in UDP. Signed-off-by: Richard Sailer Signed-off-by: David S. Miller --- Documentation/networking/dccp.rst | 3 +++ net/dccp/proto.c | 9 +++++++++ 2 files changed, 12 insertions(+) diff --git a/Documentation/networking/dccp.rst b/Documentation/networking/dccp.rst index dde16be04456..91e5c33ba3ff 100644 --- a/Documentation/networking/dccp.rst +++ b/Documentation/networking/dccp.rst @@ -192,6 +192,9 @@ FIONREAD Works as in udp(7): returns in the ``int`` argument pointer the size of the next pending datagram in bytes, or 0 when no datagram is pending. +SIOCOUTQ + Returns the number of unsent data bytes in the socket send queue as ``int`` + into the buffer specified by the argument pointer. Other tunables ============== diff --git a/net/dccp/proto.c b/net/dccp/proto.c index fd92d3fe321f..9e453611107f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -375,6 +375,15 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) goto out; switch (cmd) { + case SIOCOUTQ: { + int amount = sk_wmem_alloc_get(sk); + /* Using sk_wmem_alloc here because sk_wmem_queued is not used by DCCP and + * always 0, comparably to UDP. + */ + + rc = put_user(amount, (int __user *)arg); + } + break; case SIOCINQ: { struct sk_buff *skb; unsigned long amount = 0; From fb16d465f7710d5bfb9bd4d4bee72ba053ab27da Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 20 Jul 2020 20:26:54 +0300 Subject: [PATCH 1474/2056] net: phy: fix check in get_phy_c45_ids After the patch below, the iteration through the available MMDs is completely short-circuited, and devs_in_pkg remains set to the initial value of zero. 
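(For illustration only: a minimal, self-contained sketch of that loop guard, reusing the 0x1fffffff mask from the patch context; the helper names and the standalone program around them are hypothetical and not the driver code itself. It shows why the merged '&&' guard can never be true, while the intended '||' guard keeps probing until a plausible devices-in-package bitmap has been read.)

#include <stdbool.h>
#include <stdio.h>

/* Guard as merged: both halves must hold at once, which is impossible,
 * so the probing loop exits before its first iteration. */
static bool keep_probing_broken(unsigned int devs_in_pkg)
{
	return devs_in_pkg == 0 &&
	       (devs_in_pkg & 0x1fffffff) == 0x1fffffff;
}

/* Guard as intended: keep probing while the value read so far is still
 * invalid, i.e. all zeroes or (mostly) all ones. */
static bool keep_probing_fixed(unsigned int devs_in_pkg)
{
	return devs_in_pkg == 0 ||
	       (devs_in_pkg & 0x1fffffff) == 0x1fffffff;
}

int main(void)
{
	unsigned int devs_in_pkg = 0;	/* initial value in get_phy_c45_ids() */

	printf("broken guard runs loop: %d\n", keep_probing_broken(devs_in_pkg));	/* prints 0 */
	printf("fixed guard runs loop:  %d\n", keep_probing_fixed(devs_in_pkg));	/* prints 1 */

	devs_in_pkg = 0xe000009a;	/* a plausible bitmap, as in the log quoted below */
	printf("fixed guard after a valid read: %d\n", keep_probing_fixed(devs_in_pkg));	/* prints 0 */

	return 0;
}

With the corrected '||', probing continues until a usable bitmap is returned, which is what the one-line hunk further down restores.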
Due to devs_in_pkg being zero, the rest of get_phy_c45_ids() is short-circuited too: the following loop never reaches below this point either (it executes "continue" for every device in package, failing to retrieve PHY ID for any of them): /* Now probe Device Identifiers for each device present. */ for (i = 1; i < num_ids; i++) { if (!(devs_in_pkg & (1 << i))) continue; So c45_ids->device_ids remains populated with zeroes. This causes an Aquantia AQR412 PHY (same as any C45 PHY would, in fact) to be probed by the Generic PHY driver. The issue seems to be a case of submitting partially committed work (and therefore testing something other than was submitted). The intention of the patch was to delay exiting the loop until one more condition is reached (the devs_in_pkg read from hardware is either 0, OR mostly f's). So fix the patch to reflect that. Tested with traffic on a LS1028A-QDS, the PHY is now probed correctly using the Aquantia driver. The devs_in_pkg bit field is set to 0xe000009a, and the MMDs that are present have the following IDs: [ 5.600772] libphy: get_phy_c45_ids: device_ids[1]=0x3a1b662 [ 5.618781] libphy: get_phy_c45_ids: device_ids[3]=0x3a1b662 [ 5.630797] libphy: get_phy_c45_ids: device_ids[4]=0x3a1b662 [ 5.654535] libphy: get_phy_c45_ids: device_ids[7]=0x3a1b662 [ 5.791723] libphy: get_phy_c45_ids: device_ids[29]=0x3a1b662 [ 5.804050] libphy: get_phy_c45_ids: device_ids[30]=0x3a1b662 [ 5.816375] libphy: get_phy_c45_ids: device_ids[31]=0x0 [ 7.690237] mscc_felix 0000:00:00.5: PHY [0.5:00] driver [Aquantia AQR412] (irq=POLL) [ 7.704739] mscc_felix 0000:00:00.5: PHY [0.5:01] driver [Aquantia AQR412] (irq=POLL) [ 7.718918] mscc_felix 0000:00:00.5: PHY [0.5:02] driver [Aquantia AQR412] (irq=POLL) [ 7.733044] mscc_felix 0000:00:00.5: PHY [0.5:03] driver [Aquantia AQR412] (irq=POLL) Fixes: bba238ed037c ("net: phy: continue searching for C45 MMDs even if first returned ffff:ffff") Reported-by: Colin King Reported-by: Ioana Ciornei Signed-off-by: Vladimir Oltean Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 49e98a092b96..1b9523595839 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -734,8 +734,8 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, /* Find first non-zero Devices In package. Device zero is reserved * for 802.3 c45 complied PHYs, so don't probe it at first. */ - for (i = 1; i < MDIO_MMD_NUM && devs_in_pkg == 0 && - (devs_in_pkg & 0x1fffffff) == 0x1fffffff; i++) { + for (i = 1; i < MDIO_MMD_NUM && (devs_in_pkg == 0 || + (devs_in_pkg & 0x1fffffff) == 0x1fffffff); i++) { if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) { /* Check that there is a device present at this * address before reading the devices-in-package From 7979a7d2abb831371f9e75ab88d94dda59dfcf96 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Wed, 22 Jul 2020 10:10:27 +0800 Subject: [PATCH 1475/2056] net: qed: Remove unneeded cast from memory allocation Remove casting the values returned by memory allocation function. Coccinelle emits WARNING: casting value returned by memory allocation unction to (struct roce_destroy_qp_req_output_params *) is useless. This issue was detected by using the Coccinelle software. Signed-off-by: Wang Hai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_roce.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c index a1423ec0edf7..f16a157bb95a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_roce.c +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -757,8 +757,7 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, if (!qp->req_offloaded) return 0; - p_ramrod_res = (struct roce_destroy_qp_req_output_params *) - dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), &ramrod_res_phys, GFP_KERNEL); if (!p_ramrod_res) { From c6dd6488acd105f4fde9b4d289aaef5669b12c76 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Wed, 22 Jul 2020 15:38:48 +0300 Subject: [PATCH 1476/2056] enetc: Remove the imdio bus on PF probe bailout enetc_imdio_remove() is missing from the enetc_pf_probe() bailout path. Not surprisingly because enetc_setup_serdes() is registering the imdio bus for internal purposes, and it's not obvious that enetc_imdio_remove() currently performs the teardown of enetc_setup_serdes(). To fix this, define enetc_teardown_serdes() to wrap enetc_imdio_remove() (improve code maintenance) and call it on bailout and remove paths. Fixes: 975d183ef0ca ("net: enetc: Initialize SerDes for SGMII and USXGMII protocols") Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 5a08f66b123c..1d2158fd9a28 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -966,6 +966,13 @@ static int enetc_configure_serdes(struct enetc_ndev_priv *priv) return 0; } +static void enetc_teardown_serdes(struct enetc_ndev_priv *priv) +{ + struct enetc_pf *pf = enetc_si_priv(priv->si); + + enetc_imdio_remove(pf); +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1045,6 +1052,7 @@ static int enetc_pf_probe(struct pci_dev *pdev, return 0; err_reg_netdev: + enetc_teardown_serdes(priv); enetc_free_msix(priv); err_alloc_msix: enetc_free_si_resources(priv); @@ -1071,7 +1079,7 @@ static void enetc_pf_remove(struct pci_dev *pdev) priv = netdev_priv(si->ndev); unregister_netdev(si->ndev); - enetc_imdio_remove(pf); + enetc_teardown_serdes(priv); enetc_mdio_remove(pf); enetc_of_put_phy(pf); From bb809a047eb5070e2fc76aa62d111fbbe656c532 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 22 Jul 2020 16:12:21 +0100 Subject: [PATCH 1477/2056] lan743x: remove redundant initialization of variable current_head_index The variable current_head_index is being initialized with a value that is never read and it is being updated later with a new value. Replace the initialization of -1 with the latter assignment. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/microchip/lan743x_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index f6479384dc4f..de93cc6ebc1a 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2046,14 +2046,13 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx) { struct skb_shared_hwtstamps *hwtstamps = NULL; int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + int current_head_index = *rx->head_cpu_ptr; struct lan743x_rx_buffer_info *buffer_info; struct lan743x_rx_descriptor *descriptor; - int current_head_index = -1; int extension_index = -1; int first_index = -1; int last_index = -1; - current_head_index = *rx->head_cpu_ptr; if (current_head_index < 0 || current_head_index >= rx->ring_size) goto done; From 6ab301c98f174a8c25d5351b977a1113e2f1fb91 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 22 Jul 2020 17:20:50 +0200 Subject: [PATCH 1478/2056] mptcp: zero token hash at creation time. Otherwise the 'chain_len' filed will carry random values, some token creation calls will fail due to excessive chain length, causing unexpected fallback to TCP. Fixes: 2c5ebd001d4f ("mptcp: refactor token container") Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/token.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/token.c b/net/mptcp/token.c index b25b390dbbff..97cfc45bcc4f 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -368,7 +368,7 @@ void __init mptcp_token_init(void) sizeof(struct token_bucket), 0, 20,/* one slot per 1MB of memory */ - 0, + HASH_ZERO, NULL, &token_mask, 0, From fdd8fac47ce67bae17feed9b74b3505e432fe561 Mon Sep 17 00:00:00 2001 From: Sriram Krishnan Date: Wed, 22 Jul 2020 21:08:44 +0530 Subject: [PATCH 1479/2056] hv_netvsc: add support for vlans in AF_PACKET mode Vlan tagged packets are getting dropped when used with DPDK that uses the AF_PACKET interface on a hyperV guest. The packet layer uses the tpacket interface to communicate the vlans information to the upper layers. On Rx path, these drivers can read the vlan info from the tpacket header but on the Tx path, this information is still within the packet frame and requires the paravirtual drivers to push this back into the NDIS header which is then used by the host OS to form the packet. This transition from the packet frame to NDIS header is currently missing hence causing the host OS to drop the all vlan tagged packets sent by the drivers that use AF_PACKET (ETH_P_ALL) such as DPDK. Here is an overview of the changes in the vlan header in the packet path: The RX path (userspace handles everything): 1. RX VLAN packet is stripped by HOST OS and placed in NDIS header 2. Guest Kernel RX hv_netvsc packets and moves VLAN info from NDIS header into kernel SKB 3. Kernel shares packets with user space application with PACKET_MMAP. The SKB VLAN info is copied to tpacket layer and indication set TP_STATUS_VLAN_VALID. 4. The user space application will re-insert the VLAN info into the frame The TX path: 1. The user space application has the VLAN info in the frame. 2. Guest kernel gets packets from the application with PACKET_MMAP. 3. The kernel later sends the frame to the hv_netvsc driver. The only way to send VLANs is when the SKB is setup & the VLAN is stripped from the frame. 4. 
TX VLAN is re-inserted by HOST OS based on the NDIS header. If it sees a VLAN in the frame the packet is dropped. Cc: xe-linux-external@cisco.com Cc: Sriram Krishnan Signed-off-by: Sriram Krishnan Reviewed-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc_drv.c | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index abda736e7c7d..2181d4538ab7 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -897,6 +897,7 @@ struct netvsc_ethtool_stats { unsigned long rx_no_memory; unsigned long stop_queue; unsigned long wake_queue; + unsigned long vlan_error; }; struct netvsc_ethtool_pcpu_stats { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 6267f706e8ee..e0327b88732c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -605,6 +605,29 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) *hash_info = hash; } + /* When using AF_PACKET we need to drop VLAN header from + * the frame and update the SKB to allow the HOST OS + * to transmit the 802.1Q packet + */ + if (skb->protocol == htons(ETH_P_8021Q)) { + u16 vlan_tci; + + skb_reset_mac_header(skb); + if (eth_type_vlan(eth_hdr(skb)->h_proto)) { + if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) { + ++net_device_ctx->eth_stats.vlan_error; + goto drop; + } + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); + /* Update the NDIS header pkt lengths */ + packet->total_data_buflen -= VLAN_HLEN; + packet->total_bytes -= VLAN_HLEN; + rndis_msg->msg_len = packet->total_data_buflen; + rndis_msg->msg.pkt.data_len = packet->total_data_buflen; + } + } + if (skb_vlan_tag_present(skb)) { struct ndis_pkt_8021q_info *vlan; @@ -1427,6 +1450,7 @@ static const struct { { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) }, { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) }, { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) }, + { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) }, }, pcpu_stats[] = { { "cpu%u_rx_packets", offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) }, From 637989b5d77e954007aecaf6cadc7badc6ab94fb Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Wed, 22 Jul 2020 18:57:11 +0300 Subject: [PATCH 1480/2056] devlink: Always use user_ptr[0] for devlink and simplify post_doit Currently devlink instance is searched on all doit() operations. But it is optionally stored into user_ptr[0]. This requires rediscovering devlink again doing post_doit(). Few devlink commands related to port shared buffers needs 3 pointers (devlink, devlink_port, and devlink_sb) while executing doit commands. Though devlink pointer can be derived from the devlink_port during post_doit() operation when doit() callback has acquired devlink instance lock, relying on such scheme to access devlik pointer makes code very fragile. Hence, to avoid ambiguity in post_doit() and to avoid searching devlink instance again, simplify code by always storing devlink instance in user_ptr[0] and derive devlink_sb pointer in their respective callback routines. Signed-off-by: Parav Pandit Reviewed-by: Jiri Pirko Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- net/core/devlink.c | 164 +++++++++++++++++++-------------------------- 1 file changed, 70 insertions(+), 94 deletions(-) diff --git a/net/core/devlink.c b/net/core/devlink.c index 8b7bb4bfb6d0..0ca89196a367 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -386,16 +386,14 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id) return NULL; } -#define DEVLINK_NL_FLAG_NEED_DEVLINK BIT(0) -#define DEVLINK_NL_FLAG_NEED_PORT BIT(1) -#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(2) -#define DEVLINK_NL_FLAG_NEED_SB BIT(3) +#define DEVLINK_NL_FLAG_NEED_PORT BIT(0) +#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT BIT(1) /* The per devlink instance lock is taken by default in the pre-doit * operation, yet several commands do not require this. The global * devlink lock is taken and protects from disruption by user-calls. */ -#define DEVLINK_NL_FLAG_NO_LOCK BIT(4) +#define DEVLINK_NL_FLAG_NO_LOCK BIT(2) static int devlink_nl_pre_doit(const struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) @@ -412,31 +410,19 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops, } if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK) mutex_lock(&devlink->lock); - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) { - info->user_ptr[0] = devlink; - } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { + info->user_ptr[0] = devlink; + if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) { devlink_port = devlink_port_get_from_info(devlink, info); if (IS_ERR(devlink_port)) { err = PTR_ERR(devlink_port); goto unlock; } - info->user_ptr[0] = devlink_port; + info->user_ptr[1] = devlink_port; } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) { - info->user_ptr[0] = devlink; devlink_port = devlink_port_get_from_info(devlink, info); if (!IS_ERR(devlink_port)) info->user_ptr[1] = devlink_port; } - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) { - struct devlink_sb *devlink_sb; - - devlink_sb = devlink_sb_get_from_info(devlink, info); - if (IS_ERR(devlink_sb)) { - err = PTR_ERR(devlink_sb); - goto unlock; - } - info->user_ptr[1] = devlink_sb; - } return 0; unlock: @@ -451,16 +437,8 @@ static void devlink_nl_post_doit(const struct genl_ops *ops, { struct devlink *devlink; - /* When devlink changes netns, it would not be found - * by devlink_get_from_info(). So try if it is stored first. 
- */ - if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) { - devlink = info->user_ptr[0]; - } else { - devlink = devlink_get_from_info(info); - WARN_ON(IS_ERR(devlink)); - } - if (!IS_ERR(devlink) && ~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK) + devlink = info->user_ptr[0]; + if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK) mutex_unlock(&devlink->lock); mutex_unlock(&devlink_mutex); } @@ -759,7 +737,7 @@ static int devlink_nl_cmd_get_dumpit(struct sk_buff *msg, static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; struct sk_buff *msg; int err; @@ -906,7 +884,7 @@ devlink_port_function_set(struct devlink *devlink, struct devlink_port *port, static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; int err; @@ -1041,10 +1019,14 @@ static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct devlink_sb *devlink_sb; struct sk_buff *msg; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; @@ -1146,11 +1128,15 @@ static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct devlink_sb *devlink_sb; struct sk_buff *msg; u16 pool_index; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) @@ -1255,12 +1241,16 @@ static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; enum devlink_sb_threshold_type threshold_type; + struct devlink_sb *devlink_sb; u16 pool_index; u32 size; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) @@ -1339,13 +1329,17 @@ static int devlink_nl_sb_port_pool_fill(struct sk_buff *msg, static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; - struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct devlink_sb *devlink_sb; struct sk_buff *msg; u16 pool_index; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) @@ -1455,12 +1449,17 @@ static int devlink_sb_port_pool_set(struct devlink_port *devlink_port, static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; - struct devlink_sb *devlink_sb = 
info->user_ptr[1]; + struct devlink_port *devlink_port = info->user_ptr[1]; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_sb *devlink_sb; u16 pool_index; u32 threshold; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_index_get_from_info(devlink_sb, info, &pool_index); if (err) @@ -1542,14 +1541,18 @@ devlink_nl_sb_tc_pool_bind_fill(struct sk_buff *msg, struct devlink *devlink, static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; + struct devlink_port *devlink_port = info->user_ptr[1]; struct devlink *devlink = devlink_port->devlink; - struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct devlink_sb *devlink_sb; struct sk_buff *msg; enum devlink_sb_pool_type pool_type; u16 tc_index; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_type_get_from_info(info, &pool_type); if (err) return err; @@ -1689,14 +1692,19 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb, struct genl_info *info) { - struct devlink_port *devlink_port = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; + struct devlink_port *devlink_port = info->user_ptr[1]; + struct devlink *devlink = info->user_ptr[0]; enum devlink_sb_pool_type pool_type; + struct devlink_sb *devlink_sb; u16 tc_index; u16 pool_index; u32 threshold; int err; + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); + err = devlink_sb_pool_type_get_from_info(info, &pool_type); if (err) return err; @@ -1724,8 +1732,12 @@ static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; const struct devlink_ops *ops = devlink->ops; + struct devlink_sb *devlink_sb; + + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); if (ops->sb_occ_snapshot) return ops->sb_occ_snapshot(devlink, devlink_sb->index); @@ -1736,8 +1748,12 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; - struct devlink_sb *devlink_sb = info->user_ptr[1]; const struct devlink_ops *ops = devlink->ops; + struct devlink_sb *devlink_sb; + + devlink_sb = devlink_sb_get_from_info(devlink, info); + if (IS_ERR(devlink_sb)) + return PTR_ERR(devlink_sb); if (ops->sb_occ_max_clear) return ops->sb_occ_max_clear(devlink, devlink_sb->index); @@ -7018,7 +7034,6 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_get_doit, .dumpit = devlink_nl_cmd_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -7041,24 +7056,20 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_port_split_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NO_LOCK, + .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_PORT_UNSPLIT, .validate = GENL_DONT_VALIDATE_STRICT | 
GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_port_unsplit_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NO_LOCK, + .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_SB_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_get_doit, .dumpit = devlink_nl_cmd_sb_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NEED_SB, /* can be retrieved by unprivileged users */ }, { @@ -7066,8 +7077,6 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_pool_get_doit, .dumpit = devlink_nl_cmd_sb_pool_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NEED_SB, /* can be retrieved by unprivileged users */ }, { @@ -7075,16 +7084,13 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_pool_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NEED_SB, }, { .cmd = DEVLINK_CMD_SB_PORT_POOL_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_port_pool_get_doit, .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | - DEVLINK_NL_FLAG_NEED_SB, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, /* can be retrieved by unprivileged users */ }, { @@ -7092,16 +7098,14 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_port_pool_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | - DEVLINK_NL_FLAG_NEED_SB, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, { .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit, .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | - DEVLINK_NL_FLAG_NEED_SB, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, /* can be retrieved by unprivileged users */ }, { @@ -7109,60 +7113,50 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_PORT | - DEVLINK_NL_FLAG_NEED_SB, + .internal_flags = DEVLINK_NL_FLAG_NEED_PORT, }, { .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_occ_snapshot_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NEED_SB, }, { .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_sb_occ_max_clear_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NEED_SB, }, { .cmd = DEVLINK_CMD_ESWITCH_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_eswitch_get_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NO_LOCK, + .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_ESWITCH_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_eswitch_set_doit, .flags = GENL_ADMIN_PERM, - 
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NO_LOCK, + .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_DPIPE_TABLE_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_dpipe_table_get, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_dpipe_entries_get, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_dpipe_headers_get, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -7170,20 +7164,17 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_dpipe_table_counters_set, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_RESOURCE_SET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_resource_set, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_RESOURCE_DUMP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_resource_dump, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -7191,15 +7182,13 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_reload, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK | - DEVLINK_NL_FLAG_NO_LOCK, + .internal_flags = DEVLINK_NL_FLAG_NO_LOCK, }, { .cmd = DEVLINK_CMD_PARAM_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_param_get_doit, .dumpit = devlink_nl_cmd_param_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -7207,7 +7196,6 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_param_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_PORT_PARAM_GET, @@ -7230,21 +7218,18 @@ static const struct genl_ops devlink_nl_ops[] = { .doit = devlink_nl_cmd_region_get_doit, .dumpit = devlink_nl_cmd_region_get_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_REGION_NEW, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_region_new, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_REGION_DEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_region_del, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_REGION_READ, @@ -7252,14 +7237,12 @@ static const struct genl_ops devlink_nl_ops[] = { GENL_DONT_VALIDATE_DUMP_STRICT, .dumpit = devlink_nl_cmd_region_read_dumpit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_INFO_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_info_get_doit, .dumpit = 
devlink_nl_cmd_info_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { @@ -7317,46 +7300,39 @@ static const struct genl_ops devlink_nl_ops[] = { .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = devlink_nl_cmd_flash_update, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_TRAP_GET, .doit = devlink_nl_cmd_trap_get_doit, .dumpit = devlink_nl_cmd_trap_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_TRAP_SET, .doit = devlink_nl_cmd_trap_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_TRAP_GROUP_GET, .doit = devlink_nl_cmd_trap_group_get_doit, .dumpit = devlink_nl_cmd_trap_group_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_TRAP_GROUP_SET, .doit = devlink_nl_cmd_trap_group_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, { .cmd = DEVLINK_CMD_TRAP_POLICER_GET, .doit = devlink_nl_cmd_trap_policer_get_doit, .dumpit = devlink_nl_cmd_trap_policer_get_dumpit, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, /* can be retrieved by unprivileged users */ }, { .cmd = DEVLINK_CMD_TRAP_POLICER_SET, .doit = devlink_nl_cmd_trap_policer_set_doit, .flags = GENL_ADMIN_PERM, - .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, }; From b71a61ccfebb4ff733d2d9fc66cd5c75b7ae46a2 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:05 +0100 Subject: [PATCH 1481/2056] l2tp: cleanup whitespace use Fix up various whitespace issues as reported by checkpatch.pl: * remove spaces around operators where appropriate, * add missing blank lines following declarations, * remove multiple blank lines, or trailing blank lines at the end of functions. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 38 +++++++++++++++++++------------------- net/l2tp/l2tp_debugfs.c | 2 -- net/l2tp/l2tp_eth.c | 16 ++++++---------- net/l2tp/l2tp_ip.c | 14 ++++++++------ net/l2tp/l2tp_ip6.c | 14 +++++++------- net/l2tp/l2tp_netlink.c | 3 ++- net/l2tp/l2tp_ppp.c | 9 ++++++--- 7 files changed, 48 insertions(+), 48 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6434d17e6e8e..0992d5fab9b8 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -94,7 +94,7 @@ struct l2tp_skb_cb { unsigned long expires; }; -#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)]) +#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&skb->cb[sizeof(struct inet_skb_parm)]) static struct workqueue_struct *l2tp_wq; @@ -134,7 +134,6 @@ static inline struct hlist_head * l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) { return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; - } /* Session hash list. 
@@ -648,9 +647,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, L2TP_SKB_CB(skb)->has_seq = 0; if (tunnel->version == L2TP_HDR_VER_2) { if (hdrflags & L2TP_HDRFLAG_S) { - ns = ntohs(*(__be16 *) ptr); + ns = ntohs(*(__be16 *)ptr); ptr += 2; - nr = ntohs(*(__be16 *) ptr); + nr = ntohs(*(__be16 *)ptr); ptr += 2; /* Store L2TP info in the skb */ @@ -662,7 +661,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, session->name, ns, nr, session->nr); } } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) { - u32 l2h = ntohl(*(__be32 *) ptr); + u32 l2h = ntohl(*(__be32 *)ptr); if (l2h & 0x40000000) { ns = l2h & 0x00ffffff; @@ -777,6 +776,7 @@ EXPORT_SYMBOL(l2tp_recv_common); static int l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; + BUG_ON(!session); BUG_ON(session->magic != L2TP_SESSION_MAGIC); while ((skb = skb_dequeue(&session->reorder_q))) { @@ -828,7 +828,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) optr = ptr = skb->data; /* Get L2TP header flags */ - hdrflags = ntohs(*(__be16 *) ptr); + hdrflags = ntohs(*(__be16 *)ptr); /* Check protocol version */ version = hdrflags & L2TP_HDR_VER_MASK; @@ -859,14 +859,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) ptr += 2; /* Extract tunnel and session ID */ - tunnel_id = ntohs(*(__be16 *) ptr); + tunnel_id = ntohs(*(__be16 *)ptr); ptr += 2; - session_id = ntohs(*(__be16 *) ptr); + session_id = ntohs(*(__be16 *)ptr); ptr += 2; } else { ptr += 2; /* skip reserved bits */ tunnel_id = tunnel->tunnel_id; - session_id = ntohl(*(__be32 *) ptr); + session_id = ntohl(*(__be32 *)ptr); ptr += 4; } @@ -971,13 +971,13 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf) */ if (tunnel->encap == L2TP_ENCAPTYPE_UDP) { u16 flags = L2TP_HDR_VER_3; - *((__be16 *) bufp) = htons(flags); + *((__be16 *)bufp) = htons(flags); bufp += 2; - *((__be16 *) bufp) = 0; + *((__be16 *)bufp) = 0; bufp += 2; } - *((__be32 *) bufp) = htonl(session->peer_session_id); + *((__be32 *)bufp) = htonl(session->peer_session_id); bufp += 4; if (session->cookie_len) { memcpy(bufp, &session->cookie[0], session->cookie_len); @@ -1305,9 +1305,9 @@ static int l2tp_tunnel_sock_create(struct net *net, memcpy(&udp_conf.peer_ip6, cfg->peer_ip6, sizeof(udp_conf.peer_ip6)); udp_conf.use_udp6_tx_checksums = - ! cfg->udp6_zero_tx_checksums; + !cfg->udp6_zero_tx_checksums; udp_conf.use_udp6_rx_checksums = - ! 
cfg->udp6_zero_rx_checksums; + !cfg->udp6_zero_rx_checksums; } else #endif { @@ -1340,7 +1340,7 @@ static int l2tp_tunnel_sock_create(struct net *net, memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *) &ip6_addr, + err = kernel_bind(sock, (struct sockaddr *)&ip6_addr, sizeof(ip6_addr)); if (err < 0) goto out; @@ -1350,7 +1350,7 @@ static int l2tp_tunnel_sock_create(struct net *net, sizeof(ip6_addr.l2tp_addr)); ip6_addr.l2tp_conn_id = peer_tunnel_id; err = kernel_connect(sock, - (struct sockaddr *) &ip6_addr, + (struct sockaddr *)&ip6_addr, sizeof(ip6_addr), 0); if (err < 0) goto out; @@ -1367,7 +1367,7 @@ static int l2tp_tunnel_sock_create(struct net *net, ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->local_ip; ip_addr.l2tp_conn_id = tunnel_id; - err = kernel_bind(sock, (struct sockaddr *) &ip_addr, + err = kernel_bind(sock, (struct sockaddr *)&ip_addr, sizeof(ip_addr)); if (err < 0) goto out; @@ -1375,7 +1375,7 @@ static int l2tp_tunnel_sock_create(struct net *net, ip_addr.l2tp_family = AF_INET; ip_addr.l2tp_addr = cfg->peer_ip; ip_addr.l2tp_conn_id = peer_tunnel_id; - err = kernel_connect(sock, (struct sockaddr *) &ip_addr, + err = kernel_connect(sock, (struct sockaddr *)&ip_addr, sizeof(ip_addr), 0); if (err < 0) goto out; @@ -1593,6 +1593,7 @@ void __l2tp_session_unhash(struct l2tp_session *session) /* For L2TPv3 we have a per-net hash: remove from there, too */ if (tunnel->version != L2TP_HDR_VER_2) { struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + spin_lock_bh(&pn->l2tp_session_hlist_lock); hlist_del_init_rcu(&session->global_hlist); spin_unlock_bh(&pn->l2tp_session_hlist_lock); @@ -1636,7 +1637,6 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version) if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP) session->hdr_len += 4; } - } EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 35bb4f3bdbe0..f0301cb41ae0 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -63,7 +63,6 @@ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) pd->session_idx = 0; l2tp_dfs_next_tunnel(pd); } - } static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) @@ -90,7 +89,6 @@ static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) return pd; } - static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) { (*pos)++; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 3099efa19249..a84627d7be27 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -51,7 +51,6 @@ struct l2tp_eth_sess { struct net_device __rcu *dev; }; - static int l2tp_eth_dev_init(struct net_device *dev) { eth_hw_addr_random(dev); @@ -94,13 +93,12 @@ static void l2tp_eth_get_stats64(struct net_device *dev, { struct l2tp_eth *priv = netdev_priv(dev); - stats->tx_bytes = (unsigned long) atomic_long_read(&priv->tx_bytes); - stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets); - stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped); - stats->rx_bytes = (unsigned long) atomic_long_read(&priv->rx_bytes); - stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets); - stats->rx_errors = (unsigned long) atomic_long_read(&priv->rx_errors); - + stats->tx_bytes = (unsigned long)atomic_long_read(&priv->tx_bytes); + stats->tx_packets = (unsigned long)atomic_long_read(&priv->tx_packets); + stats->tx_dropped = (unsigned 
long)atomic_long_read(&priv->tx_dropped); + stats->rx_bytes = (unsigned long)atomic_long_read(&priv->rx_bytes); + stats->rx_packets = (unsigned long)atomic_long_read(&priv->rx_packets); + stats->rx_errors = (unsigned long)atomic_long_read(&priv->rx_errors); } static const struct net_device_ops l2tp_eth_netdev_ops = { @@ -348,13 +346,11 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, return rc; } - static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { .session_create = l2tp_eth_create, .session_delete = l2tp_session_delete, }; - static int __init l2tp_eth_init(void) { int err = 0; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 2a3fd31fb589..e5b63eab887d 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -126,7 +126,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) /* Point to L2TP header */ optr = ptr = skb->data; - session_id = ntohl(*((__be32 *) ptr)); + session_id = ntohl(*((__be32 *)ptr)); ptr += 4; /* RFC3931: L2TP/IP packets have the first 4 bytes containing @@ -176,7 +176,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) if ((skb->data[0] & 0xc0) != 0xc0) goto discard; - tunnel_id = ntohl(*(__be32 *) &skb->data[4]); + tunnel_id = ntohl(*(__be32 *)&skb->data[4]); iph = (struct iphdr *)skb_network_header(skb); read_lock_bh(&l2tp_ip_lock); @@ -260,7 +260,7 @@ static void l2tp_ip_destroy_sock(struct sock *sk) static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); - struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; + struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr; struct net *net = sock_net(sk); int ret; int chk_addr_ret; @@ -316,7 +316,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { - struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; + struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr; int rc; if (addr_len < sizeof(*lsa)) @@ -375,6 +375,7 @@ static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, lsa->l2tp_addr.s_addr = inet->inet_daddr; } else { __be32 addr = inet->inet_rcv_saddr; + if (!addr) addr = inet->inet_saddr; lsa->l2tp_conn_id = lsk->conn_id; @@ -422,6 +423,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* Get and verify the address. 
*/ if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name); + rc = -EINVAL; if (msg->msg_namelen < sizeof(*lip)) goto out; @@ -456,7 +458,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) skb_reset_transport_header(skb); /* Insert 0 session_id */ - *((__be32 *) skb_put(skb, 4)) = 0; + *((__be32 *)skb_put(skb, 4)) = 0; /* Copy user data into skb */ rc = memcpy_from_msg(skb_put(skb, len), msg, len); @@ -467,7 +469,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4 = &inet->cork.fl.u.ip4; if (connected) - rt = (struct rtable *) __sk_dst_check(sk, 0); + rt = (struct rtable *)__sk_dst_check(sk, 0); rcu_read_lock(); if (rt == NULL) { diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 4799bec87b33..d17b9fe1180f 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -138,7 +138,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) /* Point to L2TP header */ optr = ptr = skb->data; - session_id = ntohl(*((__be32 *) ptr)); + session_id = ntohl(*((__be32 *)ptr)); ptr += 4; /* RFC3931: L2TP/IP packets have the first 4 bytes containing @@ -188,7 +188,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) if ((skb->data[0] & 0xc0) != 0xc0) goto discard; - tunnel_id = ntohl(*(__be32 *) &skb->data[4]); + tunnel_id = ntohl(*(__be32 *)&skb->data[4]); iph = ipv6_hdr(skb); read_lock_bh(&l2tp_ip6_lock); @@ -276,7 +276,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); - struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; + struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr; struct net *net = sock_net(sk); __be32 v4addr = 0; int bound_dev_if; @@ -375,8 +375,8 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { - struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr; - struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; + struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; + struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; struct in6_addr *daddr; int addr_type; int rc; @@ -548,7 +548,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = &lsa->l2tp_addr; if (np->sndflow) { fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK; - if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { + if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (IS_ERR(flowlabel)) return -EINVAL; @@ -594,7 +594,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (IS_ERR(flowlabel)) return -EINVAL; } - if (!(opt->opt_nflen|opt->opt_flen)) + if (!(opt->opt_nflen | opt->opt_flen)) opt = NULL; } diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index ebb381c3f1b9..0ce8e94ace78 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -27,7 +27,6 @@ #include "l2tp_core.h" - static struct genl_family l2tp_nl_family; static const struct genl_multicast_group l2tp_multicast_group[] = { @@ -570,6 +569,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]); + if (len > 8) { ret = -EINVAL; goto out_tunnel; @@ -579,6 +579,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf } if 
(info->attrs[L2TP_ATTR_PEER_COOKIE]) { u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]); + if (len > 8) { ret = -EINVAL; goto out_tunnel; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index c54cb59593ef..6dccffa29d02 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -351,7 +351,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, */ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) { - struct sock *sk = (struct sock *) chan->private; + struct sock *sk = (struct sock *)chan->private; struct l2tp_session *session; struct l2tp_tunnel *tunnel; int uhlen, headroom; @@ -928,6 +928,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, inet = inet_sk(tunnel->sock); if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { struct sockaddr_pppol2tp sp; + len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; @@ -984,6 +985,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, #endif } else if (tunnel->version == 3) { struct sockaddr_pppol2tpv3 sp; + len = sizeof(sp); memset(&sp, 0, len); sp.sa_family = AF_PPPOX; @@ -1343,7 +1345,7 @@ static int pppol2tp_session_getsockopt(struct sock *sk, break; case PPPOL2TP_SO_REORDERTO: - *val = (int) jiffies_to_msecs(session->reorder_timeout); + *val = (int)jiffies_to_msecs(session->reorder_timeout); l2tp_info(session, L2TP_MSG_CONTROL, "%s: get reorder_timeout=%d\n", session->name, *val); break; @@ -1407,7 +1409,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, if (put_user(len, optlen)) goto end_put_sess; - if (copy_to_user((void __user *) optval, &val, len)) + if (copy_to_user((void __user *)optval, &val, len)) goto end_put_sess; err = 0; @@ -1551,6 +1553,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) if (tunnel->sock) { struct inet_sock *inet = inet_sk(tunnel->sock); + ip = ntohl(inet->inet_saddr); port = ntohs(inet->inet_sport); } From 20dcb1107ab1a3423c72a88269b9775cc549262a Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:06 +0100 Subject: [PATCH 1482/2056] l2tp: cleanup comments Modify some l2tp comments to better adhere to kernel coding style, as reported by checkpatch.pl. Add descriptive comments for the l2tp per-net spinlocks to document their use. Fix an incorrect comment in l2tp_recv_common: RFC2661 section 5.4 states that: "The LNS controls enabling and disabling of sequence numbers by sending a data message with or without sequence numbers present at any time during the life of a session." l2tp handles this correctly in l2tp_recv_common, but the comment around the code was incorrect and confusing. Fix up the comment accordingly. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 9 ++--- net/l2tp/l2tp_core.h | 76 +++++++++++++++++------------------------ net/l2tp/l2tp_debugfs.c | 3 +- net/l2tp/l2tp_eth.c | 3 +- net/l2tp/l2tp_ip.c | 3 +- net/l2tp/l2tp_ip6.c | 15 ++++---- net/l2tp/l2tp_netlink.c | 3 +- net/l2tp/l2tp_ppp.c | 3 +- 8 files changed, 47 insertions(+), 68 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 0992d5fab9b8..f5e314d8a02b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * L2TP core. +/* L2TP core. 
* * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * @@ -102,8 +101,10 @@ static struct workqueue_struct *l2tp_wq; static unsigned int l2tp_net_id; struct l2tp_net { struct list_head l2tp_tunnel_list; + /* Lock for write access to l2tp_tunnel_list */ spinlock_t l2tp_tunnel_list_lock; struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; + /* Lock for write access to l2tp_session_hlist */ spinlock_t l2tp_session_hlist_lock; }; @@ -678,7 +679,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, } if (L2TP_SKB_CB(skb)->has_seq) { - /* Received a packet with sequence numbers. If we're the LNS, + /* Received a packet with sequence numbers. If we're the LAC, * check if we sre sending sequence numbers and if not, * configure it so. */ @@ -1604,7 +1605,7 @@ void __l2tp_session_unhash(struct l2tp_session *session) EXPORT_SYMBOL_GPL(__l2tp_session_unhash); /* This function is used by the netlink SESSION_DELETE command and by - pseudowire modules. + * pseudowire modules. */ int l2tp_session_delete(struct l2tp_session *session) { diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 10cf7c3dcbb3..3ebb701eebbf 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ -/* - * L2TP internal definitions. +/* L2TP internal definitions. * * Copyright (c) 2008,2009 Katalix Systems Ltd */ @@ -49,32 +48,26 @@ struct l2tp_tunnel; */ struct l2tp_session_cfg { enum l2tp_pwtype pw_type; - unsigned int recv_seq:1; /* expect receive packets with - * sequence numbers? */ - unsigned int send_seq:1; /* send packets with sequence - * numbers? */ - unsigned int lns_mode:1; /* behave as LNS? LAC enables - * sequence numbers under - * control of LNS. */ - int debug; /* bitmask of debug message - * categories */ + unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */ + unsigned int send_seq:1; /* send packets with sequence numbers? */ + unsigned int lns_mode:1; /* behave as LNS? + * LAC enables sequence numbers under LNS control. + */ + int debug; /* bitmask of debug message categories */ u16 l2specific_type; /* Layer 2 specific type */ u8 cookie[8]; /* optional cookie */ int cookie_len; /* 0, 4 or 8 bytes */ u8 peer_cookie[8]; /* peer's cookie */ int peer_cookie_len; /* 0, 4 or 8 bytes */ - int reorder_timeout; /* configured reorder timeout - * (in jiffies) */ + int reorder_timeout; /* configured reorder timeout (in jiffies) */ char *ifname; }; struct l2tp_session { - int magic; /* should be - * L2TP_SESSION_MAGIC */ + int magic; /* should be L2TP_SESSION_MAGIC */ long dead; - struct l2tp_tunnel *tunnel; /* back pointer to tunnel - * context */ + struct l2tp_tunnel *tunnel; /* back pointer to tunnel context */ u32 session_id; u32 peer_session_id; u8 cookie[8]; @@ -89,42 +82,37 @@ struct l2tp_session { u32 nr_max; /* max NR. Depends on tunnel */ u32 nr_window_size; /* NR window size */ u32 nr_oos; /* NR of last OOS packet */ - int nr_oos_count; /* For OOS recovery */ + int nr_oos_count; /* for OOS recovery */ int nr_oos_count_max; - struct hlist_node hlist; /* Hash list node */ + struct hlist_node hlist; /* hash list node */ refcount_t ref_count; char name[32]; /* for logging */ char ifname[IFNAMSIZ]; - unsigned int recv_seq:1; /* expect receive packets with - * sequence numbers? */ - unsigned int send_seq:1; /* send packets with sequence - * numbers? */ - unsigned int lns_mode:1; /* behave as LNS? LAC enables - * sequence numbers under - * control of LNS. 
*/ - int debug; /* bitmask of debug message - * categories */ - int reorder_timeout; /* configured reorder timeout - * (in jiffies) */ + unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */ + unsigned int send_seq:1; /* send packets with sequence numbers? */ + unsigned int lns_mode:1; /* behave as LNS? + * LAC enables sequence numbers under LNS control. + */ + int debug; /* bitmask of debug message categories */ + int reorder_timeout; /* configured reorder timeout (in jiffies) */ int reorder_skip; /* set if skip to next nr */ enum l2tp_pwtype pwtype; struct l2tp_stats stats; - struct hlist_node global_hlist; /* Global hash list node */ + struct hlist_node global_hlist; /* global hash list node */ int (*build_header)(struct l2tp_session *session, void *buf); void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); void (*session_close)(struct l2tp_session *session); void (*show)(struct seq_file *m, void *priv); - u8 priv[]; /* private data */ + u8 priv[]; /* private data */ }; /* Describes the tunnel. It contains info to track all the associated * sessions so incoming packets can be sorted out */ struct l2tp_tunnel_cfg { - int debug; /* bitmask of debug message - * categories */ + int debug; /* bitmask of debug message categories */ enum l2tp_encap_type encap; /* Used only for kernel-created sockets */ @@ -148,31 +136,29 @@ struct l2tp_tunnel { struct rcu_head rcu; rwlock_t hlist_lock; /* protect session_hlist */ - bool acpt_newsess; /* Indicates whether this - * tunnel accepts new sessions. - * Protected by hlist_lock. + bool acpt_newsess; /* indicates whether this tunnel accepts + * new sessions. Protected by hlist_lock. */ struct hlist_head session_hlist[L2TP_HASH_SIZE]; - /* hashed list of sessions, - * hashed by id */ + /* hashed list of sessions, hashed by id */ u32 tunnel_id; u32 peer_tunnel_id; int version; /* 2=>L2TPv2, 3=>L2TPv3 */ char name[20]; /* for logging */ - int debug; /* bitmask of debug message - * categories */ + int debug; /* bitmask of debug message categories */ enum l2tp_encap_type encap; struct l2tp_stats stats; - struct list_head list; /* Keep a list of all tunnels */ + struct list_head list; /* list node on per-namespace list of tunnels */ struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; void (*old_sk_destruct)(struct sock *); - struct sock *sock; /* Parent socket */ - int fd; /* Parent fd, if tunnel socket - * was created by userspace */ + struct sock *sock; /* parent socket */ + int fd; /* parent fd, if tunnel socket was created + * by userspace + */ struct work_struct del_work; }; diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index f0301cb41ae0..93181133e155 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * L2TP subsystem debugfs +/* L2TP subsystem debugfs * * Copyright (c) 2010 Katalix Systems Ltd */ diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index a84627d7be27..7ed2b4eced94 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * L2TPv3 ethernet pseudowire driver +/* L2TPv3 ethernet pseudowire driver * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd */ diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index e5b63eab887d..70f9fdaf6c86 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * L2TPv3 IP encapsulation support +/* L2TPv3 
IP encapsulation support * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd */ diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index d17b9fe1180f..ca7696147c7e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/* - * L2TPv3 IP encapsulation support for IPv6 +/* L2TPv3 IP encapsulation support for IPv6 * * Copyright (c) 2012 Katalix Systems Ltd */ @@ -38,7 +37,8 @@ struct l2tp_ip6_sock { u32 peer_conn_id; /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see - inet6_sk_generic */ + * inet6_sk_generic + */ struct ipv6_pinfo inet6; }; @@ -519,7 +519,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int err; /* Rough check on arithmetic overflow, - better check is made in ip6_append_data(). + * better check is made in ip6_append_data(). */ if (len > INT_MAX) return -EMSGSIZE; @@ -528,9 +528,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; - /* - * Get and verify the address. - */ + /* Get and verify the address */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_mark = sk->sk_mark; @@ -555,8 +553,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } } - /* - * Otherwise it will be difficult to maintain + /* Otherwise it will be difficult to maintain * sk->sk_dst_cache. */ if (sk->sk_state == TCP_ESTABLISHED && diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 0ce8e94ace78..3120f8dcc56a 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/* - * L2TP netlink layer, for management +/* L2TP netlink layer, for management * * Copyright (c) 2008,2009,2010 Katalix Systems Ltd * diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6dccffa29d02..48fbaf5ee82c 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -117,8 +117,7 @@ struct pppol2tp_session { int owner; /* pid that opened the socket */ struct mutex sk_lock; /* Protects .sk */ - struct sock __rcu *sk; /* Pointer to the session - * PPPoX socket */ + struct sock __rcu *sk; /* Pointer to the session PPPoX socket */ struct sock *__sk; /* Copy of .sk, for cleanup */ struct rcu_head rcu; /* For asynchronous release */ }; From 9f7da9a0e3dc43fa6046fd29cc4531b86af65446 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:07 +0100 Subject: [PATCH 1483/2056] l2tp: cleanup difficult-to-read line breaks Some l2tp code had line breaks which made the code more difficult to read. These were originally motivated by the 80-character line width coding guidelines, but were actually a negative from the perspective of trying to follow the code. Remove these linebreaks for clearer code, even if we do exceed 80 characters in width in some places. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_netlink.c | 69 +++++++++++++++++------------------------ net/l2tp/l2tp_ppp.c | 6 ++-- 2 files changed, 31 insertions(+), 44 deletions(-) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 3120f8dcc56a..42b409bc0de7 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -165,71 +165,63 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info struct l2tp_tunnel_cfg cfg = { 0, }; struct l2tp_tunnel *tunnel; struct net *net = genl_info_net(info); + struct nlattr **attrs = info->attrs; - if (!info->attrs[L2TP_ATTR_CONN_ID]) { + if (!attrs[L2TP_ATTR_CONN_ID]) { ret = -EINVAL; goto out; } - tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]); + tunnel_id = nla_get_u32(attrs[L2TP_ATTR_CONN_ID]); - if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) { + if (!attrs[L2TP_ATTR_PEER_CONN_ID]) { ret = -EINVAL; goto out; } - peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]); + peer_tunnel_id = nla_get_u32(attrs[L2TP_ATTR_PEER_CONN_ID]); - if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) { + if (!attrs[L2TP_ATTR_PROTO_VERSION]) { ret = -EINVAL; goto out; } - proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]); + proto_version = nla_get_u8(attrs[L2TP_ATTR_PROTO_VERSION]); - if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) { + if (!attrs[L2TP_ATTR_ENCAP_TYPE]) { ret = -EINVAL; goto out; } - cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]); + cfg.encap = nla_get_u16(attrs[L2TP_ATTR_ENCAP_TYPE]); fd = -1; - if (info->attrs[L2TP_ATTR_FD]) { - fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); + if (attrs[L2TP_ATTR_FD]) { + fd = nla_get_u32(attrs[L2TP_ATTR_FD]); } else { #if IS_ENABLED(CONFIG_IPV6) - if (info->attrs[L2TP_ATTR_IP6_SADDR] && - info->attrs[L2TP_ATTR_IP6_DADDR]) { - cfg.local_ip6 = nla_data( - info->attrs[L2TP_ATTR_IP6_SADDR]); - cfg.peer_ip6 = nla_data( - info->attrs[L2TP_ATTR_IP6_DADDR]); + if (attrs[L2TP_ATTR_IP6_SADDR] && attrs[L2TP_ATTR_IP6_DADDR]) { + cfg.local_ip6 = nla_data(attrs[L2TP_ATTR_IP6_SADDR]); + cfg.peer_ip6 = nla_data(attrs[L2TP_ATTR_IP6_DADDR]); } else #endif - if (info->attrs[L2TP_ATTR_IP_SADDR] && - info->attrs[L2TP_ATTR_IP_DADDR]) { - cfg.local_ip.s_addr = nla_get_in_addr( - info->attrs[L2TP_ATTR_IP_SADDR]); - cfg.peer_ip.s_addr = nla_get_in_addr( - info->attrs[L2TP_ATTR_IP_DADDR]); + if (attrs[L2TP_ATTR_IP_SADDR] && attrs[L2TP_ATTR_IP_DADDR]) { + cfg.local_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_SADDR]); + cfg.peer_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_DADDR]); } else { ret = -EINVAL; goto out; } - if (info->attrs[L2TP_ATTR_UDP_SPORT]) - cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); - if (info->attrs[L2TP_ATTR_UDP_DPORT]) - cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]); - cfg.use_udp_checksums = nla_get_flag( - info->attrs[L2TP_ATTR_UDP_CSUM]); + if (attrs[L2TP_ATTR_UDP_SPORT]) + cfg.local_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_SPORT]); + if (attrs[L2TP_ATTR_UDP_DPORT]) + cfg.peer_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_DPORT]); + cfg.use_udp_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_CSUM]); #if IS_ENABLED(CONFIG_IPV6) - cfg.udp6_zero_tx_checksums = nla_get_flag( - info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]); - cfg.udp6_zero_rx_checksums = nla_get_flag( - info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]); + cfg.udp6_zero_tx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]); + cfg.udp6_zero_rx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]); #endif } - if (info->attrs[L2TP_ATTR_DEBUG]) - cfg.debug = 
nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]); + if (attrs[L2TP_ATTR_DEBUG]) + cfg.debug = nla_get_u32(attrs[L2TP_ATTR_DEBUG]); ret = -EINVAL; switch (cfg.encap) { @@ -715,8 +707,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) || - nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, - session->peer_session_id) || + nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) || nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) || nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype)) goto nla_put_failure; @@ -724,11 +715,9 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl if ((session->ifname[0] && nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || (session->cookie_len && - nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, - &session->cookie[0])) || + nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, session->cookie)) || (session->peer_cookie_len && - nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, - &session->peer_cookie[0])) || + nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, session->peer_cookie)) || nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) || nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) || nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) || diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 48fbaf5ee82c..3fed922addb5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1566,8 +1566,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) user_data_ok = 'N'; } - seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> " - "%04X/%04X %d %c\n", + seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> %04X/%04X %d %c\n", session->name, ip, port, tunnel->tunnel_id, session->session_id, @@ -1606,8 +1605,7 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v) seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n"); seq_puts(m, "TUNNEL name, user-data-ok session-count\n"); seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); - seq_puts(m, " SESSION name, addr/port src-tid/sid " - "dest-tid/sid state user-data-ok\n"); + seq_puts(m, " SESSION name, addr/port src-tid/sid dest-tid/sid state user-data-ok\n"); seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n"); seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n"); goto out; From 8ce9825a5993a935631a84b5cce9f65af61d4e59 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:08 +0100 Subject: [PATCH 1484/2056] l2tp: cleanup wonky alignment of line-broken function calls Arguments should be aligned with the function call open parenthesis as per checkpatch. Tweak some function calls which were not aligned correctly. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 12 ++++++------ net/l2tp/l2tp_debugfs.c | 2 +- net/l2tp/l2tp_ppp.c | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f5e314d8a02b..3162e395cd4a 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1285,10 +1285,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work) * exit hook. 
*/ static int l2tp_tunnel_sock_create(struct net *net, - u32 tunnel_id, - u32 peer_tunnel_id, - struct l2tp_tunnel_cfg *cfg, - struct socket **sockp) + u32 tunnel_id, + u32 peer_tunnel_id, + struct l2tp_tunnel_cfg *cfg, + struct socket **sockp) { int err = -EINVAL; struct socket *sock = NULL; @@ -1333,7 +1333,7 @@ static int l2tp_tunnel_sock_create(struct net *net, struct sockaddr_l2tpip6 ip6_addr = {0}; err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, - IPPROTO_L2TP, &sock); + IPPROTO_L2TP, &sock); if (err < 0) goto out; @@ -1361,7 +1361,7 @@ static int l2tp_tunnel_sock_create(struct net *net, struct sockaddr_l2tpip ip_addr = {0}; err = sock_create_kern(net, AF_INET, SOCK_DGRAM, - IPPROTO_L2TP, &sock); + IPPROTO_L2TP, &sock); if (err < 0) goto out; diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 93181133e155..bea89c383b7d 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -145,7 +145,7 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) const struct ipv6_pinfo *np = inet6_sk(tunnel->sock); seq_printf(m, " from %pI6c to %pI6c\n", - &np->saddr, &tunnel->sock->sk_v6_daddr); + &np->saddr, &tunnel->sock->sk_v6_daddr); } else #endif seq_printf(m, " from %pI4 to %pI4\n", diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 3fed922addb5..f894dc275393 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1638,7 +1638,7 @@ static __net_init int pppol2tp_init_net(struct net *net) int err = 0; pde = proc_create_net("pppol2tp", 0444, net->proc_net, - &pppol2tp_seq_ops, sizeof(struct pppol2tp_seq_data)); + &pppol2tp_seq_ops, sizeof(struct pppol2tp_seq_data)); if (!pde) { err = -ENOMEM; goto out; From 0864e331fd53fec3933b99beeb7b1136b14efc63 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:09 +0100 Subject: [PATCH 1485/2056] l2tp: cleanup suspect code indent l2tp_core has conditionally compiled code in l2tp_xmit_skb for IPv6 support. The structure of this code triggered a checkpatch warning due to incorrect indentation. Fix up the indentation to address the checkpatch warning. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 3162e395cd4a..64d3a1d3ff3c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1122,8 +1122,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len &sk->sk_v6_daddr, udp_len); else #endif - udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr, - inet->inet_daddr, udp_len); + udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr, + inet->inet_daddr, udp_len); break; case L2TP_ENCAPTYPE_IP: From bef04d162c52bd7e0b98ba5ee3f904e6556c6551 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:10 +0100 Subject: [PATCH 1486/2056] l2tp: add identifier name in function pointer prototype Reported by checkpatch: "WARNING: function definition argument 'struct sock *' should also have an identifier name" Add an identifier name to help document the prototype. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 3ebb701eebbf..f23b3ff7ffff 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -154,7 +154,7 @@ struct l2tp_tunnel { struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; - void (*old_sk_destruct)(struct sock *); + void (*old_sk_destruct)(struct sock *sk); struct sock *sock; /* parent socket */ int fd; /* parent fd, if tunnel socket was created * by userspace From dbf82f3fac9d7cde572b08bde94d8aa117f92ac9 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:11 +0100 Subject: [PATCH 1487/2056] l2tp: prefer using BIT macro Use BIT(x) rather than (1<<x). Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index f23b3ff7ffff..2d2dd219a176 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -21,11 +21,11 @@ /* Per tunnel, session hash table size */ #define L2TP_HASH_BITS 4 -#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS) +#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS) /* System-wide, session hash table size */ #define L2TP_HASH_BITS_2 8 -#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2) +#define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2) struct sk_buff; From bdf9866e4b1be12a7af7eaf11f6dcf1b92f7073f Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:12 +0100 Subject: [PATCH 1488/2056] l2tp: prefer seq_puts for unformatted output checkpatch warns about use of seq_printf where seq_puts would do. Modify l2tp_debugfs accordingly. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index bea89c383b7d..ebe03bbb5948 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -199,7 +199,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) seq_printf(m, "%02x%02x%02x%02x", session->cookie[4], session->cookie[5], session->cookie[6], session->cookie[7]); - seq_printf(m, "\n"); + seq_puts(m, "\n"); } if (session->peer_cookie_len) { seq_printf(m, " peer cookie %02x%02x%02x%02x", @@ -209,7 +209,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) seq_printf(m, "%02x%02x%02x%02x", session->peer_cookie[4], session->peer_cookie[5], session->peer_cookie[6], session->peer_cookie[7]); - seq_printf(m, "\n"); + seq_puts(m, "\n"); } seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n", From c0235fb39b0cdb8e33f1a68a031a94544487a56d Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:13 +0100 Subject: [PATCH 1489/2056] l2tp: line-break long function prototypes In l2tp_core.c both l2tp_tunnel_create and l2tp_session_create take quite a number of arguments and have a correspondingly long prototype. This is both quite difficult to scan visually, and triggers checkpatch warnings. Add a line break to make these function prototypes more readable. Signed-off-by: Tom Parkin Signed-off-by: David S.
Miller --- net/l2tp/l2tp_core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 64d3a1d3ff3c..3dc712647f34 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1400,7 +1400,8 @@ static int l2tp_tunnel_sock_create(struct net *net, static struct lock_class_key l2tp_socket_class; -int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) +int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, + struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) { struct l2tp_tunnel *tunnel = NULL; int err; @@ -1641,7 +1642,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version) } EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); -struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) +struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, + u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct l2tp_session *session; From efcd8c8540f73829a26f5d224ebdd99382fd1552 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Wed, 22 Jul 2020 17:32:14 +0100 Subject: [PATCH 1490/2056] l2tp: avoid precidence issues in L2TP_SKB_CB macro checkpatch warned about the L2TP_SKB_CB macro's use of its argument: add braces to avoid the problem. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 3dc712647f34..6871611d99f2 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -93,7 +93,7 @@ struct l2tp_skb_cb { unsigned long expires; }; -#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&skb->cb[sizeof(struct inet_skb_parm)]) +#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)]) static struct workqueue_struct *l2tp_wq; From 4b1debbe63f46f951af81d80c0cc9dd646b6ceb6 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 22 Jul 2020 18:40:03 +0100 Subject: [PATCH 1491/2056] ionic: fix memory leak of object 'lid' Currently when netdev fails to allocate the error return path fails to free the allocated object 'lid'. Fix this by setting err to the return error code and jumping to a new label that performs the kfree of lid before returning. Addresses-Coverity: ("Resource leak") Fixes: 4b03b27349c0 ("ionic: get MTU from lif identity") Signed-off-by: Colin Ian King Acked-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/pensando/ionic/ionic_lif.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index db60c5405a58..3aa64030f4e2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2037,7 +2037,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index ionic->ntxqs_per_lif, ionic->ntxqs_per_lif); if (!netdev) { dev_err(dev, "Cannot allocate netdev, aborting\n"); - return ERR_PTR(-ENOMEM); + err = -ENOMEM; + goto err_out_free_lid; } SET_NETDEV_DEV(netdev, dev); @@ -2123,6 +2124,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index err_out_free_netdev: free_netdev(lif->netdev); lif = NULL; +err_out_free_lid: kfree(lid); return ERR_PTR(err); From 7ec3e95e7a778f1202f85a9046917d4f231ce799 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 22 Jul 2020 13:43:58 -0500 Subject: [PATCH 1492/2056] tg3: Avoid the use of one-element array One-element arrays are being deprecated[1]. Replace the one-element array with a simple value type 'u32 reserved2'[2], once it seems this is just a placeholder for alignment. [1] https://github.com/KSPP/linux/issues/79 [2] https://github.com/KSPP/linux/issues/86 Tested-by: kernel test robot Link: https://github.com/GustavoARSilva/linux-hardening/blob/master/cii/0-day/tg3-20200718.md Signed-off-by: Gustavo A. R. Silva Reviewed-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/tg3.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 6953d0546acb..1000c894064f 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2847,7 +2847,7 @@ struct tg3_ocir { u32 port1_flags; u32 port2_flags; u32 port3_flags; - u32 reserved2[1]; + u32 reserved2; }; From 6fcf9affd1cb17bca2c4c4e9129c61d864dda6c9 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 22 Jul 2020 13:50:24 -0500 Subject: [PATCH 1493/2056] bna: bfi.h: Avoid the use of one-element array One-element arrays are being deprecated[1]. Replace the one-element array with a simple value type 'u8 rsvd'[2], once it seems this is just a placeholder for alignment. [1] https://github.com/KSPP/linux/issues/79 [2] https://github.com/KSPP/linux/issues/86 Tested-by: kernel test robot Link: https://github.com/GustavoARSilva/linux-hardening/blob/master/cii/0-day/bfi-20200718.md Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bfi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 09c912e984fe..f780d42c946d 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -389,7 +389,7 @@ struct bfi_msgq_mhdr { u16 msg_token; u16 num_entries; u8 enet_id; - u8 rsvd[1]; + u8 rsvd; } __packed; #define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ From f1fa27f590e56c08bc1824974c5f69610133003d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 22 Jul 2020 13:58:52 -0500 Subject: [PATCH 1494/2056] net: qed_hsi.h: Avoid the use of one-element array One-element arrays are being deprecated[1]. 
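As a minimal sketch of the pattern applied by the one-element-array patches above and below (the struct and field names here are hypothetical and are not taken from the tg3, bna or qed code):

	#include <linux/types.h>

	/* Hypothetical descriptor: a one-element array used purely as a
	 * reserved/padding placeholder at the end of the struct.
	 */
	struct demo_desc_old {
		__le32 flags;
		__le32 reserved[1];	/* deprecated one-element array */
	};

	/* Equivalent layout with a plain member: same size, same offsets,
	 * no misleading array semantics.
	 */
	struct demo_desc_new {
		__le32 flags;
		__le32 reserved;	/* still 4 bytes of padding */
	};

Both structs above have the same size and member offsets, so the conversion is purely cosmetic.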
Replace the one-element array with a simple value type '__le32 reserved1'[2], once it seems this is just a placeholder for alignment. [1] https://github.com/KSPP/linux/issues/79 [2] https://github.com/KSPP/linux/issues/86 Tested-by: kernel test robot Link: https://github.com/GustavoARSilva/linux-hardening/blob/master/cii/0-day/qed_hsi-20200718.md Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 1af3f65ab862..559df9f4d656 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -361,7 +361,7 @@ struct core_tx_update_ramrod_data { u8 update_qm_pq_id_flg; u8 reserved0; __le16 qm_pq_id; - __le32 reserved1[1]; + __le32 reserved1; }; /* Enum flag for what type of dcb data to update */ From 2cf2f4f546f1dea54b63302a7eb28d1fe15f1e28 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:31 +0300 Subject: [PATCH 1495/2056] qed: reformat "qed_chain.h" a bit Reformat structs and macros definitions a bit prior to making functional changes. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- include/linux/qed/qed_chain.h | 126 ++++++++++++++++++---------------- 1 file changed, 66 insertions(+), 60 deletions(-) diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 7071dc92b4e2..087073517c09 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -26,9 +26,9 @@ enum qed_chain_mode { }; enum qed_chain_use_mode { - QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ - QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ - QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; enum qed_chain_cnt_type { @@ -40,84 +40,86 @@ enum qed_chain_cnt_type { }; struct qed_chain_next { - struct regpair next_phys; - void *next_virt; + struct regpair next_phys; + void *next_virt; }; struct qed_chain_pbl_u16 { - u16 prod_page_idx; - u16 cons_page_idx; + u16 prod_page_idx; + u16 cons_page_idx; }; struct qed_chain_pbl_u32 { - u32 prod_page_idx; - u32 cons_page_idx; + u32 prod_page_idx; + u32 cons_page_idx; }; struct qed_chain_ext_pbl { - dma_addr_t p_pbl_phys; - void *p_pbl_virt; + dma_addr_t p_pbl_phys; + void *p_pbl_virt; }; struct qed_chain_u16 { /* Cyclic index of next element to produce/consme */ - u16 prod_idx; - u16 cons_idx; + u16 prod_idx; + u16 cons_idx; }; struct qed_chain_u32 { /* Cyclic index of next element to produce/consme */ - u32 prod_idx; - u32 cons_idx; + u32 prod_idx; + u32 cons_idx; }; struct addr_tbl_entry { - void *virt_addr; - dma_addr_t dma_map; + void *virt_addr; + dma_addr_t dma_map; }; struct qed_chain { - /* fastpath portion of the chain - required for commands such + /* Fastpath portion of the chain - required for commands such * as produce / consume. */ + /* Point to next element to produce/consume */ - void *p_prod_elem; - void *p_cons_elem; + void *p_prod_elem; + void *p_cons_elem; /* Fastpath portions of the PBL [if exists] */ + struct { /* Table for keeping the virtual and physical addresses of the * chain pages, respectively to the physical addresses * in the pbl table. 
*/ - struct addr_tbl_entry *pp_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { - struct qed_chain_pbl_u16 u16; - struct qed_chain_pbl_u32 u32; - } c; - } pbl; + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; union { - struct qed_chain_u16 chain16; - struct qed_chain_u32 chain32; - } u; + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; /* Capacity counts only usable elements */ - u32 capacity; - u32 page_cnt; + u32 capacity; + u32 page_cnt; - enum qed_chain_mode mode; + enum qed_chain_mode mode; /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_size; - u16 next_page_mask; - u16 usable_per_page; - u8 elem_unusable; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; - u8 cnt_type; + u8 cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. @@ -125,43 +127,47 @@ struct qed_chain { /* Base address of a pre-allocated buffer for pbl */ struct { - dma_addr_t p_phys_table; - void *p_virt_table; - } pbl_sp; + dma_addr_t p_phys_table; + void *p_virt_table; + } pbl_sp; /* Address of first page of the chain - the address is required * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. */ - void *p_virt_addr; - dma_addr_t p_phys_addr; + void *p_virt_addr; + dma_addr_t p_phys_addr; /* Total number of elements [for entire chain] */ - u32 size; + u32 size; - u8 intended_use; + u8 intended_use; - bool b_external_pbl; + bool b_external_pbl; }; -#define QED_CHAIN_PBL_ENTRY_SIZE (8) -#define QED_CHAIN_PAGE_SIZE (0x1000) -#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) +#define QED_CHAIN_PBL_ENTRY_SIZE 8 +#define QED_CHAIN_PAGE_SIZE 0x1000 -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ - (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ - (elem_size))) : 0) +#define ELEMS_PER_PAGE(elem_size) \ + (QED_CHAIN_PAGE_SIZE / (elem_size)) -#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((u32)(ELEMS_PER_PAGE(elem_size) - \ - UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ + 0) -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ - DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) -#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) -#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode))) + +#define is_chain_u16(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) From bdaf98f6d526a8d64b5ff1441924f57fb85ba614 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:32 +0300 Subject: [PATCH 1496/2056] qed: reformat Makefile List one entry per line and sort them alphabetically to simplify the addition of the new ones. 
Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 36 +++++++++++++++++++----- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 4176bbf2a22b..3c75e4fa9b02 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -3,12 +3,34 @@ obj-$(CONFIG_QED) := qed.o -qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ - qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ - qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o -qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o -qed-$(CONFIG_QED_LL2) += qed_ll2.o -qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o -qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o +qed-y := \ + qed_cxt.o \ + qed_dcbx.o \ + qed_debug.o \ + qed_dev.o \ + qed_hw.o \ + qed_init_fw_funcs.o \ + qed_init_ops.o \ + qed_int.o \ + qed_l2.o \ + qed_main.o \ + qed_mcp.o \ + qed_mng_tlv.o \ + qed_ptp.o \ + qed_selftest.o \ + qed_sp_commands.o \ + qed_spq.o + qed-$(CONFIG_QED_FCOE) += qed_fcoe.o +qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o +qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_OOO) += qed_ooo.o + +qed-$(CONFIG_QED_RDMA) += \ + qed_iwarp.o \ + qed_rdma.o \ + qed_roce.o + +qed-$(CONFIG_QED_SRIOV) += \ + qed_sriov.o \ + qed_vf.o From a08c9b2c7ce52709f1915dae0bcd02070f39104e Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:33 +0300 Subject: [PATCH 1497/2056] qed: move chain methods to a separate file Move chain allocation/freeing functions to a new file to not mix it with hardware-related code. Reported-by: kernel test robot Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/Makefile | 1 + drivers/net/ethernet/qlogic/qed/qed_chain.c | 302 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_dev.c | 273 ------------------ 3 files changed, 303 insertions(+), 273 deletions(-) create mode 100644 drivers/net/ethernet/qlogic/qed/qed_chain.c diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 3c75e4fa9b02..f947b105cf14 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_QED) := qed.o qed-y := \ + qed_chain.o \ qed_cxt.o \ qed_dcbx.o \ qed_debug.o \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c new file mode 100644 index 000000000000..bab02ff32514 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (c) 2020 Marvell International Ltd. 
*/ + +#include +#include +#include + +#include "qed_dev_api.h" + +static void qed_chain_free_next_ptr(struct qed_dev *cdev, + struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + struct qed_chain_next *next; + dma_addr_t phys, phys_next; + void *virt, *virt_next; + u32 size, i; + + size = chain->elem_size * chain->usable_per_page; + virt = chain->p_virt_addr; + phys = chain->p_phys_addr; + + for (i = 0; i < chain->page_cnt; i++) { + if (!virt) + break; + + next = virt + size; + virt_next = next->next_virt; + phys_next = HILO_DMA_REGPAIR(next->next_phys); + + dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys); + + virt = virt_next; + phys = phys_next; + } +} + +static void qed_chain_free_single(struct qed_dev *cdev, + struct qed_chain *chain) +{ + if (!chain->p_virt_addr) + return; + + dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + chain->p_virt_addr, chain->p_phys_addr); +} + +static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + struct addr_tbl_entry *entry; + u32 pbl_size, i; + + if (!chain->pbl.pp_addr_tbl) + return; + + for (i = 0; i < chain->page_cnt; i++) { + entry = chain->pbl.pp_addr_tbl + i; + if (!entry->virt_addr) + break; + + dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr, + entry->dma_map); + } + + pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + + if (!chain->b_external_pbl) + dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table, + chain->pbl_sp.p_phys_table); + + vfree(chain->pbl.pp_addr_tbl); + chain->pbl.pp_addr_tbl = NULL; +} + +/** + * qed_chain_free() - Free chain DMA memory. + * + * @cdev: Main device structure. + * @chain: Chain to free. + */ +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) +{ + switch (chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + qed_chain_free_next_ptr(cdev, chain); + break; + case QED_CHAIN_MODE_SINGLE: + qed_chain_free_single(cdev, chain); + break; + case QED_CHAIN_MODE_PBL: + qed_chain_free_pbl(cdev, chain); + break; + default: + break; + } +} + +static int +qed_chain_alloc_sanity_check(struct qed_dev *cdev, + enum qed_chain_cnt_type cnt_type, + size_t elem_size, u32 page_cnt) +{ + u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + + /* The actual chain size can be larger than the maximal possible value + * after rounding up the requested elements number to pages, and after + * taking into account the unusuable elements (next-ptr elements). + * The size of a "u16" chain can be (U16_MAX + 1) since the chain + * size/capacity fields are of u32 type. 
+ */ + switch (cnt_type) { + case QED_CHAIN_CNT_TYPE_U16: + if (chain_size > U16_MAX + 1) + break; + + return 0; + case QED_CHAIN_CNT_TYPE_U32: + if (chain_size > U32_MAX) + break; + + return 0; + default: + return -EINVAL; + } + + DP_NOTICE(cdev, + "The actual chain size (0x%llx) is larger than the maximal possible value\n", + chain_size); + + return -EINVAL; +} + +static int qed_chain_alloc_next_ptr(struct qed_dev *cdev, + struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + void *virt, *virt_prev = NULL; + dma_addr_t phys; + u32 i; + + for (i = 0; i < chain->page_cnt; i++) { + virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + GFP_KERNEL); + if (!virt) + return -ENOMEM; + + if (i == 0) { + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + } else { + qed_chain_init_next_ptr_elem(chain, virt_prev, virt, + phys); + } + + virt_prev = virt; + } + + /* Last page's next element should point to the beginning of the + * chain. + */ + qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr, + chain->p_phys_addr); + + return 0; +} + +static int qed_chain_alloc_single(struct qed_dev *cdev, + struct qed_chain *chain) +{ + dma_addr_t phys; + void *virt; + + virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + &phys, GFP_KERNEL); + if (!virt) + return -ENOMEM; + + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + + return 0; +} + +static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_ext_pbl *ext_pbl) +{ + struct device *dev = &cdev->pdev->dev; + struct addr_tbl_entry *addr_tbl; + dma_addr_t phys, pbl_phys; + void *pbl_virt; + u32 page_cnt, i; + size_t size; + void *virt; + + page_cnt = chain->page_cnt; + + size = array_size(page_cnt, sizeof(*addr_tbl)); + if (unlikely(size == SIZE_MAX)) + return -EOVERFLOW; + + addr_tbl = vzalloc(size); + if (!addr_tbl) + return -ENOMEM; + + chain->pbl.pp_addr_tbl = addr_tbl; + + if (ext_pbl) { + size = 0; + pbl_virt = ext_pbl->p_pbl_virt; + pbl_phys = ext_pbl->p_pbl_phys; + + chain->b_external_pbl = true; + } else { + size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE); + if (unlikely(size == SIZE_MAX)) + return -EOVERFLOW; + + pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, + GFP_KERNEL); + } + + if (!pbl_virt) + return -ENOMEM; + + chain->pbl_sp.p_virt_table = pbl_virt; + chain->pbl_sp.p_phys_table = pbl_phys; + + for (i = 0; i < page_cnt; i++) { + virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + GFP_KERNEL); + if (!virt) + return -ENOMEM; + + if (i == 0) { + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + } + + /* Fill the PBL table with the physical address of the page */ + *(dma_addr_t *)pbl_virt = phys; + pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + + /* Keep the virtual address of the page */ + addr_tbl[i].virt_addr = virt; + addr_tbl[i].dma_map = phys; + } + + return 0; +} + +int qed_chain_alloc(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, + size_t elem_size, + struct qed_chain *chain, + struct qed_chain_ext_pbl *ext_pbl) +{ + u32 page_cnt; + int rc; + + if (mode == QED_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + + rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); + if (rc) { + DP_NOTICE(cdev, + "Cannot allocate a chain with the given arguments:\n"); + DP_NOTICE(cdev, + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, 
elem_size %zu]\n", + intended_use, mode, cnt_type, num_elems, elem_size); + return rc; + } + + qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode, + cnt_type); + + switch (mode) { + case QED_CHAIN_MODE_NEXT_PTR: + rc = qed_chain_alloc_next_ptr(cdev, chain); + break; + case QED_CHAIN_MODE_SINGLE: + rc = qed_chain_alloc_single(cdev, chain); + break; + case QED_CHAIN_MODE_PBL: + rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl); + break; + default: + return -EINVAL; + } + + if (!rc) + return 0; + + qed_chain_free(cdev, chain); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6516a1f921da..d9c7a1a6be94 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4716,279 +4716,6 @@ void qed_hw_remove(struct qed_dev *cdev) qed_mcp_nvm_info_free(p_hwfn); } -static void qed_chain_free_next_ptr(struct qed_dev *cdev, - struct qed_chain *p_chain) -{ - void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; - dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; - struct qed_chain_next *p_next; - u32 size, i; - - if (!p_virt) - return; - - size = p_chain->elem_size * p_chain->usable_per_page; - - for (i = 0; i < p_chain->page_cnt; i++) { - if (!p_virt) - break; - - p_next = (struct qed_chain_next *)((u8 *)p_virt + size); - p_virt_next = p_next->next_virt; - p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, p_virt, p_phys); - - p_virt = p_virt_next; - p_phys = p_phys_next; - } -} - -static void qed_chain_free_single(struct qed_dev *cdev, - struct qed_chain *p_chain) -{ - if (!p_chain->p_virt_addr) - return; - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - p_chain->p_virt_addr, p_chain->p_phys_addr); -} - -static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl; - u32 page_cnt = p_chain->page_cnt, i, pbl_size; - - if (!pp_addr_tbl) - return; - - for (i = 0; i < page_cnt; i++) { - if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map) - break; - - dma_free_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - pp_addr_tbl[i].virt_addr, - pp_addr_tbl[i].dma_map); - } - - pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - - if (!p_chain->b_external_pbl) - dma_free_coherent(&cdev->pdev->dev, - pbl_size, - p_chain->pbl_sp.p_virt_table, - p_chain->pbl_sp.p_phys_table); - - vfree(p_chain->pbl.pp_addr_tbl); - p_chain->pbl.pp_addr_tbl = NULL; -} - -void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - switch (p_chain->mode) { - case QED_CHAIN_MODE_NEXT_PTR: - qed_chain_free_next_ptr(cdev, p_chain); - break; - case QED_CHAIN_MODE_SINGLE: - qed_chain_free_single(cdev, p_chain); - break; - case QED_CHAIN_MODE_PBL: - qed_chain_free_pbl(cdev, p_chain); - break; - } -} - -static int -qed_chain_alloc_sanity_check(struct qed_dev *cdev, - enum qed_chain_cnt_type cnt_type, - size_t elem_size, u32 page_cnt) -{ - u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; - - /* The actual chain size can be larger than the maximal possible value - * after rounding up the requested elements number to pages, and after - * taking into acount the unusuable elements (next-ptr elements). - * The size of a "u16" chain can be (U16_MAX + 1) since the chain - * size/capacity fields are of a u32 type. 
- */ - if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 && - chain_size > ((u32)U16_MAX + 1)) || - (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) { - DP_NOTICE(cdev, - "The actual chain size (0x%llx) is larger than the maximal possible value\n", - chain_size); - return -EINVAL; - } - - return 0; -} - -static int -qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - void *p_virt = NULL, *p_virt_prev = NULL; - dma_addr_t p_phys = 0; - u32 i; - - for (i = 0; i < p_chain->page_cnt; i++) { - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - if (i == 0) { - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - } else { - qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, - p_virt, p_phys); - } - - p_virt_prev = p_virt; - } - /* Last page's next element should point to the beginning of the - * chain. - */ - qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, - p_chain->p_virt_addr, - p_chain->p_phys_addr); - - return 0; -} - -static int -qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) -{ - dma_addr_t p_phys = 0; - void *p_virt = NULL; - - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - - return 0; -} - -static int -qed_chain_alloc_pbl(struct qed_dev *cdev, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl) -{ - u32 page_cnt = p_chain->page_cnt, size, i; - dma_addr_t p_phys = 0, p_pbl_phys = 0; - struct addr_tbl_entry *pp_addr_tbl; - u8 *p_pbl_virt = NULL; - void *p_virt = NULL; - - size = page_cnt * sizeof(*pp_addr_tbl); - pp_addr_tbl = vzalloc(size); - if (!pp_addr_tbl) - return -ENOMEM; - - /* The allocation of the PBL table is done with its full size, since it - * is expected to be successive. - * qed_chain_init_pbl_mem() is called even in a case of an allocation - * failure, since tbl was previously allocated, and it - * should be saved to allow its freeing during the error flow. 
- */ - size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - - if (!ext_pbl) { - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, GFP_KERNEL); - } else { - p_pbl_virt = ext_pbl->p_pbl_virt; - p_pbl_phys = ext_pbl->p_pbl_phys; - p_chain->b_external_pbl = true; - } - - qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl); - if (!p_pbl_virt) - return -ENOMEM; - - for (i = 0; i < page_cnt; i++) { - p_virt = dma_alloc_coherent(&cdev->pdev->dev, - QED_CHAIN_PAGE_SIZE, - &p_phys, GFP_KERNEL); - if (!p_virt) - return -ENOMEM; - - if (i == 0) { - qed_chain_init_mem(p_chain, p_virt, p_phys); - qed_chain_reset(p_chain); - } - - /* Fill the PBL table with the physical address of the page */ - *(dma_addr_t *)p_pbl_virt = p_phys; - /* Keep the virtual address of the page */ - p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt; - p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys; - - p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; - } - - return 0; -} - -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl) -{ - u32 page_cnt; - int rc = 0; - - if (mode == QED_CHAIN_MODE_SINGLE) - page_cnt = 1; - else - page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); - - rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); - if (rc) { - DP_NOTICE(cdev, - "Cannot allocate a chain with the given arguments:\n"); - DP_NOTICE(cdev, - "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", - intended_use, mode, cnt_type, num_elems, elem_size); - return rc; - } - - qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, - mode, cnt_type); - - switch (mode) { - case QED_CHAIN_MODE_NEXT_PTR: - rc = qed_chain_alloc_next_ptr(cdev, p_chain); - break; - case QED_CHAIN_MODE_SINGLE: - rc = qed_chain_alloc_single(cdev, p_chain); - break; - case QED_CHAIN_MODE_PBL: - rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl); - break; - } - if (rc) - goto nomem; - - return 0; - -nomem: - qed_chain_free(cdev, p_chain); - return rc; -} - int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { From 96ca4c50c7f711531d4fa238906f8abbbd0ff517 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:34 +0300 Subject: [PATCH 1498/2056] qed: prevent possible double-frees of the chains Zero-initialize chain on qed_chain_free(), so it couldn't be freed twice and provoke undefined behaviour. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
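The idea described above can be illustrated with a minimal standalone sketch (plain C, hypothetical names, not the driver code): once the teardown routine wipes the addresses it has just released, a second call sees a cleared descriptor and degenerates into a no-op instead of handing the same pointer back to the allocator.

  #include <stdlib.h>
  #include <string.h>

  struct buf {
          void *virt;     /* base address of the allocation */
          size_t size;    /* size of the allocation */
  };

  /* Free the buffer and wipe the descriptor so a repeated call is harmless. */
  static void buf_free(struct buf *b)
  {
          if (!b->virt)
                  return;                 /* already freed or never allocated */

          free(b->virt);
          memset(b, 0, sizeof(*b));       /* the double-free guard */
  }

In the patch below, qed_chain_free() gets the same guard by calling qed_chain_init_mem(chain, NULL, 0) after the mode-specific teardown.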
Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index bab02ff32514..917b783433f7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -92,8 +92,10 @@ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) qed_chain_free_pbl(cdev, chain); break; default: - break; + return; } + + qed_chain_init_mem(chain, NULL, 0); } static int From 9b6ee3cf95d322ab02e9927f5b08ebc870ca9f1f Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:35 +0300 Subject: [PATCH 1499/2056] qed: sanitize PBL chains allocation PBL chain elements are actually DMA addresses stored in __le64, but currently their size is hardcoded to 8, and DMA addresses are assigned via cast to variable-sized dma_addr_t without any bitwise conversions. Change the type of pbl_virt array to match the actual one, add a new field to store the size of allocated DMA memory and sanitize elements assignment. Misc: give more logic names to the members of qed_chain::pbl_sp embedded struct. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 21 +++++++++---------- .../net/ethernet/qlogic/qed/qed_sp_commands.c | 4 ++-- include/linux/qed/qed_chain.h | 16 +++++++------- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index 917b783433f7..a9ff15b9d8c0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -49,7 +49,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *entry; - u32 pbl_size, i; + u32 i; if (!chain->pbl.pp_addr_tbl) return; @@ -63,11 +63,10 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) entry->dma_map); } - pbl_size = chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - if (!chain->b_external_pbl) - dma_free_coherent(dev, pbl_size, chain->pbl_sp.p_virt_table, - chain->pbl_sp.p_phys_table); + dma_free_coherent(dev, chain->pbl_sp.table_size, + chain->pbl_sp.table_virt, + chain->pbl_sp.table_phys); vfree(chain->pbl.pp_addr_tbl); chain->pbl.pp_addr_tbl = NULL; @@ -190,7 +189,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *addr_tbl; dma_addr_t phys, pbl_phys; - void *pbl_virt; + __le64 *pbl_virt; u32 page_cnt, i; size_t size; void *virt; @@ -214,7 +213,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, chain->b_external_pbl = true; } else { - size = array_size(page_cnt, QED_CHAIN_PBL_ENTRY_SIZE); + size = array_size(page_cnt, sizeof(*pbl_virt)); if (unlikely(size == SIZE_MAX)) return -EOVERFLOW; @@ -225,8 +224,9 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, if (!pbl_virt) return -ENOMEM; - chain->pbl_sp.p_virt_table = pbl_virt; - chain->pbl_sp.p_phys_table = pbl_phys; + chain->pbl_sp.table_virt = pbl_virt; + chain->pbl_sp.table_phys = pbl_phys; + chain->pbl_sp.table_size = size; for (i = 0; i < page_cnt; i++) { virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, @@ -240,8 +240,7 @@ static int qed_chain_alloc_pbl(struct qed_dev 
*cdev, struct qed_chain *chain, } /* Fill the PBL table with the physical address of the page */ - *(dma_addr_t *)pbl_virt = phys; - pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + pbl_virt[i] = cpu_to_le64(phys); /* Keep the virtual address of the page */ addr_tbl[i].virt_addr = virt; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 8142f5669b26..aa71adcf31ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -366,11 +366,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, - p_hwfn->p_eq->chain.pbl_sp.p_phys_table); + qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, - p_hwfn->p_consq->chain.pbl_sp.p_phys_table); + qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 087073517c09..265e0b671a5c 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -127,8 +127,9 @@ struct qed_chain { /* Base address of a pre-allocated buffer for pbl */ struct { - dma_addr_t p_phys_table; - void *p_virt_table; + __le64 *table_virt; + dma_addr_t table_phys; + size_t table_size; } pbl_sp; /* Address of first page of the chain - the address is required @@ -146,7 +147,6 @@ struct qed_chain { bool b_external_pbl; }; -#define QED_CHAIN_PBL_ENTRY_SIZE 8 #define QED_CHAIN_PAGE_SIZE 0x1000 #define ELEMS_PER_PAGE(elem_size) \ @@ -236,7 +236,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) { - return p_chain->pbl_sp.p_phys_table; + return p_chain->pbl_sp.table_phys; } /** @@ -527,8 +527,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt; - p_chain->pbl_sp.p_phys_table = 0; - p_chain->pbl_sp.p_virt_table = NULL; + p_chain->pbl_sp.table_phys = 0; + p_chain->pbl_sp.table_virt = NULL; p_chain->pbl.pp_addr_tbl = NULL; } @@ -569,8 +569,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, dma_addr_t p_phys_pbl, struct addr_tbl_entry *pp_addr_tbl) { - p_chain->pbl_sp.p_phys_table = p_phys_pbl; - p_chain->pbl_sp.p_virt_table = p_virt_pbl; + p_chain->pbl_sp.table_phys = p_phys_pbl; + p_chain->pbl_sp.table_virt = p_virt_pbl; p_chain->pbl.pp_addr_tbl = pp_addr_tbl; } From 5e776d8016119e13c27fbb6e87c9e1fd6f8b2a75 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:36 +0300 Subject: [PATCH 1500/2056] qed: move chain initialization inlines next to allocation functions qed_chain_init*() are used in one file/place on "cold" path only, so they can be uninlined and moved next to the call sites. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
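As a general illustration of the uninlining step (hypothetical names, not the actual driver layout): a helper that only runs on a setup or teardown path can live as an ordinary static function in its single caller's translation unit instead of as a static inline in a widely included header; the header shrinks and the compiler still decides about inlining within that file.

  /* ring.c -- assumed to be the only user of the init helper */
  struct ring {
          void *base;
          unsigned int elem_size;
          unsigned int capacity;
  };

  /* Cold path: called once per ring at allocation time, so there is no
   * need to expose it as a header inline.
   */
  static void ring_init(struct ring *r, void *base,
                        unsigned int elem_size, unsigned int capacity)
  {
          r->base = base;
          r->elem_size = elem_size;
          r->capacity = capacity;
  }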
Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 47 ++++++++ include/linux/qed/qed_chain.h | 112 -------------------- 2 files changed, 47 insertions(+), 112 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index a9ff15b9d8c0..b60ec3e4654c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -7,6 +7,53 @@ #include "qed_dev_api.h" +static void qed_chain_init_params(struct qed_chain *chain, + u32 page_cnt, u8 elem_size, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type) +{ + memset(chain, 0, sizeof(*chain)); + + chain->elem_size = elem_size; + chain->intended_use = intended_use; + chain->mode = mode; + chain->cnt_type = cnt_type; + + chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); + chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + + chain->elem_per_page_mask = chain->elem_per_page - 1; + chain->next_page_mask = chain->usable_per_page & + chain->elem_per_page_mask; + + chain->page_cnt = page_cnt; + chain->capacity = chain->usable_per_page * page_cnt; + chain->size = chain->elem_per_page * page_cnt; +} + +static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain, + void *virt_curr, void *virt_next, + dma_addr_t phys_next) +{ + struct qed_chain_next *next; + u32 size; + + size = chain->elem_size * chain->usable_per_page; + next = virt_curr + size; + + DMA_REGPAIR_LE(next->next_phys, phys_next); + next->next_virt = virt_next; +} + +static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr, + dma_addr_t phys_addr) +{ + chain->p_virt_addr = virt_addr; + chain->p_phys_addr = phys_addr; +} + static void qed_chain_free_next_ptr(struct qed_dev *cdev, struct qed_chain *chain) { diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 265e0b671a5c..a0d83095dc73 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -490,118 +490,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } } -/** - * @brief qed_chain_init - Initalizes a basic chain struct - * - * @param p_chain - * @param p_virt_addr - * @param p_phys_addr physical address of allocated buffer's beginning - * @param page_cnt number of pages in the allocated buffer - * @param elem_size size of each element in the chain - * @param intended_use - * @param mode - */ -static inline void qed_chain_init_params(struct qed_chain *p_chain, - u32 page_cnt, - u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type) -{ - /* chain fixed parameters */ - p_chain->p_virt_addr = NULL; - p_chain->p_phys_addr = 0; - p_chain->elem_size = elem_size; - p_chain->intended_use = (u8)intended_use; - p_chain->mode = mode; - p_chain->cnt_type = (u8)cnt_type; - - p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; - p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->next_page_mask = (p_chain->usable_per_page & - p_chain->elem_per_page_mask); - - p_chain->page_cnt = page_cnt; - p_chain->capacity = p_chain->usable_per_page * page_cnt; - p_chain->size = p_chain->elem_per_page * page_cnt; - - p_chain->pbl_sp.table_phys = 0; - p_chain->pbl_sp.table_virt = NULL; - p_chain->pbl.pp_addr_tbl = NULL; -} - -/** - * 
@brief qed_chain_init_mem - - * - * Initalizes a basic chain struct with its chain buffers - * - * @param p_chain - * @param p_virt_addr virtual address of allocated buffer's beginning - * @param p_phys_addr physical address of allocated buffer's beginning - * - */ -static inline void qed_chain_init_mem(struct qed_chain *p_chain, - void *p_virt_addr, dma_addr_t p_phys_addr) -{ - p_chain->p_virt_addr = p_virt_addr; - p_chain->p_phys_addr = p_phys_addr; -} - -/** - * @brief qed_chain_init_pbl_mem - - * - * Initalizes a basic chain struct with its pbl buffers - * - * @param p_chain - * @param p_virt_pbl pointer to a pre allocated side table which will hold - * virtual page addresses. - * @param p_phys_pbl pointer to a pre-allocated side table which will hold - * physical page addresses. - * @param pp_virt_addr_tbl - * pointer to a pre-allocated side table which will hold - * the virtual addresses of the chain pages. - * - */ -static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, - void *p_virt_pbl, - dma_addr_t p_phys_pbl, - struct addr_tbl_entry *pp_addr_tbl) -{ - p_chain->pbl_sp.table_phys = p_phys_pbl; - p_chain->pbl_sp.table_virt = p_virt_pbl; - p_chain->pbl.pp_addr_tbl = pp_addr_tbl; -} - -/** - * @brief qed_chain_init_next_ptr_elem - - * - * Initalizes a next pointer element - * - * @param p_chain - * @param p_virt_curr virtual address of a chain page of which the next - * pointer element is initialized - * @param p_virt_next virtual address of the next chain page - * @param p_phys_next physical address of the next chain page - * - */ -static inline void -qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, - void *p_virt_curr, - void *p_virt_next, dma_addr_t p_phys_next) -{ - struct qed_chain_next *p_next; - u32 size; - - size = p_chain->elem_size * p_chain->usable_per_page; - p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); - - DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); - - p_next->next_virt = p_virt_next; -} - /** * @brief qed_chain_get_last_elem - * From c3a321b06a806614350db4bda04215fee796f0fb Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:37 +0300 Subject: [PATCH 1501/2056] qed: simplify initialization of the chains with an external PBL Fill PBL table parameters for chains with an external PBL data earlier on qed_chain_init_params() rather than on allocation itself. This simplifies allocation code and allows to extend struct ext_pbl for other chain types. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
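The reshuffle can be sketched in isolation (plain C, hypothetical names): when the caller hands in a pre-allocated table, record it and set a flag while the parameters are being filled in, so the allocator needs only one branch to skip its own table allocation.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdlib.h>

  struct ring {
          void **table;           /* may be caller-provided ("external") */
          bool external_table;
  };

  static void ring_init(struct ring *r, void **ext_table)
  {
          r->table = ext_table;
          r->external_table = ext_table != NULL;
  }

  static int ring_alloc_table(struct ring *r, size_t entries)
  {
          if (r->external_table)
                  goto fill;              /* table came from the caller */

          r->table = calloc(entries, sizeof(*r->table));
          if (!r->table)
                  return -1;

  fill:
          /* ... populate r->table[0..entries - 1] here ... */
          return 0;
  }

This mirrors the alloc_pages label introduced in qed_chain_alloc_pbl() below.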
Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 37 +++++++++++---------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index b60ec3e4654c..6effee3b50f4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -11,7 +11,8 @@ static void qed_chain_init_params(struct qed_chain *chain, u32 page_cnt, u8 elem_size, enum qed_chain_use_mode intended_use, enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type) + enum qed_chain_cnt_type cnt_type, + const struct qed_chain_ext_pbl *ext_pbl) { memset(chain, 0, sizeof(*chain)); @@ -31,6 +32,13 @@ static void qed_chain_init_params(struct qed_chain *chain, chain->page_cnt = page_cnt; chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; + + if (ext_pbl && ext_pbl->p_pbl_virt) { + chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt; + chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys; + + chain->b_external_pbl = true; + } } static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain, @@ -230,8 +238,7 @@ static int qed_chain_alloc_single(struct qed_dev *cdev, return 0; } -static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, - struct qed_chain_ext_pbl *ext_pbl) +static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) { struct device *dev = &cdev->pdev->dev; struct addr_tbl_entry *addr_tbl; @@ -253,21 +260,14 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, chain->pbl.pp_addr_tbl = addr_tbl; - if (ext_pbl) { - size = 0; - pbl_virt = ext_pbl->p_pbl_virt; - pbl_phys = ext_pbl->p_pbl_phys; + if (chain->b_external_pbl) + goto alloc_pages; - chain->b_external_pbl = true; - } else { - size = array_size(page_cnt, sizeof(*pbl_virt)); - if (unlikely(size == SIZE_MAX)) - return -EOVERFLOW; - - pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, - GFP_KERNEL); - } + size = array_size(page_cnt, sizeof(*pbl_virt)); + if (unlikely(size == SIZE_MAX)) + return -EOVERFLOW; + pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL); if (!pbl_virt) return -ENOMEM; @@ -275,6 +275,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain, chain->pbl_sp.table_phys = pbl_phys; chain->pbl_sp.table_size = size; +alloc_pages: for (i = 0; i < page_cnt; i++) { virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, GFP_KERNEL); @@ -325,7 +326,7 @@ int qed_chain_alloc(struct qed_dev *cdev, } qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode, - cnt_type); + cnt_type, ext_pbl); switch (mode) { case QED_CHAIN_MODE_NEXT_PTR: @@ -335,7 +336,7 @@ int qed_chain_alloc(struct qed_dev *cdev, rc = qed_chain_alloc_single(cdev, chain); break; case QED_CHAIN_MODE_PBL: - rc = qed_chain_alloc_pbl(cdev, chain, ext_pbl); + rc = qed_chain_alloc_pbl(cdev, chain); break; default: return -EINVAL; From b6db3f71c976ea92361dbc7ebfb65db666ac9f02 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:38 +0300 Subject: [PATCH 1502/2056] qed: simplify chain allocation with init params struct To simplify qed_chain_alloc() prototype and call sites, introduce struct qed_chain_init_params to specify chain params, and pass a pointer to filled struct to the actual qed_chain_alloc() instead of a long list of separate arguments. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
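A self-contained sketch of the conversion (hypothetical names, ordinary userspace C, not the driver API): the long positional argument list becomes a parameter block, call sites use designated initializers so unset members default to zero, and a prepared block can be reused across several allocations by adjusting individual fields between calls.

  #include <stddef.h>
  #include <stdio.h>

  struct ring_init_params {
          int mode;
          int intended_use;
          int cnt_type;
          unsigned int num_elems;
          size_t elem_size;
  };

  static int ring_alloc(const struct ring_init_params *params)
  {
          printf("mode=%d elems=%u elem_size=%zu\n",
                 params->mode, params->num_elems, params->elem_size);
          return 0;
  }

  int main(void)
  {
          struct ring_init_params params = {
                  .mode = 1,
                  .num_elems = 256,
                  .elem_size = 64,
          };

          return ring_alloc(&params);
  }

The reuse aspect shows up in the call sites below, e.g. qede_alloc_mem_rxq(), which fills the common fields once and only changes mode, intended_use and elem_size between the Rx BD ring and the completion ring.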
Miller --- drivers/infiniband/hw/qedr/main.c | 20 ++-- drivers/infiniband/hw/qedr/verbs.c | 95 +++++++++---------- drivers/net/ethernet/qlogic/qed/qed_chain.c | 80 +++++++++------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 32 +------ drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 39 ++++---- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 44 +++++---- drivers/net/ethernet/qlogic/qed/qed_spq.c | 90 +++++++++++------- drivers/net/ethernet/qlogic/qede/qede_main.c | 45 ++++----- include/linux/qed/qed_chain.h | 21 ++-- include/linux/qed/qed_if.h | 9 +- 10 files changed, 242 insertions(+), 233 deletions(-) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index ccaedfd53e49..b1de8d608e4d 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -346,9 +346,14 @@ static void qedr_free_resources(struct qedr_dev *dev) static int qedr_alloc_resources(struct qedr_dev *dev) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .elem_size = sizeof(struct regpair *), + }; struct qedr_cnq *cnq; __le16 *cons_pi; - u16 n_entries; int i, rc; dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid), @@ -382,7 +387,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev) dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev); /* Allocate CNQ PBLs */ - n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE); + params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, + QEDR_ROCE_MAX_CNQ_SIZE); + for (i = 0; i < dev->num_cnq; i++) { cnq = &dev->cnq_array[i]; @@ -391,13 +398,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev) if (rc) goto err3; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - n_entries, - sizeof(struct regpair *), - &cnq->pbl, NULL); + rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl, + ¶ms); if (rc) goto err4; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9b9e80266367..6737895a0d68 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -891,6 +891,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, udata, struct qedr_ucontext, ibucontext); struct qed_rdma_destroy_cq_out_params destroy_oparams; struct qed_rdma_destroy_cq_in_params destroy_iparams; + struct qed_chain_init_params chain_params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + .elem_size = sizeof(union rdma_cqe), + }; struct qedr_dev *dev = get_qedr_dev(ibdev); struct qed_rdma_create_cq_in_params params; struct qedr_create_cq_ureq ureq = {}; @@ -917,6 +923,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, chain_entries = qedr_align_cq_entries(entries); chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES); + chain_params.num_elems = chain_entries; /* calc db offset. 
user will add DPI base, kernel will add db addr */ db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); @@ -951,13 +958,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, } else { cq->cq_type = QEDR_CQ_TYPE_KERNEL; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - chain_entries, - sizeof(union rdma_cqe), - &cq->pbl, NULL); + rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl, + &chain_params); if (rc) goto err0; @@ -1446,6 +1448,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq, struct ib_srq_init_attr *init_attr) { struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + .elem_size = QEDR_SRQ_WQE_ELEM_SIZE, + }; dma_addr_t phy_prod_pair_addr; u32 num_elems; void *va; @@ -1464,13 +1472,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq, hw_srq->virt_prod_pair_addr = va; num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - num_elems, - QEDR_SRQ_WQE_ELEM_SIZE, - &hw_srq->pbl, NULL); + params.num_elems = num_elems; + + rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, ¶ms); if (rc) goto err0; @@ -1901,29 +1905,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, u32 n_sq_elems, u32 n_rq_elems) { struct qed_rdma_create_qp_out_params out_params; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + }; int rc; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_sq_elems, - QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl, NULL); + params.intended_use = QED_CHAIN_USE_TO_PRODUCE; + params.num_elems = n_sq_elems; + params.elem_size = QEDR_SQE_ELEMENT_SIZE; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms); if (rc) return rc; in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_rq_elems, - QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl, NULL); + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.elem_size = n_rq_elems; + params.elem_size = QEDR_RQE_ELEMENT_SIZE; + + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms); if (rc) return rc; @@ -1949,7 +1952,10 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, u32 n_sq_elems, u32 n_rq_elems) { struct qed_rdma_create_qp_out_params out_params; - struct qed_chain_ext_pbl ext_pbl; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .cnt_type = QED_CHAIN_CNT_TYPE_U32, + }; int rc; in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems, @@ -1966,31 +1972,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, return -EINVAL; /* Now we allocate the chain */ - ext_pbl.p_pbl_virt = out_params.sq_pbl_virt; - ext_pbl.p_pbl_phys = out_params.sq_pbl_phys; - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_sq_elems, - QEDR_SQE_ELEMENT_SIZE, - &qp->sq.pbl, &ext_pbl); + params.intended_use = QED_CHAIN_USE_TO_PRODUCE; + params.num_elems = n_sq_elems; + 
params.elem_size = QEDR_SQE_ELEMENT_SIZE; + params.ext_pbl_virt = out_params.sq_pbl_virt; + params.ext_pbl_phys = out_params.sq_pbl_phys; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, ¶ms); if (rc) goto err; - ext_pbl.p_pbl_virt = out_params.rq_pbl_virt; - ext_pbl.p_pbl_phys = out_params.rq_pbl_phys; - - rc = dev->ops->common->chain_alloc(dev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U32, - n_rq_elems, - QEDR_RQE_ELEMENT_SIZE, - &qp->rq.pbl, &ext_pbl); + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.num_elems = n_rq_elems; + params.elem_size = QEDR_RQE_ELEMENT_SIZE; + params.ext_pbl_virt = out_params.rq_pbl_virt; + params.ext_pbl_phys = out_params.rq_pbl_phys; + rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms); if (rc) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index 6effee3b50f4..a68ee4b3dbbc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -7,23 +7,22 @@ #include "qed_dev_api.h" -static void qed_chain_init_params(struct qed_chain *chain, - u32 page_cnt, u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - const struct qed_chain_ext_pbl *ext_pbl) +static void qed_chain_init(struct qed_chain *chain, + const struct qed_chain_init_params *params, + u32 page_cnt) { memset(chain, 0, sizeof(*chain)); - chain->elem_size = elem_size; - chain->intended_use = intended_use; - chain->mode = mode; - chain->cnt_type = cnt_type; + chain->elem_size = params->elem_size; + chain->intended_use = params->intended_use; + chain->mode = params->mode; + chain->cnt_type = params->cnt_type; - chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size); + chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, + params->mode); + chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, + params->mode); chain->elem_per_page_mask = chain->elem_per_page - 1; chain->next_page_mask = chain->usable_per_page & @@ -33,9 +32,9 @@ static void qed_chain_init_params(struct qed_chain *chain, chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; - if (ext_pbl && ext_pbl->p_pbl_virt) { - chain->pbl_sp.table_virt = ext_pbl->p_pbl_virt; - chain->pbl_sp.table_phys = ext_pbl->p_pbl_phys; + if (params->ext_pbl_virt) { + chain->pbl_sp.table_virt = params->ext_pbl_virt; + chain->pbl_sp.table_phys = params->ext_pbl_phys; chain->b_external_pbl = true; } @@ -154,10 +153,16 @@ void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) static int qed_chain_alloc_sanity_check(struct qed_dev *cdev, - enum qed_chain_cnt_type cnt_type, - size_t elem_size, u32 page_cnt) + const struct qed_chain_init_params *params, + u32 page_cnt) { - u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + u64 chain_size; + + chain_size = ELEMS_PER_PAGE(params->elem_size); + chain_size *= page_cnt; + + if (!chain_size) + return -EINVAL; /* The actual chain size can be larger than the maximal possible value * after rounding up the requested elements number to pages, and after @@ -165,7 +170,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev, * The size of a "u16" chain can be (U16_MAX + 1) since the chain * size/capacity 
fields are of u32 type. */ - switch (cnt_type) { + switch (params->cnt_type) { case QED_CHAIN_CNT_TYPE_U16: if (chain_size > U16_MAX + 1) break; @@ -298,37 +303,42 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) return 0; } -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *chain, - struct qed_chain_ext_pbl *ext_pbl) +/** + * qed_chain_alloc() - Allocate and initialize a chain. + * + * @cdev: Main device structure. + * @chain: Chain to be processed. + * @params: Chain initialization parameters. + * + * Return: 0 on success, negative errno otherwise. + */ +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params) { u32 page_cnt; int rc; - if (mode == QED_CHAIN_MODE_SINGLE) + if (params->mode == QED_CHAIN_MODE_SINGLE) page_cnt = 1; else - page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, + params->elem_size, + params->mode); - rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); + rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); if (rc) { DP_NOTICE(cdev, "Cannot allocate a chain with the given arguments:\n"); DP_NOTICE(cdev, "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", - intended_use, mode, cnt_type, num_elems, elem_size); + params->intended_use, params->mode, params->cnt_type, + params->num_elems, params->elem_size); return rc; } - qed_chain_init_params(chain, page_cnt, elem_size, intended_use, mode, - cnt_type, ext_pbl); + qed_chain_init(chain, params, page_cnt); - switch (mode) { + switch (params->mode) { case QED_CHAIN_MODE_NEXT_PTR: rc = qed_chain_alloc_next_ptr(cdev, chain); break; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 395d4932c262..d3c1f3879be8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -254,35 +254,9 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); -/** - * @brief qed_chain_alloc - Allocate and initialize a chain - * - * @param p_hwfn - * @param intended_use - * @param mode - * @param num_elems - * @param elem_size - * @param p_chain - * @param ext_pbl - a possible external PBL - * - * @return int - */ -int -qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl); - -/** - * @brief qed_chain_free - Free chain DMA memory - * - * @param p_hwfn - * @param p_chain - */ -void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain); +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params); +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** * @@brief qed_fw_l2_queue - Get absolute L2 queue ID diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index 25d2c882d7ac..4eae4ee3538f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -684,9 +684,13 @@ static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn) static int 
qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, struct qed_iscsi_conn **p_out_conn) { - u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0; struct scsi_terminate_extra_params *p_q_cnts = NULL; struct qed_iscsi_pf_params *p_params = NULL; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + }; struct tcp_upload_params *p_tcp = NULL; struct qed_iscsi_conn *p_conn = NULL; int rc = 0; @@ -727,34 +731,25 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, goto nomem_upload_param; p_conn->tcp_upload_params_virt_addr = p_tcp; - r2tq_num_elements = p_params->num_r2tq_pages_in_ring * - QED_CHAIN_PAGE_SIZE / 0x80; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - r2tq_num_elements, 0x80, &p_conn->r2tq, NULL); + params.num_elems = p_params->num_r2tq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe); + params.elem_size = sizeof(struct iscsi_wqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, ¶ms); if (rc) goto nomem_r2tq; - uhq_num_elements = p_params->num_uhq_pages_in_ring * + params.num_elems = p_params->num_uhq_pages_in_ring * QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - uhq_num_elements, - sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL); + params.elem_size = sizeof(struct iscsi_uhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, ¶ms); if (rc) goto nomem_uhq; - xhq_num_elements = uhq_num_elements; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - xhq_num_elements, - sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL); + params.elem_size = sizeof(struct iscsi_xhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, ¶ms); if (rc) goto nomem; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 6f4aec339cd4..0452b728c527 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1125,6 +1125,12 @@ static int qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { + struct qed_chain_init_params params = { + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.rx_num_desc, + }; + struct qed_dev *cdev = p_hwfn->cdev; struct qed_ll2_rx_packet *p_descq; u32 capacity; int rc = 0; @@ -1132,13 +1138,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, if (!p_ll2_info->input.rx_num_desc) goto out; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_NEXT_PTR, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.rx_num_desc, - sizeof(struct core_rx_bd), - &p_ll2_info->rx_queue.rxq_chain, NULL); + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.elem_size = sizeof(struct core_rx_bd); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, ¶ms); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); goto out; @@ -1154,13 +1157,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, } p_ll2_info->rx_queue.descq_array = p_descq; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.rx_num_desc, - sizeof(struct 
core_rx_fast_path_cqe), - &p_ll2_info->rx_queue.rcq_chain, NULL); + params.mode = QED_CHAIN_MODE_PBL; + params.elem_size = sizeof(struct core_rx_fast_path_cqe); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, ¶ms); if (rc) { DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); goto out; @@ -1177,6 +1177,13 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.tx_num_desc, + .elem_size = sizeof(struct core_tx_bd), + }; struct qed_ll2_tx_packet *p_descq; u32 desc_size; u32 capacity; @@ -1185,13 +1192,8 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, if (!p_ll2_info->input.tx_num_desc) goto out; - rc = qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - p_ll2_info->input.tx_num_desc, - sizeof(struct core_tx_bd), - &p_ll2_info->tx_queue.txq_chain, NULL); + rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, + ¶ms); if (rc) goto out; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 92ab029789e5..0bc1a0aeb56e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -382,22 +382,26 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = num_elem, + .elem_size = sizeof(union event_ring_element), + }; struct qed_eq *p_eq; + int ret; /* Allocate EQ struct */ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) return -ENOMEM; - /* Allocate and initialize EQ chain*/ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - num_elem, - sizeof(union event_ring_element), - &p_eq->chain, NULL)) + ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n"); goto eq_allocate_fail; + } /* register EQ completion on the SP SB */ qed_int_register_cb(p_hwfn, qed_eq_completion, @@ -408,7 +412,8 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) eq_allocate_fail: kfree(p_eq); - return -ENOMEM; + + return ret; } void qed_eq_setup(struct qed_hwfn *p_hwfn) @@ -529,33 +534,40 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) int qed_spq_alloc(struct qed_hwfn *p_hwfn) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_SINGLE, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .elem_size = sizeof(struct slow_path_element), + }; + struct qed_dev *cdev = p_hwfn->cdev; struct qed_spq_entry *p_virt = NULL; struct qed_spq *p_spq = NULL; dma_addr_t p_phys = 0; u32 capacity; + int ret; /* SPQ struct */ p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) return -ENOMEM; - /* SPQ ring */ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_SINGLE, - QED_CHAIN_CNT_TYPE_U16, - 0, /* N/A when the mode is SINGLE */ - sizeof(struct slow_path_element), - &p_spq->chain, NULL)) - goto spq_allocate_fail; + /* SPQ ring */ + ret = qed_chain_alloc(cdev, &p_spq->chain, ¶ms); + if 
(ret) { + DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n"); + goto spq_chain_alloc_fail; + } /* allocate and fill the SPQ elements (incl. ramrod data list) */ capacity = qed_chain_get_capacity(&p_spq->chain); - p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + ret = -ENOMEM; + + p_virt = dma_alloc_coherent(&cdev->pdev->dev, capacity * sizeof(struct qed_spq_entry), &p_phys, GFP_KERNEL); if (!p_virt) - goto spq_allocate_fail; + goto spq_alloc_fail; p_spq->p_virt = p_virt; p_spq->p_phys = p_phys; @@ -563,10 +575,12 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) return 0; -spq_allocate_fail: - qed_chain_free(p_hwfn->cdev, &p_spq->chain); +spq_alloc_fail: + qed_chain_free(cdev, &p_spq->chain); +spq_chain_alloc_fail: kfree(p_spq); - return -ENOMEM; + + return ret; } void qed_spq_free(struct qed_hwfn *p_hwfn) @@ -967,30 +981,40 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, return 0; } +#define QED_SPQ_CONSQ_ELEM_SIZE 0x80 + int qed_consq_alloc(struct qed_hwfn *p_hwfn) { + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE, + .elem_size = QED_SPQ_CONSQ_ELEM_SIZE, + }; struct qed_consq *p_consq; + int ret; /* Allocate ConsQ struct */ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) return -ENOMEM; - /* Allocate and initialize EQ chain*/ - if (qed_chain_alloc(p_hwfn->cdev, - QED_CHAIN_USE_TO_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, &p_consq->chain, NULL)) - goto consq_allocate_fail; + /* Allocate and initialize ConsQ chain */ + ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain"); + goto consq_alloc_fail; + } p_hwfn->p_consq = p_consq; + return 0; -consq_allocate_fail: +consq_alloc_fail: kfree(p_consq); - return -ENOMEM; + + return ret; } void qed_consq_setup(struct qed_hwfn *p_hwfn) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6f2171dc0dea..b5a95f165520 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1442,6 +1442,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq) /* This function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { + struct qed_chain_init_params params = { + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = RX_RING_SIZE, + }; + struct qed_dev *cdev = edev->cdev; int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; @@ -1477,24 +1482,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) } /* Allocate FW Rx ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_NEXT_PTR, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(struct eth_rx_bd), - &rxq->rx_bd_ring, NULL); + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; + params.elem_size = sizeof(struct eth_rx_bd); + + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, ¶ms); if (rc) goto err; /* Allocate FW completion ring */ - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - RX_RING_SIZE, - sizeof(union eth_rx_cqe), - &rxq->rx_comp_ring, NULL); + params.mode = QED_CHAIN_MODE_PBL; + 
params.intended_use = QED_CHAIN_USE_TO_CONSUME; + params.elem_size = sizeof(union eth_rx_cqe); + + rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, ¶ms); if (rc) goto err; @@ -1531,7 +1532,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) /* This function allocates all memory needed per Tx queue */ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) { - union eth_tx_bd_types *p_virt; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = edev->q_num_tx_buffers, + .elem_size = sizeof(union eth_tx_bd_types), + }; int size, rc; txq->num_tx_buffers = edev->q_num_tx_buffers; @@ -1549,13 +1556,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq) goto err; } - rc = edev->ops->common->chain_alloc(edev->cdev, - QED_CHAIN_USE_TO_CONSUME_PRODUCE, - QED_CHAIN_MODE_PBL, - QED_CHAIN_CNT_TYPE_U16, - txq->num_tx_buffers, - sizeof(*p_virt), - &txq->tx_pbl, NULL); + rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, ¶ms); if (rc) goto err; diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index a0d83095dc73..f5cfee0934e5 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -54,11 +54,6 @@ struct qed_chain_pbl_u32 { u32 cons_page_idx; }; -struct qed_chain_ext_pbl { - dma_addr_t p_pbl_phys; - void *p_pbl_virt; -}; - struct qed_chain_u16 { /* Cyclic index of next element to produce/consme */ u16 prod_idx; @@ -119,7 +114,7 @@ struct qed_chain { u16 usable_per_page; u8 elem_unusable; - u8 cnt_type; + enum qed_chain_cnt_type cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. @@ -142,11 +137,23 @@ struct qed_chain { /* Total number of elements [for entire chain] */ u32 size; - u8 intended_use; + enum qed_chain_use_mode intended_use; bool b_external_pbl; }; +struct qed_chain_init_params { + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; + enum qed_chain_cnt_type cnt_type; + + u32 num_elems; + size_t elem_size; + + void *ext_pbl_virt; + dma_addr_t ext_pbl_phys; +}; + #define QED_CHAIN_PAGE_SIZE 0x1000 #define ELEMS_PER_PAGE(elem_size) \ diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index a5c6854343e6..cd6a5c7e56eb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -948,13 +948,8 @@ struct qed_common_ops { u8 dp_level); int (*chain_alloc)(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl); + struct qed_chain *chain, + struct qed_chain_init_params *params); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); From 155065866bc36f20061c55fd2ca287a466911b16 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:39 +0300 Subject: [PATCH 1503/2056] qed: add support for different page sizes for chains Extend current infrastructure to store chain page size in a struct and use it in all functions instead of fixed QED_CHAIN_PAGE_SIZE. Its value remains the default one, but can be overridden in qed_chain_init_params before chain allocation. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
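A minimal sketch of the page-size handling (hypothetical names, plain C): a zero page_size in the parameter block means "use the default", and the per-page element arithmetic is derived from whichever value is finally chosen rather than from a fixed constant.

  #include <stdio.h>

  #define DEFAULT_PAGE_SIZE 4096u         /* stand-in for QED_CHAIN_PAGE_SIZE (SZ_4K) */

  struct ring_params {
          unsigned int page_size;         /* 0 means "use the default" */
          unsigned int elem_size;
  };

  static unsigned int elems_per_page(const struct ring_params *p)
  {
          return p->page_size / p->elem_size;
  }

  int main(void)
  {
          struct ring_params p = { .elem_size = 64 };     /* page_size left at 0 */

          if (!p.page_size)
                  p.page_size = DEFAULT_PAGE_SIZE;

          printf("%u elements per %u-byte page\n",
                 elems_per_page(&p), p.page_size);
          return 0;
  }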
Miller --- drivers/infiniband/hw/qedr/verbs.c | 2 ++ drivers/net/ethernet/qlogic/qed/qed_chain.c | 28 +++++++++++++-------- include/linux/qed/qed_chain.h | 21 ++++++++++------ 3 files changed, 33 insertions(+), 18 deletions(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 6737895a0d68..49b8a43e3fa2 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1960,9 +1960,11 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev, in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems, QEDR_SQE_ELEMENT_SIZE, + QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems, QEDR_RQE_ELEMENT_SIZE, + QED_CHAIN_PAGE_SIZE, QED_CHAIN_MODE_PBL); qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index a68ee4b3dbbc..f8efd36d66e0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -18,8 +18,10 @@ static void qed_chain_init(struct qed_chain *chain, chain->mode = params->mode; chain->cnt_type = params->cnt_type; - chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size); + chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size, + params->page_size); chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, + params->page_size, params->mode); chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, params->mode); @@ -28,6 +30,7 @@ static void qed_chain_init(struct qed_chain *chain, chain->next_page_mask = chain->usable_per_page & chain->elem_per_page_mask; + chain->page_size = params->page_size; chain->page_cnt = page_cnt; chain->capacity = chain->usable_per_page * page_cnt; chain->size = chain->elem_per_page * page_cnt; @@ -82,7 +85,7 @@ static void qed_chain_free_next_ptr(struct qed_dev *cdev, virt_next = next->next_virt; phys_next = HILO_DMA_REGPAIR(next->next_phys); - dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, virt, phys); + dma_free_coherent(dev, chain->page_size, virt, phys); virt = virt_next; phys = phys_next; @@ -95,7 +98,7 @@ static void qed_chain_free_single(struct qed_dev *cdev, if (!chain->p_virt_addr) return; - dma_free_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + dma_free_coherent(&cdev->pdev->dev, chain->page_size, chain->p_virt_addr, chain->p_phys_addr); } @@ -113,7 +116,7 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) if (!entry->virt_addr) break; - dma_free_coherent(dev, QED_CHAIN_PAGE_SIZE, entry->virt_addr, + dma_free_coherent(dev, chain->page_size, entry->virt_addr, entry->dma_map); } @@ -158,7 +161,7 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev, { u64 chain_size; - chain_size = ELEMS_PER_PAGE(params->elem_size); + chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); chain_size *= page_cnt; if (!chain_size) @@ -201,7 +204,7 @@ static int qed_chain_alloc_next_ptr(struct qed_dev *cdev, u32 i; for (i = 0; i < chain->page_cnt; i++) { - virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -232,7 +235,7 @@ static int qed_chain_alloc_single(struct qed_dev *cdev, dma_addr_t phys; void *virt; - virt = dma_alloc_coherent(&cdev->pdev->dev, QED_CHAIN_PAGE_SIZE, + virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -282,7 +285,7 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, 
struct qed_chain *chain) alloc_pages: for (i = 0; i < page_cnt; i++) { - virt = dma_alloc_coherent(dev, QED_CHAIN_PAGE_SIZE, &phys, + virt = dma_alloc_coherent(dev, chain->page_size, &phys, GFP_KERNEL); if (!virt) return -ENOMEM; @@ -318,11 +321,15 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, u32 page_cnt; int rc; + if (!params->page_size) + params->page_size = QED_CHAIN_PAGE_SIZE; + if (params->mode == QED_CHAIN_MODE_SINGLE) page_cnt = 1; else page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, params->elem_size, + params->page_size, params->mode); rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); @@ -330,9 +337,10 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, DP_NOTICE(cdev, "Cannot allocate a chain with the given arguments:\n"); DP_NOTICE(cdev, - "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n", params->intended_use, params->mode, params->cnt_type, - params->num_elems, params->elem_size); + params->num_elems, params->elem_size, + params->page_size); return rc; } diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f5cfee0934e5..8a96c361cc19 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -120,6 +121,8 @@ struct qed_chain { * but isn't involved in regular functionality. */ + u32 page_size; + /* Base address of a pre-allocated buffer for pbl */ struct { __le64 *table_virt; @@ -147,6 +150,7 @@ struct qed_chain_init_params { enum qed_chain_use_mode intended_use; enum qed_chain_cnt_type cnt_type; + u32 page_size; u32 num_elems; size_t elem_size; @@ -154,22 +158,23 @@ struct qed_chain_init_params { dma_addr_t ext_pbl_phys; }; -#define QED_CHAIN_PAGE_SIZE 0x1000 +#define QED_CHAIN_PAGE_SIZE SZ_4K -#define ELEMS_PER_PAGE(elem_size) \ - (QED_CHAIN_PAGE_SIZE / (elem_size)) +#define ELEMS_PER_PAGE(elem_size, page_size) \ + ((page_size) / (elem_size)) #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ 0) -#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((u32)(ELEMS_PER_PAGE(elem_size) - \ +#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \ + ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \ UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ - DIV_ROUND_UP((elem_cnt), USABLE_ELEMS_PER_PAGE((elem_size), (mode))) +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \ + DIV_ROUND_UP((elem_cnt), \ + USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode))) #define is_chain_u16(p) \ ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) @@ -604,7 +609,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) for (i = 0; i < page_cnt; i++) memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, - QED_CHAIN_PAGE_SIZE); + p_chain->page_size); } #endif From f2aefd20b02d83b8565c867c38eedd88853967e9 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:40 +0300 Subject: [PATCH 1504/2056] qed: optimize common chain accessors Constify chain pointers and refactor qed_chain_get_elem_left{,u32}() a bit. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- include/linux/qed/qed_chain.h | 60 +++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 27 deletions(-) diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 8a96c361cc19..434479e2ab65 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -182,73 +182,79 @@ struct qed_chain_init_params { ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ -static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) + +static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain) { - return p_chain->u.chain16.prod_idx; + return chain->u.chain16.prod_idx; } -static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) +static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain) { - return p_chain->u.chain16.cons_idx; + return chain->u.chain16.cons_idx; } -static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain) { - return p_chain->u.chain32.cons_idx; + return chain->u.chain32.prod_idx; } -static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) +static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain) { - u16 elem_per_page = p_chain->elem_per_page; - u32 prod = p_chain->u.chain16.prod_idx; - u32 cons = p_chain->u.chain16.cons_idx; + return chain->u.chain32.cons_idx; +} + +static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +{ + u32 prod = qed_chain_get_prod_idx(chain); + u32 cons = qed_chain_get_cons_idx(chain); + u16 elem_per_page = chain->elem_per_page; u16 used; if (prod < cons) prod += (u32)U16_MAX + 1; used = (u16)(prod - cons); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= prod / elem_per_page - cons / elem_per_page; + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (u16)(prod / elem_per_page - cons / elem_per_page); - return (u16)(p_chain->capacity - used); + return (u16)(chain->capacity - used); } -static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) { - u16 elem_per_page = p_chain->elem_per_page; - u64 prod = p_chain->u.chain32.prod_idx; - u64 cons = p_chain->u.chain32.cons_idx; + u64 prod = qed_chain_get_prod_idx_u32(chain); + u64 cons = qed_chain_get_cons_idx_u32(chain); + u16 elem_per_page = chain->elem_per_page; u32 used; if (prod < cons) prod += (u64)U32_MAX + 1; used = (u32)(prod - cons); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u32)(prod / elem_per_page - cons / elem_per_page); - return p_chain->capacity - used; + return chain->capacity - used; } -static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) +static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) { - return p_chain->usable_per_page; + return chain->usable_per_page; } -static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain) { - return p_chain->elem_unusable; + return chain->elem_unusable; } -static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) +static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain) { - return p_chain->page_cnt; + return chain->page_cnt; } -static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) +static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) { - return 
p_chain->pbl_sp.table_phys; + return chain->pbl_sp.table_phys; } /** From be0cec6ffd686364bbba2796bbe5eee2fad18181 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:41 +0300 Subject: [PATCH 1505/2056] qed: introduce qed_chain_get_elem_used{,u32}() Add reverse-variants of qed_chain_get_elem_left{,u32}() to be able to know current chain occupation. They will be used in the upcoming qede XDP_REDIRECT code. They share most of the logics with the mentioned ones, so were reused to collapse the latters. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- include/linux/qed/qed_chain.h | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 434479e2ab65..4d58dc8943f0 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -203,7 +203,7 @@ static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain) return chain->u.chain32.cons_idx; } -static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain) { u32 prod = qed_chain_get_prod_idx(chain); u32 cons = qed_chain_get_cons_idx(chain); @@ -217,10 +217,15 @@ static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u16)(prod / elem_per_page - cons / elem_per_page); - return (u16)(chain->capacity - used); + return used; } -static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) +static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +{ + return (u16)(chain->capacity - qed_chain_get_elem_used(chain)); +} + +static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain) { u64 prod = qed_chain_get_prod_idx_u32(chain); u64 cons = qed_chain_get_cons_idx_u32(chain); @@ -234,7 +239,12 @@ static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) used -= (u32)(prod / elem_per_page - cons / elem_per_page); - return chain->capacity - used; + return used; +} + +static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain) +{ + return chain->capacity - qed_chain_get_elem_used_u32(chain); } static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) From f35535f73c1c7538c26e3468562ca08ecfce9735 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:42 +0300 Subject: [PATCH 1506/2056] qede: reformat several structures in "qede.h" Make the file more readable and easier for adding new fields. Misc: use IFNAMSIZ and netdev_name() instead of sizeof_field() and direct net_device::name dereferencing. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 160 +++++++++++++----------- 1 file changed, 86 insertions(+), 74 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index f1d7f73de902..e8ed0bb94ee0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -176,16 +176,17 @@ struct qede_dev { u32 dp_module; u8 dp_level; - unsigned long flags; -#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags)) + unsigned long flags; +#define IS_VF(edev) test_bit(QEDE_FLAGS_IS_VF, \ + &(edev)->flags) const struct qed_eth_ops *ops; struct qede_ptp *ptp; u64 ptp_skip_txts; - struct qed_dev_eth_info dev_info; -#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) -#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) + struct qed_dev_eth_info dev_info; +#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) +#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues) #define QEDE_IS_BB(edev) \ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB) #define QEDE_IS_AH(edev) \ @@ -198,14 +199,15 @@ struct qede_dev { u8 fp_num_rx; u16 req_queues; u16 num_queues; -#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) -#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) + +#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) +#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) #define QEDE_RX_QUEUE_IDX(edev, i) (i) -#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) +#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx) struct qed_int_info int_info; - /* Smaller private varaiant of the RTNL lock */ + /* Smaller private variant of the RTNL lock */ struct mutex qede_lock; u32 state; /* Protected by qede_lock */ u16 rx_buf_size; @@ -226,22 +228,28 @@ struct qede_dev { SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) struct qede_stats stats; -#define QEDE_RSS_INDIR_INITED BIT(0) -#define QEDE_RSS_KEY_INITED BIT(1) -#define QEDE_RSS_CAPS_INITED BIT(2) - u32 rss_params_inited; /* bit-field to track initialized rss params */ - u16 rss_ind_table[128]; - u32 rss_key[10]; - u8 rss_caps; - u16 q_num_rx_buffers; /* Must be a power of two */ - u16 q_num_tx_buffers; /* Must be a power of two */ + /* Bitfield to track initialized RSS params */ + u32 rss_params_inited; +#define QEDE_RSS_INDIR_INITED BIT(0) +#define QEDE_RSS_KEY_INITED BIT(1) +#define QEDE_RSS_CAPS_INITED BIT(2) + + u16 rss_ind_table[128]; + u32 rss_key[10]; + u8 rss_caps; + + /* Both must be a power of two */ + u16 q_num_rx_buffers; + u16 q_num_tx_buffers; + + bool gro_disable; + + struct list_head vlan_list; + u16 configured_vlans; + u16 non_configured_vlans; + bool accept_any_vlan; - bool gro_disable; - struct list_head vlan_list; - u16 configured_vlans; - u16 non_configured_vlans; - bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; u16 vxlan_dst_port; @@ -252,14 +260,14 @@ struct qede_dev { struct qede_rdma_dev rdma_info; - struct bpf_prog *xdp_prog; + struct bpf_prog *xdp_prog; - unsigned long err_flags; -#define QEDE_ERR_IS_HANDLED 31 -#define QEDE_ERR_ATTN_CLR_EN 0 -#define QEDE_ERR_GET_DBG_INFO 1 -#define QEDE_ERR_IS_RECOVERABLE 2 -#define QEDE_ERR_WARN 3 + unsigned long err_flags; +#define QEDE_ERR_IS_HANDLED 31 +#define QEDE_ERR_ATTN_CLR_EN 0 +#define QEDE_ERR_GET_DBG_INFO 1 +#define QEDE_ERR_IS_RECOVERABLE 2 +#define QEDE_ERR_WARN 3 struct qede_dump_info dump_info; }; @@ -372,29 +380,30 @@ struct sw_tx_bd { }; struct sw_tx_xdp { - 
struct page *page; - dma_addr_t mapping; + struct page *page; + dma_addr_t mapping; }; struct qede_tx_queue { - u8 is_xdp; - bool is_legacy; - u16 sw_tx_cons; - u16 sw_tx_prod; - u16 num_tx_buffers; /* Slowpath only */ + u8 is_xdp; + bool is_legacy; + u16 sw_tx_cons; + u16 sw_tx_prod; + u16 num_tx_buffers; /* Slowpath only */ - u64 xmit_pkts; - u64 stopped_cnt; - u64 tx_mem_alloc_err; + u64 xmit_pkts; + u64 stopped_cnt; + u64 tx_mem_alloc_err; - __le16 *hw_cons_ptr; + __le16 *hw_cons_ptr; /* Needed for the mapping of packets */ - struct device *dev; + struct device *dev; - void __iomem *doorbell_addr; - union db_prod tx_db; - int index; /* Slowpath only */ + void __iomem *doorbell_addr; + union db_prod tx_db; + + int index; /* Slowpath only */ #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \ QEDE_MAX_TSS_CNT(edev)) #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev)) @@ -406,22 +415,22 @@ struct qede_tx_queue { #define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)])) -#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0])) +#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0])) /* Regular Tx requires skb + metadata for release purpose, * while XDP requires the pages and the mapped address. */ union { - struct sw_tx_bd *skbs; - struct sw_tx_xdp *xdp; - } sw_tx_ring; + struct sw_tx_bd *skbs; + struct sw_tx_xdp *xdp; + } sw_tx_ring; - struct qed_chain tx_pbl; + struct qed_chain tx_pbl; /* Slowpath; Should be kept in end [unless missing padding] */ - void *handle; - u16 cos; - u16 ndev_txq_id; + void *handle; + u16 cos; + u16 ndev_txq_id; }; #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ @@ -435,32 +444,35 @@ struct qede_tx_queue { #define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes)) struct qede_fastpath { - struct qede_dev *edev; -#define QEDE_FASTPATH_TX BIT(0) -#define QEDE_FASTPATH_RX BIT(1) -#define QEDE_FASTPATH_XDP BIT(2) -#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) - u8 type; - u8 id; - u8 xdp_xmit; - struct napi_struct napi; - struct qed_sb_info *sb_info; - struct qede_rx_queue *rxq; - struct qede_tx_queue *txq; - struct qede_tx_queue *xdp_tx; + struct qede_dev *edev; -#define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8) - char name[VEC_NAME_SIZE]; + u8 type; +#define QEDE_FASTPATH_TX BIT(0) +#define QEDE_FASTPATH_RX BIT(1) +#define QEDE_FASTPATH_XDP BIT(2) +#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) + + u8 id; + + u8 xdp_xmit; + + struct napi_struct napi; + struct qed_sb_info *sb_info; + struct qede_rx_queue *rxq; + struct qede_tx_queue *txq; + struct qede_tx_queue *xdp_tx; + + char name[IFNAMSIZ + 8]; }; /* Debug print definitions */ -#define DP_NAME(edev) ((edev)->ndev->name) +#define DP_NAME(edev) netdev_name((edev)->ndev) -#define XMIT_PLAIN 0 -#define XMIT_L4_CSUM BIT(0) -#define XMIT_LSO BIT(1) -#define XMIT_ENC BIT(2) -#define XMIT_ENC_GSO_L4_CSUM BIT(3) +#define XMIT_PLAIN 0 +#define XMIT_L4_CSUM BIT(0) +#define XMIT_LSO BIT(1) +#define XMIT_ENC BIT(2) +#define XMIT_ENC_GSO_L4_CSUM BIT(3) #define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) From f285ad5726e0430f46155026ca584d1d85c6ceff Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:43 +0300 Subject: [PATCH 1507/2056] qede: reformat net_device_ops declarations Correct the indentation of net_device_ops declarations for fancier look. 
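
Stepping back to the qed chain rework in the earlier patches of this series: the seven-argument chain_alloc op is gone and callers now describe the chain once in struct qed_chain_init_params. The sketch below restates that calling convention only as an illustration; the example_alloc_tx_chain() name is hypothetical, error paths are trimmed, and the surrounding qede context (edev, txq, the driver-local qede.h) is assumed from the hunks quoted above.

/* Hedged sketch of the parameter-structure based allocation.
 * Field values mirror qede_alloc_mem_txq() as changed above.
 */
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qede.h"	/* driver-local: struct qede_dev, struct qede_tx_queue */

static int example_alloc_tx_chain(struct qede_dev *edev,
				  struct qede_tx_queue *txq)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= edev->q_num_tx_buffers,
		.elem_size	= sizeof(union eth_tx_bd_types),
		/* .page_size left at 0: qed_chain_alloc() falls back to
		 * QED_CHAIN_PAGE_SIZE (SZ_4K), per the page-size patch above.
		 */
	};

	/* One call replaces the old seven-argument chain_alloc op. */
	return edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl,
					      &params);
}

Callers that previously passed a struct qed_chain_ext_pbl now fill the ext_pbl_virt/ext_pbl_phys members of the same structure instead.
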
Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 122 +++++++++---------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index b5a95f165520..92bcdfa27961 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -639,79 +639,79 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type, } static const struct net_device_ops qede_netdev_ops = { - .ndo_open = qede_open, - .ndo_stop = qede_close, - .ndo_start_xmit = qede_start_xmit, - .ndo_select_queue = qede_select_queue, - .ndo_set_rx_mode = qede_set_rx_mode, - .ndo_set_mac_address = qede_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = qede_change_mtu, - .ndo_do_ioctl = qede_ioctl, - .ndo_tx_timeout = qede_tx_timeout, + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_do_ioctl = qede_ioctl, + .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV - .ndo_set_vf_mac = qede_set_vf_mac, - .ndo_set_vf_vlan = qede_set_vf_vlan, - .ndo_set_vf_trust = qede_set_vf_trust, + .ndo_set_vf_mac = qede_set_vf_mac, + .ndo_set_vf_vlan = qede_set_vf_vlan, + .ndo_set_vf_trust = qede_set_vf_trust, #endif - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, - .ndo_fix_features = qede_fix_features, - .ndo_set_features = qede_set_features, - .ndo_get_stats64 = qede_get_stats64, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, #ifdef CONFIG_QED_SRIOV - .ndo_set_vf_link_state = qede_set_vf_link_state, - .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, - .ndo_get_vf_config = qede_get_vf_config, - .ndo_set_vf_rate = qede_set_vf_rate, + .ndo_set_vf_link_state = qede_set_vf_link_state, + .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, + .ndo_get_vf_config = qede_get_vf_config, + .ndo_set_vf_rate = qede_set_vf_rate, #endif - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, - .ndo_bpf = qede_xdp, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, + .ndo_bpf = qede_xdp, #ifdef CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = qede_rx_flow_steer, + .ndo_rx_flow_steer = qede_rx_flow_steer, #endif - .ndo_setup_tc = qede_setup_tc_offload, + .ndo_setup_tc = qede_setup_tc_offload, }; static const struct net_device_ops qede_netdev_vf_ops = { - .ndo_open = qede_open, - .ndo_stop = qede_close, - .ndo_start_xmit = qede_start_xmit, - .ndo_select_queue = qede_select_queue, - .ndo_set_rx_mode = qede_set_rx_mode, - .ndo_set_mac_address = qede_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = qede_change_mtu, - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, - .ndo_fix_features = qede_fix_features, - .ndo_set_features = qede_set_features, - .ndo_get_stats64 = qede_get_stats64, - 
.ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, }; static const struct net_device_ops qede_netdev_vf_xdp_ops = { - .ndo_open = qede_open, - .ndo_stop = qede_close, - .ndo_start_xmit = qede_start_xmit, - .ndo_select_queue = qede_select_queue, - .ndo_set_rx_mode = qede_set_rx_mode, - .ndo_set_mac_address = qede_set_mac_addr, - .ndo_validate_addr = eth_validate_addr, - .ndo_change_mtu = qede_change_mtu, - .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, - .ndo_fix_features = qede_fix_features, - .ndo_set_features = qede_set_features, - .ndo_get_stats64 = qede_get_stats64, - .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, - .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, - .ndo_features_check = qede_features_check, - .ndo_bpf = qede_xdp, + .ndo_open = qede_open, + .ndo_stop = qede_close, + .ndo_start_xmit = qede_start_xmit, + .ndo_select_queue = qede_select_queue, + .ndo_set_rx_mode = qede_set_rx_mode, + .ndo_set_mac_address = qede_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, + .ndo_fix_features = qede_fix_features, + .ndo_set_features = qede_set_features, + .ndo_get_stats64 = qede_get_stats64, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, + .ndo_features_check = qede_features_check, + .ndo_bpf = qede_xdp, }; /* ------------------------------------------------------------------------- From 4c2bacbea1a3d49b836d2554383925bcf453d22c Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:44 +0300 Subject: [PATCH 1508/2056] qede: refactor XDP Tx processing Current XDP Tx logic is suboptimal and can't be reused for XDP_REDIRECT path. Make qede_xdp_{tx_int,xmit}() more universal and effective in general to allow future expanding. Misc: use unlikely() hints where appropriate and replace "fallthrough" comments with pseudo-keywords. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 1 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 91 +++++++++++----------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index e8ed0bb94ee0..308c66a5f98f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -455,6 +455,7 @@ struct qede_fastpath { u8 id; u8 xdp_xmit; +#define QEDE_XDP_TX BIT(0) struct napi_struct napi; struct qed_sb_info *sb_info; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1c4ece0713f8..c80bf6d37b89 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -302,48 +302,37 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) wmb(); } -static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp, - struct sw_rx_data *metadata, u16 padding, u16 length) +static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, + u16 len, struct page *page) { - struct qede_tx_queue *txq = fp->xdp_tx; - struct eth_tx_1st_bd *first_bd; - u16 idx = txq->sw_tx_prod; + struct eth_tx_1st_bd *bd; + struct sw_tx_xdp *xdp; u16 val; - if (!qed_chain_get_elem_left(&txq->tx_pbl)) { + if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= + txq->num_tx_buffers)) { txq->stopped_cnt++; return -ENOMEM; } - first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); + bd = qed_chain_produce(&txq->tx_pbl); + bd->data.nbds = 1; + bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); - memset(first_bd, 0, sizeof(*first_bd)); - first_bd->data.bd_flags.bitfields = - BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); - - val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; - first_bd->data.bitfields |= cpu_to_le16(val); - first_bd->data.nbds = 1; + bd->data.bitfields = cpu_to_le16(val); /* We can safely ignore the offset, as it's 0 for XDP */ - BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); + BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len); - /* Synchronize the buffer back to device, as program [probably] - * has changed it. 
- */ - dma_sync_single_for_device(&edev->pdev->dev, - metadata->mapping + padding, - length, PCI_DMA_TODEVICE); + xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; + xdp->mapping = dma; + xdp->page = page; - txq->sw_tx_ring.xdp[idx].page = metadata->data; - txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping; txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; - /* Mark the fastpath for future XDP doorbell */ - fp->xdp_xmit = 1; - return 0; } @@ -362,20 +351,21 @@ int qede_txq_has_work(struct qede_tx_queue *txq) static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { - u16 hw_bd_cons, idx; + struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; + struct device *dev = &edev->pdev->dev; + u16 hw_bd_cons; hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); barrier(); while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { + xdp_info = xdp_arr + txq->sw_tx_cons; + + dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(xdp_info->page); + qed_chain_consume(&txq->tx_pbl); - idx = txq->sw_tx_cons; - - dma_unmap_page(&edev->pdev->dev, - txq->sw_tx_ring.xdp[idx].mapping, - PAGE_SIZE, DMA_BIDIRECTIONAL); - __free_page(txq->sw_tx_ring.xdp[idx].page); - txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; txq->xmit_pkts++; } @@ -1064,32 +1054,39 @@ static bool qede_rx_xdp(struct qede_dev *edev, switch (act) { case XDP_TX: /* We need the replacement buffer before transmit. */ - if (qede_alloc_rx_buffer(rxq, true)) { + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { qede_recycle_rx_bd_ring(rxq, 1); + trace_xdp_exception(edev->ndev, prog, act); - return false; + break; } /* Now if there's a transmission problem, we'd still have to * throw current buffer, as replacement was already allocated. */ - if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) { - dma_unmap_page(rxq->dev, bd->mapping, - PAGE_SIZE, DMA_BIDIRECTIONAL); + if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, + *data_offset, *len, bd->data))) { + dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, + rxq->data_direction); __free_page(bd->data); + trace_xdp_exception(edev->ndev, prog, act); + } else { + dma_sync_single_for_device(rxq->dev, + bd->mapping + *data_offset, + *len, rxq->data_direction); + fp->xdp_xmit |= QEDE_XDP_TX; } /* Regardless, we've consumed an Rx BD */ qede_rx_bd_ring_consume(rxq); - return false; - + break; default: bpf_warn_invalid_xdp_action(act); - /* Fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(edev->ndev, prog, act); - /* Fall through */ + fallthrough; case XDP_DROP: qede_recycle_rx_bd_ring(rxq, cqe->bd_num); } @@ -1353,6 +1350,9 @@ int qede_poll(struct napi_struct *napi, int budget) napi); struct qede_dev *edev = fp->edev; int rx_work_done = 0; + u16 xdp_prod; + + fp->xdp_xmit = 0; if (likely(fp->type & QEDE_FASTPATH_TX)) { int cos; @@ -1380,10 +1380,9 @@ int qede_poll(struct napi_struct *napi, int budget) } } - if (fp->xdp_xmit) { - u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); + if (fp->xdp_xmit & QEDE_XDP_TX) { + xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); - fp->xdp_xmit = 0; fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); qede_update_tx_producer(fp->xdp_tx); } From d1b25b79e162b23ec1bbdfb13bda7154b1f46cfb Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 23 Jul 2020 01:10:45 +0300 Subject: [PATCH 1509/2056] qede: add .ndo_xdp_xmit() and XDP_REDIRECT support Add XDP_REDIRECT case handling and the corresponding NDO to support redirecting XDP frames. 
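
Before the XDP_REDIRECT support described here, it is worth restating how the occupancy helpers introduced a few patches back are meant to be used by the reworked XDP Tx path. They are defined so that qed_chain_get_elem_left(c) equals the chain capacity minus qed_chain_get_elem_used(c). The sketch below is a minimal illustration only; example_xdp_ring_full() is a hypothetical helper, and the qede/qed headers from the previous sketch are assumed.

/* Hedged illustration of the new chain-occupancy check in the XDP Tx path.
 * Old test: !qed_chain_get_elem_left(&txq->tx_pbl).
 * New test: compare occupancy against the number of buffers the driver
 * actually posted, which can be smaller than the chain capacity because
 * capacity is rounded up to whole pages.
 */
static bool example_xdp_ring_full(const struct qede_tx_queue *txq)
{
	return qed_chain_get_elem_used(&txq->tx_pbl) >= txq->num_tx_buffers;
}

Doorbell work is batched the same way: the XDP_TX path only sets the QEDE_XDP_TX bit in fp->xdp_xmit and qede_poll() rings the Tx producer doorbell once per poll; the redirect patch below extends this with a QEDE_XDP_REDIRECT bit that gates a single xdp_do_flush_map() call.
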
This also includes registering driver memory model (currently order-0 page mode) in BPF subsystem. The total number of XDP queues is usually 1:1 with Rx ones. Signed-off-by: Alexander Lobakin Signed-off-by: Igor Russkikh Signed-off-by: Michal Kalderon Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 8 ++ drivers/net/ethernet/qlogic/qede/qede_fp.c | 97 +++++++++++++++++++- drivers/net/ethernet/qlogic/qede/qede_main.c | 18 ++++ 3 files changed, 118 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 308c66a5f98f..803c1fcca8ad 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -199,6 +199,7 @@ struct qede_dev { u8 fp_num_rx; u16 req_queues; u16 num_queues; + u16 total_xdp_queues; #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) @@ -381,6 +382,7 @@ struct sw_tx_bd { struct sw_tx_xdp { struct page *page; + struct xdp_frame *xdpf; dma_addr_t mapping; }; @@ -403,6 +405,9 @@ struct qede_tx_queue { void __iomem *doorbell_addr; union db_prod tx_db; + /* Spinlock for XDP queues in case of XDP_REDIRECT */ + spinlock_t xdp_tx_lock; + int index; /* Slowpath only */ #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \ QEDE_MAX_TSS_CNT(edev)) @@ -456,6 +461,7 @@ struct qede_fastpath { u8 xdp_xmit; #define QEDE_XDP_TX BIT(0) +#define QEDE_XDP_REDIRECT BIT(1) struct napi_struct napi; struct qed_sb_info *sb_info; @@ -516,6 +522,8 @@ struct qede_reload_args { /* Datapath functions definition */ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); +int qede_xdp_transmit(struct net_device *dev, int n_frames, + struct xdp_frame **frames, u32 flags); u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); netdev_features_t qede_features_check(struct sk_buff *skb, diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index c80bf6d37b89..a2494bf85007 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -303,7 +303,7 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq) } static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, - u16 len, struct page *page) + u16 len, struct page *page, struct xdp_frame *xdpf) { struct eth_tx_1st_bd *bd; struct sw_tx_xdp *xdp; @@ -330,12 +330,66 @@ static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad, xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; xdp->mapping = dma; xdp->page = page; + xdp->xdpf = xdpf; txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; return 0; } +int qede_xdp_transmit(struct net_device *dev, int n_frames, + struct xdp_frame **frames, u32 flags) +{ + struct qede_dev *edev = netdev_priv(dev); + struct device *dmadev = &edev->pdev->dev; + struct qede_tx_queue *xdp_tx; + struct xdp_frame *xdpf; + dma_addr_t mapping; + int i, drops = 0; + u16 xdp_prod; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + if (unlikely(!netif_running(dev))) + return -ENETDOWN; + + i = smp_processor_id() % edev->total_xdp_queues; + xdp_tx = edev->fp_array[i].xdp_tx; + + spin_lock(&xdp_tx->xdp_tx_lock); + + for (i = 0; i < n_frames; i++) { + xdpf = frames[i]; + + mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dmadev, mapping))) { + 
xdp_return_frame_rx_napi(xdpf); + drops++; + + continue; + } + + if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, + NULL, xdpf))) { + xdp_return_frame_rx_napi(xdpf); + drops++; + } + } + + if (flags & XDP_XMIT_FLUSH) { + xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); + + xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); + qede_update_tx_producer(xdp_tx); + } + + spin_unlock(&xdp_tx->xdp_tx_lock); + + return n_frames - drops; +} + int qede_txq_has_work(struct qede_tx_queue *txq) { u16 hw_bd_cons; @@ -353,6 +407,7 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) { struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; struct device *dev = &edev->pdev->dev; + struct xdp_frame *xdpf; u16 hw_bd_cons; hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); @@ -360,10 +415,19 @@ static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq) while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { xdp_info = xdp_arr + txq->sw_tx_cons; + xdpf = xdp_info->xdpf; - dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, - DMA_BIDIRECTIONAL); - __free_page(xdp_info->page); + if (xdpf) { + dma_unmap_single(dev, xdp_info->mapping, xdpf->len, + DMA_TO_DEVICE); + xdp_return_frame(xdpf); + + xdp_info->xdpf = NULL; + } else { + dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(xdp_info->page); + } qed_chain_consume(&txq->tx_pbl); txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; @@ -1065,7 +1129,8 @@ static bool qede_rx_xdp(struct qede_dev *edev, * throw current buffer, as replacement was already allocated. */ if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, - *data_offset, *len, bd->data))) { + *data_offset, *len, bd->data, + NULL))) { dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, rxq->data_direction); __free_page(bd->data); @@ -1079,6 +1144,25 @@ static bool qede_rx_xdp(struct qede_dev *edev, } /* Regardless, we've consumed an Rx BD */ + qede_rx_bd_ring_consume(rxq); + break; + case XDP_REDIRECT: + /* We need the replacement buffer before transmit. 
*/ + if (unlikely(qede_alloc_rx_buffer(rxq, true))) { + qede_recycle_rx_bd_ring(rxq, 1); + + trace_xdp_exception(edev->ndev, prog, act); + break; + } + + dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, + rxq->data_direction); + + if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) + DP_NOTICE(edev, "Failed to redirect the packet\n"); + else + fp->xdp_xmit |= QEDE_XDP_REDIRECT; + qede_rx_bd_ring_consume(rxq); break; default: @@ -1387,6 +1471,9 @@ int qede_poll(struct napi_struct *napi, int budget) qede_update_tx_producer(fp->xdp_tx); } + if (fp->xdp_xmit & QEDE_XDP_REDIRECT) + xdp_do_flush_map(); + return rx_work_done; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 92bcdfa27961..1aaae3203f5a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -672,6 +672,7 @@ static const struct net_device_ops qede_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = qede_rx_flow_steer, #endif + .ndo_xdp_xmit = qede_xdp_transmit, .ndo_setup_tc = qede_setup_tc_offload, }; @@ -712,6 +713,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = qede_features_check, .ndo_bpf = qede_xdp, + .ndo_xdp_xmit = qede_xdp_transmit, }; /* ------------------------------------------------------------------------- @@ -1712,6 +1714,7 @@ static void qede_init_fp(struct qede_dev *edev) { int queue_id, rxq_index = 0, txq_index = 0; struct qede_fastpath *fp; + bool init_xdp = false; for_each_queue(queue_id) { fp = &edev->fp_array[queue_id]; @@ -1723,6 +1726,9 @@ static void qede_init_fp(struct qede_dev *edev) fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev, rxq_index); fp->xdp_tx->is_xdp = 1; + + spin_lock_init(&fp->xdp_tx->xdp_tx_lock); + init_xdp = true; } if (fp->type & QEDE_FASTPATH_RX) { @@ -1738,6 +1744,13 @@ static void qede_init_fp(struct qede_dev *edev) /* Driver have no error path from here */ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev, fp->rxq->rxq_id) < 0); + + if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq, + MEM_TYPE_PAGE_ORDER0, + NULL)) { + DP_NOTICE(edev, + "Failed to register XDP memory model\n"); + } } if (fp->type & QEDE_FASTPATH_TX) { @@ -1763,6 +1776,11 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, queue_id); } + + if (init_xdp) { + edev->total_xdp_queues = QEDE_RSS_COUNT(edev); + DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues); + } } static int qede_set_real_num_queues(struct qede_dev *edev) From 205a55f4e65353dd4846547d376a6f85cdda3d04 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 22 Jul 2020 12:05:10 -0700 Subject: [PATCH 1510/2056] sfc: convert to new udp_tunnel infrastructure Check MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED, before setting the info, which will hopefully protect us from -EPERM errors the previous code was gracefully ignoring. Ed reports this is not the 100% correct bit, but it's the best approximation we have. Shared code reports the port information back to user space, so we really want to know what was added and what failed. Ignoring -EPERM is not an option. The driver does not call udp_tunnel_get_rx_info(), so its own management of table state is not really all that problematic, we can leave it be. This allows the driver to continue with its copious table syncing, and matching the ports to TX frames, which it will reportedly do one day. 
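
The conversion below follows the new declarative udp_tunnel_nic pattern: instead of tracking ports in its own add/del callbacks, the driver publishes a table description plus set_port/unset_port callbacks and lets the core manage port usage. A minimal sketch of that registration shape follows; example_set_port(), example_unset_port() and example_udp_tunnels are placeholder names, the callback bodies stand in for the driver-specific programming (MCDI in the ef10 case), and the 16-entry VXLAN/GENEVE table mirrors the ef10 hunk further down.

#include <net/udp_tunnel.h>

/* Placeholder callbacks: a real driver programs or clears hardware
 * entry 'entry' using ti->port and ti->type here.
 */
static int example_set_port(struct net_device *dev, unsigned int table,
			    unsigned int entry, struct udp_tunnel_info *ti)
{
	return 0;
}

static int example_unset_port(struct net_device *dev, unsigned int table,
			      unsigned int entry, struct udp_tunnel_info *ti)
{
	return 0;
}

static const struct udp_tunnel_nic_info example_udp_tunnels = {
	.set_port	= example_set_port,
	.unset_port	= example_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{
			.n_entries	= 16,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};

At probe time the driver points net_dev->udp_tunnel_nic_info at this structure (only when the datapath reports encap support), while .ndo_udp_tunnel_add/.ndo_udp_tunnel_del are wired to the generic udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() helpers, as the efx hunks below show.
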
Leave the feature checking in the callbacks, as the device may remove the capabilities on reset. Inline the loop from __efx_ef10_udp_tnl_lookup_port() into efx_ef10_udp_tnl_has_port(), since it's the only caller now. With new infra this driver gains port replace - when space frees up in a full table a new port will be selected for offload. Plus efx will no longer sleep in an atomic context. v2: - amend the commit message about TRUSTED not being 100% - add TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID to mark unsed entries Signed-off-by: Jakub Kicinski Acked-By: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 162 ++++++++++---------------- drivers/net/ethernet/sfc/efx.c | 72 +----------- drivers/net/ethernet/sfc/net_driver.h | 11 +- 3 files changed, 63 insertions(+), 182 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index cb7b634a1150..fa7229fff2ff 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Hardware control for EF10 architecture including 'Huntington'. */ @@ -38,6 +39,7 @@ struct efx_ef10_vlan { }; static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading); +static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels; static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { @@ -564,6 +566,9 @@ static int efx_ef10_probe(struct efx_nic *efx) goto fail2; mutex_init(&nic_data->udp_tunnels_lock); + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + nic_data->udp_tunnels[i].type = + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; /* Reset (most) configuration for this function */ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); @@ -665,6 +670,12 @@ static int efx_ef10_probe(struct efx_nic *efx) if (rc) goto fail_add_vid_0; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) && + efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) + efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels; + return 0; fail_add_vid_0: @@ -3702,8 +3713,8 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading) MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { - if (nic_data->udp_tunnels[i].count && - nic_data->udp_tunnels[i].port) { + if (nic_data->udp_tunnels[i].type != + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) { efx_dword_t entry; EFX_POPULATE_DWORD_2(entry, @@ -3789,79 +3800,34 @@ static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx) return rc; } -static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx, - __be16 port) +static int efx_ef10_udp_tnl_set_port(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - size_t i; + struct efx_nic *efx = netdev_priv(dev); + struct efx_ef10_nic_data *nic_data; + int efx_tunnel_type, rc; - for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) { - if (!nic_data->udp_tunnels[i].count) - continue; - if (nic_data->udp_tunnels[i].port == port) - return &nic_data->udp_tunnels[i]; - } - return NULL; -} - -static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, - struct efx_udp_tunnel tnl) -{ - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct efx_udp_tunnel *match; - char typebuf[8]; - size_t i; - int rc; + if (ti->type == UDP_TUNNEL_TYPE_VXLAN) + efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; + else + efx_tunnel_type = 
TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; + nic_data = efx->nic_data; if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) - return 0; - - efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); - netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n", - typebuf, ntohs(tnl.port)); + return -EOPNOTSUPP; mutex_lock(&nic_data->udp_tunnels_lock); /* Make sure all TX are stopped while we add to the table, else we * might race against an efx_features_check(). */ efx_device_detach_sync(efx); - - match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); - if (match != NULL) { - if (match->type == tnl.type) { - netif_dbg(efx, drv, efx->net_dev, - "Referencing existing tunnel entry\n"); - match->count++; - /* No need to cause an MCDI update */ - rc = 0; - goto unlock_out; - } - efx_get_udp_tunnel_type_name(match->type, - typebuf, sizeof(typebuf)); - netif_dbg(efx, drv, efx->net_dev, - "UDP port %d is already in use by %s\n", - ntohs(tnl.port), typebuf); - rc = -EEXIST; - goto unlock_out; - } - - for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) - if (!nic_data->udp_tunnels[i].count) { - nic_data->udp_tunnels[i] = tnl; - nic_data->udp_tunnels[i].count = 1; - rc = efx_ef10_set_udp_tnl_ports(efx, false); - goto unlock_out; - } - - netif_dbg(efx, drv, efx->net_dev, - "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n", - typebuf, ntohs(tnl.port)); - - rc = -ENOMEM; - -unlock_out: + nic_data->udp_tunnels[entry].type = efx_tunnel_type; + nic_data->udp_tunnels[entry].port = ti->port; + rc = efx_ef10_set_udp_tnl_ports(efx, false); mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; } @@ -3873,6 +3839,7 @@ static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx, static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + size_t i; if (!(nic_data->datapath_caps & (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) @@ -3884,58 +3851,51 @@ static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port) */ return false; - return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL; + for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) + if (nic_data->udp_tunnels[i].type != + TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID && + nic_data->udp_tunnels[i].port == port) + return true; + + return false; } -static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx, - struct efx_udp_tunnel tnl) +static int efx_ef10_udp_tnl_unset_port(struct net_device *dev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct efx_udp_tunnel *match; - char typebuf[8]; + struct efx_nic *efx = netdev_priv(dev); + struct efx_ef10_nic_data *nic_data; int rc; - if (!(nic_data->datapath_caps & - (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) - return 0; - - efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf)); - netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n", - typebuf, ntohs(tnl.port)); + nic_data = efx->nic_data; mutex_lock(&nic_data->udp_tunnels_lock); /* Make sure all TX are stopped while we remove from the table, else we * might race against an efx_features_check(). 
*/ efx_device_detach_sync(efx); - - match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port); - if (match != NULL) { - if (match->type == tnl.type) { - if (--match->count) { - /* Port is still in use, so nothing to do */ - netif_dbg(efx, drv, efx->net_dev, - "UDP tunnel port %d remains active\n", - ntohs(tnl.port)); - rc = 0; - goto out_unlock; - } - rc = efx_ef10_set_udp_tnl_ports(efx, false); - goto out_unlock; - } - efx_get_udp_tunnel_type_name(match->type, - typebuf, sizeof(typebuf)); - netif_warn(efx, drv, efx->net_dev, - "UDP port %d is actually in use by %s, not removing\n", - ntohs(tnl.port), typebuf); - } - rc = -ENOENT; - -out_unlock: + nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID; + nic_data->udp_tunnels[entry].port = 0; + rc = efx_ef10_set_udp_tnl_ports(efx, false); mutex_unlock(&nic_data->udp_tunnels_lock); + return rc; } +static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = { + .set_port = efx_ef10_udp_tnl_set_port, + .unset_port = efx_ef10_udp_tnl_unset_port, + .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP, + .tables = { + { + .n_entries = 16, + .tunnel_types = UDP_TUNNEL_TYPE_VXLAN | + UDP_TUNNEL_TYPE_GENEVE, + }, + }, +}; + /* EF10 may have multiple datapath firmware variants within a * single version. Report which variants are running. */ @@ -4172,9 +4132,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports, - .udp_tnl_add_port = efx_ef10_udp_tnl_add_port, .udp_tnl_has_port = efx_ef10_udp_tnl_has_port, - .udp_tnl_del_port = efx_ef10_udp_tnl_del_port, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index befd253af918..f16b4f236031 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -36,28 +36,6 @@ #include "mcdi_pcol.h" #include "workarounds.h" -/************************************************************************** - * - * Type name strings - * - ************************************************************************** - */ - -/* UDP tunnel type names */ -static const char *const efx_udp_tunnel_type_names[] = { - [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan", - [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve", -}; - -void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen) -{ - if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) && - efx_udp_tunnel_type_names[type] != NULL) - snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]); - else - snprintf(buf, buflen, "type %d", type); -} - /************************************************************************** * * Configurable values @@ -612,52 +590,6 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi return -EOPNOTSUPP; } -static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in) -{ - switch (in) { - case UDP_TUNNEL_TYPE_VXLAN: - return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN; - case UDP_TUNNEL_TYPE_GENEVE: - return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE; - default: - return -1; - } -} - -static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) -{ - struct efx_nic *efx = netdev_priv(dev); - struct efx_udp_tunnel tnl; - int efx_tunnel_type; - - efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); - if (efx_tunnel_type < 0) - return; - - tnl.type = (u16)efx_tunnel_type; - tnl.port = ti->port; - - if 
(efx->type->udp_tnl_add_port) - (void)efx->type->udp_tnl_add_port(efx, tnl); -} - -static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) -{ - struct efx_nic *efx = netdev_priv(dev); - struct efx_udp_tunnel tnl; - int efx_tunnel_type; - - efx_tunnel_type = efx_udp_tunnel_type_map(ti->type); - if (efx_tunnel_type < 0) - return; - - tnl.type = (u16)efx_tunnel_type; - tnl.port = ti->port; - - if (efx->type->udp_tnl_del_port) - (void)efx->type->udp_tnl_del_port(efx, tnl); -} - static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, @@ -685,8 +617,8 @@ static const struct net_device_ops efx_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = efx_filter_rfs, #endif - .ndo_udp_tunnel_add = efx_udp_tunnel_add, - .ndo_udp_tunnel_del = efx_udp_tunnel_del, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_xdp_xmit = efx_xdp_xmit, .ndo_bpf = efx_xdp }; diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 0bf11ebb03cf..786fda559976 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -609,8 +609,6 @@ extern const unsigned int efx_reset_type_max; #define RESET_TYPE(type) \ STRING_TABLE_LOOKUP(type, efx_reset_type) -void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen); - enum efx_int_mode { /* Be careful if altering to correct macro below */ EFX_INT_MODE_MSIX = 0, @@ -1173,12 +1171,9 @@ struct efx_mtd_partition { }; struct efx_udp_tunnel { +#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID 0xffff u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */ __be16 port; - /* Count of repeated adds of the same port. Used only inside the list, - * not in request arguments. - */ - u16 count; }; /** @@ -1302,9 +1297,7 @@ struct efx_udp_tunnel { * @tso_versions: Returns mask of firmware-assisted TSO versions supported. * If %NULL, then device does not support any TSO version. * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. - * @udp_tnl_add_port: Add a UDP tunnel port * @udp_tnl_has_port: Check if a port has been added as UDP tunnel - * @udp_tnl_del_port: Remove a UDP tunnel port * @print_additional_fwver: Dump NIC-specific additional FW version info * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address @@ -1474,9 +1467,7 @@ struct efx_nic_type { int (*set_mac_address)(struct efx_nic *efx); u32 (*tso_versions)(struct efx_nic *efx); int (*udp_tnl_push_ports)(struct efx_nic *efx); - int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); - int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl); size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf, size_t len); From b0977bb268db1df6decd3405903ca500721cdc5f Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:29 +0200 Subject: [PATCH 1511/2056] subflow: always init 'rel_write_seq' Currently we do not init the subflow write sequence for MP_JOIN subflows. This will cause bad mapping being generated as soon as we will use non backup subflow. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/mptcp/protocol.c | 1 - net/mptcp/subflow.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f0b0b503c262..59c0eef807b3 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1814,7 +1814,6 @@ void mptcp_finish_connect(struct sock *ssk) ack_seq++; subflow->map_seq = ack_seq; subflow->map_subflow_seq = 1; - subflow->rel_write_seq = 1; /* the socket is not connected yet, no msk/subflow ops can access/race * accessing the field below diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 519122e66f17..84e70806b250 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -200,6 +200,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) if (subflow->conn_finished) return; + subflow->rel_write_seq = 1; subflow->conn_finished = 1; subflow->ssn_offset = TCP_SKB_CB(skb)->seq; pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); From 53eb4c383deb97b59f1755fe3035ec7992488375 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:30 +0200 Subject: [PATCH 1512/2056] mptcp: avoid data corruption on reinsert When updating a partially acked data fragment, we actually corrupt it. This is irrelevant till we send data on a single subflow, as retransmitted data, if any are discarded by the peer as duplicate, but it will cause data corruption as soon as we will start creating non backup subflows. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 59c0eef807b3..254e6ef2b4e0 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -460,15 +460,20 @@ static void mptcp_clean_una(struct sock *sk) dfrag = mptcp_rtx_head(sk); if (dfrag && after64(snd_una, dfrag->data_seq)) { - u64 delta = dfrag->data_seq + dfrag->data_len - snd_una; + u64 delta = snd_una - dfrag->data_seq; + + if (WARN_ON_ONCE(delta > dfrag->data_len)) + goto out; dfrag->data_seq += delta; + dfrag->offset += delta; dfrag->data_len -= delta; dfrag_uncharge(sk, delta); cleaned = true; } +out: if (cleaned) { sk_mem_reclaim_partial(sk); From 0235d075a592dfde575df81f150feb0d95a5ef5c Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:31 +0200 Subject: [PATCH 1513/2056] mptcp: mark as fallback even early ones In the unlikely event of a failure at connect time, we currently clear the request_mptcp flag - so that the MPC handshake is not started at all, but the msk is not explicitly marked as fallback. This would lead to later insertion of wrong DSS options in the xmitted packets, in violation of RFC specs and possibly fooling the peer. Fixes: e1ff9e82e2ea ("net: mptcp: improve fallback to TCP") Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/mptcp/protocol.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 254e6ef2b4e0..2936413171be 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1944,6 +1944,13 @@ static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return err; } +static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, + struct mptcp_subflow_context *subflow) +{ + subflow->request_mptcp = 0; + __mptcp_do_fallback(msk); +} + static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { @@ -1975,10 +1982,10 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, * TCP option space. */ if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) - subflow->request_mptcp = 0; + mptcp_subflow_early_fallback(msk, subflow); #endif if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) - subflow->request_mptcp = 0; + mptcp_subflow_early_fallback(msk, subflow); do_connect: err = ssock->ops->connect(ssock, uaddr, addr_len, flags); From b93df08ccda326ef89a6e80fb796588b9a30a980 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:32 +0200 Subject: [PATCH 1514/2056] mptcp: explicitly track the fully established status Currently accepted msk sockets become established only after accept() returns the new sk to user-space. As MP_JOIN request are refused as per RFC spec on non fully established socket, the above causes mp_join self-tests instabilities. This change lets the msk entering the established status as soon as it receives the 3rd ack and propagates the first subflow fully established status on the msk socket. Finally we can change the subflow acceptance condition to take in account both the sock state and the msk fully established flag. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/options.c | 5 ++--- net/mptcp/protocol.c | 4 ++-- net/mptcp/protocol.h | 8 ++++++++ net/mptcp/subflow.c | 23 +++++++++++++++++++---- 4 files changed, 31 insertions(+), 9 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 19707c07efc1..3bc56eb608d8 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -709,6 +709,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, * additional ack. 
*/ subflow->fully_established = 1; + WRITE_ONCE(msk->fully_established, true); goto fully_established; } @@ -724,9 +725,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk, if (unlikely(!READ_ONCE(msk->pm.server_side))) pr_warn_once("bogus mpc option on established client sk"); - subflow->fully_established = 1; - subflow->remote_key = mp_opt->sndr_key; - subflow->can_ack = 1; + mptcp_subflow_fully_established(subflow, mp_opt); fully_established: if (likely(subflow->pm_notified)) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 2936413171be..979dfcd2aa14 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1522,6 +1522,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->local_key = subflow_req->local_key; msk->token = subflow_req->token; msk->subflow = NULL; + WRITE_ONCE(msk->fully_established, false); msk->write_seq = subflow_req->idsn + 1; atomic64_set(&msk->snd_una, msk->write_seq); @@ -1605,7 +1606,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, newsk = new_mptcp_sock; mptcp_copy_inaddrs(newsk, ssk); list_add(&subflow->node, &msk->conn_list); - inet_sk_state_store(newsk, TCP_ESTABLISHED); mptcp_rcv_space_init(msk, ssk); bh_unlock_sock(new_mptcp_sock); @@ -1855,7 +1855,7 @@ bool mptcp_finish_join(struct sock *sk) pr_debug("msk=%p, subflow=%p", msk, subflow); /* mptcp socket already closing? */ - if (inet_sk_state_load(parent) != TCP_ESTABLISHED) + if (!mptcp_is_fully_established(parent)) return false; if (!msk->pm.server_side) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 6e114c09e5b4..67634b595466 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -198,6 +198,7 @@ struct mptcp_sock { u32 token; unsigned long flags; bool can_ack; + bool fully_established; spinlock_t join_list_lock; struct work_struct work; struct list_head conn_list; @@ -342,6 +343,8 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow) } int mptcp_is_enabled(struct net *net); +void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, + struct mptcp_options_received *mp_opt); bool mptcp_subflow_data_available(struct sock *sk); void __init mptcp_subflow_init(void); @@ -373,6 +376,11 @@ void mptcp_get_options(const struct sk_buff *skb, struct mptcp_options_received *mp_opt); void mptcp_finish_connect(struct sock *sk); +static inline bool mptcp_is_fully_established(struct sock *sk) +{ + return inet_sk_state_load(sk) == TCP_ESTABLISHED && + READ_ONCE(mptcp_sk(sk)->fully_established); +} void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk); void mptcp_data_ready(struct sock *sk, struct sock *ssk); bool mptcp_finish_join(struct sock *sk); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 84e70806b250..ea81842fc3b2 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -387,6 +387,17 @@ static void subflow_drop_ctx(struct sock *ssk) kfree_rcu(ctx, rcu); } +void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow, + struct mptcp_options_received *mp_opt) +{ + struct mptcp_sock *msk = mptcp_sk(subflow->conn); + + subflow->remote_key = mp_opt->sndr_key; + subflow->fully_established = 1; + subflow->can_ack = 1; + WRITE_ONCE(msk->fully_established, true); +} + static struct sock *subflow_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -466,6 +477,11 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, } if (ctx->mp_capable) { + /* this can't race with 
mptcp_close(), as the msk is + * not yet exposted to user-space + */ + inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED); + /* new mpc subflow takes ownership of the newly * created mptcp socket */ @@ -478,9 +494,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, /* with OoO packets we can reach here without ingress * mpc option */ - ctx->remote_key = mp_opt.sndr_key; - ctx->fully_established = mp_opt.mp_capable; - ctx->can_ack = mp_opt.mp_capable; + if (mp_opt.mp_capable) + mptcp_subflow_fully_established(ctx, &mp_opt); } else if (ctx->mp_join) { struct mptcp_sock *owner; @@ -967,7 +982,7 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex, int addrlen; int err; - if (sk->sk_state != TCP_ESTABLISHED) + if (!mptcp_is_fully_established(sk)) return -ENOTCONN; err = mptcp_subflow_create_socket(sk, &sf); From fa25e815d963115eb06036a8f6a50e724bc259e2 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:33 +0200 Subject: [PATCH 1515/2056] mptcp: cleanup subflow_finish_connect() The mentioned function has several unneeded branches, handle each case - MP_CAPABLE, MP_JOIN, fallback - under a single conditional and drop quite a bit of duplicate code. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 56 ++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index ea81842fc3b2..7f3ef1840df5 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -206,44 +206,34 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset); mptcp_get_options(skb, &mp_opt); - if (subflow->request_mptcp && mp_opt.mp_capable) { + if (subflow->request_mptcp) { + if (!mp_opt.mp_capable) { + MPTCP_INC_STATS(sock_net(sk), + MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); + mptcp_do_fallback(sk); + pr_fallback(mptcp_sk(subflow->conn)); + goto fallback; + } + subflow->mp_capable = 1; subflow->can_ack = 1; subflow->remote_key = mp_opt.sndr_key; pr_debug("subflow=%p, remote_key=%llu", subflow, subflow->remote_key); - } else if (subflow->request_join && mp_opt.mp_join) { - subflow->mp_join = 1; + mptcp_finish_connect(sk); + } else if (subflow->request_join) { + u8 hmac[SHA256_DIGEST_SIZE]; + + if (!mp_opt.mp_join) + goto do_reset; + subflow->thmac = mp_opt.thmac; subflow->remote_nonce = mp_opt.nonce; pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow, subflow->thmac, subflow->remote_nonce); - } else { - if (subflow->request_mptcp) - MPTCP_INC_STATS(sock_net(sk), - MPTCP_MIB_MPCAPABLEACTIVEFALLBACK); - mptcp_do_fallback(sk); - pr_fallback(mptcp_sk(subflow->conn)); - } - if (mptcp_check_fallback(sk)) { - mptcp_rcv_space_init(mptcp_sk(parent), sk); - return; - } - - if (subflow->mp_capable) { - pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk), - subflow->remote_key); - mptcp_finish_connect(sk); - } else if (subflow->mp_join) { - u8 hmac[SHA256_DIGEST_SIZE]; - - pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", - subflow, subflow->thmac, - subflow->remote_nonce); if (!subflow_thmac_valid(subflow)) { MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC); - subflow->mp_join = 0; goto do_reset; } @@ -251,18 +241,22 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) subflow->local_nonce, subflow->remote_nonce, hmac); - memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); if 
(!mptcp_finish_join(sk)) goto do_reset; + subflow->mp_join = 1; MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); - } else { -do_reset: - tcp_send_active_reset(sk, GFP_ATOMIC); - tcp_done(sk); + } else if (mptcp_check_fallback(sk)) { +fallback: + mptcp_rcv_space_init(mptcp_sk(parent), sk); } + return; + +do_reset: + tcp_send_active_reset(sk, GFP_ATOMIC); + tcp_done(sk); } static struct request_sock_ops subflow_request_sock_ops; From b7514694ed2952684a1e4fc44d83682140fd8cef Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:34 +0200 Subject: [PATCH 1516/2056] subflow: explicitly check for plain tcp rsk When syncookie are in use, the TCP stack may feed into subflow_syn_recv_sock() plain TCP request sockets. We can't access mptcp_subflow_request_sock-specific fields on such sockets. Explicitly check the rsk ops to do safe accesses. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 7f3ef1840df5..3ef445f59556 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -415,7 +415,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, /* hopefully temporary handling for MP_JOIN+syncookie */ subflow_req = mptcp_subflow_rsk(req); - fallback_is_fatal = subflow_req->mp_join; + fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join; fallback = !tcp_rsk(req)->is_mptcp; if (fallback) goto create_child; From 97e617518cbc318113b034a5fb33f49c81701278 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:35 +0200 Subject: [PATCH 1517/2056] subflow: use rsk_ops->send_reset() tcp_send_active_reset() is more prone to transient errors (memory allocation or xmit queue full): in stress conditions the kernel may drop the egress packet, and the client will be stuck. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 3ef445f59556..ada04df6f99f 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -524,9 +524,9 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, dispose_child: subflow_drop_ctx(child); tcp_rsk(req)->drop_req = true; - tcp_send_active_reset(child, GFP_ATOMIC); inet_csk_prepare_for_destroy_sock(child); tcp_done(child); + req->rsk_ops->send_reset(sk, skb); /* The last child reference will be released by the caller */ return child; From 4cf8b7e48a09745145881b311fe6a9154ba69ebc Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 23 Jul 2020 13:02:36 +0200 Subject: [PATCH 1518/2056] subflow: introduce and use mptcp_can_accept_new_subflow() So that we can easily perform some basic PM-related adimission checks before creating the child socket. Reviewed-by: Mat Martineau Tested-by: Christoph Paasch Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/mptcp/subflow.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index ada04df6f99f..e645483d1200 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -53,6 +53,12 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2, mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); } +static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk) +{ + return mptcp_is_fully_established((void *)msk) && + READ_ONCE(msk->pm.accept_subflow); +} + /* validate received token and create truncated hmac and nonce for SYN-ACK */ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req, const struct sk_buff *skb) @@ -443,6 +449,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, } else if (subflow_req->mp_join) { mptcp_get_options(skb, &mp_opt); if (!mp_opt.mp_join || + !mptcp_can_accept_new_subflow(subflow_req->msk) || !subflow_hmac_valid(req, &mp_opt)) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); fallback = true; From 8bf9d8eabb88d9eae963a035e58826d3b6a65b53 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 23 Jul 2020 19:05:00 +0800 Subject: [PATCH 1519/2056] cxgb4: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address insetad of memset(). Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/smt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c index cbe72ed27b1e..e617e4aabbcc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/smt.c +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -55,7 +55,7 @@ struct smt_data *t4_init_smt(void) for (i = 0; i < s->smt_size; ++i) { s->smtab[i].idx = i; s->smtab[i].state = SMT_STATE_UNUSED; - memset(&s->smtab[i].src_mac, 0, ETH_ALEN); + eth_zero_addr(s->smtab[i].src_mac); spin_lock_init(&s->smtab[i].lock); s->smtab[i].refcnt = 0; } From 49b0aa1b6585705de4a08971602cd2726d9027f0 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 23 Jul 2020 19:13:43 +0800 Subject: [PATCH 1520/2056] net/ncsi: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address insetad of memset(). Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- net/ncsi/ncsi-rsp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index a94bb59793f0..5b1f4ec66dd9 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -471,7 +471,7 @@ static int ncsi_rsp_handler_sma(struct ncsi_request *nr) memcpy(&ncf->addrs[index], cmd->mac, ETH_ALEN); } else { clear_bit(cmd->index - 1, bitmap); - memset(&ncf->addrs[index], 0, ETH_ALEN); + eth_zero_addr(&ncf->addrs[index]); } spin_unlock_irqrestore(&nc->lock, flags); From 0febc7b3cd17bd2323a036356c2ae6514acf9010 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:50 +0100 Subject: [PATCH 1521/2056] l2tp: cleanup comparisons to NULL checkpatch warns about comparisons to NULL, e.g. CHECK: Comparison to NULL could be written "!rt" #474: FILE: net/l2tp/l2tp_ip.c:474: + if (rt == NULL) { These sort of comparisons are generally clearer and more readable the way checkpatch suggests, so update l2tp accordingly. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 20 ++++++++++---------- net/l2tp/l2tp_debugfs.c | 12 ++++++------ net/l2tp/l2tp_ip.c | 2 +- net/l2tp/l2tp_ip6.c | 2 +- net/l2tp/l2tp_netlink.c | 23 +++++++++++------------ net/l2tp/l2tp_ppp.c | 36 ++++++++++++++++++------------------ 6 files changed, 47 insertions(+), 48 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6871611d99f2..2f3e6b3a7d8e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -412,7 +412,7 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * } /* call private receive handler */ - if (session->recv_skb != NULL) + if (session->recv_skb) (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length); else kfree_skb(skb); @@ -911,7 +911,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) struct l2tp_tunnel *tunnel; tunnel = rcu_dereference_sk_user_data(sk); - if (tunnel == NULL) + if (!tunnel) goto pass_up; l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n", @@ -1150,7 +1150,7 @@ static void l2tp_tunnel_destruct(struct sock *sk) { struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); - if (tunnel == NULL) + if (!tunnel) goto end; l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name); @@ -1189,7 +1189,7 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) struct hlist_node *tmp; struct l2tp_session *session; - BUG_ON(tunnel == NULL); + BUG_ON(!tunnel); l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n", tunnel->name); @@ -1214,7 +1214,7 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) __l2tp_session_unhash(session); l2tp_session_queue_purge(session); - if (session->session_close != NULL) + if (session->session_close) (*session->session_close)(session); l2tp_session_dec_refcount(session); @@ -1407,11 +1407,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 int err; enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP; - if (cfg != NULL) + if (cfg) encap = cfg->encap; tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL); - if (tunnel == NULL) { + if (!tunnel) { err = -ENOMEM; goto err; } @@ -1426,7 +1426,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 rwlock_init(&tunnel->hlist_lock); tunnel->acpt_newsess = true; - if (cfg != NULL) + if (cfg) tunnel->debug = cfg->debug; tunnel->encap = encap; @@ -1615,7 +1615,7 @@ int l2tp_session_delete(struct l2tp_session *session) __l2tp_session_unhash(session); l2tp_session_queue_purge(session); - if (session->session_close != NULL) + if (session->session_close) (*session->session_close)(session); l2tp_session_dec_refcount(session); @@ -1648,7 +1648,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn struct l2tp_session *session; session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); - if (session != NULL) { + if (session) { session->magic = L2TP_SESSION_MAGIC; session->tunnel = tunnel; diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index ebe03bbb5948..117a6697da72 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -58,7 +58,7 @@ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd) pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx); pd->session_idx++; - if (pd->session == NULL) { + if (!pd->session) { pd->session_idx = 0; l2tp_dfs_next_tunnel(pd); } @@ -72,16 +72,16 @@ static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) if (!pos) goto out; - BUG_ON(m->private == NULL); + 
BUG_ON(!m->private); pd = m->private; - if (pd->tunnel == NULL) + if (!pd->tunnel) l2tp_dfs_next_tunnel(pd); else l2tp_dfs_next_session(pd); /* NULL tunnel and session indicates end of list */ - if ((pd->tunnel == NULL) && (pd->session == NULL)) + if (!pd->tunnel && !pd->session) pd = NULL; out: @@ -221,7 +221,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) atomic_long_read(&session->stats.rx_bytes), atomic_long_read(&session->stats.rx_errors)); - if (session->show != NULL) + if (session->show) session->show(m, session); } @@ -268,7 +268,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) int rc = -ENOMEM; pd = kzalloc(sizeof(*pd), GFP_KERNEL); - if (pd == NULL) + if (!pd) goto out; /* Derive the network namespace from the pid opening the diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 70f9fdaf6c86..d81564cf1e7f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -471,7 +471,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rt = (struct rtable *)__sk_dst_check(sk, 0); rcu_read_lock(); - if (rt == NULL) { + if (!rt) { const struct ip_options_rcu *inet_opt; inet_opt = rcu_dereference(inet->inet_opt); diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index ca7696147c7e..614febf8dd0d 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -486,7 +486,7 @@ static int l2tp_ip6_push_pending_frames(struct sock *sk) int err = 0; skb = skb_peek(&sk->sk_write_queue); - if (skb == NULL) + if (!skb) goto out; transhdr = (__be32 *)skb_transport_header(skb); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 42b409bc0de7..fc26ebad2f4f 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -333,7 +333,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla goto nla_put_failure; nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS); - if (nest == NULL) + if (!nest) goto nla_put_failure; if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS, @@ -475,7 +475,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback for (;;) { tunnel = l2tp_tunnel_get_nth(net, ti); - if (tunnel == NULL) + if (!tunnel) goto out; if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, @@ -598,14 +598,13 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]); #ifdef CONFIG_MODULES - if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) { + if (!l2tp_nl_cmd_ops[cfg.pw_type]) { genl_unlock(); request_module("net-l2tp-type-%u", cfg.pw_type); genl_lock(); } #endif - if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) || - (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) { + if (!l2tp_nl_cmd_ops[cfg.pw_type] || !l2tp_nl_cmd_ops[cfg.pw_type]->session_create) { ret = -EPROTONOSUPPORT; goto out_tunnel; } @@ -637,7 +636,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf u16 pw_type; session = l2tp_nl_session_get(info); - if (session == NULL) { + if (!session) { ret = -ENODEV; goto out; } @@ -662,7 +661,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf struct l2tp_session *session; session = l2tp_nl_session_get(info); - if (session == NULL) { + if (!session) { ret = -ENODEV; goto out; } @@ -729,7 +728,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl goto nla_put_failure; nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS); - if (nest == NULL) + if (!nest) goto 
nla_put_failure; if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS, @@ -774,7 +773,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info) int ret; session = l2tp_nl_session_get(info); - if (session == NULL) { + if (!session) { ret = -ENODEV; goto err; } @@ -813,14 +812,14 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback int si = cb->args[1]; for (;;) { - if (tunnel == NULL) { + if (!tunnel) { tunnel = l2tp_tunnel_get_nth(net, ti); - if (tunnel == NULL) + if (!tunnel) goto out; } session = l2tp_session_get_nth(tunnel, si); - if (session == NULL) { + if (!session) { ti++; l2tp_tunnel_dec_refcount(tunnel); tunnel = NULL; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index f894dc275393..7404661d4117 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -154,12 +154,12 @@ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) { struct l2tp_session *session; - if (sk == NULL) + if (!sk) return NULL; sock_hold(sk); session = (struct l2tp_session *)(sk->sk_user_data); - if (session == NULL) { + if (!session) { sock_put(sk); goto out; } @@ -217,7 +217,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int */ rcu_read_lock(); sk = rcu_dereference(ps->sk); - if (sk == NULL) + if (!sk) goto no_sock; /* If the first two bytes are 0xFF03, consider that it is the PPP's @@ -285,7 +285,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m, /* Get session and tunnel contexts */ error = -EBADF; session = pppol2tp_sock_to_session(sk); - if (session == NULL) + if (!session) goto error; tunnel = session->tunnel; @@ -360,7 +360,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) /* Get session and tunnel contexts from the socket */ session = pppol2tp_sock_to_session(sk); - if (session == NULL) + if (!session) goto abort; tunnel = session->tunnel; @@ -703,7 +703,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, * tunnel id. 
*/ if (!info.session_id && !info.peer_session_id) { - if (tunnel == NULL) { + if (!tunnel) { struct l2tp_tunnel_cfg tcfg = { .encap = L2TP_ENCAPTYPE_UDP, .debug = 0, @@ -738,11 +738,11 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, } else { /* Error if we can't find the tunnel */ error = -ENOENT; - if (tunnel == NULL) + if (!tunnel) goto end; /* Error if socket is not prepped */ - if (tunnel->sock == NULL) + if (!tunnel->sock) goto end; } @@ -911,14 +911,14 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, struct pppol2tp_session *pls; error = -ENOTCONN; - if (sk == NULL) + if (!sk) goto end; if (!(sk->sk_state & PPPOX_CONNECTED)) goto end; error = -EBADF; session = pppol2tp_sock_to_session(sk); - if (session == NULL) + if (!session) goto end; pls = l2tp_session_priv(session); @@ -1263,13 +1263,13 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; err = -ENOTCONN; - if (sk->sk_user_data == NULL) + if (!sk->sk_user_data) goto end; /* Get session context from the socket */ err = -EBADF; session = pppol2tp_sock_to_session(sk); - if (session == NULL) + if (!session) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel @@ -1382,13 +1382,13 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname, return -EINVAL; err = -ENOTCONN; - if (sk->sk_user_data == NULL) + if (!sk->sk_user_data) goto end; /* Get the session context */ err = -EBADF; session = pppol2tp_sock_to_session(sk); - if (session == NULL) + if (!session) goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ @@ -1464,7 +1464,7 @@ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd) pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx); pd->session_idx++; - if (pd->session == NULL) { + if (!pd->session) { pd->session_idx = 0; pppol2tp_next_tunnel(net, pd); } @@ -1479,17 +1479,17 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) if (!pos) goto out; - BUG_ON(m->private == NULL); + BUG_ON(!m->private); pd = m->private; net = seq_file_net(m); - if (pd->tunnel == NULL) + if (!pd->tunnel) pppol2tp_next_tunnel(net, pd); else pppol2tp_next_session(net, pd); /* NULL tunnel and session indicates end of list */ - if ((pd->tunnel == NULL) && (pd->session == NULL)) + if (!pd->tunnel && !pd->session) pd = NULL; out: From 6c0ec37b82834630cfe99ef42690beef18d2b400 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:51 +0100 Subject: [PATCH 1522/2056] l2tp: cleanup unnecessary braces in if statements These checks are all simple and don't benefit from extra braces to clarify intent. Remove them for easier-reading code. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 6 +++--- net/l2tp/l2tp_ppp.c | 23 +++++++++-------------- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 2f3e6b3a7d8e..d1403f27135e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -683,7 +683,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * check if we sre sending sequence numbers and if not, * configure it so. 
*/ - if ((!session->lns_mode) && (!session->send_seq)) { + if (!session->lns_mode && !session->send_seq) { l2tp_info(session, L2TP_MSG_SEQ, "%s: requested to enable seq numbers by LNS\n", session->name); @@ -707,7 +707,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * If we're the LNS and we're sending sequence numbers, the * LAC is broken. Discard the frame. */ - if ((!session->lns_mode) && (session->send_seq)) { + if (!session->lns_mode && session->send_seq) { l2tp_info(session, L2TP_MSG_SEQ, "%s: requested to disable seq numbers by LNS\n", session->name); @@ -1389,7 +1389,7 @@ static int l2tp_tunnel_sock_create(struct net *net, out: *sockp = sock; - if ((err < 0) && sock) { + if (err < 0 && sock) { kernel_sock_shutdown(sock, SHUT_RDWR); sock_release(sock); *sockp = NULL; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 7404661d4117..e58fe7e3b884 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -802,8 +802,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, * the internal context for use by ioctl() and sockopt() * handlers. */ - if ((session->session_id == 0) && - (session->peer_session_id == 0)) { + if (session->session_id == 0 && session->peer_session_id == 0) { error = 0; goto out_no_ppp; } @@ -925,7 +924,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, tunnel = session->tunnel; inet = inet_sk(tunnel->sock); - if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) { + if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET) { struct sockaddr_pppol2tp sp; len = sizeof(sp); @@ -943,8 +942,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; memcpy(uaddr, &sp, len); #if IS_ENABLED(CONFIG_IPV6) - } else if ((tunnel->version == 2) && - (tunnel->sock->sk_family == AF_INET6)) { + } else if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET6) { struct sockaddr_pppol2tpin6 sp; len = sizeof(sp); @@ -962,8 +960,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr, sizeof(tunnel->sock->sk_v6_daddr)); memcpy(uaddr, &sp, len); - } else if ((tunnel->version == 3) && - (tunnel->sock->sk_family == AF_INET6)) { + } else if (tunnel->version == 3 && tunnel->sock->sk_family == AF_INET6) { struct sockaddr_pppol2tpv3in6 sp; len = sizeof(sp); @@ -1179,7 +1176,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, switch (optname) { case PPPOL2TP_SO_RECVSEQ: - if ((val != 0) && (val != 1)) { + if (val != 0 && val != 1) { err = -EINVAL; break; } @@ -1190,7 +1187,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; case PPPOL2TP_SO_SENDSEQ: - if ((val != 0) && (val != 1)) { + if (val != 0 && val != 1) { err = -EINVAL; break; } @@ -1208,7 +1205,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, break; case PPPOL2TP_SO_LNSMODE: - if ((val != 0) && (val != 1)) { + if (val != 0 && val != 1) { err = -EINVAL; break; } @@ -1274,8 +1271,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, /* Special case: if session_id == 0x0000, treat as operation on tunnel */ - if ((session->session_id == 0) && - (session->peer_session_id == 0)) { + if (session->session_id == 0 && session->peer_session_id == 0) { tunnel = session->tunnel; err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); } else { @@ -1392,8 +1388,7 @@ static int pppol2tp_getsockopt(struct socket 
*sock, int level, int optname, goto end; /* Special case: if session_id == 0x0000, treat as operation on tunnel */ - if ((session->session_id == 0) && - (session->peer_session_id == 0)) { + if (session->session_id == 0 && session->peer_session_id == 0) { tunnel = session->tunnel; err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); if (err) From 26d9a2710616c4499b661cada35ac5e4b125ebe8 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:52 +0100 Subject: [PATCH 1523/2056] l2tp: check socket address type in l2tp_dfs_seq_tunnel_show checkpatch warns about indentation and brace balancing around the conditionally compiled code for AF_INET6 support in l2tp_dfs_seq_tunnel_show. By adding another check on the socket address type we can make the code more readable while removing the checkpatch warning. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_debugfs.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 117a6697da72..72ba83aa0eaf 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -146,10 +146,12 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) seq_printf(m, " from %pI6c to %pI6c\n", &np->saddr, &tunnel->sock->sk_v6_daddr); - } else + } #endif - seq_printf(m, " from %pI4 to %pI4\n", - &inet->inet_saddr, &inet->inet_daddr); + if (tunnel->sock->sk_family == AF_INET) + seq_printf(m, " from %pI4 to %pI4\n", + &inet->inet_saddr, &inet->inet_daddr); + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) seq_printf(m, " source port %hu, dest port %hu\n", ntohs(inet->inet_sport), ntohs(inet->inet_dport)); From 584ca31f469dece9a83cdce33953ba23b115945c Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:53 +0100 Subject: [PATCH 1524/2056] l2tp: cleanup netlink send of tunnel address information l2tp_nl_tunnel_send has conditionally compiled code to support AF_INET6, which makes the code difficult to follow and triggers checkpatch warnings. Split the code out into functions to handle the AF_INET v.s. AF_INET6 cases, which both improves readability and resolves the checkpatch warnings. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_netlink.c | 126 ++++++++++++++++++++++------------------ 1 file changed, 70 insertions(+), 56 deletions(-) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index fc26ebad2f4f..0021cc03417e 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -310,16 +310,79 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info return ret; } +#if IS_ENABLED(CONFIG_IPV6) +static int l2tp_nl_tunnel_send_addr6(struct sk_buff *skb, struct sock *sk, + enum l2tp_encap_type encap) +{ + struct inet_sock *inet = inet_sk(sk); + struct ipv6_pinfo *np = inet6_sk(sk); + + switch (encap) { + case L2TP_ENCAPTYPE_UDP: + if (udp_get_no_check6_tx(sk) && + nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX)) + return -1; + if (udp_get_no_check6_rx(sk) && + nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX)) + return -1; + if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || + nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport))) + return -1; + fallthrough; + case L2TP_ENCAPTYPE_IP: + if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR, &np->saddr) || + nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR, &sk->sk_v6_daddr)) + return -1; + break; + } + return 0; +} +#endif + +static int l2tp_nl_tunnel_send_addr4(struct sk_buff *skb, struct sock *sk, + enum l2tp_encap_type encap) +{ + struct inet_sock *inet = inet_sk(sk); + + switch (encap) { + case L2TP_ENCAPTYPE_UDP: + if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx) || + nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || + nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport))) + return -1; + fallthrough; + case L2TP_ENCAPTYPE_IP: + if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) || + nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr)) + return -1; + break; + } + + return 0; +} + +/* Append attributes for the tunnel address, handling the different attribute types + * used for different tunnel encapsulation and AF_INET v.s. AF_INET6. 
+ */ +static int l2tp_nl_tunnel_send_addr(struct sk_buff *skb, struct l2tp_tunnel *tunnel) +{ + struct sock *sk = tunnel->sock; + + if (!sk) + return 0; + +#if IS_ENABLED(CONFIG_IPV6) + if (sk->sk_family == AF_INET6) + return l2tp_nl_tunnel_send_addr6(skb, sk, tunnel->encap); +#endif + return l2tp_nl_tunnel_send_addr4(skb, sk, tunnel->encap); +} + static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags, struct l2tp_tunnel *tunnel, u8 cmd) { void *hdr; struct nlattr *nest; - struct sock *sk = NULL; - struct inet_sock *inet; -#if IS_ENABLED(CONFIG_IPV6) - struct ipv6_pinfo *np = NULL; -#endif hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd); if (!hdr) @@ -363,58 +426,9 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla goto nla_put_failure; nla_nest_end(skb, nest); - sk = tunnel->sock; - if (!sk) - goto out; + if (l2tp_nl_tunnel_send_addr(skb, tunnel)) + goto nla_put_failure; -#if IS_ENABLED(CONFIG_IPV6) - if (sk->sk_family == AF_INET6) - np = inet6_sk(sk); -#endif - - inet = inet_sk(sk); - - switch (tunnel->encap) { - case L2TP_ENCAPTYPE_UDP: - switch (sk->sk_family) { - case AF_INET: - if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx)) - goto nla_put_failure; - break; -#if IS_ENABLED(CONFIG_IPV6) - case AF_INET6: - if (udp_get_no_check6_tx(sk) && - nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX)) - goto nla_put_failure; - if (udp_get_no_check6_rx(sk) && - nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX)) - goto nla_put_failure; - break; -#endif - } - if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) || - nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport))) - goto nla_put_failure; - /* fall through */ - case L2TP_ENCAPTYPE_IP: -#if IS_ENABLED(CONFIG_IPV6) - if (np) { - if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR, - &np->saddr) || - nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR, - &sk->sk_v6_daddr)) - goto nla_put_failure; - } else -#endif - if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR, - inet->inet_saddr) || - nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR, - inet->inet_daddr)) - goto nla_put_failure; - break; - } - -out: genlmsg_end(skb, hdr); return 0; From 0787840dad4ce7875f1e85eb8b72a54e9d87f9db Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:54 +0100 Subject: [PATCH 1525/2056] l2tp: cleanup netlink tunnel create address handling When creating an L2TP tunnel using the netlink API, userspace must either pass a socket FD for the tunnel to use (for managed tunnels), or specify the tunnel source/destination address (for unmanaged tunnels). Since source/destination addresses may be AF_INET or AF_INET6, the l2tp netlink code has conditionally compiled blocks to support IPv6. Rather than embedding these directly into l2tp_nl_cmd_tunnel_create (where it makes the code difficult to read and confuses checkpatch to boot) split the handling of address-related attributes into a separate function. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_netlink.c | 57 ++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 0021cc03417e..35716a6e1e2c 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -155,12 +155,38 @@ static int l2tp_session_notify(struct genl_family *family, return ret; } +static int l2tp_nl_cmd_tunnel_create_get_addr(struct nlattr **attrs, struct l2tp_tunnel_cfg *cfg) +{ + if (attrs[L2TP_ATTR_UDP_SPORT]) + cfg->local_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_SPORT]); + if (attrs[L2TP_ATTR_UDP_DPORT]) + cfg->peer_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_DPORT]); + cfg->use_udp_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_CSUM]); + + /* Must have either AF_INET or AF_INET6 address for source and destination */ +#if IS_ENABLED(CONFIG_IPV6) + if (attrs[L2TP_ATTR_IP6_SADDR] && attrs[L2TP_ATTR_IP6_DADDR]) { + cfg->local_ip6 = nla_data(attrs[L2TP_ATTR_IP6_SADDR]); + cfg->peer_ip6 = nla_data(attrs[L2TP_ATTR_IP6_DADDR]); + cfg->udp6_zero_tx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]); + cfg->udp6_zero_rx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]); + return 0; + } +#endif + if (attrs[L2TP_ATTR_IP_SADDR] && attrs[L2TP_ATTR_IP_DADDR]) { + cfg->local_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_SADDR]); + cfg->peer_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_DADDR]); + return 0; + } + return -EINVAL; +} + static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info) { u32 tunnel_id; u32 peer_tunnel_id; int proto_version; - int fd; + int fd = -1; int ret = 0; struct l2tp_tunnel_cfg cfg = { 0, }; struct l2tp_tunnel *tunnel; @@ -191,33 +217,16 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info } cfg.encap = nla_get_u16(attrs[L2TP_ATTR_ENCAP_TYPE]); - fd = -1; + /* Managed tunnels take the tunnel socket from userspace. + * Unmanaged tunnels must call out the source and destination addresses + * for the kernel to create the tunnel socket itself. + */ if (attrs[L2TP_ATTR_FD]) { fd = nla_get_u32(attrs[L2TP_ATTR_FD]); } else { -#if IS_ENABLED(CONFIG_IPV6) - if (attrs[L2TP_ATTR_IP6_SADDR] && attrs[L2TP_ATTR_IP6_DADDR]) { - cfg.local_ip6 = nla_data(attrs[L2TP_ATTR_IP6_SADDR]); - cfg.peer_ip6 = nla_data(attrs[L2TP_ATTR_IP6_DADDR]); - } else -#endif - if (attrs[L2TP_ATTR_IP_SADDR] && attrs[L2TP_ATTR_IP_DADDR]) { - cfg.local_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_SADDR]); - cfg.peer_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_DADDR]); - } else { - ret = -EINVAL; + ret = l2tp_nl_cmd_tunnel_create_get_addr(attrs, &cfg); + if (ret < 0) goto out; - } - if (attrs[L2TP_ATTR_UDP_SPORT]) - cfg.local_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_SPORT]); - if (attrs[L2TP_ATTR_UDP_DPORT]) - cfg.peer_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_DPORT]); - cfg.use_udp_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_CSUM]); - -#if IS_ENABLED(CONFIG_IPV6) - cfg.udp6_zero_tx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]); - cfg.udp6_zero_rx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]); -#endif } if (attrs[L2TP_ATTR_DEBUG]) From 70c05bfa4a3d36c0d8e7cef86b0f6c8a9cbb8881 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Thu, 23 Jul 2020 12:29:55 +0100 Subject: [PATCH 1526/2056] l2tp: cleanup kzalloc calls Passing "sizeof(struct blah)" in kzalloc calls is less readable, potentially prone to future bugs if the type of the pointer is changed, and triggers checkpatch warnings. 
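For readers less familiar with the idiom, a minimal generic illustration of the two forms follows (struct foo_ctx is a made-up type, not anything in the l2tp code, and kzalloc() comes from linux/slab.h):

	struct foo_ctx *ctx;

	/* Spells out the type by hand: if ctx is ever changed to point to a
	 * different structure, this call quietly keeps allocating the old size.
	 */
	ctx = kzalloc(sizeof(struct foo_ctx), GFP_KERNEL);

	/* Preferred form, and what checkpatch suggests: the size always follows
	 * the pointee type of ctx.
	 */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);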
Tweak the kzalloc calls in l2tp which use this form to avoid the warning. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d1403f27135e..7e3523015d6f 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1410,7 +1410,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 if (cfg) encap = cfg->encap; - tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL); + tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); if (!tunnel) { err = -ENOMEM; goto err; @@ -1647,7 +1647,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn { struct l2tp_session *session; - session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL); + session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL); if (session) { session->magic = L2TP_SESSION_MAGIC; session->tunnel = tunnel; From 7235ffae3d2cd3dd02ea840b1f51eeb394e40b0d Mon Sep 17 00:00:00 2001 From: Vishal Kulkarni Date: Thu, 23 Jul 2020 18:19:50 +0530 Subject: [PATCH 1527/2056] cxgb4: add loopback ethtool self-test In this test, loopback pkt is created and sent on default queue. The packet goes until the Multi Port Switch (MPS) just before the MAC and based on the specified channel number, it either goes outside the wire on one of the physical ports or looped back to Rx path by MPS. In this case, we're specifying loopback channel, instead of physical ports, so the packet gets looped back to Rx path, instead of getting transmitted on the wire. v3: - Modify commit message to include test details. v2: - Add only loopback self-test. Signed-off-by: Vishal Kulkarni Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 8 ++ .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 52 +++++++++ drivers/net/ethernet/chelsio/cxgb4/sge.c | 107 +++++++++++++++++- 3 files changed, 166 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c59e9ccc2f18..adbc0d088070 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -532,6 +532,12 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD)) #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf) +struct cxgb4_ethtool_lb_test { + struct completion completion; + int result; + int loopback; +}; + struct fw_info { u8 chip; char *fs_name; @@ -685,6 +691,7 @@ struct port_info { u16 nmirrorqsets; u32 vi_mirror_count; struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */ + struct cxgb4_ethtool_lb_test ethtool_lb; }; struct dentry; @@ -1595,6 +1602,7 @@ void t4_free_sge_resources(struct adapter *adap); void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); irq_handler_t t4_intr_handler(struct adapter *adap); netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev); +int cxgb4_selftest_lb_pkt(struct net_device *netdev); int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, const struct pkt_gl *gl); int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index b66a2e6cbbeb..12ef9ddd1e54 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -25,6 +25,15 @@ static void set_msglevel(struct net_device *dev, u32 val) netdev2adap(dev)->msg_enable = val; } +enum cxgb4_ethtool_tests { + CXGB4_ETHTOOL_LB_TEST, + CXGB4_ETHTOOL_MAX_TEST, +}; + +static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = { + "Loop back test", +}; + static const char * const flash_region_strings[] = { "All", "Firmware", @@ -166,6 +175,8 @@ static int get_sset_count(struct net_device *dev, int sset) ARRAY_SIZE(loopback_stats_strings); case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(cxgb4_priv_flags_strings); + case ETH_SS_TEST: + return ARRAY_SIZE(cxgb4_selftest_strings); default: return -EOPNOTSUPP; } @@ -228,6 +239,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data) } else if (stringset == ETH_SS_PRIV_FLAGS) { memcpy(data, cxgb4_priv_flags_strings, sizeof(cxgb4_priv_flags_strings)); + } else if (stringset == ETH_SS_TEST) { + memcpy(data, cxgb4_selftest_strings, + sizeof(cxgb4_selftest_strings)); } } @@ -2056,6 +2070,43 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags) return 0; } +static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status) +{ + int dev_state = netif_running(netdev); + + if (dev_state) { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + + *lb_status = cxgb4_selftest_lb_pkt(netdev); + + if (dev_state) { + netif_tx_start_all_queues(netdev); + netif_carrier_on(netdev); + } +} + +static void cxgb4_self_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + + memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST); + + if (!(adap->flags & CXGB4_FW_OK)) { + eth_test->flags |= ETH_TEST_FL_FAILED; + return; + } 
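/* Illustrative aside, not part of the patch: with the .self_test hook
 * registered in cxgb_ethtool_ops further down, this path is driven from
 * user space through the standard ethtool self-test interface, for example
 *
 *     ethtool --test eth0 offline
 *
 * (the interface name is only an example).  ethtool sets ETH_TEST_FL_OFFLINE
 * in eth_test->flags for the offline variant, which is the condition the
 * following lines use to decide whether to run the loopback test.
 */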
+ + if (eth_test->flags == ETH_TEST_FL_OFFLINE) + cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]); + + if (data[CXGB4_ETHTOOL_LB_TEST]) + eth_test->flags |= ETH_TEST_FL_FAILED; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -2090,6 +2141,7 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_rxfh_indir_size = get_rss_table_size, .get_rxfh = get_rss_table, .set_rxfh = set_rss_table, + .self_test = cxgb4_self_test, .flash_device = set_flash, .get_ts_info = get_ts_info, .set_dump = set_dump, diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index f9c6f13d7c99..3f0fdffdb2b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2537,6 +2537,80 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr) } } +#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST" + +int cxgb4_selftest_lb_pkt(struct net_device *netdev) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + struct cxgb4_ethtool_lb_test *lb; + int ret, i = 0, pkt_len, credits; + struct fw_eth_tx_pkt_wr *wr; + struct cpl_tx_pkt_core *cpl; + u32 ctrl0, ndesc, flits; + struct sge_eth_txq *q; + u8 *sgl; + + pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR); + + flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) + + sizeof(*wr), sizeof(__be64)); + ndesc = flits_to_desc(flits); + + lb = &pi->ethtool_lb; + lb->loopback = 1; + + q = &adap->sge.ethtxq[pi->first_qset]; + + reclaim_completed_tx(adap, &q->q, -1, true); + credits = txq_avail(&q->q) - ndesc; + if (unlikely(credits < 0)) + return -ENOMEM; + + wr = (void *)&q->q.desc[q->q.pidx]; + memset(wr, 0, sizeof(struct tx_desc)); + + wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | + FW_WR_IMMDLEN_V(pkt_len + + sizeof(*cpl))); + wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); + wr->r3 = cpu_to_be64(0); + + cpl = (void *)(wr + 1); + sgl = (u8 *)(cpl + 1); + + ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | + TXPKT_INTF_V(pi->tx_chan + 4); + + cpl->ctrl0 = htonl(ctrl0); + cpl->pack = htons(0); + cpl->len = htons(pkt_len); + cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); + + eth_broadcast_addr(sgl); + i += ETH_ALEN; + ether_addr_copy(&sgl[i], netdev->dev_addr); + i += ETH_ALEN; + + snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s", + CXGB4_SELFTEST_LB_STR); + + init_completion(&lb->completion); + txq_advance(&q->q, ndesc); + cxgb4_ring_tx_db(adap, &q->q, ndesc); + + /* wait for the pkt to return */ + ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); + if (!ret) + ret = -ETIMEDOUT; + else + ret = lb->result; + + lb->loopback = 0; + + return ret; +} + /** * ctrl_xmit - send a packet through an SGE control Tx queue * @q: the control queue @@ -3413,6 +3487,31 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq, t4_sge_eth_txq_egress_update(adapter, txq, -1); } +static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si) +{ + struct adapter *adap = pi->adapter; + struct cxgb4_ethtool_lb_test *lb; + struct sge *s = &adap->sge; + struct net_device *netdev; + u8 *data; + int i; + + netdev = adap->port[pi->port_id]; + lb = &pi->ethtool_lb; + data = si->va + s->pktshift; + + i = ETH_ALEN; + if (!ether_addr_equal(data + i, netdev->dev_addr)) + return -1; + + i += ETH_ALEN; + if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR)) + lb->result = -EIO; + + 
complete(&lb->completion); + return 0; +} + /** * t4_ethrx_handler - process an ingress ethernet packet * @q: the response queue that received the packet @@ -3436,6 +3535,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct port_info *pi; int ret = 0; + pi = netdev_priv(q->netdev); /* If we're looking at TX Queue CIDX Update, handle that separately * and return. */ @@ -3463,6 +3563,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, if (err_vec) rxq->stats.bad_rx_pkts++; + if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { + ret = cxgb4_validate_lb_pkt(pi, si); + if (!ret) + return 0; + } + if (((pkt->l2info & htonl(RXF_TCP_F)) || tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { @@ -3476,7 +3582,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, rxq->stats.rx_drops++; return 0; } - pi = netdev_priv(q->netdev); /* Handle PTP Event Rx packet */ if (unlikely(pi->ptp_enable)) { From 1082b360e3f6c0c7ce46417f3ecd5517a11abf60 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Jul 2020 09:16:00 -0700 Subject: [PATCH 1528/2056] ice: refactor ice_discover_caps to avoid need to retry The ice_discover_caps function is used to read the device and function capabilities, updating the hardware capabilities structures with relevant data. The exact number of capabilities returned by the hardware is unknown ahead of time. The AdminQ command will report the total number of capabilities in the return buffer. The current implementation involves requesting capabilities once, reading this returned size, and then re-requested with that size. This isn't really necessary. The firmware interface has a maximum size of ICE_AQ_MAX_BUF_LEN. Firmware can never return more than ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem) capabilities. Avoid the retry loop by simply allocating a buffer of size ICE_AQ_MAX_BUF_LEN. This is significantly simpler than retrying. The extra allocation isn't a big deal, as it will be released after we finish parsing the capabilities. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 43 ++++++--------------- 1 file changed, 12 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 57fd815c41bc..37ea042ece13 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1901,40 +1901,21 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) { enum ice_status status; u32 cap_count; - u16 cbuf_len; - u8 retries; + void *cbuf; - /* The driver doesn't know how many capabilities the device will return - * so the buffer size required isn't known ahead of time. The driver - * starts with cbuf_len and if this turns out to be insufficient, the - * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs. - * The driver then allocates the buffer based on the count and retries - * the operation. So it follows that the retry count is 2. + cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!cbuf) + return ICE_ERR_NO_MEMORY; + + /* Although the driver doesn't know the number of capabilities the + * device will return, we can simply send a 4KB buffer, the maximum + * possible size that firmware can return. 
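As a quick sanity check on the sizing described above (editorial arithmetic: the element size of struct ice_aqc_list_caps_elem is assumed to be 32 bytes, and ICE_AQ_MAX_BUF_LEN is taken as 4096 per the "4KB buffer" wording):

	cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem)
	          = 4096 / 32
	          = 128 capability records

which is an upper bound on what firmware can return in a single response, so the old grow-and-retry loop is no longer needed.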
*/ -#define ICE_GET_CAP_BUF_COUNT 40 -#define ICE_GET_CAP_RETRY_COUNT 2 + cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); - cap_count = ICE_GET_CAP_BUF_COUNT; - retries = ICE_GET_CAP_RETRY_COUNT; - - do { - void *cbuf; - - cbuf_len = (u16)(cap_count * - sizeof(struct ice_aqc_list_caps_elem)); - cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL); - if (!cbuf) - return ICE_ERR_NO_MEMORY; - - status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count, - opc, NULL); - devm_kfree(ice_hw_to_dev(hw), cbuf); - - if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM) - break; - - /* If ENOMEM is returned, try again with bigger buffer */ - } while (--retries); + status = ice_aq_discover_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, + opc, NULL); + kfree(cbuf); return status; } From 595b13e228443239a7a7b04ed7546847023d6115 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Jul 2020 09:16:01 -0700 Subject: [PATCH 1529/2056] ice: split ice_parse_caps into separate functions The ice_parse_caps function is used to convert the capability block data coming from firmware into a structured format used by other parts of the code. The current implementation directly updates the hw->func_caps and hw->dev_caps structures. It is directly called from within ice_aq_discover_caps. This causes the discover_caps function to have the side effect of modifying the HW capability structures, which is not intuitive. Split this function into ice_parse_dev_caps and ice_parse_func_caps. These functions will take a pointer to the dev_caps and func_caps respectively. Also create an ice_parse_common_caps for sharing the capability logic that is common to device and function. Doing so enables a future refactor to allow reading and parsing capabilities into a local caps structure instead of modifying the members of the HW structure directly. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 572 +++++++++++++------- 1 file changed, 390 insertions(+), 182 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 37ea042ece13..c32b9100eec3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1616,6 +1616,391 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max) return max / funcs; } +/** + * ice_parse_common_caps - parse common device/function capabilities + * @hw: pointer to the HW struct + * @caps: pointer to common capabilities structure + * @elem: the capability element to parse + * @prefix: message prefix for tracing capabilities + * + * Given a capability element, extract relevant details into the common + * capability structure. + * + * Returns: true if the capability matches one of the common capability ids, + * false otherwise. 
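The boolean result is what lets the scope-specific parsers keep their switch statements limited to scope-specific IDs; a rough sketch of the intended caller pattern, modeled on ice_parse_func_caps further down rather than copied verbatim from the driver:

	found = ice_parse_common_caps(hw, &dev_p->common_cap, &cap_resp[i],
				      "dev caps");
	switch (cap) {
	/* ... handle device-only capability IDs ... */
	default:
		/* log as unknown only if neither the common nor the
		 * scope-specific handling recognized the ID
		 */
		if (!found)
			ice_debug(hw, ICE_DBG_INIT,
				  "dev caps: unknown capability[%d]: 0x%x\n",
				  i, cap);
		break;
	}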
+ */ +static bool +ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, + struct ice_aqc_list_caps_elem *elem, const char *prefix) +{ + u32 logical_id = le32_to_cpu(elem->logical_id); + u32 phys_id = le32_to_cpu(elem->phys_id); + u32 number = le32_to_cpu(elem->number); + u16 cap = le16_to_cpu(elem->cap); + bool found = true; + + switch (cap) { + case ICE_AQC_CAPS_VALID_FUNCTIONS: + caps->valid_functions = number; + ice_debug(hw, ICE_DBG_INIT, + "%s: valid_functions (bitmap) = %d\n", prefix, + caps->valid_functions); + break; + case ICE_AQC_CAPS_SRIOV: + caps->sr_iov_1_1 = (number == 1); + ice_debug(hw, ICE_DBG_INIT, + "%s: sr_iov_1_1 = %d\n", prefix, + caps->sr_iov_1_1); + break; + case ICE_AQC_CAPS_DCB: + caps->dcb = (number == 1); + caps->active_tc_bitmap = logical_id; + caps->maxtc = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: dcb = %d\n", prefix, caps->dcb); + ice_debug(hw, ICE_DBG_INIT, + "%s: active_tc_bitmap = %d\n", prefix, + caps->active_tc_bitmap); + ice_debug(hw, ICE_DBG_INIT, + "%s: maxtc = %d\n", prefix, caps->maxtc); + break; + case ICE_AQC_CAPS_RSS: + caps->rss_table_size = number; + caps->rss_table_entry_width = logical_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: rss_table_size = %d\n", prefix, + caps->rss_table_size); + ice_debug(hw, ICE_DBG_INIT, + "%s: rss_table_entry_width = %d\n", prefix, + caps->rss_table_entry_width); + break; + case ICE_AQC_CAPS_RXQS: + caps->num_rxq = number; + caps->rxq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_rxq = %d\n", prefix, + caps->num_rxq); + ice_debug(hw, ICE_DBG_INIT, + "%s: rxq_first_id = %d\n", prefix, + caps->rxq_first_id); + break; + case ICE_AQC_CAPS_TXQS: + caps->num_txq = number; + caps->txq_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_txq = %d\n", prefix, + caps->num_txq); + ice_debug(hw, ICE_DBG_INIT, + "%s: txq_first_id = %d\n", prefix, + caps->txq_first_id); + break; + case ICE_AQC_CAPS_MSIX: + caps->num_msix_vectors = number; + caps->msix_vector_first_id = phys_id; + ice_debug(hw, ICE_DBG_INIT, + "%s: num_msix_vectors = %d\n", prefix, + caps->num_msix_vectors); + ice_debug(hw, ICE_DBG_INIT, + "%s: msix_vector_first_id = %d\n", prefix, + caps->msix_vector_first_id); + break; + case ICE_AQC_CAPS_MAX_MTU: + caps->max_mtu = number; + ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", + prefix, caps->max_mtu); + break; + default: + /* Not one of the recognized common capabilities */ + found = false; + } + + return found; +} + +/** + * ice_recalc_port_limited_caps - Recalculate port limited capabilities + * @hw: pointer to the HW structure + * @caps: pointer to capabilities structure to fix + * + * Re-calculate the capabilities that are dependent on the number of physical + * ports; i.e. some features are not supported or function differently on + * devices with more than 4 ports. + */ +static void +ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps) +{ + /* This assumes device capabilities are always scanned before function + * capabilities during the initialization flow. + */ + if (hw->dev_caps.num_funcs > 4) { + /* Max 4 TCs per port */ + caps->maxtc = 4; + ice_debug(hw, ICE_DBG_INIT, + "reducing maxtc to %d (based on #ports)\n", + caps->maxtc); + } +} + +/** + * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @cap: pointer to the capability element to parse + * + * Extract function capabilities for ICE_AQC_CAPS_VF. 
+ */ +static void +ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, + struct ice_aqc_list_caps_elem *cap) +{ + u32 logical_id = le32_to_cpu(cap->logical_id); + u32 number = le32_to_cpu(cap->number); + + func_p->num_allocd_vfs = number; + func_p->vf_base_id = logical_id; + ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n", + func_p->num_allocd_vfs); + ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n", + func_p->vf_base_id); +} + +/** + * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @cap: pointer to the capability element to parse + * + * Extract function capabilities for ICE_AQC_CAPS_VSI. + */ +static void +ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, + struct ice_aqc_list_caps_elem *cap) +{ + func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI); + ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n", + le32_to_cpu(cap->number)); + ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n", + func_p->guar_num_vsi); +} + +/** + * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * + * Extract function capabilities for ICE_AQC_CAPS_FD. + */ +static void +ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p) +{ + u32 reg_val, val; + + reg_val = rd32(hw, GLQF_FD_SIZE); + val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> + GLQF_FD_SIZE_FD_GSIZE_S; + func_p->fd_fltr_guar = + ice_get_num_per_func(hw, val); + val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> + GLQF_FD_SIZE_FD_BSIZE_S; + func_p->fd_fltr_best_effort = val; + + ice_debug(hw, ICE_DBG_INIT, + "func caps: fd_fltr_guar = %d\n", + func_p->fd_fltr_guar); + ice_debug(hw, ICE_DBG_INIT, + "func caps: fd_fltr_best_effort = %d\n", + func_p->fd_fltr_best_effort); +} + +/** + * ice_parse_func_caps - Parse function capabilities + * @hw: pointer to the HW struct + * @func_p: pointer to function capabilities structure + * @buf: buffer containing the function capability records + * @cap_count: the number of capabilities + * + * Helper function to parse function (0x000A) capabilities list. For + * capabilities shared between device and function, this relies on + * ice_parse_common_caps. + * + * Loop through the list of provided capabilities and extract the relevant + * data into the function capabilities structured. 
+ */ +static void +ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, + void *buf, u32 cap_count) +{ + struct ice_aqc_list_caps_elem *cap_resp; + u32 i; + + cap_resp = (struct ice_aqc_list_caps_elem *)buf; + + memset(func_p, 0, sizeof(*func_p)); + + for (i = 0; i < cap_count; i++) { + u16 cap = le16_to_cpu(cap_resp[i].cap); + bool found; + + found = ice_parse_common_caps(hw, &func_p->common_cap, + &cap_resp[i], "func caps"); + + switch (cap) { + case ICE_AQC_CAPS_VF: + ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]); + break; + case ICE_AQC_CAPS_VSI: + ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]); + break; + case ICE_AQC_CAPS_FD: + ice_parse_fdir_func_caps(hw, func_p); + break; + default: + /* Don't list common capabilities as unknown */ + if (!found) + ice_debug(hw, ICE_DBG_INIT, + "func caps: unknown capability[%d]: 0x%x\n", + i, cap); + break; + } + } + + ice_recalc_port_limited_caps(hw, &func_p->common_cap); +} + +/** + * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities. + */ +static void +ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + u32 number = le32_to_cpu(cap->number); + + dev_p->num_funcs = hweight32(number); + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", + dev_p->num_funcs); +} + +/** + * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_VF for device capabilities. + */ +static void +ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + u32 number = le32_to_cpu(cap->number); + + dev_p->num_vfs_exposed = number; + ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n", + dev_p->num_vfs_exposed); +} + +/** + * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_VSI for device capabilities. + */ +static void +ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + u32 number = le32_to_cpu(cap->number); + + dev_p->num_vsi_allocd_to_host = number; + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n", + dev_p->num_vsi_allocd_to_host); +} + +/** + * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_FD for device capabilities. 
+ */ +static void +ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + u32 number = le32_to_cpu(cap->number); + + dev_p->num_flow_director_fltr = number; + ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n", + dev_p->num_flow_director_fltr); +} + +/** + * ice_parse_dev_caps - Parse device capabilities + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @buf: buffer containing the device capability records + * @cap_count: the number of capabilities + * + * Helper device to parse device (0x000B) capabilities list. For + * capabilities shared between device and device, this relies on + * ice_parse_common_caps. + * + * Loop through the list of provided capabilities and extract the relevant + * data into the device capabilities structured. + */ +static void +ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, + void *buf, u32 cap_count) +{ + struct ice_aqc_list_caps_elem *cap_resp; + u32 i; + + cap_resp = (struct ice_aqc_list_caps_elem *)buf; + + memset(dev_p, 0, sizeof(*dev_p)); + + for (i = 0; i < cap_count; i++) { + u16 cap = le16_to_cpu(cap_resp[i].cap); + bool found; + + found = ice_parse_common_caps(hw, &dev_p->common_cap, + &cap_resp[i], "dev caps"); + + switch (cap) { + case ICE_AQC_CAPS_VALID_FUNCTIONS: + ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]); + break; + case ICE_AQC_CAPS_VF: + ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]); + break; + case ICE_AQC_CAPS_VSI: + ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]); + break; + case ICE_AQC_CAPS_FD: + ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]); + break; + default: + /* Don't list common capabilities as unknown */ + if (!found) + ice_debug(hw, ICE_DBG_INIT, + "dev caps: unknown capability[%d]: 0x%x\n", + i, cap); + break; + } + } + + ice_recalc_port_limited_caps(hw, &dev_p->common_cap); +} + /** * ice_parse_caps - parse function/device capabilities * @hw: pointer to the HW struct @@ -1629,192 +2014,15 @@ static void ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, enum ice_adminq_opc opc) { - struct ice_aqc_list_caps_elem *cap_resp; - struct ice_hw_func_caps *func_p = NULL; - struct ice_hw_dev_caps *dev_p = NULL; - struct ice_hw_common_caps *caps; - char const *prefix; - u32 i; - if (!buf) return; - cap_resp = (struct ice_aqc_list_caps_elem *)buf; - - if (opc == ice_aqc_opc_list_dev_caps) { - dev_p = &hw->dev_caps; - caps = &dev_p->common_cap; - prefix = "dev cap"; - } else if (opc == ice_aqc_opc_list_func_caps) { - func_p = &hw->func_caps; - caps = &func_p->common_cap; - prefix = "func cap"; - } else { + if (opc == ice_aqc_opc_list_dev_caps) + ice_parse_dev_caps(hw, &hw->dev_caps, buf, cap_count); + else if (opc == ice_aqc_opc_list_func_caps) + ice_parse_func_caps(hw, &hw->func_caps, buf, cap_count); + else ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); - return; - } - - for (i = 0; caps && i < cap_count; i++, cap_resp++) { - u32 logical_id = le32_to_cpu(cap_resp->logical_id); - u32 phys_id = le32_to_cpu(cap_resp->phys_id); - u32 number = le32_to_cpu(cap_resp->number); - u16 cap = le16_to_cpu(cap_resp->cap); - - switch (cap) { - case ICE_AQC_CAPS_VALID_FUNCTIONS: - caps->valid_functions = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: valid_functions (bitmap) = %d\n", prefix, - caps->valid_functions); - - /* store func count for resource management purposes */ - if (dev_p) - dev_p->num_funcs = hweight32(number); - break; - case ICE_AQC_CAPS_SRIOV: - caps->sr_iov_1_1 = (number 
== 1); - ice_debug(hw, ICE_DBG_INIT, - "%s: sr_iov_1_1 = %d\n", prefix, - caps->sr_iov_1_1); - break; - case ICE_AQC_CAPS_VF: - if (dev_p) { - dev_p->num_vfs_exposed = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_vfs_exposed = %d\n", prefix, - dev_p->num_vfs_exposed); - } else if (func_p) { - func_p->num_allocd_vfs = number; - func_p->vf_base_id = logical_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_allocd_vfs = %d\n", prefix, - func_p->num_allocd_vfs); - ice_debug(hw, ICE_DBG_INIT, - "%s: vf_base_id = %d\n", prefix, - func_p->vf_base_id); - } - break; - case ICE_AQC_CAPS_VSI: - if (dev_p) { - dev_p->num_vsi_allocd_to_host = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_vsi_allocd_to_host = %d\n", - prefix, - dev_p->num_vsi_allocd_to_host); - } else if (func_p) { - func_p->guar_num_vsi = - ice_get_num_per_func(hw, ICE_MAX_VSI); - ice_debug(hw, ICE_DBG_INIT, - "%s: guar_num_vsi (fw) = %d\n", - prefix, number); - ice_debug(hw, ICE_DBG_INIT, - "%s: guar_num_vsi = %d\n", - prefix, func_p->guar_num_vsi); - } - break; - case ICE_AQC_CAPS_DCB: - caps->dcb = (number == 1); - caps->active_tc_bitmap = logical_id; - caps->maxtc = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: dcb = %d\n", prefix, caps->dcb); - ice_debug(hw, ICE_DBG_INIT, - "%s: active_tc_bitmap = %d\n", prefix, - caps->active_tc_bitmap); - ice_debug(hw, ICE_DBG_INIT, - "%s: maxtc = %d\n", prefix, caps->maxtc); - break; - case ICE_AQC_CAPS_RSS: - caps->rss_table_size = number; - caps->rss_table_entry_width = logical_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_size = %d\n", prefix, - caps->rss_table_size); - ice_debug(hw, ICE_DBG_INIT, - "%s: rss_table_entry_width = %d\n", prefix, - caps->rss_table_entry_width); - break; - case ICE_AQC_CAPS_RXQS: - caps->num_rxq = number; - caps->rxq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_rxq = %d\n", prefix, - caps->num_rxq); - ice_debug(hw, ICE_DBG_INIT, - "%s: rxq_first_id = %d\n", prefix, - caps->rxq_first_id); - break; - case ICE_AQC_CAPS_TXQS: - caps->num_txq = number; - caps->txq_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_txq = %d\n", prefix, - caps->num_txq); - ice_debug(hw, ICE_DBG_INIT, - "%s: txq_first_id = %d\n", prefix, - caps->txq_first_id); - break; - case ICE_AQC_CAPS_MSIX: - caps->num_msix_vectors = number; - caps->msix_vector_first_id = phys_id; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_msix_vectors = %d\n", prefix, - caps->num_msix_vectors); - ice_debug(hw, ICE_DBG_INIT, - "%s: msix_vector_first_id = %d\n", prefix, - caps->msix_vector_first_id); - break; - case ICE_AQC_CAPS_FD: - if (dev_p) { - dev_p->num_flow_director_fltr = number; - ice_debug(hw, ICE_DBG_INIT, - "%s: num_flow_director_fltr = %d\n", - prefix, - dev_p->num_flow_director_fltr); - } - if (func_p) { - u32 reg_val, val; - - reg_val = rd32(hw, GLQF_FD_SIZE); - val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >> - GLQF_FD_SIZE_FD_GSIZE_S; - func_p->fd_fltr_guar = - ice_get_num_per_func(hw, val); - val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >> - GLQF_FD_SIZE_FD_BSIZE_S; - func_p->fd_fltr_best_effort = val; - ice_debug(hw, ICE_DBG_INIT, - "%s: fd_fltr_guar = %d\n", - prefix, func_p->fd_fltr_guar); - ice_debug(hw, ICE_DBG_INIT, - "%s: fd_fltr_best_effort = %d\n", - prefix, func_p->fd_fltr_best_effort); - } - break; - case ICE_AQC_CAPS_MAX_MTU: - caps->max_mtu = number; - ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", - prefix, caps->max_mtu); - break; - default: - ice_debug(hw, ICE_DBG_INIT, - "%s: unknown capability[%d]: 0x%x\n", prefix, - i, cap); - break; - } - } - - /* 
Re-calculate capabilities that are dependent on the number of - * physical ports; i.e. some features are not supported or function - * differently on devices with more than 4 ports. - */ - if (hw->dev_caps.num_funcs > 4) { - /* Max 4 TCs per port */ - caps->maxtc = 4; - ice_debug(hw, ICE_DBG_INIT, - "%s: maxtc = %d (based on #ports)\n", prefix, - caps->maxtc); - } } /** From 81aed6475dbe90b45e8da4b476a4c985d4890042 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 9 Jul 2020 09:16:02 -0700 Subject: [PATCH 1530/2056] ice: split ice_discover_caps into two functions Using the new ice_aq_list_caps and ice_parse_(dev|func)_caps functions, replace ice_discover_caps with two functions that each take a pointer to the dev_caps and func_caps structures respectively. This makes the side effect of updating the hw->dev_caps and hw->func_caps obvious from reading the implementation of the function. Additionally, it opens the way for enabling reading of device capabilities outside of the initialization flow. By passing in a pointer, another caller will be able to read the capabilities without modifying the HW capabilities structures. As there are no other callers, it is safe to now remove ice_aq_discover_caps and ice_parse_caps. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 118 +++++++++----------- 1 file changed, 51 insertions(+), 67 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index c32b9100eec3..04e28090fef3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -52,7 +52,8 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw) * is returned in user specified buffer. Please interpret user specified * buffer as "manage_mac_read" response. * Response such as various MAC addresses are stored in HW struct (port.mac) - * ice_aq_discover_caps is expected to be called before this function is called. + * ice_discover_dev_caps is expected to be called before this function is + * called. */ static enum ice_status ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, @@ -2001,30 +2002,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, ice_recalc_port_limited_caps(hw, &dev_p->common_cap); } -/** - * ice_parse_caps - parse function/device capabilities - * @hw: pointer to the HW struct - * @buf: pointer to a buffer containing function/device capability records - * @cap_count: number of capability records in the list - * @opc: type of capabilities list to parse - * - * Helper function to parse function(0x000a)/device(0x000b) capabilities list. 
- */ -static void -ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, - enum ice_adminq_opc opc) -{ - if (!buf) - return; - - if (opc == ice_aqc_opc_list_dev_caps) - ice_parse_dev_caps(hw, &hw->dev_caps, buf, cap_count); - else if (opc == ice_aqc_opc_list_func_caps) - ice_parse_func_caps(hw, &hw->func_caps, buf, cap_count); - else - ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n"); -} - /** * ice_aq_list_caps - query function/device capabilities * @hw: pointer to the HW struct @@ -2068,47 +2045,18 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, } /** - * ice_aq_discover_caps - query function/device capabilities - * @hw: pointer to the HW struct - * @buf: a virtual buffer to hold the capabilities - * @buf_size: Size of the virtual buffer - * @cap_count: cap count needed if AQ err==ENOMEM - * @opc: capabilities type to discover - pass in the command opcode - * @cd: pointer to command details structure or NULL - * - * Get the function(0x000a)/device(0x000b) capabilities description from - * the firmware. - * - * NOTE: this function has the side effect of updating the hw->dev_caps or - * hw->func_caps by way of calling ice_parse_caps. - */ -static enum ice_status -ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, - enum ice_adminq_opc opc, struct ice_sq_cd *cd) -{ - u32 local_cap_count = 0; - enum ice_status status; - - status = ice_aq_list_caps(hw, buf, buf_size, &local_cap_count, - opc, cd); - if (!status) - ice_parse_caps(hw, buf, local_cap_count, opc); - else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM) - *cap_count = local_cap_count; - - return status; -} - -/** - * ice_discover_caps - get info about the HW + * ice_discover_dev_caps - Read and extract device capabilities * @hw: pointer to the hardware structure - * @opc: capabilities type to discover - pass in the command opcode + * @dev_caps: pointer to device capabilities structure + * + * Read the device capabilities and extract them into the dev_caps structure + * for later use. */ static enum ice_status -ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) +ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { enum ice_status status; - u32 cap_count; + u32 cap_count = 0; void *cbuf; cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); @@ -2121,8 +2069,44 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc) */ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); - status = ice_aq_discover_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, - opc, NULL); + status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, + ice_aqc_opc_list_dev_caps, NULL); + if (!status) + ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count); + kfree(cbuf); + + return status; +} + +/** + * ice_discover_func_caps - Read and extract function capabilities + * @hw: pointer to the hardware structure + * @func_caps: pointer to function capabilities structure + * + * Read the function capabilities and extract them into the func_caps structure + * for later use. + */ +static enum ice_status +ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps) +{ + enum ice_status status; + u32 cap_count = 0; + void *cbuf; + + cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!cbuf) + return ICE_ERR_NO_MEMORY; + + /* Although the driver doesn't know the number of capabilities the + * device will return, we can simply send a 4KB buffer, the maximum + * possible size that firmware can return. 
+ */ + cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem); + + status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count, + ice_aqc_opc_list_func_caps, NULL); + if (!status) + ice_parse_func_caps(hw, func_caps, cbuf, cap_count); kfree(cbuf); return status; @@ -2200,11 +2184,11 @@ enum ice_status ice_get_caps(struct ice_hw *hw) { enum ice_status status; - status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps); - if (!status) - status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps); + status = ice_discover_dev_caps(hw, &hw->dev_caps); + if (status) + return status; - return status; + return ice_discover_func_caps(hw, &hw->func_caps); } /** From 769c500dcc1edb6e65f84b2a7304589b748a3e35 Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Thu, 9 Jul 2020 09:16:03 -0700 Subject: [PATCH 1531/2056] ice: Add advanced power mgmt for WoL Add callbacks needed to support advanced power management for Wake on LAN. Also make ice_pf_state_is_nominal function available for all configurations not just CONFIG_PCI_IOV. Signed-off-by: Akeem G Abodunrin Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 3 + drivers/net/ethernet/intel/ice/ice_ethtool.c | 56 +++ .../net/ethernet/intel/ice/ice_hw_autogen.h | 9 + drivers/net/ethernet/intel/ice/ice_lib.c | 24 ++ drivers/net/ethernet/intel/ice/ice_lib.h | 2 + drivers/net/ethernet/intel/ice/ice_main.c | 363 +++++++++++++++++- drivers/net/ethernet/intel/ice/ice_nvm.c | 3 +- drivers/net/ethernet/intel/ice/ice_nvm.h | 1 + drivers/net/ethernet/intel/ice/ice_type.h | 1 + .../net/ethernet/intel/ice/ice_virtchnl_pf.c | 25 -- 10 files changed, 458 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 7486d010a619..2a0a69fe8b2b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -423,6 +423,8 @@ struct ice_pf { u16 empr_count; /* EMP reset count */ u16 pfr_count; /* PF reset count */ + u8 wol_ena : 1; /* software state of WoL */ + u32 wakeup_reason; /* last wakeup reason */ struct ice_hw_port_stats stats; struct ice_hw_port_stats stats_prev; struct ice_hw hw; @@ -568,6 +570,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset); void ice_print_link_msg(struct ice_vsi *vsi, bool isup); const char *ice_stat_str(enum ice_status stat_err); const char *ice_aq_str(enum ice_aq_err aq_err); +bool ice_is_wol_supported(struct ice_pf *pf); int ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add, bool is_tun); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 7066775769eb..c2291cf4dc6e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3322,6 +3322,58 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return 0; } +/** + * ice_get_wol - get current Wake on LAN configuration + * @netdev: network interface device structure + * @wol: Ethtool structure to retrieve WoL settings + */ +static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_pf *pf = np->vsi->back; + + if (np->vsi->type != ICE_VSI_PF) + netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n"); + + /* Get WoL settings based on the HW capability */ + if (ice_is_wol_supported(pf)) { + wol->supported = 
WAKE_MAGIC; + wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0; + } else { + wol->supported = 0; + wol->wolopts = 0; + } +} + +/** + * ice_set_wol - set Wake on LAN on supported device + * @netdev: network interface device structure + * @wol: Ethtool structure to set WoL + */ +static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + + if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf)) + return -EOPNOTSUPP; + + /* only magic packet is supported */ + if (wol->wolopts && wol->wolopts != WAKE_MAGIC) + return -EOPNOTSUPP; + + /* Set WoL only if there is a new value */ + if (pf->wol_ena != !!wol->wolopts) { + pf->wol_ena = !!wol->wolopts; + device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena); + netdev_dbg(netdev, "WoL magic packet %sabled\n", + pf->wol_ena ? "en" : "dis"); + } + + return 0; +} + enum ice_container_type { ICE_RX_CONTAINER, ICE_TX_CONTAINER, @@ -3805,6 +3857,8 @@ static const struct ethtool_ops ice_ethtool_ops = { .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, + .get_wol = ice_get_wol, + .set_wol = ice_set_wol, .get_msglevel = ice_get_msglevel, .set_msglevel = ice_set_msglevel, .self_test = ice_self_test, @@ -3847,6 +3901,8 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = { .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, + .get_wol = ice_get_wol, + .set_wol = ice_set_wol, .get_msglevel = ice_get_msglevel, .set_msglevel = ice_set_msglevel, .get_link = ethtool_op_get_link, diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1086c9f778b4..92e4abca62a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -367,6 +367,15 @@ #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) #define VSIQF_HKEY_MAX_INDEX 12 #define VSIQF_HLUT_MAX_INDEX 15 +#define PFPM_APM 0x000B8080 +#define PFPM_APM_APME_M BIT(0) +#define PFPM_WUFC 0x0009DC00 +#define PFPM_WUFC_MAG_M BIT(1) +#define PFPM_WUS 0x0009DB80 +#define PFPM_WUS_LNKC_M BIT(0) +#define PFPM_WUS_MAG_M BIT(1) +#define PFPM_WUS_MNG_M BIT(3) +#define PFPM_WUS_FW_RST_WK_M BIT(31) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) #define PRTRPB_RDPC 0x000AC260 diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8a4c7b8b95df..8f6a191839f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -1467,6 +1467,30 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi) vsi_num, ice_stat_str(status)); } +/** + * ice_pf_state_is_nominal - checks the PF for nominal state + * @pf: pointer to PF to check + * + * Check the PF's state for a collection of bits that would indicate + * the PF is in a state that would inhibit normal operation for + * driver functionality. 
+ * + * Returns true if PF is in a nominal state, false otherwise + */ +bool ice_pf_state_is_nominal(struct ice_pf *pf) +{ + DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; + + if (!pf) + return false; + + bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); + if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) + return false; + + return true; +} + /** * ice_update_eth_stats - Update VSI-specific ethernet statistics counters * @vsi: the VSI to be updated diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index d80e6afa4511..981f3a156c24 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -8,6 +8,8 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); +bool ice_pf_state_is_nominal(struct ice_pf *pf); + void ice_update_eth_stats(struct ice_vsi *vsi); int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a1cef089201a..794ec145d9cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1129,10 +1129,15 @@ static void ice_service_task_complete(struct ice_pf *pf) /** * ice_service_task_stop - stop service task and cancel works * @pf: board private structure + * + * Return 0 if the __ICE_SERVICE_DIS bit was not already set, + * 1 otherwise. */ -static void ice_service_task_stop(struct ice_pf *pf) +static int ice_service_task_stop(struct ice_pf *pf) { - set_bit(__ICE_SERVICE_DIS, pf->state); + int ret; + + ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state); if (pf->serv_tmr.function) del_timer_sync(&pf->serv_tmr); @@ -1140,6 +1145,7 @@ static void ice_service_task_stop(struct ice_pf *pf) cancel_work_sync(&pf->serv_task); clear_bit(__ICE_SERVICE_SCHED, pf->state); + return ret; } /** @@ -2940,6 +2946,27 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf) return 0; } +/** + * ice_is_wol_supported - get NVM state of WoL + * @pf: board private structure + * + * Check if WoL is supported based on the HW configuration. + * Returns true if NVM supports and enables WoL for this port, false otherwise + */ +bool ice_is_wol_supported(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u16 wol_ctrl; + + /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control + * word) indicates WoL is not supported on the corresponding PF ID. 
+ */ + if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl)) + return false; + + return !(BIT(hw->pf_id) & wol_ctrl); +} + /** * ice_vsi_recfg_qs - Change the number of queues on a VSI * @vsi: VSI being changed @@ -3287,6 +3314,33 @@ static void ice_request_fw(struct ice_pf *pf) release_firmware(firmware); } +/** + * ice_print_wake_reason - show the wake up cause in the log + * @pf: pointer to the PF struct + */ +static void ice_print_wake_reason(struct ice_pf *pf) +{ + u32 wus = pf->wakeup_reason; + const char *wake_str; + + /* if no wake event, nothing to print */ + if (!wus) + return; + + if (wus & PFPM_WUS_LNKC_M) + wake_str = "Link\n"; + else if (wus & PFPM_WUS_MAG_M) + wake_str = "Magic Packet\n"; + else if (wus & PFPM_WUS_MNG_M) + wake_str = "Management\n"; + else if (wus & PFPM_WUS_FW_RST_WK_M) + wake_str = "Firmware Reset\n"; + else + wake_str = "Unknown\n"; + + dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str); +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -3470,6 +3524,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) ice_verify_cacheline_size(pf); + /* Save wakeup reason register for later use */ + pf->wakeup_reason = rd32(hw, PFPM_WUS); + + /* check for a power management event */ + ice_print_wake_reason(pf); + + /* clear wake status, all bits */ + wr32(hw, PFPM_WUS, U32_MAX); + + /* Disable WoL at init, wait for user to enable */ + device_set_wakeup_enable(dev, false); + /* If no DDP driven features have to be setup, we are done with probe */ if (ice_is_safe_mode(pf)) goto probe_done; @@ -3514,9 +3580,72 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err_exit_unroll: ice_devlink_unregister(pf); pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); return err; } +/** + * ice_set_wake - enable or disable Wake on LAN + * @pf: pointer to the PF struct + * + * Simple helper for WoL control + */ +static void ice_set_wake(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool wol = pf->wol_ena; + + /* clear wake state, otherwise new wake events won't fire */ + wr32(hw, PFPM_WUS, U32_MAX); + + /* enable / disable APM wake up, no RMW needed */ + wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0); + + /* set magic packet filter enabled */ + wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0); +} + +/** + * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet + * @pf: pointer to the PF struct + * + * Issue firmware command to enable multicast magic wake, making + * sure that any locally administered address (LAA) is used for + * wake, and that PF reset doesn't undo the LAA. 
+ */ +static void ice_setup_mc_magic_wake(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 mac_addr[ETH_ALEN]; + struct ice_vsi *vsi; + u8 flags; + + if (!pf->wol_ena) + return; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return; + + /* Get current MAC address in case it's an LAA */ + if (vsi->netdev) + ether_addr_copy(mac_addr, vsi->netdev->dev_addr); + else + ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); + + flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN | + ICE_AQC_MAN_MAC_UPDATE_LAA_WOL | + ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP; + + status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL); + if (status) + dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); +} + /** * ice_remove - Device removal routine * @pdev: PCI device information struct @@ -3546,8 +3675,10 @@ static void ice_remove(struct pci_dev *pdev) mutex_destroy(&(&pf->hw)->fdir_fltr_lock); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); + ice_setup_mc_magic_wake(pf); ice_devlink_destroy_port(pf); ice_vsi_release_all(pf); + ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { if (!pf->vsi[i]) @@ -3567,8 +3698,230 @@ static void ice_remove(struct pci_dev *pdev) pci_wait_for_pending_transaction(pdev); ice_clear_interrupt_scheme(pf); pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); } +/** + * ice_shutdown - PCI callback for shutting down device + * @pdev: PCI device information struct + */ +static void ice_shutdown(struct pci_dev *pdev) +{ + struct ice_pf *pf = pci_get_drvdata(pdev); + + ice_remove(pdev); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, pf->wol_ena); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_PM +/** + * ice_prepare_for_shutdown - prep for PCI shutdown + * @pf: board private structure + * + * Inform or close all dependent features in prep for PCI device shutdown + */ +static void ice_prepare_for_shutdown(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + u32 v; + + /* Notify VFs of impending reset */ + if (ice_check_sq_alive(hw, &hw->mailboxq)) + ice_vc_notify_reset(pf); + + dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n"); + + /* disable the VSIs and their queues that are not already DOWN */ + ice_pf_dis_all_vsi(pf, false); + + ice_for_each_vsi(pf, v) + if (pf->vsi[v]) + pf->vsi[v]->vsi_num = 0; + + ice_shutdown_all_ctrlq(hw); +} + +/** + * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme + * @pf: board private structure to reinitialize + * + * This routine reinitialize interrupt scheme that was cleared during + * power management suspend callback. + * + * This should be called during resume routine to re-allocate the q_vectors + * and reacquire interrupts. + */ +static int ice_reinit_interrupt_scheme(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int ret, v; + + /* Since we clear MSIX flag during suspend, we need to + * set it back during resume... 
+ */ + + ret = ice_init_interrupt_scheme(pf); + if (ret) { + dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); + return ret; + } + + /* Remap vectors and rings, after successful re-init interrupts */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + + ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); + if (ret) + goto err_reinit; + ice_vsi_map_rings_to_vectors(pf->vsi[v]); + } + + ret = ice_req_irq_msix_misc(pf); + if (ret) { + dev_err(dev, "Setting up misc vector failed after device suspend %d\n", + ret); + goto err_reinit; + } + + return 0; + +err_reinit: + while (v--) + if (pf->vsi[v]) + ice_vsi_free_q_vectors(pf->vsi[v]); + + return ret; +} + +/** + * ice_suspend + * @dev: generic device information structure + * + * Power Management callback to quiesce the device and prepare + * for D3 transition. + */ +static int ice_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct ice_pf *pf; + int disabled, v; + + pf = pci_get_drvdata(pdev); + + if (!ice_pf_state_is_nominal(pf)) { + dev_err(dev, "Device is not ready, no need to suspend it\n"); + return -EBUSY; + } + + /* Stop watchdog tasks until resume completion. + * Even though it is most likely that the service task is + * disabled if the device is suspended or down, the service task's + * state is controlled by a different state bit, and we should + * store and honor whatever state that bit is in at this point. + */ + disabled = ice_service_task_stop(pf); + + /* Already suspended?, then there is nothing to do */ + if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) { + if (!disabled) + ice_service_task_restart(pf); + return 0; + } + + if (test_bit(__ICE_DOWN, pf->state) || + ice_is_reset_in_progress(pf->state)) { + dev_err(dev, "can't suspend device in reset or already down\n"); + if (!disabled) + ice_service_task_restart(pf); + return 0; + } + + ice_setup_mc_magic_wake(pf); + + ice_prepare_for_shutdown(pf); + + ice_set_wake(pf); + + /* Free vectors, clear the interrupt scheme and release IRQs + * for proper hibernation, especially with large number of CPUs. + * Otherwise hibernation might fail when mapping all the vectors back + * to CPU0. + */ + ice_free_irq_msix_misc(pf); + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + ice_vsi_free_q_vectors(pf->vsi[v]); + } + ice_clear_interrupt_scheme(pf); + + pci_wake_from_d3(pdev, pf->wol_ena); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +/** + * ice_resume - PM callback for waking up from D3 + * @dev: generic device information structure + */ +static int ice_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + enum ice_reset_req reset_type; + struct ice_pf *pf; + struct ice_hw *hw; + int ret; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (!pci_device_is_present(pdev)) + return -ENODEV; + + ret = pci_enable_device_mem(pdev); + if (ret) { + dev_err(dev, "Cannot enable device after suspend\n"); + return ret; + } + + pf = pci_get_drvdata(pdev); + hw = &pf->hw; + + pf->wakeup_reason = rd32(hw, PFPM_WUS); + ice_print_wake_reason(pf); + + /* We cleared the interrupt scheme when we suspended, so we need to + * restore it now to resume device functionality. 
+ */ + ret = ice_reinit_interrupt_scheme(pf); + if (ret) + dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); + + clear_bit(__ICE_DOWN, pf->state); + /* Now perform PF reset and rebuild */ + reset_type = ICE_RESET_PFR; + /* re-enable service task for reset, but allow reset to schedule it */ + clear_bit(__ICE_SERVICE_DIS, pf->state); + + if (ice_schedule_reset(pf, reset_type)) + dev_err(dev, "Reset during resume failed.\n"); + + clear_bit(__ICE_SUSPENDED, pf->state); + ice_service_task_restart(pf); + + /* Restart the service task */ + mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); + + return 0; +} +#endif /* CONFIG_PM */ + /** * ice_pci_err_detected - warning that PCI error has been detected * @pdev: PCI device information struct @@ -3734,6 +4087,8 @@ static const struct pci_device_id ice_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); +static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); + static const struct pci_error_handlers ice_pci_err_handler = { .error_detected = ice_pci_err_detected, .slot_reset = ice_pci_err_slot_reset, @@ -3747,6 +4102,10 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, +#ifdef CONFIG_PM + .driver.pm = &ice_pm_ops, +#endif /* CONFIG_PM */ + .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, .err_handler = &ice_pci_err_handler }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index b049c1c30c88..b1172a67573b 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -172,8 +172,7 @@ void ice_release_nvm(struct ice_hw *hw) * * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq. 
*/ -static enum ice_status -ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) { enum ice_status status; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 165eda07b93d..e3993c04c97a 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -13,4 +13,5 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); enum ice_status ice_init_nvm(struct ice_hw *hw); +enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); #endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index c1ad8622e65c..21fe9221a6f3 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -709,6 +709,7 @@ struct ice_hw_port_stats { /* Checksum and Shadow RAM pointers */ #define ICE_SR_BOOT_CFG_PTR 0x132 +#define ICE_SR_NVM_WOL_CFG 0x19 #define ICE_NVM_OROM_VER_OFF 0x02 #define ICE_SR_PBA_BLOCK_PTR 0x16 #define ICE_SR_NVM_DEV_STARTER_VER 0x18 diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 16a2f2526ccc..9df5ceb26ab9 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -1592,31 +1592,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) return ret; } -/** - * ice_pf_state_is_nominal - checks the PF for nominal state - * @pf: pointer to PF to check - * - * Check the PF's state for a collection of bits that would indicate - * the PF is in a state that would inhibit normal operation for - * driver functionality. - * - * Returns true if PF is in a nominal state. - * Returns false otherwise - */ -static bool ice_pf_state_is_nominal(struct ice_pf *pf) -{ - DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 }; - - if (!pf) - return false; - - bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS); - if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS)) - return false; - - return true; -} - /** * ice_pci_sriov_ena - Enable or change number of VFs * @pf: pointer to the PF structure From 2ffb60856ac857ecae3e87a69c0a977e4a608ae0 Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:04 -0700 Subject: [PATCH 1532/2056] ice: refactor FC functions Create a helper function for configuring requested flow control so that it can be utilized by other functions looking to configure flow control settings. Utilize the existing helper ice_copy_phy_caps_to_cfg() to copy a PHY capability to configuration instead of duplicating the code for it.
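In rough terms, the refactor isolates one reusable step: translate a requested FC mode into a pair of pause bits and splice them into an existing caps field. Below is a minimal, self-contained sketch of that shape only; the mode names and bit values are placeholders for illustration, not the driver's real ICE_FC_* or ICE_AQC_PHY_EN_*_LINK_PAUSE definitions.

/*
 * Illustrative sketch: rewrite only the pause-enable bits of a caps byte
 * according to the requested flow-control mode, leaving all other bits
 * untouched, so any caller holding a PHY config can reuse the helper.
 */
#include <stdint.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

#define EN_TX_PAUSE 0x01u        /* placeholder bit */
#define EN_RX_PAUSE 0x02u        /* placeholder bit */

/* Clear the old pause bits in *caps, then set the ones matching req_mode. */
static void cfg_phy_fc(uint8_t *caps, enum fc_mode req_mode)
{
        uint8_t pause_mask = 0;

        if (req_mode == FC_FULL || req_mode == FC_TX_PAUSE)
                pause_mask |= EN_TX_PAUSE;
        if (req_mode == FC_FULL || req_mode == FC_RX_PAUSE)
                pause_mask |= EN_RX_PAUSE;

        *caps &= (uint8_t)~(EN_TX_PAUSE | EN_RX_PAUSE);
        *caps |= pause_mask;
}

int main(void)
{
        uint8_t caps = 0xF0 | EN_TX_PAUSE;        /* pretend current PHY caps */

        cfg_phy_fc(&caps, FC_FULL);
        printf("caps after FC_FULL request: 0x%02x\n", (unsigned int)caps);
        return 0;
}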
Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 75 +++++++++++++-------- 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 04e28090fef3..18bfd9a47ee0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2515,28 +2515,19 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) } /** - * ice_set_fc - * @pi: port information structure - * @aq_failures: pointer to status code, specific to ice_set_fc routine - * @ena_auto_link_update: enable automatic link update - * - * Set the requested flow control mode. + * ice_cfg_phy_fc - Configure PHY FC data based on FC mode + * @cfg: PHY configuration data to set FC mode + * @req_mode: FC mode to configure */ -enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) +static enum ice_status +ice_cfg_phy_fc(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) { - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_aqc_get_phy_caps_data *pcaps; - enum ice_status status; u8 pause_mask = 0x0; - struct ice_hw *hw; - if (!pi) - return ICE_ERR_PARAM; - hw = pi->hw; - *aq_failures = ICE_SET_FC_AQ_FAIL_NONE; + if (!cfg) + return ICE_ERR_BAD_PTR; - switch (pi->fc.req_mode) { + switch (req_mode) { case ICE_FC_FULL: pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE; pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE; @@ -2551,6 +2542,38 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) break; } + /* clear the old pause settings */ + cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | + ICE_AQC_PHY_EN_RX_LINK_PAUSE); + + /* set the new capabilities */ + cfg->caps |= pause_mask; + + return 0; +} + +/** + * ice_set_fc + * @pi: port information structure + * @aq_failures: pointer to status code, specific to ice_set_fc routine + * @ena_auto_link_update: enable automatic link update + * + * Set the requested flow control mode. 
+ */ +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) +{ + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + struct ice_hw *hw; + + if (!pi) + return ICE_ERR_BAD_PTR; + + *aq_failures = 0; + hw = pi->hw; + pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return ICE_ERR_NO_MEMORY; @@ -2563,12 +2586,12 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) goto out; } - /* clear the old pause settings */ - cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE | - ICE_AQC_PHY_EN_RX_LINK_PAUSE); + ice_copy_phy_caps_to_cfg(pcaps, &cfg); - /* set the new capabilities */ - cfg.caps |= pause_mask; + /* Configure the set PHY data */ + status = ice_cfg_phy_fc(&cfg, pi->fc.req_mode); + if (status) + goto out; /* If the capabilities have changed, then set the new config */ if (cfg.caps != pcaps->caps) { @@ -2577,13 +2600,6 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) /* Auto restart link so settings take effect */ if (ena_auto_link_update) cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - /* Copy over all the old settings */ - cfg.phy_type_high = pcaps->phy_type_high; - cfg.phy_type_low = pcaps->phy_type_low; - cfg.low_power_ctrl = pcaps->low_power_ctrl; - cfg.eee_cap = pcaps->eee_cap; - cfg.eeer_value = pcaps->eeer_value; - cfg.link_fec_opt = pcaps->link_fec_options; status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); if (status) { @@ -2629,6 +2645,7 @@ ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, if (!caps || !cfg) return; + memset(cfg, 0, sizeof(*cfg)); cfg->phy_type_low = caps->phy_type_low; cfg->phy_type_high = caps->phy_type_high; cfg->caps = caps->caps; From 61cf42e71abcb13950c6cc32805defb0fb3b658e Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:05 -0700 Subject: [PATCH 1533/2056] ice: move auto FEC checks into ice_cfg_phy_fec() The call to ice_cfg_phy_fec() requires the caller to perform certain actions before calling it. Instead of imposing these preconditions move the operations into the function and perform them ourselves. Also, fix some style issues in nearby touched code. Signed-off-by: Paul Greenwalt Signed-off-by: Chinh T Cao Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 43 ++++++++++++++++---- drivers/net/ethernet/intel/ice/ice_common.h | 5 ++- drivers/net/ethernet/intel/ice/ice_ethtool.c | 31 ++------------ 3 files changed, 40 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 18bfd9a47ee0..59890eeb8339 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2657,25 +2657,41 @@ ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, /** * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode + * @pi: port information structure * @cfg: PHY configuration data to set FEC mode * @fec: FEC mode to configure - * - * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC - * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps - * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling. 
*/ -void -ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) +enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec) { + struct ice_aqc_get_phy_caps_data *pcaps; + enum ice_status status; + + if (!pi || !cfg) + return ICE_ERR_BAD_PTR; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return ICE_ERR_NO_MEMORY; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) + goto out; + + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt = pcaps->link_fec_options; + switch (fec) { case ICE_FEC_BASER: /* Clear RS bits, and AND BASE-R ability * bits and OR request bits. */ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | - ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | - ICE_AQC_PHY_FEC_25G_KR_REQ; + ICE_AQC_PHY_FEC_25G_KR_REQ; break; case ICE_FEC_RS: /* Clear BASE-R bits, and AND RS ability @@ -2683,7 +2699,7 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) */ cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN; cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ | - ICE_AQC_PHY_FEC_25G_RS_544_REQ; + ICE_AQC_PHY_FEC_25G_RS_544_REQ; break; case ICE_FEC_NONE: /* Clear all FEC option bits. */ @@ -2692,8 +2708,17 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) case ICE_FEC_AUTO: /* AND auto FEC bit, and all caps bits. */ cfg->caps &= ICE_AQC_PHY_CAPS_MASK; + cfg->link_fec_opt |= pcaps->link_fec_options; + break; + default: + status = ICE_ERR_PARAM; break; } + +out: + kfree(pcaps); + + return status; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d1238f43e872..2f912c671e6b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -103,8 +103,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); -void -ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec); +enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec); void ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c2291cf4dc6e..4567b0175712 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -968,7 +968,6 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) struct ice_aqc_set_phy_cfg_data config = { 0 }; struct ice_aqc_get_phy_caps_data *caps; struct ice_vsi *vsi = np->vsi; - u8 sw_cfg_caps, sw_cfg_fec; struct ice_port_info *pi; enum ice_status status; int err = 0; @@ -997,36 +996,12 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) /* Copy SW configuration returned from PHY caps to PHY config */ ice_copy_phy_caps_to_cfg(caps, &config); - sw_cfg_caps = caps->caps; - sw_cfg_fec = caps->link_fec_options; - /* Get toloplogy caps, then copy PHY FEC topoloy caps to PHY config */ - memset(caps, 0, sizeof(*caps)); + ice_cfg_phy_fec(pi, &config, req_fec); + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - status = ice_aq_get_phy_caps(pi, false, 
ICE_AQC_REPORT_TOPO_CAP, - caps, NULL); - if (status) { + if (ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL)) err = -EAGAIN; - goto done; - } - - config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC); - config.link_fec_opt = caps->link_fec_options; - - ice_cfg_phy_fec(&config, req_fec); - - /* If FEC mode has changed, then set PHY configuration and enable AN. */ - if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) != - (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) || - config.link_fec_opt != sw_cfg_fec) { - if (caps->caps & ICE_AQC_PHY_AN_MODE) - config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - - status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL); - - if (status) - err = -EAGAIN; - } done: kfree(caps); From 5df5661a1387e829c901d009cdd1fccc376cdb74 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 23 Jul 2020 01:43:12 +0300 Subject: [PATCH 1534/2056] net: dsa: stop overriding master's ndo_get_phys_port_name The purpose of this override is to give the user an indication of what the number of the CPU port is (in DSA, the CPU port is a hardware implementation detail and not a network interface capable of traffic). However, it has always failed (by design) at providing this information to the user in a reliable fashion. Prior to commit 3369afba1e46 ("net: Call into DSA netdevice_ops wrappers"), the behavior was to only override this callback if it was not provided by the DSA master. That was its first failure: if the DSA master itself was a DSA port or a switchdev, then the user would not see the number of the CPU port in /sys/class/net/eth0/phys_port_name, but the number of the DSA master port within its respective physical switch. But that was actually ok in a way. The commit mentioned above changed that behavior, and now overrides the master's ndo_get_phys_port_name unconditionally. That comes with problems of its own, which are worse in a way. The idea is that it's typical for switchdev users to have udev rules for consistent interface naming. These are based, among other things, on the phys_port_name attribute. If we let the DSA switch at the bottom to start randomly overriding ndo_get_phys_port_name with its own CPU port, we basically lose any predictability in interface naming, or even uniqueness, for that matter. So, there are reasons to let DSA override the master's callback (to provide a consistent interface, a number which has a clear meaning and must not be interpreted according to context), and there are reasons to not let DSA override it (it breaks udev matching for the DSA master). But, there is an alternative method for users to retrieve the number of the CPU port of each DSA switch in the system: $ devlink port pci/0000:00:00.5/0: type eth netdev swp0 flavour physical port 0 pci/0000:00:00.5/2: type eth netdev swp2 flavour physical port 2 pci/0000:00:00.5/4: type notset flavour cpu port 4 spi/spi2.0/0: type eth netdev sw0p0 flavour physical port 0 spi/spi2.0/1: type eth netdev sw0p1 flavour physical port 1 spi/spi2.0/2: type eth netdev sw0p2 flavour physical port 2 spi/spi2.0/4: type notset flavour cpu port 4 spi/spi2.1/0: type eth netdev sw1p0 flavour physical port 0 spi/spi2.1/1: type eth netdev sw1p1 flavour physical port 1 spi/spi2.1/2: type eth netdev sw1p2 flavour physical port 2 spi/spi2.1/3: type eth netdev sw1p3 flavour physical port 3 spi/spi2.1/4: type notset flavour cpu port 4 So remove this duplicated, unreliable and troublesome method. 
From this patch on, the phys_port_name attribute of the DSA master will only contain information about itself (if at all). If the users need reliable information about the CPU port they're probably using devlink anyway. Signed-off-by: Vladimir Oltean Acked-by: florian Fainelli Signed-off-by: David S. Miller --- include/net/dsa.h | 23 ----------------------- net/core/dev.c | 5 ----- net/dsa/master.c | 12 ------------ 3 files changed, 40 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index f1b63d06d132..75c8fac82017 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -94,8 +94,6 @@ struct dsa_device_ops { struct dsa_netdevice_ops { int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - int (*ndo_get_phys_port_name)(struct net_device *dev, char *name, - size_t len); }; #define DSA_TAG_DRIVER_ALIAS "dsa_tag-" @@ -719,33 +717,12 @@ static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, return ops->ndo_do_ioctl(dev, ifr, cmd); } - -static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - const struct dsa_netdevice_ops *ops; - int err; - - err = __dsa_netdevice_ops_check(dev); - if (err) - return err; - - ops = dev->dsa_ptr->netdev_ops; - - return ops->ndo_get_phys_port_name(dev, name, len); -} #else static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { return -EOPNOTSUPP; } - -static inline int dsa_ndo_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - return -EOPNOTSUPP; -} #endif void dsa_unregister_switch(struct dsa_switch *ds); diff --git a/net/core/dev.c b/net/core/dev.c index 316349f6cea5..a986b07ea845 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -98,7 +98,6 @@ #include #include #include -#include #include #include #include @@ -8605,10 +8604,6 @@ int dev_get_phys_port_name(struct net_device *dev, const struct net_device_ops *ops = dev->netdev_ops; int err; - err = dsa_ndo_get_phys_port_name(dev, name, len); - if (err == 0 || err != -EOPNOTSUPP) - return err; - if (ops->ndo_get_phys_port_name) { err = ops->ndo_get_phys_port_name(dev, name, len); if (err != -EOPNOTSUPP) diff --git a/net/dsa/master.c b/net/dsa/master.c index 0a90911ae31b..61615ebc70e9 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c @@ -186,17 +186,6 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, } } -static int dsa_master_get_phys_port_name(struct net_device *dev, - char *name, size_t len) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - - if (snprintf(name, len, "p%d", cpu_dp->index) >= len) - return -EINVAL; - - return 0; -} - static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -228,7 +217,6 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static const struct dsa_netdevice_ops dsa_netdev_ops = { .ndo_do_ioctl = dsa_master_ioctl, - .ndo_get_phys_port_name = dsa_master_get_phys_port_name, }; static int dsa_master_ethtool_setup(struct net_device *dev) From 1a3571b5938cfb069903262c4821affeeeb7fe23 Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:06 -0700 Subject: [PATCH 1535/2056] ice: restore PHY settings on media insertion After the transition from no media to media FW will clear the set-phy-cfg data set by the user. Save initial PHY settings and any settings later requested by the user and use that data to restore PHY settings on media insertion. 
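The mechanism amounts to cache-and-replay: remember what the user last asked for, and re-apply it once media is present instead of accepting the firmware defaults. A minimal sketch of that idea follows; the types and function names are illustrative stand-ins, not the driver's real structures or API.

/*
 * Illustrative sketch: cache the most recent user-requested PHY settings and
 * replay them when a media-insertion event is handled, since firmware resets
 * the PHY configuration at that point.
 */
#include <stdbool.h>
#include <stdio.h>

struct user_phy_cfg {
        int fc_mode;              /* last requested flow control mode */
        int fec_mode;             /* last requested FEC mode */
        unsigned long speeds;     /* last requested speed bitmap */
        bool valid;
};

static struct user_phy_cfg cached;

/* Call on every user request (e.g. from an ethtool handler). */
static void cache_user_request(int fc, int fec, unsigned long speeds)
{
        cached.fc_mode = fc;
        cached.fec_mode = fec;
        cached.speeds = speeds;
        cached.valid = true;
}

/* Call from the media-insertion path, where firmware has reset the PHY. */
static void restore_user_request(void)
{
        if (!cached.valid)
                return;        /* nothing cached yet; keep the defaults */
        printf("re-applying fc=%d fec=%d speeds=0x%lx\n",
               cached.fc_mode, cached.fec_mode, cached.speeds);
}

int main(void)
{
        cache_user_request(3, 1, 0x7ff);
        restore_user_request();        /* would run after a media event */
        return 0;
}

In the patch itself the cached request is kept alongside the port's PHY data (the curr_user_* fields added below) and is replayed when media insertion is detected.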
Since PHY configuration is now being stored, replace calls that were calling FW to get the configuration with the saved copy. Signed-off-by: Paul Greenwalt Signed-off-by: Chinh T Cao Signed-off-by: Paul M Stillwell Jr Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 4 + .../net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 145 ++++++++- drivers/net/ethernet/intel/ice/ice_common.h | 14 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 132 ++++---- drivers/net/ethernet/intel/ice/ice_main.c | 308 ++++++++++++++++-- drivers/net/ethernet/intel/ice/ice_type.h | 21 ++ 7 files changed, 524 insertions(+), 101 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 2a0a69fe8b2b..132abfd068df 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -222,6 +222,7 @@ enum ice_state { __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ + __ICE_PHY_INIT_COMPLETE, __ICE_STATE_NBITS /* must be last */ }; @@ -437,6 +438,9 @@ struct ice_pf { u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; u32 sw_int_count; + + __le64 nvm_phy_type_lo; /* NVM PHY type low */ + __le64 nvm_phy_type_hi; /* NVM PHY type high */ }; struct ice_netdev_priv { diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 99c39249613a..352f3b278698 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1147,6 +1147,7 @@ struct ice_aqc_get_link_status_data { #define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2 #define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3 __le16 link_speed; +#define ICE_AQ_LINK_SPEED_M 0x7FF #define ICE_AQ_LINK_SPEED_10MB BIT(0) #define ICE_AQ_LINK_SPEED_100MB BIT(1) #define ICE_AQ_LINK_SPEED_1000MB BIT(2) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 59890eeb8339..803b894472ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2424,7 +2424,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, /** * ice_aq_set_phy_cfg * @hw: pointer to the HW struct - * @lport: logical port number + * @pi: port info structure of the interested logical port * @cfg: structure with PHY configuration data to be set * @cd: pointer to command details structure or NULL * @@ -2434,7 +2434,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, * parameters. This status will be indicated by the command response (0x0601). 
*/ enum ice_status -ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, +ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { struct ice_aq_desc desc; @@ -2453,7 +2453,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, } ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); - desc.params.set_phy.lport_num = lport; + desc.params.set_phy.lport_num = pi->lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", @@ -2471,6 +2471,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) status = 0; + if (!status) + pi->phy.curr_user_phy_cfg = *cfg; + return status; } @@ -2514,17 +2517,99 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) return status; } +/** + * ice_cache_phy_user_req + * @pi: port information structure + * @cache_data: PHY logging data + * @cache_mode: PHY logging mode + * + * Log the user request on (FC, FEC, SPEED) for later use. + */ +static void +ice_cache_phy_user_req(struct ice_port_info *pi, + struct ice_phy_cache_mode_data cache_data, + enum ice_phy_cache_mode cache_mode) +{ + if (!pi) + return; + + switch (cache_mode) { + case ICE_FC_MODE: + pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req; + break; + case ICE_SPEED_MODE: + pi->phy.curr_user_speed_req = + cache_data.data.curr_user_speed_req; + break; + case ICE_FEC_MODE: + pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req; + break; + default: + break; + } +} + +/** + * ice_caps_to_fc_mode + * @caps: PHY capabilities + * + * Convert PHY FC capabilities to ice FC mode + */ +enum ice_fc_mode ice_caps_to_fc_mode(u8 caps) +{ + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE && + caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_FULL; + + if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE) + return ICE_FC_TX_PAUSE; + + if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE) + return ICE_FC_RX_PAUSE; + + return ICE_FC_NONE; +} + +/** + * ice_caps_to_fec_mode + * @caps: PHY capabilities + * @fec_options: Link FEC options + * + * Convert PHY FEC capabilities to ice FEC mode + */ +enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options) +{ + if (caps & ICE_AQC_PHY_EN_AUTO_FEC) + return ICE_FEC_AUTO; + + if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN | + ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ | + ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN | + ICE_AQC_PHY_FEC_25G_KR_REQ)) + return ICE_FEC_BASER; + + if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ | + ICE_AQC_PHY_FEC_25G_RS_544_REQ | + ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)) + return ICE_FEC_RS; + + return ICE_FEC_NONE; +} + /** * ice_cfg_phy_fc - Configure PHY FC data based on FC mode + * @pi: port information structure * @cfg: PHY configuration data to set FC mode * @req_mode: FC mode to configure */ -static enum ice_status -ice_cfg_phy_fc(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) +enum ice_status +ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fc_mode req_mode) { + struct ice_phy_cache_mode_data cache_data; u8 pause_mask = 0x0; - if (!cfg) + if (!pi || !cfg) return ICE_ERR_BAD_PTR; switch (req_mode) { @@ -2549,6 +2634,10 @@ ice_cfg_phy_fc(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) /* set the new capabilities */ cfg->caps |= pause_mask; + /* Cache user FC request */ + cache_data.data.curr_user_fc_req = req_mode; + ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE); + return 0; } @@ -2563,12 +2652,12 @@ 
ice_cfg_phy_fc(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fc_mode req_mode) enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { - struct ice_aqc_set_phy_cfg_data cfg = { 0 }; + struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; enum ice_status status; struct ice_hw *hw; - if (!pi) + if (!pi || !aq_failures) return ICE_ERR_BAD_PTR; *aq_failures = 0; @@ -2589,7 +2678,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) ice_copy_phy_caps_to_cfg(pcaps, &cfg); /* Configure the set PHY data */ - status = ice_cfg_phy_fc(&cfg, pi->fc.req_mode); + status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); if (status) goto out; @@ -2601,7 +2690,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) if (ena_auto_link_update) cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL); + status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL); if (status) { *aq_failures = ICE_SET_FC_AQ_FAIL_SET; goto out; @@ -2630,6 +2719,42 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) return status; } +/** + * ice_phy_caps_equals_cfg + * @phy_caps: PHY capabilities + * @phy_cfg: PHY configuration + * + * Helper function to determine if PHY capabilities matches PHY + * configuration + */ +bool +ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, + struct ice_aqc_set_phy_cfg_data *phy_cfg) +{ + u8 caps_mask, cfg_mask; + + if (!phy_caps || !phy_cfg) + return false; + + /* These bits are not common between capabilities and configuration. + * Do not use them to determine equality. + */ + caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE | + ICE_AQC_GET_PHY_EN_MOD_QUAL); + cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + if (phy_caps->phy_type_low != phy_cfg->phy_type_low || + phy_caps->phy_type_high != phy_cfg->phy_type_high || + ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || + phy_caps->low_power_ctrl != phy_cfg->low_power_ctrl || + phy_caps->eee_cap != phy_cfg->eee_cap || + phy_caps->eeer_value != phy_cfg->eeer_value || + phy_caps->link_fec_options != phy_cfg->link_fec_opt) + return false; + + return true; +} + /** * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data * @caps: PHY ability structure to copy date from diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 2f912c671e6b..5246f6138506 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -98,18 +98,26 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status -ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, +ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); +enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); enum ice_status ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update); enum ice_status -ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, - enum ice_fec_mode fec); +ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fc_mode fc); +bool +ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, + struct 
ice_aqc_set_phy_cfg_data *cfg); void ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); enum ice_status +ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, + enum ice_fec_mode fec); +enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 4567b0175712..a20474208b7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -966,11 +966,8 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_aqc_set_phy_cfg_data config = { 0 }; - struct ice_aqc_get_phy_caps_data *caps; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; - enum ice_status status; - int err = 0; pi = vsi->port_info; if (!pi) @@ -982,30 +979,26 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec) return -EOPNOTSUPP; } - /* Get last SW configuration */ - caps = kzalloc(sizeof(*caps), GFP_KERNEL); - if (!caps) - return -ENOMEM; + /* Proceed only if requesting different FEC mode */ + if (pi->phy.curr_user_fec_req == req_fec) + return 0; - status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, - caps, NULL); - if (status) { - err = -EAGAIN; - goto done; - } - - /* Copy SW configuration returned from PHY caps to PHY config */ - ice_copy_phy_caps_to_cfg(caps, &config); + /* Copy the current user PHY configuration. The current user PHY + * configuration is initialized during probe from PHY capabilities + * software mode, and updated on set PHY configuration. 
+ */ + memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config)); ice_cfg_phy_fec(pi, &config, req_fec); config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - if (ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL)) - err = -EAGAIN; + if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL)) + return -EAGAIN; -done: - kfree(caps); - return err; + /* Save requested FEC config */ + pi->phy.curr_user_fec_req = req_fec; + + return 0; } /** @@ -2102,10 +2095,10 @@ static int ice_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *ks) { - u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; struct ice_netdev_priv *np = netdev_priv(netdev); struct ethtool_link_ksettings safe_ks, copy_ks; struct ice_aqc_get_phy_caps_data *abilities; + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT; u16 adv_link_speed, curr_link_speed, idx; struct ice_aqc_set_phy_cfg_data config; struct ice_pf *pf = np->vsi->back; @@ -2137,6 +2130,18 @@ ice_set_link_ksettings(struct net_device *netdev, p->phy.link_info.link_info & ICE_AQ_LINK_UP) return -EOPNOTSUPP; + abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); + if (!abilities) + return -ENOMEM; + + /* Get the PHY capabilities based on media */ + status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP, + abilities, NULL); + if (status) { + err = -EAGAIN; + goto done; + } + /* copy the ksettings to copy_ks to avoid modifying the original */ memcpy(©_ks, ks, sizeof(copy_ks)); @@ -2153,8 +2158,10 @@ ice_set_link_ksettings(struct net_device *netdev, */ if (!bitmap_subset(copy_ks.link_modes.advertising, safe_ks.link_modes.supported, - __ETHTOOL_LINK_MODE_MASK_NBITS)) - return -EINVAL; + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + err = -EINVAL; + goto done; + } /* get our own copy of the bits to check against */ memset(&safe_ks, 0, sizeof(safe_ks)); @@ -2171,33 +2178,27 @@ ice_set_link_ksettings(struct net_device *netdev, /* If copy_ks.base and safe_ks.base are not the same now, then they are * trying to set something that we do not support. */ - if (memcmp(©_ks.base, &safe_ks.base, sizeof(copy_ks.base))) - return -EOPNOTSUPP; - - while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { - timeout--; - if (!timeout) - return -EBUSY; - usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); - } - - abilities = kzalloc(sizeof(*abilities), GFP_KERNEL); - if (!abilities) - return -ENOMEM; - - /* Get the current PHY config */ - status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, - NULL); - if (status) { - err = -EAGAIN; + if (memcmp(©_ks.base, &safe_ks.base, sizeof(copy_ks.base))) { + err = -EOPNOTSUPP; goto done; } - /* Copy abilities to config in case autoneg is not set below */ - memset(&config, 0, sizeof(config)); - config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; - if (abilities->caps & ICE_AQC_PHY_AN_MODE) - config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) { + err = -EBUSY; + goto done; + } + usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); + } + + /* Copy the current user PHY configuration. The current user PHY + * configuration is initialized during probe from PHY capabilities + * software mode, and updated on set PHY configuration. 
+ */ + memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config)); + + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Check autoneg */ err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, @@ -2232,26 +2233,30 @@ ice_set_link_ksettings(struct net_device *netdev, goto done; } - /* copy over the rest of the abilities */ - config.low_power_ctrl = abilities->low_power_ctrl; - config.eee_cap = abilities->eee_cap; - config.eeer_value = abilities->eeer_value; - config.link_fec_opt = abilities->link_fec_options; - /* save the requested speeds */ p->phy.link_info.req_speeds = adv_link_speed; /* set link and auto negotiation so changes take effect */ config.caps |= ICE_AQ_PHY_ENA_LINK; - if (phy_type_low || phy_type_high) { - config.phy_type_high = cpu_to_le64(phy_type_high) & - abilities->phy_type_high; - config.phy_type_low = cpu_to_le64(phy_type_low) & - abilities->phy_type_low; - } else { + /* check if there is a PHY type for the requested advertised speed */ + if (!(phy_type_low || phy_type_high)) { + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); + err = -EAGAIN; + goto done; + } + + /* intersect requested advertised speed PHY types with media PHY types + * for set PHY configuration + */ + config.phy_type_high = cpu_to_le64(phy_type_high) & + abilities->phy_type_high; + config.phy_type_low = cpu_to_le64(phy_type_low) & + abilities->phy_type_low; + + if (!(config.phy_type_high || config.phy_type_low)) { + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); err = -EAGAIN; - netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n"); goto done; } @@ -2266,12 +2271,15 @@ ice_set_link_ksettings(struct net_device *netdev, } /* make the aq call */ - status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); + status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL); if (status) { netdev_info(netdev, "Set phy config failed,\n"); err = -EAGAIN; + goto done; } + /* Save speed request */ + p->phy.curr_user_speed_req = adv_link_speed; done: kfree(abilities); clear_bit(__ICE_CFG_BUSY, pf->state); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 794ec145d9cf..4c515bdf0c2c 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -796,10 +796,6 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", pi->lport); - /* if the old link up/down and speed is the same as the new */ - if (link_up == old_link && link_speed == old_link_speed) - return result; - vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; @@ -817,6 +813,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, } } + /* if the old link up/down and speed is the same as the new */ + if (link_up == old_link && link_speed == old_link_speed) + return result; + ice_dcb_rebuild(pf); ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); @@ -1380,25 +1380,23 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP)) goto out; - cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + /* Use the current user PHY configuration. 
The current user PHY + * configuration is initialized during probe from PHY capabilities + * software mode, and updated on set PHY configuration. + */ + cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL); if (!cfg) { retcode = -ENOMEM; goto out; } - cfg->phy_type_low = pcaps->phy_type_low; - cfg->phy_type_high = pcaps->phy_type_high; - cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; - cfg->low_power_ctrl = pcaps->low_power_ctrl; - cfg->eee_cap = pcaps->eee_cap; - cfg->eeer_value = pcaps->eeer_value; - cfg->link_fec_opt = pcaps->link_fec_options; + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; if (link_up) cfg->caps |= ICE_AQ_PHY_ENA_LINK; else cfg->caps &= ~ICE_AQ_PHY_ENA_LINK; - retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL); + retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); if (retcode) { dev_err(dev, "Failed to set phy config, VSI %d error %d\n", vsi->vsi_num, retcode); @@ -1412,8 +1410,219 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) } /** - * ice_check_media_subtask - Check for media; bring link up if detected. + * ice_init_nvm_phy_type - Initialize the NVM PHY type + * @pi: port info structure + * + * Initialize nvm_phy_type_[low|high] + */ +static int ice_init_nvm_phy_type(struct ice_port_info *pi) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_pf *pf = pi->hw->back; + enum ice_status status; + int err = 0; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps, + NULL); + + if (status) { + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); + err = -EIO; + goto out; + } + + pf->nvm_phy_type_hi = pcaps->phy_type_high; + pf->nvm_phy_type_lo = pcaps->phy_type_low; + +out: + kfree(pcaps); + return err; +} + +/** + * ice_init_phy_user_cfg - Initialize the PHY user configuration + * @pi: port info structure + * + * Initialize the current user PHY configuration, speed, FEC, and FC requested + * mode to default. The PHY defaults are from get PHY capabilities topology + * with media so call when media is first available. An error is returned if + * called when media is not available. The PHY initialization completed state is + * set here. + * + * These configurations are used when setting PHY + * configuration. The user PHY configuration is updated on set PHY + * configuration. 
Returns 0 on success, negative on failure + */ +static int ice_init_phy_user_cfg(struct ice_port_info *pi) +{ + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_phy_info *phy = &pi->phy; + struct ice_pf *pf = pi->hw->back; + enum ice_status status; + struct ice_vsi *vsi; + int err = 0; + + if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) + return -EIO; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EINVAL; + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) { + dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n"); + err = -EIO; + goto err_out; + } + + ice_copy_phy_caps_to_cfg(pcaps, &pi->phy.curr_user_phy_cfg); + /* initialize PHY using topology with media */ + phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, + pcaps->link_fec_options); + phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); + + phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; + set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); +err_out: + kfree(pcaps); + return err; +} + +/** + * ice_configure_phy - configure PHY + * @vsi: VSI of PHY + * + * Set the PHY configuration. If the current PHY configuration is the same as + * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise + * configure the based get PHY capabilities for topology with media. + */ +static int ice_configure_phy(struct ice_vsi *vsi) +{ + struct device *dev = ice_pf_to_dev(vsi->back); + struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_set_phy_cfg_data *cfg; + u64 phy_low = 0, phy_high = 0; + struct ice_port_info *pi; + enum ice_status status; + int err = 0; + + pi = vsi->port_info; + if (!pi) + return -EINVAL; + + /* Ensure we have media as we cannot configure a medialess port */ + if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) + return -EPERM; + + ice_print_topo_conflict(vsi); + + if (vsi->port_info->phy.link_info.topo_media_conflict == + ICE_AQ_LINK_TOPO_UNSUPP_MEDIA) + return -EPERM; + + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) + return ice_force_phys_link_state(vsi, true); + + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); + if (!pcaps) + return -ENOMEM; + + /* Get current PHY config */ + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + goto done; + } + + /* If PHY enable link is configured and configuration has not changed, + * there's nothing to do + */ + if (pcaps->caps & ICE_AQC_PHY_EN_LINK && + ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg)) + goto done; + + /* Use PHY topology as baseline for configuration */ + memset(pcaps, 0, sizeof(*pcaps)); + status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, + NULL); + if (status) { + dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + goto done; + } + + cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + err = -ENOMEM; + goto done; + } + + ice_copy_phy_caps_to_cfg(pcaps, cfg); + + /* Speed - If default override pending, use curr_user_phy_cfg set in + * ice_init_phy_user_cfg_ldo. 
+ */ + ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); + cfg->phy_type_high = pcaps->phy_type_high & cpu_to_le64(phy_high); + + /* Can't provide what was requested; use PHY capabilities */ + if (!cfg->phy_type_low && !cfg->phy_type_high) { + cfg->phy_type_low = pcaps->phy_type_low; + cfg->phy_type_high = pcaps->phy_type_high; + } + + /* FEC */ + ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req); + + /* Can't provide what was requested; use PHY capabilities */ + if (cfg->link_fec_opt != + (cfg->link_fec_opt & pcaps->link_fec_options)) { + cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC; + cfg->link_fec_opt = pcaps->link_fec_options; + } + + /* Flow Control - always supported; no need to check against + * capabilities + */ + ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req); + + /* Enable link and link update */ + cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK; + + status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL); + if (status) { + dev_err(dev, "Failed to set phy config, VSI %d error %s\n", + vsi->vsi_num, ice_stat_str(status)); + err = -EIO; + } + + kfree(cfg); +done: + kfree(pcaps); + return err; +} + +/** + * ice_check_media_subtask - Check for media * @pf: pointer to PF struct + * + * If media is available, then initialize PHY user configuration if it is not + * been, and configure the PHY if the interface is up. */ static void ice_check_media_subtask(struct ice_pf *pf) { @@ -1421,15 +1630,12 @@ static void ice_check_media_subtask(struct ice_pf *pf) struct ice_vsi *vsi; int err; - vsi = ice_get_main_vsi(pf); - if (!vsi) + /* No need to check for media if it's already present */ + if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags)) return; - /* No need to check for media if it's already present or the interface - * is down - */ - if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) || - test_bit(__ICE_DOWN, vsi->state)) + vsi = ice_get_main_vsi(pf); + if (!vsi) return; /* Refresh link info and check if media is present */ @@ -1439,10 +1645,19 @@ static void ice_check_media_subtask(struct ice_pf *pf) return; if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - err = ice_force_phys_link_state(vsi, true); - if (err) + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) + ice_init_phy_user_cfg(pi); + + /* PHY settings are reset on media insertion, reconfigure + * PHY to preserve settings. 
+ */ + if (test_bit(__ICE_DOWN, vsi->state) && + test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) return; - clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + + err = ice_configure_phy(vsi); + if (!err) + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); /* A Link Status Event will be generated; the event handler * will complete bringing the interface up @@ -3522,6 +3737,37 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_alloc_sw_unroll; } + err = ice_init_nvm_phy_type(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + err = ice_update_link_info(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_update_link_info failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + /* if media available, initialize PHY settings */ + if (pf->hw.port_info->phy.link_info.link_info & + ICE_AQ_MEDIA_AVAILABLE) { + err = ice_init_phy_user_cfg(pf->hw.port_info); + if (err) { + dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); + goto err_alloc_sw_unroll; + } + + if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { + struct ice_vsi *vsi = ice_get_main_vsi(pf); + + if (vsi) + ice_configure_phy(vsi); + } + } else { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); + } + ice_verify_cacheline_size(pf); /* Save wakeup reason register for later use */ @@ -6018,20 +6264,30 @@ int ice_open(struct net_device *netdev) /* Set PHY if there is media, otherwise, turn off PHY */ if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { - err = ice_force_phys_link_state(vsi, true); + clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); + if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) { + err = ice_init_phy_user_cfg(pi); + if (err) { + netdev_err(netdev, "Failed to initialize PHY settings, error %d\n", + err); + return err; + } + } + + err = ice_configure_phy(vsi); if (err) { netdev_err(netdev, "Failed to set physical link up, error %d\n", err); return err; } } else { + set_bit(ICE_FLAG_NO_MEDIA, pf->flags); err = ice_aq_set_link_restart_an(pi, false, NULL); if (err) { netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n", vsi->vsi_num, err); return err; } - set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags); } err = ice_vsi_open(vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 21fe9221a6f3..f08913611ed9 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -87,6 +87,12 @@ enum ice_fc_mode { ICE_FC_DFLT }; +enum ice_phy_cache_mode { + ICE_FC_MODE = 0, + ICE_SPEED_MODE, + ICE_FEC_MODE +}; + enum ice_fec_mode { ICE_FEC_NONE = 0, ICE_FEC_RS, @@ -94,6 +100,14 @@ enum ice_fec_mode { ICE_FEC_AUTO }; +struct ice_phy_cache_mode_data { + union { + enum ice_fec_mode curr_user_fec_req; + enum ice_fc_mode curr_user_fc_req; + u16 curr_user_speed_req; + } data; +}; + enum ice_set_fc_aq_failures { ICE_SET_FC_AQ_FAIL_NONE = 0, ICE_SET_FC_AQ_FAIL_GET, @@ -160,6 +174,13 @@ struct ice_phy_info { u64 phy_type_high; enum ice_media_type media_type; u8 get_link_info; + /* Please refer to struct ice_aqc_get_link_status_data to get + * detail of enable bit in curr_user_speed_req + */ + u16 curr_user_speed_req; + enum ice_fec_mode curr_user_fec_req; + enum ice_fc_mode curr_user_fc_req; + struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg; }; /* protocol enumeration for filters */ From ea78ce4dab05f435b1eff178a5b79d98e1847b2d Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:07 -0700 Subject: [PATCH 
1536/2056] ice: add link lenient and default override support Adds functions to check for link override firmware support and get the override settings for a port. The previously supported/default link mode was strict mode. In strict mode link is configured based on get PHY capabilities PHY types with media. Lenient mode is now the default link mode. In lenient mode the link is configured based on get PHY capabilities PHY types without media. This allows the user to configure link that the media does not report. Limit the minimum supported link mode to 25G for devices that support 100G, and 1G for devices that support less than 100G. Default override is only supported in lenient mode. If default override is supported and enabled, then default override values are used for configuring speed and FEC. Default override provide persistent link settings in the NVM. Signed-off-by: Paul Greenwalt Signed-off-by: Evan Swanson Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 3 + .../net/ethernet/intel/ice/ice_adminq_cmd.h | 5 +- drivers/net/ethernet/intel/ice/ice_common.c | 170 +++++- drivers/net/ethernet/intel/ice/ice_common.h | 8 +- drivers/net/ethernet/intel/ice/ice_ethtool.c | 509 +++++++++++------- drivers/net/ethernet/intel/ice/ice_main.c | 99 +++- drivers/net/ethernet/intel/ice/ice_nvm.c | 2 +- drivers/net/ethernet/intel/ice/ice_nvm.h | 3 + drivers/net/ethernet/intel/ice/ice_type.h | 35 ++ 9 files changed, 623 insertions(+), 211 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 132abfd068df..e67f4290fa92 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -222,6 +222,7 @@ enum ice_state { __ICE_OICR_INTR_DIS, /* Global OICR interrupt disabled */ __ICE_MDD_VF_PRINT_PENDING, /* set when MDD event handle */ __ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */ + __ICE_LINK_DEFAULT_OVERRIDE_PENDING, __ICE_PHY_INIT_COMPLETE, __ICE_STATE_NBITS /* must be last */ }; @@ -364,6 +365,7 @@ enum ice_pf_flags { ICE_FLAG_LEGACY_RX, ICE_FLAG_VF_TRUE_PROMISC_ENA, ICE_FLAG_MDD_AUTO_RESET_VF, + ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -441,6 +443,7 @@ struct ice_pf { __le64 nvm_phy_type_lo; /* NVM PHY type low */ __le64 nvm_phy_type_hi; /* NVM PHY type high */ + struct ice_link_default_override_tlv link_dflt_override; }; struct ice_netdev_priv { diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 352f3b278698..02a8d46540dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -983,7 +983,8 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) #define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0) - u8 rsvd1; /* Byte 35 reserved */ + u8 module_compliance_enforcement; +#define ICE_AQC_MOD_ENFORCE_STRICT_MODE BIT(0) u8 extended_compliance_code; #define ICE_MODULE_TYPE_TOTAL_BYTE 3 u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; @@ -1036,7 +1037,7 @@ struct ice_aqc_set_phy_cfg_data { __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ - u8 rsvd1; + u8 module_compliance_enforcement; }; /* Set MAC Config command data structure (direct 0x0603) */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 
803b894472ee..606e69086cb8 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -20,7 +20,40 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw) if (hw->vendor_id != PCI_VENDOR_ID_INTEL) return ICE_ERR_DEVICE_NOT_SUPPORTED; - hw->mac_type = ICE_MAC_GENERIC; + switch (hw->device_id) { + case ICE_DEV_ID_E810C_BACKPLANE: + case ICE_DEV_ID_E810C_QSFP: + case ICE_DEV_ID_E810C_SFP: + case ICE_DEV_ID_E810_XXV_SFP: + hw->mac_type = ICE_MAC_E810; + break; + case ICE_DEV_ID_E823C_10G_BASE_T: + case ICE_DEV_ID_E823C_BACKPLANE: + case ICE_DEV_ID_E823C_QSFP: + case ICE_DEV_ID_E823C_SFP: + case ICE_DEV_ID_E823C_SGMII: + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_SGMII: + case ICE_DEV_ID_E823L_10G_BASE_T: + case ICE_DEV_ID_E823L_1GBE: + case ICE_DEV_ID_E823L_BACKPLANE: + case ICE_DEV_ID_E823L_QSFP: + case ICE_DEV_ID_E823L_SFP: + hw->mac_type = ICE_MAC_GENERIC; + break; + default: + hw->mac_type = ICE_MAC_UNKNOWN; + break; + } + + ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type); return 0; } @@ -2675,7 +2708,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) goto out; } - ice_copy_phy_caps_to_cfg(pcaps, &cfg); + ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg); /* Configure the set PHY data */ status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode); @@ -2757,6 +2790,7 @@ ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, /** * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data + * @pi: port information structure * @caps: PHY ability structure to copy date from * @cfg: PHY configuration structure to copy data to * @@ -2764,10 +2798,11 @@ ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, * data structure */ void -ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, +ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, + struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg) { - if (!caps || !cfg) + if (!pi || !caps || !cfg) return; memset(cfg, 0, sizeof(*cfg)); @@ -2778,6 +2813,19 @@ ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, cfg->eee_cap = caps->eee_cap; cfg->eeer_value = caps->eeer_value; cfg->link_fec_opt = caps->link_fec_options; + cfg->module_compliance_enforcement = + caps->module_compliance_enforcement; + + if (ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + return; + + if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) + cfg->module_compliance_enforcement |= + ICE_LINK_OVERRIDE_STRICT_MODE; + } } /** @@ -2840,6 +2888,17 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, break; } + if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { + struct ice_link_default_override_tlv tlv; + + if (ice_get_link_default_override(&tlv, pi)) + goto out; + + if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) && + (tlv.options & ICE_LINK_OVERRIDE_EN)) + cfg->link_fec_opt = tlv.fec_options; + } + out: kfree(pcaps); @@ -4043,3 +4102,106 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); return status; } + +/** + * ice_fw_supports_link_override + * @hw: pointer to the hardware structure 
+ * + * Checks if the firmware supports link override + */ +bool ice_fw_supports_link_override(struct ice_hw *hw) +{ + /* Currently, only supported for E810 devices */ + if (hw->mac_type != ICE_MAC_E810) + return false; + + if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) { + if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN) + return true; + if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN && + hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH) + return true; + } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) { + return true; + } + + return false; +} + +/** + * ice_get_link_default_override + * @ldo: pointer to the link default override struct + * @pi: pointer to the port info struct + * + * Gets the link default override for a port + */ +enum ice_status +ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, + struct ice_port_info *pi) +{ + u16 i, tlv, tlv_len, tlv_start, buf, offset; + struct ice_hw *hw = pi->hw; + enum ice_status status; + + status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len, + ICE_SR_LINK_DEFAULT_OVERRIDE_PTR); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read link override TLV.\n"); + return status; + } + + /* Each port has its own config; calculate for our port */ + tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS + + ICE_SR_PFA_LINK_OVERRIDE_OFFSET; + + /* link options first */ + status = ice_read_sr_word(hw, tlv_start, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M; + ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >> + ICE_LINK_OVERRIDE_PHY_CFG_S; + + /* link PHY config */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET; + status = ice_read_sr_word(hw, offset, &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override phy config.\n"); + return status; + } + ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M; + + /* PHY types low */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_low |= ((u64)buf << (i * 16)); + } + + /* PHY types high */ + offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET + + ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; + for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) { + status = ice_read_sr_word(hw, (offset + i), &buf); + if (status) { + ice_debug(hw, ICE_DBG_INIT, + "Failed to read override link options.\n"); + return status; + } + /* shift 16 bits at a time to fill 64 bits */ + ldo->phy_type_high |= ((u64)buf << (i * 16)); + } + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 5246f6138506..1b8b02bb4399 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -100,6 +100,11 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +bool ice_fw_supports_link_override(struct ice_hw *hw); +enum ice_status +ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, + struct ice_port_info *pi); + enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); 
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); enum ice_status @@ -112,7 +117,8 @@ bool ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); void -ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps, +ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, + struct ice_aqc_get_phy_caps_data *caps, struct ice_aqc_set_phy_cfg_data *cfg); enum ice_status ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a20474208b7d..f382faaf64e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1387,6 +1387,77 @@ ice_get_ethtool_stats(struct net_device *netdev, } } +#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \ + ICE_PHY_TYPE_LOW_100M_SGMII) + +#define ICE_PHY_TYPE_LOW_MASK_MIN_25G (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \ + ICE_PHY_TYPE_LOW_1000BASE_T | \ + ICE_PHY_TYPE_LOW_1000BASE_SX | \ + ICE_PHY_TYPE_LOW_1000BASE_LX | \ + ICE_PHY_TYPE_LOW_1000BASE_KX | \ + ICE_PHY_TYPE_LOW_1G_SGMII | \ + ICE_PHY_TYPE_LOW_2500BASE_T | \ + ICE_PHY_TYPE_LOW_2500BASE_X | \ + ICE_PHY_TYPE_LOW_2500BASE_KX | \ + ICE_PHY_TYPE_LOW_5GBASE_T | \ + ICE_PHY_TYPE_LOW_5GBASE_KR | \ + ICE_PHY_TYPE_LOW_10GBASE_T | \ + ICE_PHY_TYPE_LOW_10G_SFI_DA | \ + ICE_PHY_TYPE_LOW_10GBASE_SR | \ + ICE_PHY_TYPE_LOW_10GBASE_LR | \ + ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \ + ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \ + ICE_PHY_TYPE_LOW_10G_SFI_C2C) + +#define ICE_PHY_TYPE_LOW_MASK_100G (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \ + ICE_PHY_TYPE_LOW_100GBASE_SR4 | \ + ICE_PHY_TYPE_LOW_100GBASE_LR4 | \ + ICE_PHY_TYPE_LOW_100GBASE_KR4 | \ + ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \ + ICE_PHY_TYPE_LOW_100G_CAUI4 | \ + ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \ + ICE_PHY_TYPE_LOW_100G_AUI4 | \ + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \ + ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \ + ICE_PHY_TYPE_LOW_100GBASE_CP2 | \ + ICE_PHY_TYPE_LOW_100GBASE_SR2 | \ + ICE_PHY_TYPE_LOW_100GBASE_DR) + +#define ICE_PHY_TYPE_HIGH_MASK_100G (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \ + ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\ + ICE_PHY_TYPE_HIGH_100G_CAUI2 | \ + ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \ + ICE_PHY_TYPE_HIGH_100G_AUI2) + +/** + * ice_mask_min_supported_speeds + * @phy_types_high: PHY type high + * @phy_types_low: PHY type low to apply minimum supported speeds mask + * + * Apply minimum supported speeds mask to PHY type low. These are the speeds + * for ethtool supported link mode. 
+ */ +static +void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low) +{ + /* if QSFP connection with 100G speed, minimum supported speed is 25G */ + if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G || + phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) + *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G; + else + *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G; +} + +#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode) \ + do { \ + if (req_speeds & (aq_link_speed) || \ + (!req_speeds && \ + (adv_phy_type_lo & phy_type_mask_lo || \ + adv_phy_type_hi & phy_type_mask_hi))) \ + ethtool_link_ksettings_add_link_mode(ks, advertising,\ + ethtool_link_mode); \ + } while (0) + /** * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes * @netdev: network interface device structure @@ -1397,277 +1468,312 @@ ice_phy_type_to_ethtool(struct net_device *netdev, struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_link_status *hw_link_info; - bool need_add_adv_mode = false; struct ice_vsi *vsi = np->vsi; - u64 phy_types_high; - u64 phy_types_low; + struct ice_pf *pf = vsi->back; + u64 phy_type_mask_lo = 0; + u64 phy_type_mask_hi = 0; + u64 adv_phy_type_lo = 0; + u64 adv_phy_type_hi = 0; + u64 phy_types_high = 0; + u64 phy_types_low = 0; + u16 req_speeds; - hw_link_info = &vsi->port_info->phy.link_info; - phy_types_low = vsi->port_info->phy.phy_type_low; - phy_types_high = vsi->port_info->phy.phy_type_high; + req_speeds = vsi->port_info->phy.link_info.req_speeds; + + /* Check if lenient mode is supported and enabled, or in strict mode. + * + * In lenient mode the Supported link modes are the PHY types without + * media. The Advertising link mode is either 1. the user requested + * speed, 2. the override PHY mask, or 3. the PHY types with media. + * + * In strict mode Supported link mode are the PHY type with media, + * and Advertising link modes are the media PHY type or the speed + * requested by user. + */ + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { + struct ice_link_default_override_tlv *ldo; + + ldo = &pf->link_dflt_override; + phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo); + phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi); + + ice_mask_min_supported_speeds(phy_types_high, &phy_types_low); + + /* If override enabled and PHY mask set, then + * Advertising link mode is the intersection of the PHY + * types without media and the override PHY mask. + */ + if (ldo->options & ICE_LINK_OVERRIDE_EN && + (ldo->phy_type_low || ldo->phy_type_high)) { + adv_phy_type_lo = + le64_to_cpu(pf->nvm_phy_type_lo) & + ldo->phy_type_low; + adv_phy_type_hi = + le64_to_cpu(pf->nvm_phy_type_hi) & + ldo->phy_type_high; + } + } else { + phy_types_low = vsi->port_info->phy.phy_type_low; + phy_types_high = vsi->port_info->phy.phy_type_high; + } + + /* If Advertising link mode PHY type is not using override PHY type, + * then use PHY type with media. 
+ */ + if (!adv_phy_type_lo && !adv_phy_type_hi) { + adv_phy_type_lo = vsi->port_info->phy.phy_type_low; + adv_phy_type_hi = vsi->port_info->phy.phy_type_high; + } ethtool_link_ksettings_zero_link_mode(ks, supported); ethtool_link_ksettings_zero_link_mode(ks, advertising); - if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || - phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX | + ICE_PHY_TYPE_LOW_100M_SGMII; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 100baseT_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 100baseT_Full); + + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB, + 100baseT_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || - phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T | + ICE_PHY_TYPE_LOW_1G_SGMII; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseT_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 1000baseT_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, + 1000baseT_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseKX_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 1000baseKX_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, + 1000baseKX_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || - phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX | + ICE_PHY_TYPE_LOW_1000BASE_LX; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 1000baseX_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 1000baseX_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB, + 1000baseX_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseT_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 2500baseT_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, + 2500baseT_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || - phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X | + ICE_PHY_TYPE_LOW_2500BASE_KX; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 2500baseX_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 2500baseX_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB, + 2500baseX_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || - phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T | + ICE_PHY_TYPE_LOW_5GBASE_KR; + 
if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 5000baseT_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 5000baseT_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB, + 5000baseT_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T | + ICE_PHY_TYPE_LOW_10G_SFI_DA | + ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | + ICE_PHY_TYPE_LOW_10G_SFI_C2C; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseT_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, + 10000baseT_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseKR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseKR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, + 10000baseKR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseSR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseSR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, + 10000baseSR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseLR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseLR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB, + 10000baseLR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || - phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T | + ICE_PHY_TYPE_LOW_25GBASE_CR | + ICE_PHY_TYPE_LOW_25GBASE_CR_S | + ICE_PHY_TYPE_LOW_25GBASE_CR1 | + ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC | + ICE_PHY_TYPE_LOW_25G_AUI_C2C; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 25000baseCR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, + 25000baseCR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR | + ICE_PHY_TYPE_LOW_25GBASE_LR; + if (phy_types_low 
& phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseSR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 25000baseSR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, + 25000baseSR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || - phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR | + ICE_PHY_TYPE_LOW_25GBASE_KR_S | + ICE_PHY_TYPE_LOW_25GBASE_KR1; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseKR_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 25000baseKR_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB, + 25000baseKR_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseKR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 40000baseKR4_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, + 40000baseKR4_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || - phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 | + ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC | + ICE_PHY_TYPE_LOW_40G_XLAUI; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseCR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 40000baseCR4_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, + 40000baseCR4_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseSR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 40000baseSR4_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, + 40000baseSR4_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 40000baseLR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 40000baseLR4_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB, + 40000baseLR4_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || - phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 || - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR || - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 | + 
ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC | + ICE_PHY_TYPE_LOW_50G_LAUI2 | + ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC | + ICE_PHY_TYPE_LOW_50G_AUI2 | + ICE_PHY_TYPE_LOW_50GBASE_CP | + ICE_PHY_TYPE_LOW_50GBASE_SR | + ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC | + ICE_PHY_TYPE_LOW_50G_AUI1; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseCR2_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 50000baseCR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, + 50000baseCR2_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 | + ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseKR2_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 50000baseKR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, + 50000baseKR2_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR || - phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 | + ICE_PHY_TYPE_LOW_50GBASE_LR2 | + ICE_PHY_TYPE_LOW_50GBASE_FR | + ICE_PHY_TYPE_LOW_50GBASE_LR; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 50000baseSR2_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 50000baseSR2_Full); + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB, + 50000baseSR2_Full); } - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || - phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 || - phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC || - phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 || - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 || - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2 || - phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC || - phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 || - phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC || - phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 | + ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | + ICE_PHY_TYPE_LOW_100G_CAUI4 | + ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | + ICE_PHY_TYPE_LOW_100G_AUI4 | + ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | + ICE_PHY_TYPE_LOW_100GBASE_CP2; + phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | + ICE_PHY_TYPE_HIGH_100G_CAUI2 | + ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | + ICE_PHY_TYPE_HIGH_100G_AUI2; + if (phy_types_low & phy_type_mask_lo || + phy_types_high & phy_type_mask_hi) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseCR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) - need_add_adv_mode = true; + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseCR4_Full); } - if (need_add_adv_mode) { - need_add_adv_mode = false; - ethtool_link_ksettings_add_link_mode(ks, advertising, - 100000baseCR4_Full); - } - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 || - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) { + + phy_type_mask_lo = 
ICE_PHY_TYPE_LOW_100GBASE_SR4 | + ICE_PHY_TYPE_LOW_100GBASE_SR2; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseSR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) - need_add_adv_mode = true; + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseSR4_Full); } - if (need_add_adv_mode) { - need_add_adv_mode = false; - ethtool_link_ksettings_add_link_mode(ks, advertising, - 100000baseSR4_Full); - } - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 || - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 | + ICE_PHY_TYPE_LOW_100GBASE_DR; + if (phy_types_low & phy_type_mask_lo) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseLR4_ER4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) - need_add_adv_mode = true; + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseLR4_ER4_Full); } - if (need_add_adv_mode) { - need_add_adv_mode = false; - ethtool_link_ksettings_add_link_mode(ks, advertising, - 100000baseLR4_ER4_Full); - } - if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || - phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || - phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) { + + phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 | + ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4; + phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4; + if (phy_types_low & phy_type_mask_lo || + phy_types_high & phy_type_mask_hi) { ethtool_link_ksettings_add_link_mode(ks, supported, 100000baseKR4_Full); - if (!hw_link_info->req_speeds || - hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB) - need_add_adv_mode = true; + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseKR4_Full); } - if (need_add_adv_mode) - ethtool_link_ksettings_add_link_mode(ks, advertising, - 100000baseKR4_Full); /* Autoneg PHY types */ if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || @@ -2105,8 +2211,8 @@ ice_set_link_ksettings(struct net_device *netdev, struct ice_port_info *p; u8 autoneg_changed = 0; enum ice_status status; - u64 phy_type_high; - u64 phy_type_low; + u64 phy_type_high = 0; + u64 phy_type_low = 0; int err = 0; bool linkup; @@ -2159,6 +2265,8 @@ ice_set_link_ksettings(struct net_device *netdev, if (!bitmap_subset(copy_ks.link_modes.advertising, safe_ks.link_modes.supported, __ETHTOOL_LINK_MODE_MASK_NBITS)) { + if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) + netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); err = -EINVAL; goto done; } @@ -2255,9 +2363,20 @@ ice_set_link_ksettings(struct net_device *netdev, abilities->phy_type_low; if (!(config.phy_type_high || config.phy_type_low)) { - netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n"); - err = -EAGAIN; - goto done; + /* If there is no intersection and lenient mode is enabled, then + * intersect the requested advertised speed with NVM media type + * PHY types. + */ + if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) { + config.phy_type_high = cpu_to_le64(phy_type_high) & + pf->nvm_phy_type_hi; + config.phy_type_low = cpu_to_le64(phy_type_low) & + pf->nvm_phy_type_lo; + } else { + netdev_info(netdev, "The selected speed is not supported by the current media. 
Please select a link speed that is supported by the current media.\n"); + err = -EAGAIN; + goto done; + } } /* If link is up put link down */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 4c515bdf0c2c..9580c6096e56 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1413,7 +1413,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up) * ice_init_nvm_phy_type - Initialize the NVM PHY type * @pi: port info structure * - * Initialize nvm_phy_type_[low|high] + * Initialize nvm_phy_type_[low|high] for link lenient mode support */ static int ice_init_nvm_phy_type(struct ice_port_info *pi) { @@ -1443,6 +1443,59 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) return err; } +/** + * ice_init_link_dflt_override - Initialize link default override + * @pi: port info structure + */ +static void ice_init_link_dflt_override(struct ice_port_info *pi) +{ + struct ice_link_default_override_tlv *ldo; + struct ice_pf *pf = pi->hw->back; + + ldo = &pf->link_dflt_override; + ice_get_link_default_override(ldo, pi); +} + +/** + * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings + * @pi: port info structure + * + * If default override is enabled, initialized the user PHY cfg speed and FEC + * settings using the default override mask from the NVM. + * + * The PHY should only be configured with the default override settings the + * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state + * is used to indicate that the user PHY cfg default override is initialized + * and the PHY has not been configured with the default override settings. The + * state is set here, and cleared in ice_configure_phy the first time the PHY is + * configured. + */ +static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi) +{ + struct ice_link_default_override_tlv *ldo; + struct ice_aqc_set_phy_cfg_data *cfg; + struct ice_phy_info *phy = &pi->phy; + struct ice_pf *pf = pi->hw->back; + + ldo = &pf->link_dflt_override; + + /* If link default override is enabled, use to mask NVM PHY capabilities + * for speed and FEC default configuration. 
+ */ + cfg = &phy->curr_user_phy_cfg; + + if (ldo->phy_type_low || ldo->phy_type_high) { + cfg->phy_type_low = pf->nvm_phy_type_lo & + cpu_to_le64(ldo->phy_type_low); + cfg->phy_type_high = pf->nvm_phy_type_hi & + cpu_to_le64(ldo->phy_type_high); + } + cfg->link_fec_opt = ldo->fec_options; + phy->curr_user_fec_req = ICE_FEC_AUTO; + + set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state); +} + /** * ice_init_phy_user_cfg - Initialize the PHY user configuration * @pi: port info structure @@ -1485,12 +1538,31 @@ static int ice_init_phy_user_cfg(struct ice_port_info *pi) goto err_out; } - ice_copy_phy_caps_to_cfg(pcaps, &pi->phy.curr_user_phy_cfg); - /* initialize PHY using topology with media */ + ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg); + + /* check if lenient mode is supported and enabled */ + if (ice_fw_supports_link_override(&vsi->back->hw) && + !(pcaps->module_compliance_enforcement & + ICE_AQC_MOD_ENFORCE_STRICT_MODE)) { + set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags); + + /* if link default override is enabled, initialize user PHY + * configuration with link default override values + */ + if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) { + ice_init_phy_cfg_dflt_override(pi); + goto out; + } + } + + /* if link default override is not enabled, initialize PHY using + * topology with media + */ phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps, pcaps->link_fec_options); phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps); +out: phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M; set_bit(__ICE_PHY_INIT_COMPLETE, pf->state); err_out: @@ -1511,7 +1583,6 @@ static int ice_configure_phy(struct ice_vsi *vsi) struct device *dev = ice_pf_to_dev(vsi->back); struct ice_aqc_get_phy_caps_data *pcaps; struct ice_aqc_set_phy_cfg_data *cfg; - u64 phy_low = 0, phy_high = 0; struct ice_port_info *pi; enum ice_status status; int err = 0; @@ -1571,14 +1642,24 @@ static int ice_configure_phy(struct ice_vsi *vsi) goto done; } - ice_copy_phy_caps_to_cfg(pcaps, cfg); + ice_copy_phy_caps_to_cfg(pi, pcaps, cfg); /* Speed - If default override pending, use curr_user_phy_cfg set in * ice_init_phy_user_cfg_ldo. 
*/ - ice_update_phy_type(&phy_low, &phy_high, pi->phy.curr_user_speed_req); - cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); - cfg->phy_type_high = pcaps->phy_type_high & cpu_to_le64(phy_high); + if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, + vsi->back->state)) { + cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low; + cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high; + } else { + u64 phy_low = 0, phy_high = 0; + + ice_update_phy_type(&phy_low, &phy_high, + pi->phy.curr_user_speed_req); + cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low); + cfg->phy_type_high = pcaps->phy_type_high & + cpu_to_le64(phy_high); + } /* Can't provide what was requested; use PHY capabilities */ if (!cfg->phy_type_low && !cfg->phy_type_high) { @@ -3749,6 +3830,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) goto err_alloc_sw_unroll; } + ice_init_link_dflt_override(pf->hw.port_info); + /* if media available, initialize PHY settings */ if (pf->hw.port_info->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index b1172a67573b..7c2a06892bbb 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -196,7 +196,7 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data) * Area (PFA) and returns the TLV pointer and length. The caller can * use these to read the variable length TLV value. */ -static enum ice_status +enum ice_status ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index e3993c04c97a..999f273ba6ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -11,6 +11,9 @@ enum ice_status ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); enum ice_status +ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, + u16 module_type); +enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index f08913611ed9..08c616d9fffd 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -118,6 +118,7 @@ enum ice_set_fc_aq_failures { /* Various MAC types */ enum ice_mac_type { ICE_MAC_UNKNOWN = 0, + ICE_MAC_E810, ICE_MAC_GENERIC, }; @@ -314,6 +315,28 @@ struct ice_nvm_info { u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; +struct ice_link_default_override_tlv { + u8 options; +#define ICE_LINK_OVERRIDE_OPT_M 0x3F +#define ICE_LINK_OVERRIDE_STRICT_MODE BIT(0) +#define ICE_LINK_OVERRIDE_EPCT_DIS BIT(1) +#define ICE_LINK_OVERRIDE_PORT_DIS BIT(2) +#define ICE_LINK_OVERRIDE_EN BIT(3) +#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS BIT(4) +#define ICE_LINK_OVERRIDE_EEE_EN BIT(5) + u8 phy_config; +#define ICE_LINK_OVERRIDE_PHY_CFG_S 8 +#define ICE_LINK_OVERRIDE_PHY_CFG_M (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S) +#define ICE_LINK_OVERRIDE_PAUSE_M 0x3 +#define ICE_LINK_OVERRIDE_LESM_EN BIT(6) +#define ICE_LINK_OVERRIDE_AUTO_FEC_EN BIT(7) + u8 fec_options; +#define ICE_LINK_OVERRIDE_FEC_OPT_M 0xFF + u8 rsvd1; + u64 
phy_type_low; + u64 phy_type_high; +}; + #define ICE_NVM_VER_LEN 32 /* netlist version information */ @@ -465,6 +488,7 @@ struct ice_dcb_app_priority_table { #define ICE_APP_SEL_ETHTYPE 0x1 #define ICE_APP_SEL_TCPIP 0x2 #define ICE_CEE_APP_SEL_ETHTYPE 0x0 +#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR 0x134 #define ICE_CEE_APP_SEL_TCPIP 0x1 struct ice_dcbx_cfg { @@ -748,6 +772,17 @@ struct ice_hw_port_stats { #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 + +/* Link override related */ +#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10 +#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4 +#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET 2 +#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET 1 +#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET 2 +#define ICE_FW_API_LINK_OVERRIDE_MAJ 1 +#define ICE_FW_API_LINK_OVERRIDE_MIN 5 +#define ICE_FW_API_LINK_OVERRIDE_PATCH 2 + #define ICE_SR_WORDS_IN_1KB 512 /* Hash redirection LUT for VSI - maximum array size */ From b4e813dd04e87610503f1972ecd74d87ea062be9 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 9 Jul 2020 09:16:08 -0700 Subject: [PATCH 1537/2056] ice: support Total Port Shutdown on devices that support it When the Port Disable bit is set in the Link Default Override Mask TLV PFA module in the NVM, Total Port Shutdown mode is supported and enabled. In this mode, the driver should act as if the link-down-on-close ethtool private flag is always enabled and dis-allow any change to that flag. Signed-off-by: Bruce Allan Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_ethtool.c | 12 ++++++++++++ drivers/net/ethernet/intel/ice/ice_main.c | 14 +++++++++++++- 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index e67f4290fa92..c665220bb637 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -359,6 +359,7 @@ enum ice_pf_flags { ICE_FLAG_FD_ENA, ICE_FLAG_ADV_FEATURES, ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, + ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, ICE_FLAG_NO_MEDIA, ICE_FLAG_FW_LLDP_AGENT, ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index f382faaf64e3..60abd261b8bf 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -1196,6 +1196,17 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS); + /* Do not allow change to link-down-on-close when Total Port Shutdown + * is enabled. 
+ */ + if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) && + test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { + dev_err(dev, "Setting link-down-on-close not supported on this port\n"); + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); + ret = -EINVAL; + goto ethtool_exit; + } + if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) { if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) { enum ice_status status; @@ -1283,6 +1294,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags); ret = -EAGAIN; } +ethtool_exit: clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); return ret; } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9580c6096e56..c8c570f95b92 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1446,6 +1446,8 @@ static int ice_init_nvm_phy_type(struct ice_port_info *pi) /** * ice_init_link_dflt_override - Initialize link default override * @pi: port info structure + * + * Initialize link default override and PHY total port shutdown during probe */ static void ice_init_link_dflt_override(struct ice_port_info *pi) { @@ -1453,7 +1455,17 @@ static void ice_init_link_dflt_override(struct ice_port_info *pi) struct ice_pf *pf = pi->hw->back; ldo = &pf->link_dflt_override; - ice_get_link_default_override(ldo, pi); + if (ice_get_link_default_override(ldo, pi)) + return; + + if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS)) + return; + + /* Enable Total Port Shutdown (override/replace link-down-on-close + * ethtool private flag) for ports with Port Disable bit set. + */ + set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); + set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); } /** From 55df52a0bcc0e2fd7ac1f050c8d2ab76b3df8dfd Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:09 -0700 Subject: [PATCH 1538/2056] ice: add ice_aq_get_phy_caps() debug logs Add debug logs for ice_aq_get_phy_caps(), and format ice_aq_set_phy_cfg() and ice_aq_get_link_info() debug logs to make them more readable. 
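For illustration only (not part of this patch), the reformatted output groups a short header line with indented per-field lines under ICE_DBG_LINK. A minimal sketch of that pattern, assuming the usual ice driver headers are in scope; the helper name below is hypothetical:

/* Sketch: dump a PHY type pair in the same "header line plus indented
 * fields" style the reworked ice_debug output uses. Not part of the
 * patch; shown only to summarize the logging convention.
 */
static void ice_dbg_dump_phy_types(struct ice_hw *hw, const char *what,
				   __le64 phy_type_low, __le64 phy_type_high)
{
	ice_debug(hw, ICE_DBG_LINK, "%s\n", what);
	ice_debug(hw, ICE_DBG_LINK, "  phy_type_low = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "  phy_type_high = 0x%llx\n",
		  (unsigned long long)le64_to_cpu(phy_type_high));
}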
Signed-off-by: Paul Greenwalt Signed-off-by: Paul M Stillwell Jr Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 68 +++++++++++++++------ 1 file changed, 50 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 606e69086cb8..4c6a8e6bdd40 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -150,11 +150,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, u16 pcaps_size = sizeof(*pcaps); struct ice_aq_desc desc; enum ice_status status; + struct ice_hw *hw; cmd = &desc.params.get_phy; if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi) return ICE_ERR_PARAM; + hw = pi->hw; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); @@ -162,7 +164,32 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM); cmd->param0 |= cpu_to_le16(report_mode); - status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd); + status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd); + + ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n", + report_mode); + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", + (unsigned long long)le64_to_cpu(pcaps->phy_type_low)); + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", + (unsigned long long)le64_to_cpu(pcaps->phy_type_high)); + ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps); + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl = 0x%x\n", + pcaps->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap); + ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", + pcaps->eeer_value); + ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n", + pcaps->link_fec_options); + ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n", + pcaps->module_compliance_enforcement); + ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n", + pcaps->extended_compliance_code); + ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n", + pcaps->module_type[0]); + ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n", + pcaps->module_type[1]); + ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", + pcaps->module_type[2]); if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); @@ -326,18 +353,21 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED)); - ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed); - ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + ice_debug(hw, ICE_DBG_LINK, "get link info\n"); + ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed); + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)li->phy_type_low); - ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)li->phy_type_high); - ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type); - ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info); - ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info); - ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info); - ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena); - ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size); - ice_debug(hw, 
ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing); + ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type); + ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info); + ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info); + ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info); + ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info); + ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena); + ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n", + li->max_frame_size); + ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing); /* save link status information */ if (link) @@ -2489,16 +2519,18 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, desc.params.set_phy.lport_num = pi->lport; desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n", + ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n"); + ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n", (unsigned long long)le64_to_cpu(cfg->phy_type_low)); - ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n", + ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)le64_to_cpu(cfg->phy_type_high)); - ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps); - ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n", + ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl = 0x%x\n", cfg->low_power_ctrl); - ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap); - ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value); - ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt); + ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); + ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); + ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", + cfg->link_fec_opt); status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE) From 5ee30564c85c94b7dc78aa6cce09e9712b2ad70d Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:10 -0700 Subject: [PATCH 1539/2056] ice: update reporting of autoneg capabilities Firmware now reports AN28, AN32, and AN73. Add a helper and check these new values and report PHY autoneg capability. 
Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 3 +++ drivers/net/ethernet/intel/ice/ice_common.c | 15 +++++++++++++++ drivers/net/ethernet/intel/ice/ice_common.h | 1 + drivers/net/ethernet/intel/ice/ice_ethtool.c | 8 ++++---- drivers/net/ethernet/intel/ice/ice_main.c | 8 ++++++-- 5 files changed, 29 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 02a8d46540dc..800364be3bc8 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -963,6 +963,9 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0) u8 low_power_ctrl; #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) +#define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) +#define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) +#define ICE_AQC_PHY_AN_EN_CLAUSE37 BIT(3) __le16 eee_cap; #define ICE_AQC_PHY_EEE_EN_100BASE_TX BIT(0) #define ICE_AQC_PHY_EEE_EN_1000BASE_T BIT(1) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 4c6a8e6bdd40..ea8f09497d44 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4237,3 +4237,18 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, return status; } + +/** + * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled + * @caps: get PHY capability data + */ +bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) +{ + if (caps->caps & ICE_AQC_PHY_AN_MODE || + caps->low_power_ctrl & (ICE_AQC_PHY_AN_EN_CLAUSE28 | + ICE_AQC_PHY_AN_EN_CLAUSE73 | + ICE_AQC_PHY_AN_EN_CLAUSE37)) + return true; + + return false; +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 1b8b02bb4399..33a681a75439 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -104,6 +104,7 @@ bool ice_fw_supports_link_override(struct ice_hw *hw); enum ice_status ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); +bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 60abd261b8bf..06b93e97892d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2987,8 +2987,8 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) if (status) goto out; - pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE : + AUTONEG_DISABLE; if (dcbx_cfg->pfc.pfcena) /* PFC enabled so report LFC as off */ @@ -3056,8 +3056,8 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) return -EIO; } - is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + is_an = ice_is_phy_caps_an_enabled(pcaps) ? 
AUTONEG_ENABLE : + AUTONEG_DISABLE; kfree(pcaps); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c8c570f95b92..16a4096bb780 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -612,6 +612,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi) void ice_print_link_msg(struct ice_vsi *vsi, bool isup) { struct ice_aqc_get_phy_caps_data *caps; + const char *an_advertised; enum ice_status status; const char *fec_req; const char *speed; @@ -710,6 +711,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) caps = kzalloc(sizeof(*caps), GFP_KERNEL); if (!caps) { fec_req = "Unknown"; + an_advertised = "Unknown"; goto done; } @@ -718,6 +720,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) if (status) netdev_info(vsi->netdev, "Get phy capability failed.\n"); + an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off"; + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ || caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ) fec_req = "RS-FEC"; @@ -730,8 +734,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) kfree(caps); done: - netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n", - speed, fec_req, fec, an, fc); + netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n", + speed, fec_req, fec, an_advertised, an, fc); ice_print_topo_conflict(vsi); } From bdeff9718a1b13032357165df7dd97bc18b41b9e Mon Sep 17 00:00:00 2001 From: Lev Faerman Date: Thu, 9 Jul 2020 09:16:11 -0700 Subject: [PATCH 1540/2056] ice: Rename low_power_ctrl Rename the low_power_ctrl field to low_power_ctrl_an to be properly descriptive of it being an autoneg field. 
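For illustration only (not part of this patch), after the rename the autoneg clause bits defined on this field read naturally at call sites. A minimal hypothetical sketch:

/* Sketch: with the field named low_power_ctrl_an, a per-clause query is
 * self-describing. The function name is made up for this example.
 */
static bool ice_phy_offers_clause73_an(struct ice_aqc_get_phy_caps_data *pcaps)
{
	return !!(pcaps->low_power_ctrl_an & ICE_AQC_PHY_AN_EN_CLAUSE73);
}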
Signed-off-by: Lev Faerman Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 4 ++-- drivers/net/ethernet/intel/ice/ice_common.c | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 800364be3bc8..31b6fd8dc0dc 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -961,7 +961,7 @@ struct ice_aqc_get_phy_caps_data { #define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5) #define ICE_AQC_PHY_EN_AUTO_FEC BIT(7) #define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0) - u8 low_power_ctrl; + u8 low_power_ctrl_an; #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) #define ICE_AQC_PHY_AN_EN_CLAUSE28 BIT(1) #define ICE_AQC_PHY_AN_EN_CLAUSE73 BIT(2) @@ -1036,7 +1036,7 @@ struct ice_aqc_set_phy_cfg_data { #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) #define ICE_AQ_PHY_ENA_LESM BIT(6) #define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) - u8 low_power_ctrl; + u8 low_power_ctrl_an; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index ea8f09497d44..695711e48acc 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -173,8 +173,8 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)le64_to_cpu(pcaps->phy_type_high)); ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps); - ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl = 0x%x\n", - pcaps->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", + pcaps->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap); ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", pcaps->eeer_value); @@ -2525,8 +2525,8 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n", (unsigned long long)le64_to_cpu(cfg->phy_type_high)); ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps); - ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl = 0x%x\n", - cfg->low_power_ctrl); + ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n", + cfg->low_power_ctrl_an); ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap); ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value); ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n", @@ -2811,7 +2811,7 @@ ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps, if (phy_caps->phy_type_low != phy_cfg->phy_type_low || phy_caps->phy_type_high != phy_cfg->phy_type_high || ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) || - phy_caps->low_power_ctrl != phy_cfg->low_power_ctrl || + phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an || phy_caps->eee_cap != phy_cfg->eee_cap || phy_caps->eeer_value != phy_cfg->eeer_value || phy_caps->link_fec_options != phy_cfg->link_fec_opt) @@ -2841,7 +2841,7 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, cfg->phy_type_low = caps->phy_type_low; cfg->phy_type_high = caps->phy_type_high; cfg->caps = caps->caps; - cfg->low_power_ctrl = caps->low_power_ctrl; + cfg->low_power_ctrl_an = caps->low_power_ctrl_an; cfg->eee_cap = caps->eee_cap; cfg->eeer_value = caps->eeer_value; 
cfg->link_fec_opt = caps->link_fec_options; @@ -4245,9 +4245,9 @@ ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) { if (caps->caps & ICE_AQC_PHY_AN_MODE || - caps->low_power_ctrl & (ICE_AQC_PHY_AN_EN_CLAUSE28 | - ICE_AQC_PHY_AN_EN_CLAUSE73 | - ICE_AQC_PHY_AN_EN_CLAUSE37)) + caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 | + ICE_AQC_PHY_AN_EN_CLAUSE73 | + ICE_AQC_PHY_AN_EN_CLAUSE37)) return true; return false; From 8ea1da593b25b4b3251c43753e233efe5f06a3f0 Mon Sep 17 00:00:00 2001 From: Paul Greenwalt Date: Thu, 9 Jul 2020 09:16:12 -0700 Subject: [PATCH 1541/2056] ice: add AQC get link topology handle support Add AQC get link topology handle support. This is needed to determine Direct Attach (DA) or backplane media type for PHY types that support either. Get link topology handle cage node type request can be used to determine if a cage is present or not. If a cage is present for PHY types that supports both DA and backplane media type, then the media type is DA, else the media type is backplane. Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 53 +++++++++++++++ drivers/net/ethernet/intel/ice/ice_common.c | 66 ++++++++++++++++++- 2 files changed, 118 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 31b6fd8dc0dc..4315a784b975 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1194,6 +1194,57 @@ struct ice_aqc_set_mac_lb { u8 reserved[15]; }; +struct ice_aqc_link_topo_addr { + u8 lport_num; + u8 lport_num_valid; +#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0) + u8 node_type_ctx; +#define ICE_AQC_LINK_TOPO_NODE_TYPE_S 0 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_M (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S) +#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY 0 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL 2 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL 3 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED 4 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL 5 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7 +#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8 +#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4 +#define ICE_AQC_LINK_TOPO_NODE_CTX_M \ + (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S) +#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL 0 +#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD 1 +#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT 2 +#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE 3 +#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4 +#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5 + u8 index; + __le16 handle; +#define ICE_AQC_LINK_TOPO_HANDLE_S 0 +#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S) +/* Used to decode the handle field */ +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9) +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9) +#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0 +#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0 +/* In case of a Mezzanine type */ +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \ + (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S 6 +#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S) +/* In case of a LOM type */ +#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M \ + (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S) +}; + +/* Get Link 
Topology Handle (direct, 0x06E0) */ +struct ice_aqc_get_link_topo { + struct ice_aqc_link_topo_addr addr; + u8 node_part_num; + u8 rsvd[9]; +}; + /* Set Port Identification LED (direct, 0x06E9) */ struct ice_aqc_set_port_id_led { u8 lport_num; @@ -1764,6 +1815,7 @@ struct ice_aq_desc { struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; + struct ice_aqc_get_link_topo get_link_topo; } params; }; @@ -1863,6 +1915,7 @@ enum ice_adminq_opc { ice_aqc_opc_get_link_status = 0x0607, ice_aqc_opc_set_event_mask = 0x0613, ice_aqc_opc_set_mac_lb = 0x0620, + ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_set_port_id_led = 0x06E9, ice_aqc_opc_sff_eeprom = 0x06EE, diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 695711e48acc..d8df97158249 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -199,6 +199,56 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, return status; } +/** + * ice_aq_get_link_topo_handle - get link topology node return status + * @pi: port information structure + * @node_type: requested node type + * @cd: pointer to command details structure or NULL + * + * Get link topology node return status for specified node type (0x06E0) + * + * Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present, then + * connection type is backplane or BASE-T. + */ +static enum ice_status +ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type, + struct ice_sq_cd *cd) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.get_link_topo; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + + cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT << + ICE_AQC_LINK_TOPO_NODE_CTX_S); + + /* set node type */ + cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type); + + return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd); +} + +/** + * ice_is_media_cage_present + * @pi: port information structure + * + * Returns true if media cage is present, else false. If no cage, then + * media type is backplane or BASE-T. + */ +static bool ice_is_media_cage_present(struct ice_port_info *pi) +{ + /* Node type cage can be used to determine if cage is present. If AQC + * returns error (ENOENT), then no cage present. If no cage present then + * connection type is backplane or BASE-T. 
+ */ + return !ice_aq_get_link_topo_handle(pi, + ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE, + NULL); +} + /** * ice_get_media_type - Gets media type * @pi: port information structure @@ -224,7 +274,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_10G_SFI_C2C: case ICE_PHY_TYPE_LOW_25GBASE_SR: case ICE_PHY_TYPE_LOW_25GBASE_LR: - case ICE_PHY_TYPE_LOW_25G_AUI_C2C: case ICE_PHY_TYPE_LOW_40GBASE_SR4: case ICE_PHY_TYPE_LOW_40GBASE_LR4: case ICE_PHY_TYPE_LOW_50GBASE_SR2: @@ -255,6 +304,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4: case ICE_PHY_TYPE_LOW_100GBASE_CP2: return ICE_MEDIA_DA; + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + case ICE_PHY_TYPE_LOW_50G_LAUI2: + case ICE_PHY_TYPE_LOW_50G_AUI2: + case ICE_PHY_TYPE_LOW_50G_AUI1: + case ICE_PHY_TYPE_LOW_100G_AUI4: + case ICE_PHY_TYPE_LOW_100G_CAUI4: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_DA; + fallthrough; case ICE_PHY_TYPE_LOW_1000BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_KX: case ICE_PHY_TYPE_LOW_2500BASE_X: @@ -272,6 +331,11 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) } } else { switch (hw_link_info->phy_type_high) { + case ICE_PHY_TYPE_HIGH_100G_AUI2: + case ICE_PHY_TYPE_HIGH_100G_CAUI2: + if (ice_is_media_cage_present(pi)) + return ICE_MEDIA_DA; + fallthrough; case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; } From c1eb3b6b68686d7a5dce7c6d795b7eedf6b0671c Mon Sep 17 00:00:00 2001 From: Doug Dziggel Date: Thu, 9 Jul 2020 09:16:13 -0700 Subject: [PATCH 1542/2056] ice: Report AOC PHY Types as Fiber Report AOC types as fiber instead of unknown. Signed-off-by: Doug Dziggel Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d8df97158249..bb9952038efa 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -285,6 +285,14 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) case ICE_PHY_TYPE_LOW_100GBASE_LR4: case ICE_PHY_TYPE_LOW_100GBASE_SR2: case ICE_PHY_TYPE_LOW_100GBASE_DR: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC: + case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC: return ICE_MEDIA_FIBER; case ICE_PHY_TYPE_LOW_100BASE_TX: case ICE_PHY_TYPE_LOW_1000BASE_T: @@ -338,6 +346,9 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) fallthrough; case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4: return ICE_MEDIA_BACKPLANE; + case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC: + case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC: + return ICE_MEDIA_FIBER; } } return ICE_MEDIA_UNKNOWN; From c2b352262af4360e9a8aefc62bde9bd7012839a8 Mon Sep 17 00:00:00 2001 From: Paul M Stillwell Jr Date: Thu, 9 Jul 2020 09:16:14 -0700 Subject: [PATCH 1543/2056] ice: add 1G SGMII PHY type There isn't a case for 1G SGMII in ice_get_media_type() so add the handling for it. 
Also handle the special case where some direct attach cables may report that they support 1G SGMII, but that is erroneous since SGMII is supposed to be a backplane media type (between a MAC and a PHY). If the driver doesn't handle this special case then a user could see the 'Port' in ethtool change from 'Direct attach Copper' to 'Backplane' when they have forced the speed to 1G, but the cable hasn't changed. Lastly, change ice_aq_get_phy_caps() to save the module_type info if the function was called with ICE_AQC_REPORT_TOPO_CAP. This call uses the media information to populate the module_type. If no media is present then the values in module_type will be 0. Signed-off-by: Paul M Stillwell Jr Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 17 ++++++++++++++--- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 4315a784b975..b363e0223670 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -993,6 +993,7 @@ struct ice_aqc_get_phy_caps_data { u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; #define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS 0x80 +#define ICE_AQC_MOD_TYPE_IDENT 1 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0) #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1) #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index bb9952038efa..c72cc77b8d67 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -194,6 +194,8 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); + memcpy(pi->phy.link_info.module_type, &pcaps->module_type, + sizeof(pi->phy.link_info.module_type)); } return status; @@ -266,6 +268,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi) return ICE_MEDIA_UNKNOWN; if (hw_link_info->phy_type_low) { + /* 1G SGMII is a special case where some DA cable PHYs + * may show this as an option when it really shouldn't + * be since SGMII is meant to be between a MAC and a PHY + * in a backplane. 
Try to detect this case and handle it + */ + if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII && + (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == + ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE || + hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] == + ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE)) + return ICE_MEDIA_DA; + switch (hw_link_info->phy_type_low) { case ICE_PHY_TYPE_LOW_1000BASE_SX: case ICE_PHY_TYPE_LOW_1000BASE_LX: @@ -2647,9 +2661,6 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); - if (!status) - memcpy(li->module_type, &pcaps->module_type, - sizeof(li->module_type)); devm_kfree(ice_hw_to_dev(hw), pcaps); } From 1b6687e31a2df9fbdb12d25c1d1d372777bf96a8 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 23 Jul 2020 17:23:09 -0600 Subject: [PATCH 1544/2056] vrf: Handle CONFIG_SYSCTL not set Randy reported compile failure when CONFIG_SYSCTL is not set/enabled: ERROR: modpost: "sysctl_vals" [drivers/net/vrf.ko] undefined! Fix by splitting out the sysctl init and cleanup into helpers that can be set to do nothing when CONFIG_SYSCTL is disabled. In addition, move vrf_strict_mode and vrf_strict_mode_change to above vrf_shared_table_handler (code move only) and wrap all of it in the ifdef CONFIG_SYSCTL. Update the strict mode tests to check for the existence of the /proc/sys entry. Fixes: 33306f1aaf82 ("vrf: add sysctl parameter for strict mode") Cc: Andrea Mayer Reported-by: Randy Dunlap Signed-off-by: David Ahern Acked-by: Randy Dunlap # build-tested Signed-off-by: David S. Miller --- drivers/net/vrf.c | 138 ++++++++++-------- .../selftests/net/vrf_strict_mode_test.sh | 6 + 2 files changed, 83 insertions(+), 61 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 46599606ff10..60c1aadece89 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -260,52 +260,6 @@ static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock) spin_unlock(&vmap->vmap_lock); } -static bool vrf_strict_mode(struct vrf_map *vmap) -{ - bool strict_mode; - - vrf_map_lock(vmap); - strict_mode = vmap->strict_mode; - vrf_map_unlock(vmap); - - return strict_mode; -} - -static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode) -{ - bool *cur_mode; - int res = 0; - - vrf_map_lock(vmap); - - cur_mode = &vmap->strict_mode; - if (*cur_mode == new_mode) - goto unlock; - - if (*cur_mode) { - /* disable strict mode */ - *cur_mode = false; - } else { - if (vmap->shared_tables) { - /* we cannot allow strict_mode because there are some - * vrfs that share one or more tables. - */ - res = -EBUSY; - goto unlock; - } - - /* no tables are shared among vrfs, so we can go back - * to 1:1 association between a vrf with its table. 
- */ - *cur_mode = true; - } - -unlock: - vrf_map_unlock(vmap); - - return res; -} - /* called with rtnl lock held */ static int vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack) @@ -1790,6 +1744,53 @@ static int vrf_map_init(struct vrf_map *vmap) return 0; } +#ifdef CONFIG_SYSCTL +static bool vrf_strict_mode(struct vrf_map *vmap) +{ + bool strict_mode; + + vrf_map_lock(vmap); + strict_mode = vmap->strict_mode; + vrf_map_unlock(vmap); + + return strict_mode; +} + +static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode) +{ + bool *cur_mode; + int res = 0; + + vrf_map_lock(vmap); + + cur_mode = &vmap->strict_mode; + if (*cur_mode == new_mode) + goto unlock; + + if (*cur_mode) { + /* disable strict mode */ + *cur_mode = false; + } else { + if (vmap->shared_tables) { + /* we cannot allow strict_mode because there are some + * vrfs that share one or more tables. + */ + res = -EBUSY; + goto unlock; + } + + /* no tables are shared among vrfs, so we can go back + * to 1:1 association between a vrf with its table. + */ + *cur_mode = true; + } + +unlock: + vrf_map_unlock(vmap); + + return res; +} + static int vrf_shared_table_handler(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { @@ -1830,15 +1831,9 @@ static const struct ctl_table vrf_table[] = { { }, }; -/* Initialize per network namespace state */ -static int __net_init vrf_netns_init(struct net *net) +static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) { - struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); struct ctl_table *table; - int res; - - nn_vrf->add_fib_rules = true; - vrf_map_init(&nn_vrf->vmap); table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL); if (!table) @@ -1849,19 +1844,14 @@ static int __net_init vrf_netns_init(struct net *net) nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table); if (!nn_vrf->ctl_hdr) { - res = -ENOMEM; - goto free_table; + kfree(table); + return -ENOMEM; } return 0; - -free_table: - kfree(table); - - return res; } -static void __net_exit vrf_netns_exit(struct net *net) +static void vrf_netns_exit_sysctl(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); struct ctl_table *table; @@ -1870,6 +1860,32 @@ static void __net_exit vrf_netns_exit(struct net *net) unregister_net_sysctl_table(nn_vrf->ctl_hdr); kfree(table); } +#else +static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) +{ + return 0; +} + +static void vrf_netns_exit_sysctl(struct net *net) +{ +} +#endif + +/* Initialize per network namespace state */ +static int __net_init vrf_netns_init(struct net *net) +{ + struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); + + nn_vrf->add_fib_rules = true; + vrf_map_init(&nn_vrf->vmap); + + return vrf_netns_init_sysctl(net, nn_vrf); +} + +static void __net_exit vrf_netns_exit(struct net *net) +{ + vrf_netns_exit_sysctl(net); +} static struct pernet_operations vrf_net_ops __net_initdata = { .init = vrf_netns_init, diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh index 5274f4a1fba1..18b982d611de 100755 --- a/tools/testing/selftests/net/vrf_strict_mode_test.sh +++ b/tools/testing/selftests/net/vrf_strict_mode_test.sh @@ -379,6 +379,12 @@ if [ ! -x "$(command -v ip)" ]; then exit 0 fi +modprobe vrf &>/dev/null +if [ ! 
-e /proc/sys/net/vrf/strict_mode ]; then + echo "SKIP: vrf sysctl does not exist" + exit 0 +fi + cleanup &> /dev/null setup From 4a062d66b5a532a9da9bdae643e498b035173794 Mon Sep 17 00:00:00 2001 From: Chi Song Date: Thu, 23 Jul 2020 21:14:26 -0700 Subject: [PATCH 1545/2056] net: hyperv: dump TX indirection table to ethtool regs An imbalanced TX indirection table causes netvsc to have low performance. This table is created and managed during runtime. To help better diagnose performance issues caused by imbalanced tables, it needs make TX indirection tables visible. Because TX indirection table is driver specified information, so display it via ethtool register dump. Signed-off-by: Chi Song Reviewed-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e0327b88732c..d27b90b4e2bb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1958,6 +1958,23 @@ static int netvsc_set_features(struct net_device *ndev, return ret; } +static int netvsc_get_regs_len(struct net_device *netdev) +{ + return VRSS_SEND_TAB_SIZE * sizeof(u32); +} + +static void netvsc_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct net_device_context *ndc = netdev_priv(netdev); + u32 *regs_buff = p; + + /* increase the version, if buffer format is changed. */ + regs->version = 1; + + memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32)); +} + static u32 netvsc_get_msglevel(struct net_device *ndev) { struct net_device_context *ndev_ctx = netdev_priv(ndev); @@ -1974,6 +1991,8 @@ static void netvsc_set_msglevel(struct net_device *ndev, u32 val) static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, + .get_regs_len = netvsc_get_regs_len, + .get_regs = netvsc_get_regs, .get_msglevel = netvsc_get_msglevel, .set_msglevel = netvsc_set_msglevel, .get_link = ethtool_op_get_link, From 0cb09aff9d49d92305c3969fc84b785117412968 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 23 Jul 2020 01:03:00 +0300 Subject: [PATCH 1546/2056] net/flow_dissector: add packet hash dissection Retreive a hash value from the SKB and store it in the dissector key for future matching. Signed-off-by: Ariel Levkovich Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 4 ++++ include/net/flow_dissector.h | 9 +++++++++ net/core/flow_dissector.c | 17 +++++++++++++++++ 3 files changed, 30 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6a82d4a8229e..fa817a105517 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1342,6 +1342,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +void skb_flow_dissect_hash(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 4b6e36288ddd..cc10b10dc3a1 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -243,6 +243,14 @@ struct flow_dissector_key_ct { u32 ct_labels[4]; }; +/** + * struct flow_dissector_key_hash: + * @hash: hash value + */ +struct flow_dissector_key_hash { + u32 hash; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -271,6 +279,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */ FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */ FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */ + FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 142a8824f0a8..29806eb765cf 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -383,6 +383,23 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, } EXPORT_SYMBOL(skb_flow_dissect_tunnel_info); +void skb_flow_dissect_hash(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container) +{ + struct flow_dissector_key_hash *key; + + if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH)) + return; + + key = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_HASH, + target_container); + + key->hash = skb_get_hash_raw(skb); +} +EXPORT_SYMBOL(skb_flow_dissect_hash); + static enum flow_dissect_ret __skb_flow_dissect_mpls(const struct sk_buff *skb, struct flow_dissector *flow_dissector, From 5923b8f7fa218a9bccd730c0a9692635eb2fc740 Mon Sep 17 00:00:00 2001 From: Ariel Levkovich Date: Thu, 23 Jul 2020 01:03:01 +0300 Subject: [PATCH 1547/2056] net/sched: cls_flower: Add hash info to flow classification Adding new cls flower keys for hash value and hash mask and dissect the hash info from the skb into the flow key towards flow classication. Signed-off-by: Ariel Levkovich Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/pkt_cls.h | 3 +++ net/sched/cls_flower.c | 16 ++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 7576209d96f9..ee95f42fb0ec 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -578,6 +578,9 @@ enum { TCA_FLOWER_KEY_MPLS_OPTS, + TCA_FLOWER_KEY_HASH, /* u32 */ + TCA_FLOWER_KEY_HASH_MASK, /* u32 */ + __TCA_FLOWER_MAX, }; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index acd8e05c2ba5..a4f7ef1de7e7 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -64,6 +64,7 @@ struct fl_flow_key { }; } tp_range; struct flow_dissector_key_ct ct; + struct flow_dissector_key_hash hash; } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */ struct fl_flow_mask_range { @@ -318,6 +319,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp, skb_flow_dissect_ct(skb, &mask->dissector, &skb_key, fl_ct_info_to_flower_map, ARRAY_SIZE(fl_ct_info_to_flower_map)); + skb_flow_dissect_hash(skb, &mask->dissector, &skb_key); skb_flow_dissect(skb, &mask->dissector, &skb_key, 0); f = fl_mask_lookup(mask, &skb_key); @@ -695,6 +697,9 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY, .len = 128 / BITS_PER_BYTE }, [TCA_FLOWER_FLAGS] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_HASH] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_HASH_MASK] = { .type = NLA_U32 }, + }; static const struct nla_policy @@ -1626,6 +1631,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb, fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip); + fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH, + &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, + sizeof(key->hash.hash)); + if (tb[TCA_FLOWER_KEY_ENC_OPTS]) { ret = fl_set_enc_opt(tb, key, mask, extack); if (ret) @@ -1740,6 +1749,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts); FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_CT, ct); + FL_KEY_SET_IF_MASKED(mask, keys, cnt, + FLOW_DISSECTOR_KEY_HASH, hash); skb_flow_dissector_init(dissector, keys, cnt); } @@ -2960,6 +2971,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) goto nla_put_failure; + if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, + &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK, + sizeof(key->hash.hash))) + goto nla_put_failure; + return 0; nla_put_failure: From e024e008186bf9f4109c86b66dd60d0f926bc1fb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:43 +0200 Subject: [PATCH 1548/2056] bpfilter: fix up a sparse annotation The __user doesn't make sense when casting to an integer type, just switch to a uintptr_t cast which also removes the need for the __force. Signed-off-by: Christoph Hellwig Reviewed-by: Luc Van Oostenryck Signed-off-by: David S. 
Miller --- net/bpfilter/bpfilter_kern.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 2c31e82cb953..3bac5820062a 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -44,7 +44,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname, req.is_set = is_set; req.pid = current->pid; req.cmd = optname; - req.addr = (long __force __user)optval; + req.addr = (uintptr_t)optval; req.len = optlen; if (!bpfilter_ops.info.tgid) goto out; From c9ffebdde8deccf9ea3a7a97bcd84de0a35ddad7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:44 +0200 Subject: [PATCH 1549/2056] net/bpfilter: split __bpfilter_process_sockopt Split __bpfilter_process_sockopt into a low-level send request routine and the actual setsockopt hook to split the init time ping from the actual setsockopt processing. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/bpfilter/bpfilter_kern.c | 51 +++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 24 deletions(-) diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 3bac5820062a..78d561f2c54d 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -31,48 +31,51 @@ static void __stop_umh(void) shutdown_umh(); } -static int __bpfilter_process_sockopt(struct sock *sk, int optname, - char __user *optval, - unsigned int optlen, bool is_set) +static int bpfilter_send_req(struct mbox_request *req) { - struct mbox_request req; struct mbox_reply reply; loff_t pos; ssize_t n; - int ret = -EFAULT; - req.is_set = is_set; - req.pid = current->pid; - req.cmd = optname; - req.addr = (uintptr_t)optval; - req.len = optlen; if (!bpfilter_ops.info.tgid) - goto out; + return -EFAULT; pos = 0; - n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req), + n = kernel_write(bpfilter_ops.info.pipe_to_umh, req, sizeof(*req), &pos); - if (n != sizeof(req)) { + if (n != sizeof(*req)) { pr_err("write fail %zd\n", n); - __stop_umh(); - ret = -EFAULT; - goto out; + goto stop; } pos = 0; n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply), &pos); if (n != sizeof(reply)) { pr_err("read fail %zd\n", n); - __stop_umh(); - ret = -EFAULT; - goto out; + goto stop; } - ret = reply.status; -out: - return ret; + return reply.status; +stop: + __stop_umh(); + return -EFAULT; +} + +static int bpfilter_process_sockopt(struct sock *sk, int optname, + char __user *optval, unsigned int optlen, + bool is_set) +{ + struct mbox_request req = { + .is_set = is_set, + .pid = current->pid, + .cmd = optname, + .addr = (uintptr_t)optval, + .len = optlen, + }; + return bpfilter_send_req(&req); } static int start_umh(void) { + struct mbox_request req = { .pid = current->pid }; int err; /* fork usermode process */ @@ -82,7 +85,7 @@ static int start_umh(void) pr_info("Loaded bpfilter_umh pid %d\n", pid_nr(bpfilter_ops.info.tgid)); /* health check that usermode process started correctly */ - if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) { + if (bpfilter_send_req(&req) != 0) { shutdown_umh(); return -EFAULT; } @@ -103,7 +106,7 @@ static int __init load_umh(void) mutex_lock(&bpfilter_ops.lock); err = start_umh(); if (!err && IS_ENABLED(CONFIG_INET)) { - bpfilter_ops.sockopt = &__bpfilter_process_sockopt; + bpfilter_ops.sockopt = &bpfilter_process_sockopt; bpfilter_ops.start = &start_umh; } mutex_unlock(&bpfilter_ops.lock); From d200cf624c9247ab52b67d34d9e198262a23df31 Mon 
Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:45 +0200 Subject: [PATCH 1550/2056] bpfilter: reject kernel addresses The bpfilter user mode helper processes the optval address using process_vm_readv. Don't send it kernel addresses fed under set_fs(KERNEL_DS) as that won't work. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/bpfilter/bpfilter_kern.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 78d561f2c54d..00540457e5f4 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -70,6 +70,10 @@ static int bpfilter_process_sockopt(struct sock *sk, int optname, .addr = (uintptr_t)optval, .len = optlen, }; + if (uaccess_kernel()) { + pr_err("kernel access not supported\n"); + return -EFAULT; + } return bpfilter_send_req(&req); } From ba423fdaa589d972473083defedf9e862626d268 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:46 +0200 Subject: [PATCH 1551/2056] net: add a new sockptr_t type Add a sockptr_t type that can hold a pointer to either a user or kernel memory region, and simple helpers to copy to and from it. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 104 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 include/linux/sockptr.h diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h new file mode 100644 index 000000000000..700856e13ea0 --- /dev/null +++ b/include/linux/sockptr.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Christoph Hellwig. + * + * Support for "universal" pointers that can point to either kernel or userspace + * memory.
+ */ +#ifndef _LINUX_SOCKPTR_H +#define _LINUX_SOCKPTR_H + +#include +#include + +typedef struct { + union { + void *kernel; + void __user *user; + }; + bool is_kernel : 1; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return sockptr.is_kernel; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline sockptr_t USER_SOCKPTR(void __user *p) +{ + return (sockptr_t) { .user = p }; +} + +static inline bool sockptr_is_null(sockptr_t sockptr) +{ + return !sockptr.user && !sockptr.kernel; +} + +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + if (!sockptr_is_kernel(src)) + return copy_from_user(dst, src.user, size); + memcpy(dst, src.kernel, size); + return 0; +} + +static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +{ + if (!sockptr_is_kernel(dst)) + return copy_to_user(dst.user, src, size); + memcpy(dst.kernel, src, size); + return 0; +} + +static inline void *memdup_sockptr(sockptr_t src, size_t len) +{ + void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + return p; +} + +static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +{ + char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + p[len] = '\0'; + return p; +} + +static inline void sockptr_advance(sockptr_t sockptr, size_t len) +{ + if (sockptr_is_kernel(sockptr)) + sockptr.kernel += len; + else + sockptr.user += len; +} + +static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) +{ + if (sockptr_is_kernel(src)) { + size_t len = min(strnlen(src.kernel, count - 1) + 1, count); + + memcpy(dst, src.kernel, len); + return len; + } + return strncpy_from_user(dst, src.user, count); +} + +#endif /* _LINUX_SOCKPTR_H */ From b1ea9ff6aff2deae84eccaf0a07cd14912669680 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:47 +0200 Subject: [PATCH 1552/2056] net: switch copy_bpf_fprog_from_user to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/filter.h | 3 ++- net/core/filter.c | 6 +++--- net/core/sock.c | 6 ++++-- net/packet/af_packet.c | 4 ++-- 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 1c6b6d982bf4..d07a6e973a7d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -1276,7 +1277,7 @@ struct bpf_sockopt_kern { s32 retval; }; -int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len); +int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); struct bpf_sk_lookup_kern { u16 family; diff --git a/net/core/filter.c b/net/core/filter.c index 3fa16b8c0d61..29e3455122f7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -77,14 +77,14 @@ #include #include -int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) +int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len) { if (in_compat_syscall()) { struct compat_sock_fprog f32; if (len != sizeof(f32)) return -EINVAL; - if (copy_from_user(&f32, src, sizeof(f32))) + if (copy_from_sockptr(&f32, src, sizeof(f32))) return -EFAULT; memset(dst, 0, sizeof(*dst)); dst->len = f32.len; @@ -92,7 +92,7 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, void __user *src, int len) } else { if (len != sizeof(*dst)) return -EINVAL; - if (copy_from_user(dst, src, sizeof(*dst))) + if (copy_from_sockptr(dst, src, sizeof(*dst))) return -EFAULT; } diff --git a/net/core/sock.c b/net/core/sock.c index 6da54eac2b34..71fc7e4ddd06 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1063,7 +1063,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_ATTACH_FILTER: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), + optlen); if (!ret) ret = sk_attach_filter(&fprog, sk); break; @@ -1084,7 +1085,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_ATTACH_REUSEPORT_CBPF: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); + ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), + optlen); if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); break; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c240fb5de3f0..d8d4f78f78e4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1536,7 +1536,7 @@ static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new) } } -static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data, +static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, unsigned int len) { struct bpf_prog *new; @@ -1584,7 +1584,7 @@ static int fanout_set_data(struct packet_sock *po, char __user *data, { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: - return fanout_set_data_cbpf(po, data, len); + return fanout_set_data_cbpf(po, USER_SOCKPTR(data), len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: From 5790642b4748acbb5560f7a6fcb19f572297cf63 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:48 +0200 Subject: [PATCH 1553/2056] net: switch sock_setbindtodevice to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/core/sock.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 71fc7e4ddd06..5b55bc9397f2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -609,8 +609,7 @@ int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) } EXPORT_SYMBOL(sock_bindtoindex); -static int sock_setbindtodevice(struct sock *sk, char __user *optval, - int optlen) +static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) { int ret = -ENOPROTOOPT; #ifdef CONFIG_NETDEVICES @@ -632,7 +631,7 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval, memset(devname, 0, sizeof(devname)); ret = -EFAULT; - if (copy_from_user(devname, optval, optlen)) + if (copy_from_sockptr(devname, optval, optlen)) goto out; index = 0; @@ -840,7 +839,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, */ if (optname == SO_BINDTODEVICE) - return sock_setbindtodevice(sk, optval, optlen); + return sock_setbindtodevice(sk, USER_SOCKPTR(optval), optlen); if (optlen < sizeof(int)) return -EINVAL; From c34645ac25484968ddace9a6b140b70e72dc49b3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:49 +0200 Subject: [PATCH 1554/2056] net: switch sock_set_timeout to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/core/sock.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 5b55bc9397f2..8b9eddaff868 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -361,7 +361,8 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval) return sizeof(tv); } -static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval) +static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, + bool old_timeval) { struct __kernel_sock_timeval tv; @@ -371,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool if (optlen < sizeof(tv32)) return -EINVAL; - if (copy_from_user(&tv32, optval, sizeof(tv32))) + if (copy_from_sockptr(&tv32, optval, sizeof(tv32))) return -EFAULT; tv.tv_sec = tv32.tv_sec; tv.tv_usec = tv32.tv_usec; @@ -380,14 +381,14 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool if (optlen < sizeof(old_tv)) return -EINVAL; - if (copy_from_user(&old_tv, optval, sizeof(old_tv))) + if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv))) return -EFAULT; tv.tv_sec = old_tv.tv_sec; tv.tv_usec = old_tv.tv_usec; } else { if (optlen < sizeof(tv)) return -EINVAL; - if (copy_from_user(&tv, optval, sizeof(tv))) + if (copy_from_sockptr(&tv, optval, sizeof(tv))) return -EFAULT; } if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) @@ -1051,12 +1052,14 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_RCVTIMEO_OLD: case SO_RCVTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD); + ret = sock_set_timeout(&sk->sk_rcvtimeo, USER_SOCKPTR(optval), + optlen, optname == SO_RCVTIMEO_OLD); break; case SO_SNDTIMEO_OLD: case SO_SNDTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD); + ret = sock_set_timeout(&sk->sk_sndtimeo, USER_SOCKPTR(optval), + optlen, optname == SO_SNDTIMEO_OLD); break; case SO_ATTACH_FILTER: { From c8c1bbb6eb498109286739f8b6090e99313dd104 Mon Sep 17 00:00:00 2001 From: 
Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:50 +0200 Subject: [PATCH 1555/2056] net: switch sock_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Acked-by: Matthieu Baerts Signed-off-by: David S. Miller --- include/net/sock.h | 3 ++- net/core/sock.c | 26 ++++++++++++-------------- net/mptcp/protocol.c | 6 ++++-- net/socket.c | 3 ++- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 62e18fc8ac9f..bfb2fe2fc368 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -59,6 +59,7 @@ #include #include #include +#include #include #include @@ -1669,7 +1670,7 @@ void sock_pfree(struct sk_buff *skb); #endif int sock_setsockopt(struct socket *sock, int level, int op, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int sock_getsockopt(struct socket *sock, int level, int op, char __user *optval, int __user *optlen); diff --git a/net/core/sock.c b/net/core/sock.c index 8b9eddaff868..1444d7d53ba2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -826,7 +826,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf); */ int sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock_txtime sk_txtime; struct sock *sk = sock->sk; @@ -840,12 +840,12 @@ int sock_setsockopt(struct socket *sock, int level, int optname, */ if (optname == SO_BINDTODEVICE) - return sock_setbindtodevice(sk, USER_SOCKPTR(optval), optlen); + return sock_setbindtodevice(sk, optval, optlen); if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; valbool = val ?
1 : 0; @@ -958,7 +958,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, ret = -EINVAL; /* 1003.1g */ break; } - if (copy_from_user(&ling, optval, sizeof(ling))) { + if (copy_from_sockptr(&ling, optval, sizeof(ling))) { ret = -EFAULT; break; } @@ -1052,21 +1052,20 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_RCVTIMEO_OLD: case SO_RCVTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_rcvtimeo, USER_SOCKPTR(optval), + ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD); break; case SO_SNDTIMEO_OLD: case SO_SNDTIMEO_NEW: - ret = sock_set_timeout(&sk->sk_sndtimeo, USER_SOCKPTR(optval), + ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD); break; case SO_ATTACH_FILTER: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), - optlen); + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); if (!ret) ret = sk_attach_filter(&fprog, sk); break; @@ -1077,7 +1076,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, u32 ufd; ret = -EFAULT; - if (copy_from_user(&ufd, optval, sizeof(ufd))) + if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) break; ret = sk_attach_bpf(ufd, sk); @@ -1087,8 +1086,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_ATTACH_REUSEPORT_CBPF: { struct sock_fprog fprog; - ret = copy_bpf_fprog_from_user(&fprog, USER_SOCKPTR(optval), - optlen); + ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); if (!ret) ret = sk_reuseport_attach_filter(&fprog, sk); break; @@ -1099,7 +1097,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, u32 ufd; ret = -EFAULT; - if (copy_from_user(&ufd, optval, sizeof(ufd))) + if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) break; ret = sk_reuseport_attach_bpf(ufd, sk); @@ -1179,7 +1177,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, if (sizeof(ulval) != sizeof(val) && optlen >= sizeof(ulval) && - get_user(ulval, (unsigned long __user *)optval)) { + copy_from_sockptr(&ulval, optval, sizeof(ulval))) { ret = -EFAULT; break; } @@ -1222,7 +1220,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(struct sock_txtime)) { ret = -EINVAL; break; - } else if (copy_from_user(&sk_txtime, optval, + } else if (copy_from_sockptr(&sk_txtime, optval, sizeof(struct sock_txtime))) { ret = -EFAULT; break; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 979dfcd2aa14..7246847efa90 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1648,7 +1648,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return -EINVAL; } - ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); + ret = sock_setsockopt(ssock, SOL_SOCKET, optname, + USER_SOCKPTR(optval), optlen); if (ret == 0) { if (optname == SO_REUSEPORT) sk->sk_reuseport = ssock->sk->sk_reuseport; @@ -1659,7 +1660,8 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return ret; } - return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); + return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, + USER_SOCKPTR(optval), optlen); } static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, diff --git a/net/socket.c b/net/socket.c index 93846568c2fb..c97f83d879ae 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2130,7 +2130,8 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, } if (level == SOL_SOCKET && 
!sock_use_custom_sol_socket(sock)) - err = sock_setsockopt(sock, level, optname, optval, optlen); + err = sock_setsockopt(sock, level, optname, + USER_SOCKPTR(optval), optlen); else if (unlikely(!sock->ops->setsockopt)) err = -EOPNOTSUPP; else From c6d1b26a8fd4940fe5d4199311838f6c2aef0174 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:51 +0200 Subject: [PATCH 1556/2056] net/xfrm: switch xfrm_user_policy to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/xfrm.h | 8 +++++--- net/ipv4/ip_sockglue.c | 3 ++- net/ipv6/ipv6_sockglue.c | 3 ++- net/xfrm/xfrm_state.c | 6 +++--- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index f9e1fda82ddf..5e81868b574a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -1609,10 +1610,11 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); -int xfrm_user_policy(struct sock *sk, int optname, - u8 __user *optval, int optlen); +int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, + int optlen); #else -static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) +static inline int xfrm_user_policy(struct sock *sk, int optname, + sockptr_t optval, int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index a5ea02d7a183..da933f99b5d5 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1322,7 +1322,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EPERM; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; - err = xfrm_user_policy(sk, optname, optval, optlen); + err = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), + optlen); break; case IP_TRANSPARENT: diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index add8f7912299..56a74707c617 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -935,7 +935,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; - retv = xfrm_user_policy(sk, optname, optval, optlen); + retv = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), + optlen); break; case IPV6_ADDR_PREFERENCES: diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 8be2d926acc2..69520ad3d83b 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2264,7 +2264,7 @@ static bool km_is_alive(const struct km_event *c) return is_alive; } -int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen) +int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen) { int err; u8 *data; @@ -2274,7 +2274,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen if (in_compat_syscall()) return -EOPNOTSUPP; - if (!optval && !optlen) { + if (sockptr_is_null(optval) && !optlen) { xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); __sk_dst_reset(sk); @@ -2284,7 +2284,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen if (optlen <= 0 || optlen > PAGE_SIZE) return 
-EMSGSIZE; - data = memdup_user(optval, optlen); + data = memdup_sockptr(optval, optlen); if (IS_ERR(data)) return PTR_ERR(data); From 7e4b9dbabb2ad0d108d12939e34d4d330104ac6d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:52 +0200 Subject: [PATCH 1557/2056] netfilter: remove the unused user argument to do_update_counters Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/bridge/netfilter/ebtables.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index fe13108af1f5..12f8929667bf 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1242,9 +1242,8 @@ void ebt_unregister_table(struct net *net, struct ebt_table *table, /* userspace just supplied us with counters */ static int do_update_counters(struct net *net, const char *name, - struct ebt_counter __user *counters, - unsigned int num_counters, - const void __user *user, unsigned int len) + struct ebt_counter __user *counters, + unsigned int num_counters, unsigned int len) { int i, ret; struct ebt_counter *tmp; @@ -1299,7 +1298,7 @@ static int update_counters(struct net *net, const void __user *user, return -EINVAL; return do_update_counters(net, hlp.name, hlp.counters, - hlp.num_counters, user, len); + hlp.num_counters, len); } static inline int ebt_obj_to_user(char __user *um, const char *_name, @@ -2231,7 +2230,7 @@ static int compat_update_counters(struct net *net, void __user *user, return update_counters(net, user, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), - hlp.num_counters, user, len); + hlp.num_counters, len); } static int compat_do_ebt_get_ctl(struct sock *sk, int cmd, From ab214d1bf8c7ef1ed7af803a72491cb29edfa8f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:53 +0200 Subject: [PATCH 1558/2056] netfilter: switch xt_copy_counters to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/netfilter/x_tables.h | 4 ++-- net/ipv4/netfilter/arp_tables.c | 7 +++---- net/ipv4/netfilter/ip_tables.c | 7 +++---- net/ipv6/netfilter/ip6_tables.c | 6 +++--- net/netfilter/x_tables.c | 20 ++++++++++---------- 5 files changed, 21 insertions(+), 23 deletions(-) diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index b8b943ee7b8b..5deb099d156d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t, int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size); -void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info); +void *xt_copy_counters(sockptr_t arg, unsigned int len, + struct xt_counters_info *info); struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 2c8a4dad39d7..6d24b686c7f0 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -996,8 +996,7 @@ static int do_replace(struct net *net, const void __user *user, return ret; } -static int do_add_counters(struct net *net, const void __user *user, - unsigned int len) +static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1008,7 +1007,7 @@ static int do_add_counters(struct net *net, const void __user *user, struct arpt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1420,7 +1419,7 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned break; case ARPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 161901dd1cae..4697d09c98dc 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1151,8 +1151,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) } static int -do_add_counters(struct net *net, const void __user *user, - unsigned int len) +do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; @@ -1163,7 +1162,7 @@ do_add_counters(struct net *net, const void __user *user, struct ipt_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); @@ -1629,7 +1628,7 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index fd1f8f931231..a787aba30e2d 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1168,7 +1168,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) } static int -do_add_counters(struct net *net, const void __user *user, unsigned int len) +do_add_counters(struct net *net, sockptr_t arg, unsigned int len) { unsigned int i; struct xt_counters_info tmp; 
@@ -1179,7 +1179,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len) struct ip6t_entry *iter; unsigned int addend; - paddc = xt_copy_counters_from_user(user, len, &tmp); + paddc = xt_copy_counters(arg, len, &tmp); if (IS_ERR(paddc)) return PTR_ERR(paddc); t = xt_find_table_lock(net, AF_INET6, tmp.name); @@ -1637,7 +1637,7 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) break; case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), user, len); + ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); break; default: diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index 32bab45af7e4..b97eb4b538fd 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1028,9 +1028,9 @@ int xt_check_target(struct xt_tgchk_param *par, EXPORT_SYMBOL_GPL(xt_check_target); /** - * xt_copy_counters_from_user - copy counters and metadata from userspace + * xt_copy_counters - copy counters and metadata from a sockptr_t * - * @user: src pointer to userspace memory + * @arg: src sockptr * @len: alleged size of userspace memory * @info: where to store the xt_counters_info metadata * @@ -1047,8 +1047,8 @@ EXPORT_SYMBOL_GPL(xt_check_target); * Return: returns pointer that caller has to test via IS_ERR(). * If IS_ERR is false, caller has to vfree the pointer. */ -void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info) +void *xt_copy_counters(sockptr_t arg, unsigned int len, + struct xt_counters_info *info) { void *mem; u64 size; @@ -1062,12 +1062,12 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, return ERR_PTR(-EINVAL); len -= sizeof(compat_tmp); - if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) + if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0) return ERR_PTR(-EFAULT); memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; - user += sizeof(compat_tmp); + sockptr_advance(arg, sizeof(compat_tmp)); } else #endif { @@ -1075,10 +1075,10 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, return ERR_PTR(-EINVAL); len -= sizeof(*info); - if (copy_from_user(info, user, sizeof(*info)) != 0) + if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - user += sizeof(*info); + sockptr_advance(arg, sizeof(*info)); } info->name[sizeof(info->name) - 1] = '\0'; @@ -1092,13 +1092,13 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, if (!mem) return ERR_PTR(-ENOMEM); - if (copy_from_user(mem, user, len) == 0) + if (copy_from_sockptr(mem, arg, len) == 0) return mem; vfree(mem); return ERR_PTR(-EFAULT); } -EXPORT_SYMBOL_GPL(xt_copy_counters_from_user); +EXPORT_SYMBOL_GPL(xt_copy_counters); #ifdef CONFIG_COMPAT int xt_compat_target_offset(const struct xt_target *target) From c2f12630c60ff33a9cafd221646053fc10ec59b6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:54 +0200 Subject: [PATCH 1559/2056] netfilter: switch nf_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/netfilter.h | 6 ++++-- net/bridge/netfilter/ebtables.c | 37 +++++++++++++++------------------ net/decnet/af_decnet.c | 3 ++- net/ipv4/ip_sockglue.c | 3 ++- net/ipv4/netfilter/arp_tables.c | 28 ++++++++++++------------- net/ipv4/netfilter/ip_tables.c | 24 ++++++++++----------- net/ipv6/ipv6_sockglue.c | 3 ++- net/ipv6/netfilter/ip6_tables.c | 24 ++++++++++----------- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++-- net/netfilter/nf_sockopt.c | 2 +- 10 files changed, 68 insertions(+), 66 deletions(-) diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 711b4d4486f0..0101747de549 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -13,6 +13,7 @@ #include #include #include +#include #include static inline int NF_DROP_GETERR(int verdict) @@ -163,7 +164,8 @@ struct nf_sockopt_ops { /* Non-inclusive ranges: use 0/0/NULL to never get called. */ int set_optmin; int set_optmax; - int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); + int (*set)(struct sock *sk, int optval, sockptr_t arg, + unsigned int len); int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); @@ -338,7 +340,7 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, } /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 12f8929667bf..d35173e803d3 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1063,14 +1063,13 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, } /* replace the table */ -static int do_replace(struct net *net, const void __user *user, - unsigned int len) +static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, countersize; struct ebt_table_info *newinfo; struct ebt_replace tmp; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) @@ -1286,12 +1285,11 @@ static int do_update_counters(struct net *net, const char *name, return ret; } -static int update_counters(struct net *net, const void __user *user, - unsigned int len) +static int update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct ebt_replace hlp; - if (copy_from_user(&hlp, user, sizeof(hlp))) + if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) @@ -2079,7 +2077,7 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user, static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, - void __user *user, unsigned int len) + sockptr_t arg, unsigned int len) { struct compat_ebt_replace tmp; int i; @@ -2087,7 +2085,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, if (len < sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, user, sizeof(tmp))) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp))) return -EFAULT; if (len != sizeof(tmp) + tmp.entries_size) @@ -2114,8 +2112,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl, return 0; } -static int compat_do_replace(struct net *net, void __user *user, - unsigned int len) 
+static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret, i, countersize, size64; struct ebt_table_info *newinfo; @@ -2123,10 +2120,10 @@ static int compat_do_replace(struct net *net, void __user *user, struct ebt_entries_buf_state state; void *entries_tmp; - ret = compat_copy_ebt_replace_from_user(&tmp, user, len); + ret = compat_copy_ebt_replace_from_user(&tmp, arg, len); if (ret) { /* try real handler in case userland supplied needed padding */ - if (ret == -EINVAL && do_replace(net, user, len) == 0) + if (ret == -EINVAL && do_replace(net, arg, len) == 0) ret = 0; return ret; } @@ -2217,17 +2214,17 @@ static int compat_do_replace(struct net *net, void __user *user, goto free_entries; } -static int compat_update_counters(struct net *net, void __user *user, +static int compat_update_counters(struct net *net, sockptr_t arg, unsigned int len) { struct compat_ebt_replace hlp; - if (copy_from_user(&hlp, user, sizeof(hlp))) + if (copy_from_sockptr(&hlp, arg, sizeof(hlp))) return -EFAULT; /* try real handler in case userland supplied needed padding */ if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter)) - return update_counters(net, user, len); + return update_counters(net, arg, len); return do_update_counters(net, hlp.name, compat_ptr(hlp.counters), hlp.num_counters, len); @@ -2368,7 +2365,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) return ret; } -static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, +static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { struct net *net = sock_net(sk); @@ -2381,18 +2378,18 @@ static int do_ebt_set_ctl(struct sock *sk, int cmd, void __user *user, case EBT_SO_SET_ENTRIES: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(net, user, len); + ret = compat_do_replace(net, arg, len); else #endif - ret = do_replace(net, user, len); + ret = do_replace(net, arg, len); break; case EBT_SO_SET_COUNTERS: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_update_counters(net, user, len); + ret = compat_update_counters(net, arg, len); else #endif - ret = update_counters(net, user, len); + ret = update_counters(net, arg, len); break; default: ret = -EINVAL; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7d7ae2dd69b8..7d51ab608fb3 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -1332,7 +1332,8 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && optname != DSO_STREAM && optname != DSO_SEQPACKET) - err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); + err = nf_setsockopt(sk, PF_DECnet, optname, + USER_SOCKPTR(optval), optlen); #endif return err; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index da933f99b5d5..42befbf12846 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1422,7 +1422,8 @@ int ip_setsockopt(struct sock *sk, int level, optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); + err = nf_setsockopt(sk, PF_INET, optname, USER_SOCKPTR(optval), + optlen); #endif return err; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 6d24b686c7f0..f5b26ef17820 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1,4 
+1,4 @@ -// SPDX-License-Identifier: GPL-2.0-only + /* * Packet matching code for ARP packets. * @@ -947,8 +947,7 @@ static int __do_replace(struct net *net, const char *name, return ret; } -static int do_replace(struct net *net, const void __user *user, - unsigned int len) +static int do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct arpt_replace tmp; @@ -956,7 +955,7 @@ static int do_replace(struct net *net, const void __user *user, void *loc_cpu_entry; struct arpt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -972,8 +971,8 @@ static int do_replace(struct net *net, const void __user *user, return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1244,8 +1243,7 @@ static int translate_compat_table(struct net *net, return ret; } -static int compat_do_replace(struct net *net, void __user *user, - unsigned int len) +static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_arpt_replace tmp; @@ -1253,7 +1251,7 @@ static int compat_do_replace(struct net *net, void __user *user, void *loc_cpu_entry; struct arpt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1269,7 +1267,8 @@ static int compat_do_replace(struct net *net, void __user *user, return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1401,7 +1400,8 @@ static int compat_get_entries(struct net *net, } #endif -static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, + unsigned int len) { int ret; @@ -1412,14 +1412,14 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned case ARPT_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case ARPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4697d09c98dc..f2a9680303d8 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1102,7 +1102,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, const void __user *user, unsigned int len) +do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct ipt_replace tmp; @@ -1110,7 +1110,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) void *loc_cpu_entry; struct ipt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1126,8 +1126,8 @@ do_replace(struct net *net, const void __user 
*user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1484,7 +1484,7 @@ translate_compat_table(struct net *net, } static int -compat_do_replace(struct net *net, void __user *user, unsigned int len) +compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_ipt_replace tmp; @@ -1492,7 +1492,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) void *loc_cpu_entry; struct ipt_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1508,8 +1508,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1610,7 +1610,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr, #endif static int -do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ipt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; @@ -1621,14 +1621,14 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) case IPT_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case IPT_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 56a74707c617..85892b35cff7 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -996,7 +996,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); + err = nf_setsockopt(sk, PF_INET6, optname, USER_SOCKPTR(optval), + optlen); #endif return err; } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index a787aba30e2d..1d52957a413f 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1119,7 +1119,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks, } static int -do_replace(struct net *net, const void __user *user, unsigned int len) +do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct ip6t_replace tmp; @@ -1127,7 +1127,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len) void *loc_cpu_entry; struct ip6t_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1143,8 +1143,8 @@ do_replace(struct net *net, const void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + 
sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1493,7 +1493,7 @@ translate_compat_table(struct net *net, } static int -compat_do_replace(struct net *net, void __user *user, unsigned int len) +compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) { int ret; struct compat_ip6t_replace tmp; @@ -1501,7 +1501,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) void *loc_cpu_entry; struct ip6t_entry *iter; - if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) + if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ @@ -1517,8 +1517,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), - tmp.size) != 0) { + sockptr_advance(arg, sizeof(tmp)); + if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1619,7 +1619,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, #endif static int -do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ip6t_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len) { int ret; @@ -1630,14 +1630,14 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) case IP6T_SO_SET_REPLACE: #ifdef CONFIG_COMPAT if (in_compat_syscall()) - ret = compat_do_replace(sock_net(sk), user, len); + ret = compat_do_replace(sock_net(sk), arg, len); else #endif - ret = do_replace(sock_net(sk), user, len); + ret = do_replace(sock_net(sk), arg, len); break; case IP6T_SO_SET_ADD_COUNTERS: - ret = do_add_counters(sock_net(sk), USER_SOCKPTR(user), len); + ret = do_add_counters(sock_net(sk), arg, len); break; default: diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 4af83f466dfc..bcac316addab 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2434,7 +2434,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest, } static int -do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) +do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len) { struct net *net = sock_net(sk); int ret; @@ -2458,7 +2458,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) return -EINVAL; } - if (copy_from_user(arg, user, len) != 0) + if (copy_from_sockptr(arg, ptr, len) != 0) return -EFAULT; /* Handle daemons since they have another lock */ diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c index 90469b1f628a..34afcd03b6f6 100644 --- a/net/netfilter/nf_sockopt.c +++ b/net/netfilter/nf_sockopt.c @@ -89,7 +89,7 @@ static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, return ops; } -int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, sockptr_t opt, unsigned int len) { struct nf_sockopt_ops *ops; From b03afaa82ece13b2a008f0e3a7127bead578e3e6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:55 +0200 Subject: [PATCH 1560/2056] bpfilter: switch bpfilter_ip_set_sockopt to sockptr_t This is mostly to prepare for cleaning up the callers, as bpfilter by design can't handle kernel pointers. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/bpfilter.h | 6 +++--- net/bpfilter/bpfilter_kern.c | 6 +++--- net/ipv4/bpfilter/sockopt.c | 8 ++++---- net/ipv4/ip_sockglue.c | 3 ++- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index 9b114c718a76..2ae3c8e1d83c 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -4,9 +4,10 @@ #include #include +#include struct sock; -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); @@ -16,8 +17,7 @@ struct bpfilter_umh_ops { struct umd_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ struct mutex lock; - int (*sockopt)(struct sock *sk, int optname, - char __user *optval, + int (*sockopt)(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set); int (*start)(void); }; diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index 00540457e5f4..f580c3344cb3 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c @@ -60,17 +60,17 @@ static int bpfilter_send_req(struct mbox_request *req) } static int bpfilter_process_sockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, bool is_set) { struct mbox_request req = { .is_set = is_set, .pid = current->pid, .cmd = optname, - .addr = (uintptr_t)optval, + .addr = (uintptr_t)optval.user, .len = optlen, }; - if (uaccess_kernel()) { + if (uaccess_kernel() || sockptr_is_kernel(optval)) { pr_err("kernel access not supported\n"); return -EFAULT; } diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 9063c6767d34..1b34cb9a7708 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -21,8 +21,7 @@ void bpfilter_umh_cleanup(struct umd_info *info) } EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup); -static int bpfilter_mbox_request(struct sock *sk, int optname, - char __user *optval, +static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set) { int err; @@ -52,7 +51,7 @@ static int bpfilter_mbox_request(struct sock *sk, int optname, return err; } -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { return bpfilter_mbox_request(sk, optname, optval, optlen, true); @@ -66,7 +65,8 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, if (get_user(len, optlen)) return -EFAULT; - return bpfilter_mbox_request(sk, optname, optval, len, false); + return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, + false); } static int __init bpfilter_sockopt_init(void) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 42befbf12846..36f746e01741 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1414,7 +1414,8 @@ int ip_setsockopt(struct sock *sk, int level, #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_SET_REPLACE && optname < BPFILTER_IPT_SET_MAX) - err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen); + err = bpfilter_ip_set_sockopt(sk, optname, USER_SOCKPTR(optval), + optlen); #endif #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ 
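The sockptr_t conversions above and below all repeat one pattern, illustrated by the minimal sketch that follows. It is not taken from the series: example_set_mark() is a made-up handler used only to show the shape of the change. A handler that used to take char __user *optval now takes sockptr_t and copies the option value with copy_from_sockptr(), so the same code path can serve both a user pointer from setsockopt() and, once later patches drop the set_fs() dance, an in-kernel pointer.

#include <linux/sockptr.h>
#include <net/sock.h>

/* Hypothetical setsockopt-style handler: the value may live in either
 * user or kernel memory; copy_from_sockptr() picks the right copy.
 */
static int example_set_mark(struct sock *sk, sockptr_t optval,
			    unsigned int optlen)
{
	u32 val;

	if (optlen < sizeof(val))
		return -EINVAL;
	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;
	sk->sk_mark = val;	/* use the copied value as usual */
	return 0;
}

/*
 * Call sites wrap whichever pointer they actually hold:
 *   example_set_mark(sk, USER_SOCKPTR(optval), optlen);        user pointer
 *   example_set_mark(sk, KERNEL_SOCKPTR(&kval), sizeof(kval)); kernel pointer
 */

The payoff of this design is that the handler itself no longer cares where the buffer lives, which is exactly what the bpf-cgroup getsockopt/setsockopt hooks need.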
From 01ccb5b48f08f59ca3746d59221f4990aec1b194 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:56 +0200 Subject: [PATCH 1561/2056] net/ipv4: switch ip_mroute_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/mroute.h | 5 +++-- net/ipv4/ip_sockglue.c | 3 ++- net/ipv4/ipmr.c | 14 +++++++------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 9a36fad9e068..6cbbfe94348c 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -8,6 +8,7 @@ #include #include #include +#include #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) @@ -15,7 +16,7 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } -int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); @@ -23,7 +24,7 @@ int ip_mr_init(void); bool ipmr_rule_default(const struct fib_rule *rule); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 36f746e01741..ac495b0cff8f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -925,7 +925,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optname == IP_ROUTER_ALERT) return ip_ra_control(sk, val ? 1 : 0, NULL); if (ip_mroute_opt(optname)) - return ip_mroute_setsockopt(sk, optname, optval, optlen); + return ip_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), + optlen); err = 0; if (needs_rtnl) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 678639c01e48..cdf3a40f9ff5 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1341,7 +1341,7 @@ static void mrtsock_destruct(struct sock *sk) * MOSPF/PIM router set up we can clean this up. 
*/ -int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, +int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen) { struct net *net = sock_net(sk); @@ -1413,7 +1413,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (copy_from_user(&vif, optval, sizeof(vif))) { + if (copy_from_sockptr(&vif, optval, sizeof(vif))) { ret = -EFAULT; break; } @@ -1441,7 +1441,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (copy_from_user(&mfc, optval, sizeof(mfc))) { + if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) { ret = -EFAULT; break; } @@ -1459,7 +1459,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1471,7 +1471,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1486,7 +1486,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(val, (int __user *)optval)) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ret = -EFAULT; break; } @@ -1508,7 +1508,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, ret = -EINVAL; break; } - if (get_user(uval, (u32 __user *)optval)) { + if (copy_from_sockptr(&uval, optval, sizeof(uval))) { ret = -EFAULT; break; } From de40a3e88311b6f0fc79b876a4768bf2d99f9aae Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:57 +0200 Subject: [PATCH 1562/2056] net/ipv4: merge ip_options_get and ip_options_get_from_user Use the sockptr_t type to merge the versions. Signed-off-by: Christoph Hellwig Signed-off-by: David S.
Miller --- include/net/ip.h | 5 ++--- net/ipv4/ip_options.c | 45 +++++++++++------------------------------- net/ipv4/ip_sockglue.c | 7 ++++--- 3 files changed, 18 insertions(+), 39 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 3d34acc95ca8..d66ad3a95220 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -707,9 +708,7 @@ int __ip_options_compile(struct net *net, struct ip_options *opt, int ip_options_compile(struct net *net, struct ip_options *opt, struct sk_buff *skb); int ip_options_get(struct net *net, struct ip_options_rcu **optp, - unsigned char *data, int optlen); -int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, - unsigned char __user *data, int optlen); + sockptr_t data, int optlen); void ip_options_undo(struct ip_options *opt); void ip_forward_options(struct sk_buff *skb); int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev); diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index ddaa01ec2bce..948747aac4e2 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -519,15 +519,20 @@ void ip_options_undo(struct ip_options *opt) } } -static struct ip_options_rcu *ip_options_get_alloc(const int optlen) +int ip_options_get(struct net *net, struct ip_options_rcu **optp, + sockptr_t data, int optlen) { - return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), - GFP_KERNEL); -} + struct ip_options_rcu *opt; + + opt = kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3), + GFP_KERNEL); + if (!opt) + return -ENOMEM; + if (optlen && copy_from_sockptr(opt->opt.__data, data, optlen)) { + kfree(opt); + return -EFAULT; + } -static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp, - struct ip_options_rcu *opt, int optlen) -{ while (optlen & 3) opt->opt.__data[optlen++] = IPOPT_END; opt->opt.optlen = optlen; @@ -540,32 +545,6 @@ static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp, return 0; } -int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp, - unsigned char __user *data, int optlen) -{ - struct ip_options_rcu *opt = ip_options_get_alloc(optlen); - - if (!opt) - return -ENOMEM; - if (optlen && copy_from_user(opt->opt.__data, data, optlen)) { - kfree(opt); - return -EFAULT; - } - return ip_options_get_finish(net, optp, opt, optlen); -} - -int ip_options_get(struct net *net, struct ip_options_rcu **optp, - unsigned char *data, int optlen) -{ - struct ip_options_rcu *opt = ip_options_get_alloc(optlen); - - if (!opt) - return -ENOMEM; - if (optlen) - memcpy(opt->opt.__data, data, optlen); - return ip_options_get_finish(net, optp, opt, optlen); -} - void ip_forward_options(struct sk_buff *skb) { struct ip_options *opt = &(IPCB(skb)->opt); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ac495b0cff8f..b12f39b52008 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -280,7 +280,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, err = cmsg->cmsg_len - sizeof(struct cmsghdr); /* Our caller is responsible for freeing ipc->opt */ - err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), + err = ip_options_get(net, &ipc->opt, + KERNEL_SOCKPTR(CMSG_DATA(cmsg)), err < 40 ? 
err : 40); if (err) return err; @@ -940,8 +941,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optlen > 40) goto e_inval; - err = ip_options_get_from_user(sock_net(sk), &opt, - optval, optlen); + err = ip_options_get(sock_net(sk), &opt, USER_SOCKPTR(optval), + optlen); if (err) break; old = rcu_dereference_protected(inet->inet_opt, From 89654c5fcd511d9fe26ec376f5e855581aa44802 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:58 +0200 Subject: [PATCH 1563/2056] net/ipv4: switch do_ip_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/ip_sockglue.c | 68 ++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index b12f39b52008..f7f1507b89fe 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -683,15 +683,15 @@ static int set_mcast_msfilter(struct sock *sk, int ifindex, return -EADDRNOTAVAIL; } -static int copy_group_source_from_user(struct group_source_req *greqs, - void __user *optval, int optlen) +static int copy_group_source_from_sockptr(struct group_source_req *greqs, + sockptr_t optval, int optlen) { if (in_compat_syscall()) { struct compat_group_source_req gr32; if (optlen != sizeof(gr32)) return -EINVAL; - if (copy_from_user(&gr32, optval, sizeof(gr32))) + if (copy_from_sockptr(&gr32, optval, sizeof(gr32))) return -EFAULT; greqs->gsr_interface = gr32.gsr_interface; greqs->gsr_group = gr32.gsr_group; @@ -699,7 +699,7 @@ static int copy_group_source_from_user(struct group_source_req *greqs, } else { if (optlen != sizeof(*greqs)) return -EINVAL; - if (copy_from_user(greqs, optval, sizeof(*greqs))) + if (copy_from_sockptr(greqs, optval, sizeof(*greqs))) return -EFAULT; } @@ -707,14 +707,14 @@ static int copy_group_source_from_user(struct group_source_req *greqs, } static int do_mcast_group_source(struct sock *sk, int optname, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct group_source_req greqs; struct ip_mreq_source mreqs; struct sockaddr_in *psin; int omode, add, err; - err = copy_group_source_from_user(&greqs, optval, optlen); + err = copy_group_source_from_sockptr(&greqs, optval, optlen); if (err) return err; @@ -754,8 +754,7 @@ static int do_mcast_group_source(struct sock *sk, int optname, return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface); } -static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, - int optlen) +static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) { struct group_filter *gsf = NULL; int err; @@ -765,7 +764,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, if (optlen > sysctl_optmem_max) return -ENOBUFS; - gsf = memdup_user(optval, optlen); + gsf = memdup_sockptr(optval, optlen); if (IS_ERR(gsf)) return PTR_ERR(gsf); @@ -786,7 +785,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, void __user *optval, return err; } -static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, +static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) { const int size0 = offsetof(struct compat_group_filter, gf_slist); @@ -806,7 +805,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ err = -EFAULT; - if (copy_from_user(gf32, optval, 
optlen)) + if (copy_from_sockptr(gf32, optval, optlen)) goto out_free_gsf; /* numsrc >= (4G-140)/128 overflow in 32 bits */ @@ -831,7 +830,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, void __user *optval, } static int ip_mcast_join_leave(struct sock *sk, int optname, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct ip_mreqn mreq = { }; struct sockaddr_in *psin; @@ -839,7 +838,7 @@ static int ip_mcast_join_leave(struct sock *sk, int optname, if (optlen < sizeof(struct group_req)) return -EINVAL; - if (copy_from_user(&greq, optval, sizeof(greq))) + if (copy_from_sockptr(&greq, optval, sizeof(greq))) return -EFAULT; psin = (struct sockaddr_in *)&greq.gr_group; @@ -853,7 +852,7 @@ static int ip_mcast_join_leave(struct sock *sk, int optname, } static int compat_ip_mcast_join_leave(struct sock *sk, int optname, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct compat_group_req greq; struct ip_mreqn mreq = { }; @@ -861,7 +860,7 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, if (optlen < sizeof(struct compat_group_req)) return -EINVAL; - if (copy_from_user(&greq, optval, sizeof(greq))) + if (copy_from_sockptr(&greq, optval, sizeof(greq))) return -EFAULT; psin = (struct sockaddr_in *)&greq.gr_group; @@ -875,8 +874,8 @@ static int compat_ip_mcast_join_leave(struct sock *sk, int optname, return ip_mc_leave_group(sk, &mreq); } -static int do_ip_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, unsigned int optlen) +static int do_ip_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -910,12 +909,12 @@ static int do_ip_setsockopt(struct sock *sk, int level, case IP_RECVFRAGSIZE: case IP_RECVERR_RFC4884: if (optlen >= sizeof(int)) { - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; } else if (optlen >= sizeof(char)) { unsigned char ucval; - if (get_user(ucval, (unsigned char __user *) optval)) + if (copy_from_sockptr(&ucval, optval, sizeof(ucval))) return -EFAULT; val = (int) ucval; } @@ -926,8 +925,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optname == IP_ROUTER_ALERT) return ip_ra_control(sk, val ? 
1 : 0, NULL); if (ip_mroute_opt(optname)) - return ip_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), - optlen); + return ip_mroute_setsockopt(sk, optname, optval, optlen); err = 0; if (needs_rtnl) @@ -941,8 +939,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optlen > 40) goto e_inval; - err = ip_options_get(sock_net(sk), &opt, USER_SOCKPTR(optval), - optlen); + err = ip_options_get(sock_net(sk), &opt, optval, optlen); if (err) break; old = rcu_dereference_protected(inet->inet_opt, @@ -1140,17 +1137,17 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { - if (copy_from_user(&mreq, optval, sizeof(mreq))) + if (copy_from_sockptr(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); if (optlen >= sizeof(struct ip_mreq)) { - if (copy_from_user(&mreq, optval, - sizeof(struct ip_mreq))) + if (copy_from_sockptr(&mreq, optval, + sizeof(struct ip_mreq))) break; } else if (optlen >= sizeof(struct in_addr)) { - if (copy_from_user(&mreq.imr_address, optval, - sizeof(struct in_addr))) + if (copy_from_sockptr(&mreq.imr_address, optval, + sizeof(struct in_addr))) break; } } @@ -1202,11 +1199,12 @@ static int do_ip_setsockopt(struct sock *sk, int level, goto e_inval; err = -EFAULT; if (optlen >= sizeof(struct ip_mreqn)) { - if (copy_from_user(&mreq, optval, sizeof(mreq))) + if (copy_from_sockptr(&mreq, optval, sizeof(mreq))) break; } else { memset(&mreq, 0, sizeof(mreq)); - if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq))) + if (copy_from_sockptr(&mreq, optval, + sizeof(struct ip_mreq))) break; } @@ -1226,7 +1224,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -ENOBUFS; break; } - msf = memdup_user(optval, optlen); + msf = memdup_sockptr(optval, optlen); if (IS_ERR(msf)) { err = PTR_ERR(msf); break; @@ -1257,7 +1255,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, if (optlen != sizeof(struct ip_mreq_source)) goto e_inval; - if (copy_from_user(&mreqs, optval, sizeof(mreqs))) { + if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) { err = -EFAULT; break; } @@ -1324,8 +1322,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, err = -EPERM; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) break; - err = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), - optlen); + err = xfrm_user_policy(sk, optname, optval, optlen); break; case IP_TRANSPARENT: @@ -1412,7 +1409,8 @@ int ip_setsockopt(struct sock *sk, int level, if (level != SOL_IP) return -ENOPROTOOPT; - err = do_ip_setsockopt(sk, level, optname, optval, optlen); + err = do_ip_setsockopt(sk, level, optname, USER_SOCKPTR(optval), + optlen); #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_SET_REPLACE && optname < BPFILTER_IPT_SET_MAX) From b43c6153132c92745317f92174af84f57b160b76 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:08:59 +0200 Subject: [PATCH 1564/2056] net/ipv6: switch ip6_mroute_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/mroute6.h | 8 ++++---- net/ipv6/ip6mr.c | 17 +++++++++-------- net/ipv6/ipv6_sockglue.c | 3 ++- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index c4a45859f586..bc351a85ce9b 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -8,6 +8,7 @@ #include #include #include +#include #include #ifdef CONFIG_IPV6_MROUTE @@ -25,7 +26,7 @@ static inline int ip6_mroute_opt(int opt) struct sock; #ifdef CONFIG_IPV6_MROUTE -extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); @@ -33,9 +34,8 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); #else -static inline -int ip6_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, unsigned int optlen) +static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 1f4d20e97c07..06b0d2c329b9 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1629,7 +1629,8 @@ EXPORT_SYMBOL(mroute6_is_socket); * MOSPF/PIM router set up we can clean this up. */ -int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen) +int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, + unsigned int optlen) { int ret, parent = 0; struct mif6ctl vif; @@ -1665,7 +1666,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_ADD_MIF: if (optlen < sizeof(vif)) return -EINVAL; - if (copy_from_user(&vif, optval, sizeof(vif))) + if (copy_from_sockptr(&vif, optval, sizeof(vif))) return -EFAULT; if (vif.mif6c_mifi >= MAXMIFS) return -ENFILE; @@ -1678,7 +1679,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_DEL_MIF: if (optlen < sizeof(mifi_t)) return -EINVAL; - if (copy_from_user(&mifi, optval, sizeof(mifi_t))) + if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t))) return -EFAULT; rtnl_lock(); ret = mif6_delete(mrt, mifi, 0, NULL); @@ -1697,7 +1698,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns case MRT6_DEL_MFC_PROXY: if (optlen < sizeof(mfc)) return -EINVAL; - if (copy_from_user(&mfc, optval, sizeof(mfc))) + if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) return -EFAULT; if (parent == 0) parent = mfc.mf6cc_parent; @@ -1718,7 +1719,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(flags)) return -EINVAL; - if (get_user(flags, (int __user *)optval)) + if (copy_from_sockptr(&flags, optval, sizeof(flags))) return -EFAULT; rtnl_lock(); mroute_clean_tables(mrt, flags); @@ -1735,7 +1736,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(v)) return -EINVAL; - if (get_user(v, (int __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; mrt->mroute_do_assert = v; return 0; @@ -1748,7 +1749,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(v)) return -EINVAL; - if (get_user(v, 
(int __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; v = !!v; rtnl_lock(); @@ -1769,7 +1770,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns if (optlen != sizeof(u32)) return -EINVAL; - if (get_user(v, (u32 __user *)optval)) + if (copy_from_sockptr(&v, optval, sizeof(v))) return -EFAULT; /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */ if (v != RT_TABLE_DEFAULT && v >= 100000000) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 85892b35cff7..119dfaf5f4bb 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -337,7 +337,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, valbool = (val != 0); if (ip6_mroute_opt(optname)) - return ip6_mroute_setsockopt(sk, optname, optval, optlen); + return ip6_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), + optlen); if (needs_rtnl) rtnl_lock(); From ff6a4cf214effa0f600fff0eff70c60bcab94ecb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:00 +0200 Subject: [PATCH 1565/2056] net/ipv6: split up ipv6_flowlabel_opt Split ipv6_flowlabel_opt into a subfunction for each action and a small wrapper. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv6/ip6_flowlabel.c | 349 +++++++++++++++++++++------------------ 1 file changed, 186 insertions(+), 163 deletions(-) diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index ce4fbba4acce..817b4f379009 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -533,185 +533,208 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, return -ENOENT; } +#define socklist_dereference(__sflp) \ + rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock)) + +static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_fl_socklist __rcu **sflp; + struct ipv6_fl_socklist *sfl; + + if (freq->flr_flags & IPV6_FL_F_REFLECT) { + if (sk->sk_protocol != IPPROTO_TCP) + return -ENOPROTOOPT; + if (!np->repflow) + return -ESRCH; + np->flow_label = 0; + np->repflow = 0; + return 0; + } + + spin_lock_bh(&ip6_sk_fl_lock); + for (sflp = &np->ipv6_fl_list; + (sfl = socklist_dereference(*sflp)) != NULL; + sflp = &sfl->next) { + if (sfl->fl->label == freq->flr_label) + goto found; + } + spin_unlock_bh(&ip6_sk_fl_lock); + return -ESRCH; +found: + if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK)) + np->flow_label &= ~IPV6_FLOWLABEL_MASK; + *sflp = sfl->next; + spin_unlock_bh(&ip6_sk_fl_lock); + fl_release(sfl->fl); + kfree_rcu(sfl, rcu); + return 0; +} + +static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct net *net = sock_net(sk); + struct ipv6_fl_socklist *sfl; + int err; + + rcu_read_lock_bh(); + for_each_sk_fl_rcu(np, sfl) { + if (sfl->fl->label == freq->flr_label) { + err = fl6_renew(sfl->fl, freq->flr_linger, + freq->flr_expires); + rcu_read_unlock_bh(); + return err; + } + } + rcu_read_unlock_bh(); + + if (freq->flr_share == IPV6_FL_S_NONE && + ns_capable(net->user_ns, CAP_NET_ADMIN)) { + struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label); + + if (fl) { + err = fl6_renew(fl, freq->flr_linger, + freq->flr_expires); + fl_release(fl); + return err; + } + } + return -ESRCH; +} + +static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, + void __user *optval, int optlen) +{ + struct 
ipv6_fl_socklist *sfl, *sfl1 = NULL; + struct ip6_flowlabel *fl, *fl1 = NULL; + struct ipv6_pinfo *np = inet6_sk(sk); + struct net *net = sock_net(sk); + int uninitialized_var(err); + + if (freq->flr_flags & IPV6_FL_F_REFLECT) { + if (net->ipv6.sysctl.flowlabel_consistency) { + net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n"); + return -EPERM; + } + + if (sk->sk_protocol != IPPROTO_TCP) + return -ENOPROTOOPT; + np->repflow = 1; + return 0; + } + + if (freq->flr_label & ~IPV6_FLOWLABEL_MASK) + return -EINVAL; + if (net->ipv6.sysctl.flowlabel_state_ranges && + (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG)) + return -ERANGE; + + fl = fl_create(net, sk, freq, optval, optlen, &err); + if (!fl) + return err; + + sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); + + if (freq->flr_label) { + err = -EEXIST; + rcu_read_lock_bh(); + for_each_sk_fl_rcu(np, sfl) { + if (sfl->fl->label == freq->flr_label) { + if (freq->flr_flags & IPV6_FL_F_EXCL) { + rcu_read_unlock_bh(); + goto done; + } + fl1 = sfl->fl; + if (!atomic_inc_not_zero(&fl1->users)) + fl1 = NULL; + break; + } + } + rcu_read_unlock_bh(); + + if (!fl1) + fl1 = fl_lookup(net, freq->flr_label); + if (fl1) { +recheck: + err = -EEXIST; + if (freq->flr_flags&IPV6_FL_F_EXCL) + goto release; + err = -EPERM; + if (fl1->share == IPV6_FL_S_EXCL || + fl1->share != fl->share || + ((fl1->share == IPV6_FL_S_PROCESS) && + (fl1->owner.pid != fl->owner.pid)) || + ((fl1->share == IPV6_FL_S_USER) && + !uid_eq(fl1->owner.uid, fl->owner.uid))) + goto release; + + err = -ENOMEM; + if (!sfl1) + goto release; + if (fl->linger > fl1->linger) + fl1->linger = fl->linger; + if ((long)(fl->expires - fl1->expires) > 0) + fl1->expires = fl->expires; + fl_link(np, sfl1, fl1); + fl_free(fl); + return 0; + +release: + fl_release(fl1); + goto done; + } + } + err = -ENOENT; + if (!(freq->flr_flags & IPV6_FL_F_CREATE)) + goto done; + + err = -ENOMEM; + if (!sfl1) + goto done; + + err = mem_check(sk); + if (err != 0) + goto done; + + fl1 = fl_intern(net, fl, freq->flr_label); + if (fl1) + goto recheck; + + if (!freq->flr_label) { + if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label, + &fl->label, sizeof(fl->label))) { + /* Intentionally ignore fault. 
*/ + } + } + + fl_link(np, sfl1, fl); + return 0; +done: + fl_free(fl); + kfree(sfl1); + return err; +} + int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) { - int uninitialized_var(err); - struct net *net = sock_net(sk); - struct ipv6_pinfo *np = inet6_sk(sk); struct in6_flowlabel_req freq; - struct ipv6_fl_socklist *sfl1 = NULL; - struct ipv6_fl_socklist *sfl; - struct ipv6_fl_socklist __rcu **sflp; - struct ip6_flowlabel *fl, *fl1 = NULL; - if (optlen < sizeof(freq)) return -EINVAL; - if (copy_from_user(&freq, optval, sizeof(freq))) return -EFAULT; switch (freq.flr_action) { case IPV6_FL_A_PUT: - if (freq.flr_flags & IPV6_FL_F_REFLECT) { - if (sk->sk_protocol != IPPROTO_TCP) - return -ENOPROTOOPT; - if (!np->repflow) - return -ESRCH; - np->flow_label = 0; - np->repflow = 0; - return 0; - } - spin_lock_bh(&ip6_sk_fl_lock); - for (sflp = &np->ipv6_fl_list; - (sfl = rcu_dereference_protected(*sflp, - lockdep_is_held(&ip6_sk_fl_lock))) != NULL; - sflp = &sfl->next) { - if (sfl->fl->label == freq.flr_label) { - if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) - np->flow_label &= ~IPV6_FLOWLABEL_MASK; - *sflp = sfl->next; - spin_unlock_bh(&ip6_sk_fl_lock); - fl_release(sfl->fl); - kfree_rcu(sfl, rcu); - return 0; - } - } - spin_unlock_bh(&ip6_sk_fl_lock); - return -ESRCH; - + return ipv6_flowlabel_put(sk, &freq); case IPV6_FL_A_RENEW: - rcu_read_lock_bh(); - for_each_sk_fl_rcu(np, sfl) { - if (sfl->fl->label == freq.flr_label) { - err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires); - rcu_read_unlock_bh(); - return err; - } - } - rcu_read_unlock_bh(); - - if (freq.flr_share == IPV6_FL_S_NONE && - ns_capable(net->user_ns, CAP_NET_ADMIN)) { - fl = fl_lookup(net, freq.flr_label); - if (fl) { - err = fl6_renew(fl, freq.flr_linger, freq.flr_expires); - fl_release(fl); - return err; - } - } - return -ESRCH; - + return ipv6_flowlabel_renew(sk, &freq); case IPV6_FL_A_GET: - if (freq.flr_flags & IPV6_FL_F_REFLECT) { - struct net *net = sock_net(sk); - if (net->ipv6.sysctl.flowlabel_consistency) { - net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n"); - return -EPERM; - } - - if (sk->sk_protocol != IPPROTO_TCP) - return -ENOPROTOOPT; - - np->repflow = 1; - return 0; - } - - if (freq.flr_label & ~IPV6_FLOWLABEL_MASK) - return -EINVAL; - - if (net->ipv6.sysctl.flowlabel_state_ranges && - (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG)) - return -ERANGE; - - fl = fl_create(net, sk, &freq, optval, optlen, &err); - if (!fl) - return err; - sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL); - - if (freq.flr_label) { - err = -EEXIST; - rcu_read_lock_bh(); - for_each_sk_fl_rcu(np, sfl) { - if (sfl->fl->label == freq.flr_label) { - if (freq.flr_flags&IPV6_FL_F_EXCL) { - rcu_read_unlock_bh(); - goto done; - } - fl1 = sfl->fl; - if (!atomic_inc_not_zero(&fl1->users)) - fl1 = NULL; - break; - } - } - rcu_read_unlock_bh(); - - if (!fl1) - fl1 = fl_lookup(net, freq.flr_label); - if (fl1) { -recheck: - err = -EEXIST; - if (freq.flr_flags&IPV6_FL_F_EXCL) - goto release; - err = -EPERM; - if (fl1->share == IPV6_FL_S_EXCL || - fl1->share != fl->share || - ((fl1->share == IPV6_FL_S_PROCESS) && - (fl1->owner.pid != fl->owner.pid)) || - ((fl1->share == IPV6_FL_S_USER) && - !uid_eq(fl1->owner.uid, fl->owner.uid))) - goto release; - - err = -ENOMEM; - if (!sfl1) - goto release; - if (fl->linger > fl1->linger) - fl1->linger = fl->linger; - if ((long)(fl->expires - fl1->expires) > 0) - fl1->expires = fl->expires; - fl_link(np, sfl1, fl1); - 
fl_free(fl); - return 0; - -release: - fl_release(fl1); - goto done; - } - } - err = -ENOENT; - if (!(freq.flr_flags&IPV6_FL_F_CREATE)) - goto done; - - err = -ENOMEM; - if (!sfl1) - goto done; - - err = mem_check(sk); - if (err != 0) - goto done; - - fl1 = fl_intern(net, fl, freq.flr_label); - if (fl1) - goto recheck; - - if (!freq.flr_label) { - if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label, - &fl->label, sizeof(fl->label))) { - /* Intentionally ignore fault. */ - } - } - - fl_link(np, sfl1, fl); - return 0; - + return ipv6_flowlabel_get(sk, &freq, optval, optlen); default: return -EINVAL; } - -done: - fl_free(fl); - kfree(sfl1); - return err; } #ifdef CONFIG_PROC_FS From 86298285c9ae3a41ce21c2d00ebdde51dd2abc73 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:01 +0200 Subject: [PATCH 1566/2056] net/ipv6: switch ipv6_flowlabel_opt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Note that the get case is pretty weird in that it actually copies data back to userspace from setsockopt. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/ipv6.h | 2 +- net/ipv6/ip6_flowlabel.c | 16 +++++++++------- net/ipv6/ipv6_sockglue.c | 2 +- 3 files changed, 11 insertions(+), 9 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 262fc88dbd7e..4c9d89b5d732 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -406,7 +406,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, struct ip6_flowlabel *fl, struct ipv6_txoptions *fopt); void fl6_free_socklist(struct sock *sk); -int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen); +int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen); int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq, int flags); int ip6_flowlabel_init(void); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 817b4f379009..215b6f5e733e 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -371,7 +371,7 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo static struct ip6_flowlabel * fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, - char __user *optval, int optlen, int *err_p) + sockptr_t optval, int optlen, int *err_p) { struct ip6_flowlabel *fl = NULL; int olen; @@ -401,7 +401,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; - if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen)) + sockptr_advance(optval, CMSG_ALIGN(sizeof(*freq))); + if (copy_from_sockptr(fl->opt + 1, optval, olen)) goto done; msg.msg_controllen = olen; @@ -604,7 +605,7 @@ static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq) } static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct ipv6_fl_socklist *sfl, *sfl1 = NULL; struct ip6_flowlabel *fl, *fl1 = NULL; @@ -702,8 +703,9 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, goto recheck; if (!freq->flr_label) { - if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label, - &fl->label, sizeof(fl->label))) { + sockptr_advance(optval, + offsetof(struct in6_flowlabel_req, flr_label)); + if (copy_to_sockptr(optval, 
&fl->label, sizeof(fl->label))) { /* Intentionally ignore fault. */ } } @@ -716,13 +718,13 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, return err; } -int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) +int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen) { struct in6_flowlabel_req freq; if (optlen < sizeof(freq)) return -EINVAL; - if (copy_from_user(&freq, optval, sizeof(freq))) + if (copy_from_sockptr(&freq, optval, sizeof(freq))) return -EFAULT; switch (freq.flr_action) { diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 119dfaf5f4bb..3897fb55372d 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -929,7 +929,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = 0; break; case IPV6_FLOWLABEL_MGR: - retv = ipv6_flowlabel_opt(sk, optval, optlen); + retv = ipv6_flowlabel_opt(sk, USER_SOCKPTR(optval), optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: From b84d2b73af401dc00d9f03c5d78ccf71369a5707 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:02 +0200 Subject: [PATCH 1567/2056] net/ipv6: factor out a ipv6_set_opt_hdr helper Factor out a helper to set the IPv6 option headers from do_ipv6_setsockopt. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv6/ipv6_sockglue.c | 150 +++++++++++++++++++-------------------- 1 file changed, 75 insertions(+), 75 deletions(-) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 3897fb55372d..90442c8366df 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -315,6 +315,80 @@ static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, return ipv6_sock_mc_drop(sk, gr32.gr_interface, &psin6->sin6_addr); } +static int ipv6_set_opt_hdr(struct sock *sk, int optname, void __user *optval, + int optlen) +{ + struct ipv6_pinfo *np = inet6_sk(sk); + struct ipv6_opt_hdr *new = NULL; + struct net *net = sock_net(sk); + struct ipv6_txoptions *opt; + int err; + + /* hop-by-hop / destination options are privileged option */ + if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) + return -EPERM; + + /* remove any sticky options header with a zero option + * length, per RFC3542. 
+ */ + if (optlen > 0) { + if (!optval) + return -EINVAL; + if (optlen < sizeof(struct ipv6_opt_hdr) || + optlen & 0x7 || + optlen > 8 * 255) + return -EINVAL; + + new = memdup_user(optval, optlen); + if (IS_ERR(new)) + return PTR_ERR(new); + if (unlikely(ipv6_optlen(new) > optlen)) { + kfree(new); + return -EINVAL; + } + } + + opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk)); + opt = ipv6_renew_options(sk, opt, optname, new); + kfree(new); + if (IS_ERR(opt)) + return PTR_ERR(opt); + + /* routing header option needs extra check */ + err = -EINVAL; + if (optname == IPV6_RTHDR && opt && opt->srcrt) { + struct ipv6_rt_hdr *rthdr = opt->srcrt; + switch (rthdr->type) { +#if IS_ENABLED(CONFIG_IPV6_MIP6) + case IPV6_SRCRT_TYPE_2: + if (rthdr->hdrlen != 2 || rthdr->segments_left != 1) + goto sticky_done; + break; +#endif + case IPV6_SRCRT_TYPE_4: + { + struct ipv6_sr_hdr *srh = + (struct ipv6_sr_hdr *)opt->srcrt; + + if (!seg6_validate_srh(srh, optlen, false)) + goto sticky_done; + break; + } + default: + goto sticky_done; + } + } + + err = 0; + opt = ipv6_update_options(sk, opt); +sticky_done: + if (opt) { + atomic_sub(opt->tot_len, &sk->sk_omem_alloc); + txopt_put(opt); + } + return err; +} + static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { @@ -580,82 +654,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case IPV6_RTHDRDSTOPTS: case IPV6_RTHDR: case IPV6_DSTOPTS: - { - struct ipv6_txoptions *opt; - struct ipv6_opt_hdr *new = NULL; - - /* hop-by-hop / destination options are privileged option */ - retv = -EPERM; - if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) - break; - - /* remove any sticky options header with a zero option - * length, per RFC3542. - */ - if (optlen == 0) - optval = NULL; - else if (!optval) - goto e_inval; - else if (optlen < sizeof(struct ipv6_opt_hdr) || - optlen & 0x7 || optlen > 8 * 255) - goto e_inval; - else { - new = memdup_user(optval, optlen); - if (IS_ERR(new)) { - retv = PTR_ERR(new); - break; - } - if (unlikely(ipv6_optlen(new) > optlen)) { - kfree(new); - goto e_inval; - } - } - - opt = rcu_dereference_protected(np->opt, - lockdep_sock_is_held(sk)); - opt = ipv6_renew_options(sk, opt, optname, new); - kfree(new); - if (IS_ERR(opt)) { - retv = PTR_ERR(opt); - break; - } - - /* routing header option needs extra check */ - retv = -EINVAL; - if (optname == IPV6_RTHDR && opt && opt->srcrt) { - struct ipv6_rt_hdr *rthdr = opt->srcrt; - switch (rthdr->type) { -#if IS_ENABLED(CONFIG_IPV6_MIP6) - case IPV6_SRCRT_TYPE_2: - if (rthdr->hdrlen != 2 || - rthdr->segments_left != 1) - goto sticky_done; - - break; -#endif - case IPV6_SRCRT_TYPE_4: - { - struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *) - opt->srcrt; - - if (!seg6_validate_srh(srh, optlen, false)) - goto sticky_done; - break; - } - default: - goto sticky_done; - } - } - - retv = 0; - opt = ipv6_update_options(sk, opt); -sticky_done: - if (opt) { - atomic_sub(opt->tot_len, &sk->sk_omem_alloc); - txopt_put(opt); - } + retv = ipv6_set_opt_hdr(sk, optname, optval, optlen); break; - } case IPV6_PKTINFO: { From 894cfbc0cf3ea447e486fca99852893a155b6a81 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:03 +0200 Subject: [PATCH 1568/2056] net/ipv6: switch do_ipv6_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- net/ipv6/ipv6_sockglue.c | 66 ++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 90442c8366df..dcd000a5a9b1 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -136,15 +136,15 @@ static bool setsockopt_needs_rtnl(int optname) return false; } -static int copy_group_source_from_user(struct group_source_req *greqs, - void __user *optval, int optlen) +static int copy_group_source_from_sockptr(struct group_source_req *greqs, + sockptr_t optval, int optlen) { if (in_compat_syscall()) { struct compat_group_source_req gr32; if (optlen < sizeof(gr32)) return -EINVAL; - if (copy_from_user(&gr32, optval, sizeof(gr32))) + if (copy_from_sockptr(&gr32, optval, sizeof(gr32))) return -EFAULT; greqs->gsr_interface = gr32.gsr_interface; greqs->gsr_group = gr32.gsr_group; @@ -152,7 +152,7 @@ static int copy_group_source_from_user(struct group_source_req *greqs, } else { if (optlen < sizeof(*greqs)) return -EINVAL; - if (copy_from_user(greqs, optval, sizeof(*greqs))) + if (copy_from_sockptr(greqs, optval, sizeof(*greqs))) return -EFAULT; } @@ -160,13 +160,13 @@ static int copy_group_source_from_user(struct group_source_req *greqs, } static int do_ipv6_mcast_group_source(struct sock *sk, int optname, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct group_source_req greqs; int omode, add; int ret; - ret = copy_group_source_from_user(&greqs, optval, optlen); + ret = copy_group_source_from_sockptr(&greqs, optval, optlen); if (ret) return ret; @@ -200,7 +200,7 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname, return ip6_mc_source(add, omode, sk, &greqs); } -static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, +static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) { struct group_filter *gsf; @@ -211,7 +211,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, if (optlen > sysctl_optmem_max) return -ENOBUFS; - gsf = memdup_user(optval, optlen); + gsf = memdup_sockptr(optval, optlen); if (IS_ERR(gsf)) return PTR_ERR(gsf); @@ -231,7 +231,7 @@ static int ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, return ret; } -static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, +static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen) { const int size0 = offsetof(struct compat_group_filter, gf_slist); @@ -251,7 +251,7 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */ ret = -EFAULT; - if (copy_from_user(gf32, optval, optlen)) + if (copy_from_sockptr(gf32, optval, optlen)) goto out_free_p; /* numsrc >= (4G-140)/128 overflow in 32 bits */ @@ -276,14 +276,14 @@ static int compat_ipv6_set_mcast_msfilter(struct sock *sk, void __user *optval, } static int ipv6_mcast_join_leave(struct sock *sk, int optname, - void __user *optval, int optlen) + sockptr_t optval, int optlen) { struct sockaddr_in6 *psin6; struct group_req greq; if (optlen < sizeof(greq)) return -EINVAL; - if (copy_from_user(&greq, optval, sizeof(greq))) + if (copy_from_sockptr(&greq, optval, sizeof(greq))) return -EFAULT; if (greq.gr_group.ss_family != AF_INET6) @@ -296,14 +296,14 @@ static int ipv6_mcast_join_leave(struct sock *sk, int optname, } static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, - void __user *optval, int optlen) 
+ sockptr_t optval, int optlen) { struct compat_group_req gr32; struct sockaddr_in6 *psin6; if (optlen < sizeof(gr32)) return -EINVAL; - if (copy_from_user(&gr32, optval, sizeof(gr32))) + if (copy_from_sockptr(&gr32, optval, sizeof(gr32))) return -EFAULT; if (gr32.gr_group.ss_family != AF_INET6) @@ -315,7 +315,7 @@ static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname, return ipv6_sock_mc_drop(sk, gr32.gr_interface, &psin6->sin6_addr); } -static int ipv6_set_opt_hdr(struct sock *sk, int optname, void __user *optval, +static int ipv6_set_opt_hdr(struct sock *sk, int optname, sockptr_t optval, int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); @@ -332,14 +332,14 @@ static int ipv6_set_opt_hdr(struct sock *sk, int optname, void __user *optval, * length, per RFC3542. */ if (optlen > 0) { - if (!optval) + if (sockptr_is_null(optval)) return -EINVAL; if (optlen < sizeof(struct ipv6_opt_hdr) || optlen & 0x7 || optlen > 8 * 255) return -EINVAL; - new = memdup_user(optval, optlen); + new = memdup_sockptr(optval, optlen); if (IS_ERR(new)) return PTR_ERR(new); if (unlikely(ipv6_optlen(new) > optlen)) { @@ -390,7 +390,7 @@ static int ipv6_set_opt_hdr(struct sock *sk, int optname, void __user *optval, } static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct ipv6_pinfo *np = inet6_sk(sk); struct net *net = sock_net(sk); @@ -398,11 +398,11 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, int retv = -ENOPROTOOPT; bool needs_rtnl = setsockopt_needs_rtnl(optname); - if (!optval) + if (sockptr_is_null(optval)) val = 0; else { if (optlen >= sizeof(int)) { - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; } else val = 0; @@ -411,8 +411,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, valbool = (val != 0); if (ip6_mroute_opt(optname)) - return ip6_mroute_setsockopt(sk, optname, USER_SOCKPTR(optval), - optlen); + return ip6_mroute_setsockopt(sk, optname, optval, optlen); if (needs_rtnl) rtnl_lock(); @@ -663,12 +662,13 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, if (optlen == 0) goto e_inval; - else if (optlen < sizeof(struct in6_pktinfo) || !optval) + else if (optlen < sizeof(struct in6_pktinfo) || + sockptr_is_null(optval)) goto e_inval; - if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) { - retv = -EFAULT; - break; + if (copy_from_sockptr(&pkt, optval, sizeof(pkt))) { + retv = -EFAULT; + break; } if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex)) goto e_inval; @@ -709,7 +709,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, refcount_set(&opt->refcnt, 1); opt->tot_len = sizeof(*opt) + optlen; retv = -EFAULT; - if (copy_from_user(opt+1, optval, optlen)) + if (copy_from_sockptr(opt + 1, optval, optlen)) goto done; msg.msg_controllen = optlen; @@ -831,7 +831,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, break; retv = -EFAULT; - if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) + if (copy_from_sockptr(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_ADD_MEMBERSHIP) @@ -849,7 +849,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, goto e_inval; retv = -EFAULT; - if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq))) + if (copy_from_sockptr(&mreq, optval, sizeof(struct ipv6_mreq))) break; if (optname == IPV6_JOIN_ANYCAST) @@ 
-929,15 +929,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, retv = 0; break; case IPV6_FLOWLABEL_MGR: - retv = ipv6_flowlabel_opt(sk, USER_SOCKPTR(optval), optlen); + retv = ipv6_flowlabel_opt(sk, optval, optlen); break; case IPV6_IPSEC_POLICY: case IPV6_XFRM_POLICY: retv = -EPERM; if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) break; - retv = xfrm_user_policy(sk, optname, USER_SOCKPTR(optval), - optlen); + retv = xfrm_user_policy(sk, optname, optval, optlen); break; case IPV6_ADDR_PREFERENCES: @@ -992,7 +991,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, if (level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); + err = do_ipv6_setsockopt(sk, level, optname, USER_SOCKPTR(optval), + optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && From 91ac1ccaff597d06b1e16801e1a4c99b8a78dcbe Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:04 +0200 Subject: [PATCH 1569/2056] net/udp: switch udp_lib_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/udp.h | 2 +- net/ipv4/udp.c | 7 ++++--- net/ipv6/udp.c | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/net/udp.h b/include/net/udp.h index 17a9e86a8076..295d52a73598 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -306,7 +306,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)); struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index bb95cddcb040..c6cb2d09dbc7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2588,7 +2588,7 @@ void udp_destroy_sock(struct sock *sk) * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen, + sockptr_t optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); @@ -2599,7 +2599,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; valbool = val ? 
1 : 0; @@ -2707,7 +2707,8 @@ int udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, + return udp_lib_setsockopt(sk, level, optname, + USER_SOCKPTR(optval), optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 7c1143feb2bf..2df1e6c9d7cb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1622,7 +1622,8 @@ int udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) - return udp_lib_setsockopt(sk, level, optname, optval, optlen, + return udp_lib_setsockopt(sk, level, optname, + USER_SOCKPTR(optval), optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } From d4c19c49142ddb2efcc34cff6379d03edb3553bd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:05 +0200 Subject: [PATCH 1570/2056] net/tcp: switch ->md5_parse to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp.c | 3 ++- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv6/tcp_ipv6.c | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 9f7f7c0c1104..e3c8e1d82021 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2002,7 +2002,7 @@ struct tcp_sock_af_ops { const struct sk_buff *skb); int (*md5_parse)(struct sock *sk, int optname, - char __user *optval, + sockptr_t optval, int optlen); #endif }; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 58ede3d62b2e..49bf15c27dea 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3249,7 +3249,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); + err = tp->af_specific->md5_parse(sk, optname, + USER_SOCKPTR(optval), optlen); break; #endif case TCP_USER_TIMEOUT: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index daa39d33702b..f8913923a6c0 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1195,7 +1195,7 @@ static void tcp_clear_md5_list(struct sock *sk) } static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; @@ -1206,7 +1206,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname, if (optlen < sizeof(cmd)) return -EINVAL; - if (copy_from_user(&cmd, optval, sizeof(cmd))) + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin->sin_family != AF_INET) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c34b7834fd84..305870a72352 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -567,7 +567,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk, } static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr; @@ -577,7 +577,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname, if (optlen < sizeof(cmd)) return -EINVAL; - if 
(copy_from_user(&cmd, optval, sizeof(cmd))) + if (copy_from_sockptr(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin6->sin6_family != AF_INET6) From d38d2b00ba64b3f2f30d70a7929000606d2c4509 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:06 +0200 Subject: [PATCH 1571/2056] net/tcp: switch do_tcp_setsockopt to sockptr_t Pass a sockptr_t to prepare for set_fs-less handling of the kernel pointer from bpf-cgroup. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 49bf15c27dea..71cbc61c335f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2764,7 +2764,7 @@ static inline bool tcp_can_repair_sock(const struct sock *sk) (sk->sk_state != TCP_LISTEN); } -static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) +static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) { struct tcp_repair_window opt; @@ -2774,7 +2774,7 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l if (len != sizeof(opt)) return -EINVAL; - if (copy_from_user(&opt, optbuf, sizeof(opt))) + if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) return -EFAULT; if (opt.max_window < opt.snd_wnd) @@ -2796,17 +2796,17 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l return 0; } -static int tcp_repair_options_est(struct sock *sk, - struct tcp_repair_opt __user *optbuf, unsigned int len) +static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, + unsigned int len) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_repair_opt opt; while (len >= sizeof(opt)) { - if (copy_from_user(&opt, optbuf, sizeof(opt))) + if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) return -EFAULT; - optbuf++; + sockptr_advance(optbuf, sizeof(opt)); len -= sizeof(opt); switch (opt.opt_code) { @@ -3020,8 +3020,8 @@ EXPORT_SYMBOL(tcp_sock_set_keepcnt); /* * Socket option code for TCP. 
*/ -static int do_tcp_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, unsigned int optlen) +static int do_tcp_setsockopt(struct sock *sk, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); @@ -3037,7 +3037,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (optlen < 1) return -EINVAL; - val = strncpy_from_user(name, optval, + val = strncpy_from_sockptr(name, optval, min_t(long, TCP_CA_NAME_MAX-1, optlen)); if (val < 0) return -EFAULT; @@ -3056,7 +3056,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (optlen < 1) return -EINVAL; - val = strncpy_from_user(name, optval, + val = strncpy_from_sockptr(name, optval, min_t(long, TCP_ULP_NAME_MAX - 1, optlen)); if (val < 0) @@ -3079,7 +3079,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) return -EINVAL; - if (copy_from_user(key, optval, optlen)) + if (copy_from_sockptr(key, optval, optlen)) return -EFAULT; if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) @@ -3095,7 +3095,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3174,9 +3174,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, if (!tp->repair) err = -EINVAL; else if (sk->sk_state == TCP_ESTABLISHED) - err = tcp_repair_options_est(sk, - (struct tcp_repair_opt __user *)optval, - optlen); + err = tcp_repair_options_est(sk, optval, optlen); else err = -EPERM; break; @@ -3249,8 +3247,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: - err = tp->af_specific->md5_parse(sk, optname, - USER_SOCKPTR(optval), optlen); + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); break; #endif case TCP_USER_TIMEOUT: @@ -3334,7 +3331,8 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); - return do_tcp_setsockopt(sk, level, optname, optval, optlen); + return do_tcp_setsockopt(sk, level, optname, USER_SOCKPTR(optval), + optlen); } EXPORT_SYMBOL(tcp_setsockopt); From a7b75c5a8c41445f33efb663887ff5f5c3b4454b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:07 +0200 Subject: [PATCH 1572/2056] net: pass a sockptr_t into ->setsockopt Rework the remaining setsockopt code to pass a sockptr_t instead of a plain user pointer. This removes the last remaining set_fs(KERNEL_DS) outside of architecture specific code. Signed-off-by: Christoph Hellwig Acked-by: Stefan Schmidt [ieee802154] Acked-by: Matthieu Baerts Signed-off-by: David S. 
Miller --- crypto/af_alg.c | 7 ++-- drivers/crypto/chelsio/chtls/chtls_main.c | 18 ++++++----- drivers/isdn/mISDN/socket.c | 4 +-- include/linux/net.h | 4 ++- include/net/inet_connection_sock.h | 3 +- include/net/ip.h | 2 +- include/net/ipv6.h | 4 +-- include/net/sctp/structs.h | 2 +- include/net/sock.h | 4 +-- include/net/tcp.h | 4 +-- net/atm/common.c | 6 ++-- net/atm/common.h | 2 +- net/atm/pvc.c | 2 +- net/atm/svc.c | 6 ++-- net/ax25/af_ax25.c | 6 ++-- net/bluetooth/hci_sock.c | 8 ++--- net/bluetooth/l2cap_sock.c | 22 ++++++------- net/bluetooth/rfcomm/sock.c | 12 ++++--- net/bluetooth/sco.c | 6 ++-- net/caif/caif_socket.c | 8 ++--- net/can/j1939/socket.c | 12 +++---- net/can/raw.c | 16 +++++----- net/core/sock.c | 2 +- net/dccp/dccp.h | 2 +- net/dccp/proto.c | 20 ++++++------ net/decnet/af_decnet.c | 16 ++++++---- net/ieee802154/socket.c | 6 ++-- net/ipv4/ip_sockglue.c | 13 +++----- net/ipv4/raw.c | 8 ++--- net/ipv4/tcp.c | 5 ++- net/ipv4/udp.c | 6 ++-- net/ipv4/udp_impl.h | 4 +-- net/ipv6/ipv6_sockglue.c | 10 +++--- net/ipv6/raw.c | 10 +++--- net/ipv6/udp.c | 6 ++-- net/ipv6/udp_impl.h | 4 +-- net/iucv/af_iucv.c | 4 +-- net/kcm/kcmsock.c | 6 ++-- net/l2tp/l2tp_ppp.c | 4 +-- net/llc/af_llc.c | 4 +-- net/mptcp/protocol.c | 12 +++---- net/netlink/af_netlink.c | 4 +-- net/netrom/af_netrom.c | 4 +-- net/nfc/llcp_sock.c | 6 ++-- net/packet/af_packet.c | 39 ++++++++++++----------- net/phonet/pep.c | 4 +-- net/rds/af_rds.c | 30 ++++++++--------- net/rds/rdma.c | 14 ++++---- net/rds/rds.h | 6 ++-- net/rose/af_rose.c | 4 +-- net/rxrpc/af_rxrpc.c | 8 ++--- net/rxrpc/ar-internal.h | 4 +-- net/rxrpc/key.c | 9 +++--- net/sctp/socket.c | 4 +-- net/smc/af_smc.c | 4 +-- net/socket.c | 23 ++++--------- net/tipc/socket.c | 8 ++--- net/tls/tls_main.c | 17 +++++----- net/vmw_vsock/af_vsock.c | 4 +-- net/x25/af_x25.c | 4 +-- net/xdp/xsk.c | 8 ++--- 61 files changed, 246 insertions(+), 258 deletions(-) diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 29f71428520b..892242a42c3e 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c @@ -197,8 +197,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) return err; } -static int alg_setkey(struct sock *sk, char __user *ukey, - unsigned int keylen) +static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type = ask->type; @@ -210,7 +209,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, return -ENOMEM; err = -EFAULT; - if (copy_from_user(key, ukey, keylen)) + if (copy_from_sockptr(key, ukey, keylen)) goto out; err = type->setkey(ask->private, key, keylen); @@ -222,7 +221,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey, } static int alg_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index d98b89d0fa6e..c3058dcdb33c 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -488,7 +488,7 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname, } static int do_chtls_setsockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_crypto_info *crypto_info, tmp_crypto_info; struct chtls_sock *csk; @@ -498,12 +498,12 @@ static int 
do_chtls_setsockopt(struct sock *sk, int optname, csk = rcu_dereference_sk_user_data(sk); - if (!optval || optlen < sizeof(*crypto_info)) { + if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) { rc = -EINVAL; goto out; } - rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto out; @@ -525,8 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, /* Obtain version and type from previous copy */ crypto_info[0] = tmp_crypto_info; /* Now copy the following data */ - rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), - optval + sizeof(*crypto_info), + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), + optval, sizeof(struct tls12_crypto_info_aes_gcm_128) - sizeof(*crypto_info)); @@ -541,8 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } case TLS_CIPHER_AES_GCM_256: { crypto_info[0] = tmp_crypto_info; - rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info), - optval + sizeof(*crypto_info), + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), + optval, sizeof(struct tls12_crypto_info_aes_gcm_256) - sizeof(*crypto_info)); @@ -565,7 +567,7 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } static int chtls_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_context *ctx = tls_get_ctx(sk); diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 1b2b91479107..2835daae9e9f 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -401,7 +401,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) } static int data_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) + sockptr_t optval, unsigned int len) { struct sock *sk = sock->sk; int err = 0, opt = 0; @@ -414,7 +414,7 @@ static int data_sock_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case MISDN_TIME_STAMP: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(int))) { err = -EFAULT; break; } diff --git a/include/linux/net.h b/include/linux/net.h index 858ff1d98154..d48ff1180879 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -21,6 +21,7 @@ #include #include #include +#include #include @@ -162,7 +163,8 @@ struct proto_ops { int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); + int optname, sockptr_t optval, + unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); void (*show_fdinfo)(struct seq_file *m, struct socket *sock); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 157c60cca0ca..1e209ce7d1bd 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -45,7 +46,7 @@ struct inet_connection_sock_af_ops { u16 net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t 
optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); diff --git a/include/net/ip.h b/include/net/ip.h index d66ad3a95220..b09c48d862cc 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -722,7 +722,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, int tlen, int offset); int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); -int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, +int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen); int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4c9d89b5d732..bd1f396cc9c7 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1084,8 +1084,8 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, * socket options (ipv6_sockglue.c) */ -int ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int ipv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 233bbf7df5d6..b33f1aefad09 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -431,7 +431,7 @@ struct sctp_af { int (*setsockopt) (struct sock *sk, int level, int optname, - char __user *optval, + sockptr_t optval, unsigned int optlen); int (*getsockopt) (struct sock *sk, int level, diff --git a/include/net/sock.h b/include/net/sock.h index bfb2fe2fc368..2cc3ba667908 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1141,7 +1141,7 @@ struct proto { void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); int (*setsockopt)(struct sock *sk, int level, - int optname, char __user *optval, + int optname, sockptr_t optval, unsigned int optlen); int (*getsockopt)(struct sock *sk, int level, int optname, char __user *optval, @@ -1734,7 +1734,7 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname, int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags); int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); void sk_common_release(struct sock *sk); diff --git a/include/net/tcp.h b/include/net/tcp.h index e3c8e1d82021..e0c35d56091f 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -399,8 +399,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int tcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); void tcp_set_keepalive(struct sock *sk, int val); void tcp_syn_ack_timeout(const struct request_sock *req); int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/atm/common.c b/net/atm/common.c index 9b28f1fb3c69..84367b844b14 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -745,7 +745,7 @@ static 
int check_qos(const struct atm_qos *qos) } int vcc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct atm_vcc *vcc; unsigned long value; @@ -760,7 +760,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, { struct atm_qos qos; - if (copy_from_user(&qos, optval, sizeof(qos))) + if (copy_from_sockptr(&qos, optval, sizeof(qos))) return -EFAULT; error = check_qos(&qos); if (error) @@ -774,7 +774,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname, return 0; } case SO_SETCLP: - if (get_user(value, (unsigned long __user *)optval)) + if (copy_from_sockptr(&value, optval, sizeof(value))) return -EFAULT; if (value) vcc->atm_options |= ATM_ATMOPT_CLP; diff --git a/net/atm/common.h b/net/atm/common.h index 5850649068bb..a1e56e8de698 100644 --- a/net/atm/common.h +++ b/net/atm/common.h @@ -21,7 +21,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait); int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int vcc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int vcc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); void vcc_process_recv_queue(struct atm_vcc *vcc); diff --git a/net/atm/pvc.c b/net/atm/pvc.c index 02bd2a436bdf..53e7d3f39e26 100644 --- a/net/atm/pvc.c +++ b/net/atm/pvc.c @@ -63,7 +63,7 @@ static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr, } static int pvc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int error; diff --git a/net/atm/svc.c b/net/atm/svc.c index ba144d035e3d..4a02bcaad279 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c @@ -451,7 +451,7 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) } static int svc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); @@ -464,7 +464,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname, error = -EINVAL; goto out; } - if (copy_from_user(&vcc->sap, optval, optlen)) { + if (copy_from_sockptr(&vcc->sap, optval, optlen)) { error = -EFAULT; goto out; } @@ -475,7 +475,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname, error = -EINVAL; goto out; } - if (get_user(value, (int __user *)optval)) { + if (copy_from_sockptr(&value, optval, sizeof(int))) { error = -EFAULT; goto out; } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index fd91cd34f25e..17bf31a89692 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -528,7 +528,7 @@ ax25_cb *ax25_create_cb(void) */ static int ax25_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; ax25_cb *ax25; @@ -543,7 +543,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(unsigned int))) return -EFAULT; lock_sock(sk); @@ -640,7 +640,7 @@ static int ax25_setsockopt(struct 
socket *sock, int level, int optname, memset(devname, 0, sizeof(devname)); - if (copy_from_user(devname, optval, optlen)) { + if (copy_from_sockptr(devname, optval, optlen)) { res = -EFAULT; break; } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index caf38a8ea6a8..d5eff27d5b1e 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -1842,7 +1842,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, } static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) + sockptr_t optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -1862,7 +1862,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case HCI_DATA_DIR: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } @@ -1874,7 +1874,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, break; case HCI_TIME_STAMP: - if (get_user(opt, (int __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { err = -EFAULT; break; } @@ -1896,7 +1896,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, } len = min_t(unsigned int, len, sizeof(uf)); - if (copy_from_user(&uf, optval, len)) { + if (copy_from_sockptr(&uf, optval, len)) { err = -EFAULT; break; } diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a995d2c51fa7..a3d104123f38 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -703,7 +703,7 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) } static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -736,7 +736,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, opts.txwin_size = chan->tx_win; len = min_t(unsigned int, sizeof(opts), optlen); - if (copy_from_user((char *) &opts, optval, len)) { + if (copy_from_sockptr(&opts, optval, len)) { err = -EFAULT; break; } @@ -782,7 +782,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, break; case L2CAP_LM: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -859,7 +859,7 @@ static int l2cap_set_mode(struct l2cap_chan *chan, u8 mode) } static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2cap_chan *chan = l2cap_pi(sk)->chan; @@ -891,7 +891,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_user((char *) &sec, optval, len)) { + if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } @@ -939,7 +939,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -954,7 +954,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_FLUSHABLE: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = 
-EFAULT; break; } @@ -990,7 +990,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; len = min_t(unsigned int, sizeof(pwr), optlen); - if (copy_from_user((char *) &pwr, optval, len)) { + if (copy_from_sockptr(&pwr, optval, len)) { err = -EFAULT; break; } @@ -1002,7 +1002,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_CHANNEL_POLICY: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -1050,7 +1050,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u16 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u16))) { err = -EFAULT; break; } @@ -1081,7 +1081,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u8 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u8))) { err = -EFAULT; break; } diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index df14eebe80da..dba4ea0e1b0d 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -644,7 +644,8 @@ static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, return len; } -static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) +static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err = 0; @@ -656,7 +657,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u switch (optname) { case RFCOMM_LM: - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -685,7 +686,8 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u return err; } -static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct bt_security sec; @@ -713,7 +715,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c sec.level = BT_SECURITY_LOW; len = min_t(unsigned int, sizeof(sec), optlen); - if (copy_from_user((char *) &sec, optval, len)) { + if (copy_from_sockptr(&sec, optval, len)) { err = -EFAULT; break; } @@ -732,7 +734,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index c8c3d38cdc7b..37260baf7150 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -791,7 +791,7 @@ static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, } static int sco_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int len, err = 0; @@ -810,7 +810,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -831,7 +831,7 @@ static int sco_sock_setsockopt(struct socket 
*sock, int level, int optname, voice.setting = sco_pi(sk)->setting; len = min_t(unsigned int, sizeof(voice), optlen); - if (copy_from_user((char *)&voice, optval, len)) { + if (copy_from_sockptr(&voice, optval, len)) { err = -EFAULT; break; } diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index b94ecd931002..3ad0a1df6712 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -669,8 +669,8 @@ static int caif_stream_sendmsg(struct socket *sock, struct msghdr *msg, return sent ? : err; } -static int setsockopt(struct socket *sock, - int lvl, int opt, char __user *ov, unsigned int ol) +static int setsockopt(struct socket *sock, int lvl, int opt, sockptr_t ov, + unsigned int ol) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); @@ -685,7 +685,7 @@ static int setsockopt(struct socket *sock, return -EINVAL; if (lvl != SOL_CAIF) goto bad_sol; - if (copy_from_user(&linksel, ov, sizeof(int))) + if (copy_from_sockptr(&linksel, ov, sizeof(int))) return -EINVAL; lock_sock(&(cf_sk->sk)); cf_sk->conn_req.link_selector = linksel; @@ -699,7 +699,7 @@ static int setsockopt(struct socket *sock, return -ENOPROTOOPT; lock_sock(&(cf_sk->sk)); if (ol > sizeof(cf_sk->conn_req.param.data) || - copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { + copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) { release_sock(&cf_sk->sk); return -EINVAL; } diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index f7587428febd..78ff9b3f1d40 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -627,14 +627,14 @@ static int j1939_sk_release(struct socket *sock) return 0; } -static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, +static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval, unsigned int optlen, int flag) { int tmp; if (optlen != sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, optval, optlen)) + if (copy_from_sockptr(&tmp, optval, optlen)) return -EFAULT; lock_sock(&jsk->sk); if (tmp) @@ -646,7 +646,7 @@ static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, } static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct j1939_sock *jsk = j1939_sk(sk); @@ -658,7 +658,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, switch (optname) { case SO_J1939_FILTER: - if (optval) { + if (!sockptr_is_null(optval)) { struct j1939_filter *f; int c; @@ -670,7 +670,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, return -EINVAL; count = optlen / sizeof(*filters); - filters = memdup_user(optval, optlen); + filters = memdup_sockptr(optval, optlen); if (IS_ERR(filters)) return PTR_ERR(filters); @@ -703,7 +703,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, case SO_J1939_SEND_PRIO: if (optlen != sizeof(tmp)) return -EINVAL; - if (copy_from_user(&tmp, optval, optlen)) + if (copy_from_sockptr(&tmp, optval, optlen)) return -EFAULT; if (tmp < 0 || tmp > 7) return -EDOM; diff --git a/net/can/raw.c b/net/can/raw.c index 59c039d73c6d..94a9405658dc 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -485,7 +485,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr, } static int raw_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { 
struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); @@ -511,11 +511,11 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (count > 1) { /* filter does not fit into dfilter => alloc space */ - filter = memdup_user(optval, optlen); + filter = memdup_sockptr(optval, optlen); if (IS_ERR(filter)) return PTR_ERR(filter); } else if (count == 1) { - if (copy_from_user(&sfilter, optval, sizeof(sfilter))) + if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter))) return -EFAULT; } @@ -568,7 +568,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(err_mask)) return -EINVAL; - if (copy_from_user(&err_mask, optval, optlen)) + if (copy_from_sockptr(&err_mask, optval, optlen)) return -EFAULT; err_mask &= CAN_ERR_MASK; @@ -607,7 +607,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->loopback)) return -EINVAL; - if (copy_from_user(&ro->loopback, optval, optlen)) + if (copy_from_sockptr(&ro->loopback, optval, optlen)) return -EFAULT; break; @@ -616,7 +616,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->recv_own_msgs)) return -EINVAL; - if (copy_from_user(&ro->recv_own_msgs, optval, optlen)) + if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen)) return -EFAULT; break; @@ -625,7 +625,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->fd_frames)) return -EINVAL; - if (copy_from_user(&ro->fd_frames, optval, optlen)) + if (copy_from_sockptr(&ro->fd_frames, optval, optlen)) return -EFAULT; break; @@ -634,7 +634,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, if (optlen != sizeof(ro->join_filters)) return -EINVAL; - if (copy_from_user(&ro->join_filters, optval, optlen)) + if (copy_from_sockptr(&ro->join_filters, optval, optlen)) return -EFAULT; break; diff --git a/net/core/sock.c b/net/core/sock.c index 1444d7d53ba2..2c5dd1397775 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3211,7 +3211,7 @@ EXPORT_SYMBOL(sock_common_recvmsg); * Set socket options on an inet socket. 
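The per-protocol conversions in this patch follow two recurring shapes, shown side by side in the hypothetical handler below (the function name, option numbers and locking comments are made up for illustration; only the helper calls mirror the real hunks): small fixed-size options are copied onto the stack with copy_from_sockptr(), while variable-length blobs such as filter arrays are duplicated with memdup_sockptr(), as the j1939 and CAN raw hunks above do.

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sockptr.h>
#include <net/sock.h>

/* Made-up option numbers, purely for illustration. */
#define EXAMPLE_OPT_FLAG	1
#define EXAMPLE_OPT_FILTER	2

static int example_setsockopt(struct sock *sk, int optname,
			      sockptr_t optval, unsigned int optlen)
{
	void *filter;
	int val;

	switch (optname) {
	case EXAMPLE_OPT_FLAG:		/* fixed-size integer option */
		if (optlen < sizeof(int))
			return -EINVAL;
		if (copy_from_sockptr(&val, optval, sizeof(val)))
			return -EFAULT;
		/* ... apply val under lock_sock(sk) ... */
		return 0;
	case EXAMPLE_OPT_FILTER:	/* variable-length option */
		filter = memdup_sockptr(optval, optlen);
		if (IS_ERR(filter))
			return PTR_ERR(filter);
		/*
		 * A real handler would swap this into the socket; the
		 * sketch only validates the copy and frees it again.
		 */
		kfree(filter);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}

Because the handler never dereferences optval directly, the same code path serves both setsockopt(2) and in-kernel callers.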
*/ int sock_common_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 434eea91b767..9cc9d1ee6cdb 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -295,7 +295,7 @@ int dccp_disconnect(struct sock *sk, int flags); int dccp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); int dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); + sockptr_t optval, unsigned int optlen); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg); int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 9e453611107f..2e9e8449698f 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -411,7 +411,7 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) EXPORT_SYMBOL_GPL(dccp_ioctl); static int dccp_setsockopt_service(struct sock *sk, const __be32 service, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_service_list *sl = NULL; @@ -426,9 +426,9 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service, return -ENOMEM; sl->dccpsl_nr = optlen / sizeof(u32) - 1; - if (copy_from_user(sl->dccpsl_list, - optval + sizeof(service), - optlen - sizeof(service)) || + sockptr_advance(optval, sizeof(service)); + if (copy_from_sockptr(sl->dccpsl_list, optval, + optlen - sizeof(service)) || dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { kfree(sl); return -EFAULT; @@ -482,7 +482,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx) } static int dccp_setsockopt_ccid(struct sock *sk, int type, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { u8 *val; int rc = 0; @@ -490,7 +490,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) return -EINVAL; - val = memdup_user(optval, optlen); + val = memdup_sockptr(optval, optlen); if (IS_ERR(val)) return PTR_ERR(val); @@ -507,7 +507,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type, } static int do_dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dccp_sock *dp = dccp_sk(sk); int val, err = 0; @@ -529,7 +529,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, if (optlen < (int)sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; if (optname == DCCP_SOCKOPT_SERVICE) @@ -572,8 +572,8 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname, return err; } -int dccp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level != SOL_DCCP) return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level, diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 7d51ab608fb3..3b53d766789d 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -150,7 +150,8 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; static struct hlist_head dn_wild_sk; 
static atomic_long_t decnet_memory_allocated; -static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); +static int __dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen, int flags); static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); static struct hlist_head *dn_find_list(struct sock *sk) @@ -1320,7 +1321,8 @@ static int dn_shutdown(struct socket *sock, int how) return err; } -static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +static int dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; int err; @@ -1332,14 +1334,14 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != DSO_LINKINFO && optname != DSO_STREAM && optname != DSO_SEQPACKET) - err = nf_setsockopt(sk, PF_DECnet, optname, - USER_SOCKPTR(optval), optlen); + err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen); #endif return err; } -static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags) +static int __dn_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen, int flags) { struct sock *sk = sock->sk; struct dn_scp *scp = DN_SK(sk); @@ -1355,13 +1357,13 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us } u; int err; - if (optlen && !optval) + if (optlen && sockptr_is_null(optval)) return -EINVAL; if (optlen > sizeof(u)) return -EINVAL; - if (copy_from_user(&u, optval, optlen)) + if (copy_from_sockptr(&u, optval, optlen)) return -EFAULT; switch (optname) { diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 94ae9662133e..a45a0401adc5 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -382,7 +382,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname, } static int raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { return -EOPNOTSUPP; } @@ -872,7 +872,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname, } static int dgram_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct dgram_sock *ro = dgram_sk(sk); struct net *net = sock_net(sk); @@ -882,7 +882,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; lock_sock(sk); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index f7f1507b89fe..8dc027e54c5b 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1401,21 +1401,19 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) skb_dst_drop(skb); } -int ip_setsockopt(struct sock *sk, int level, - int optname, char __user *optval, unsigned int optlen) +int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { int err; if (level != SOL_IP) return -ENOPROTOOPT; - err = do_ip_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - 
optlen); + err = do_ip_setsockopt(sk, level, optname, optval, optlen); #if IS_ENABLED(CONFIG_BPFILTER_UMH) if (optname >= BPFILTER_IPT_SO_SET_REPLACE && optname < BPFILTER_IPT_SET_MAX) - err = bpfilter_ip_set_sockopt(sk, optname, USER_SOCKPTR(optval), - optlen); + err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen); #endif #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ @@ -1423,8 +1421,7 @@ int ip_setsockopt(struct sock *sk, int level, optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY && !ip_mroute_opt(optname)) - err = nf_setsockopt(sk, PF_INET, optname, USER_SOCKPTR(optval), - optlen); + err = nf_setsockopt(sk, PF_INET, optname, optval, optlen); #endif return err; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2a57d633b31e..6fd4330287c2 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -809,11 +809,11 @@ static int raw_sk_init(struct sock *sk) return 0; } -static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen) +static int raw_seticmpfilter(struct sock *sk, sockptr_t optval, int optlen) { if (optlen > sizeof(struct icmp_filter)) optlen = sizeof(struct icmp_filter); - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) + if (copy_from_sockptr(&raw_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; } @@ -838,7 +838,7 @@ out: return ret; } static int do_raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { if (optname == ICMP_FILTER) { if (inet_sk(sk)->inet_num != IPPROTO_ICMP) @@ -850,7 +850,7 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname, } static int raw_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { if (level != SOL_RAW) return ip_setsockopt(sk, level, optname, optval, optlen); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71cbc61c335f..27de9380ed14 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3323,7 +3323,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname, return err; } -int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, +int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, unsigned int optlen) { const struct inet_connection_sock *icsk = inet_csk(sk); @@ -3331,8 +3331,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, if (level != SOL_TCP) return icsk->icsk_af_ops->setsockopt(sk, level, optname, optval, optlen); - return do_tcp_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - optlen); + return do_tcp_setsockopt(sk, level, optname, optval, optlen); } EXPORT_SYMBOL(tcp_setsockopt); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index c6cb2d09dbc7..5a6a2f6d86b9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2703,12 +2703,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, } EXPORT_SYMBOL(udp_lib_setsockopt); -int udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, - USER_SOCKPTR(optval), optlen, + optval, optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index ab313702c87f..2878d8285caf 100644 
--- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h @@ -12,8 +12,8 @@ int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *); int udp_v4_get_port(struct sock *sk, unsigned short snum); void udp_v4_rehash(struct sock *sk); -int udp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index dcd000a5a9b1..d2282f5c9760 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -980,8 +980,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, return -EINVAL; } -int ipv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { int err; @@ -991,14 +991,12 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname, if (level != SOL_IPV6) return -ENOPROTOOPT; - err = do_ipv6_setsockopt(sk, level, optname, USER_SOCKPTR(optval), - optlen); + err = do_ipv6_setsockopt(sk, level, optname, optval, optlen); #ifdef CONFIG_NETFILTER /* we need to exclude all possible ENOPROTOOPTs except default case */ if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY && optname != IPV6_XFRM_POLICY) - err = nf_setsockopt(sk, PF_INET6, optname, USER_SOCKPTR(optval), - optlen); + err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen); #endif return err; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 594e01ad670a..874f01cd7aec 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -972,13 +972,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, - char __user *optval, int optlen) + sockptr_t optval, int optlen) { switch (optname) { case ICMPV6_FILTER: if (optlen > sizeof(struct icmp6_filter)) optlen = sizeof(struct icmp6_filter); - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) + if (copy_from_sockptr(&raw6_sk(sk)->filter, optval, optlen)) return -EFAULT; return 0; default: @@ -1015,12 +1015,12 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct raw6_sock *rp = raw6_sk(sk); int val; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (optname) { @@ -1062,7 +1062,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, } static int rawv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { switch (level) { case SOL_RAW: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2df1e6c9d7cb..15818e18655d 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1618,12 +1618,12 @@ void udpv6_destroy_sock(struct sock *sk) /* * Socket option code for UDP */ -int udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) +int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, - 
USER_SOCKPTR(optval), optlen, + optval, optlen, udp_v6_push_pending_frames); return ipv6_setsockopt(sk, level, optname, optval, optlen); } diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index 30dfb6f1b762..b2fcc46c1630 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -17,8 +17,8 @@ void udp_v6_rehash(struct sock *sk); int udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen); -int udpv6_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen); +int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, + unsigned int optlen); int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ee0add15497d..6ee9851ac7c6 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1494,7 +1494,7 @@ static int iucv_sock_release(struct socket *sock) /* getsockopt and setsockopt */ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); @@ -1507,7 +1507,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; rc = 0; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index 56fac24a627a..56dad9565bc9 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -1265,7 +1265,7 @@ static void kcm_recv_enable(struct kcm_sock *kcm) } static int kcm_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct kcm_sock *kcm = kcm_sk(sock->sk); int val, valbool; @@ -1277,8 +1277,8 @@ static int kcm_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EINVAL; + if (copy_from_sockptr(&val, optval, sizeof(int))) + return -EFAULT; valbool = val ? 1 : 0; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index e58fe7e3b884..4389df66af35 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1242,7 +1242,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, * session or the special tunnel type. */ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct l2tp_session *session; @@ -1256,7 +1256,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; err = -ENOTCONN; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6140a3e46c26..7180979114e4 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -1053,7 +1053,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd, * Set various connection specific parameters. 
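On the calling side, the user pointer is now wrapped exactly once at the syscall boundary. The fragment below is a condensed, approximate view of the net/socket.c path after this patch; error handling, the compat and security checks, and the bpf-cgroup hook that may substitute a kernel buffer are elided, so treat it as a sketch of the control flow rather than the exact code. In-kernel callers build a KERNEL_SOCKPTR() instead, which is why no set_fs(KERNEL_DS) region is needed anywhere outside architecture-specific code.

#include <linux/file.h>
#include <linux/socket.h>
#include <linux/sockptr.h>
#include <net/sock.h>

/* Condensed, approximate sketch of the post-conversion caller path. */
int __sys_setsockopt(int fd, int level, int optname,
		     char __user *user_optval, int optlen)
{
	sockptr_t optval = USER_SOCKPTR(user_optval);
	struct socket *sock;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		return err;

	/*
	 * The bpf-cgroup hook may copy the value into a kernel buffer
	 * and replace optval with KERNEL_SOCKPTR(kernel_optval) here.
	 */
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname,
					    optval, optlen);

	fput_light(sock->file, fput_needed);
	return err;
}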
*/ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); @@ -1063,7 +1063,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; - rc = get_user(opt, (int __user *)optval); + rc = copy_from_sockptr(&opt, optval, sizeof(opt)); if (rc) goto out; rc = -EINVAL; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 7246847efa90..2891ae8a1028 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1632,7 +1632,7 @@ static void mptcp_destroy(struct sock *sk) } static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; struct socket *ssock; @@ -1648,8 +1648,7 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return -EINVAL; } - ret = sock_setsockopt(ssock, SOL_SOCKET, optname, - USER_SOCKPTR(optval), optlen); + ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); if (ret == 0) { if (optname == SO_REUSEPORT) sk->sk_reuseport = ssock->sk->sk_reuseport; @@ -1660,12 +1659,11 @@ static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, return ret; } - return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, - USER_SOCKPTR(optval), optlen); + return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); } static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = (struct sock *)msk; int ret = -EOPNOTSUPP; @@ -1692,7 +1690,7 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, } static int mptcp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct mptcp_sock *msk = mptcp_sk(sk); struct sock *ssk; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 3cd58f0c2de4..d8921b833744 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -1621,7 +1621,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk, } static int netlink_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); @@ -1632,7 +1632,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, return -ENOPROTOOPT; if (optlen >= sizeof(int) && - get_user(val, (unsigned int __user *)optval)) + copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (optname) { diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index f90ef6934b8f..6d16e1ab1a8a 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -294,7 +294,7 @@ void nr_destroy_socket(struct sock *sk) */ static int nr_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); @@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(unsigned int)) return -EINVAL; - if (get_user(opt, (unsigned int __user *)optval)) + if 
(copy_from_sockptr(&opt, optval, sizeof(unsigned int))) return -EFAULT; switch (optname) { diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 6da1e2334bb6..d257ed3b732a 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -218,7 +218,7 @@ static int llcp_sock_listen(struct socket *sock, int backlog) } static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk); @@ -241,7 +241,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } @@ -263,7 +263,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (get_user(opt, (u32 __user *) optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d8d4f78f78e4..0b8160d1a6e0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1558,7 +1558,7 @@ static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data, return 0; } -static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, +static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data, unsigned int len) { struct bpf_prog *new; @@ -1568,7 +1568,7 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, return -EPERM; if (len != sizeof(fd)) return -EINVAL; - if (copy_from_user(&fd, data, len)) + if (copy_from_sockptr(&fd, data, len)) return -EFAULT; new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER); @@ -1579,12 +1579,12 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data, return 0; } -static int fanout_set_data(struct packet_sock *po, char __user *data, +static int fanout_set_data(struct packet_sock *po, sockptr_t data, unsigned int len) { switch (po->fanout->type) { case PACKET_FANOUT_CBPF: - return fanout_set_data_cbpf(po, USER_SOCKPTR(data), len); + return fanout_set_data_cbpf(po, data, len); case PACKET_FANOUT_EBPF: return fanout_set_data_ebpf(po, data, len); default: @@ -3652,7 +3652,8 @@ static void packet_flush_mclist(struct sock *sk) } static int -packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) +packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, + unsigned int optlen) { struct sock *sk = sock->sk; struct packet_sock *po = pkt_sk(sk); @@ -3672,7 +3673,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EINVAL; if (len > sizeof(mreq)) len = sizeof(mreq); - if (copy_from_user(&mreq, optval, len)) + if (copy_from_sockptr(&mreq, optval, len)) return -EFAULT; if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address))) return -EINVAL; @@ -3703,7 +3704,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < len) { ret = -EINVAL; } else { - if (copy_from_user(&req_u.req, optval, len)) + if (copy_from_sockptr(&req_u.req, optval, len)) ret = -EFAULT; else ret = packet_set_ring(sk, &req_u, 0, @@ -3718,7 +3719,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) 
return -EFAULT; pkt_sk(sk)->copy_thresh = val; @@ -3730,7 +3731,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; switch (val) { case TPACKET_V1: @@ -3756,7 +3757,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; if (val > INT_MAX) return -EINVAL; @@ -3776,7 +3777,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3795,7 +3796,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3809,7 +3810,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3825,7 +3826,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv return -EINVAL; if (optlen < sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3844,7 +3845,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; po->tp_tstamp = val; @@ -3856,7 +3857,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; return fanout_add(sk, val & 0xffff, val >> 16); @@ -3874,7 +3875,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; if (val < 0 || val > 1) return -EINVAL; @@ -3888,7 +3889,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; lock_sock(sk); @@ -3907,7 +3908,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (copy_from_user(&val, optval, sizeof(val))) + if (copy_from_sockptr(&val, optval, sizeof(val))) return -EFAULT; po->xmit = val ? 
packet_direct_xmit : dev_queue_xmit; diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 4577e43cb777..e47d09aca4af 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -975,7 +975,7 @@ static int pep_init(struct sock *sk) } static int pep_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct pep_sock *pn = pep_sk(sk); int val = 0, err = 0; @@ -983,7 +983,7 @@ static int pep_setsockopt(struct sock *sk, int level, int optname, if (level != SOL_PNPIPE) return -ENOPROTOOPT; if (optlen >= sizeof(int)) { - if (get_user(val, (int __user *) optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; } diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c index 1a5bf3fa4578..b239120dd9ca 100644 --- a/net/rds/af_rds.c +++ b/net/rds/af_rds.c @@ -290,8 +290,7 @@ static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) return 0; } -static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, - int len) +static int rds_cancel_sent_to(struct rds_sock *rs, sockptr_t optval, int len) { struct sockaddr_in6 sin6; struct sockaddr_in sin; @@ -308,14 +307,15 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, goto out; } else if (len < sizeof(struct sockaddr_in6)) { /* Assume IPv4 */ - if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) { + if (copy_from_sockptr(&sin, optval, + sizeof(struct sockaddr_in))) { ret = -EFAULT; goto out; } ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr); sin6.sin6_port = sin.sin_port; } else { - if (copy_from_user(&sin6, optval, + if (copy_from_sockptr(&sin6, optval, sizeof(struct sockaddr_in6))) { ret = -EFAULT; goto out; @@ -327,21 +327,20 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval, return ret; } -static int rds_set_bool_option(unsigned char *optvar, char __user *optval, +static int rds_set_bool_option(unsigned char *optvar, sockptr_t optval, int optlen) { int value; if (optlen < sizeof(int)) return -EINVAL; - if (get_user(value, (int __user *) optval)) + if (copy_from_sockptr(&value, optval, sizeof(int))) return -EFAULT; *optvar = !!value; return 0; } -static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, - int optlen) +static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen) { int ret; @@ -358,8 +357,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval, return ret; } -static int rds_set_transport(struct rds_sock *rs, char __user *optval, - int optlen) +static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen) { int t_type; @@ -369,7 +367,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval, if (optlen != sizeof(int)) return -EINVAL; - if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type))) + if (copy_from_sockptr(&t_type, optval, sizeof(t_type))) return -EFAULT; if (t_type < 0 || t_type >= RDS_TRANS_COUNT) @@ -380,7 +378,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval, return rs->rs_transport ? 
0 : -ENOPROTOOPT; } -static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, +static int rds_enable_recvtstamp(struct sock *sk, sockptr_t optval, int optlen, int optname) { int val, valbool; @@ -388,7 +386,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, if (optlen != sizeof(int)) return -EFAULT; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; valbool = val ? 1 : 0; @@ -404,7 +402,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval, return 0; } -static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, +static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_rx_trace_so trace; @@ -413,7 +411,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, if (optlen != sizeof(struct rds_rx_trace_so)) return -EFAULT; - if (copy_from_user(&trace, optval, sizeof(trace))) + if (copy_from_sockptr(&trace, optval, sizeof(trace))) return -EFAULT; if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX) @@ -432,7 +430,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval, } static int rds_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct rds_sock *rs = rds_sk_to_rs(sock->sk); int ret; diff --git a/net/rds/rdma.c b/net/rds/rdma.c index a7ae11846cd7..ccdd304eae0a 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -353,21 +353,20 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args, return ret; } -int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen) +int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_get_mr_args args; if (optlen != sizeof(struct rds_get_mr_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval, - sizeof(struct rds_get_mr_args))) + if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args))) return -EFAULT; return __rds_rdma_map(rs, &args, NULL, NULL, NULL); } -int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) +int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_get_mr_for_dest_args args; struct rds_get_mr_args new_args; @@ -375,7 +374,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) if (optlen != sizeof(struct rds_get_mr_for_dest_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval, + if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_for_dest_args))) return -EFAULT; @@ -394,7 +393,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen) /* * Free the MR indicated by the given R_Key */ -int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) +int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen) { struct rds_free_mr_args args; struct rds_mr *mr; @@ -403,8 +402,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen) if (optlen != sizeof(struct rds_free_mr_args)) return -EINVAL; - if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval, - sizeof(struct rds_free_mr_args))) + if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args))) return -EFAULT; /* Special case - a null cookie means flush all unused MRs */ diff --git a/net/rds/rds.h b/net/rds/rds.h index 106e862996b9..d35d1fc39807 100644 --- a/net/rds/rds.h 
+++ b/net/rds/rds.h @@ -924,9 +924,9 @@ int rds_send_pong(struct rds_conn_path *cp, __be16 dport); /* rdma.c */ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); -int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen); -int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen); -int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen); +int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen); +int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen); +int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen); void rds_rdma_drop_keys(struct rds_sock *rs); int rds_rdma_extra_size(struct rds_rdma_args *args, struct rds_iov_vector *iov); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index ce85656ac9c1..cf7d974e0f61 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -365,7 +365,7 @@ void rose_destroy_socket(struct sock *sk) */ static int rose_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); @@ -377,7 +377,7 @@ static int rose_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(opt, (int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(int))) return -EFAULT; switch (optname) { diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index cd7d0d204c74..e6725a6de015 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -588,7 +588,7 @@ EXPORT_SYMBOL(rxrpc_sock_set_min_security_level); * set RxRPC socket options */ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct rxrpc_sock *rx = rxrpc_sk(sock->sk); unsigned int min_sec_level; @@ -639,8 +639,8 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNBOUND) goto error; - ret = get_user(min_sec_level, - (unsigned int __user *) optval); + ret = copy_from_sockptr(&min_sec_level, optval, + sizeof(unsigned int)); if (ret < 0) goto error; ret = -EINVAL; @@ -658,7 +658,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, if (rx->sk.sk_state != RXRPC_SERVER_BOUND2) goto error; ret = -EFAULT; - if (copy_from_user(service_upgrade, optval, + if (copy_from_sockptr(service_upgrade, optval, sizeof(service_upgrade)) != 0) goto error; ret = -EINVAL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 9a2139ebd67d..6d29a3603a3e 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -909,8 +909,8 @@ extern const struct rxrpc_security rxrpc_no_security; extern struct key_type key_type_rxrpc; extern struct key_type key_type_rxrpc_s; -int rxrpc_request_key(struct rxrpc_sock *, char __user *, int); -int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int); +int rxrpc_request_key(struct rxrpc_sock *, sockptr_t , int); +int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int); int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t, u32); diff --git a/net/rxrpc/key.c b/net/rxrpc/key.c index 0c98313dd7a8..94c3df392651 100644 --- a/net/rxrpc/key.c +++ b/net/rxrpc/key.c @@ -896,7 +896,7 @@ static void rxrpc_describe(const struct key *key, struct seq_file *m) /* * grab the security key for a socket */ -int rxrpc_request_key(struct rxrpc_sock *rx, 
char __user *optval, int optlen) +int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen) { struct key *key; char *description; @@ -906,7 +906,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) if (optlen <= 0 || optlen > PAGE_SIZE - 1) return -EINVAL; - description = memdup_user_nul(optval, optlen); + description = memdup_sockptr_nul(optval, optlen); if (IS_ERR(description)) return PTR_ERR(description); @@ -926,8 +926,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen) /* * grab the security keyring for a server socket */ -int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, - int optlen) +int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen) { struct key *key; char *description; @@ -937,7 +936,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval, if (optlen <= 0 || optlen > PAGE_SIZE - 1) return -EINVAL; - description = memdup_user_nul(optval, optlen); + description = memdup_sockptr_nul(optval, optlen); if (IS_ERR(description)) return PTR_ERR(description); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9a767f359718..144808dfea9e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4429,7 +4429,7 @@ static int sctp_setsockopt_pf_expose(struct sock *sk, * optlen - the size of the buffer. */ static int sctp_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { void *kopt = NULL; int retval = 0; @@ -4449,7 +4449,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, } if (optlen > 0) { - kopt = memdup_user(optval, optlen); + kopt = memdup_sockptr(optval, optlen); if (IS_ERR(kopt)) return PTR_ERR(kopt); } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 9711c9e0e515..4ac1d4de6676 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1731,7 +1731,7 @@ static int smc_shutdown(struct socket *sock, int how) } static int smc_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -1754,7 +1754,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) + if (copy_from_sockptr(&val, optval, sizeof(int))) return -EFAULT; lock_sock(sk); diff --git a/net/socket.c b/net/socket.c index c97f83d879ae..e44b8ac47f6f 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2094,10 +2094,10 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) * Set a socket option. Because we don't know the option lengths we have * to pass the user mode parameter for the protocols to sort out. 
*/ -int __sys_setsockopt(int fd, int level, int optname, char __user *optval, +int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - mm_segment_t oldfs = get_fs(); + sockptr_t optval = USER_SOCKPTR(user_optval); char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2115,7 +2115,7 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, if (!in_compat_syscall()) err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname, - optval, &optlen, + user_optval, &optlen, &kernel_optval); if (err < 0) goto out_put; @@ -2124,25 +2124,16 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *optval, goto out_put; } - if (kernel_optval) { - set_fs(KERNEL_DS); - optval = (char __user __force *)kernel_optval; - } - + if (kernel_optval) + optval = KERNEL_SOCKPTR(kernel_optval); if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock)) - err = sock_setsockopt(sock, level, optname, - USER_SOCKPTR(optval), optlen); + err = sock_setsockopt(sock, level, optname, optval, optlen); else if (unlikely(!sock->ops->setsockopt)) err = -EOPNOTSUPP; else err = sock->ops->setsockopt(sock, level, optname, optval, optlen); - - if (kernel_optval) { - set_fs(oldfs); - kfree(kernel_optval); - } - + kfree(kernel_optval); out_put: fput_light(sock->file, fput_needed); return err; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index fc388cef6471..07419f36116a 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -3103,7 +3103,7 @@ static int tipc_sk_leave(struct tipc_sock *tsk) * Returns 0 on success, errno otherwise */ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, - char __user *ov, unsigned int ol) + sockptr_t ov, unsigned int ol) { struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); @@ -3124,17 +3124,17 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt, case TIPC_NODELAY: if (ol < sizeof(value)) return -EINVAL; - if (get_user(value, (u32 __user *)ov)) + if (copy_from_sockptr(&value, ov, sizeof(u32))) return -EFAULT; break; case TIPC_GROUP_JOIN: if (ol < sizeof(mreq)) return -EINVAL; - if (copy_from_user(&mreq, ov, sizeof(mreq))) + if (copy_from_sockptr(&mreq, ov, sizeof(mreq))) return -EFAULT; break; default: - if (ov || ol) + if (!sockptr_is_null(ov) || ol) return -EINVAL; } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index ec10041c6b7d..d77f7d821130 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -450,7 +450,7 @@ static int tls_getsockopt(struct sock *sk, int level, int optname, return do_tls_getsockopt(sk, optname, optval, optlen); } -static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, +static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, unsigned int optlen, int tx) { struct tls_crypto_info *crypto_info; @@ -460,7 +460,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, int rc = 0; int conf; - if (!optval || (optlen < sizeof(*crypto_info))) { + if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) { rc = -EINVAL; goto out; } @@ -479,7 +479,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto out; } - rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; @@ -522,8 +522,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, goto err_crypto_info; } - rc = copy_from_user(crypto_info + 1, optval + 
sizeof(*crypto_info), - optlen - sizeof(*crypto_info)); + sockptr_advance(optval, sizeof(*crypto_info)); + rc = copy_from_sockptr(crypto_info + 1, optval, + optlen - sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; @@ -579,8 +580,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval, return rc; } -static int do_tls_setsockopt(struct sock *sk, int optname, - char __user *optval, unsigned int optlen) +static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, + unsigned int optlen) { int rc = 0; @@ -600,7 +601,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, } static int tls_setsockopt(struct sock *sk, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct tls_context *ctx = tls_get_ctx(sk); diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index df204c6761c4..27bbcfad9c17 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -1517,7 +1517,7 @@ static void vsock_update_buffer_size(struct vsock_sock *vsk, static int vsock_stream_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, + sockptr_t optval, unsigned int optlen) { int err; @@ -1535,7 +1535,7 @@ static int vsock_stream_setsockopt(struct socket *sock, err = -EINVAL; \ goto exit; \ } \ - if (copy_from_user(&_v, optval, sizeof(_v)) != 0) { \ + if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) { \ err = -EFAULT; \ goto exit; \ } \ diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d5b09bbff375..0bbb283f23c9 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -431,7 +431,7 @@ void x25_destroy_socket_from_timer(struct sock *sk) */ static int x25_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { int opt; struct sock *sk = sock->sk; @@ -445,7 +445,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname, goto out; rc = -EFAULT; - if (get_user(opt, (int __user *)optval)) + if (copy_from_sockptr(&opt, optval, sizeof(int))) goto out; if (opt) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 26e3bba8c204..2e94a7e94671 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -702,7 +702,7 @@ struct xdp_umem_reg_v1 { }; static int xsk_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); @@ -720,7 +720,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, if (optlen < sizeof(entries)) return -EINVAL; - if (copy_from_user(&entries, optval, sizeof(entries))) + if (copy_from_sockptr(&entries, optval, sizeof(entries))) return -EFAULT; mutex_lock(&xs->mutex); @@ -747,7 +747,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, else if (optlen < sizeof(mr)) mr_size = sizeof(struct xdp_umem_reg_v1); - if (copy_from_user(&mr, optval, mr_size)) + if (copy_from_sockptr(&mr, optval, mr_size)) return -EFAULT; mutex_lock(&xs->mutex); @@ -774,7 +774,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, struct xsk_queue **q; int entries; - if (copy_from_user(&entries, optval, sizeof(entries))) + if (copy_from_sockptr(&entries, optval, sizeof(entries))) return -EFAULT; mutex_lock(&xs->mutex); From 6d04fe15f78acdf8e32329e208552e226f7a8ae6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 23 Jul 2020 08:09:08 +0200 Subject: [PATCH 
1573/2056] net: optimize the sockptr_t for unified kernel/user address spaces For architectures like x86 and arm64 we don't need the separate bit to indicate that a pointer is a kernel pointer as the address spaces are unified. That way the sockptr_t can be reduced to a union of two pointers, which leads to nicer calling conventions. The only caveat is that we need to check that users don't pass in kernel address and thus gain access to kernel memory. Thus the USER_SOCKPTR helper is replaced with a init_user_sockptr function that does this check and returns an error if it fails. Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 32 ++++++++++++++++++++++++++++++-- net/ipv4/bpfilter/sockopt.c | 14 ++++++++------ net/socket.c | 6 +++++- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 700856e13ea0..7d5cdb2b30b5 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -8,9 +8,34 @@ #ifndef _LINUX_SOCKPTR_H #define _LINUX_SOCKPTR_H +#include #include #include +#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE +typedef union { + void *kernel; + void __user *user; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return (unsigned long)sockptr.kernel >= TASK_SIZE; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p }; +} + +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) +{ + if ((unsigned long)p >= TASK_SIZE) + return -EFAULT; + sp->user = p; + return 0; +} +#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { void *kernel; @@ -29,10 +54,13 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) return (sockptr_t) { .kernel = p, .is_kernel = true }; } -static inline sockptr_t USER_SOCKPTR(void __user *p) +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) { - return (sockptr_t) { .user = p }; + sp->user = p; + sp->is_kernel = false; + return 0; } +#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ static inline bool sockptr_is_null(sockptr_t sockptr) { diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 1b34cb9a7708..94f18d2352d0 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -57,16 +57,18 @@ int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, return bpfilter_mbox_request(sk, optname, optval, optlen, true); } -int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, - int __user *optlen) +int bpfilter_ip_get_sockopt(struct sock *sk, int optname, + char __user *user_optval, int __user *optlen) { - int len; + sockptr_t optval; + int err, len; if (get_user(len, optlen)) return -EFAULT; - - return bpfilter_mbox_request(sk, optname, USER_SOCKPTR(optval), len, - false); + err = init_user_sockptr(&optval, user_optval); + if (err) + return err; + return bpfilter_mbox_request(sk, optname, optval, len, false); } static int __init bpfilter_sockopt_init(void) diff --git a/net/socket.c b/net/socket.c index e44b8ac47f6f..94ca4547cd7c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2097,7 +2097,7 @@ static bool sock_use_custom_sol_socket(const struct socket *sock) int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, int optlen) { - sockptr_t optval = USER_SOCKPTR(user_optval); + sockptr_t optval; char *kernel_optval = NULL; int err, fput_needed; struct socket *sock; @@ -2105,6 +2105,10 @@ int 
__sys_setsockopt(int fd, int level, int optname, char __user *user_optval, if (optlen < 0) return -EINVAL; + err = init_user_sockptr(&optval, user_optval); + if (err) + return err; + sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) return err; From 2d6be17d85f43cdd74901571f2d1a3955a33d027 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 24 Jul 2020 16:36:13 -0700 Subject: [PATCH 1574/2056] mISDN: Don't try to print a sockptr_t from debug logging code. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/isdn/mISDN/socket.c: In function ‘data_sock_setsockopt’: ./include/linux/kern_levels.h:5:18: warning: format ‘%p’ expects argument of type ‘void *’, but argument 6 has type ‘sockptr_t’ [-Wformat=] 5 | #define KERN_SOH "\001" /* ASCII Start Of Header */ | ^~~~~~ ./include/linux/kern_levels.h:15:20: note: in expansion of macro ‘KERN_SOH’ 15 | #define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ | ^~~~~~~~ drivers/isdn/mISDN/socket.c:410:10: note: in expansion of macro ‘KERN_DEBUG’ 410 | printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock, | ^~~~~~~~~~ drivers/isdn/mISDN/socket.c:410:38: note: format string is defined here 410 | printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock, | ~^ | | | void * Signed-off-by: David S. Miller --- drivers/isdn/mISDN/socket.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c index 2835daae9e9f..a6606736d8c5 100644 --- a/drivers/isdn/mISDN/socket.c +++ b/drivers/isdn/mISDN/socket.c @@ -407,8 +407,8 @@ static int data_sock_setsockopt(struct socket *sock, int level, int optname, int err = 0, opt = 0; if (*debug & DEBUG_SOCKET) - printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock, - level, optname, optval, len); + printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock, + level, optname, len); lock_sock(sk); From dfd3d5266dc1d9a2b06e5a09bbff4cee547eeb5f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Fri, 24 Jul 2020 08:48:55 +0200 Subject: [PATCH 1575/2056] sctp: fix slab-out-of-bounds in SCTP_DELAYED_SACK processing This sockopt accepts two kinds of parameters, using struct sctp_sack_info and struct sctp_assoc_value. The mentioned commit didn't notice an implicit cast from the smaller (latter) struct to the bigger one (former) when copying the data from the user space, which now leads to an attempt to write beyond the buffer (because it assumes the storing buffer is bigger than the parameter itself). Fix it by allocating a sctp_sack_info on stack and filling it out based on the small struct for the compat case. Changelog stole from an earlier patch from Marcelo Ricardo Leitner. Fixes: ebb25defdc17 ("sctp: pass a kernel pointer to sctp_setsockopt_delayed_ack") Reported-by: syzbot+0e4699d000d8b874d8dc@syzkaller.appspotmail.com Signed-off-by: Christoph Hellwig Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/socket.c | 50 +++++++++++++++++++++++++++-------------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 144808dfea9e..ec1fba1fbe71 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2749,31 +2749,12 @@ static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params, * timer to expire. The default value for this is 2, setting this * value to 1 will disable the delayed sack algorithm. 
*/ - -static int sctp_setsockopt_delayed_ack(struct sock *sk, - struct sctp_sack_info *params, - unsigned int optlen) +static int __sctp_setsockopt_delayed_ack(struct sock *sk, + struct sctp_sack_info *params) { struct sctp_sock *sp = sctp_sk(sk); struct sctp_association *asoc; - if (optlen == sizeof(struct sctp_sack_info)) { - if (params->sack_delay == 0 && params->sack_freq == 0) - return 0; - } else if (optlen == sizeof(struct sctp_assoc_value)) { - pr_warn_ratelimited(DEPRECATED - "%s (pid %d) " - "Use of struct sctp_assoc_value in delayed_ack socket option.\n" - "Use struct sctp_sack_info instead\n", - current->comm, task_pid_nr(current)); - - if (params->sack_delay == 0) - params->sack_freq = 1; - else - params->sack_freq = 0; - } else - return -EINVAL; - /* Validate value parameter. */ if (params->sack_delay > 500) return -EINVAL; @@ -2821,6 +2802,33 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk, return 0; } +static int sctp_setsockopt_delayed_ack(struct sock *sk, + struct sctp_sack_info *params, + unsigned int optlen) +{ + if (optlen == sizeof(struct sctp_assoc_value)) { + struct sctp_assoc_value *v = (struct sctp_assoc_value *)params; + struct sctp_sack_info p; + + pr_warn_ratelimited(DEPRECATED + "%s (pid %d) " + "Use of struct sctp_assoc_value in delayed_ack socket option.\n" + "Use struct sctp_sack_info instead\n", + current->comm, task_pid_nr(current)); + + p.sack_assoc_id = v->assoc_id; + p.sack_delay = v->assoc_value; + p.sack_freq = v->assoc_value ? 0 : 1; + return __sctp_setsockopt_delayed_ack(sk, &p); + } + + if (optlen != sizeof(struct sctp_sack_info)) + return -EINVAL; + if (params->sack_delay == 0 && params->sack_freq == 0) + return 0; + return __sctp_setsockopt_delayed_ack(sk, params); +} + /* 7.1.3 Initialization Parameters (SCTP_INITMSG) * * Applications can specify protocol parameters for the default association From a65878d6f00bb2d791348ae90dcc2ede1dbe2b91 Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Fri, 24 Jul 2020 10:20:59 +0200 Subject: [PATCH 1576/2056] net: openvswitch: fixes potential deadlock in dp cleanup code The previous patch introduced a deadlock, this patch fixes it by making sure the work is canceled without holding the global ovs lock. This is done by moving the reorder processing one layer up to the netns level. Fixes: eac87c413bf9 ("net: openvswitch: reorder masks array based on usage") Reported-by: syzbot+2c4ff3614695f75ce26c@syzkaller.appspotmail.com Reported-by: syzbot+bad6507e5db05017b008@syzkaller.appspotmail.com Reviewed-by: Paolo Signed-off-by: Eelco Chaudron Signed-off-by: David S. Miller --- net/openvswitch/datapath.c | 23 ++++++++++++----------- net/openvswitch/datapath.h | 4 +--- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 95805f0e27bd..6b6822f82f70 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1655,7 +1655,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) goto err_destroy_reply; ovs_dp_set_net(dp, sock_net(skb->sk)); - INIT_DELAYED_WORK(&dp->masks_rebalance, ovs_dp_masks_rebalance); /* Allocate table. 
*/ err = ovs_flow_tbl_init(&dp->table); @@ -1715,9 +1714,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id); list_add_tail_rcu(&dp->list_node, &ovs_net->dps); - schedule_delayed_work(&dp->masks_rebalance, - msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL)); - ovs_unlock(); ovs_notify(&dp_datapath_genl_family, reply, info); @@ -1762,9 +1758,6 @@ static void __dp_destroy(struct datapath *dp) /* RCU destroy the flow table */ call_rcu(&dp->rcu, destroy_dp_rcu); - - /* Cancel remaining work. */ - cancel_delayed_work_sync(&dp->masks_rebalance); } static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info) @@ -2349,14 +2342,18 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) static void ovs_dp_masks_rebalance(struct work_struct *work) { - struct datapath *dp = container_of(work, struct datapath, - masks_rebalance.work); + struct ovs_net *ovs_net = container_of(work, struct ovs_net, + masks_rebalance.work); + struct datapath *dp; ovs_lock(); - ovs_flow_masks_rebalance(&dp->table); + + list_for_each_entry(dp, &ovs_net->dps, list_node) + ovs_flow_masks_rebalance(&dp->table); + ovs_unlock(); - schedule_delayed_work(&dp->masks_rebalance, + schedule_delayed_work(&ovs_net->masks_rebalance, msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL)); } @@ -2454,6 +2451,9 @@ static int __net_init ovs_init_net(struct net *net) INIT_LIST_HEAD(&ovs_net->dps); INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq); + INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance); + schedule_delayed_work(&ovs_net->masks_rebalance, + msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL)); return ovs_ct_init(net); } @@ -2508,6 +2508,7 @@ static void __net_exit ovs_exit_net(struct net *dnet) ovs_unlock(); + cancel_delayed_work_sync(&ovs_net->masks_rebalance); cancel_work_sync(&ovs_net->dp_notify_work); } diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 697a2354194b..24fcec22fde2 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -84,9 +84,6 @@ struct datapath { /* Switch meters. */ struct dp_meter_table meter_tbl; - - /* re-balance flow masks timer */ - struct delayed_work masks_rebalance; }; /** @@ -135,6 +132,7 @@ struct dp_upcall_info { struct ovs_net { struct list_head dps; struct work_struct dp_notify_work; + struct delayed_work masks_rebalance; #if IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT) struct ovs_ct_limit_info *ct_limit_info; #endif From 623b57bec7c8daf89623c95cd9cc919e4090c9da Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 24 Jul 2020 14:09:19 +0100 Subject: [PATCH 1577/2056] sctp: remove redundant initialization of variable status The variable status is being initialized with a value that is never read and it is being updated later with a new value. The initialization is redundant and can be removed. Also put the variable declarations into reverse christmas tree order. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Acked-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/sctp/protocol.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 7ecaf7d575c0..d19db22262fd 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1367,15 +1367,15 @@ static struct pernet_operations sctp_ctrlsock_ops = { /* Initialize the universe into something sensible. 
*/ static __init int sctp_init(void) { - int i; - int status = -EINVAL; - unsigned long goal; - unsigned long limit; unsigned long nr_pages = totalram_pages(); - int max_share; - int order; - int num_entries; + unsigned long limit; + unsigned long goal; int max_entry_order; + int num_entries; + int max_share; + int status; + int order; + int i; sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); From c4e9e09f5589f9afe6b8f8c4fb078e0559bca667 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 24 Jul 2020 09:03:08 -0400 Subject: [PATCH 1578/2056] icmp: revise rfc4884 tests 1) Only accept packets with original datagram len field >= header len. The extension header must start after the original datagram headers. The embedded datagram len field is compared against the 128B minimum stipulated by RFC 4884. It is unlikely that headers extend beyond this. But as we know the exact header length, check explicitly. 2) Remove the check that datagram length must be <= 576B. This is a send constraint. There is no value in testing this on rx. Within private networks it may be known safe to send larger packets. Process these packets. This test was also too lax. It compared original datagram length rather than entire icmp packet length. The stand-alone fix would be: - if (hlen + skb->len > 576) + if (-skb_network_offset(skb) + skb->len > 576) Fixes: eba75c587e81 ("icmp: support rfc 4884") Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/icmp.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 793aebf07c2a..8d2654cdbd77 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1164,16 +1164,12 @@ void ip_icmp_error_rfc4884(const struct sk_buff *skb, return; } - /* outer headers up to inner iph. skb->data is at inner payload */ + /* original datagram headers: end of icmph to payload (skb->data) */ hlen = -skb_transport_offset(skb) - sizeof(struct icmphdr); - /* per rfc 791: maximum packet length of 576 bytes */ - if (hlen + skb->len > 576) - return; - /* per rfc 4884: minimal datagram length of 128 bytes */ off = icmp_hdr(skb)->un.reserved[1] * sizeof(u32); - if (off < 128) + if (off < 128 || off < hlen) return; /* kernel has stripped headers: return payload offset in bytes */ From 178c49d9f9a4b5ade00c93480d714708fe971e24 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 24 Jul 2020 09:03:09 -0400 Subject: [PATCH 1579/2056] icmp: prepare rfc 4884 for ipv6 The RFC 4884 spec is largely the same between IPv4 and IPv6. Factor out the IPv4 specific parts in preparation for IPv6 support: - icmp types supported - icmp header size, and thus offset to original datagram start - datagram length field offset in icmp(6)hdr. - datagram length field word size: 4B for IPv4, 8B for IPv6. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/icmp.h | 3 ++- net/ipv4/icmp.c | 17 ++++------------- net/ipv4/ip_sockglue.c | 14 +++++++++++++- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 8fc38a34cb20..0af4d210ee31 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -37,6 +37,7 @@ static inline bool icmp_is_err(int type) } void ip_icmp_error_rfc4884(const struct sk_buff *skb, - struct sock_ee_data_rfc4884 *out); + struct sock_ee_data_rfc4884 *out, + int thlen, int off); #endif /* _LINUX_ICMP_H */ diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 8d2654cdbd77..7498c58460a1 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1151,24 +1151,15 @@ static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) } void ip_icmp_error_rfc4884(const struct sk_buff *skb, - struct sock_ee_data_rfc4884 *out) + struct sock_ee_data_rfc4884 *out, + int thlen, int off) { - int hlen, off; - - switch (icmp_hdr(skb)->type) { - case ICMP_DEST_UNREACH: - case ICMP_TIME_EXCEEDED: - case ICMP_PARAMETERPROB: - break; - default: - return; - } + int hlen; /* original datagram headers: end of icmph to payload (skb->data) */ - hlen = -skb_transport_offset(skb) - sizeof(struct icmphdr); + hlen = -skb_transport_offset(skb) - thlen; /* per rfc 4884: minimal datagram length of 128 bytes */ - off = icmp_hdr(skb)->un.reserved[1] * sizeof(u32); if (off < 128 || off < hlen) return; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8dc027e54c5b..d2c223554ff7 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -390,6 +390,18 @@ int ip_ra_control(struct sock *sk, unsigned char on, return 0; } +static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + switch (icmp_hdr(skb)->type) { + case ICMP_DEST_UNREACH: + case ICMP_TIME_EXCEEDED: + case ICMP_PARAMETERPROB: + ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr), + icmp_hdr(skb)->un.reserved[1] * 4); + } +} + void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { @@ -413,7 +425,7 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, if (skb_pull(skb, payload - skb->data)) { if (inet_sk(sk)->recverr_rfc4884) - ip_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb) == 0) From 01370434df85eb76ecb1527a4466013c4aca2436 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 24 Jul 2020 09:03:10 -0400 Subject: [PATCH 1580/2056] icmp6: support rfc 4884 Extend the rfc 4884 read interface introduced for ipv4 in commit eba75c587e81 ("icmp: support rfc 4884") to ipv6. Add socket option SOL_IPV6/IPV6_RECVERR_RFC4884. Changes v1->v2: - make ipv6_icmp_error_rfc4884 static (file scope) Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/ipv6.h | 1 + include/uapi/linux/icmpv6.h | 1 + include/uapi/linux/in6.h | 1 + net/ipv4/icmp.c | 1 + net/ipv6/datagram.c | 16 ++++++++++++++++ net/ipv6/ipv6_sockglue.c | 12 ++++++++++++ 6 files changed, 32 insertions(+) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 8d8f877e7f81..a44789d027cc 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -283,6 +283,7 @@ struct ipv6_pinfo { autoflowlabel:1, autoflowlabel_set:1, mc_all:1, + recverr_rfc4884:1, rtalert_isolate:1; __u8 min_hopcount; __u8 tclass; diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index 2622b5a3e616..c1661febc2dc 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -68,6 +68,7 @@ struct icmp6hdr { #define icmp6_mtu icmp6_dataun.un_data32[0] #define icmp6_unused icmp6_dataun.un_data32[0] #define icmp6_maxdelay icmp6_dataun.un_data16[0] +#define icmp6_datagram_len icmp6_dataun.un_data8[0] #define icmp6_router icmp6_dataun.u_nd_advt.router #define icmp6_solicited icmp6_dataun.u_nd_advt.solicited #define icmp6_override icmp6_dataun.u_nd_advt.override diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h index 9f2273a08356..5ad396a57eb3 100644 --- a/include/uapi/linux/in6.h +++ b/include/uapi/linux/in6.h @@ -179,6 +179,7 @@ struct in6_flowlabel_req { #define IPV6_LEAVE_ANYCAST 28 #define IPV6_MULTICAST_ALL 29 #define IPV6_ROUTER_ALERT_ISOLATE 30 +#define IPV6_RECVERR_RFC4884 31 /* IPV6_MTU_DISCOVER values */ #define IPV6_PMTUDISC_DONT 0 diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 7498c58460a1..cf36f955bfe6 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -1173,6 +1173,7 @@ void ip_icmp_error_rfc4884(const struct sk_buff *skb, if (!ip_icmp_error_rfc4884_validate(skb, off)) out->flags |= SO_EE_RFC4884_FLAG_INVALID; } +EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884); int icmp_err(struct sk_buff *skb, u32 info) { diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 390bedde21a5..cc8ad7ddecda 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -284,6 +285,17 @@ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, } EXPORT_SYMBOL_GPL(ip6_datagram_connect_v6_only); +static void ipv6_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out) +{ + switch (icmp6_hdr(skb)->icmp6_type) { + case ICMPV6_TIME_EXCEED: + case ICMPV6_DEST_UNREACH: + ip_icmp_error_rfc4884(skb, out, sizeof(struct icmp6hdr), + icmp6_hdr(skb)->icmp6_datagram_len * 8); + } +} + void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { @@ -313,6 +325,10 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, serr->port = port; __skb_pull(skb, payload - skb->data); + + if (inet6_sk(sk)->recverr_rfc4884) + ipv6_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884); + skb_reset_transport_header(skb); if (sock_queue_err_skb(sk, skb)) diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d2282f5c9760..20c740976334 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -965,6 +965,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, np->rxopt.bits.recvfragsize = valbool; retv = 0; break; + case IPV6_RECVERR_RFC4884: + if (optlen < sizeof(int)) + goto e_inval; + if (val < 0 || val > 1) + goto e_inval; + np->recverr_rfc4884 = valbool; + retv = 0; + break; } release_sock(sk); @@ -1439,6 +1447,10 @@ static int 
do_ipv6_getsockopt(struct sock *sk, int level, int optname, val = np->rtalert_isolate; break; + case IPV6_RECVERR_RFC4884: + val = np->recverr_rfc4884; + break; + default: return -ENOPROTOOPT; } From 95075150d0bdaa78cc350efc606e6ed02fa2a991 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:49 +0100 Subject: [PATCH 1581/2056] l2tp: avoid multiple assignments checkpatch warns about multiple assignments. Update l2tp accordingly. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 6 +++--- net/l2tp/l2tp_ip.c | 12 ++++++++---- net/l2tp/l2tp_ip6.c | 6 ++++-- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 7e3523015d6f..b871cceeff7c 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -621,8 +621,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int length) { struct l2tp_tunnel *tunnel = session->tunnel; + u32 ns = 0, nr = 0; int offset; - u32 ns, nr; /* Parse and check optional cookie */ if (session->peer_cookie_len > 0) { @@ -644,7 +644,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, * the control of the LNS. If no sequence numbers present but * we were expecting them, discard frame. */ - ns = nr = 0; L2TP_SKB_CB(skb)->has_seq = 0; if (tunnel->version == L2TP_HDR_VER_2) { if (hdrflags & L2TP_HDRFLAG_S) { @@ -826,7 +825,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) } /* Point to L2TP header */ - optr = ptr = skb->data; + optr = skb->data; + ptr = skb->data; /* Get L2TP header flags */ hdrflags = ntohs(*(__be16 *)ptr); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index d81564cf1e7f..a159cb2bf0f4 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -124,7 +124,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) goto discard; /* Point to L2TP header */ - optr = ptr = skb->data; + optr = skb->data; + ptr = skb->data; session_id = ntohl(*((__be32 *)ptr)); ptr += 4; @@ -153,7 +154,8 @@ static int l2tp_ip_recv(struct sk_buff *skb) goto discard_sess; /* Point to L2TP header */ - optr = ptr = skb->data; + optr = skb->data; + ptr = skb->data; ptr += 4; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); @@ -284,8 +286,10 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; - if (addr->l2tp_addr.s_addr) - inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; + if (addr->l2tp_addr.s_addr) { + inet->inet_rcv_saddr = addr->l2tp_addr.s_addr; + inet->inet_saddr = addr->l2tp_addr.s_addr; + } if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 614febf8dd0d..bc757bc7e264 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -137,7 +137,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) goto discard; /* Point to L2TP header */ - optr = ptr = skb->data; + optr = skb->data; + ptr = skb->data; session_id = ntohl(*((__be32 *)ptr)); ptr += 4; @@ -166,7 +167,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb) goto discard_sess; /* Point to L2TP header */ - optr = ptr = skb->data; + optr = skb->data; + ptr = skb->data; ptr += 4; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); From 7a379558c28c435681221aa5d84ead8ff19211be Mon Sep 17 00:00:00 2001 From: Tom 
Parkin Date: Fri, 24 Jul 2020 16:31:50 +0100 Subject: [PATCH 1582/2056] l2tp: WARN_ON rather than BUG_ON in l2tp_dfs_seq_start l2tp_dfs_seq_start had a BUG_ON to catch a possible programming error in l2tp_dfs_seq_open. Since we can easily bail out of l2tp_dfs_seq_start, prefer to do that and flag the error with a WARN_ON rather than crashing the kernel. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_debugfs.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 72ba83aa0eaf..96cb9601c21b 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -72,7 +72,10 @@ static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs) if (!pos) goto out; - BUG_ON(!m->private); + if (WARN_ON(!m->private)) { + pd = NULL; + goto out; + } pd = m->private; if (!pd->tunnel) From ce2f86ae253de3f3e961d4f91438604a97c29752 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:51 +0100 Subject: [PATCH 1583/2056] l2tp: remove BUG_ON in l2tp_session_queue_purge l2tp_session_queue_purge is only called from l2tp_core.c, and it's easy to statically analyse the code paths calling it to validate that it should never be passed a NULL session pointer. Having a BUG_ON checking the session pointer triggers a checkpatch warning. Since the BUG_ON is of no value, remove it to avoid the warning. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index b871cceeff7c..a1ed8baa5aaa 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -777,7 +777,6 @@ static int l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; - BUG_ON(!session); BUG_ON(session->magic != L2TP_SESSION_MAGIC); while ((skb = skb_dequeue(&session->reorder_q))) { atomic_long_inc(&session->stats.rx_errors); From cd3e29b333cc2571a5875d1b45c460dc948a2f95 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:52 +0100 Subject: [PATCH 1584/2056] l2tp: remove BUG_ON in l2tp_tunnel_closeall l2tp_tunnel_closeall is only called from l2tp_core.c, and it's easy to statically analyse the code path calling it to validate that it should never be passed a NULL tunnel pointer. Having a BUG_ON checking the tunnel pointer triggers a checkpatch warning. Since the BUG_ON is of no value, remove it to avoid the warning. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index a1ed8baa5aaa..6be3f2e69efd 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1188,8 +1188,6 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) struct hlist_node *tmp; struct l2tp_session *session; - BUG_ON(!tunnel); - l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n", tunnel->name); From 1aa646ac71feb987a9571e1d70980d10ff495fbc Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:53 +0100 Subject: [PATCH 1585/2056] l2tp: don't BUG_ON session magic checks in l2tp_ppp checkpatch advises that WARN_ON and recovery code are preferred over BUG_ON which crashes the kernel. l2tp_ppp.c's BUG_ON checks of the l2tp session structure's "magic" field occur in code paths where it's reasonably easy to recover: * In the case of pppol2tp_sock_to_session, we can return NULL and the caller will bail out appropriately. 
There is no change required to any of the callsites of this function since they already handle pppol2tp_sock_to_session returning NULL. * In the case of pppol2tp_session_destruct we can just avoid decrementing the reference count on the suspect session structure. In the worst case scenario this results in a memory leak, which is preferable to a crash. Convert these uses of BUG_ON to WARN_ON accordingly. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 4389df66af35..2aeee648c080 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -163,8 +163,11 @@ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk) sock_put(sk); goto out; } - - BUG_ON(session->magic != L2TP_SESSION_MAGIC); + if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) { + session = NULL; + sock_put(sk); + goto out; + } out: return session; @@ -419,7 +422,8 @@ static void pppol2tp_session_destruct(struct sock *sk) if (session) { sk->sk_user_data = NULL; - BUG_ON(session->magic != L2TP_SESSION_MAGIC); + if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) + return; l2tp_session_dec_refcount(session); } } From ebb4f5e6e4cd68a856aeea5d319a76f47543c3a9 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:54 +0100 Subject: [PATCH 1586/2056] l2tp: don't BUG_ON seqfile checks in l2tp_ppp checkpatch advises that WARN_ON and recovery code are preferred over BUG_ON which crashes the kernel. l2tp_ppp has a BUG_ON check of struct seq_file's private pointer in pppol2tp_seq_start prior to accessing data through that pointer. Rather than crashing, we can simply bail out early and return NULL in order to terminate the seq file processing in much the same way as we do when reaching the end of tunnel/session instances to render. Retain a WARN_ON to help trace possible bugs in this area. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_ppp.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 2aeee648c080..13c3153b40d6 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1478,7 +1478,11 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs) if (!pos) goto out; - BUG_ON(!m->private); + if (WARN_ON(!m->private)) { + pd = NULL; + goto out; + } + pd = m->private; net = seq_file_net(m); From 493048f5dfcd0093880414928717f68613ba9157 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:55 +0100 Subject: [PATCH 1587/2056] l2tp: WARN_ON rather than BUG_ON in l2tp_session_queue_purge l2tp_session_queue_purge is used during session shutdown to drop any skbs queued for reordering purposes according to L2TP dataplane rules. The BUG_ON in this function checks the session magic feather in an attempt to catch lifetime bugs. Rather than crashing the kernel with a BUG_ON, we can simply WARN_ON and refuse to do anything more -- in the worst case this could result in a leak. However this is highly unlikely given that the session purge only occurs from codepaths which have obtained the session by means of a lookup via. the parent tunnel and which check the session "dead" flag to protect against shutdown races. While we're here, have l2tp_session_queue_purge return void rather than an integer, since neither of the callsites checked the return value. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6be3f2e69efd..e228480fa529 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -773,16 +773,17 @@ EXPORT_SYMBOL(l2tp_recv_common); /* Drop skbs from the session's reorder_q */ -static int l2tp_session_queue_purge(struct l2tp_session *session) +static void l2tp_session_queue_purge(struct l2tp_session *session) { struct sk_buff *skb = NULL; - BUG_ON(session->magic != L2TP_SESSION_MAGIC); + if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) + return; + while ((skb = skb_dequeue(&session->reorder_q))) { atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } - return 0; } /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame From 0dd62f69d898de3cf31a2f8b59e9a62bb5448457 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:56 +0100 Subject: [PATCH 1588/2056] l2tp: remove BUG_ON refcount value in l2tp_session_free l2tp_session_free is only called by l2tp_session_dec_refcount when the reference count reaches zero, so it's of limited value to validate the reference count value in l2tp_session_free itself. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e228480fa529..50548c61b91e 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1563,8 +1563,6 @@ void l2tp_session_free(struct l2tp_session *session) { struct l2tp_tunnel *tunnel = session->tunnel; - BUG_ON(refcount_read(&session->ref_count) != 0); - if (tunnel) { BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); l2tp_tunnel_dec_refcount(tunnel); From ab6934e084e5eee665adf6e834e5096ebae4a95f Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Fri, 24 Jul 2020 16:31:57 +0100 Subject: [PATCH 1589/2056] l2tp: WARN_ON rather than BUG_ON in l2tp_session_free l2tp_session_free called BUG_ON if the tunnel magic feather value wasn't correct. The intent of this was to catch lifetime bugs; for example early tunnel free due to incorrect use of reference counts. Since the tunnel magic feather being wrong indicates either early free or structure corruption, we can avoid doing more damage by simply leaving the tunnel structure alone. If the tunnel refcount isn't dropped when it should be, the tunnel instance will remain in the kernel, resulting in the tunnel structure and socket leaking. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 50548c61b91e..e723828e458b 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1564,10 +1564,12 @@ void l2tp_session_free(struct l2tp_session *session) struct l2tp_tunnel *tunnel = session->tunnel; if (tunnel) { - BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); + if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC)) + goto out; l2tp_tunnel_dec_refcount(tunnel); } +out: kfree(session); } EXPORT_SYMBOL_GPL(l2tp_session_free); From 9b964f16546167fd1d5dfcf4d66f5cc9bf608525 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Fri, 24 Jul 2020 21:46:30 +0800 Subject: [PATCH 1590/2056] net: hix5hd2_gmac: Remove unneeded cast from memory allocation Remove casting the values returned by memory allocation function. 
Coccinelle emits WARNING: ./drivers/net/ethernet/hisilicon/hix5hd2_gmac.c:1027:9-23: WARNING: casting value returned by memory allocation function to (struct sg_desc *) is useless. This issue was detected by using the Coccinelle software. Signed-off-by: Wang Hai Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index 4fb776920a93..8b2bf85039f1 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -1024,9 +1024,9 @@ static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv) struct sg_desc *desc; dma_addr_t phys_addr; - desc = (struct sg_desc *)dma_alloc_coherent(priv->dev, - TX_DESC_NUM * sizeof(struct sg_desc), - &phys_addr, GFP_KERNEL); + desc = dma_alloc_coherent(priv->dev, + TX_DESC_NUM * sizeof(struct sg_desc), + &phys_addr, GFP_KERNEL); if (!desc) return -ENOMEM; From 0f3c66a3c7b4e8b9f654b3c998e9674376a51b0f Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 24 Jul 2020 11:21:20 +1200 Subject: [PATCH 1591/2056] net: dsa: mv88e6xxx: MV88E6097 does not support jumbo configuration The MV88E6097 chip does not support configuring jumbo frames. Prior to commit 5f4366660d65 only the 6352, 6351, 6165 and 6320 chips configured jumbo mode. The refactor accidentally added the function for the 6097. Remove the erroneous function pointer assignment. Fixes: 5f4366660d65 ("net: dsa: mv88e6xxx: Refactor setting of jumbo frames") Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6f019955ae42..4ddb6f3035c9 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3494,7 +3494,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, - .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting, .port_pause_limit = mv88e6097_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, From e8b34c67d6c10ee3f187469958af3fb36c9c3361 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 24 Jul 2020 11:21:21 +1200 Subject: [PATCH 1592/2056] net: dsa: mv88e6xxx: Support jumbo configuration on 6190/6190X The MV88E6190 and MV88E6190X both support per port jumbo configuration just like the other GE switches. Install the appropriate ops. Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 4ddb6f3035c9..43a2ab8cf2c8 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3958,6 +3958,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_pause_limit = mv88e6390_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, @@ -4016,6 +4017,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, + .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_pause_limit = mv88e6390_port_pause_limit, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, From 1baf0fac10fbe3084975d7cb0a4378eb18871482 Mon Sep 17 00:00:00 2001 From: Chris Packham Date: Fri, 24 Jul 2020 11:21:22 +1200 Subject: [PATCH 1593/2056] net: dsa: mv88e6xxx: Use chip-wide max frame size for MTU Some of the chips in the mv88e6xxx family don't support jumbo configuration per port. But they do have a chip-wide max frame size that can be used. Use this to approximate the behaviour of configuring a port based MTU. Signed-off-by: Chris Packham Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 9 +++++++++ drivers/net/dsa/mv88e6xxx/chip.h | 3 +++ drivers/net/dsa/mv88e6xxx/global1.c | 17 +++++++++++++++++ drivers/net/dsa/mv88e6xxx/global1.h | 2 ++ 4 files changed, 31 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 43a2ab8cf2c8..b8a3e8c88c07 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2699,6 +2699,8 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port) if (chip->info->ops->port_set_jumbo_size) return 10240; + else if (chip->info->ops->set_max_frame_size) + return 1632; return 1522; } @@ -2710,6 +2712,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) mv88e6xxx_reg_lock(chip); if (chip->info->ops->port_set_jumbo_size) ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu); + else if (chip->info->ops->set_max_frame_size) + ret = chip->info->ops->set_max_frame_size(chip, new_mtu); else if (new_mtu > 1522) ret = -EINVAL; @@ -3450,6 +3454,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; static const struct mv88e6xxx_ops mv88e6095_ops = { @@ -3478,6 +3483,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; static const struct mv88e6xxx_ops mv88e6097_ops = { @@ -3515,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = 
mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; static const struct mv88e6xxx_ops mv88e6123_ops = { @@ -3549,6 +3556,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; static const struct mv88e6xxx_ops mv88e6131_ops = { @@ -3938,6 +3946,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, .phylink_validate = mv88e6185_phylink_validate, + .set_max_frame_size = mv88e6185_g1_set_max_frame_size, }; static const struct mv88e6xxx_ops mv88e6190_ops = { diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 1c541b074256..2d70eac30e58 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -552,6 +552,9 @@ struct mv88e6xxx_ops { void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port, unsigned long *mask, struct phylink_link_state *state); + + /* Max Frame Size */ + int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu); }; struct mv88e6xxx_irq_ops { diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c index ca3a7a7a73c3..f62aa83ca08d 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.c +++ b/drivers/net/dsa/mv88e6xxx/global1.c @@ -196,6 +196,23 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip) return mv88e6185_g1_wait_ppu_disabled(chip); } +int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu) +{ + u16 val; + int err; + + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val); + if (err) + return err; + + val &= ~MV88E6185_G1_CTL1_MAX_FRAME_1632; + + if (mtu > 1518) + val |= MV88E6185_G1_CTL1_MAX_FRAME_1632; + + return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val); +} + /* Offset 0x10: IP-PRI Mapping Register 0 * Offset 0x11: IP-PRI Mapping Register 1 * Offset 0x12: IP-PRI Mapping Register 2 diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h index 5324c6f4ae90..1e3546f8b072 100644 --- a/drivers/net/dsa/mv88e6xxx/global1.h +++ b/drivers/net/dsa/mv88e6xxx/global1.h @@ -282,6 +282,8 @@ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip); int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip); +int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu); + int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port); int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port); int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port); From f3c93a93b564d25cab715cd71a6c3417e77b19f3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 21:38:04 -0700 Subject: [PATCH 1594/2056] tools/bpftool: Strip BPF .o files before skeleton generation Strip away DWARF info from .bpf.o files, before generating BPF skeletons. This reduces bpftool binary size from 3.43MB to 2.58MB. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20200722043804.2373298-1-andriin@fb.com --- tools/bpf/bpftool/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index 51bd520ed437..8462690a039b 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -59,6 +59,7 @@ endif INSTALL ?= install RM ?= rm -f CLANG ?= clang +LLVM_STRIP ?= llvm-strip FEATURE_USER = .bpftool FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib libcap \ @@ -147,7 +148,7 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF) -I$(srctree)/tools/include/uapi/ \ -I$(LIBBPF_PATH) \ -I$(srctree)/tools/lib \ - -g -O2 -target bpf -c $< -o $@ + -g -O2 -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@ $(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP) $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@ From c8a2983c4df06c4cd11bea6abfa7e2947bd3113b Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Wed, 22 Jul 2020 18:17:19 +0200 Subject: [PATCH 1595/2056] udp: Don't discard reuseport selection when group has connections When BPF socket lookup prog selects a socket that belongs to a reuseport group, and the reuseport group has connected sockets in it, the socket selected by reuseport will be discarded, and socket returned by BPF socket lookup will be used instead. Modify this behavior so that the socket selected by reuseport running after BPF socket lookup always gets used. Ignore the fact that the reuseport group might have connections because it is only relevant when scoring sockets during regular hashtable-based lookup. Fixes: 72f7e9440e9b ("udp: Run SK_LOOKUP BPF program on socket lookup") Fixes: 6d4201b1386b ("udp6: Run SK_LOOKUP BPF program on socket lookup") Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Kuniyuki Iwashima Link: https://lore.kernel.org/bpf/20200722161720.940831-2-jakub@cloudflare.com --- net/ipv6/udp.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 5530c9dcb61c..c394e674f486 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -155,9 +155,6 @@ static struct sock *lookup_reuseport(struct net *net, struct sock *sk, hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); reuse_sk = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - /* Fall back to scoring if group has connections */ - if (reuseport_has_conns(sk, false)) - return NULL; } return reuse_sk; } From 14fc6bd6b79c430f615500d0fe6cea4722110db8 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:09 -0700 Subject: [PATCH 1596/2056] bpf: Refactor bpf_iter_reg to have separate seq_info member There is no functionality change for this patch. Struct bpf_iter_reg is used to register a bpf_iter target, which includes information for both prog_load, link_create and seq_file creation. This patch puts fields related seq_file creation into a different structure. This will be useful for map elements iterator where one iterator covers different map types and different map types may have different seq_ops, init/fini private_data function and private_data size. 
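For orientation before the mechanical diff that follows, this is the shape a target registration takes after the split, shown for a hypothetical "example" target (the example_* names are placeholders, not part of the patch; the real bpf_map, bpf_prog, task, tcp, udp, ipv6_route and netlink targets below all follow this pattern):

static const struct bpf_iter_seq_info example_seq_info = {
	.seq_ops		= &example_seq_ops,
	.init_seq_private	= NULL,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_example_info),
};

static struct bpf_iter_reg example_reg_info = {
	.target			= "example",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__example, obj),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	/* all seq_file creation details are now reached via one pointer */
	.seq_info		= &example_seq_info,
};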
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184109.590030-1-yhs@fb.com --- include/linux/bpf.h | 17 ++++++++++------- kernel/bpf/bpf_iter.c | 12 ++++++------ kernel/bpf/map_iter.c | 8 ++++++-- kernel/bpf/prog_iter.c | 8 ++++++-- kernel/bpf/task_iter.c | 16 ++++++++++++---- net/ipv4/tcp_ipv4.c | 8 ++++++-- net/ipv4/udp.c | 8 ++++++-- net/ipv6/route.c | 8 ++++++-- net/netlink/af_netlink.c | 8 ++++++-- 9 files changed, 64 insertions(+), 29 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 72221aea1c60..127067f71fd4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -37,6 +37,15 @@ struct seq_operations; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ @@ -1189,18 +1198,12 @@ int bpf_obj_get_user(const char __user *pathname, int flags); extern int bpf_iter_ ## target(args); \ int __init bpf_iter_ ## target(args) { return 0; } -typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); -typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); - #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; - const struct seq_operations *seq_ops; - bpf_iter_init_seq_priv_t init_seq_private; - bpf_iter_fini_seq_priv_t fini_seq_private; - u32 seq_priv_size; u32 ctx_arg_info_size; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; + const struct bpf_iter_seq_info *seq_info; }; struct bpf_iter_meta { diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index dd612b80b9fe..5b2387d6aa1f 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -218,8 +218,8 @@ static int iter_release(struct inode *inode, struct file *file) iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); - if (iter_priv->tinfo->reg_info->fini_seq_private) - iter_priv->tinfo->reg_info->fini_seq_private(seq->private); + if (iter_priv->tinfo->reg_info->seq_info->fini_seq_private) + iter_priv->tinfo->reg_info->seq_info->fini_seq_private(seq->private); bpf_prog_put(iter_priv->prog); seq->private = iter_priv; @@ -433,16 +433,16 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) tinfo = link->tinfo; total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + - tinfo->reg_info->seq_priv_size; - priv_data = __seq_open_private(file, tinfo->reg_info->seq_ops, + tinfo->reg_info->seq_info->seq_priv_size; + priv_data = __seq_open_private(file, tinfo->reg_info->seq_info->seq_ops, total_priv_dsize); if (!priv_data) { err = -ENOMEM; goto release_prog; } - if (tinfo->reg_info->init_seq_private) { - err = tinfo->reg_info->init_seq_private(priv_data->target_private); + if (tinfo->reg_info->seq_info->init_seq_private) { + err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private); if (err) goto release_seq_file; } diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 5926c76d854e..1a69241fb1e2 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -81,17 +81,21 @@ static const struct seq_operations bpf_map_seq_ops = { BTF_ID_LIST(btf_bpf_map_id) 
BTF_ID(struct, bpf_map) -static struct bpf_iter_reg bpf_map_reg_info = { - .target = "bpf_map", +static const struct bpf_iter_seq_info bpf_map_seq_info = { .seq_ops = &bpf_map_seq_ops, .init_seq_private = NULL, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct bpf_iter_seq_map_info), +}; + +static struct bpf_iter_reg bpf_map_reg_info = { + .target = "bpf_map", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_map, map), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &bpf_map_seq_info, }; static int __init bpf_map_iter_init(void) diff --git a/kernel/bpf/prog_iter.c b/kernel/bpf/prog_iter.c index 6541b577d69f..53a73c841c13 100644 --- a/kernel/bpf/prog_iter.c +++ b/kernel/bpf/prog_iter.c @@ -81,17 +81,21 @@ static const struct seq_operations bpf_prog_seq_ops = { BTF_ID_LIST(btf_bpf_prog_id) BTF_ID(struct, bpf_prog) -static struct bpf_iter_reg bpf_prog_reg_info = { - .target = "bpf_prog", +static const struct bpf_iter_seq_info bpf_prog_seq_info = { .seq_ops = &bpf_prog_seq_ops, .init_seq_private = NULL, .fini_seq_private = NULL, .seq_priv_size = sizeof(struct bpf_iter_seq_prog_info), +}; + +static struct bpf_iter_reg bpf_prog_reg_info = { + .target = "bpf_prog", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__bpf_prog, prog), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &bpf_prog_seq_info, }; static int __init bpf_prog_iter_init(void) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 1039e52ebd8b..6d9cd23869bf 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -319,25 +319,32 @@ BTF_ID_LIST(btf_task_file_ids) BTF_ID(struct, task_struct) BTF_ID(struct, file) -static struct bpf_iter_reg task_reg_info = { - .target = "task", +static const struct bpf_iter_seq_info task_seq_info = { .seq_ops = &task_seq_ops, .init_seq_private = init_seq_pidns, .fini_seq_private = fini_seq_pidns, .seq_priv_size = sizeof(struct bpf_iter_seq_task_info), +}; + +static struct bpf_iter_reg task_reg_info = { + .target = "task", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__task, task), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &task_seq_info, }; -static struct bpf_iter_reg task_file_reg_info = { - .target = "task_file", +static const struct bpf_iter_seq_info task_file_seq_info = { .seq_ops = &task_file_seq_ops, .init_seq_private = init_seq_pidns, .fini_seq_private = fini_seq_pidns, .seq_priv_size = sizeof(struct bpf_iter_seq_task_file_info), +}; + +static struct bpf_iter_reg task_file_reg_info = { + .target = "task_file", .ctx_arg_info_size = 2, .ctx_arg_info = { { offsetof(struct bpf_iter__task_file, task), @@ -345,6 +352,7 @@ static struct bpf_iter_reg task_file_reg_info = { { offsetof(struct bpf_iter__task_file, file), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &task_file_seq_info, }; static int __init task_iter_init(void) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index f8913923a6c0..cb288fdcf2ca 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2947,17 +2947,21 @@ static void bpf_iter_fini_tcp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static struct bpf_iter_reg tcp_reg_info = { - .target = "tcp", +static const struct bpf_iter_seq_info tcp_seq_info = { .seq_ops = &bpf_iter_tcp_seq_ops, .init_seq_private = bpf_iter_init_tcp, .fini_seq_private = bpf_iter_fini_tcp, .seq_priv_size = sizeof(struct tcp_iter_state), +}; + +static struct bpf_iter_reg tcp_reg_info = { + .target = "tcp", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__tcp, sk_common), 
PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &tcp_seq_info, }; static void __init bpf_iter_register(void) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 0fb5e4ea133f..1bc50ec2caef 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3208,17 +3208,21 @@ static void bpf_iter_fini_udp(void *priv_data) bpf_iter_fini_seq_net(priv_data); } -static struct bpf_iter_reg udp_reg_info = { - .target = "udp", +static const struct bpf_iter_seq_info udp_seq_info = { .seq_ops = &bpf_iter_udp_seq_ops, .init_seq_private = bpf_iter_init_udp, .fini_seq_private = bpf_iter_fini_udp, .seq_priv_size = sizeof(struct udp_iter_state), +}; + +static struct bpf_iter_reg udp_reg_info = { + .target = "udp", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__udp, udp_sk), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &udp_seq_info, }; static void __init bpf_iter_register(void) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 33f5efbad0a9..8bfc57b0802a 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -6427,17 +6427,21 @@ DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *r BTF_ID_LIST(btf_fib6_info_id) BTF_ID(struct, fib6_info) -static struct bpf_iter_reg ipv6_route_reg_info = { - .target = "ipv6_route", +static const struct bpf_iter_seq_info ipv6_route_seq_info = { .seq_ops = &ipv6_route_seq_ops, .init_seq_private = bpf_iter_init_seq_net, .fini_seq_private = bpf_iter_fini_seq_net, .seq_priv_size = sizeof(struct ipv6_route_iter), +}; + +static struct bpf_iter_reg ipv6_route_reg_info = { + .target = "ipv6_route", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__ipv6_route, rt), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &ipv6_route_seq_info, }; static int __init bpf_iter_register(void) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index d8921b833744..b5f30d7d30d0 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2807,17 +2807,21 @@ static const struct rhashtable_params netlink_rhashtable_params = { BTF_ID_LIST(btf_netlink_sock_id) BTF_ID(struct, netlink_sock) -static struct bpf_iter_reg netlink_reg_info = { - .target = "netlink", +static const struct bpf_iter_seq_info netlink_seq_info = { .seq_ops = &netlink_seq_ops, .init_seq_private = bpf_iter_init_seq_net, .fini_seq_private = bpf_iter_fini_seq_net, .seq_priv_size = sizeof(struct nl_seq_iter), +}; + +static struct bpf_iter_reg netlink_reg_info = { + .target = "netlink", .ctx_arg_info_size = 1, .ctx_arg_info = { { offsetof(struct bpf_iter__netlink, sk), PTR_TO_BTF_ID_OR_NULL }, }, + .seq_info = &netlink_seq_info, }; static int __init bpf_iter_register(void) From f9c792729581bd8b8473af163e8ab426c2c61d89 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:10 -0700 Subject: [PATCH 1597/2056] bpf: Refactor to provide aux info to bpf_iter_init_seq_priv_t This patch refactored target bpf_iter_init_seq_priv_t callback function to accept additional information. This will be needed in later patches for map element targets since a particular map should be passed to traverse elements for that particular map. In the future, other information may be passed to target as well, e.g., pid, cgroup id, etc. to customize the iterator. 
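Viewed in isolation, the interface change is just the extra argument on the init callback; the typedef below is taken from the diff, while the target shown is a hypothetical placeholder illustrating that existing callbacks only need to accept (and may ignore) the new parameter:

typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
					struct bpf_iter_aux_info *aux);

/* hypothetical target: aux is simply threaded through for now, so that
 * later targets (e.g. map element iterators) can receive per-link data
 * such as the map pointer.
 */
static int example_init_seq_private(void *priv_data,
				    struct bpf_iter_aux_info *aux)
{
	return bpf_iter_init_seq_net(priv_data, aux);
}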
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184110.590156-1-yhs@fb.com --- fs/proc/proc_net.c | 2 +- include/linux/bpf.h | 7 ++++++- include/linux/proc_fs.h | 3 ++- kernel/bpf/bpf_iter.c | 2 +- kernel/bpf/task_iter.c | 2 +- net/ipv4/tcp_ipv4.c | 4 ++-- net/ipv4/udp.c | 4 ++-- 7 files changed, 15 insertions(+), 9 deletions(-) diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index dba63b2429f0..ed8a6306990c 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -98,7 +98,7 @@ static const struct proc_ops proc_net_seq_ops = { .proc_release = seq_release_net, }; -int bpf_iter_init_seq_net(void *priv_data) +int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) { #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 127067f71fd4..ef52717336cf 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -33,11 +33,13 @@ struct btf; struct btf_type; struct exception_table_entry; struct seq_operations; +struct bpf_iter_aux_info; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; -typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, + struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); struct bpf_iter_seq_info { const struct seq_operations *seq_ops; @@ -1198,6 +1200,9 @@ int bpf_obj_get_user(const char __user *pathname, int flags); extern int bpf_iter_ ## target(args); \ int __init bpf_iter_ ## target(args) { return 0; } +struct bpf_iter_aux_info { +}; + #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index d1eed1b43651..2df965cd0974 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -133,7 +133,8 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); -extern int bpf_iter_init_seq_net(void *priv_data); +struct bpf_iter_aux_info; +extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux); extern void bpf_iter_fini_seq_net(void *priv_data); #ifdef CONFIG_PROC_PID_ARCH_STATUS diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 5b2387d6aa1f..8fa94cb1b5a0 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -442,7 +442,7 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) } if (tinfo->reg_info->seq_info->init_seq_private) { - err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private); + err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private, NULL); if (err) goto release_seq_file; } diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 6d9cd23869bf..232df29793e9 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -293,7 +293,7 @@ static void task_file_seq_stop(struct seq_file *seq, void *v) } } -static int init_seq_pidns(void *priv_data) +static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux) { struct bpf_iter_seq_task_common *common = priv_data; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cb288fdcf2ca..5084333b5ab6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2921,7 +2921,7 @@ static struct pernet_operations __net_initdata tcp_sk_ops = { DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta, struct sock_common *sk_common, uid_t uid) 
-static int bpf_iter_init_tcp(void *priv_data) +static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux) { struct tcp_iter_state *st = priv_data; struct tcp_seq_afinfo *afinfo; @@ -2933,7 +2933,7 @@ static int bpf_iter_init_tcp(void *priv_data) afinfo->family = AF_UNSPEC; st->bpf_seq_afinfo = afinfo; - ret = bpf_iter_init_seq_net(priv_data); + ret = bpf_iter_init_seq_net(priv_data, aux); if (ret) kfree(afinfo); return ret; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 1bc50ec2caef..7ce31beccfc2 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -3181,7 +3181,7 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = { DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta, struct udp_sock *udp_sk, uid_t uid, int bucket) -static int bpf_iter_init_udp(void *priv_data) +static int bpf_iter_init_udp(void *priv_data, struct bpf_iter_aux_info *aux) { struct udp_iter_state *st = priv_data; struct udp_seq_afinfo *afinfo; @@ -3194,7 +3194,7 @@ static int bpf_iter_init_udp(void *priv_data) afinfo->family = AF_UNSPEC; afinfo->udp_table = &udp_table; st->bpf_seq_afinfo = afinfo; - ret = bpf_iter_init_seq_net(priv_data); + ret = bpf_iter_init_seq_net(priv_data, aux); if (ret) kfree(afinfo); return ret; From 86176a1821a1237ad163d312ba0f2d7598757894 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Wed, 22 Jul 2020 18:17:20 +0200 Subject: [PATCH 1598/2056] selftests/bpf: Test BPF socket lookup and reuseport with connections Cover the case when BPF socket lookup returns a socket that belongs to a reuseport group, and the reuseport group contains connected UDP sockets. Ensure that the presence of connected UDP sockets in reuseport group does not affect the socket lookup result. Socket selected by reuseport should always be used as result in such case. Signed-off-by: Jakub Sitnicki Signed-off-by: Alexei Starovoitov Acked-by: Kuniyuki Iwashima Link: https://lore.kernel.org/bpf/20200722161720.940831-3-jakub@cloudflare.com --- .../selftests/bpf/prog_tests/sk_lookup.c | 54 ++++++++++++++++++- 1 file changed, 53 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index f1784ae4565a..9bbd2b2b7630 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -74,6 +74,7 @@ struct test { struct inet_addr connect_to; struct inet_addr listen_at; enum server accept_on; + bool reuseport_has_conns; /* Add a connected socket to reuseport group */ }; static __u32 duration; /* for CHECK macro */ @@ -559,7 +560,8 @@ static void query_lookup_prog(struct test_sk_lookup *skel) static void run_lookup_prog(const struct test *t) { - int client_fd, server_fds[MAX_SERVERS] = { -1 }; + int server_fds[MAX_SERVERS] = { -1 }; + int client_fd, reuse_conn_fd = -1; struct bpf_link *lookup_link; int i, err; @@ -583,6 +585,32 @@ static void run_lookup_prog(const struct test *t) break; } + /* Regular UDP socket lookup with reuseport behaves + * differently when reuseport group contains connected + * sockets. Check that adding a connected UDP socket to the + * reuseport group does not affect how reuseport works with + * BPF socket lookup. 
+ */ + if (t->reuseport_has_conns) { + struct sockaddr_storage addr = {}; + socklen_t len = sizeof(addr); + + /* Add an extra socket to reuseport group */ + reuse_conn_fd = make_server(t->sotype, t->listen_at.ip, + t->listen_at.port, + t->reuseport_prog); + if (reuse_conn_fd < 0) + goto close; + + /* Connect the extra socket to itself */ + err = getsockname(reuse_conn_fd, (void *)&addr, &len); + if (CHECK(err, "getsockname", "errno %d\n", errno)) + goto close; + err = connect(reuse_conn_fd, (void *)&addr, len); + if (CHECK(err, "connect", "errno %d\n", errno)) + goto close; + } + client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port); if (client_fd < 0) goto close; @@ -594,6 +622,8 @@ static void run_lookup_prog(const struct test *t) close(client_fd); close: + if (reuse_conn_fd != -1) + close(reuse_conn_fd); for (i = 0; i < ARRAY_SIZE(server_fds); i++) { if (server_fds[i] != -1) close(server_fds[i]); @@ -710,6 +740,17 @@ static void test_redirect_lookup(struct test_sk_lookup *skel) .listen_at = { INT_IP4, INT_PORT }, .accept_on = SERVER_B, }, + { + .desc = "UDP IPv4 redir and reuseport with conns", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP4, EXT_PORT }, + .listen_at = { INT_IP4, INT_PORT }, + .accept_on = SERVER_B, + .reuseport_has_conns = true, + }, { .desc = "UDP IPv4 redir skip reuseport", .lookup_prog = skel->progs.select_sock_a_no_reuseport, @@ -754,6 +795,17 @@ static void test_redirect_lookup(struct test_sk_lookup *skel) .listen_at = { INT_IP6, INT_PORT }, .accept_on = SERVER_B, }, + { + .desc = "UDP IPv6 redir and reuseport with conns", + .lookup_prog = skel->progs.select_sock_a, + .reuseport_prog = skel->progs.select_sock_b, + .sock_map = skel->maps.redir_map, + .sotype = SOCK_DGRAM, + .connect_to = { EXT_IP6, EXT_PORT }, + .listen_at = { INT_IP6, INT_PORT }, + .accept_on = SERVER_B, + .reuseport_has_conns = true, + }, { .desc = "UDP IPv6 redir skip reuseport", .lookup_prog = skel->progs.select_sock_a_no_reuseport, From afbf21dce668ef59482037596eaffbe5041e094c Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:11 -0700 Subject: [PATCH 1599/2056] bpf: Support readonly/readwrite buffers in verifier Readonly and readwrite buffer register states are introduced. Totally four states, PTR_TO_RDONLY_BUF[_OR_NULL] and PTR_TO_RDWR_BUF[_OR_NULL] are supported. As suggested by their respective names, PTR_TO_RDONLY_BUF[_OR_NULL] are for readonly buffers and PTR_TO_RDWR_BUF[_OR_NULL] for read/write buffers. These new register states will be used by later bpf map element iterator. New register states share some similarity to PTR_TO_TP_BUFFER as it will calculate accessed buffer size during verification time. The accessed buffer size will be later compared to other metrics during later attach/link_create time. Similar to reg_state PTR_TO_BTF_ID_OR_NULL in bpf iterator programs, PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL reg_types can be set at prog->aux->bpf_ctx_arg_aux, and bpf verifier will retrieve the values during btf_ctx_access(). Later bpf map element iterator implementation will show how such information will be assigned during target registeration time. The verifier is also enhanced such that PTR_TO_RDONLY_BUF can be passed to ARG_PTR_TO_MEM[_OR_NULL] helper argument, and PTR_TO_RDWR_BUF can be passed to ARG_PTR_TO_MEM[_OR_NULL] or ARG_PTR_TO_UNINIT_MEM. 
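As a rough sketch of how these register types are expected to be exercised from program text, the map element iterator added later in this series gives its key argument a readonly buffer type and its value argument a read/write buffer type; the program below is illustrative only and assumes the attached map has 4-byte keys and 8-byte values:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("iter/bpf_map_elem")
int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
{
	__u32 *key = ctx->key;		/* PTR_TO_RDONLY_BUF_OR_NULL */
	__u64 *val = ctx->value;	/* PTR_TO_RDWR_BUF_OR_NULL */

	if (!key || !val)		/* both may be NULL, so check first */
		return 0;

	*val += 1;			/* writing the rdwr buffer is allowed */
	/* *key = 0; would be rejected: no writes into a rdonly_buf */
	return 0;
}

char _license[] SEC("license") = "GPL";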
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184111.590274-1-yhs@fb.com --- include/linux/bpf.h | 6 +++ kernel/bpf/btf.c | 13 +++++++ kernel/bpf/verifier.c | 91 ++++++++++++++++++++++++++++++++++++++++--- 3 files changed, 104 insertions(+), 6 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ef52717336cf..f9c4bb08f616 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -353,6 +353,10 @@ enum bpf_reg_type { PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ + PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ + PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -694,6 +698,8 @@ struct bpf_prog_aux { u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; const struct bpf_ctx_arg_aux *ctx_arg_info; struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. */ diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index ee36b7f60936..0fd6bb62be3a 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3806,6 +3806,19 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, btf_kind_str[BTF_INFO_KIND(t->info)]); return false; } + + /* check for PTR_TO_RDONLY_BUF_OR_NULL or PTR_TO_RDWR_BUF_OR_NULL */ + for (i = 0; i < prog->aux->ctx_arg_info_size; i++) { + const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i]; + + if (ctx_arg_info->offset == off && + (ctx_arg_info->reg_type == PTR_TO_RDONLY_BUF_OR_NULL || + ctx_arg_info->reg_type == PTR_TO_RDWR_BUF_OR_NULL)) { + info->reg_type = ctx_arg_info->reg_type; + return true; + } + } + if (t->type == 0) /* This is a pointer to void. * It is the same as scalar from the verifier safety pov. 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9a6703bc3f36..8d6979db48d8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -409,7 +409,9 @@ static bool reg_type_may_be_null(enum bpf_reg_type type) type == PTR_TO_SOCK_COMMON_OR_NULL || type == PTR_TO_TCP_SOCK_OR_NULL || type == PTR_TO_BTF_ID_OR_NULL || - type == PTR_TO_MEM_OR_NULL; + type == PTR_TO_MEM_OR_NULL || + type == PTR_TO_RDONLY_BUF_OR_NULL || + type == PTR_TO_RDWR_BUF_OR_NULL; } static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) @@ -503,6 +505,10 @@ static const char * const reg_type_str[] = { [PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_", [PTR_TO_MEM] = "mem", [PTR_TO_MEM_OR_NULL] = "mem_or_null", + [PTR_TO_RDONLY_BUF] = "rdonly_buf", + [PTR_TO_RDONLY_BUF_OR_NULL] = "rdonly_buf_or_null", + [PTR_TO_RDWR_BUF] = "rdwr_buf", + [PTR_TO_RDWR_BUF_OR_NULL] = "rdwr_buf_or_null", }; static char slot_type_char[] = { @@ -2173,6 +2179,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_XDP_SOCK: case PTR_TO_BTF_ID: case PTR_TO_BTF_ID_OR_NULL: + case PTR_TO_RDONLY_BUF: + case PTR_TO_RDONLY_BUF_OR_NULL: + case PTR_TO_RDWR_BUF: + case PTR_TO_RDWR_BUF_OR_NULL: return true; default: return false; @@ -3052,14 +3062,15 @@ int check_ctx_reg(struct bpf_verifier_env *env, return 0; } -static int check_tp_buffer_access(struct bpf_verifier_env *env, - const struct bpf_reg_state *reg, - int regno, int off, int size) +static int __check_buffer_access(struct bpf_verifier_env *env, + const char *buf_info, + const struct bpf_reg_state *reg, + int regno, int off, int size) { if (off < 0) { verbose(env, - "R%d invalid tracepoint buffer access: off=%d, size=%d", - regno, off, size); + "R%d invalid %s buffer access: off=%d, size=%d", + regno, buf_info, off, size); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { @@ -3071,12 +3082,45 @@ static int check_tp_buffer_access(struct bpf_verifier_env *env, regno, off, tn_buf); return -EACCES; } + + return 0; +} + +static int check_tp_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size) +{ + int err; + + err = __check_buffer_access(env, "tracepoint", reg, regno, off, size); + if (err) + return err; + if (off + size > env->prog->aux->max_tp_access) env->prog->aux->max_tp_access = off + size; return 0; } +static int check_buffer_access(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, + int regno, int off, int size, + bool zero_size_allowed, + const char *buf_info, + u32 *max_access) +{ + int err; + + err = __check_buffer_access(env, buf_info, reg, regno, off, size); + if (err) + return err; + + if (off + size > *max_access) + *max_access = off + size; + + return 0; +} + /* BPF architecture zero extends alu32 ops into 64-bit registesr */ static void zext_32_to_64(struct bpf_reg_state *reg) { @@ -3427,6 +3471,23 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else if (reg->type == CONST_PTR_TO_MAP) { err = check_ptr_to_map_access(env, regs, regno, off, size, t, value_regno); + } else if (reg->type == PTR_TO_RDONLY_BUF) { + if (t == BPF_WRITE) { + verbose(env, "R%d cannot write into %s\n", + regno, reg_type_str[reg->type]); + return -EACCES; + } + err = check_buffer_access(env, reg, regno, off, size, "rdonly", + false, + &env->prog->aux->max_rdonly_access); + if (!err && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); + } else if (reg->type == PTR_TO_RDWR_BUF) { + err = check_buffer_access(env, reg, 
regno, off, size, "rdwr", + false, + &env->prog->aux->max_rdwr_access); + if (!err && t == BPF_READ && value_regno >= 0) + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -3668,6 +3729,18 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return check_mem_region_access(env, regno, reg->off, access_size, reg->mem_size, zero_size_allowed); + case PTR_TO_RDONLY_BUF: + if (meta && meta->raw_mode) + return -EACCES; + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdonly", + &env->prog->aux->max_rdonly_access); + case PTR_TO_RDWR_BUF: + return check_buffer_access(env, reg, regno, reg->off, + access_size, zero_size_allowed, + "rdwr", + &env->prog->aux->max_rdwr_access); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); @@ -3933,6 +4006,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg, else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != PTR_TO_MEM && + type != PTR_TO_RDONLY_BUF && + type != PTR_TO_RDWR_BUF && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; @@ -6806,6 +6881,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, reg->type = PTR_TO_BTF_ID; } else if (reg->type == PTR_TO_MEM_OR_NULL) { reg->type = PTR_TO_MEM; + } else if (reg->type == PTR_TO_RDONLY_BUF_OR_NULL) { + reg->type = PTR_TO_RDONLY_BUF; + } else if (reg->type == PTR_TO_RDWR_BUF_OR_NULL) { + reg->type = PTR_TO_RDWR_BUF; } if (is_null) { /* We don't need id and ref_obj_id from this point From 3f9969f2c040ba2ba635b6b5a7051f404bcc634d Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 22 Jul 2020 12:51:56 -0700 Subject: [PATCH 1600/2056] bpf: Fix pos computation for bpf_iter seq_ops->start() Currently, the pos pointer in bpf iterator map/task/task_file seq_ops->start() is always incremented. This is incorrect. It should be increased only if *pos is 0 (for SEQ_START_TOKEN) since these start() function actually returns the first real object. If *pos is not 0, it merely found the object based on the state in seq->private, and not really advancing the *pos. This patch fixed this issue by only incrementing *pos if it is 0. Note that the old *pos calculation, although not correct, does not affect correctness of bpf_iter as bpf_iter seq_file->read() does not support llseek. This patch also renamed "mid" in bpf_map iterator seq_file private data to "map_id" for better clarity. 
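In schematic form the corrected start() looks as follows; the iterator shown is hypothetical, and the concrete fixes to the map and task iterators follow in the diff:

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct example_iter_info *info = seq->private;
	struct example_obj *obj;

	obj = example_get_curr_or_next(&info->id);
	if (!obj)
		return NULL;

	/* start() hands back the first real object in place of
	 * SEQ_START_TOKEN, so *pos only moves from 0 to 1 here; a
	 * non-zero *pos means the cursor was merely re-located from the
	 * state in info and must not be advanced.
	 */
	if (*pos == 0)
		++*pos;
	return obj;
}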
Fixes: 6086d29def80 ("bpf: Add bpf_map iterator") Fixes: eaaacd23910f ("bpf: Add task and task/file iterator targets") Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722195156.4029817-1-yhs@fb.com --- kernel/bpf/map_iter.c | 16 ++++++---------- kernel/bpf/task_iter.c | 6 ++++-- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 8a7af11b411f..5926c76d854e 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -7,7 +7,7 @@ #include struct bpf_iter_seq_map_info { - u32 mid; + u32 map_id; }; static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos) @@ -15,27 +15,23 @@ static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos) struct bpf_iter_seq_map_info *info = seq->private; struct bpf_map *map; - map = bpf_map_get_curr_or_next(&info->mid); + map = bpf_map_get_curr_or_next(&info->map_id); if (!map) return NULL; - ++*pos; + if (*pos == 0) + ++*pos; return map; } static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct bpf_iter_seq_map_info *info = seq->private; - struct bpf_map *map; ++*pos; - ++info->mid; + ++info->map_id; bpf_map_put((struct bpf_map *)v); - map = bpf_map_get_curr_or_next(&info->mid); - if (!map) - return NULL; - - return map; + return bpf_map_get_curr_or_next(&info->map_id); } struct bpf_iter__bpf_map { diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index 2feecf095609..1039e52ebd8b 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -51,7 +51,8 @@ static void *task_seq_start(struct seq_file *seq, loff_t *pos) if (!task) return NULL; - ++*pos; + if (*pos == 0) + ++*pos; return task; } @@ -210,7 +211,8 @@ static void *task_file_seq_start(struct seq_file *seq, loff_t *pos) return NULL; } - ++*pos; + if (*pos == 0) + ++*pos; info->task = task; info->files = files; From a5cbe05a6673b85bed2a63ffcfea6a96c6410cff Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:12 -0700 Subject: [PATCH 1601/2056] bpf: Implement bpf iterator for map elements The bpf iterator for map elements are implemented. The bpf program will receive four parameters: bpf_iter_meta *meta: the meta data bpf_map *map: the bpf_map whose elements are traversed void *key: the key of one element void *value: the value of the same element Here, meta and map pointers are always valid, and key has register type PTR_TO_RDONLY_BUF_OR_NULL and value has register type PTR_TO_RDWR_BUF_OR_NULL. The kernel will track the access range of key and value during verification time. Later, these values will be compared against the values in the actual map to ensure all accesses are within range. A new field iter_seq_info is added to bpf_map_ops which is used to add map type specific information, i.e., seq_ops, init/fini seq_file func and seq_file private data size. Subsequent patches will have actual implementation for bpf_map_ops->iter_seq_info. In user space, BPF_ITER_LINK_MAP_FD needs to be specified in prog attr->link_create.flags, which indicates that attr->link_create.target_fd is a map_fd. The reason for such an explicit flag is for possible future cases where one bpf iterator may allow more than one possible customization, e.g., pid and cgroup id for task_file. Current kernel internal implementation only allows the target to register at most one required bpf_iter_link_info. 
To support the above case, optional bpf_iter_link_info's are needed, the target can be extended to register such link infos, and user provided link_info needs to match one of target supported ones. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184112.590360-1-yhs@fb.com --- include/linux/bpf.h | 16 +++++++ include/uapi/linux/bpf.h | 7 +++ kernel/bpf/bpf_iter.c | 85 +++++++++++++++++++++++++++------- kernel/bpf/map_iter.c | 30 +++++++++++- tools/include/uapi/linux/bpf.h | 7 +++ 5 files changed, 128 insertions(+), 17 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f9c4bb08f616..4175cf1f4665 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -107,6 +107,9 @@ struct bpf_map_ops { /* BTF name and id of struct allocated by map_alloc */ const char * const map_btf_name; int *map_btf_id; + + /* bpf_iter info used to open a seq_file */ + const struct bpf_iter_seq_info *iter_seq_info; }; struct bpf_map_memory { @@ -1207,12 +1210,18 @@ int bpf_obj_get_user(const char __user *pathname, int flags); int __init bpf_iter_ ## target(args) { return 0; } struct bpf_iter_aux_info { + struct bpf_map *map; }; +typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog, + struct bpf_iter_aux_info *aux); + #define BPF_ITER_CTX_ARG_MAX 2 struct bpf_iter_reg { const char *target; + bpf_iter_check_target_t check_target; u32 ctx_arg_info_size; + enum bpf_iter_link_info req_linfo; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; const struct bpf_iter_seq_info *seq_info; }; @@ -1223,6 +1232,13 @@ struct bpf_iter_meta { u64 seq_num; }; +struct bpf_iter__bpf_map_elem { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(void *, key); + __bpf_md_ptr(void *, value); +}; + int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 54d0c886e3ba..828c2f6438f2 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -246,6 +246,13 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; +enum bpf_iter_link_info { + BPF_ITER_LINK_UNSPEC = 0, + BPF_ITER_LINK_MAP_FD = 1, + + MAX_BPF_ITER_LINK_INFO, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. 
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c index 8fa94cb1b5a0..363b9cafc2d8 100644 --- a/kernel/bpf/bpf_iter.c +++ b/kernel/bpf/bpf_iter.c @@ -14,11 +14,13 @@ struct bpf_iter_target_info { struct bpf_iter_link { struct bpf_link link; + struct bpf_iter_aux_info aux; struct bpf_iter_target_info *tinfo; }; struct bpf_iter_priv_data { struct bpf_iter_target_info *tinfo; + const struct bpf_iter_seq_info *seq_info; struct bpf_prog *prog; u64 session_id; u64 seq_num; @@ -35,7 +37,8 @@ static DEFINE_MUTEX(link_mutex); /* incremented on every opened seq_file */ static atomic64_t session_id; -static int prepare_seq_file(struct file *file, struct bpf_iter_link *link); +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info); static void bpf_iter_inc_seq_num(struct seq_file *seq) { @@ -199,11 +202,25 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, return copied; } +static const struct bpf_iter_seq_info * +__get_seq_info(struct bpf_iter_link *link) +{ + const struct bpf_iter_seq_info *seq_info; + + if (link->aux.map) { + seq_info = link->aux.map->ops->iter_seq_info; + if (seq_info) + return seq_info; + } + + return link->tinfo->reg_info->seq_info; +} + static int iter_open(struct inode *inode, struct file *file) { struct bpf_iter_link *link = inode->i_private; - return prepare_seq_file(file, link); + return prepare_seq_file(file, link, __get_seq_info(link)); } static int iter_release(struct inode *inode, struct file *file) @@ -218,8 +235,8 @@ static int iter_release(struct inode *inode, struct file *file) iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); - if (iter_priv->tinfo->reg_info->seq_info->fini_seq_private) - iter_priv->tinfo->reg_info->seq_info->fini_seq_private(seq->private); + if (iter_priv->seq_info->fini_seq_private) + iter_priv->seq_info->fini_seq_private(seq->private); bpf_prog_put(iter_priv->prog); seq->private = iter_priv; @@ -318,6 +335,11 @@ bool bpf_iter_prog_supported(struct bpf_prog *prog) static void bpf_iter_link_release(struct bpf_link *link) { + struct bpf_iter_link *iter_link = + container_of(link, struct bpf_iter_link, link); + + if (iter_link->aux.map) + bpf_map_put_with_uref(iter_link->aux.map); } static void bpf_iter_link_dealloc(struct bpf_link *link) @@ -370,14 +392,13 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct bpf_link_primer link_primer; struct bpf_iter_target_info *tinfo; + struct bpf_iter_aux_info aux = {}; struct bpf_iter_link *link; + u32 prog_btf_id, target_fd; bool existed = false; - u32 prog_btf_id; + struct bpf_map *map; int err; - if (attr->link_create.target_fd || attr->link_create.flags) - return -EINVAL; - prog_btf_id = prog->aux->attach_btf_id; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { @@ -390,6 +411,13 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) if (!existed) return -ENOENT; + /* Make sure user supplied flags are target expected. 
*/ + target_fd = attr->link_create.target_fd; + if (attr->link_create.flags != tinfo->reg_info->req_linfo) + return -EINVAL; + if (!attr->link_create.flags && target_fd) + return -EINVAL; + link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); if (!link) return -ENOMEM; @@ -403,21 +431,45 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) return err; } + if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) { + map = bpf_map_get_with_uref(target_fd); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto cleanup_link; + } + + aux.map = map; + err = tinfo->reg_info->check_target(prog, &aux); + if (err) { + bpf_map_put_with_uref(map); + goto cleanup_link; + } + + link->aux.map = map; + } + return bpf_link_settle(&link_primer); + +cleanup_link: + bpf_link_cleanup(&link_primer); + return err; } static void init_seq_meta(struct bpf_iter_priv_data *priv_data, struct bpf_iter_target_info *tinfo, + const struct bpf_iter_seq_info *seq_info, struct bpf_prog *prog) { priv_data->tinfo = tinfo; + priv_data->seq_info = seq_info; priv_data->prog = prog; priv_data->session_id = atomic64_inc_return(&session_id); priv_data->seq_num = 0; priv_data->done_stop = false; } -static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) +static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, + const struct bpf_iter_seq_info *seq_info) { struct bpf_iter_priv_data *priv_data; struct bpf_iter_target_info *tinfo; @@ -433,21 +485,21 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) tinfo = link->tinfo; total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + - tinfo->reg_info->seq_info->seq_priv_size; - priv_data = __seq_open_private(file, tinfo->reg_info->seq_info->seq_ops, + seq_info->seq_priv_size; + priv_data = __seq_open_private(file, seq_info->seq_ops, total_priv_dsize); if (!priv_data) { err = -ENOMEM; goto release_prog; } - if (tinfo->reg_info->seq_info->init_seq_private) { - err = tinfo->reg_info->seq_info->init_seq_private(priv_data->target_private, NULL); + if (seq_info->init_seq_private) { + err = seq_info->init_seq_private(priv_data->target_private, &link->aux); if (err) goto release_seq_file; } - init_seq_meta(priv_data, tinfo, prog); + init_seq_meta(priv_data, tinfo, seq_info, prog); seq = file->private_data; seq->private = priv_data->target_private; @@ -463,6 +515,7 @@ static int prepare_seq_file(struct file *file, struct bpf_iter_link *link) int bpf_iter_new_fd(struct bpf_link *link) { + struct bpf_iter_link *iter_link; struct file *file; unsigned int flags; int err, fd; @@ -481,8 +534,8 @@ int bpf_iter_new_fd(struct bpf_link *link) goto free_fd; } - err = prepare_seq_file(file, - container_of(link, struct bpf_iter_link, link)); + iter_link = container_of(link, struct bpf_iter_link, link); + err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link)); if (err) goto free_file; diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 1a69241fb1e2..8a1f9b3355d0 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -98,10 +98,38 @@ static struct bpf_iter_reg bpf_map_reg_info = { .seq_info = &bpf_map_seq_info, }; +static int bpf_iter_check_map(struct bpf_prog *prog, + struct bpf_iter_aux_info *aux) +{ + return -EINVAL; +} + +DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, + struct bpf_map *map, void *key, void *value) + +static const struct bpf_iter_reg bpf_map_elem_reg_info = { + .target = "bpf_map_elem", + .check_target = bpf_iter_check_map, + .req_linfo = 
BPF_ITER_LINK_MAP_FD, + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_map_elem, key), + PTR_TO_RDONLY_BUF_OR_NULL }, + { offsetof(struct bpf_iter__bpf_map_elem, value), + PTR_TO_RDWR_BUF_OR_NULL }, + }, +}; + static int __init bpf_map_iter_init(void) { + int ret; + bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id; - return bpf_iter_reg_target(&bpf_map_reg_info); + ret = bpf_iter_reg_target(&bpf_map_reg_info); + if (ret) + return ret; + + return bpf_iter_reg_target(&bpf_map_elem_reg_info); } late_initcall(bpf_map_iter_init); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 54d0c886e3ba..828c2f6438f2 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -246,6 +246,13 @@ enum bpf_link_type { MAX_BPF_LINK_TYPE, }; +enum bpf_iter_link_info { + BPF_ITER_LINK_UNSPEC = 0, + BPF_ITER_LINK_MAP_FD = 1, + + MAX_BPF_ITER_LINK_INFO, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. From a228a64fc1e4428e2b96dc68e9ad3c447095c9e7 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 1 Jul 2020 18:10:18 -0700 Subject: [PATCH 1602/2056] bpf: Add bpf_prog iterator It's mostly a copy paste of commit 6086d29def80 ("bpf: Add bpf_map iterator") that is use to implement bpf_seq_file opreations to traverse all bpf programs. v1->v2: Tweak to use build time btf_id Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Acked-by: Daniel Borkmann --- include/linux/bpf.h | 1 + kernel/bpf/Makefile | 2 +- kernel/bpf/prog_iter.c | 103 +++++++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 19 ++++++++ 4 files changed, 124 insertions(+), 1 deletion(-) create mode 100644 kernel/bpf/prog_iter.c diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bae557ff2da8..72221aea1c60 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1117,6 +1117,7 @@ int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); struct bpf_map *bpf_map_get_curr_or_next(u32 *id); +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); extern int sysctl_unprivileged_bpf_disabled; diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 1131a921e1a6..e6eb9c0402da 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -2,7 +2,7 @@ obj-y := core.o CFLAGS_core.o += $(call cc-disable-warning, override-init) -obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o obj-$(CONFIG_BPF_SYSCALL) += disasm.o diff --git a/kernel/bpf/prog_iter.c b/kernel/bpf/prog_iter.c new file mode 100644 index 000000000000..6541b577d69f --- /dev/null +++ b/kernel/bpf/prog_iter.c @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2020 Facebook */ +#include +#include +#include +#include +#include + +struct bpf_iter_seq_prog_info { + u32 prog_id; +}; + +static void *bpf_prog_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + struct bpf_prog *prog; + + prog = bpf_prog_get_curr_or_next(&info->prog_id); + if (!prog) + return NULL; + + if (*pos == 0) + ++*pos; + return prog; +} + 
+static void *bpf_prog_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_prog_info *info = seq->private; + + ++*pos; + ++info->prog_id; + bpf_prog_put((struct bpf_prog *)v); + return bpf_prog_get_curr_or_next(&info->prog_id); +} + +struct bpf_iter__bpf_prog { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_prog *, prog); +}; + +DEFINE_BPF_ITER_FUNC(bpf_prog, struct bpf_iter_meta *meta, struct bpf_prog *prog) + +static int __bpf_prog_seq_show(struct seq_file *seq, void *v, bool in_stop) +{ + struct bpf_iter__bpf_prog ctx; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + ctx.meta = &meta; + ctx.prog = v; + meta.seq = seq; + prog = bpf_iter_get_info(&meta, in_stop); + if (prog) + ret = bpf_iter_run_prog(prog, &ctx); + + return ret; +} + +static int bpf_prog_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_prog_seq_show(seq, v, false); +} + +static void bpf_prog_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_prog_seq_show(seq, v, true); + else + bpf_prog_put((struct bpf_prog *)v); +} + +static const struct seq_operations bpf_prog_seq_ops = { + .start = bpf_prog_seq_start, + .next = bpf_prog_seq_next, + .stop = bpf_prog_seq_stop, + .show = bpf_prog_seq_show, +}; + +BTF_ID_LIST(btf_bpf_prog_id) +BTF_ID(struct, bpf_prog) + +static struct bpf_iter_reg bpf_prog_reg_info = { + .target = "bpf_prog", + .seq_ops = &bpf_prog_seq_ops, + .init_seq_private = NULL, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_prog_info), + .ctx_arg_info_size = 1, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_prog, prog), + PTR_TO_BTF_ID_OR_NULL }, + }, +}; + +static int __init bpf_prog_iter_init(void) +{ + bpf_prog_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_prog_id; + return bpf_iter_reg_target(&bpf_prog_reg_info); +} + +late_initcall(bpf_prog_iter_init); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d07417d17712..ee290b1f2d9e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3044,6 +3044,25 @@ struct bpf_map *bpf_map_get_curr_or_next(u32 *id) return map; } +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id) +{ + struct bpf_prog *prog; + + spin_lock_bh(&prog_idr_lock); +again: + prog = idr_get_next(&prog_idr, id); + if (prog) { + prog = bpf_prog_inc_not_zero(prog); + if (IS_ERR(prog)) { + (*id)++; + goto again; + } + } + spin_unlock_bh(&prog_idr_lock); + + return prog; +} + #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id struct bpf_prog *bpf_prog_by_id(u32 id) From d6c4503cc29638f328e1a6e6fefbdbda401c28fc Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:14 -0700 Subject: [PATCH 1603/2056] bpf: Implement bpf iterator for hash maps The bpf iterators for hash, percpu hash, lru hash and lru percpu hash are implemented. During link time, bpf_iter_reg->check_target() will check map type and ensure the program access key/value region is within the map defined key/value size limit. For percpu hash and lru hash maps, the bpf program will receive values for all cpus. The map element bpf iterator infrastructure will prepare value properly before passing the value pointer to the bpf program. This patch set supports readonly map keys and read/write map values. It does not support deleting map elements, e.g., from hash tables. If there is a user case for this, the following mechanism can be used to support map deletion for hashtab, etc. 
- permit a new bpf program return value, e.g., 2, to let bpf iterator know the map element should be removed. - since bucket lock is taken, the map element will be queued. - once bucket lock is released after all elements under this bucket are traversed, all to-be-deleted map elements can be deleted. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184114.590470-1-yhs@fb.com --- kernel/bpf/hashtab.c | 194 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/map_iter.c | 24 +++++- 2 files changed, 217 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index d4378d7d442b..024276787055 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1612,6 +1612,196 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map, true, false); } +struct bpf_iter_seq_hash_map_info { + struct bpf_map *map; + struct bpf_htab *htab; + void *percpu_value_buf; // non-zero means percpu hash + unsigned long flags; + u32 bucket_id; + u32 skip_elems; +}; + +static struct htab_elem * +bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, + struct htab_elem *prev_elem) +{ + const struct bpf_htab *htab = info->htab; + unsigned long flags = info->flags; + u32 skip_elems = info->skip_elems; + u32 bucket_id = info->bucket_id; + struct hlist_nulls_head *head; + struct hlist_nulls_node *n; + struct htab_elem *elem; + struct bucket *b; + u32 i, count; + + if (bucket_id >= htab->n_buckets) + return NULL; + + /* try to find next elem in the same bucket */ + if (prev_elem) { + /* no update/deletion on this bucket, prev_elem should be still valid + * and we won't skip elements. + */ + n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node)); + elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node); + if (elem) + return elem; + + /* not found, unlock and go to the next bucket */ + b = &htab->buckets[bucket_id++]; + htab_unlock_bucket(htab, b, flags); + skip_elems = 0; + } + + for (i = bucket_id; i < htab->n_buckets; i++) { + b = &htab->buckets[i]; + flags = htab_lock_bucket(htab, b); + + count = 0; + head = &b->head; + hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { + if (count >= skip_elems) { + info->flags = flags; + info->bucket_id = i; + info->skip_elems = count; + return elem; + } + count++; + } + + htab_unlock_bucket(htab, b, flags); + skip_elems = 0; + } + + info->bucket_id = i; + info->skip_elems = 0; + return NULL; +} + +static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + struct htab_elem *elem; + + elem = bpf_hash_map_seq_find_next(info, NULL); + if (!elem) + return NULL; + + if (*pos == 0) + ++*pos; + return elem; +} + +static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + + ++*pos; + ++info->skip_elems; + return bpf_hash_map_seq_find_next(info, v); +} + +static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + u32 roundup_key_size, roundup_value_size; + struct bpf_iter__bpf_map_elem ctx = {}; + struct bpf_map *map = info->map; + struct bpf_iter_meta meta; + int ret = 0, off = 0, cpu; + struct bpf_prog *prog; + void __percpu *pptr; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, elem == NULL); + if (prog) { + ctx.meta = &meta; + ctx.map = info->map; + if (elem) { + roundup_key_size = round_up(map->key_size, 8); + 
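+ /* struct htab_elem stores the key inline; the value (or, for per-cpu
+  * maps, the pointer fetched below via htab_elem_get_ptr()) sits right
+  * after it at an 8-byte aligned offset, hence the round_up() above.
+  */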
ctx.key = elem->key; + if (!info->percpu_value_buf) { + ctx.value = elem->key + roundup_key_size; + } else { + roundup_value_size = round_up(map->value_size, 8); + pptr = htab_elem_get_ptr(elem, map->key_size); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(info->percpu_value_buf + off, + per_cpu_ptr(pptr, cpu), + roundup_value_size); + off += roundup_value_size; + } + ctx.value = info->percpu_value_buf; + } + } + ret = bpf_iter_run_prog(prog, &ctx); + } + + return ret; +} + +static int bpf_hash_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_hash_map_seq_show(seq, v); +} + +static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_hash_map_info *info = seq->private; + + if (!v) + (void)__bpf_hash_map_seq_show(seq, NULL); + else + htab_unlock_bucket(info->htab, + &info->htab->buckets[info->bucket_id], + info->flags); +} + +static int bpf_iter_init_hash_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_hash_map_info *seq_info = priv_data; + struct bpf_map *map = aux->map; + void *value_buf; + u32 buf_size; + + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { + buf_size = round_up(map->value_size, 8) * num_possible_cpus(); + value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); + if (!value_buf) + return -ENOMEM; + + seq_info->percpu_value_buf = value_buf; + } + + seq_info->map = map; + seq_info->htab = container_of(map, struct bpf_htab, map); + return 0; +} + +static void bpf_iter_fini_hash_map(void *priv_data) +{ + struct bpf_iter_seq_hash_map_info *seq_info = priv_data; + + kfree(seq_info->percpu_value_buf); +} + +static const struct seq_operations bpf_hash_map_seq_ops = { + .start = bpf_hash_map_seq_start, + .next = bpf_hash_map_seq_next, + .stop = bpf_hash_map_seq_stop, + .show = bpf_hash_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_hash_map_seq_ops, + .init_seq_private = bpf_iter_init_hash_map, + .fini_seq_private = bpf_iter_fini_hash_map, + .seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info), +}; + static int htab_map_btf_id; const struct bpf_map_ops htab_map_ops = { .map_alloc_check = htab_map_alloc_check, @@ -1626,6 +1816,7 @@ const struct bpf_map_ops htab_map_ops = { BATCH_OPS(htab), .map_btf_name = "bpf_htab", .map_btf_id = &htab_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int htab_lru_map_btf_id; @@ -1643,6 +1834,7 @@ const struct bpf_map_ops htab_lru_map_ops = { BATCH_OPS(htab_lru), .map_btf_name = "bpf_htab", .map_btf_id = &htab_lru_map_btf_id, + .iter_seq_info = &iter_seq_info, }; /* Called from eBPF program */ @@ -1760,6 +1952,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { BATCH_OPS(htab_percpu), .map_btf_name = "bpf_htab", .map_btf_id = &htab_percpu_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int htab_lru_percpu_map_btf_id; @@ -1775,6 +1968,7 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { BATCH_OPS(htab_lru_percpu), .map_btf_name = "bpf_htab", .map_btf_id = &htab_lru_percpu_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int fd_htab_map_alloc_check(union bpf_attr *attr) diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index 8a1f9b3355d0..bcb68b55bf65 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -101,7 +101,29 @@ static struct bpf_iter_reg bpf_map_reg_info = { static int bpf_iter_check_map(struct bpf_prog *prog, struct bpf_iter_aux_info *aux) { - return -EINVAL; + u32 key_acc_size, value_acc_size, key_size, 
value_size; + struct bpf_map *map = aux->map; + bool is_percpu = false; + + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) + is_percpu = true; + else if (map->map_type != BPF_MAP_TYPE_HASH && + map->map_type != BPF_MAP_TYPE_LRU_HASH) + return -EINVAL; + + key_acc_size = prog->aux->max_rdonly_access; + value_acc_size = prog->aux->max_rdwr_access; + key_size = map->key_size; + if (!is_percpu) + value_size = map->value_size; + else + value_size = round_up(map->value_size, 8) * num_possible_cpus(); + + if (key_acc_size > key_size || value_acc_size > value_size) + return -EACCES; + + return 0; } DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta, From d3cc2ab546adc6e52b65f36f7c34820d2830d0c9 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:15 -0700 Subject: [PATCH 1604/2056] bpf: Implement bpf iterator for array maps The bpf iterators for array and percpu array are implemented. Similar to hash maps, for percpu array map, bpf program will receive values from all cpus. Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184115.590532-1-yhs@fb.com --- kernel/bpf/arraymap.c | 138 ++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/map_iter.c | 6 +- 2 files changed, 142 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index c66e8273fccd..8ff419b632a6 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -487,6 +487,142 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma) vma->vm_pgoff + pgoff); } +struct bpf_iter_seq_array_map_info { + struct bpf_map *map; + void *percpu_value_buf; + u32 index; +}; + +static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_map *map = info->map; + struct bpf_array *array; + u32 index; + + if (info->index >= map->max_entries) + return NULL; + + if (*pos == 0) + ++*pos; + array = container_of(map, struct bpf_array, map); + index = info->index & array->index_mask; + if (info->percpu_value_buf) + return array->pptrs[index]; + return array->value + array->elem_size * index; +} + +static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_map *map = info->map; + struct bpf_array *array; + u32 index; + + ++*pos; + ++info->index; + if (info->index >= map->max_entries) + return NULL; + + array = container_of(map, struct bpf_array, map); + index = info->index & array->index_mask; + if (info->percpu_value_buf) + return array->pptrs[index]; + return array->value + array->elem_size * index; +} + +static int __bpf_array_map_seq_show(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_array_map_info *info = seq->private; + struct bpf_iter__bpf_map_elem ctx = {}; + struct bpf_map *map = info->map; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int off = 0, cpu = 0; + void __percpu **pptr; + u32 size; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, v == NULL); + if (!prog) + return 0; + + ctx.meta = &meta; + ctx.map = info->map; + if (v) { + ctx.key = &info->index; + + if (!info->percpu_value_buf) { + ctx.value = v; + } else { + pptr = v; + size = round_up(map->value_size, 8); + for_each_possible_cpu(cpu) { + bpf_long_memcpy(info->percpu_value_buf + off, + per_cpu_ptr(pptr, cpu), + size); + off += size; + } + ctx.value = info->percpu_value_buf; + } + } + + return 
bpf_iter_run_prog(prog, &ctx); +} + +static int bpf_array_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_array_map_seq_show(seq, v); +} + +static void bpf_array_map_seq_stop(struct seq_file *seq, void *v) +{ + if (!v) + (void)__bpf_array_map_seq_show(seq, NULL); +} + +static int bpf_iter_init_array_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_array_map_info *seq_info = priv_data; + struct bpf_map *map = aux->map; + void *value_buf; + u32 buf_size; + + if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { + buf_size = round_up(map->value_size, 8) * num_possible_cpus(); + value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN); + if (!value_buf) + return -ENOMEM; + + seq_info->percpu_value_buf = value_buf; + } + + seq_info->map = map; + return 0; +} + +static void bpf_iter_fini_array_map(void *priv_data) +{ + struct bpf_iter_seq_array_map_info *seq_info = priv_data; + + kfree(seq_info->percpu_value_buf); +} + +static const struct seq_operations bpf_array_map_seq_ops = { + .start = bpf_array_map_seq_start, + .next = bpf_array_map_seq_next, + .stop = bpf_array_map_seq_stop, + .show = bpf_array_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_array_map_seq_ops, + .init_seq_private = bpf_iter_init_array_map, + .fini_seq_private = bpf_iter_fini_array_map, + .seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info), +}; + static int array_map_btf_id; const struct bpf_map_ops array_map_ops = { .map_alloc_check = array_map_alloc_check, @@ -506,6 +642,7 @@ const struct bpf_map_ops array_map_ops = { .map_update_batch = generic_map_update_batch, .map_btf_name = "bpf_array", .map_btf_id = &array_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int percpu_array_map_btf_id; @@ -521,6 +658,7 @@ const struct bpf_map_ops percpu_array_map_ops = { .map_check_btf = array_map_check_btf, .map_btf_name = "bpf_array", .map_btf_id = &percpu_array_map_btf_id, + .iter_seq_info = &iter_seq_info, }; static int fd_array_map_alloc_check(union bpf_attr *attr) diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c index bcb68b55bf65..fbe1f557cb88 100644 --- a/kernel/bpf/map_iter.c +++ b/kernel/bpf/map_iter.c @@ -106,10 +106,12 @@ static int bpf_iter_check_map(struct bpf_prog *prog, bool is_percpu = false; if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || - map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || + map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) is_percpu = true; else if (map->map_type != BPF_MAP_TYPE_HASH && - map->map_type != BPF_MAP_TYPE_LRU_HASH) + map->map_type != BPF_MAP_TYPE_LRU_HASH && + map->map_type != BPF_MAP_TYPE_ARRAY) return -EINVAL; key_acc_size = prog->aux->max_rdonly_access; From 5ce6e77c7edf7310a0ff9532fd6b9693c082ab32 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:16 -0700 Subject: [PATCH 1605/2056] bpf: Implement bpf iterator for sock local storage map The bpf iterator for bpf sock local storage map is implemented. User space interacts with sock local storage map with fd as a key and storage value. In kernel, passing fd to the bpf program does not really make sense. In this case, the sock itself is passed to bpf program. 
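A rough sketch of a program for this new target, assuming the selftest-style
headers (bpf_iter.h together with libbpf's bpf_helpers.h and bpf_tracing.h);
the map name sk_stg_map, the program name and the printed fields are
illustrative only:

#include "bpf_iter.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u32);
} sk_stg_map SEC(".maps");

SEC("iter/bpf_sk_storage_map")
int dump_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct sock *sk = ctx->sk;
	__u32 *val = ctx->value;

	/* sk and value are *_OR_NULL pointers, so both must be checked */
	if (sk == (void *)0 || val == (void *)0)
		return 0;

	/* the program sees the sock itself, not a user-space fd */
	BPF_SEQ_PRINTF(seq, "family %d storage %u\n", sk->sk_family, *val);
	return 0;
}

Reading such an iterator then walks every socket that currently has storage
in the map, with the sock pointer usable directly for BTF-typed field access.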
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184116.590602-1-yhs@fb.com --- net/core/bpf_sk_storage.c | 206 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 6f921c4ddc2c..eafcd15e7dfd 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include @@ -1217,3 +1218,208 @@ int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag, return err; } EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put); + +struct bpf_iter_seq_sk_storage_map_info { + struct bpf_map *map; + unsigned int bucket_id; + unsigned skip_elems; +}; + +static struct bpf_sk_storage_elem * +bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info, + struct bpf_sk_storage_elem *prev_selem) +{ + struct bpf_sk_storage *sk_storage; + struct bpf_sk_storage_elem *selem; + u32 skip_elems = info->skip_elems; + struct bpf_sk_storage_map *smap; + u32 bucket_id = info->bucket_id; + u32 i, count, n_buckets; + struct bucket *b; + + smap = (struct bpf_sk_storage_map *)info->map; + n_buckets = 1U << smap->bucket_log; + if (bucket_id >= n_buckets) + return NULL; + + /* try to find next selem in the same bucket */ + selem = prev_selem; + count = 0; + while (selem) { + selem = hlist_entry_safe(selem->map_node.next, + struct bpf_sk_storage_elem, map_node); + if (!selem) { + /* not found, unlock and go to the next bucket */ + b = &smap->buckets[bucket_id++]; + raw_spin_unlock_bh(&b->lock); + skip_elems = 0; + break; + } + sk_storage = rcu_dereference_raw(selem->sk_storage); + if (sk_storage) { + info->skip_elems = skip_elems + count; + return selem; + } + count++; + } + + for (i = bucket_id; i < (1U << smap->bucket_log); i++) { + b = &smap->buckets[i]; + raw_spin_lock_bh(&b->lock); + count = 0; + hlist_for_each_entry(selem, &b->list, map_node) { + sk_storage = rcu_dereference_raw(selem->sk_storage); + if (sk_storage && count >= skip_elems) { + info->bucket_id = i; + info->skip_elems = count; + return selem; + } + count++; + } + raw_spin_unlock_bh(&b->lock); + skip_elems = 0; + } + + info->bucket_id = i; + info->skip_elems = 0; + return NULL; +} + +static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct bpf_sk_storage_elem *selem; + + selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL); + if (!selem) + return NULL; + + if (*pos == 0) + ++*pos; + return selem; +} + +static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v, + loff_t *pos) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + + ++*pos; + ++info->skip_elems; + return bpf_sk_storage_map_seq_find_next(seq->private, v); +} + +struct bpf_iter__bpf_sk_storage_map { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(struct sock *, sk); + __bpf_md_ptr(void *, value); +}; + +DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta, + struct bpf_map *map, struct sock *sk, + void *value) + +static int __bpf_sk_storage_map_seq_show(struct seq_file *seq, + struct bpf_sk_storage_elem *selem) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + struct bpf_iter__bpf_sk_storage_map ctx = {}; + struct bpf_sk_storage *sk_storage; + struct bpf_iter_meta meta; + struct bpf_prog *prog; + int ret = 0; + + meta.seq = seq; + prog = bpf_iter_get_info(&meta, selem == NULL); + if (prog) { + ctx.meta = &meta; + 
ctx.map = info->map; + if (selem) { + sk_storage = rcu_dereference_raw(selem->sk_storage); + ctx.sk = sk_storage->sk; + ctx.value = SDATA(selem)->data; + } + ret = bpf_iter_run_prog(prog, &ctx); + } + + return ret; +} + +static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v) +{ + return __bpf_sk_storage_map_seq_show(seq, v); +} + +static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v) +{ + struct bpf_iter_seq_sk_storage_map_info *info = seq->private; + struct bpf_sk_storage_map *smap; + struct bucket *b; + + if (!v) { + (void)__bpf_sk_storage_map_seq_show(seq, v); + } else { + smap = (struct bpf_sk_storage_map *)info->map; + b = &smap->buckets[info->bucket_id]; + raw_spin_unlock_bh(&b->lock); + } +} + +static int bpf_iter_init_sk_storage_map(void *priv_data, + struct bpf_iter_aux_info *aux) +{ + struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data; + + seq_info->map = aux->map; + return 0; +} + +static int bpf_iter_check_map(struct bpf_prog *prog, + struct bpf_iter_aux_info *aux) +{ + struct bpf_map *map = aux->map; + + if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) + return -EINVAL; + + if (prog->aux->max_rdonly_access > map->value_size) + return -EACCES; + + return 0; +} + +static const struct seq_operations bpf_sk_storage_map_seq_ops = { + .start = bpf_sk_storage_map_seq_start, + .next = bpf_sk_storage_map_seq_next, + .stop = bpf_sk_storage_map_seq_stop, + .show = bpf_sk_storage_map_seq_show, +}; + +static const struct bpf_iter_seq_info iter_seq_info = { + .seq_ops = &bpf_sk_storage_map_seq_ops, + .init_seq_private = bpf_iter_init_sk_storage_map, + .fini_seq_private = NULL, + .seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info), +}; + +static struct bpf_iter_reg bpf_sk_storage_map_reg_info = { + .target = "bpf_sk_storage_map", + .check_target = bpf_iter_check_map, + .req_linfo = BPF_ITER_LINK_MAP_FD, + .ctx_arg_info_size = 2, + .ctx_arg_info = { + { offsetof(struct bpf_iter__bpf_sk_storage_map, sk), + PTR_TO_BTF_ID_OR_NULL }, + { offsetof(struct bpf_iter__bpf_sk_storage_map, value), + PTR_TO_RDWR_BUF_OR_NULL }, + }, + .seq_info = &iter_seq_info, +}; + +static int __init bpf_sk_storage_map_iter_init(void) +{ + bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id = + btf_sock_ids[BTF_SOCK_TYPE_SOCK]; + return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info); +} +late_initcall(bpf_sk_storage_map_iter_init); From cd31039a7347610863aa8b77a9162048999723d0 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:17 -0700 Subject: [PATCH 1606/2056] tools/libbpf: Add support for bpf map element iterator Add map_fd to bpf_iter_attach_opts and flags to bpf_link_create_opts. Later on, bpftool or selftest will be able to create a bpf map element iterator by passing map_fd to the kernel during link creation time. 
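To make the intended call sequence concrete, a minimal user-space sketch
follows; it assumes the iterator program is already opened and loaded, trims
error reporting, and picks an arbitrary buffer size:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int run_map_elem_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts, .map_fd = map_fd);
	struct bpf_link *link;
	char buf[4096];
	int iter_fd, len, err;

	/* a map_fd in the opts makes libbpf pass BPF_ITER_LINK_MAP_FD */
	link = bpf_program__attach_iter(prog, &opts);
	err = libbpf_get_error(link);
	if (err)
		return err;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return -errno;
	}

	/* each read() drives the iterator program over more elements */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, len, stdout);

	close(iter_fd);
	bpf_link__destroy(link);
	return len < 0 ? -errno : 0;
}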
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184117.590673-1-yhs@fb.com --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 3 ++- tools/lib/bpf/libbpf.c | 10 +++++++++- tools/lib/bpf/libbpf.h | 3 ++- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index a7329b671c41..e1bdf214f75f 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -598,6 +598,7 @@ int bpf_link_create(int prog_fd, int target_fd, attr.link_create.prog_fd = prog_fd; attr.link_create.target_fd = target_fd; attr.link_create.attach_type = attach_type; + attr.link_create.flags = OPTS_GET(opts, flags, 0); return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); } diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index dbef24ebcfcb..6d367e01d05e 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -170,8 +170,9 @@ LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd, struct bpf_link_create_opts { size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 flags; }; -#define bpf_link_create_opts__last_field sz +#define bpf_link_create_opts__last_field flags LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 846164c79df1..a05aa7e2bab6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8282,13 +8282,20 @@ struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) { + DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); char errmsg[STRERR_BUFSIZE]; struct bpf_link *link; int prog_fd, link_fd; + __u32 target_fd = 0; if (!OPTS_VALID(opts, bpf_iter_attach_opts)) return ERR_PTR(-EINVAL); + if (OPTS_HAS(opts, map_fd)) { + target_fd = opts->map_fd; + link_create_opts.flags = BPF_ITER_LINK_MAP_FD; + } + prog_fd = bpf_program__fd(prog); if (prog_fd < 0) { pr_warn("program '%s': can't attach before loaded\n", @@ -8301,7 +8308,8 @@ bpf_program__attach_iter(struct bpf_program *prog, return ERR_PTR(-ENOMEM); link->detach = &bpf_link__detach_fd; - link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, NULL); + link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, + &link_create_opts); if (link_fd < 0) { link_fd = -errno; free(link); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c2272132e929..c6813791fa7e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -264,8 +264,9 @@ LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); struct bpf_iter_attach_opts { size_t sz; /* size of this struct for forward/backward compatibility */ + __u32 map_fd; }; -#define bpf_iter_attach_opts__last_field sz +#define bpf_iter_attach_opts__last_field map_fd LIBBPF_API struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, From d8793aca708602c676372b03d6493972457524af Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:19 -0700 Subject: [PATCH 1607/2056] tools/bpftool: Add bpftool support for bpf map element iterator The optional parameter "map MAP" can be added to "bpftool iter" command to create a bpf iterator for map elements. For example, bpftool iter pin ./prog.o /sys/fs/bpf/p1 map id 333 For map element bpf iterator "map MAP" parameter is required. Otherwise, bpf link creation will return an error. Quentin Monnet kindly provided bash-completion implementation for new "map MAP" option. 
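The pinned iterator needs nothing bpftool-specific on the consumer side; any
process that can open the bpffs path gets the same text that cat would show.
A small sketch, reusing the /sys/fs/bpf/p1 path from the example above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* path used in the bpftool iter pin example above */
	fd = open("/sys/fs/bpf/p1", O_RDONLY);
	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}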
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184119.590799-1-yhs@fb.com --- .../bpftool/Documentation/bpftool-iter.rst | 18 ++++++++-- tools/bpf/bpftool/bash-completion/bpftool | 18 +++++++++- tools/bpf/bpftool/iter.c | 33 ++++++++++++++++--- 3 files changed, 62 insertions(+), 7 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst index 8dce698eab79..070ffacb42b5 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-iter.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst @@ -17,14 +17,15 @@ SYNOPSIS ITER COMMANDS =================== -| **bpftool** **iter pin** *OBJ* *PATH* +| **bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*] | **bpftool** **iter help** | | *OBJ* := /a/file/of/bpf_iter_target.o +| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } DESCRIPTION =========== - **bpftool iter pin** *OBJ* *PATH* + **bpftool iter pin** *OBJ* *PATH* [**map** *MAP*] A bpf iterator combines a kernel iterating of particular kernel data (e.g., tasks, bpf_maps, etc.) and a bpf program called for each kernel data object @@ -37,6 +38,12 @@ DESCRIPTION character ('.'), which is reserved for future extensions of *bpffs*. + Map element bpf iterator requires an additional parameter + *MAP* so bpf program can iterate over map elements for + that map. User can have a bpf program in kernel to run + with each map element, do checking, filtering, aggregation, + etc. without copying data to user space. + User can then *cat PATH* to see the bpf iterator output. **bpftool iter help** @@ -64,6 +71,13 @@ EXAMPLES Create a file-based bpf iterator from bpf_iter_netlink.o and pin it to /sys/fs/bpf/my_netlink +**# bpftool iter pin bpf_iter_hashmap.o /sys/fs/bpf/my_hashmap map id 20** + +:: + + Create a file-based bpf iterator from bpf_iter_hashmap.o and map with + id 20, and pin it to /sys/fs/bpf/my_hashmap + SEE ALSO ======== **bpf**\ (2), diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 7b137264ea3a..257fa310ea2b 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -615,7 +615,23 @@ _bpftool() iter) case $command in pin) - _filedir + case $prev in + $command) + _filedir + ;; + id) + _bpftool_get_map_ids + ;; + name) + _bpftool_get_map_names + ;; + pinned) + _filedir + ;; + *) + _bpftool_one_of_list $MAP_TYPE + ;; + esac return 0 ;; *) diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c index 33240fcc6319..c9dba7543dba 100644 --- a/tools/bpf/bpftool/iter.c +++ b/tools/bpf/bpftool/iter.c @@ -2,6 +2,7 @@ // Copyright (C) 2020 Facebook #define _GNU_SOURCE +#include #include #include @@ -9,11 +10,12 @@ static int do_pin(int argc, char **argv) { + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts); const char *objfile, *path; struct bpf_program *prog; struct bpf_object *obj; struct bpf_link *link; - int err; + int err = -1, map_fd = -1; if (!REQ_ARGS(2)) usage(); @@ -21,10 +23,26 @@ static int do_pin(int argc, char **argv) objfile = GET_ARG(); path = GET_ARG(); + /* optional arguments */ + if (argc) { + if (is_prefix(*argv, "map")) { + NEXT_ARG(); + + if (!REQ_ARGS(2)) { + p_err("incorrect map spec"); + return -1; + } + + map_fd = map_parse_fd(&argc, &argv); + if (map_fd < 0) + return -1; + } + } + obj = bpf_object__open(objfile); if (IS_ERR(obj)) { p_err("can't open objfile %s", objfile); - return -1; + goto close_map_fd; } err = bpf_object__load(obj); @@ -39,7 
+57,10 @@ static int do_pin(int argc, char **argv) goto close_obj; } - link = bpf_program__attach_iter(prog, NULL); + if (map_fd >= 0) + iter_opts.map_fd = map_fd; + + link = bpf_program__attach_iter(prog, &iter_opts); if (IS_ERR(link)) { err = PTR_ERR(link); p_err("attach_iter failed for program %s", @@ -62,14 +83,18 @@ static int do_pin(int argc, char **argv) bpf_link__destroy(link); close_obj: bpf_object__close(obj); +close_map_fd: + if (map_fd >= 0) + close(map_fd); return err; } static int do_help(int argc, char **argv) { fprintf(stderr, - "Usage: %1$s %2$s pin OBJ PATH\n" + "Usage: %1$s %2$s pin OBJ PATH [map MAP]\n" " %1$s %2$s help\n" + " " HELP_SPEC_MAP "\n" "", bin_name, "iter"); From 2a7c2fff7dd6e87634e47ddb2d2c7f272708dbbf Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:20 -0700 Subject: [PATCH 1608/2056] selftests/bpf: Add test for bpf hash map iterators Two subtests are added. $ ./test_progs -n 4 ... #4/18 bpf_hash_map:OK #4/19 bpf_percpu_hash_map:OK ... Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184120.590916-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 187 ++++++++++++++++++ .../bpf/progs/bpf_iter_bpf_hash_map.c | 100 ++++++++++ .../bpf/progs/bpf_iter_bpf_percpu_hash_map.c | 50 +++++ 3 files changed, 337 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index fed42755416d..72790b600c62 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -15,6 +15,8 @@ #include "bpf_iter_test_kern2.skel.h" #include "bpf_iter_test_kern3.skel.h" #include "bpf_iter_test_kern4.skel.h" +#include "bpf_iter_bpf_hash_map.skel.h" +#include "bpf_iter_bpf_percpu_hash_map.skel.h" static int duration; @@ -455,6 +457,187 @@ static void test_overflow(bool test_e2big_overflow, bool ret1) bpf_iter_test_kern4__destroy(skel); } +static void test_bpf_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_hash_map *skel; + int err, i, len, map_fd, iter_fd; + __u64 val, expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + + skel = bpf_iter_bpf_hash_map__open(); + if (CHECK(!skel, "bpf_iter_bpf_hash_map__open", + "skeleton open failed\n")) + return; + + skel->bss->in_test_mode = true; + + err = bpf_iter_bpf_hash_map__load(skel); + if (CHECK(!skel, "bpf_iter_bpf_hash_map__load", + "skeleton load failed\n")) + goto out; + + /* iterator with hashmap2 and hashmap3 should fail */ + opts.map_fd = bpf_map__fd(skel->maps.hashmap2); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (CHECK(!IS_ERR(link), "attach_iter", + "attach_iter for hashmap2 unexpected succeeded\n")) + goto out; + + opts.map_fd = bpf_map__fd(skel->maps.hashmap3); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (CHECK(!IS_ERR(link), "attach_iter", + "attach_iter for hashmap3 unexpected succeeded\n")) + goto out; + + /* hashmap1 should be good, update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { + key.a = i + 1; + key.b = i + 2; + key.c = i + 3; 
+ val = i + 4; + expected_key_a += key.a; + expected_key_b += key.b; + expected_key_c += key.c; + expected_val += val; + + err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + opts.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->key_sum_a != expected_key_a, + "key_sum_a", "got %u expected %u\n", + skel->bss->key_sum_a, expected_key_a)) + goto close_iter; + if (CHECK(skel->bss->key_sum_b != expected_key_b, + "key_sum_b", "got %u expected %u\n", + skel->bss->key_sum_b, expected_key_b)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %llu expected %llu\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_hash_map__destroy(skel); +} + +static void test_bpf_percpu_hash_map(void) +{ + __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_percpu_hash_map *skel; + int err, i, j, len, map_fd, iter_fd; + __u32 expected_val = 0; + struct bpf_link *link; + struct key_t { + int a; + int b; + int c; + } key; + char buf[64]; + void *val; + + val = malloc(8 * bpf_num_possible_cpus()); + + skel = bpf_iter_bpf_percpu_hash_map__open(); + if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open", + "skeleton open failed\n")) + return; + + skel->rodata->num_cpus = bpf_num_possible_cpus(); + + err = bpf_iter_bpf_percpu_hash_map__load(skel); + if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load", + "skeleton load failed\n")) + goto out; + + /* update map values here */ + map_fd = bpf_map__fd(skel->maps.hashmap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) { + key.a = i + 1; + key.b = i + 2; + key.c = i + 3; + expected_key_a += key.a; + expected_key_b += key.b; + expected_key_c += key.c; + + for (j = 0; j < bpf_num_possible_cpus(); j++) { + *(__u32 *)(val + j * 8) = i + j; + expected_val += i + j; + } + + err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + opts.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->key_sum_a != expected_key_a, + "key_sum_a", "got %u expected %u\n", + skel->bss->key_sum_a, expected_key_a)) + goto close_iter; + if (CHECK(skel->bss->key_sum_b != expected_key_b, + "key_sum_b", "got %u expected %u\n", + skel->bss->key_sum_b, expected_key_b)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %u expected %u\n", + skel->bss->val_sum, 
expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_percpu_hash_map__destroy(skel); +} + void test_bpf_iter(void) { if (test__start_subtest("btf_id_or_null")) @@ -491,4 +674,8 @@ void test_bpf_iter(void) test_overflow(true, false); if (test__start_subtest("prog-ret-1")) test_overflow(false, true); + if (test__start_subtest("bpf_hash_map")) + test_bpf_hash_map(); + if (test__start_subtest("bpf_percpu_hash_map")) + test_bpf_percpu_hash_map(); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c new file mode 100644 index 000000000000..07ddbfdbcab7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u64); +} hashmap1 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, __u64); + __type(value, __u64); +} hashmap2 SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u32); +} hashmap3 SEC(".maps"); + +/* will set before prog run */ +bool in_test_mode = 0; + +/* will collect results during prog run */ +__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0; +__u64 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + __u32 seq_num = ctx->meta->seq_num; + struct bpf_map *map = ctx->map; + struct key_t *key = ctx->key; + __u64 *val = ctx->value; + + if (in_test_mode) { + /* test mode is used by selftests to + * test functionality of bpf_hash_map iter. + * + * the above hashmap1 will have correct size + * and will be accepted, hashmap2 and hashmap3 + * should be rejected due to smaller key/value + * size. 
+ */ + if (key == (void *)0 || val == (void *)0) + return 0; + + key_sum_a += key->a; + key_sum_b += key->b; + key_sum_c += key->c; + val_sum += *val; + return 0; + } + + /* non-test mode, the map is prepared with the + * below bpftool command sequence: + * bpftool map create /sys/fs/bpf/m1 type hash \ + * key 12 value 8 entries 3 name map1 + * bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 1 \ + * value 0 0 0 1 0 0 0 1 + * bpftool map update id 77 key 0 0 0 1 0 0 0 0 0 0 0 2 \ + * value 0 0 0 1 0 0 0 2 + * The bpftool iter command line: + * bpftool iter pin ./bpf_iter_bpf_hash_map.o /sys/fs/bpf/p1 \ + * map id 77 + * The below output will be: + * map dump starts + * 77: (1000000 0 2000000) (200000001000000) + * 77: (1000000 0 1000000) (100000001000000) + * map dump ends + */ + if (seq_num == 0) + BPF_SEQ_PRINTF(seq, "map dump starts\n"); + + if (key == (void *)0 || val == (void *)0) { + BPF_SEQ_PRINTF(seq, "map dump ends\n"); + return 0; + } + + BPF_SEQ_PRINTF(seq, "%d: (%x %d %x) (%llx)\n", map->id, + key->a, key->b, key->c, *val); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c new file mode 100644 index 000000000000..feaaa2b89c57 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u32); +} hashmap1 SEC(".maps"); + +/* will set before prog run */ +volatile const __u32 num_cpus = 0; + +/* will collect results during prog run */ +__u32 key_sum_a = 0, key_sum_b = 0, key_sum_c = 0; +__u32 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_percpu_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + struct key_t *key = ctx->key; + void *pptr = ctx->value; + __u32 step; + int i; + + if (key == (void *)0 || pptr == (void *)0) + return 0; + + key_sum_a += key->a; + key_sum_b += key->b; + key_sum_c += key->c; + + step = 8; + for (i = 0; i < num_cpus; i++) { + val_sum += *(__u32 *)pptr; + pptr += step; + } + return 0; +} From 60dd49ea65390986a665c462da704927e861e67e Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:21 -0700 Subject: [PATCH 1609/2056] selftests/bpf: Add test for bpf array map iterators Two subtests are added. $ ./test_progs -n 4 ... #4/20 bpf_array_map:OK #4/21 bpf_percpu_array_map:OK ... The bpf_array_map subtest also tested bpf program changing array element values and send key/value to user space through bpf_seq_write() interface. 
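Since bpf_seq_write() emits raw bytes rather than formatted text, the reader
has to know the record layout in advance. A sketch of the user-space side for
the stream produced by this subtest, where each record is a 4-byte key
immediately followed by an 8-byte value; a complete reader would also loop on
read() and handle a record split across two reads:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void dump_records(int iter_fd)
{
	char buf[4096];
	uint32_t key;
	uint64_t val;
	ssize_t len;
	size_t off;

	len = read(iter_fd, buf, sizeof(buf));
	if (len < 0)
		return;

	/* memcpy() avoids unaligned loads on the packed 12-byte records */
	for (off = 0; off + 12 <= (size_t)len; off += 12) {
		memcpy(&key, buf + off, sizeof(key));
		memcpy(&val, buf + off + sizeof(key), sizeof(val));
		printf("key %u val %llu\n", key, (unsigned long long)val);
	}
}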
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184121.591367-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 161 ++++++++++++++++++ .../bpf/progs/bpf_iter_bpf_array_map.c | 40 +++++ .../bpf/progs/bpf_iter_bpf_percpu_array_map.c | 46 +++++ 3 files changed, 247 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 72790b600c62..4a02b2222a6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -17,6 +17,8 @@ #include "bpf_iter_test_kern4.skel.h" #include "bpf_iter_bpf_hash_map.skel.h" #include "bpf_iter_bpf_percpu_hash_map.skel.h" +#include "bpf_iter_bpf_array_map.skel.h" +#include "bpf_iter_bpf_percpu_array_map.skel.h" static int duration; @@ -638,6 +640,161 @@ static void test_bpf_percpu_hash_map(void) bpf_iter_bpf_percpu_hash_map__destroy(skel); } +static void test_bpf_array_map(void) +{ + __u64 val, expected_val = 0, res_first_val, first_val = 0; + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + __u32 expected_key = 0, res_first_key; + struct bpf_iter_bpf_array_map *skel; + int err, i, map_fd, iter_fd; + struct bpf_link *link; + char buf[64] = {}; + int len, start; + + skel = bpf_iter_bpf_array_map__open_and_load(); + if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load", + "skeleton open_and_load failed\n")) + return; + + map_fd = bpf_map__fd(skel->maps.arraymap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + val = i + 4; + expected_key += i; + expected_val += val; + + if (i == 0) + first_val = val; + + err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + opts.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + start = 0; + while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0) + start += len; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + res_first_key = *(__u32 *)buf; + res_first_val = *(__u64 *)(buf + sizeof(__u32)); + if (CHECK(res_first_key != 0 || res_first_val != first_val, + "bpf_seq_write", + "seq_write failure: first key %u vs expected 0, " + " first value %llu vs expected %llu\n", + res_first_key, res_first_val, first_val)) + goto close_iter; + + if (CHECK(skel->bss->key_sum != expected_key, + "key_sum", "got %u expected %u\n", + skel->bss->key_sum, expected_key)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %llu expected %llu\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + err = bpf_map_lookup_elem(map_fd, &i, &val); + if (CHECK(err, "map_lookup", "map_lookup failed\n")) + goto out; + if (CHECK(i != val, "invalid_val", + "got value %llu expected %u\n", val, i)) + goto out; + } + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_array_map__destroy(skel); +} + +static void 
test_bpf_percpu_array_map(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_bpf_percpu_array_map *skel; + __u32 expected_key = 0, expected_val = 0; + int err, i, j, map_fd, iter_fd; + struct bpf_link *link; + char buf[64]; + void *val; + int len; + + val = malloc(8 * bpf_num_possible_cpus()); + + skel = bpf_iter_bpf_percpu_array_map__open(); + if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open", + "skeleton open failed\n")) + return; + + skel->rodata->num_cpus = bpf_num_possible_cpus(); + + err = bpf_iter_bpf_percpu_array_map__load(skel); + if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load", + "skeleton load failed\n")) + goto out; + + /* update map values here */ + map_fd = bpf_map__fd(skel->maps.arraymap1); + for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) { + expected_key += i; + + for (j = 0; j < bpf_num_possible_cpus(); j++) { + *(__u32 *)(val + j * 8) = i + j; + expected_val += i + j; + } + + err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + opts.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->key_sum != expected_key, + "key_sum", "got %u expected %u\n", + skel->bss->key_sum, expected_key)) + goto close_iter; + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %u expected %u\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + bpf_iter_bpf_percpu_array_map__destroy(skel); +} + void test_bpf_iter(void) { if (test__start_subtest("btf_id_or_null")) @@ -678,4 +835,8 @@ void test_bpf_iter(void) test_bpf_hash_map(); if (test__start_subtest("bpf_percpu_hash_map")) test_bpf_percpu_hash_map(); + if (test__start_subtest("bpf_array_map")) + test_bpf_array_map(); + if (test__start_subtest("bpf_percpu_array_map")) + test_bpf_percpu_array_map(); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c new file mode 100644 index 000000000000..6286023fd62b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u64); +} arraymap1 SEC(".maps"); + +__u32 key_sum = 0; +__u64 val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_array_map(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 *key = ctx->key; + __u64 *val = ctx->value; + + if (key == (void *)0 || val == (void *)0) + return 0; + + bpf_seq_write(ctx->meta->seq, key, sizeof(__u32)); + bpf_seq_write(ctx->meta->seq, val, sizeof(__u64)); + key_sum += *key; + val_sum += *val; + *val = *key; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c 
b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c new file mode 100644 index 000000000000..85fa710fad90 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 3); + __type(key, __u32); + __type(value, __u32); +} arraymap1 SEC(".maps"); + +/* will set before prog run */ +volatile const __u32 num_cpus = 0; + +__u32 key_sum = 0, val_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_percpu_array_map(struct bpf_iter__bpf_map_elem *ctx) +{ + __u32 *key = ctx->key; + void *pptr = ctx->value; + __u32 step; + int i; + + if (key == (void *)0 || pptr == (void *)0) + return 0; + + key_sum += *key; + + step = 8; + for (i = 0; i < num_cpus; i++) { + val_sum += *(__u32 *)pptr; + pptr += step; + } + return 0; +} From 3b1c420bd882115eb7a3d2335cc00d7b9974eb0b Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:22 -0700 Subject: [PATCH 1610/2056] selftests/bpf: Add a test for bpf sk_storage_map iterator Added one test for bpf sk_storage_map_iterator. $ ./test_progs -n 4 ... #4/22 bpf_sk_storage_map:OK ... Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184122.591591-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 72 +++++++++++++++++++ .../bpf/progs/bpf_iter_bpf_sk_storage_map.c | 34 +++++++++ 2 files changed, 106 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 4a02b2222a6d..ffbbeb9fa268 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -19,6 +19,7 @@ #include "bpf_iter_bpf_percpu_hash_map.skel.h" #include "bpf_iter_bpf_array_map.skel.h" #include "bpf_iter_bpf_percpu_array_map.skel.h" +#include "bpf_iter_bpf_sk_storage_map.skel.h" static int duration; @@ -795,6 +796,75 @@ static void test_bpf_percpu_array_map(void) bpf_iter_bpf_percpu_array_map__destroy(skel); } +static void test_bpf_sk_storage_map(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + int err, i, len, map_fd, iter_fd, num_sockets; + struct bpf_iter_bpf_sk_storage_map *skel; + int sock_fd[3] = {-1, -1, -1}; + __u32 val, expected_val = 0; + struct bpf_link *link; + char buf[64]; + + skel = bpf_iter_bpf_sk_storage_map__open_and_load(); + if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load", + "skeleton open_and_load failed\n")) + return; + + map_fd = bpf_map__fd(skel->maps.sk_stg_map); + num_sockets = ARRAY_SIZE(sock_fd); + for (i = 0; i < num_sockets; i++) { + sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0); + if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno)) + goto out; + + val = i + 1; + expected_val += val; + + err = bpf_map_update_elem(map_fd, &sock_fd[i], &val, + BPF_NOEXIST); + if (CHECK(err, "map_update", "map_update failed\n")) + goto out; + } + + opts.map_fd = map_fd; + link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts); + if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n")) + goto out; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n")) + 
goto free_link; + + /* do some tests */ + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno))) + goto close_iter; + + /* test results */ + if (CHECK(skel->bss->ipv6_sk_count != num_sockets, + "ipv6_sk_count", "got %u expected %u\n", + skel->bss->ipv6_sk_count, num_sockets)) + goto close_iter; + + if (CHECK(skel->bss->val_sum != expected_val, + "val_sum", "got %u expected %u\n", + skel->bss->val_sum, expected_val)) + goto close_iter; + +close_iter: + close(iter_fd); +free_link: + bpf_link__destroy(link); +out: + for (i = 0; i < num_sockets; i++) { + if (sock_fd[i] >= 0) + close(sock_fd[i]); + } + bpf_iter_bpf_sk_storage_map__destroy(skel); +} + void test_bpf_iter(void) { if (test__start_subtest("btf_id_or_null")) @@ -839,4 +909,6 @@ void test_bpf_iter(void) test_bpf_array_map(); if (test__start_subtest("bpf_percpu_array_map")) test_bpf_percpu_array_map(); + if (test__start_subtest("bpf_sk_storage_map")) + test_bpf_sk_storage_map(); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c new file mode 100644 index 000000000000..6b70ccaba301 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include "bpf_tracing_net.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} sk_stg_map SEC(".maps"); + +__u32 val_sum = 0; +__u32 ipv6_sk_count = 0; + +SEC("iter/bpf_sk_storage_map") +int dump_bpf_sk_storage_map(struct bpf_iter__bpf_sk_storage_map *ctx) +{ + struct sock *sk = ctx->sk; + __u32 *val = ctx->value; + + if (sk == (void *)0 || val == (void *)0) + return 0; + + if (sk->sk_family == AF_INET6) + ipv6_sk_count++; + + val_sum += *val; + return 0; +} From 7b04d6d60fcfb5b2200ffebb9cfb90927bdfeec7 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:44 -0700 Subject: [PATCH 1611/2056] bpf: Separate bpf_get_[stack|stackid] for perf events BPF Calling get_perf_callchain() on perf_events from PEBS entries may cause unwinder errors. To fix this issue, the callchain is fetched early. Such perf_events are marked with __PERF_SAMPLE_CALLCHAIN_EARLY. Similarly, calling bpf_get_[stack|stackid] on perf_events from PEBS may also cause unwinder errors. To fix this, add separate version of these two helpers, bpf_get_[stack|stackid]_pe. These two hepers use callchain in bpf_perf_event_data_kern->data->callchain. 
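For context, this is roughly the kind of perf_event program that is affected
(map sizes and names are illustrative); after this change the
bpf_get_stackid() call below resolves to bpf_get_stackid_pe(), which falls
back to the generic helper whenever the event did not collect its callchain
early:

#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64));
} stacks SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __s64);
	__type(value, __u64);
} hits SEC(".maps");

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	__s64 id = bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);
	__u64 one = 1, *cnt;

	if (id < 0)
		return 0;

	cnt = bpf_map_lookup_elem(&hits, &id);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	else
		bpf_map_update_elem(&hits, &id, &one, BPF_NOEXIST);
	return 0;
}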
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-2-songliubraving@fb.com --- include/linux/bpf.h | 2 + kernel/bpf/stackmap.c | 184 +++++++++++++++++++++++++++++++++++---- kernel/trace/bpf_trace.c | 4 +- 3 files changed, 170 insertions(+), 20 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4175cf1f4665..8357be349133 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1675,6 +1675,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; extern const struct bpf_func_proto bpf_get_task_stack_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto_pe; +extern const struct bpf_func_proto bpf_get_stack_proto_pe; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 48d8e739975f..5beb2f8c23da 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -387,11 +388,10 @@ get_callchain_entry_for_task(struct task_struct *task, u32 init_nr) #endif } -BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, - u64, flags) +static long __bpf_get_stackid(struct bpf_map *map, + struct perf_callchain_entry *trace, u64 flags) { struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); - struct perf_callchain_entry *trace; struct stack_map_bucket *bucket, *new_bucket, *old_bucket; u32 max_depth = map->value_size / stack_map_data_size(map); /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ @@ -399,21 +399,9 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, u32 skip = flags & BPF_F_SKIP_FIELD_MASK; u32 hash, id, trace_nr, trace_len; bool user = flags & BPF_F_USER_STACK; - bool kernel = !user; u64 *ips; bool hash_matches; - if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | - BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) - return -EINVAL; - - trace = get_perf_callchain(regs, init_nr, kernel, user, - sysctl_perf_event_max_stack, false, false); - - if (unlikely(!trace)) - /* couldn't fetch the stack trace */ - return -EFAULT; - /* get_perf_callchain() guarantees that trace->nr >= init_nr * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth */ @@ -478,6 +466,30 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, return id; } +BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, + u64, flags) +{ + u32 max_depth = map->value_size / stack_map_data_size(map); + /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ + u32 init_nr = sysctl_perf_event_max_stack - max_depth; + bool user = flags & BPF_F_USER_STACK; + struct perf_callchain_entry *trace; + bool kernel = !user; + + if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) + return -EINVAL; + + trace = get_perf_callchain(regs, init_nr, kernel, user, + sysctl_perf_event_max_stack, false, false); + + if (unlikely(!trace)) + /* couldn't fetch the stack trace */ + return -EFAULT; + + return __bpf_get_stackid(map, trace, flags); +} + const struct bpf_func_proto bpf_get_stackid_proto = { .func = 
bpf_get_stackid, .gpl_only = true, @@ -487,7 +499,77 @@ const struct bpf_func_proto bpf_get_stackid_proto = { .arg3_type = ARG_ANYTHING, }; +static __u64 count_kernel_ip(struct perf_callchain_entry *trace) +{ + __u64 nr_kernel = 0; + + while (nr_kernel < trace->nr) { + if (trace->ip[nr_kernel] == PERF_CONTEXT_USER) + break; + nr_kernel++; + } + return nr_kernel; +} + +BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx, + struct bpf_map *, map, u64, flags) +{ + struct perf_event *event = ctx->event; + struct perf_callchain_entry *trace; + bool kernel, user; + __u64 nr_kernel; + int ret; + + /* perf_sample_data doesn't have callchain, use bpf_get_stackid */ + if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) + return bpf_get_stackid((unsigned long)(ctx->regs), + (unsigned long) map, flags, 0, 0); + + if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) + return -EINVAL; + + user = flags & BPF_F_USER_STACK; + kernel = !user; + + trace = ctx->data->callchain; + if (unlikely(!trace)) + return -EFAULT; + + nr_kernel = count_kernel_ip(trace); + + if (kernel) { + __u64 nr = trace->nr; + + trace->nr = nr_kernel; + ret = __bpf_get_stackid(map, trace, flags); + + /* restore nr */ + trace->nr = nr; + } else { /* user */ + u64 skip = flags & BPF_F_SKIP_FIELD_MASK; + + skip += nr_kernel; + if (skip > BPF_F_SKIP_FIELD_MASK) + return -EFAULT; + + flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; + ret = __bpf_get_stackid(map, trace, flags); + } + return ret; +} + +const struct bpf_func_proto bpf_get_stackid_proto_pe = { + .func = bpf_get_stackid_pe, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_ANYTHING, +}; + static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, + struct perf_callchain_entry *trace_in, void *buf, u32 size, u64 flags) { u32 init_nr, trace_nr, copy_len, elem_size, num_elem; @@ -520,7 +602,9 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, else init_nr = sysctl_perf_event_max_stack - num_elem; - if (kernel && task) + if (trace_in) + trace = trace_in; + else if (kernel && task) trace = get_callchain_entry_for_task(task, init_nr); else trace = get_perf_callchain(regs, init_nr, kernel, user, @@ -556,7 +640,7 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task, BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, u64, flags) { - return __bpf_get_stack(regs, NULL, buf, size, flags); + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); } const struct bpf_func_proto bpf_get_stack_proto = { @@ -574,7 +658,7 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf, { struct pt_regs *regs = task_pt_regs(task); - return __bpf_get_stack(regs, task, buf, size, flags); + return __bpf_get_stack(regs, task, NULL, buf, size, flags); } BTF_ID_LIST(bpf_get_task_stack_btf_ids) @@ -591,6 +675,70 @@ const struct bpf_func_proto bpf_get_task_stack_proto = { .btf_id = bpf_get_task_stack_btf_ids, }; +BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, + void *, buf, u32, size, u64, flags) +{ + struct perf_event *event = ctx->event; + struct perf_callchain_entry *trace; + bool kernel, user; + int err = -EINVAL; + __u64 nr_kernel; + + if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) + return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags); + + if (unlikely(flags & 
~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | + BPF_F_USER_BUILD_ID))) + goto clear; + + user = flags & BPF_F_USER_STACK; + kernel = !user; + + err = -EFAULT; + trace = ctx->data->callchain; + if (unlikely(!trace)) + goto clear; + + nr_kernel = count_kernel_ip(trace); + + if (kernel) { + __u64 nr = trace->nr; + + trace->nr = nr_kernel; + err = __bpf_get_stack(ctx->regs, NULL, trace, buf, + size, flags); + + /* restore nr */ + trace->nr = nr; + } else { /* user */ + u64 skip = flags & BPF_F_SKIP_FIELD_MASK; + + skip += nr_kernel; + if (skip > BPF_F_SKIP_FIELD_MASK) + goto clear; + + flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; + err = __bpf_get_stack(ctx->regs, NULL, trace, buf, + size, flags); + } + return err; + +clear: + memset(buf, 0, size); + return err; + +} + +const struct bpf_func_proto bpf_get_stack_proto_pe = { + .func = bpf_get_stack_pe, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_UNINIT_MEM, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, + .arg4_type = ARG_ANYTHING, +}; + /* Called from eBPF program */ static void *stack_map_lookup_elem(struct bpf_map *map, void *key) { diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 3cc0dcb60ca2..cb91ef902cc4 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1411,9 +1411,9 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_perf_event_output: return &bpf_perf_event_output_proto_tp; case BPF_FUNC_get_stackid: - return &bpf_get_stackid_proto_tp; + return &bpf_get_stackid_proto_pe; case BPF_FUNC_get_stack: - return &bpf_get_stack_proto_tp; + return &bpf_get_stack_proto_pe; case BPF_FUNC_perf_prog_read_value: return &bpf_perf_prog_read_value_proto; case BPF_FUNC_read_branch_records: From 9efcc4ad7a15ea50550c53fbf62457c309216051 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Thu, 23 Jul 2020 11:41:24 -0700 Subject: [PATCH 1612/2056] selftests/bpf: Add a test for out of bound rdonly buf access If the bpf program contains out of bound access w.r.t. a particular map key/value size, the verification will be still okay, e.g., it will be accepted by verifier. But it will be rejected during link_create time. A test is added here to ensure link_create failure did happen if out of bound access happened. $ ./test_progs -n 4 ... #4/23 rdonly-buf-out-of-bound:OK ... 
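The access pattern this test exercises, condensed from the selftest program added below (names such as struct key_t, hashmap1 and key_sum come from that test): the BPF-side read starts one element past the 12-byte key, so the object loads cleanly, and the failure only surfaces when the iterator link is created.

/* BPF side: struct key_t is 12 bytes, the read begins at offset 12 */
SEC("iter/bpf_map_elem")
int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
{
	void *key = ctx->key;

	if (!key)
		return 0;
	/* passes the verifier, rejected at link_create time */
	key_sum += *(__u32 *)(key + sizeof(struct key_t));
	return 0;
}

/* user space: the rejection shows up at attach, not at load */
opts.map_fd = bpf_map__fd(skel->maps.hashmap1);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (!IS_ERR(link))	/* success here would mean the bound check did not fire */
	bpf_link__destroy(link);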
Signed-off-by: Yonghong Song Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723184124.591700-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 22 ++++++++++++ .../selftests/bpf/progs/bpf_iter_test_kern5.c | 35 +++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index ffbbeb9fa268..d95de80b1851 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -20,6 +20,7 @@ #include "bpf_iter_bpf_array_map.skel.h" #include "bpf_iter_bpf_percpu_array_map.skel.h" #include "bpf_iter_bpf_sk_storage_map.skel.h" +#include "bpf_iter_test_kern5.skel.h" static int duration; @@ -865,6 +866,25 @@ static void test_bpf_sk_storage_map(void) bpf_iter_bpf_sk_storage_map__destroy(skel); } +static void test_rdonly_buf_out_of_bound(void) +{ + DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); + struct bpf_iter_test_kern5 *skel; + struct bpf_link *link; + + skel = bpf_iter_test_kern5__open_and_load(); + if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load", + "skeleton open_and_load failed\n")) + return; + + opts.map_fd = bpf_map__fd(skel->maps.hashmap1); + link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts); + if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n")) + bpf_link__destroy(link); + + bpf_iter_test_kern5__destroy(skel); +} + void test_bpf_iter(void) { if (test__start_subtest("btf_id_or_null")) @@ -911,4 +931,6 @@ void test_bpf_iter(void) test_bpf_percpu_array_map(); if (test__start_subtest("bpf_sk_storage_map")) test_bpf_sk_storage_map(); + if (test__start_subtest("rdonly-buf-out-of-bound")) + test_rdonly_buf_out_of_bound(); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c new file mode 100644 index 000000000000..e3a7575e81d2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include +#include + +char _license[] SEC("license") = "GPL"; + +struct key_t { + int a; + int b; + int c; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 3); + __type(key, struct key_t); + __type(value, __u64); +} hashmap1 SEC(".maps"); + +__u32 key_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + void *key = ctx->key; + + if (key == (void *)0) + return 0; + + /* out of bound access w.r.t. hashmap1 */ + key_sum += *(__u32 *)(key + sizeof(struct key_t)); + return 0; +} From 5d99cb2c86775b4780c02a339a9578bf9471ead9 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:45 -0700 Subject: [PATCH 1613/2056] bpf: Fail PERF_EVENT_IOC_SET_BPF when bpf_get_[stack|stackid] cannot work bpf_get_[stack|stackid] on perf_events with precise_ip uses callchain attached to perf_sample_data. If this callchain is not presented, do not allow attaching BPF program that calls bpf_get_[stack|stackid] to this event. In the error case, -EPROTO is returned so that libbpf can identify this error and print proper hint message. 
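A rough user-space illustration of the new restriction (this snippet is a sketch, not part of the patch; prog_fd is assumed to refer to an already loaded program that calls bpf_get_stackid() or bpf_get_stack()): with precise_ip set but PERF_SAMPLE_CALLCHAIN absent, the ioctl is now expected to fail with EPROTO.

#include <errno.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

static int try_set_bpf(int prog_fd)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.precise_ip = 2,
		.sample_type = PERF_SAMPLE_IP,	/* note: no PERF_SAMPLE_CALLCHAIN */
		.sample_period = 5000,
		.size = sizeof(struct perf_event_attr),
	};
	int pmu_fd, err = 0;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (pmu_fd < 0)
		return -errno;
	if (ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0)
		err = -errno;	/* expected: -EPROTO after this change */
	close(pmu_fd);
	return err;
}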
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-3-songliubraving@fb.com --- include/linux/filter.h | 3 ++- kernel/bpf/verifier.c | 3 +++ kernel/events/core.c | 18 ++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index d07a6e973a7d..0a355b005bf4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -533,7 +533,8 @@ struct bpf_prog { is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? */ - enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8d6979db48d8..cd14e70f2d07 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4962,6 +4962,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn env->prog->has_callchain_buf = true; } + if (func_id == BPF_FUNC_get_stackid || func_id == BPF_FUNC_get_stack) + env->prog->call_get_stack = true; + if (changes_data) clear_all_pkt_pointers(env); return 0; diff --git a/kernel/events/core.c b/kernel/events/core.c index 856d98c36f56..ddcfd2fb5cc5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9544,6 +9544,24 @@ static int perf_event_set_bpf_handler(struct perf_event *event, u32 prog_fd) if (IS_ERR(prog)) return PTR_ERR(prog); + if (event->attr.precise_ip && + prog->call_get_stack && + (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) || + event->attr.exclude_callchain_kernel || + event->attr.exclude_callchain_user)) { + /* + * On perf_event with precise_ip, calling bpf_get_stack() + * may trigger unwinder warnings and occasional crashes. + * bpf_get_[stack|stackid] works around this issue by using + * callchain attached to perf_sample_data. If the + * perf_event does not full (kernel and user) callchain + * attached to perf_sample_data, do not allow attaching BPF + * program that calls bpf_get_[stack|stackid]. + */ + bpf_prog_put(prog); + return -EPROTO; + } + event->prog = prog; event->orig_overflow_handler = READ_ONCE(event->overflow_handler); WRITE_ONCE(event->overflow_handler, bpf_overflow_handler); From d4b4dd6ce7709c2d2fe56dcfc15074ee18505bcb Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:46 -0700 Subject: [PATCH 1614/2056] libbpf: Print hint when PERF_EVENT_IOC_SET_BPF returns -EPROTO The kernel prevents potential unwinder warnings and crashes by blocking BPF program with bpf_get_[stack|stackid] on perf_event without PERF_SAMPLE_CALLCHAIN, or with exclude_callchain_[kernel|user]. Print a hint message in libbpf to help the user debug such issues. 
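From the caller's point of view nothing structural changes; the failure simply becomes easier to diagnose. A minimal sketch of the calling pattern (the skeleton and program names here are illustrative, borrowed from the selftests later in this series):

struct bpf_link *link;

link = bpf_program__attach_perf_event(skel->progs.oncpu, pmu_fd);
if (libbpf_get_error(link)) {
	/* libbpf has already printed the generic attach failure plus the new
	 * hint about PERF_SAMPLE_CALLCHAIN / exclude_callchain_[kernel|user]
	 */
	return -1;
}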
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-4-songliubraving@fb.com --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a05aa7e2bab6..e51479d60285 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7833,6 +7833,9 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, pr_warn("program '%s': failed to attach to pfd %d: %s\n", bpf_program__title(prog, false), pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + if (err == -EPROTO) + pr_warn("program '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n", + bpf_program__title(prog, false), pfd); return ERR_PTR(err); } if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) { From d4a89c1eb81431479664029bcdec593dbf23385f Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:41 -0500 Subject: [PATCH 1615/2056] selftests/bpf: Add test for CGROUP_STORAGE map on multiple attaches This test creates a parent cgroup, and a child of that cgroup. It attaches a cgroup_skb/egress program that simply counts packets, to a global variable (ARRAY map), and to a CGROUP_STORAGE map. The program is first attached to the parent cgroup only, then to parent and child. The test cases sends a message within the child cgroup, and because the program is inherited across parent / child cgroups, it will trigger the egress program for both the parent and child, if they exist. The program, when looking up a CGROUP_STORAGE map, uses the cgroup and attach type of the attachment parameters; therefore, both attaches uses different cgroup storages. We assert that all packet counts returns what we expects. Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/5a20206afa4606144691c7caa0d1b997cd60dec0.1595565795.git.zhuyifei@google.com --- .../bpf/prog_tests/cg_storage_multi.c | 161 ++++++++++++++++++ .../bpf/progs/cg_storage_multi_egress_only.c | 30 ++++ 2 files changed, 191 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c create mode 100644 tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c new file mode 100644 index 000000000000..e90e0547d759 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. 
+ */ + +#include +#include +#include + +#include "cg_storage_multi_egress_only.skel.h" + +#define PARENT_CGROUP "/cgroup_storage" +#define CHILD_CGROUP "/cgroup_storage/child" + +static int duration; + +static bool assert_storage(struct bpf_map *map, const char *cgroup_path, + __u32 expected) +{ + struct bpf_cgroup_storage_key key = {0}; + __u32 value; + int map_fd; + + map_fd = bpf_map__fd(map); + + key.cgroup_inode_id = get_cgroup_id(cgroup_path); + key.attach_type = BPF_CGROUP_INET_EGRESS; + if (CHECK(bpf_map_lookup_elem(map_fd, &key, &value) < 0, + "map-lookup", "errno %d", errno)) + return true; + if (CHECK(value != expected, + "assert-storage", "got %u expected %u", value, expected)) + return true; + + return false; +} + +static bool assert_storage_noexist(struct bpf_map *map, const char *cgroup_path) +{ + struct bpf_cgroup_storage_key key = {0}; + __u32 value; + int map_fd; + + map_fd = bpf_map__fd(map); + + key.cgroup_inode_id = get_cgroup_id(cgroup_path); + key.attach_type = BPF_CGROUP_INET_EGRESS; + if (CHECK(bpf_map_lookup_elem(map_fd, &key, &value) == 0, + "map-lookup", "succeeded, expected ENOENT")) + return true; + if (CHECK(errno != ENOENT, + "map-lookup", "errno %d, expected ENOENT", errno)) + return true; + + return false; +} + +static bool connect_send(const char *cgroup_path) +{ + bool res = true; + int server_fd = -1, client_fd = -1; + + if (join_cgroup(cgroup_path)) + goto out_clean; + + server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 0, 0); + if (server_fd < 0) + goto out_clean; + + client_fd = connect_to_fd(server_fd, 0); + if (client_fd < 0) + goto out_clean; + + if (send(client_fd, "message", strlen("message"), 0) < 0) + goto out_clean; + + res = false; + +out_clean: + close(client_fd); + close(server_fd); + return res; +} + +static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_egress_only *obj; + struct bpf_link *parent_link = NULL, *child_link = NULL; + bool err; + + obj = cg_storage_multi_egress_only__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is only one run and in that run the storage is + * parent cgroup's storage. + * Also assert that child cgroup's storage does not exist + */ + parent_link = bpf_program__attach_cgroup(obj->progs.egress, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_link), "parent-cg-attach", + "err %ld", PTR_ERR(parent_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 1, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + if (assert_storage(obj->maps.cgroup_storage, PARENT_CGROUP, 1)) + goto close_bpf_object; + if (assert_storage_noexist(obj->maps.cgroup_storage, CHILD_CGROUP)) + goto close_bpf_object; + + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there are two additional runs, one that run with parent + * cgroup's storage and one with child cgroup's storage. 
+ */ + child_link = bpf_program__attach_cgroup(obj->progs.egress, + child_cgroup_fd); + if (CHECK(IS_ERR(child_link), "child-cg-attach", + "err %ld", PTR_ERR(child_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + if (assert_storage(obj->maps.cgroup_storage, PARENT_CGROUP, 2)) + goto close_bpf_object; + if (assert_storage(obj->maps.cgroup_storage, CHILD_CGROUP, 1)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_link); + bpf_link__destroy(child_link); + + cg_storage_multi_egress_only__destroy(obj); +} + +void test_cg_storage_multi(void) +{ + int parent_cgroup_fd = -1, child_cgroup_fd = -1; + + parent_cgroup_fd = test__join_cgroup(PARENT_CGROUP); + if (CHECK(parent_cgroup_fd < 0, "cg-create-parent", "errno %d", errno)) + goto close_cgroup_fd; + child_cgroup_fd = create_and_get_cgroup(CHILD_CGROUP); + if (CHECK(child_cgroup_fd < 0, "cg-create-child", "errno %d", errno)) + goto close_cgroup_fd; + + if (test__start_subtest("egress_only")) + test_egress_only(parent_cgroup_fd, child_cgroup_fd); + +close_cgroup_fd: + close(child_cgroup_fd); + close(parent_cgroup_fd); +} diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c new file mode 100644 index 000000000000..ec0165d07105 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. + */ + +#include +#include +#include +#include +#include + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); +} cgroup_storage SEC(".maps"); + +__u32 invocations = 0; + +SEC("cgroup_skb/egress") +int egress(struct __sk_buff *skb) +{ + __u32 *ptr_cg_storage = bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(ptr_cg_storage, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} From 1da4864c2b20f81afbb18b2a0b914d0c776331fc Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:47 -0700 Subject: [PATCH 1616/2056] selftests/bpf: Add callchain_stackid This tests new helper function bpf_get_stackid_pe and bpf_get_stack_pe. These two helpers have different implementation for perf_event with PEB entries. 
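A condensed view of the BPF side of the new test (the full program, including the bpf_get_stack() half and its per-cpu scratch map, is in the diff below); with the precise_ip perf_event configured by the loader, these helper calls are served by the new *_pe protos:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 16384);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(stack_trace_t));
} stackmap SEC(".maps");

long stackid_kernel = 1;
long stackid_user = 1;

SEC("perf_event")
int oncpu(void *ctx)
{
	if (bpf_get_stackid(ctx, &stackmap, 0) > 0)
		stackid_kernel = 2;
	if (bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK) > 0)
		stackid_user = 2;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";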
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200723180648.1429892-5-songliubraving@fb.com --- .../bpf/prog_tests/perf_event_stackmap.c | 116 ++++++++++++++++++ .../selftests/bpf/progs/perf_event_stackmap.c | 59 +++++++++ 2 files changed, 175 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c create mode 100644 tools/testing/selftests/bpf/progs/perf_event_stackmap.c diff --git a/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c new file mode 100644 index 000000000000..72c3690844fb --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#define _GNU_SOURCE +#include +#include +#include +#include "perf_event_stackmap.skel.h" + +#ifndef noinline +#define noinline __attribute__((noinline)) +#endif + +noinline int func_1(void) +{ + static int val = 1; + + val += 1; + + usleep(100); + return val; +} + +noinline int func_2(void) +{ + return func_1(); +} + +noinline int func_3(void) +{ + return func_2(); +} + +noinline int func_4(void) +{ + return func_3(); +} + +noinline int func_5(void) +{ + return func_4(); +} + +noinline int func_6(void) +{ + int i, val = 1; + + for (i = 0; i < 100; i++) + val += func_5(); + + return val; +} + +void test_perf_event_stackmap(void) +{ + struct perf_event_attr attr = { + /* .type = PERF_TYPE_SOFTWARE, */ + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .precise_ip = 2, + .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK | + PERF_SAMPLE_CALLCHAIN, + .branch_sample_type = PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_NO_FLAGS | + PERF_SAMPLE_BRANCH_NO_CYCLES | + PERF_SAMPLE_BRANCH_CALL_STACK, + .sample_period = 5000, + .size = sizeof(struct perf_event_attr), + }; + struct perf_event_stackmap *skel; + __u32 duration = 0; + cpu_set_t cpu_set; + int pmu_fd, err; + + skel = perf_event_stackmap__open(); + + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + err = perf_event_stackmap__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; + + CPU_ZERO(&cpu_set); + CPU_SET(0, &cpu_set); + err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set); + if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno)) + goto cleanup; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (pmu_fd < 0) { + printf("%s:SKIP:cpu doesn't support the event\n", __func__); + test__skip(); + goto cleanup; + } + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + if (CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event", + "err %ld\n", PTR_ERR(skel->links.oncpu))) { + close(pmu_fd); + goto cleanup; + } + + /* create kernel and user stack traces for testing */ + func_6(); + + CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n"); + CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n"); + CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n"); + CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n"); + +cleanup: + perf_event_stackmap__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/progs/perf_event_stackmap.c b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c new file mode 100644 index 000000000000..25467d13c356 --- 
/dev/null +++ b/tools/testing/selftests/bpf/progs/perf_event_stackmap.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include "vmlinux.h" +#include + +#ifndef PERF_MAX_STACK_DEPTH +#define PERF_MAX_STACK_DEPTH 127 +#endif + +typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH]; +struct { + __uint(type, BPF_MAP_TYPE_STACK_TRACE); + __uint(max_entries, 16384); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(stack_trace_t)); +} stackmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, stack_trace_t); +} stackdata_map SEC(".maps"); + +long stackid_kernel = 1; +long stackid_user = 1; +long stack_kernel = 1; +long stack_user = 1; + +SEC("perf_event") +int oncpu(void *ctx) +{ + stack_trace_t *trace; + __u32 key = 0; + long val; + + val = bpf_get_stackid(ctx, &stackmap, 0); + if (val > 0) + stackid_kernel = 2; + val = bpf_get_stackid(ctx, &stackmap, BPF_F_USER_STACK); + if (val > 0) + stackid_user = 2; + + trace = bpf_map_lookup_elem(&stackdata_map, &key); + if (!trace) + return 0; + + val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), 0); + if (val > 0) + stack_kernel = 2; + + val = bpf_get_stack(ctx, trace, sizeof(stack_trace_t), BPF_F_USER_STACK); + if (val > 0) + stack_user = 2; + + return 0; +} + +char LICENSE[] SEC("license") = "GPL"; From 9e5bd1f7633bc1c3c8b25496eedfeced6d2675ff Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:42 -0500 Subject: [PATCH 1617/2056] selftests/bpf: Test CGROUP_STORAGE map can't be used by multiple progs The current assumption is that the lifetime of a cgroup storage is tied to the program's attachment. The storage is created in cgroup_bpf_attach, and released upon cgroup_bpf_detach and cgroup_bpf_release. Because the current semantics is that each attachment gets a completely independent cgroup storage, and you can have multiple programs attached to the same (cgroup, attach type) pair, the key of the CGROUP_STORAGE map, looking up the map with this pair could yield multiple storages, and that is not permitted. Therefore, the kernel verifier checks that two programs cannot share the same CGROUP_STORAGE map, even if they have different expected attach types, considering that the actual attach type does not always have to be equal to the expected attach type. The test creates a CGROUP_STORAGE map and make it shared across two different programs, one cgroup_skb/egress and one /ingress. It asserts that the two programs cannot be both loaded, due to verifier failure from the above reason. 
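The rejected layout, condensed from the new selftest program (cg_storage_multi_egress_ingress.c in the diff below; includes and the struct cgroup_value definition are as in that file): two programs in one object share a single CGROUP_STORAGE map, so the object fails to load, and the test expects EBUSY.

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");

SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
	struct cgroup_value *cs = bpf_get_local_storage(&cgroup_storage, 0);

	__sync_fetch_and_add(&cs->egress_pkts, 1);
	return 1;
}

SEC("cgroup_skb/ingress")
int ingress(struct __sk_buff *skb)
{
	struct cgroup_value *cs = bpf_get_local_storage(&cgroup_storage, 0);

	__sync_fetch_and_add(&cs->ingress_pkts, 1);
	return 1;
}
/* at this point in the series the verifier rejects the second program:
 * "only one cgroup storage of each type is allowed"
 */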
Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/30a6b0da67ae6b0296c4d511bfb19c5f3d035916.1595565795.git.zhuyifei@google.com --- .../bpf/prog_tests/cg_storage_multi.c | 43 ++++++++++++++---- .../selftests/bpf/progs/cg_storage_multi.h | 13 ++++++ .../progs/cg_storage_multi_egress_ingress.c | 45 +++++++++++++++++++ .../bpf/progs/cg_storage_multi_egress_only.c | 9 ++-- 4 files changed, 99 insertions(+), 11 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/cg_storage_multi.h create mode 100644 tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index e90e0547d759..1c7653423698 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -8,7 +8,10 @@ #include #include +#include "progs/cg_storage_multi.h" + #include "cg_storage_multi_egress_only.skel.h" +#include "cg_storage_multi_egress_ingress.skel.h" #define PARENT_CGROUP "/cgroup_storage" #define CHILD_CGROUP "/cgroup_storage/child" @@ -16,10 +19,10 @@ static int duration; static bool assert_storage(struct bpf_map *map, const char *cgroup_path, - __u32 expected) + struct cgroup_value *expected) { struct bpf_cgroup_storage_key key = {0}; - __u32 value; + struct cgroup_value value; int map_fd; map_fd = bpf_map__fd(map); @@ -29,8 +32,8 @@ static bool assert_storage(struct bpf_map *map, const char *cgroup_path, if (CHECK(bpf_map_lookup_elem(map_fd, &key, &value) < 0, "map-lookup", "errno %d", errno)) return true; - if (CHECK(value != expected, - "assert-storage", "got %u expected %u", value, expected)) + if (CHECK(memcmp(&value, expected, sizeof(struct cgroup_value)), + "assert-storage", "storages differ")) return true; return false; @@ -39,7 +42,7 @@ static bool assert_storage(struct bpf_map *map, const char *cgroup_path, static bool assert_storage_noexist(struct bpf_map *map, const char *cgroup_path) { struct bpf_cgroup_storage_key key = {0}; - __u32 value; + struct cgroup_value value; int map_fd; map_fd = bpf_map__fd(map); @@ -86,6 +89,7 @@ static bool connect_send(const char *cgroup_path) static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) { struct cg_storage_multi_egress_only *obj; + struct cgroup_value expected_cgroup_value; struct bpf_link *parent_link = NULL, *child_link = NULL; bool err; @@ -109,7 +113,9 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) if (CHECK(obj->bss->invocations != 1, "first-invoke", "invocations=%d", obj->bss->invocations)) goto close_bpf_object; - if (assert_storage(obj->maps.cgroup_storage, PARENT_CGROUP, 1)) + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + PARENT_CGROUP, &expected_cgroup_value)) goto close_bpf_object; if (assert_storage_noexist(obj->maps.cgroup_storage, CHILD_CGROUP)) goto close_bpf_object; @@ -129,9 +135,13 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) if (CHECK(obj->bss->invocations != 3, "second-invoke", "invocations=%d", obj->bss->invocations)) goto close_bpf_object; - if (assert_storage(obj->maps.cgroup_storage, PARENT_CGROUP, 2)) + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + PARENT_CGROUP, &expected_cgroup_value)) goto close_bpf_object; - if (assert_storage(obj->maps.cgroup_storage, 
CHILD_CGROUP, 1)) + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + CHILD_CGROUP, &expected_cgroup_value)) goto close_bpf_object; close_bpf_object: @@ -141,6 +151,20 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) cg_storage_multi_egress_only__destroy(obj); } +static void test_egress_ingress(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_egress_ingress *obj; + + /* Cannot load both programs due to verifier failure: + * "only one cgroup storage of each type is allowed" + */ + obj = cg_storage_multi_egress_ingress__open_and_load(); + CHECK(obj || errno != EBUSY, + "skel-load", "errno %d, expected EBUSY", errno); + + cg_storage_multi_egress_ingress__destroy(obj); +} + void test_cg_storage_multi(void) { int parent_cgroup_fd = -1, child_cgroup_fd = -1; @@ -155,6 +179,9 @@ void test_cg_storage_multi(void) if (test__start_subtest("egress_only")) test_egress_only(parent_cgroup_fd, child_cgroup_fd); + if (test__start_subtest("egress_ingress")) + test_egress_ingress(parent_cgroup_fd, child_cgroup_fd); + close_cgroup_fd: close(child_cgroup_fd); close(parent_cgroup_fd); diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi.h b/tools/testing/selftests/bpf/progs/cg_storage_multi.h new file mode 100644 index 000000000000..a0778fe7857a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __PROGS_CG_STORAGE_MULTI_H +#define __PROGS_CG_STORAGE_MULTI_H + +#include + +struct cgroup_value { + __u32 egress_pkts; + __u32 ingress_pkts; +}; + +#endif diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c new file mode 100644 index 000000000000..9ce386899365 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. 
+ */ + +#include +#include +#include +#include +#include + +#include "progs/cg_storage_multi.h" + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, struct cgroup_value); +} cgroup_storage SEC(".maps"); + +__u32 invocations = 0; + +SEC("cgroup_skb/egress") +int egress(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} + +SEC("cgroup_skb/ingress") +int ingress(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c index ec0165d07105..44ad46b33539 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c @@ -10,10 +10,12 @@ #include #include +#include "progs/cg_storage_multi.h" + struct { __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); __type(key, struct bpf_cgroup_storage_key); - __type(value, __u32); + __type(value, struct cgroup_value); } cgroup_storage SEC(".maps"); __u32 invocations = 0; @@ -21,9 +23,10 @@ __u32 invocations = 0; SEC("cgroup_skb/egress") int egress(struct __sk_buff *skb) { - __u32 *ptr_cg_storage = bpf_get_local_storage(&cgroup_storage, 0); + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); - __sync_fetch_and_add(ptr_cg_storage, 1); + __sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1); __sync_fetch_and_add(&invocations, 1); return 1; From 346938e9380cc0b2ad8e2566389cdc570386fe22 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Thu, 23 Jul 2020 11:06:48 -0700 Subject: [PATCH 1618/2056] selftests/bpf: Add get_stackid_cannot_attach This test confirms that BPF program that calls bpf_get_stackid() cannot attach to perf_event with precise_ip > 0 but not PERF_SAMPLE_CALLCHAIN; and cannot attach if the perf_event has exclude_callchain_kernel. 
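The three attach attempts the test makes differ only in perf_event_attr; a compressed sketch of the expected outcomes (the can_attach() helper here is illustrative and not part of the test, which open-codes the same steps):

static bool can_attach(struct perf_event_attr *attr, struct bpf_program *prog)
{
	int pmu_fd = syscall(__NR_perf_event_open, attr, -1, 0, -1, 0);
	struct bpf_link *link;
	bool ok;

	if (pmu_fd < 0)
		return false;
	link = bpf_program__attach_perf_event(prog, pmu_fd);
	ok = !libbpf_get_error(link);
	if (ok)
		bpf_link__destroy(link);
	close(pmu_fd);
	return ok;
}

/* 1. precise_ip = 1, no PERF_SAMPLE_CALLCHAIN        -> can_attach() == false */
/* 2. attr.sample_type |= PERF_SAMPLE_CALLCHAIN       -> can_attach() == true  */
/* 3. additionally attr.exclude_callchain_kernel = 1  -> can_attach() == false */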
Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200723180648.1429892-6-songliubraving@fb.com --- .../prog_tests/get_stackid_cannot_attach.c | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c diff --git a/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c new file mode 100644 index 000000000000..d884b2ed5bc5 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2020 Facebook +#include +#include "test_stacktrace_build_id.skel.h" + +void test_get_stackid_cannot_attach(void) +{ + struct perf_event_attr attr = { + /* .type = PERF_TYPE_SOFTWARE, */ + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .precise_ip = 1, + .sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK, + .branch_sample_type = PERF_SAMPLE_BRANCH_USER | + PERF_SAMPLE_BRANCH_NO_FLAGS | + PERF_SAMPLE_BRANCH_NO_CYCLES | + PERF_SAMPLE_BRANCH_CALL_STACK, + .sample_period = 5000, + .size = sizeof(struct perf_event_attr), + }; + struct test_stacktrace_build_id *skel; + __u32 duration = 0; + int pmu_fd, err; + + skel = test_stacktrace_build_id__open(); + if (CHECK(!skel, "skel_open", "skeleton open failed\n")) + return; + + /* override program type */ + bpf_program__set_perf_event(skel->progs.oncpu); + + err = test_stacktrace_build_id__load(skel); + if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err)) + goto cleanup; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (pmu_fd < 0 && (errno == ENOENT || errno == EOPNOTSUPP)) { + printf("%s:SKIP:cannot open PERF_COUNT_HW_CPU_CYCLES with precise_ip > 0\n", + __func__); + test__skip(); + goto cleanup; + } + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_no_callchain", + "should have failed\n"); + close(pmu_fd); + + /* add PERF_SAMPLE_CALLCHAIN, attach should succeed */ + attr.sample_type |= PERF_SAMPLE_CALLCHAIN; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + CHECK(IS_ERR(skel->links.oncpu), "attach_perf_event_callchain", + "err: %ld\n", PTR_ERR(skel->links.oncpu)); + close(pmu_fd); + + /* add exclude_callchain_kernel, attach should fail */ + attr.exclude_callchain_kernel = 1; + + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto cleanup; + + skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu, + pmu_fd); + CHECK(!IS_ERR(skel->links.oncpu), "attach_perf_event_exclude_callchain_kernel", + "should have failed\n"); + close(pmu_fd); + +cleanup: + test_stacktrace_build_id__destroy(skel); +} From 7d9c3427894fe70d1347b4820476bf37736d2ff0 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:43 -0500 Subject: [PATCH 1619/2056] bpf: Make cgroup 
storages shared between programs on the same cgroup This change comes in several parts: One, the restriction that the CGROUP_STORAGE map can only be used by one program is removed. This results in the removal of the field 'aux' in struct bpf_cgroup_storage_map, and removal of relevant code associated with the field, and removal of now-noop functions bpf_free_cgroup_storage and bpf_cgroup_storage_release. Second, we permit a key of type u64 as the key to the map. Providing such a key type indicates that the map should ignore attach type when comparing map keys. However, for simplicity newly linked storage will still have the attach type at link time in its key struct. cgroup_storage_check_btf is adapted to accept u64 as the type of the key. Third, because the storages are now shared, the storages cannot be unconditionally freed on program detach. There could be two ways to solve this issue: * A. Reference count the usage of the storages, and free when the last program is detached. * B. Free only when the storage is impossible to be referred to again, i.e. when either the cgroup_bpf it is attached to, or the map itself, is freed. Option A has the side effect that, when the user detach and reattach a program, whether the program gets a fresh storage depends on whether there is another program attached using that storage. This could trigger races if the user is multi-threaded, and since nondeterminism in data races is evil, go with option B. The both the map and the cgroup_bpf now tracks their associated storages, and the storage unlink and free are removed from cgroup_bpf_detach and added to cgroup_bpf_release and cgroup_storage_map_free. The latter also new holds the cgroup_mutex to prevent any races with the former. Fourth, on attach, we reuse the old storage if the key already exists in the map, via cgroup_storage_lookup. If the storage does not exist yet, we create a new one, and publish it at the last step in the attach process. This does not create a race condition because for the whole attach the cgroup_mutex is held. We keep track of an array of new storages that was allocated and if the process fails only the new storages would get freed. 
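In practice, a program that wants the shared (attach-type-agnostic) behaviour now declares the storage map with a plain __u64 key, and user space looks values up by cgroup id alone. A rough sketch of both sides follows; get_cgroup_id() is the selftest cgroup helper used elsewhere in this series, struct cgroup_value is the two-counter value type from the tests above, and the skeleton name is illustrative.

/* BPF side */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, __u64);		/* cgroup id only: one storage per cgroup,
					 * shared across attach types */
	__type(value, struct cgroup_value);
} cgroup_storage SEC(".maps");

/* user space */
__u64 key = get_cgroup_id("/cgroup_storage/child");
struct cgroup_value val;

if (!bpf_map_lookup_elem(bpf_map__fd(skel->maps.cgroup_storage), &key, &val))
	printf("egress=%u ingress=%u\n", val.egress_pkts, val.ingress_pkts);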
Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/d5401c6106728a00890401190db40020a1f84ff1.1595565795.git.zhuyifei@google.com --- include/linux/bpf-cgroup.h | 12 ++- kernel/bpf/cgroup.c | 67 +++++++----- kernel/bpf/core.c | 12 --- kernel/bpf/local_storage.c | 214 ++++++++++++++++++++----------------- 4 files changed, 163 insertions(+), 142 deletions(-) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 2c6f26670acc..64f367044e25 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -46,7 +46,8 @@ struct bpf_cgroup_storage { }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; - struct list_head list; + struct list_head list_map; + struct list_head list_cg; struct rb_node node; struct rcu_head rcu; }; @@ -78,6 +79,9 @@ struct cgroup_bpf { struct list_head progs[MAX_BPF_ATTACH_TYPE]; u32 flags[MAX_BPF_ATTACH_TYPE]; + /* list of cgroup shared storages */ + struct list_head storages; + /* temp storage for effective prog array used by prog_attach/detach */ struct bpf_prog_array *inactive; @@ -161,6 +165,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } +struct bpf_cgroup_storage * +cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, + void *key, bool locked); struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); @@ -169,7 +176,6 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, enum bpf_attach_type type); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); -void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, @@ -383,8 +389,6 @@ static inline void bpf_cgroup_storage_set( struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } -static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, - struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } static inline void bpf_cgroup_storage_free( diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ac53102e244a..957cce1d5168 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -37,17 +37,34 @@ static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[]) } static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[], - struct bpf_prog *prog) + struct bpf_cgroup_storage *new_storages[], + enum bpf_attach_type type, + struct bpf_prog *prog, + struct cgroup *cgrp) { enum bpf_cgroup_storage_type stype; + struct bpf_cgroup_storage_key key; + struct bpf_map *map; + + key.cgroup_inode_id = cgroup_id(cgrp); + key.attach_type = type; for_each_cgroup_storage_type(stype) { + map = prog->aux->cgroup_storage[stype]; + if (!map) + continue; + + storages[stype] = cgroup_storage_lookup((void *)map, &key, false); + if (storages[stype]) + continue; + storages[stype] = bpf_cgroup_storage_alloc(prog, stype); if (IS_ERR(storages[stype])) { - storages[stype] = NULL; - 
bpf_cgroup_storages_free(storages); + bpf_cgroup_storages_free(new_storages); return -ENOMEM; } + + new_storages[stype] = storages[stype]; } return 0; @@ -63,7 +80,7 @@ static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[], } static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], - struct cgroup* cgrp, + struct cgroup *cgrp, enum bpf_attach_type attach_type) { enum bpf_cgroup_storage_type stype; @@ -72,14 +89,6 @@ static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[], bpf_cgroup_storage_link(storages[stype], cgrp, attach_type); } -static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[]) -{ - enum bpf_cgroup_storage_type stype; - - for_each_cgroup_storage_type(stype) - bpf_cgroup_storage_unlink(storages[stype]); -} - /* Called when bpf_cgroup_link is auto-detached from dying cgroup. * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It * doesn't free link memory, which will eventually be done by bpf_link's @@ -101,22 +110,23 @@ static void cgroup_bpf_release(struct work_struct *work) struct cgroup *p, *cgrp = container_of(work, struct cgroup, bpf.release_work); struct bpf_prog_array *old_array; + struct list_head *storages = &cgrp->bpf.storages; + struct bpf_cgroup_storage *storage, *stmp; + unsigned int type; mutex_lock(&cgroup_mutex); for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { struct list_head *progs = &cgrp->bpf.progs[type]; - struct bpf_prog_list *pl, *tmp; + struct bpf_prog_list *pl, *pltmp; - list_for_each_entry_safe(pl, tmp, progs, node) { + list_for_each_entry_safe(pl, pltmp, progs, node) { list_del(&pl->node); if (pl->prog) bpf_prog_put(pl->prog); if (pl->link) bpf_cgroup_link_auto_detach(pl->link); - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_free(pl->storage); kfree(pl); static_branch_dec(&cgroup_bpf_enabled_key); } @@ -126,6 +136,11 @@ static void cgroup_bpf_release(struct work_struct *work) bpf_prog_array_free(old_array); } + list_for_each_entry_safe(storage, stmp, storages, list_cg) { + bpf_cgroup_storage_unlink(storage); + bpf_cgroup_storage_free(storage); + } + mutex_unlock(&cgroup_mutex); for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) @@ -290,6 +305,8 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) for (i = 0; i < NR; i++) INIT_LIST_HEAD(&cgrp->bpf.progs[i]); + INIT_LIST_HEAD(&cgrp->bpf.storages); + for (i = 0; i < NR; i++) if (compute_effective_progs(cgrp, i, &arrays[i])) goto cleanup; @@ -422,7 +439,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct list_head *progs = &cgrp->bpf.progs[type]; struct bpf_prog *old_prog = NULL; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; - struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; + struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; struct bpf_prog_list *pl; int err; @@ -455,17 +472,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, if (IS_ERR(pl)) return PTR_ERR(pl); - if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog)) + if (bpf_cgroup_storages_alloc(storage, new_storage, type, + prog ? 
: link->link.prog, cgrp)) return -ENOMEM; if (pl) { old_prog = pl->prog; - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_assign(old_storage, pl->storage); } else { pl = kmalloc(sizeof(*pl), GFP_KERNEL); if (!pl) { - bpf_cgroup_storages_free(storage); + bpf_cgroup_storages_free(new_storage); return -ENOMEM; } list_add_tail(&pl->node, progs); @@ -480,12 +496,11 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, if (err) goto cleanup; - bpf_cgroup_storages_free(old_storage); if (old_prog) bpf_prog_put(old_prog); else static_branch_inc(&cgroup_bpf_enabled_key); - bpf_cgroup_storages_link(pl->storage, cgrp, type); + bpf_cgroup_storages_link(new_storage, cgrp, type); return 0; cleanup: @@ -493,9 +508,7 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, pl->prog = old_prog; pl->link = NULL; } - bpf_cgroup_storages_free(pl->storage); - bpf_cgroup_storages_assign(pl->storage, old_storage); - bpf_cgroup_storages_link(pl->storage, cgrp, type); + bpf_cgroup_storages_free(new_storage); if (!old_prog) { list_del(&pl->node); kfree(pl); @@ -679,8 +692,6 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, /* now can actually delete it from this cgroup list */ list_del(&pl->node); - bpf_cgroup_storages_unlink(pl->storage); - bpf_cgroup_storages_free(pl->storage); kfree(pl); if (list_empty(progs)) /* last program was detached, reset flags to zero */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7be02e555ab9..bde93344164d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2097,24 +2097,12 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array, : 0; } -static void bpf_free_cgroup_storage(struct bpf_prog_aux *aux) -{ - enum bpf_cgroup_storage_type stype; - - for_each_cgroup_storage_type(stype) { - if (!aux->cgroup_storage[stype]) - continue; - bpf_cgroup_storage_release(aux, aux->cgroup_storage[stype]); - } -} - void __bpf_free_used_maps(struct bpf_prog_aux *aux, struct bpf_map **used_maps, u32 len) { struct bpf_map *map; u32 i; - bpf_free_cgroup_storage(aux); for (i = 0; i < len; i++) { map = used_maps[i]; if (map->ops->map_poke_untrack) diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 51bd5a8cb01b..3b2c70197d78 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -9,6 +9,8 @@ #include #include +#include "../cgroup/cgroup-internal.h" + DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF @@ -20,7 +22,6 @@ struct bpf_cgroup_storage_map { struct bpf_map map; spinlock_t lock; - struct bpf_prog_aux *aux; struct rb_root root; struct list_head list; }; @@ -30,24 +31,41 @@ static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) return container_of(map, struct bpf_cgroup_storage_map, map); } -static int bpf_cgroup_storage_key_cmp( - const struct bpf_cgroup_storage_key *key1, - const struct bpf_cgroup_storage_key *key2) +static bool attach_type_isolated(const struct bpf_map *map) { - if (key1->cgroup_inode_id < key2->cgroup_inode_id) - return -1; - else if (key1->cgroup_inode_id > key2->cgroup_inode_id) - return 1; - else if (key1->attach_type < key2->attach_type) - return -1; - else if (key1->attach_type > key2->attach_type) - return 1; + return map->key_size == sizeof(struct bpf_cgroup_storage_key); +} + +static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map, + const void *_key1, const void *_key2) +{ + if (attach_type_isolated(&map->map)) { + const struct bpf_cgroup_storage_key *key1 = _key1; + const struct 
bpf_cgroup_storage_key *key2 = _key2; + + if (key1->cgroup_inode_id < key2->cgroup_inode_id) + return -1; + else if (key1->cgroup_inode_id > key2->cgroup_inode_id) + return 1; + else if (key1->attach_type < key2->attach_type) + return -1; + else if (key1->attach_type > key2->attach_type) + return 1; + } else { + const __u64 *cgroup_inode_id1 = _key1; + const __u64 *cgroup_inode_id2 = _key2; + + if (*cgroup_inode_id1 < *cgroup_inode_id2) + return -1; + else if (*cgroup_inode_id1 > *cgroup_inode_id2) + return 1; + } return 0; } -static struct bpf_cgroup_storage *cgroup_storage_lookup( - struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key, - bool locked) +struct bpf_cgroup_storage * +cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, + void *key, bool locked) { struct rb_root *root = &map->root; struct rb_node *node; @@ -61,7 +79,7 @@ static struct bpf_cgroup_storage *cgroup_storage_lookup( storage = container_of(node, struct bpf_cgroup_storage, node); - switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) { + switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) { case -1: node = node->rb_left; break; @@ -93,7 +111,7 @@ static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, this = container_of(*new, struct bpf_cgroup_storage, node); parent = *new; - switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) { + switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) { case -1: new = &((*new)->rb_left); break; @@ -111,10 +129,9 @@ static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, return 0; } -static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) +static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; storage = cgroup_storage_lookup(map, key, false); @@ -124,17 +141,13 @@ static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) return &READ_ONCE(storage->buf)->data[0]; } -static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, +static int cgroup_storage_update_elem(struct bpf_map *map, void *key, void *value, u64 flags) { - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; struct bpf_storage_buffer *new; - if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST))) - return -EINVAL; - - if (unlikely(flags & BPF_NOEXIST)) + if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST))) return -EINVAL; if (unlikely((flags & BPF_F_LOCK) && @@ -167,11 +180,10 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, return 0; } -int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, +int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key, void *value) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu, off = 0; u32 size; @@ -197,11 +209,10 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, return 0; } -int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, +int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key, void *value, u64 map_flags) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu, off = 0; u32 size; @@ -232,12 +243,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, 
return 0; } -static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, +static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key, void *_next_key) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); - struct bpf_cgroup_storage_key *key = _key; - struct bpf_cgroup_storage_key *next = _next_key; struct bpf_cgroup_storage *storage; spin_lock_bh(&map->lock); @@ -250,17 +259,23 @@ static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, if (!storage) goto enoent; - storage = list_next_entry(storage, list); + storage = list_next_entry(storage, list_map); if (!storage) goto enoent; } else { storage = list_first_entry(&map->list, - struct bpf_cgroup_storage, list); + struct bpf_cgroup_storage, list_map); } spin_unlock_bh(&map->lock); - next->attach_type = storage->key.attach_type; - next->cgroup_inode_id = storage->key.cgroup_inode_id; + + if (attach_type_isolated(&map->map)) { + struct bpf_cgroup_storage_key *next = _next_key; + *next = storage->key; + } else { + __u64 *next = _next_key; + *next = storage->key.cgroup_inode_id; + } return 0; enoent: @@ -275,7 +290,8 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) struct bpf_map_memory mem; int ret; - if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) + if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) && + attr->key_size != sizeof(__u64)) return ERR_PTR(-EINVAL); if (attr->value_size == 0) @@ -318,6 +334,17 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) static void cgroup_storage_map_free(struct bpf_map *_map) { struct bpf_cgroup_storage_map *map = map_to_storage(_map); + struct list_head *storages = &map->list; + struct bpf_cgroup_storage *storage, *stmp; + + mutex_lock(&cgroup_mutex); + + list_for_each_entry_safe(storage, stmp, storages, list_map) { + bpf_cgroup_storage_unlink(storage); + bpf_cgroup_storage_free(storage); + } + + mutex_unlock(&cgroup_mutex); WARN_ON(!RB_EMPTY_ROOT(&map->root)); WARN_ON(!list_empty(&map->list)); @@ -335,49 +362,63 @@ static int cgroup_storage_check_btf(const struct bpf_map *map, const struct btf_type *key_type, const struct btf_type *value_type) { - struct btf_member *m; - u32 offset, size; + if (attach_type_isolated(map)) { + struct btf_member *m; + u32 offset, size; - /* Key is expected to be of struct bpf_cgroup_storage_key type, - * which is: - * struct bpf_cgroup_storage_key { - * __u64 cgroup_inode_id; - * __u32 attach_type; - * }; - */ + /* Key is expected to be of struct bpf_cgroup_storage_key type, + * which is: + * struct bpf_cgroup_storage_key { + * __u64 cgroup_inode_id; + * __u32 attach_type; + * }; + */ - /* - * Key_type must be a structure with two fields. - */ - if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT || - BTF_INFO_VLEN(key_type->info) != 2) - return -EINVAL; + /* + * Key_type must be a structure with two fields. + */ + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT || + BTF_INFO_VLEN(key_type->info) != 2) + return -EINVAL; - /* - * The first field must be a 64 bit integer at 0 offset. - */ - m = (struct btf_member *)(key_type + 1); - size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); - if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) - return -EINVAL; + /* + * The first field must be a 64 bit integer at 0 offset. 
+ */ + m = (struct btf_member *)(key_type + 1); + size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); + if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) + return -EINVAL; - /* - * The second field must be a 32 bit integer at 64 bit offset. - */ - m++; - offset = offsetof(struct bpf_cgroup_storage_key, attach_type); - size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); - if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) - return -EINVAL; + /* + * The second field must be a 32 bit integer at 64 bit offset. + */ + m++; + offset = offsetof(struct bpf_cgroup_storage_key, attach_type); + size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); + if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) + return -EINVAL; + } else { + u32 int_data; + + /* + * Key is expected to be u64, which stores the cgroup_inode_id + */ + + if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) + return -EINVAL; + + int_data = *(u32 *)(key_type + 1); + if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data)) + return -EINVAL; + } return 0; } -static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, +static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key, struct seq_file *m) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); - struct bpf_cgroup_storage_key *key = _key; struct bpf_cgroup_storage *storage; int cpu; @@ -426,38 +467,13 @@ const struct bpf_map_ops cgroup_storage_map_ops = { int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map) { enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); - struct bpf_cgroup_storage_map *map = map_to_storage(_map); - int ret = -EBUSY; - spin_lock_bh(&map->lock); - - if (map->aux && map->aux != aux) - goto unlock; if (aux->cgroup_storage[stype] && aux->cgroup_storage[stype] != _map) - goto unlock; + return -EBUSY; - map->aux = aux; aux->cgroup_storage[stype] = _map; - ret = 0; -unlock: - spin_unlock_bh(&map->lock); - - return ret; -} - -void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *_map) -{ - enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); - struct bpf_cgroup_storage_map *map = map_to_storage(_map); - - spin_lock_bh(&map->lock); - if (map->aux == aux) { - WARN_ON(aux->cgroup_storage[stype] != _map); - map->aux = NULL; - aux->cgroup_storage[stype] = NULL; - } - spin_unlock_bh(&map->lock); + return 0; } static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) @@ -578,7 +594,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, spin_lock_bh(&map->lock); WARN_ON(cgroup_storage_insert(map, storage)); - list_add(&storage->list, &map->list); + list_add(&storage->list_map, &map->list); + list_add(&storage->list_cg, &cgroup->bpf.storages); spin_unlock_bh(&map->lock); } @@ -596,7 +613,8 @@ void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage) root = &map->root; rb_erase(&storage->node, root); - list_del(&storage->list); + list_del(&storage->list_map); + list_del(&storage->list_cg); spin_unlock_bh(&map->lock); } From 3573f384014f51fd5289df0e8369b63ae7fdc244 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:44 -0500 Subject: [PATCH 1620/2056] selftests/bpf: Test CGROUP_STORAGE behavior on shared egress + ingress This mirrors the original egress-only test. The cgroup_storage is now extended to have two packet counters, one for egress and one for ingress. 
We also extend the test to have two egress programs, to verify that an egress program will always share the storage with other egress programs attached to the same cgroup. The behavior of the counters is exactly the same as in the original egress-only test. The test is split into two: an "isolated" test, where the key type is struct bpf_cgroup_storage_key (which contains the attach type), so programs of different attach types see different storages; and a "shared" test, where the key type is u64, so programs of different attach types see the same storage if they are attached to the same cgroup. Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/c756f5f1521227b8e6e90a453299dda722d7324d.1595565795.git.zhuyifei@google.com --- .../bpf/prog_tests/cg_storage_multi.c | 265 ++++++++++++++++-- ..._ingress.c => cg_storage_multi_isolated.c} | 16 +- .../bpf/progs/cg_storage_multi_shared.c | 57 ++++ 3 files changed, 311 insertions(+), 27 deletions(-) rename tools/testing/selftests/bpf/progs/{cg_storage_multi_egress_ingress.c => cg_storage_multi_isolated.c} (73%) create mode 100644 tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index 1c7653423698..c67d8c076a34 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -11,25 +11,23 @@ #include "progs/cg_storage_multi.h" #include "cg_storage_multi_egress_only.skel.h" -#include "cg_storage_multi_egress_ingress.skel.h" +#include "cg_storage_multi_isolated.skel.h" +#include "cg_storage_multi_shared.skel.h" #define PARENT_CGROUP "/cgroup_storage" #define CHILD_CGROUP "/cgroup_storage/child" static int duration; -static bool assert_storage(struct bpf_map *map, const char *cgroup_path, +static bool assert_storage(struct bpf_map *map, const void *key, struct cgroup_value *expected) { - struct bpf_cgroup_storage_key key = {0}; struct cgroup_value value; int map_fd; map_fd = bpf_map__fd(map); - key.cgroup_inode_id = get_cgroup_id(cgroup_path); - key.attach_type = BPF_CGROUP_INET_EGRESS; - if (CHECK(bpf_map_lookup_elem(map_fd, &key, &value) < 0, + if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) < 0, "map-lookup", "errno %d", errno)) return true; if (CHECK(memcmp(&value, expected, sizeof(struct cgroup_value)), @@ -39,17 +37,14 @@ static bool assert_storage(struct bpf_map *map, const char *cgroup_path, return false; } -static bool assert_storage_noexist(struct bpf_map *map, const char *cgroup_path) +static bool assert_storage_noexist(struct bpf_map *map, const void *key) { - struct bpf_cgroup_storage_key key = {0}; struct cgroup_value value; int map_fd; map_fd = bpf_map__fd(map); - key.cgroup_inode_id = get_cgroup_id(cgroup_path); - key.attach_type = BPF_CGROUP_INET_EGRESS; - if (CHECK(bpf_map_lookup_elem(map_fd, &key, &value) == 0, + if (CHECK(bpf_map_lookup_elem(map_fd, key, &value) == 0, "map-lookup", "succeeded, expected ENOENT")) return true; if (CHECK(errno != ENOENT, @@ -90,9 +85,12 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) { struct cg_storage_multi_egress_only *obj; struct cgroup_value expected_cgroup_value; + struct bpf_cgroup_storage_key key; struct bpf_link *parent_link = NULL, *child_link = NULL; bool err; + key.attach_type = BPF_CGROUP_INET_EGRESS; + obj = cg_storage_multi_egress_only__open_and_load(); if (CHECK(!obj, "skel-load", "errno %d", errno)) return; @@ -113,11 +111,13 @@ static
void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) if (CHECK(obj->bss->invocations != 1, "first-invoke", "invocations=%d", obj->bss->invocations)) goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; if (assert_storage(obj->maps.cgroup_storage, - PARENT_CGROUP, &expected_cgroup_value)) + &key, &expected_cgroup_value)) goto close_bpf_object; - if (assert_storage_noexist(obj->maps.cgroup_storage, CHILD_CGROUP)) + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) goto close_bpf_object; /* Attach to parent and child cgroup, trigger packet from child. @@ -135,13 +135,15 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) if (CHECK(obj->bss->invocations != 3, "second-invoke", "invocations=%d", obj->bss->invocations)) goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; if (assert_storage(obj->maps.cgroup_storage, - PARENT_CGROUP, &expected_cgroup_value)) + &key, &expected_cgroup_value)) goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 1 }; if (assert_storage(obj->maps.cgroup_storage, - CHILD_CGROUP, &expected_cgroup_value)) + &key, &expected_cgroup_value)) goto close_bpf_object; close_bpf_object: @@ -151,18 +153,228 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) cg_storage_multi_egress_only__destroy(obj); } -static void test_egress_ingress(int parent_cgroup_fd, int child_cgroup_fd) +static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd) { - struct cg_storage_multi_egress_ingress *obj; + struct cg_storage_multi_isolated *obj; + struct cgroup_value expected_cgroup_value; + struct bpf_cgroup_storage_key key; + struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL; + struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL; + struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL; + bool err; - /* Cannot load both programs due to verifier failure: - * "only one cgroup storage of each type is allowed" + obj = cg_storage_multi_isolated__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is three runs, two with parent cgroup egress and + * one with parent cgroup ingress, stored in separate parent storages. 
+ * Also assert that child cgroup's storages does not exist */ - obj = cg_storage_multi_egress_ingress__open_and_load(); - CHECK(obj || errno != EBUSY, - "skel-load", "errno %d, expected EBUSY", errno); + parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach", + "err %ld", PTR_ERR(parent_egress1_link))) + goto close_bpf_object; + parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach", + "err %ld", PTR_ERR(parent_egress2_link))) + goto close_bpf_object; + parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach", + "err %ld", PTR_ERR(parent_ingress_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; - cg_storage_multi_egress_ingress__destroy(obj); + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there is six additional runs, parent cgroup egresses and + * ingress, child cgroup egresses and ingress. + * Assert that egree and ingress storages are separate. 
+ */ + child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + child_cgroup_fd); + if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach", + "err %ld", PTR_ERR(child_egress1_link))) + goto close_bpf_object; + child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + child_cgroup_fd); + if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach", + "err %ld", PTR_ERR(child_egress2_link))) + goto close_bpf_object; + child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + child_cgroup_fd); + if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach", + "err %ld", PTR_ERR(child_ingress_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 9, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(PARENT_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 4 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.cgroup_inode_id = get_cgroup_id(CHILD_CGROUP); + key.attach_type = BPF_CGROUP_INET_EGRESS; + expected_cgroup_value = (struct cgroup_value) { .egress_pkts = 2 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key.attach_type = BPF_CGROUP_INET_INGRESS; + expected_cgroup_value = (struct cgroup_value) { .ingress_pkts = 1 }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_egress1_link); + bpf_link__destroy(parent_egress2_link); + bpf_link__destroy(parent_ingress_link); + bpf_link__destroy(child_egress1_link); + bpf_link__destroy(child_egress2_link); + bpf_link__destroy(child_ingress_link); + + cg_storage_multi_isolated__destroy(obj); +} + +static void test_shared(int parent_cgroup_fd, int child_cgroup_fd) +{ + struct cg_storage_multi_shared *obj; + struct cgroup_value expected_cgroup_value; + __u64 key; + struct bpf_link *parent_egress1_link = NULL, *parent_egress2_link = NULL; + struct bpf_link *child_egress1_link = NULL, *child_egress2_link = NULL; + struct bpf_link *parent_ingress_link = NULL, *child_ingress_link = NULL; + bool err; + + obj = cg_storage_multi_shared__open_and_load(); + if (CHECK(!obj, "skel-load", "errno %d", errno)) + return; + + /* Attach to parent cgroup, trigger packet from child. + * Assert that there is three runs, two with parent cgroup egress and + * one with parent cgroup ingress. 
+ * Also assert that child cgroup's storage does not exist + */ + parent_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_egress1_link), "parent-egress1-cg-attach", + "err %ld", PTR_ERR(parent_egress1_link))) + goto close_bpf_object; + parent_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_egress2_link), "parent-egress2-cg-attach", + "err %ld", PTR_ERR(parent_egress2_link))) + goto close_bpf_object; + parent_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + parent_cgroup_fd); + if (CHECK(IS_ERR(parent_ingress_link), "parent-ingress-cg-attach", + "err %ld", PTR_ERR(parent_ingress_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "first-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 3, + "first-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 2, + .ingress_pkts = 1, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key = get_cgroup_id(CHILD_CGROUP); + if (assert_storage_noexist(obj->maps.cgroup_storage, &key)) + goto close_bpf_object; + + /* Attach to parent and child cgroup, trigger packet from child. + * Assert that there is six additional runs, parent cgroup egresses and + * ingress, child cgroup egresses and ingress. + */ + child_egress1_link = bpf_program__attach_cgroup(obj->progs.egress1, + child_cgroup_fd); + if (CHECK(IS_ERR(child_egress1_link), "child-egress1-cg-attach", + "err %ld", PTR_ERR(child_egress1_link))) + goto close_bpf_object; + child_egress2_link = bpf_program__attach_cgroup(obj->progs.egress2, + child_cgroup_fd); + if (CHECK(IS_ERR(child_egress2_link), "child-egress2-cg-attach", + "err %ld", PTR_ERR(child_egress2_link))) + goto close_bpf_object; + child_ingress_link = bpf_program__attach_cgroup(obj->progs.ingress, + child_cgroup_fd); + if (CHECK(IS_ERR(child_ingress_link), "child-ingress-cg-attach", + "err %ld", PTR_ERR(child_ingress_link))) + goto close_bpf_object; + err = connect_send(CHILD_CGROUP); + if (CHECK(err, "second-connect-send", "errno %d", errno)) + goto close_bpf_object; + if (CHECK(obj->bss->invocations != 9, + "second-invoke", "invocations=%d", obj->bss->invocations)) + goto close_bpf_object; + key = get_cgroup_id(PARENT_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 4, + .ingress_pkts = 2, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + key = get_cgroup_id(CHILD_CGROUP); + expected_cgroup_value = (struct cgroup_value) { + .egress_pkts = 2, + .ingress_pkts = 1, + }; + if (assert_storage(obj->maps.cgroup_storage, + &key, &expected_cgroup_value)) + goto close_bpf_object; + +close_bpf_object: + bpf_link__destroy(parent_egress1_link); + bpf_link__destroy(parent_egress2_link); + bpf_link__destroy(parent_ingress_link); + bpf_link__destroy(child_egress1_link); + bpf_link__destroy(child_egress2_link); + bpf_link__destroy(child_ingress_link); + + cg_storage_multi_shared__destroy(obj); } void test_cg_storage_multi(void) @@ -179,8 +391,11 @@ void test_cg_storage_multi(void) if (test__start_subtest("egress_only")) test_egress_only(parent_cgroup_fd, child_cgroup_fd); - if (test__start_subtest("egress_ingress")) - test_egress_ingress(parent_cgroup_fd, child_cgroup_fd); 
+ if (test__start_subtest("isolated")) + test_isolated(parent_cgroup_fd, child_cgroup_fd); + + if (test__start_subtest("shared")) + test_shared(parent_cgroup_fd, child_cgroup_fd); close_cgroup_fd: close(child_cgroup_fd); diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c similarity index 73% rename from tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c rename to tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c index 9ce386899365..a25373002055 100644 --- a/tools/testing/selftests/bpf/progs/cg_storage_multi_egress_ingress.c +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c @@ -20,8 +20,20 @@ struct { __u32 invocations = 0; -SEC("cgroup_skb/egress") -int egress(struct __sk_buff *skb) +SEC("cgroup_skb/egress/1") +int egress1(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} + +SEC("cgroup_skb/egress/2") +int egress2(struct __sk_buff *skb) { struct cgroup_value *ptr_cg_storage = bpf_get_local_storage(&cgroup_storage, 0); diff --git a/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c new file mode 100644 index 000000000000..a149f33bc533 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright 2020 Google LLC. + */ + +#include +#include +#include +#include +#include + +#include "progs/cg_storage_multi.h" + +struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, __u64); + __type(value, struct cgroup_value); +} cgroup_storage SEC(".maps"); + +__u32 invocations = 0; + +SEC("cgroup_skb/egress/1") +int egress1(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} + +SEC("cgroup_skb/egress/2") +int egress2(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->egress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} + +SEC("cgroup_skb/ingress") +int ingress(struct __sk_buff *skb) +{ + struct cgroup_value *ptr_cg_storage = + bpf_get_local_storage(&cgroup_storage, 0); + + __sync_fetch_and_add(&ptr_cg_storage->ingress_pkts, 1); + __sync_fetch_and_add(&invocations, 1); + + return 1; +} From 4e15f460be6d14c3fe80ef3221bde759f6b94d9d Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Thu, 23 Jul 2020 23:47:45 -0500 Subject: [PATCH 1621/2056] Documentation/bpf: Document CGROUP_STORAGE map type The mechanics and usage are not very straightforward. Given the changes, it's better to document how it works and how to use it, rather than having to rely on the examples and implementation to infer what is going on.
Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/b412edfbb05cb1077c9e2a36a981a54ee23fa8b3.1595565795.git.zhuyifei@google.com --- Documentation/bpf/index.rst | 9 ++ Documentation/bpf/map_cgroup_storage.rst | 169 +++++++++++++++++++++++ 2 files changed, 178 insertions(+) create mode 100644 Documentation/bpf/map_cgroup_storage.rst diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 38b4db8be7a2..26f4bb3107fc 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -48,6 +48,15 @@ Program types bpf_lsm +Map types +========= + +.. toctree:: + :maxdepth: 1 + + map_cgroup_storage + + Testing and debugging BPF ========================= diff --git a/Documentation/bpf/map_cgroup_storage.rst b/Documentation/bpf/map_cgroup_storage.rst new file mode 100644 index 000000000000..cab9543017bf --- /dev/null +++ b/Documentation/bpf/map_cgroup_storage.rst @@ -0,0 +1,169 @@ +.. SPDX-License-Identifier: GPL-2.0-only +.. Copyright (C) 2020 Google LLC. + +=========================== +BPF_MAP_TYPE_CGROUP_STORAGE +=========================== + +The ``BPF_MAP_TYPE_CGROUP_STORAGE`` map type represents a local fix-sized +storage. It is only available with ``CONFIG_CGROUP_BPF``, and to programs that +attach to cgroups; the programs are made available by the same Kconfig. The +storage is identified by the cgroup the program is attached to. + +The map provide a local storage at the cgroup that the BPF program is attached +to. It provides a faster and simpler access than the general purpose hash +table, which performs a hash table lookups, and requires user to track live +cgroups on their own. + +This document describes the usage and semantics of the +``BPF_MAP_TYPE_CGROUP_STORAGE`` map type. Some of its behaviors was changed in +Linux 5.9 and this document will describe the differences. + +Usage +===== + +The map uses key of type of either ``__u64 cgroup_inode_id`` or +``struct bpf_cgroup_storage_key``, declared in ``linux/bpf.h``:: + + struct bpf_cgroup_storage_key { + __u64 cgroup_inode_id; + __u32 attach_type; + }; + +``cgroup_inode_id`` is the inode id of the cgroup directory. +``attach_type`` is the the program's attach type. + +Linux 5.9 added support for type ``__u64 cgroup_inode_id`` as the key type. +When this key type is used, then all attach types of the particular cgroup and +map will share the same storage. Otherwise, if the type is +``struct bpf_cgroup_storage_key``, then programs of different attach types +be isolated and see different storages. + +To access the storage in a program, use ``bpf_get_local_storage``:: + + void *bpf_get_local_storage(void *map, u64 flags) + +``flags`` is reserved for future use and must be 0. + +There is no implicit synchronization. Storages of ``BPF_MAP_TYPE_CGROUP_STORAGE`` +can be accessed by multiple programs across different CPUs, and user should +take care of synchronization by themselves. The bpf infrastructure provides +``struct bpf_spin_lock`` to synchronize the storage. See +``tools/testing/selftests/bpf/progs/test_spin_lock.c``. 
+ +Examples +======== + +Usage with key type as ``struct bpf_cgroup_storage_key``:: + + #include + + struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, struct bpf_cgroup_storage_key); + __type(value, __u32); + } cgroup_storage SEC(".maps"); + + int program(struct __sk_buff *skb) + { + __u32 *ptr = bpf_get_local_storage(&cgroup_storage, 0); + __sync_fetch_and_add(ptr, 1); + + return 0; + } + +Userspace accessing map declared above:: + + #include + #include + + __u32 map_lookup(struct bpf_map *map, __u64 cgrp, enum bpf_attach_type type) + { + struct bpf_cgroup_storage_key = { + .cgroup_inode_id = cgrp, + .attach_type = type, + }; + __u32 value; + bpf_map_lookup_elem(bpf_map__fd(map), &key, &value); + // error checking omitted + return value; + } + +Alternatively, using just ``__u64 cgroup_inode_id`` as key type:: + + #include + + struct { + __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE); + __type(key, __u64); + __type(value, __u32); + } cgroup_storage SEC(".maps"); + + int program(struct __sk_buff *skb) + { + __u32 *ptr = bpf_get_local_storage(&cgroup_storage, 0); + __sync_fetch_and_add(ptr, 1); + + return 0; + } + +And userspace:: + + #include + #include + + __u32 map_lookup(struct bpf_map *map, __u64 cgrp, enum bpf_attach_type type) + { + __u32 value; + bpf_map_lookup_elem(bpf_map__fd(map), &cgrp, &value); + // error checking omitted + return value; + } + +Semantics +========= + +``BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE`` is a variant of this map type. This +per-CPU variant will have different memory regions for each CPU for each +storage. The non-per-CPU will have the same memory region for each storage. + +Prior to Linux 5.9, the lifetime of a storage is precisely per-attachment, and +for a single ``CGROUP_STORAGE`` map, there can be at most one program loaded +that uses the map. A program may be attached to multiple cgroups or have +multiple attach types, and each attach creates a fresh zeroed storage. The +storage is freed upon detach. + +There is a one-to-one association between the map of each type (per-CPU and +non-per-CPU) and the BPF program during load verification time. As a result, +each map can only be used by one BPF program and each BPF program can only use +one storage map of each type. Because of map can only be used by one BPF +program, sharing of this cgroup's storage with other BPF programs were +impossible. + +Since Linux 5.9, storage can be shared by multiple programs. When a program is +attached to a cgroup, the kernel would create a new storage only if the map +does not already contain an entry for the cgroup and attach type pair, or else +the old storage is reused for the new attachment. If the map is attach type +shared, then attach type is simply ignored during comparison. Storage is freed +only when either the map or the cgroup attached to is being freed. Detaching +will not directly free the storage, but it may cause the reference to the map +to reach zero and indirectly freeing all storage in the map. + +The map is not associated with any BPF program, thus making sharing possible. +However, the BPF program can still only associate with one map of each type +(per-CPU and non-per-CPU). A BPF program cannot use more than one +``BPF_MAP_TYPE_CGROUP_STORAGE`` or more than one +``BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE``. + +In all versions, userspace may use the the attach parameters of cgroup and +attach type pair in ``struct bpf_cgroup_storage_key`` as the key to the BPF map +APIs to read or update the storage for a given attachment. 
For Linux 5.9 +attach type shared storages, only the first value in the struct, cgroup inode +id, is used during comparison, so userspace may just specify a ``__u64`` +directly. + +The storage is bound at attach time. Even if the program is attached to parent +and triggers in child, the storage still belongs to the parent. + +Userspace cannot create a new entry in the map or delete an existing entry. +Program test runs always use a temporary storage. From dfcdf0e9ad2e006196986f363c99b2097aec5ef0 Mon Sep 17 00:00:00 2001 From: YiFei Zhu Date: Fri, 24 Jul 2020 16:17:53 -0500 Subject: [PATCH 1622/2056] bpf/local_storage: Fix build without CONFIG_CGROUP local_storage.o has its compile guard as CONFIG_BPF_SYSCALL, which does not imply that CONFIG_CGROUP is on. Including cgroup-internal.h when CONFIG_CGROUP is off causes a compilation failure. Fixes: f67cfc233706 ("bpf: Make cgroup storages shared between programs on the same cgroup") Reported-by: kernel test robot Signed-off-by: YiFei Zhu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200724211753.902969-1-zhuyifei1999@gmail.com --- kernel/bpf/local_storage.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c index 3b2c70197d78..571bb351ed3b 100644 --- a/kernel/bpf/local_storage.c +++ b/kernel/bpf/local_storage.c @@ -9,12 +9,12 @@ #include #include -#include "../cgroup/cgroup-internal.h" - DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); #ifdef CONFIG_CGROUP_BPF +#include "../cgroup/cgroup-internal.h" + #define LOCAL_STORAGE_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) From 2b9b305fcdda1810bdffeb599361174eb2cd0b7c Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 24 Jul 2020 13:05:02 -0700 Subject: [PATCH 1623/2056] bpf: Fix build on architectures with special bpf_user_pt_regs_t Architectures like s390, powerpc, arm64, riscv have special definitions of bpf_user_pt_regs_t. So we need to cast the pointer before passing it to bpf_get_stack(). This is similar to bpf_get_stack_tp().
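For illustration only (a simplified sketch, not the literal kernel definitions), this is why the cast is needed:

	/*
	 * The uapi typedef of bpf_user_pt_regs_t differs per architecture, e.g.
	 *
	 *   typedef struct pt_regs bpf_user_pt_regs_t;        (asm-generic, x86)
	 *   typedef user_pt_regs bpf_user_pt_regs_t;          (s390)
	 *   typedef struct user_pt_regs bpf_user_pt_regs_t;   (arm64, powerpc)
	 *
	 * while __bpf_get_stack() takes a struct pt_regs *, hence the one-time
	 * explicit cast of the perf event context registers:
	 */
	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);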
Fixes: 03d42fd2d83f ("bpf: Separate bpf_get_[stack|stackid] for perf events BPF") Reported-by: kernel test robot Signed-off-by: Song Liu Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200724200503.3629591-1-songliubraving@fb.com --- kernel/bpf/stackmap.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 5beb2f8c23da..4fd830a62be2 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -678,6 +678,7 @@ const struct bpf_func_proto bpf_get_task_stack_proto = { BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, void *, buf, u32, size, u64, flags) { + struct pt_regs *regs = (struct pt_regs *)(ctx->regs); struct perf_event *event = ctx->event; struct perf_callchain_entry *trace; bool kernel, user; @@ -685,7 +686,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, __u64 nr_kernel; if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY)) - return __bpf_get_stack(ctx->regs, NULL, NULL, buf, size, flags); + return __bpf_get_stack(regs, NULL, NULL, buf, size, flags); if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | BPF_F_USER_BUILD_ID))) @@ -705,8 +706,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, __u64 nr = trace->nr; trace->nr = nr_kernel; - err = __bpf_get_stack(ctx->regs, NULL, trace, buf, - size, flags); + err = __bpf_get_stack(regs, NULL, trace, buf, size, flags); /* restore nr */ trace->nr = nr; @@ -718,8 +718,7 @@ BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx, goto clear; flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip; - err = __bpf_get_stack(ctx->regs, NULL, trace, buf, - size, flags); + err = __bpf_get_stack(regs, NULL, trace, buf, size, flags); } return err; From 6cc7d1e8e9e06d45f9d1a39a5f465288d7cd8f9a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:54 -0700 Subject: [PATCH 1624/2056] bpf: Make bpf_link API available indepently of CONFIG_BPF_SYSCALL Similarly to bpf_prog, make bpf_link and related generic API available unconditionally to make it easier to have bpf_link support in various parts of the kernel. Stub out init/prime/settle/cleanup and inc/put APIs. 
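As a hypothetical illustration of what this enables (not taken from the patch; my_link and my_link_create are made-up names), subsystem code can now use the bpf_link helpers without wrapping them in #ifdef CONFIG_BPF_SYSCALL; when syscall support is off, bpf_link_prime() simply returns -EOPNOTSUPP:

	#include <linux/bpf.h>
	#include <linux/slab.h>

	struct my_link {
		struct bpf_link link;		/* embedded generic link */
	};

	static int my_link_create(struct bpf_prog *prog,
				  const struct bpf_link_ops *ops)
	{
		struct bpf_link_primer primer;
		struct my_link *ml;
		int err;

		ml = kzalloc(sizeof(*ml), GFP_KERNEL);
		if (!ml)
			return -ENOMEM;

		bpf_link_init(&ml->link, BPF_LINK_TYPE_UNSPEC, ops, prog);

		err = bpf_link_prime(&ml->link, &primer);
		if (err) {			/* -EOPNOTSUPP without CONFIG_BPF_SYSCALL */
			kfree(ml);
			return err;
		}

		/* subsystem-specific attach work goes here; on failure,
		 * bpf_link_cleanup(&primer) would undo the priming.
		 */

		return bpf_link_settle(&primer);	/* returns an FD for the link */
	}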
Reported-by: kernel test robot Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-2-andriin@fb.com --- include/linux/bpf.h | 81 ++++++++++++++++++++++++++++++--------------- 1 file changed, 55 insertions(+), 26 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8357be349133..40c5e206ecf2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -781,6 +781,32 @@ struct bpf_array_aux { struct work_struct work; }; +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + struct bpf_struct_ops_value; struct btf_type; struct btf_member; @@ -1164,32 +1190,6 @@ static inline bool bpf_bypass_spec_v4(void) int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); -struct bpf_link { - atomic64_t refcnt; - u32 id; - enum bpf_link_type type; - const struct bpf_link_ops *ops; - struct bpf_prog *prog; - struct work_struct work; -}; - -struct bpf_link_primer { - struct bpf_link *link; - struct file *file; - int fd; - u32 id; -}; - -struct bpf_link_ops { - void (*release)(struct bpf_link *link); - void (*dealloc)(struct bpf_link *link); - int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, - struct bpf_prog *old_prog); - void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); - int (*fill_link_info)(const struct bpf_link *link, - struct bpf_link_info *info); -}; - void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog); int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); @@ -1401,6 +1401,35 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) { } +static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, + struct bpf_prog *prog) +{ +} + +static inline int bpf_link_prime(struct bpf_link *link, + struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_link_settle(struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline void bpf_link_cleanup(struct bpf_link_primer *primer) +{ +} + +static inline void bpf_link_inc(struct bpf_link *link) +{ +} + +static inline void bpf_link_put(struct bpf_link *link) +{ +} + static inline int bpf_obj_get_user(const char __user *pathname, int flags) { return -EOPNOTSUPP; From 7f0a838254bdd9114b978ef2541a6ce330307e9e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:55 -0700 Subject: [PATCH 1625/2056] bpf, xdp: Maintain info on attached XDP BPF programs in net_device Instead of delegating to drivers, maintain information about which BPF programs are attached in which XDP modes (generic/skb, driver, or hardware) locally in net_device. This effectively obsoletes XDP_QUERY_PROG command. Such re-organization simplifies existing code already. 
It also allows bpf_link-based XDP attachments to be added later without drivers having to know about any of this at all, which seems like a good setup. XDP_SETUP_PROG/XDP_SETUP_PROG_HW are just low-level commands to the driver to install/uninstall the active BPF program. All the higher-level concerns about prog/link interaction will be contained within generic driver-agnostic logic. All the XDP_QUERY_PROG calls to the driver in dev_xdp_uninstall() were removed. It's not clear to me why dev_xdp_uninstall() was passing the previous prog_flags when resetting installed programs. That seems unnecessary, plus most drivers don't populate prog_flags anyway. Having XDP_SETUP_PROG vs XDP_SETUP_PROG_HW should be enough of an indicator of what is required of the driver to correctly reset the active BPF program. dev_xdp_uninstall() is also generalized as an iteration over all three supported modes. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-3-andriin@fb.com --- include/linux/netdevice.h | 17 +++- net/core/dev.c | 166 +++++++++++++++++++++----------------- net/core/rtnetlink.c | 5 +- 3 files changed, 109 insertions(+), 79 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac2cd3f49aba..cad44b40c776 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -889,6 +889,17 @@ struct netlink_ext_ack; struct xdp_umem; struct xdp_dev_bulk_queue; +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE +}; + +struct bpf_xdp_entity { + struct bpf_prog *prog; +}; + struct netdev_bpf { enum bpf_netdev_command command; union { @@ -2142,6 +2153,9 @@ struct net_device { #endif const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; + + /* protected by rtnl_lock */ + struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -3817,8 +3831,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, - enum bpf_netdev_command cmd); +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index fe2e387eed29..bf38fde667e9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8716,84 +8716,103 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, - enum bpf_netdev_command cmd) +static enum bpf_xdp_mode dev_xdp_mode(u32 flags) { - struct netdev_bpf xdp; - - if (!bpf_op) - return 0; - - memset(&xdp, 0, sizeof(xdp)); - xdp.command = cmd; - - /* Query must always succeed.
*/ - WARN_ON(bpf_op(dev, &xdp) < 0 && cmd == XDP_QUERY_PROG); - - return xdp.prog_id; + if (flags & XDP_FLAGS_HW_MODE) + return XDP_MODE_HW; + if (flags & XDP_FLAGS_DRV_MODE) + return XDP_MODE_DRV; + return XDP_MODE_SKB; } -static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, - struct netlink_ext_ack *extack, u32 flags, - struct bpf_prog *prog) +static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) +{ + switch (mode) { + case XDP_MODE_SKB: + return generic_xdp_install; + case XDP_MODE_DRV: + case XDP_MODE_HW: + return dev->netdev_ops->ndo_bpf; + default: + return NULL; + }; +} + +static struct bpf_prog *dev_xdp_prog(struct net_device *dev, + enum bpf_xdp_mode mode) +{ + return dev->xdp_state[mode].prog; +} + +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) +{ + struct bpf_prog *prog = dev_xdp_prog(dev, mode); + + return prog ? prog->aux->id : 0; +} + +static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, + struct bpf_prog *prog) +{ + dev->xdp_state[mode].prog = prog; +} + +static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, + bpf_op_t bpf_op, struct netlink_ext_ack *extack, + u32 flags, struct bpf_prog *prog) { - bool non_hw = !(flags & XDP_FLAGS_HW_MODE); - struct bpf_prog *prev_prog = NULL; struct netdev_bpf xdp; int err; - if (non_hw) { - prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op, - XDP_QUERY_PROG)); - if (IS_ERR(prev_prog)) - prev_prog = NULL; - } - memset(&xdp, 0, sizeof(xdp)); - if (flags & XDP_FLAGS_HW_MODE) - xdp.command = XDP_SETUP_PROG_HW; - else - xdp.command = XDP_SETUP_PROG; + xdp.command = mode == XDP_MODE_HW ? XDP_SETUP_PROG_HW : XDP_SETUP_PROG; xdp.extack = extack; xdp.flags = flags; xdp.prog = prog; + /* Drivers assume refcnt is already incremented (i.e, prog pointer is + * "moved" into driver), so they don't increment it on their own, but + * they do decrement refcnt when program is detached or replaced. + * Given net_device also owns link/prog, we need to bump refcnt here + * to prevent drivers from underflowing it. 
+ */ + if (prog) + bpf_prog_inc(prog); err = bpf_op(dev, &xdp); - if (!err && non_hw) - bpf_prog_change_xdp(prev_prog, prog); + if (err) { + if (prog) + bpf_prog_put(prog); + return err; + } - if (prev_prog) - bpf_prog_put(prev_prog); + if (mode != XDP_MODE_HW) + bpf_prog_change_xdp(dev_xdp_prog(dev, mode), prog); - return err; + return 0; } static void dev_xdp_uninstall(struct net_device *dev) { - struct netdev_bpf xdp; - bpf_op_t ndo_bpf; + struct bpf_prog *prog; + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; - /* Remove generic XDP */ - WARN_ON(dev_xdp_install(dev, generic_xdp_install, NULL, 0, NULL)); + ASSERT_RTNL(); - /* Remove from the driver */ - ndo_bpf = dev->netdev_ops->ndo_bpf; - if (!ndo_bpf) - return; + for (mode = XDP_MODE_SKB; mode < __MAX_XDP_MODE; mode++) { + prog = dev_xdp_prog(dev, mode); + if (!prog) + continue; - memset(&xdp, 0, sizeof(xdp)); - xdp.command = XDP_QUERY_PROG; - WARN_ON(ndo_bpf(dev, &xdp)); - if (xdp.prog_id) - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, - NULL)); + bpf_op = dev_xdp_bpf_op(dev, mode); + if (!bpf_op) + continue; - /* Remove HW offload */ - memset(&xdp, 0, sizeof(xdp)); - xdp.command = XDP_QUERY_PROG_HW; - if (!ndo_bpf(dev, &xdp) && xdp.prog_id) - WARN_ON(dev_xdp_install(dev, ndo_bpf, NULL, xdp.prog_flags, - NULL)); + WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); + + bpf_prog_put(prog); + dev_xdp_set_prog(dev, mode, NULL); + } } /** @@ -8810,29 +8829,22 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags) { const struct net_device_ops *ops = dev->netdev_ops; - enum bpf_netdev_command query; + enum bpf_xdp_mode mode = dev_xdp_mode(flags); + bool offload = mode == XDP_MODE_HW; u32 prog_id, expected_id = 0; - bpf_op_t bpf_op, bpf_chk; struct bpf_prog *prog; - bool offload; + bpf_op_t bpf_op; int err; ASSERT_RTNL(); - offload = flags & XDP_FLAGS_HW_MODE; - query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; - - bpf_op = bpf_chk = ops->ndo_bpf; - if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { + bpf_op = dev_xdp_bpf_op(dev, mode); + if (!bpf_op) { NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); return -EOPNOTSUPP; } - if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) - bpf_op = generic_xdp_install; - if (bpf_op == bpf_chk) - bpf_chk = generic_xdp_install; - prog_id = __dev_xdp_query(dev, bpf_op, query); + prog_id = dev_xdp_prog_id(dev, mode); if (flags & XDP_FLAGS_REPLACE) { if (expected_fd >= 0) { prog = bpf_prog_get_type_dev(expected_fd, @@ -8850,8 +8862,11 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, } } if (fd >= 0) { - if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { - NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); + enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB + ? 
XDP_MODE_DRV : XDP_MODE_SKB; + + if (!offload && dev_xdp_prog_id(dev, other_mode)) { + NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); return -EEXIST; } @@ -8866,7 +8881,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, return PTR_ERR(prog); if (!offload && bpf_prog_is_dev_bound(prog->aux)) { - NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); + NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); bpf_prog_put(prog); return -EINVAL; } @@ -8895,11 +8910,14 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, prog = NULL; } - err = dev_xdp_install(dev, bpf_op, extack, flags, prog); - if (err < 0 && prog) + err = dev_xdp_install(dev, mode, bpf_op, extack, flags, prog); + if (err < 0 && prog) { bpf_prog_put(prog); + return err; + } + dev_xdp_set_prog(dev, mode, prog); - return err; + return 0; } /** diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 85a4b0101f76..58c484a28395 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1416,13 +1416,12 @@ static u32 rtnl_xdp_prog_skb(struct net_device *dev) static u32 rtnl_xdp_prog_drv(struct net_device *dev) { - return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, XDP_QUERY_PROG); + return dev_xdp_prog_id(dev, XDP_MODE_DRV); } static u32 rtnl_xdp_prog_hw(struct net_device *dev) { - return __dev_xdp_query(dev, dev->netdev_ops->ndo_bpf, - XDP_QUERY_PROG_HW); + return dev_xdp_prog_id(dev, XDP_MODE_HW); } static int rtnl_xdp_report_one(struct sk_buff *skb, struct net_device *dev, From d4baa9368a5e4d694e787e0442ddd6ab95d6fd96 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:56 -0700 Subject: [PATCH 1626/2056] bpf, xdp: Extract common XDP program attachment logic Further refactor XDP attachment code. dev_change_xdp_fd() is split into two parts: getting bpf_progs from FDs and attachment logic, working with bpf_progs. This makes attachment logic a bit more straightforward and prepares code for bpf_xdp_link inclusion, which will share the common logic. 
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-4-andriin@fb.com --- net/core/dev.c | 177 +++++++++++++++++++++++++++---------------------- 1 file changed, 97 insertions(+), 80 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index bf38fde667e9..521ce031ee35 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8815,6 +8815,82 @@ static void dev_xdp_uninstall(struct net_device *dev) } } +static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, + struct bpf_prog *new_prog, struct bpf_prog *old_prog, + u32 flags) +{ + struct bpf_prog *cur_prog; + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; + int err; + + ASSERT_RTNL(); + + /* just one XDP mode bit should be set, zero defaults to SKB mode */ + if (hweight32(flags & XDP_FLAGS_MODES) > 1) { + NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); + return -EINVAL; + } + /* old_prog != NULL implies XDP_FLAGS_REPLACE is set */ + if (old_prog && !(flags & XDP_FLAGS_REPLACE)) { + NL_SET_ERR_MSG(extack, "XDP_FLAGS_REPLACE is not specified"); + return -EINVAL; + } + + mode = dev_xdp_mode(flags); + cur_prog = dev_xdp_prog(dev, mode); + if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { + NL_SET_ERR_MSG(extack, "Active program does not match expected"); + return -EEXIST; + } + if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && cur_prog) { + NL_SET_ERR_MSG(extack, "XDP program already attached"); + return -EBUSY; + } + + if (new_prog) { + bool offload = mode == XDP_MODE_HW; + enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB + ? XDP_MODE_DRV : XDP_MODE_SKB; + + if (!offload && dev_xdp_prog(dev, other_mode)) { + NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); + return -EEXIST; + } + if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) { + NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); + return -EINVAL; + } + if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) { + NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); + return -EINVAL; + } + if (new_prog->expected_attach_type == BPF_XDP_CPUMAP) { + NL_SET_ERR_MSG(extack, "BPF_XDP_CPUMAP programs can not be attached to a device"); + return -EINVAL; + } + } + + /* don't call drivers if the effective program didn't change */ + if (new_prog != cur_prog) { + bpf_op = dev_xdp_bpf_op(dev, mode); + if (!bpf_op) { + NL_SET_ERR_MSG(extack, "Underlying driver does not support XDP in native mode"); + return -EOPNOTSUPP; + } + + err = dev_xdp_install(dev, mode, bpf_op, extack, flags, new_prog); + if (err) + return err; + } + + dev_xdp_set_prog(dev, mode, new_prog); + if (cur_prog) + bpf_prog_put(cur_prog); + + return 0; +} + /** * dev_change_xdp_fd - set or clear a bpf program for a device rx path * @dev: device @@ -8828,96 +8904,37 @@ static void dev_xdp_uninstall(struct net_device *dev) int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags) { - const struct net_device_ops *ops = dev->netdev_ops; enum bpf_xdp_mode mode = dev_xdp_mode(flags); - bool offload = mode == XDP_MODE_HW; - u32 prog_id, expected_id = 0; - struct bpf_prog *prog; - bpf_op_t bpf_op; + struct bpf_prog *new_prog = NULL, *old_prog = NULL; int err; ASSERT_RTNL(); - bpf_op = dev_xdp_bpf_op(dev, mode); - if (!bpf_op) { - NL_SET_ERR_MSG(extack, "underlying driver does not support XDP in native mode"); - return -EOPNOTSUPP; - } - - prog_id = dev_xdp_prog_id(dev, mode); - if 
(flags & XDP_FLAGS_REPLACE) { - if (expected_fd >= 0) { - prog = bpf_prog_get_type_dev(expected_fd, - BPF_PROG_TYPE_XDP, - bpf_op == ops->ndo_bpf); - if (IS_ERR(prog)) - return PTR_ERR(prog); - expected_id = prog->aux->id; - bpf_prog_put(prog); - } - - if (prog_id != expected_id) { - NL_SET_ERR_MSG(extack, "Active program does not match expected"); - return -EEXIST; - } - } if (fd >= 0) { - enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB - ? XDP_MODE_DRV : XDP_MODE_SKB; - - if (!offload && dev_xdp_prog_id(dev, other_mode)) { - NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time"); - return -EEXIST; - } - - if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) { - NL_SET_ERR_MSG(extack, "XDP program already attached"); - return -EBUSY; - } - - prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, - bpf_op == ops->ndo_bpf); - if (IS_ERR(prog)) - return PTR_ERR(prog); - - if (!offload && bpf_prog_is_dev_bound(prog->aux)) { - NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported"); - bpf_prog_put(prog); - return -EINVAL; - } - - if (prog->expected_attach_type == BPF_XDP_DEVMAP) { - NL_SET_ERR_MSG(extack, "BPF_XDP_DEVMAP programs can not be attached to a device"); - bpf_prog_put(prog); - return -EINVAL; - } - - if (prog->expected_attach_type == BPF_XDP_CPUMAP) { - NL_SET_ERR_MSG(extack, - "BPF_XDP_CPUMAP programs can not be attached to a device"); - bpf_prog_put(prog); - return -EINVAL; - } - - /* prog->aux->id may be 0 for orphaned device-bound progs */ - if (prog->aux->id && prog->aux->id == prog_id) { - bpf_prog_put(prog); - return 0; - } - } else { - if (!prog_id) - return 0; - prog = NULL; + new_prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, + mode != XDP_MODE_SKB); + if (IS_ERR(new_prog)) + return PTR_ERR(new_prog); } - err = dev_xdp_install(dev, mode, bpf_op, extack, flags, prog); - if (err < 0 && prog) { - bpf_prog_put(prog); - return err; + if (expected_fd >= 0) { + old_prog = bpf_prog_get_type_dev(expected_fd, BPF_PROG_TYPE_XDP, + mode != XDP_MODE_SKB); + if (IS_ERR(old_prog)) { + err = PTR_ERR(old_prog); + old_prog = NULL; + goto err_out; + } } - dev_xdp_set_prog(dev, mode, prog); - return 0; + err = dev_xdp_attach(dev, extack, new_prog, old_prog, flags); + +err_out: + if (err && new_prog) + bpf_prog_put(new_prog); + if (old_prog) + bpf_prog_put(old_prog); + return err; } /** From aa8d3a716b59db6c1ad6c68fb8aa05e31980da60 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:57 -0700 Subject: [PATCH 1627/2056] bpf, xdp: Add bpf_link-based XDP attachment API Add bpf_link-based API (bpf_xdp_link) to attach BPF XDP program through BPF_LINK_CREATE command. bpf_xdp_link is mutually exclusive with direct BPF program attachment, previous BPF program should be detached prior to attempting to create a new bpf_xdp_link attachment (for a given XDP mode). Once BPF link is attached, it can't be replaced by other BPF program attachment or link attachment. It will be detached only when the last BPF link FD is closed. bpf_xdp_link will be auto-detached when net_device is shutdown, similarly to how other BPF links behave (cgroup, flow_dissector). At that point bpf_link will become defunct, but won't be destroyed until last FD is closed. 
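For illustration only, a minimal user-space sketch of creating such a link with the raw bpf(2) syscall, assuming an already-loaded XDP program FD and a valid interface index (error handling trimmed):

	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/bpf.h>

	/* Returns a new link FD on success, -1 on error. */
	static int xdp_link_create(int prog_fd, int ifindex)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.link_create.prog_fd = prog_fd;
		attr.link_create.target_ifindex = ifindex;	/* new union member */
		attr.link_create.attach_type = BPF_XDP;

		return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
	}

Closing the last FD referring to the link (or removing a pinned copy of it) is what detaches the program, matching the lifetime rules described above.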
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-5-andriin@fb.com --- include/linux/netdevice.h | 4 + include/uapi/linux/bpf.h | 7 +- kernel/bpf/syscall.c | 5 ++ net/core/dev.c | 169 ++++++++++++++++++++++++++++++++++++-- 4 files changed, 178 insertions(+), 7 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cad44b40c776..7d3c412fcfe5 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -888,6 +888,7 @@ struct bpf_prog_offload_ops; struct netlink_ext_ack; struct xdp_umem; struct xdp_dev_bulk_queue; +struct bpf_xdp_link; enum bpf_xdp_mode { XDP_MODE_SKB = 0, @@ -898,6 +899,7 @@ enum bpf_xdp_mode { struct bpf_xdp_entity { struct bpf_prog *prog; + struct bpf_xdp_link *link; }; struct netdev_bpf { @@ -3831,7 +3833,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); + int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 828c2f6438f2..87823fb9c123 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -230,6 +230,7 @@ enum bpf_attach_type { BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, + BPF_XDP, __MAX_BPF_ATTACH_TYPE }; @@ -242,6 +243,7 @@ enum bpf_link_type { BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, MAX_BPF_LINK_TYPE, }; @@ -614,7 +616,10 @@ union bpf_attr { struct { /* struct used by BPF_LINK_CREATE command */ __u32 prog_fd; /* eBPF program to attach */ - __u32 target_fd; /* object to attach to */ + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ } link_create; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ee290b1f2d9e..0e8c88db7e7a 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2824,6 +2824,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_TRACING; case BPF_SK_LOOKUP: return BPF_PROG_TYPE_SK_LOOKUP; + case BPF_XDP: + return BPF_PROG_TYPE_XDP; default: return BPF_PROG_TYPE_UNSPEC; } @@ -3921,6 +3923,9 @@ static int link_create(union bpf_attr *attr) case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; + case BPF_PROG_TYPE_XDP: + ret = bpf_xdp_link_attach(attr, prog); + break; default: ret = -EINVAL; } diff --git a/net/core/dev.c b/net/core/dev.c index 521ce031ee35..e24248f3d675 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8716,6 +8716,12 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); +struct bpf_xdp_link { + struct bpf_link link; + struct net_device *dev; /* protected by rtnl_lock, no refcnt held */ + int flags; +}; + static enum bpf_xdp_mode dev_xdp_mode(u32 flags) { if (flags & XDP_FLAGS_HW_MODE) @@ -8738,9 +8744,19 @@ static bpf_op_t dev_xdp_bpf_op(struct net_device *dev, enum bpf_xdp_mode mode) }; } +static struct bpf_xdp_link *dev_xdp_link(struct net_device *dev, + enum 
bpf_xdp_mode mode) +{ + return dev->xdp_state[mode].link; +} + static struct bpf_prog *dev_xdp_prog(struct net_device *dev, enum bpf_xdp_mode mode) { + struct bpf_xdp_link *link = dev_xdp_link(dev, mode); + + if (link) + return link->link.prog; return dev->xdp_state[mode].prog; } @@ -8751,9 +8767,17 @@ u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode) return prog ? prog->aux->id : 0; } +static void dev_xdp_set_link(struct net_device *dev, enum bpf_xdp_mode mode, + struct bpf_xdp_link *link) +{ + dev->xdp_state[mode].link = link; + dev->xdp_state[mode].prog = NULL; +} + static void dev_xdp_set_prog(struct net_device *dev, enum bpf_xdp_mode mode, struct bpf_prog *prog) { + dev->xdp_state[mode].link = NULL; dev->xdp_state[mode].prog = prog; } @@ -8793,6 +8817,7 @@ static int dev_xdp_install(struct net_device *dev, enum bpf_xdp_mode mode, static void dev_xdp_uninstall(struct net_device *dev) { + struct bpf_xdp_link *link; struct bpf_prog *prog; enum bpf_xdp_mode mode; bpf_op_t bpf_op; @@ -8810,14 +8835,20 @@ static void dev_xdp_uninstall(struct net_device *dev) WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); - bpf_prog_put(prog); - dev_xdp_set_prog(dev, mode, NULL); + /* auto-detach link from net device */ + link = dev_xdp_link(dev, mode); + if (link) + link->dev = NULL; + else + bpf_prog_put(prog); + + dev_xdp_set_link(dev, mode, NULL); } } static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack, - struct bpf_prog *new_prog, struct bpf_prog *old_prog, - u32 flags) + struct bpf_xdp_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog, u32 flags) { struct bpf_prog *cur_prog; enum bpf_xdp_mode mode; @@ -8826,6 +8857,14 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack ASSERT_RTNL(); + /* either link or prog attachment, never both */ + if (link && (new_prog || old_prog)) + return -EINVAL; + /* link supports only XDP mode flags */ + if (link && (flags & ~XDP_FLAGS_MODES)) { + NL_SET_ERR_MSG(extack, "Invalid XDP flags for BPF link attachment"); + return -EINVAL; + } /* just one XDP mode bit should be set, zero defaults to SKB mode */ if (hweight32(flags & XDP_FLAGS_MODES) > 1) { NL_SET_ERR_MSG(extack, "Only one XDP mode flag can be set"); @@ -8838,7 +8877,18 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack } mode = dev_xdp_mode(flags); + /* can't replace attached link */ + if (dev_xdp_link(dev, mode)) { + NL_SET_ERR_MSG(extack, "Can't replace active BPF XDP link"); + return -EBUSY; + } + cur_prog = dev_xdp_prog(dev, mode); + /* can't replace attached prog with link */ + if (link && cur_prog) { + NL_SET_ERR_MSG(extack, "Can't replace active XDP program with BPF link"); + return -EBUSY; + } if ((flags & XDP_FLAGS_REPLACE) && cur_prog != old_prog) { NL_SET_ERR_MSG(extack, "Active program does not match expected"); return -EEXIST; @@ -8848,6 +8898,10 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack return -EBUSY; } + /* put effective new program into new_prog */ + if (link) + new_prog = link->link.prog; + if (new_prog) { bool offload = mode == XDP_MODE_HW; enum bpf_xdp_mode other_mode = mode == XDP_MODE_SKB @@ -8884,13 +8938,116 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack return err; } - dev_xdp_set_prog(dev, mode, new_prog); + if (link) + dev_xdp_set_link(dev, mode, link); + else + dev_xdp_set_prog(dev, mode, new_prog); if (cur_prog) bpf_prog_put(cur_prog); return 0; } +static int 
dev_xdp_attach_link(struct net_device *dev, + struct netlink_ext_ack *extack, + struct bpf_xdp_link *link) +{ + return dev_xdp_attach(dev, extack, link, NULL, NULL, link->flags); +} + +static int dev_xdp_detach_link(struct net_device *dev, + struct netlink_ext_ack *extack, + struct bpf_xdp_link *link) +{ + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; + + ASSERT_RTNL(); + + mode = dev_xdp_mode(link->flags); + if (dev_xdp_link(dev, mode) != link) + return -EINVAL; + + bpf_op = dev_xdp_bpf_op(dev, mode); + WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL)); + dev_xdp_set_link(dev, mode, NULL); + return 0; +} + +static void bpf_xdp_link_release(struct bpf_link *link) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + + rtnl_lock(); + + /* if racing with net_device's tear down, xdp_link->dev might be + * already NULL, in which case link was already auto-detached + */ + if (xdp_link->dev) + WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); + + rtnl_unlock(); +} + +static void bpf_xdp_link_dealloc(struct bpf_link *link) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + + kfree(xdp_link); +} + +static const struct bpf_link_ops bpf_xdp_link_lops = { + .release = bpf_xdp_link_release, + .dealloc = bpf_xdp_link_dealloc, +}; + +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct net *net = current->nsproxy->net_ns; + struct bpf_link_primer link_primer; + struct bpf_xdp_link *link; + struct net_device *dev; + int err, fd; + + dev = dev_get_by_index(net, attr->link_create.target_ifindex); + if (!dev) + return -EINVAL; + + link = kzalloc(sizeof(*link), GFP_USER); + if (!link) { + err = -ENOMEM; + goto out_put_dev; + } + + bpf_link_init(&link->link, BPF_LINK_TYPE_XDP, &bpf_xdp_link_lops, prog); + link->dev = dev; + link->flags = attr->link_create.flags; + + err = bpf_link_prime(&link->link, &link_primer); + if (err) { + kfree(link); + goto out_put_dev; + } + + rtnl_lock(); + err = dev_xdp_attach_link(dev, NULL, link); + rtnl_unlock(); + + if (err) { + bpf_link_cleanup(&link_primer); + goto out_put_dev; + } + + fd = bpf_link_settle(&link_primer); + /* link itself doesn't hold dev's refcnt to not complicate shutdown */ + dev_put(dev); + return fd; + +out_put_dev: + dev_put(dev); + return err; +} + /** * dev_change_xdp_fd - set or clear a bpf program for a device rx path * @dev: device @@ -8927,7 +9084,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, } } - err = dev_xdp_attach(dev, extack, new_prog, old_prog, flags); + err = dev_xdp_attach(dev, extack, NULL, new_prog, old_prog, flags); err_out: if (err && new_prog) From 026a4c28e1db3b0cb99cd9a3e495d4a8b632fa74 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:58 -0700 Subject: [PATCH 1628/2056] bpf, xdp: Implement LINK_UPDATE for BPF XDP link Add support for LINK_UPDATE command for BPF XDP link to enable reliable replacement of underlying BPF program. 
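A user-space sketch of the update path, assuming libbpf's bpf_link_update() wrapper for the LINK_UPDATE command (function and variable names here are illustrative):

	#include <bpf/bpf.h>
	#include <linux/bpf.h>

	/* Atomically swap the XDP program behind an existing link FD. */
	static int swap_xdp_prog(int link_fd, int new_prog_fd, int old_prog_fd)
	{
		DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts,
			.flags = BPF_F_REPLACE,		/* only swap if old prog matches */
			.old_prog_fd = old_prog_fd,
		);

		return bpf_link_update(link_fd, new_prog_fd, &opts);
	}

Passing BPF_F_REPLACE with old_prog_fd makes the swap conditional on the currently attached program, mirroring the old_prog check in bpf_xdp_link_update() above; passing NULL opts replaces unconditionally.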
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-6-andriin@fb.com --- net/core/dev.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/net/core/dev.c b/net/core/dev.c index e24248f3d675..49f284f51a22 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8996,9 +8996,52 @@ static void bpf_xdp_link_dealloc(struct bpf_link *link) kfree(xdp_link); } +static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + enum bpf_xdp_mode mode; + bpf_op_t bpf_op; + int err = 0; + + rtnl_lock(); + + /* link might have been auto-released already, so fail */ + if (!xdp_link->dev) { + err = -ENOLINK; + goto out_unlock; + } + + if (old_prog && link->prog != old_prog) { + err = -EPERM; + goto out_unlock; + } + old_prog = link->prog; + if (old_prog == new_prog) { + /* no-op, don't disturb drivers */ + bpf_prog_put(new_prog); + goto out_unlock; + } + + mode = dev_xdp_mode(xdp_link->flags); + bpf_op = dev_xdp_bpf_op(xdp_link->dev, mode); + err = dev_xdp_install(xdp_link->dev, mode, bpf_op, NULL, + xdp_link->flags, new_prog); + if (err) + goto out_unlock; + + old_prog = xchg(&link->prog, new_prog); + bpf_prog_put(old_prog); + +out_unlock: + rtnl_unlock(); + return err; +} + static const struct bpf_link_ops bpf_xdp_link_lops = { .release = bpf_xdp_link_release, .dealloc = bpf_xdp_link_dealloc, + .update_prog = bpf_xdp_link_update, }; int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) From c1931c9784ebb5787c0784c112fb8baa5e8455b3 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:45:59 -0700 Subject: [PATCH 1629/2056] bpf: Implement BPF XDP link-specific introspection APIs Implement XDP link-specific show_fdinfo and link_info to emit ifindex. 
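For reference, a user-space sketch of reading the new field (illustrative only; the link FD is assumed to come from an earlier XDP link attachment):

#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static void print_xdp_link_info(int link_fd)
{
        struct bpf_link_info info;
        __u32 len = sizeof(info);

        memset(&info, 0, sizeof(info));
        if (bpf_obj_get_info_by_fd(link_fd, &info, &len))
                return;
        /* info.xdp.ifindex is the field added here; it reads back as 0 once
         * the target device is gone and the link has been auto-detached.
         */
        printf("type %u prog_id %u ifindex %u\n",
               info.type, info.prog_id, info.xdp.ifindex);
}

The fdinfo side of the same data appears as an "ifindex:" line in /proc/<pid>/fdinfo/<link fd>.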
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-7-andriin@fb.com --- include/uapi/linux/bpf.h | 3 +++ net/core/dev.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 87823fb9c123..e1ba4ae6a916 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -4069,6 +4069,9 @@ struct bpf_link_info { __u32 netns_ino; __u32 attach_type; } netns; + struct { + __u32 ifindex; + } xdp; }; } __attribute__((aligned(8))); diff --git a/net/core/dev.c b/net/core/dev.c index 49f284f51a22..82ce0920b172 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8996,6 +8996,35 @@ static void bpf_xdp_link_dealloc(struct bpf_link *link) kfree(xdp_link); } +static void bpf_xdp_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + u32 ifindex = 0; + + rtnl_lock(); + if (xdp_link->dev) + ifindex = xdp_link->dev->ifindex; + rtnl_unlock(); + + seq_printf(seq, "ifindex:\t%u\n", ifindex); +} + +static int bpf_xdp_link_fill_link_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); + u32 ifindex = 0; + + rtnl_lock(); + if (xdp_link->dev) + ifindex = xdp_link->dev->ifindex; + rtnl_unlock(); + + info->xdp.ifindex = ifindex; + return 0; +} + static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog) { @@ -9041,6 +9070,8 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, static const struct bpf_link_ops bpf_xdp_link_lops = { .release = bpf_xdp_link_release, .dealloc = bpf_xdp_link_dealloc, + .show_fdinfo = bpf_xdp_link_show_fdinfo, + .fill_link_info = bpf_xdp_link_fill_link_info, .update_prog = bpf_xdp_link_update, }; From dc8698cac7aada9b61a612cb819341d84591163e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:46:00 -0700 Subject: [PATCH 1630/2056] libbpf: Add support for BPF XDP link Sync UAPI header and add support for using bpf_link-based XDP attachment. Make xdp/ prog type set expected attach type. Kernel didn't enforce attach_type for XDP programs before, so there are no backwards compatibility issues there. Also fix section_names selftest to recognize that xdp prog types now have expected attach type.
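A hedged usage sketch of the new attach API (the program pointer and ifindex are placeholders supplied by the caller, e.g. out of a skeleton):

#include <bpf/libbpf.h>

/* Sketch: "prog" must be an already-loaded program from a SEC("xdp")
 * section; ifindex identifies the target device.
 */
static struct bpf_link *attach_xdp_via_link(struct bpf_program *prog, int ifindex)
{
        struct bpf_link *link;

        link = bpf_program__attach_xdp(prog, ifindex);
        if (libbpf_get_error(link))
                return NULL;
        /* Unlike the netlink-based bpf_set_link_xdp_fd(), the attachment
         * lives only as long as this bpf_link: closing its FD without
         * pinning it (or calling bpf_link__destroy()) detaches the program.
         */
        return link;
}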
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-8-andriin@fb.com --- tools/include/uapi/linux/bpf.h | 10 +++++++++- tools/lib/bpf/libbpf.c | 9 ++++++++- tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 1 + tools/testing/selftests/bpf/prog_tests/section_names.c | 2 +- 5 files changed, 21 insertions(+), 3 deletions(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 828c2f6438f2..e1ba4ae6a916 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -230,6 +230,7 @@ enum bpf_attach_type { BPF_CGROUP_INET_SOCK_RELEASE, BPF_XDP_CPUMAP, BPF_SK_LOOKUP, + BPF_XDP, __MAX_BPF_ATTACH_TYPE }; @@ -242,6 +243,7 @@ enum bpf_link_type { BPF_LINK_TYPE_CGROUP = 3, BPF_LINK_TYPE_ITER = 4, BPF_LINK_TYPE_NETNS = 5, + BPF_LINK_TYPE_XDP = 6, MAX_BPF_LINK_TYPE, }; @@ -614,7 +616,10 @@ union bpf_attr { struct { /* struct used by BPF_LINK_CREATE command */ __u32 prog_fd; /* eBPF program to attach */ - __u32 target_fd; /* object to attach to */ + union { + __u32 target_fd; /* object to attach to */ + __u32 target_ifindex; /* target ifindex */ + }; __u32 attach_type; /* attach type */ __u32 flags; /* extra flags */ } link_create; @@ -4064,6 +4069,9 @@ struct bpf_link_info { __u32 netns_ino; __u32 attach_type; } netns; + struct { + __u32 ifindex; + } xdp; }; } __attribute__((aligned(8))); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e51479d60285..54830d603fee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6915,7 +6915,8 @@ static const struct bpf_sec_def section_defs[] = { BPF_XDP_DEVMAP), BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP, BPF_XDP_CPUMAP), - BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), + BPF_EAPROG_SEC("xdp", BPF_PROG_TYPE_XDP, + BPF_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT), @@ -8281,6 +8282,12 @@ bpf_program__attach_netns(struct bpf_program *prog, int netns_fd) return bpf_program__attach_fd(prog, netns_fd, "netns"); } +struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex) +{ + /* target_fd/target_ifindex use the same field in LINK_CREATE */ + return bpf_program__attach_fd(prog, ifindex, "xdp"); +} + struct bpf_link * bpf_program__attach_iter(struct bpf_program *prog, const struct bpf_iter_attach_opts *opts) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c6813791fa7e..9924385462ab 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -257,6 +257,8 @@ LIBBPF_API struct bpf_link * bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); LIBBPF_API struct bpf_link * bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); +LIBBPF_API struct bpf_link * +bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); struct bpf_map; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 6f0856abe299..ca49a6a7e5b2 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -286,6 +286,7 @@ LIBBPF_0.1.0 { bpf_map__set_value_size; bpf_map__type; bpf_map__value_size; + bpf_program__attach_xdp; bpf_program__autoload; bpf_program__is_sk_lookup; bpf_program__set_autoload; diff --git a/tools/testing/selftests/bpf/prog_tests/section_names.c b/tools/testing/selftests/bpf/prog_tests/section_names.c index 713167449c98..8b571890c57e 100644 --- a/tools/testing/selftests/bpf/prog_tests/section_names.c +++ 
b/tools/testing/selftests/bpf/prog_tests/section_names.c @@ -35,7 +35,7 @@ static struct sec_name_test tests[] = { {-EINVAL, 0}, }, {"raw_tp/", {0, BPF_PROG_TYPE_RAW_TRACEPOINT, 0}, {-EINVAL, 0} }, - {"xdp", {0, BPF_PROG_TYPE_XDP, 0}, {-EINVAL, 0} }, + {"xdp", {0, BPF_PROG_TYPE_XDP, BPF_XDP}, {0, BPF_XDP} }, {"perf_event", {0, BPF_PROG_TYPE_PERF_EVENT, 0}, {-EINVAL, 0} }, {"lwt_in", {0, BPF_PROG_TYPE_LWT_IN, 0}, {-EINVAL, 0} }, {"lwt_out", {0, BPF_PROG_TYPE_LWT_OUT, 0}, {-EINVAL, 0} }, From fe48230cf2ae27c9e3b96d29908e22e2926fd1ab Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:46:01 -0700 Subject: [PATCH 1631/2056] selftests/bpf: Add BPF XDP link selftests Add selftest validating all the attachment logic around BPF XDP link. Test also link updates and get_obj_info() APIs. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-9-andriin@fb.com --- .../selftests/bpf/prog_tests/xdp_link.c | 137 ++++++++++++++++++ .../selftests/bpf/progs/test_xdp_link.c | 12 ++ 2 files changed, 149 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/xdp_link.c create mode 100644 tools/testing/selftests/bpf/progs/test_xdp_link.c diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c new file mode 100644 index 000000000000..52cba6795d40 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include +#include +#include "test_xdp_link.skel.h" + +#define IFINDEX_LO 1 + +void test_xdp_link(void) +{ + __u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err; + DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1); + struct test_xdp_link *skel1 = NULL, *skel2 = NULL; + struct bpf_link_info link_info; + struct bpf_prog_info prog_info; + struct bpf_link *link; + __u32 link_info_len = sizeof(link_info); + __u32 prog_info_len = sizeof(prog_info); + + skel1 = test_xdp_link__open_and_load(); + if (CHECK(!skel1, "skel_load", "skeleton open and load failed\n")) + goto cleanup; + prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler); + + skel2 = test_xdp_link__open_and_load(); + if (CHECK(!skel2, "skel_load", "skeleton open and load failed\n")) + goto cleanup; + prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler); + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len); + if (CHECK(err, "fd_info1", "failed %d\n", -errno)) + goto cleanup; + id1 = prog_info.id; + + memset(&prog_info, 0, sizeof(prog_info)); + err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len); + if (CHECK(err, "fd_info2", "failed %d\n", -errno)) + goto cleanup; + id2 = prog_info.id; + + /* set initial prog attachment */ + err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts); + if (CHECK(err, "fd_attach", "initial prog attach failed: %d\n", err)) + goto cleanup; + + /* validate prog ID */ + err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + CHECK(err || id0 != id1, "id1_check", + "loaded prog id %u != id1 %u, err %d", id0, id1, err); + + /* BPF link is not allowed to replace prog attachment */ + link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO); + if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) { + bpf_link__destroy(link); + /* best-effort detach prog */ + opts.old_fd = prog_fd1; + bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, 
XDP_FLAGS_REPLACE, &opts); + goto cleanup; + } + + /* detach BPF program */ + opts.old_fd = prog_fd1; + err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts); + if (CHECK(err, "prog_detach", "failed %d\n", err)) + goto cleanup; + + /* now BPF link should attach successfully */ + link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO); + if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link))) + goto cleanup; + skel1->links.xdp_handler = link; + + /* validate prog ID */ + err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + if (CHECK(err || id0 != id1, "id1_check", + "loaded prog id %u != id1 %u, err %d", id0, id1, err)) + goto cleanup; + + /* BPF prog attach is not allowed to replace BPF link */ + opts.old_fd = prog_fd1; + err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts); + if (CHECK(!err, "prog_attach_fail", "unexpected success\n")) + goto cleanup; + + /* Can't force-update when BPF link is active */ + err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0); + if (CHECK(!err, "prog_update_fail", "unexpected success\n")) + goto cleanup; + + /* Can't force-detach when BPF link is active */ + err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0); + if (CHECK(!err, "prog_detach_fail", "unexpected success\n")) + goto cleanup; + + /* BPF link is not allowed to replace another BPF link */ + link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO); + if (CHECK(!IS_ERR(link), "link_attach_fail", "unexpected success\n")) { + bpf_link__destroy(link); + goto cleanup; + } + + bpf_link__destroy(skel1->links.xdp_handler); + skel1->links.xdp_handler = NULL; + + /* new link attach should succeed */ + link = bpf_program__attach_xdp(skel2->progs.xdp_handler, IFINDEX_LO); + if (CHECK(IS_ERR(link), "link_attach", "failed: %ld\n", PTR_ERR(link))) + goto cleanup; + skel2->links.xdp_handler = link; + + err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0); + if (CHECK(err || id0 != id2, "id2_check", + "loaded prog id %u != id2 %u, err %d", id0, id1, err)) + goto cleanup; + + /* updating program under active BPF link works as expected */ + err = bpf_link__update_program(link, skel1->progs.xdp_handler); + if (CHECK(err, "link_upd", "failed: %d\n", err)) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len); + if (CHECK(err, "link_info", "failed: %d\n", err)) + goto cleanup; + + CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type", + "got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP); + CHECK(link_info.prog_id != id1, "link_prog_id", + "got %u != exp %u\n", link_info.prog_id, id1); + CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex", + "got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO); + +cleanup: + test_xdp_link__destroy(skel1); + test_xdp_link__destroy(skel2); +} diff --git a/tools/testing/selftests/bpf/progs/test_xdp_link.c b/tools/testing/selftests/bpf/progs/test_xdp_link.c new file mode 100644 index 000000000000..eb93ea95d1d8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_xdp_link.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include +#include + +char LICENSE[] SEC("license") = "GPL"; + +SEC("xdp/handler") +int xdp_handler(struct xdp_md *xdp) +{ + return 0; +} From e8407fdeb9a6866784e249881f6c786a0835faba Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 21 Jul 2020 23:46:02 -0700 Subject: [PATCH 1632/2056] bpf, xdp: Remove XDP_QUERY_PROG and XDP_QUERY_PROG_HW XDP commands 
Now that BPF program/link management is centralized in generic net_device code, kernel code never queries program id from drivers, so XDP_QUERY_PROG/XDP_QUERY_PROG_HW commands are unnecessary. This patch removes all the implementations of those commands in kernel, along the xdp_attachment_query(). This patch was compile-tested on allyesconfig. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200722064603.3350758-10-andriin@fb.com --- drivers/net/ethernet/amazon/ena/ena_netdev.c | 6 ----- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 4 ---- .../net/ethernet/cavium/thunder/nicvf_main.c | 3 --- .../net/ethernet/freescale/dpaa2/dpaa2-eth.c | 5 ---- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 --- drivers/net/ethernet/intel/ice/ice_main.c | 3 --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ---- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 ----- drivers/net/ethernet/marvell/mvneta.c | 5 ---- .../net/ethernet/marvell/mvpp2/mvpp2_main.c | 3 --- .../net/ethernet/mellanox/mlx4/en_netdev.c | 24 ------------------- .../net/ethernet/mellanox/mlx5/core/en_main.c | 18 -------------- .../ethernet/netronome/nfp/nfp_net_common.c | 4 ---- .../net/ethernet/qlogic/qede/qede_filter.c | 3 --- drivers/net/ethernet/sfc/efx.c | 5 ---- drivers/net/ethernet/socionext/netsec.c | 3 --- drivers/net/ethernet/ti/cpsw_priv.c | 3 --- drivers/net/hyperv/netvsc_bpf.c | 21 +--------------- drivers/net/netdevsim/bpf.c | 4 ---- drivers/net/netdevsim/netdevsim.h | 2 +- drivers/net/tun.c | 15 ------------ drivers/net/veth.c | 15 ------------ drivers/net/virtio_net.c | 17 ------------- drivers/net/xen-netfront.c | 21 ---------------- include/linux/netdevice.h | 8 ------- include/net/xdp.h | 2 -- net/core/dev.c | 4 ---- net/core/xdp.c | 9 ------- 28 files changed, 2 insertions(+), 218 deletions(-) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 6478c1e0d137..2a6c9725e092 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -576,15 +576,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf) */ static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf) { - struct ena_adapter *adapter = netdev_priv(netdev); - switch (bpf->command) { case XDP_SETUP_PROG: return ena_xdp_set(netdev, bpf); - case XDP_QUERY_PROG: - bpf->prog_id = adapter->xdp_bpf_prog ? - adapter->xdp_bpf_prog->aux->id : 0; - break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 5e3b4a3b69ea..2704a4709bc7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -330,10 +330,6 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) case XDP_SETUP_PROG: rc = bnxt_xdp_set(bp, xdp->prog); break; - case XDP_QUERY_PROG: - xdp->prog_id = bp->xdp_prog ? 
bp->xdp_prog->aux->id : 0; - rc = 0; - break; default: rc = -EINVAL; break; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 2ba0ce115e63..1c6163934e20 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1906,9 +1906,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return nicvf_xdp_setup(nic, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 9b4028c0e34c..17f6bcafc944 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2077,14 +2077,9 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct dpaa2_eth_priv *priv = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return setup_xdp(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0; - break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dadbfb3d2a2b..d8315811cbdf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12923,9 +12923,6 @@ static int i40e_xdp(struct net_device *dev, switch (xdp->command) { case XDP_SETUP_PROG: return i40e_xdp_setup(vsi, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 16a4096bb780..231f4b6e93d0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2292,9 +2292,6 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return ice_xsk_umem_setup(vsi, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d898ff21a46..6f32b1706ab9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10190,10 +10190,6 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return ixgbe_xdp_setup(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = adapter->xdp_prog ? 
- adapter->xdp_prog->aux->id : 0; - return 0; case XDP_SETUP_XSK_UMEM: return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6e9a397db583..a6267569bfa9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4502,15 +4502,9 @@ static int ixgbevf_xdp_setup(struct net_device *dev, struct bpf_prog *prog) static int ixgbevf_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct ixgbevf_adapter *adapter = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return ixgbevf_xdp_setup(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = adapter->xdp_prog ? - adapter->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2c9277e73cef..6e3f9e2f883b 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4442,14 +4442,9 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog, static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp) { - struct mvneta_port *pp = netdev_priv(dev); - switch (xdp->command) { case XDP_SETUP_PROG: return mvneta_xdp_setup(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6a3f356640a0..cd5e9d60307e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4656,9 +4656,6 @@ static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return mvpp2_xdp_setup(port, xdp); - case XDP_QUERY_PROG: - xdp->prog_id = port->xdp_prog ? 
port->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 2b8608f8f0a9..106513f772c3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2802,35 +2802,11 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) return err; } -static u32 mlx4_xdp_query(struct net_device *dev) -{ - struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_dev *mdev = priv->mdev; - const struct bpf_prog *xdp_prog; - u32 prog_id = 0; - - if (!priv->tx_ring_num[TX_XDP]) - return prog_id; - - mutex_lock(&mdev->state_lock); - xdp_prog = rcu_dereference_protected( - priv->rx_ring[0]->xdp_prog, - lockdep_is_held(&mdev->state_lock)); - if (xdp_prog) - prog_id = xdp_prog->aux->id; - mutex_unlock(&mdev->state_lock); - - return prog_id; -} - static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return mlx4_xdp_set(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = mlx4_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9d5d8b28bcd8..aa4fb503dac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4518,29 +4518,11 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) return err; } -static u32 mlx5e_xdp_query(struct net_device *dev) -{ - struct mlx5e_priv *priv = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - u32 prog_id = 0; - - mutex_lock(&priv->state_lock); - xdp_prog = priv->channels.params.xdp_prog; - if (xdp_prog) - prog_id = xdp_prog->aux->id; - mutex_unlock(&priv->state_lock); - - return prog_id; -} - static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return mlx5e_xdp_set(dev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = mlx5e_xdp_query(dev); - return 0; case XDP_SETUP_XSK_UMEM: return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem, xdp->xsk.queue_id); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 44608873d3d9..39ee23e8c0bf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3614,10 +3614,6 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) return nfp_net_xdp_setup_drv(nn, xdp); case XDP_SETUP_PROG_HW: return nfp_net_xdp_setup_hw(nn, xdp); - case XDP_QUERY_PROG: - return xdp_attachment_query(&nn->xdp, xdp); - case XDP_QUERY_PROG_HW: - return xdp_attachment_query(&nn->xdp_hw, xdp); default: return nfp_app_bpf(nn->app, nn, xdp); } diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index b7d0b6ccebd3..f961f65d9372 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1045,9 +1045,6 @@ int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return qede_xdp_set(edev, xdp->prog); - case XDP_QUERY_PROG: - xdp->prog_id = edev->xdp_prog ? 
edev->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index f16b4f236031..d60acaa3879d 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -653,15 +653,10 @@ static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog) static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct efx_nic *efx = netdev_priv(dev); - struct bpf_prog *xdp_prog; switch (xdp->command) { case XDP_SETUP_PROG: return efx_xdp_setup_prog(efx, xdp->prog); - case XDP_QUERY_PROG: - xdp_prog = rtnl_dereference(efx->xdp_prog); - xdp->prog_id = xdp_prog ? xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 0f366cc50b74..25db667fa879 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1811,9 +1811,6 @@ static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return netsec_xdp_setup(priv, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0; - return 0; default: return -EINVAL; } diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index a399f3659346..d6d7a7d9c7ad 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -1286,9 +1286,6 @@ int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf) case XDP_SETUP_PROG: return cpsw_xdp_prog_setup(priv, bpf); - case XDP_QUERY_PROG: - return xdp_attachment_query(&priv->xdpi, bpf); - default: return -EINVAL; } diff --git a/drivers/net/hyperv/netvsc_bpf.c b/drivers/net/hyperv/netvsc_bpf.c index 8e4141552423..440486d9c999 100644 --- a/drivers/net/hyperv/netvsc_bpf.c +++ b/drivers/net/hyperv/netvsc_bpf.c @@ -163,16 +163,6 @@ int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) return ret; } -static u32 netvsc_xdp_query(struct netvsc_device *nvdev) -{ - struct bpf_prog *prog = netvsc_xdp_get(nvdev); - - if (prog) - return prog->aux->id; - - return 0; -} - int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) { struct net_device_context *ndevctx = netdev_priv(dev); @@ -182,12 +172,7 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) int ret; if (!nvdev || nvdev->destroy) { - if (bpf->command == XDP_QUERY_PROG) { - bpf->prog_id = 0; - return 0; /* Query must always succeed */ - } else { - return -ENODEV; - } + return -ENODEV; } switch (bpf->command) { @@ -208,10 +193,6 @@ int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) return ret; - case XDP_QUERY_PROG: - bpf->prog_id = netvsc_xdp_query(nvdev); - return 0; - default: return -EINVAL; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index 0b362b8dac17..2e90512f3bbe 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -551,10 +551,6 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) ASSERT_RTNL(); switch (bpf->command) { - case XDP_QUERY_PROG: - return xdp_attachment_query(&ns->xdp, bpf); - case XDP_QUERY_PROG_HW: - return xdp_attachment_query(&ns->xdp_hw, bpf); case XDP_SETUP_PROG: err = nsim_setup_prog_checks(ns, bpf); if (err) diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index d164052e0393..284f7092241d 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ 
b/drivers/net/netdevsim/netdevsim.h @@ -121,7 +121,7 @@ static inline void nsim_bpf_uninit(struct netdevsim *ns) static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) { - return bpf->command == XDP_QUERY_PROG ? 0 : -EOPNOTSUPP; + return -EOPNOTSUPP; } static inline int nsim_bpf_disable_tc(struct netdevsim *ns) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7adeb91bd368..061bebe25cb1 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1184,26 +1184,11 @@ static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, return 0; } -static u32 tun_xdp_query(struct net_device *dev) -{ - struct tun_struct *tun = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - - xdp_prog = rtnl_dereference(tun->xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - - return 0; -} - static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return tun_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = tun_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index b594f03eeddb..e56cd562a664 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1198,26 +1198,11 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, return err; } -static u32 veth_xdp_query(struct net_device *dev) -{ - struct veth_priv *priv = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - - xdp_prog = priv->_xdp_prog; - if (xdp_prog) - return xdp_prog->aux->id; - - return 0; -} - static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return veth_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = veth_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ba38765dc490..6fa8fe5ef160 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2490,28 +2490,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return err; } -static u32 virtnet_xdp_query(struct net_device *dev) -{ - struct virtnet_info *vi = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - int i; - - for (i = 0; i < vi->max_queue_pairs; i++) { - xdp_prog = rtnl_dereference(vi->rq[i].xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - } - return 0; -} - static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return virtnet_xdp_set(dev, xdp->prog, xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = virtnet_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index a63e550c370e..458be6882b98 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1480,32 +1480,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return 0; } -static u32 xennet_xdp_query(struct net_device *dev) -{ - unsigned int num_queues = dev->real_num_tx_queues; - struct netfront_info *np = netdev_priv(dev); - const struct bpf_prog *xdp_prog; - struct netfront_queue *queue; - unsigned int i; - - for (i = 0; i < num_queues; ++i) { - queue = &np->queues[i]; - xdp_prog = rtnl_dereference(queue->xdp_prog); - if (xdp_prog) - return xdp_prog->aux->id; - } - - return 0; -} - static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: return xennet_xdp_set(dev, xdp->prog, 
xdp->extack); - case XDP_QUERY_PROG: - xdp->prog_id = xennet_xdp_query(dev); - return 0; default: return -EINVAL; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7d3c412fcfe5..1046763cd0dc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -876,8 +876,6 @@ enum bpf_netdev_command { */ XDP_SETUP_PROG, XDP_SETUP_PROG_HW, - XDP_QUERY_PROG, - XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, @@ -911,12 +909,6 @@ struct netdev_bpf { struct bpf_prog *prog; struct netlink_ext_ack *extack; }; - /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ - struct { - u32 prog_id; - /* flags with which program was installed */ - u32 prog_flags; - }; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; diff --git a/include/net/xdp.h b/include/net/xdp.h index dbe9c60797e1..3814fb631d52 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -240,8 +240,6 @@ struct xdp_attachment_info { }; struct netdev_bpf; -int xdp_attachment_query(struct xdp_attachment_info *info, - struct netdev_bpf *bpf); bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, struct netdev_bpf *bpf); void xdp_attachment_setup(struct xdp_attachment_info *info, diff --git a/net/core/dev.c b/net/core/dev.c index 82ce0920b172..a2a57988880a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -5468,10 +5468,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) } break; - case XDP_QUERY_PROG: - xdp->prog_id = old ? old->aux->id : 0; - break; - default: ret = -EINVAL; break; diff --git a/net/core/xdp.c b/net/core/xdp.c index 3c45f99e26d5..48aba933a5a8 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -400,15 +400,6 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem) } EXPORT_SYMBOL_GPL(__xdp_release_frame); -int xdp_attachment_query(struct xdp_attachment_info *info, - struct netdev_bpf *bpf) -{ - bpf->prog_id = info->prog ? info->prog->aux->id : 0; - bpf->prog_flags = info->prog ? info->flags : 0; - return 0; -} -EXPORT_SYMBOL_GPL(xdp_attachment_query); - bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, struct netdev_bpf *bpf) { From d9f0c8e457c059cc72a3ff240aae48c0bf387988 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 22 Jun 2020 10:20:17 +0300 Subject: [PATCH 1633/2056] igc: Remove unneeded variable Though we are populating and tracking ictxqec, the value is not being used for anything so remove it altogether and save the register read. 
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 1 - drivers/net/ethernet/intel/igc/igc_mac.c | 1 - drivers/net/ethernet/intel/igc/igc_main.c | 1 - drivers/net/ethernet/intel/igc/igc_regs.h | 2 -- 4 files changed, 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 2ab7d9fab6af..68e83d8529ea 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -280,7 +280,6 @@ struct igc_hw_stats { u64 icrxatc; u64 ictxptc; u64 ictxatc; - u64 ictxqec; u64 ictxqmtc; u64 icrxdmtc; u64 icrxoc; diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index b47e7b0a6398..2d9ca3e1bdde 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -301,7 +301,6 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_ICRXATC); rd32(IGC_ICTXPTC); rd32(IGC_ICTXATC); - rd32(IGC_ICTXQEC); rd32(IGC_ICTXQMTC); rd32(IGC_ICRXDMTC); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8d5869dcf798..e620d7a78d05 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3735,7 +3735,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.icrxatc += rd32(IGC_ICRXATC); adapter->stats.ictxptc += rd32(IGC_ICTXPTC); adapter->stats.ictxatc += rd32(IGC_ICTXATC); - adapter->stats.ictxqec += rd32(IGC_ICTXQEC); adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 1c46cec5a799..d6ed1b1ebcbc 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -63,7 +63,6 @@ #define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ #define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ #define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ -#define IGC_ICTXQEC 0x04118 /* Tx Queue Empty Count */ #define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */ #define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ #define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ @@ -184,7 +183,6 @@ #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ #define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ #define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ -#define IGC_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ #define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ #define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ From 60f7bb824133ee3820b94957c89e2321fd5aec3f Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 22 Jun 2020 10:20:30 +0300 Subject: [PATCH 1634/2056] igc: Add Receive Descriptor Minimum Threshold Count to clear HW counters The statistics of this register are being tracked, however, the register was inadvertently missed when implementing igc_clear_hw_cntrs_base(). The register is clear on read, so add it to the function so that the register is cleared when requested so the tracked count is accurate. 
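Because the fix relies on the counter being clear-on-read, a generic illustration of that pattern may help; this is not the driver's code, and the helper names are invented:

#include <stdint.h>

/* Hypothetical stand-in for the driver's rd32() MMIO accessor. */
static inline uint32_t mmio_read32(const volatile uint32_t *reg)
{
        return *reg;    /* for clear-on-read counters this read also zeroes them */
}

/* Reset only: read and deliberately discard the value, which is what a
 * clear-counters routine like igc_clear_hw_cntrs_base() does.
 */
static void counter_clear(const volatile uint32_t *reg)
{
        (void)mmio_read32(reg);
}

/* Accumulate into a software total: a single read both samples and clears. */
static void counter_accumulate(uint64_t *sw_total, const volatile uint32_t *reg)
{
        *sw_total += mmio_read32(reg);
}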
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_mac.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 2d9ca3e1bdde..3a618e69514e 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -308,6 +308,7 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_TLPIC); rd32(IGC_RLPIC); rd32(IGC_HGPTC); + rd32(IGC_RXDMTC); rd32(IGC_HGORCL); rd32(IGC_HGORCH); rd32(IGC_HGOTCL); From ed6ab19adf493ec5043318a3e4e7eac2cc0a032a Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 22 Jun 2020 10:43:34 +0300 Subject: [PATCH 1635/2056] igc: Remove unneeded ICTXQMTC register The Tx Queue Min Threshold Count register is not applicable to the i225 device. This patch removes it. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 1 - drivers/net/ethernet/intel/igc/igc_mac.c | 1 - drivers/net/ethernet/intel/igc/igc_main.c | 1 - drivers/net/ethernet/intel/igc/igc_regs.h | 2 -- 4 files changed, 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 68e83d8529ea..f11fa0a4baff 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -280,7 +280,6 @@ struct igc_hw_stats { u64 icrxatc; u64 ictxptc; u64 ictxatc; - u64 ictxqmtc; u64 icrxdmtc; u64 icrxoc; u64 cbtmpc; diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 3a618e69514e..f85c8bcd7f70 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -301,7 +301,6 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_ICRXATC); rd32(IGC_ICTXPTC); rd32(IGC_ICTXATC); - rd32(IGC_ICTXQMTC); rd32(IGC_ICRXDMTC); rd32(IGC_RPTHC); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e620d7a78d05..6f86783836c5 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3735,7 +3735,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.icrxatc += rd32(IGC_ICRXATC); adapter->stats.ictxptc += rd32(IGC_ICTXPTC); adapter->stats.ictxatc += rd32(IGC_ICTXATC); - adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC); adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); /* Fill out the OS statistics structure */ diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index d6ed1b1ebcbc..23554d39ad18 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -63,7 +63,6 @@ #define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ #define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ #define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ -#define IGC_ICTXQMTC 0x0411C /* Tx Queue Min Threshold Count */ #define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ #define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ @@ -183,7 +182,6 @@ #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ #define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ #define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ -#define IGC_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ #define
IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ #define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ From 94a5181f4bc418a5701e476b65aa347695f9dab3 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 29 Jun 2020 17:59:44 +0300 Subject: [PATCH 1636/2056] igc: Fix registers definition IGC_ICTXPTC and IGC_ICTXATC are already defined elsewhere, remove this double definition. Also, remove unneeded registers as they are not applicable to i225 devices. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 4 ---- drivers/net/ethernet/intel/igc/igc_mac.c | 4 ---- drivers/net/ethernet/intel/igc/igc_main.c | 4 ---- drivers/net/ethernet/intel/igc/igc_regs.h | 8 -------- 4 files changed, 20 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index f11fa0a4baff..ac3de58e6e26 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -276,12 +276,8 @@ struct igc_hw_stats { u64 tsctc; u64 tsctfc; u64 iac; - u64 icrxptc; - u64 icrxatc; u64 ictxptc; u64 ictxatc; - u64 icrxdmtc; - u64 icrxoc; u64 cbtmpc; u64 htdpmc; u64 cbrdpc; diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index f85c8bcd7f70..02bbb8ac4f68 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -295,13 +295,9 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_MGTPTC); rd32(IGC_IAC); - rd32(IGC_ICRXOC); - rd32(IGC_ICRXPTC); - rd32(IGC_ICRXATC); rd32(IGC_ICTXPTC); rd32(IGC_ICTXATC); - rd32(IGC_ICRXDMTC); rd32(IGC_RPTHC); rd32(IGC_TLPIC); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6f86783836c5..d91fa4c06f2e 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3730,12 +3730,8 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.tsctc += rd32(IGC_TSCTC); adapter->stats.iac += rd32(IGC_IAC); - adapter->stats.icrxoc += rd32(IGC_ICRXOC); - adapter->stats.icrxptc += rd32(IGC_ICRXPTC); - adapter->stats.icrxatc += rd32(IGC_ICRXATC); adapter->stats.ictxptc += rd32(IGC_ICTXPTC); adapter->stats.ictxatc += rd32(IGC_ICTXATC); - adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC); /* Fill out the OS statistics structure */ net_stats->multicast = adapter->stats.mprc; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 23554d39ad18..5ff3316717c7 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -58,14 +58,6 @@ #define IGC_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ #define IGC_GPIE 0x01514 /* General Purpose Intr Enable - RW */ -/* Interrupt Cause */ -#define IGC_ICRXPTC 0x04104 /* Rx Packet Timer Expire Count */ -#define IGC_ICRXATC 0x04108 /* Rx Absolute Timer Expire Count */ -#define IGC_ICTXPTC 0x0410C /* Tx Packet Timer Expire Count */ -#define IGC_ICTXATC 0x04110 /* Tx Absolute Timer Expire Count */ -#define IGC_ICRXDMTC 0x04120 /* Rx Descriptor Min Threshold Count */ -#define IGC_ICRXOC 0x04124 /* Receiver Overrun Count */ - /* MSI-X Table Register Descriptions */ #define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */ From 643e5c2e8c61fb21fe7a2221528016b9d712526b Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Wed, 1 Jul 2020 14:30:43 +0300 Subject: [PATCH 1637/2056] igc: Remove ledctl_ fields from the mac_info structure LED control 
is currently not implemented. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index ac3de58e6e26..694c209657b1 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -83,9 +83,6 @@ struct igc_mac_info { enum igc_mac_type type; u32 collision_delta; - u32 ledctl_default; - u32 ledctl_mode1; - u32 ledctl_mode2; u32 mc_filter_type; u32 tx_packet_delta; u32 txcw; From 4a9e9b8feed9513fb31d5d6db95a6633b93da77f Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Tue, 7 Jul 2020 14:05:03 +0300 Subject: [PATCH 1638/2056] igc: Clean up the mac_info structure The collision_delta, tx_packet_delta, txcw, adaptive_ifs and has_fwsm fields are not in use. This patch cleans up the driver code. Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 694c209657b1..4b3d0b0f917b 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -82,10 +82,7 @@ struct igc_mac_info { enum igc_mac_type type; - u32 collision_delta; u32 mc_filter_type; - u32 tx_packet_delta; - u32 txcw; u16 mta_reg_count; u16 uta_reg_count; @@ -95,8 +92,6 @@ struct igc_mac_info { u8 forced_speed_duplex; - bool adaptive_ifs; - bool has_fwsm; bool asf_firmware_present; bool arc_subsystem_valid; From db02bee2ec1d74eb4b0ab48b3f995673d6faa3c6 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sun, 26 Jul 2020 13:52:18 -0700 Subject: [PATCH 1639/2056] igc: Clean up the hw_stats structure Remove ictxptc, ictxatc, cbtmpc, cbrdpc, cbrmpc and htcbdpc fields from the hw_stats structure. According to the i225 device specification, these fields are not in use. This patch cleans up the driver code.
Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_hw.h | 6 ------ drivers/net/ethernet/intel/igc/igc_mac.c | 3 --- drivers/net/ethernet/intel/igc/igc_main.c | 2 -- drivers/net/ethernet/intel/igc/igc_regs.h | 2 -- 4 files changed, 13 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h index 4b3d0b0f917b..b9fe51b91c47 100644 --- a/drivers/net/ethernet/intel/igc/igc_hw.h +++ b/drivers/net/ethernet/intel/igc/igc_hw.h @@ -268,15 +268,9 @@ struct igc_hw_stats { u64 tsctc; u64 tsctfc; u64 iac; - u64 ictxptc; - u64 ictxatc; - u64 cbtmpc; u64 htdpmc; - u64 cbrdpc; - u64 cbrmpc; u64 rpthc; u64 hgptc; - u64 htcbdpc; u64 hgorc; u64 hgotc; u64 lenerrs; diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 02bbb8ac4f68..674b8ad21fea 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -296,9 +296,6 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw) rd32(IGC_IAC); - rd32(IGC_ICTXPTC); - rd32(IGC_ICTXATC); - rd32(IGC_RPTHC); rd32(IGC_TLPIC); rd32(IGC_RLPIC); diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index d91fa4c06f2e..7a6f2a0d413f 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -3730,8 +3730,6 @@ void igc_update_stats(struct igc_adapter *adapter) adapter->stats.tsctc += rd32(IGC_TSCTC); adapter->stats.iac += rd32(IGC_IAC); - adapter->stats.ictxptc += rd32(IGC_ICTXPTC); - adapter->stats.ictxatc += rd32(IGC_ICTXATC); /* Fill out the OS statistics structure */ net_stats->multicast = adapter->stats.mprc; diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 5ff3316717c7..b52dd9d737e8 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -172,8 +172,6 @@ #define IGC_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ #define IGC_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ #define IGC_IAC 0x04100 /* Interrupt Assertion Count */ -#define IGC_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ -#define IGC_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ #define IGC_RPTHC 0x04104 /* Rx Packets To Host */ #define IGC_TLPIC 0x04148 /* EEE Tx LPI Count */ #define IGC_RLPIC 0x0414C /* EEE Rx LPI Count */ From 360d749e0c8e689414d9a583102a59f795a6892d Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Thu, 9 Jul 2020 10:34:16 +0300 Subject: [PATCH 1640/2056] igc: Fix static checker warning drivers/net/ethernet/intel/igc/igc_mac.c:424 igc_check_for_copper_link() error: uninitialized symbol 'link'. This patch fixes this warning and initializes the 'link' symbol.
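The shape of the warning can be illustrated with a small stand-alone example; the names below are invented and do not correspond to the real igc helpers. The point is that the output parameter may be written on only some paths inside the helper, so initializing it at the declaration, as the patch does, keeps the later read well defined:

#include <stdbool.h>

/* Hypothetical helper that may return before writing through the pointer. */
static int query_link(bool *link, int precondition_failed)
{
        if (precondition_failed)
                return -1;      /* early exit: *link is never written */
        *link = true;
        return 0;
}

static bool check_link(int precondition_failed)
{
        bool link = false;      /* without "= false" the return below could read an uninitialized value */
        int err = query_link(&link, precondition_failed);

        (void)err;              /* whether *link was written depends on the helper's path */
        return link;
}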
Reported-by: Dan Carpenter Fixes: 707abf069548 ("igc: Add initial LTR support") Signed-off-by: Sasha Neftin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 674b8ad21fea..09cd0ec7ee87 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -355,8 +355,8 @@ void igc_rar_set(struct igc_hw *hw, u8 *addr, u32 index) s32 igc_check_for_copper_link(struct igc_hw *hw) { struct igc_mac_info *mac = &hw->mac; + bool link = false; s32 ret_val; - bool link; /* We only want to go out to the PHY registers to see if Auto-Neg * has completed and/or if our link status has changed. The From d21a06d5d8268fb2d5e542c9e189db0556fc534c Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 26 Jul 2020 12:58:27 +0200 Subject: [PATCH 1641/2056] sfc: drop unnecessary list_empty list_for_each_safe is able to handle an empty list. The only effect of avoiding the loop is not initializing the index variable. Drop list_empty tests in cases where these variables are not used. The semantic patch that makes this change is as follows: (http://coccinelle.lip6.fr/) @@ expression x,e; iterator name list_for_each_safe; statement S; identifier i,j; @@ -if (!(list_empty(x))) { list_for_each_safe(i,j,x) S - } ... when != i when != j ( i = e; | ? j = e; ) Signed-off-by: Julia Lawall Acked-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ptp.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 393b7cbac8b2..bea4725a4499 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1154,17 +1154,15 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) /* Drop time-expired events */ spin_lock_bh(&ptp->evt_lock); - if (!list_empty(&ptp->evt_list)) { - list_for_each_safe(cursor, next, &ptp->evt_list) { - struct efx_ptp_event_rx *evt; + list_for_each_safe(cursor, next, &ptp->evt_list) { + struct efx_ptp_event_rx *evt; - evt = list_entry(cursor, struct efx_ptp_event_rx, - link); - if (time_after(jiffies, evt->expiry)) { - list_move(&evt->link, &ptp->evt_free_list); - netif_warn(efx, hw, efx->net_dev, - "PTP rx event dropped\n"); - } + evt = list_entry(cursor, struct efx_ptp_event_rx, + link); + if (time_after(jiffies, evt->expiry)) { + list_move(&evt->link, &ptp->evt_free_list); + netif_warn(efx, hw, efx->net_dev, + "PTP rx event dropped\n"); } } spin_unlock_bh(&ptp->evt_lock); From 211e5b7a0007e6a172eafdb89c512768f82d73bf Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 26 Jul 2020 20:34:27 +0200 Subject: [PATCH 1642/2056] s390/ism: indicate correct error reason in ism_alloc_dmb() When the ism driver allocates a new dmb in ism_alloc_dmb() it must first check for and reserve a slot in the sba bitmap. When find_next_zero_bit() finds no free slot then the return code is -ENOMEM. This code conflicts with the error when the alloc() fails later in the code. As a result of that the caller can not differentiate between out-of-memory conditions and sba-bitmap-full conditions. Fix that by using the return code -ENOSPC when the sba slot reservation failed. Reviewed-by: Ursula Braun Signed-off-by: Karsten Graul Signed-off-by: David S. 
Miller --- drivers/s390/net/ism_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index c7fade836d83..5fbe9eae84d1 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -231,7 +231,7 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, ISM_DMB_BIT_OFFSET); if (bit == ISM_NR_DMBS) - return -ENOMEM; + return -ENOSPC; dmb->sba_idx = bit; } From 72b7f6c48708e4524765a2f1316063207d8e0cd5 Mon Sep 17 00:00:00 2001 From: Karsten Graul Date: Sun, 26 Jul 2020 20:34:28 +0200 Subject: [PATCH 1643/2056] net/smc: unique reason code for exceeded max dmb count When the maximum dmb buffer limit for an ism device is reached no more dmb buffers can be registered. When this happens the reason code is set to SMC_CLC_DECL_MEM indicating out-of-memory. This is the same reason code that is used when no memory could be allocated for the new dmb buffer. This is confusing for users when they see this error but there is more memory available. To solve this set a separate new reason code when the maximum dmb limit exceeded. Reviewed-by: Ursula Braun Signed-off-by: Karsten Graul Signed-off-by: David S. Miller --- net/smc/af_smc.c | 13 +++++++++---- net/smc/smc_clc.h | 1 + net/smc/smc_core.c | 4 ++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 832e36269b10..e7649bbc2b87 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -719,8 +719,11 @@ static int smc_connect_ism(struct smc_sock *smc, } /* Create send and receive buffers */ - if (smc_buf_create(smc, true)) - return smc_connect_abort(smc, SMC_CLC_DECL_MEM, + rc = smc_buf_create(smc, true); + if (rc) + return smc_connect_abort(smc, (rc == -ENOSPC) ? + SMC_CLC_DECL_MAX_DMB : + SMC_CLC_DECL_MEM, ini->cln_first_contact); smc_conn_save_peer_info(smc, aclc); @@ -1200,12 +1203,14 @@ static int smc_listen_ism_init(struct smc_sock *new_smc, } /* Create send and receive buffers */ - if (smc_buf_create(new_smc, true)) { + rc = smc_buf_create(new_smc, true); + if (rc) { if (ini->cln_first_contact == SMC_FIRST_CONTACT) smc_lgr_cleanup_early(&new_smc->conn); else smc_conn_free(&new_smc->conn); - return SMC_CLC_DECL_MEM; + return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : + SMC_CLC_DECL_MEM; } return 0; diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 76c2b150d040..cf7b45306f4e 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -48,6 +48,7 @@ #define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */ #define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */ #define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */ +#define SMC_CLC_DECL_MAX_DMB 0x030d0000 /* SMC-D DMB limit exceeded */ #define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */ #define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */ #define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */ diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index f82a2e599917..b42fa3b00d00 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1614,7 +1614,7 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr, rc = smc_ism_register_dmb(lgr, bufsize, buf_desc); if (rc) { kfree(buf_desc); - return ERR_PTR(-EAGAIN); + return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) : ERR_PTR(rc); } buf_desc->pages = virt_to_page(buf_desc->cpu_addr); /* CDC header stored in buf. 
So, pretend it was smaller */ @@ -1688,7 +1688,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb) } if (IS_ERR(buf_desc)) - return -ENOMEM; + return PTR_ERR(buf_desc); if (!is_smcd) { if (smcr_buf_map_usable_links(lgr, buf_desc, is_rmb)) { From 73cb11933c414b744193d9de660010082bc0a944 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 27 Jul 2020 10:18:34 +0300 Subject: [PATCH 1644/2056] ipmr: Copy option to correct variable Cited commit mistakenly copied provided option to 'val' instead of to 'mfc': ``` - if (copy_from_user(&mfc, optval, sizeof(mfc))) { + if (copy_from_sockptr(&val, optval, sizeof(val))) { ``` Fix this by copying the option to 'mfc'. selftest router_multicast.sh before: $ ./router_multicast.sh smcroutectl: Unknown or malformed IPC message 'a' from client. smcroutectl: failed removing multicast route, does not exist. TEST: mcast IPv4 [FAIL] Multicast not received on first host TEST: mcast IPv6 [ OK ] smcroutectl: Unknown or malformed IPC message 'a' from client. smcroutectl: failed removing multicast route, does not exist. TEST: RPF IPv4 [FAIL] Multicast not received on first host TEST: RPF IPv6 [ OK ] selftest router_multicast.sh after: $ ./router_multicast.sh TEST: mcast IPv4 [ OK ] TEST: mcast IPv6 [ OK ] TEST: RPF IPv4 [ OK ] TEST: RPF IPv6 [ OK ] Fixes: 01ccb5b48f08 ("net/ipv4: switch ip_mroute_setsockopt to sockptr_t") Signed-off-by: Ido Schimmel Reviewed-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/ipmr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index cdf3a40f9ff5..876fd6ff1ff9 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1441,7 +1441,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, ret = -EINVAL; break; } - if (copy_from_sockptr(&val, optval, sizeof(val))) { + if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) { ret = -EFAULT; break; } From 350e7ab92da8420fe97e1edf040db8c9bab750f8 Mon Sep 17 00:00:00 2001 From: Martin Varghese Date: Mon, 27 Jul 2020 13:09:19 +0530 Subject: [PATCH 1645/2056] net: Removed the device type check to add mpls support for devices MPLS has no dependency with the device type of underlying devices. Hence the device type check to add mpls support for devices can be avoided. Signed-off-by: Martin Varghese Signed-off-by: David S. 
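The net/smc change above distinguishes a full DMB table (-ENOSPC) from a genuine allocation failure (-ENOMEM) so the peer receives a meaningful decline code. A compact userspace sketch of that errno-to-reason mapping; SMC_CLC_DECL_MAX_DMB is taken from the smc_clc.h hunk above, while the SMC_CLC_DECL_MEM value and the helper name are placeholders for illustration only:

#include <errno.h>
#include <stdio.h>

/* Taken from the smc_clc.h hunk above. */
#define SMC_CLC_DECL_MAX_DMB 0x030d0000
/* Placeholder value; the authoritative definition lives in net/smc/smc_clc.h. */
#define SMC_CLC_DECL_MEM     0x01010000

/* Mirrors the pattern used in smc_connect_ism()/smc_listen_ism_init():
 * only -ENOSPC (DMB table full) maps to the new reason code. */
static unsigned int buf_create_reason(int rc)
{
        return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
}

int main(void)
{
        printf("-ENOSPC -> 0x%08x\n", buf_create_reason(-ENOSPC));
        printf("-ENOMEM -> 0x%08x\n", buf_create_reason(-ENOMEM));
        return 0;
}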
Miller --- net/mpls/af_mpls.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index fd30ea61336e..6fdd0c9f865a 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1584,21 +1584,10 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event, unsigned int flags; if (event == NETDEV_REGISTER) { + mdev = mpls_add_dev(dev); + if (IS_ERR(mdev)) + return notifier_from_errno(PTR_ERR(mdev)); - /* For now just support Ethernet, IPGRE, IP6GRE, SIT and - * IPIP devices - */ - if (dev->type == ARPHRD_ETHER || - dev->type == ARPHRD_LOOPBACK || - dev->type == ARPHRD_IPGRE || - dev->type == ARPHRD_IP6GRE || - dev->type == ARPHRD_SIT || - dev->type == ARPHRD_TUNNEL || - dev->type == ARPHRD_TUNNEL6) { - mdev = mpls_add_dev(dev); - if (IS_ERR(mdev)) - return notifier_from_errno(PTR_ERR(mdev)); - } return NOTIFY_OK; } From dfe64de974f8c8391776a376cc01c582064e8a46 Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Mon, 27 Jul 2020 05:40:36 -0400 Subject: [PATCH 1646/2056] bnxt_en: Remove PCIe non-counters from ethtool statistics Remove PCIe non-counters display from ethtool statistics, as they are not simple counters but register dump. The next few patches will add logic to detect counter roll-over and it won't work with these PCIe non-counters. There will be a follow up patch to get PCIe information via ethtool register dump. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 35 +---------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6 --- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 39 ------------------- 3 files changed, 1 insertion(+), 79 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b4f0c638c467..47f58f95d5c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3738,12 +3738,6 @@ static void bnxt_free_port_stats(struct bnxt *bp) bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext = NULL; } - - if (bp->hw_pcie_stats) { - dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - bp->hw_pcie_stats, bp->hw_pcie_stats_map); - bp->hw_pcie_stats = NULL; - } } static void bnxt_free_ring_stats(struct bnxt *bp) @@ -3826,7 +3820,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) alloc_tx_ext_stats: if (bp->hw_tx_port_stats_ext) - goto alloc_pcie_stats; + return 0; if (bp->hwrm_spec_code >= 0x10902 || (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { @@ -3837,19 +3831,6 @@ static int bnxt_alloc_stats(struct bnxt *bp) GFP_KERNEL); } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; - -alloc_pcie_stats: - if (bp->hw_pcie_stats || - !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) - return 0; - - bp->hw_pcie_stats = - dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats), - &bp->hw_pcie_stats_map, GFP_KERNEL); - if (!bp->hw_pcie_stats) - return 0; - - bp->flags |= BNXT_FLAG_PCIE_STATS; return 0; } @@ -7574,19 +7555,6 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) return rc; } -static int bnxt_hwrm_pcie_qstats(struct bnxt *bp) -{ - struct hwrm_pcie_qstats_input req = {0}; - - if (!(bp->flags & BNXT_FLAG_PCIE_STATS)) - return 0; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats)); - req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); -} - static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID) @@ -10466,7 +10434,6 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); bnxt_hwrm_port_qstats_ext(bp); - bnxt_hwrm_pcie_qstats(bp); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2acd7f958246..10286cbc3120 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1566,7 +1566,6 @@ struct bnxt { #define BNXT_FLAG_DIM 0x2000000 #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 - #define BNXT_FLAG_PCIE_STATS 0x40000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1737,12 +1736,10 @@ struct bnxt { struct tx_port_stats *hw_tx_port_stats; struct rx_port_stats_ext *hw_rx_port_stats_ext; struct tx_port_stats_ext *hw_tx_port_stats_ext; - struct pcie_ctx_hw_stats *hw_pcie_stats; dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_tx_port_stats_map; dma_addr_t hw_rx_port_stats_ext_map; dma_addr_t hw_tx_port_stats_ext_map; - dma_addr_t hw_pcie_stats_map; int hw_port_stats_size; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; @@ -1898,9 +1895,6 @@ struct bnxt { #define BNXT_TX_STATS_EXT_OFFSET(counter) \ (offsetof(struct tx_port_stats_ext, counter) / 8) -#define BNXT_PCIE_STATS_OFFSET(counter) \ - (offsetof(struct pcie_ctx_hw_stats, counter) / 8) - #define BNXT_HW_FEATURE_VLAN_ALL_RX \ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX) #define BNXT_HW_FEATURE_VLAN_ALL_TX \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 25252d5beec8..5e5c746f7d53 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -293,9 +293,6 @@ static const char * const bnxt_cmn_sw_stats_str[] = { BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ BNXT_TX_STATS_PRI_ENTRY(counter, 7) -#define BNXT_PCIE_STATS_ENTRY(counter) \ - { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) } - enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, @@ -454,24 +451,6 @@ static const struct { BNXT_TX_STATS_PRI_ENTRIES(tx_packets), }; -static const struct { - long offset; - char string[ETH_GSTRING_LEN]; -} bnxt_pcie_stats_arr[] = { - BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_link_integrity), - BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate), - BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate), - BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics), - BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics), - BNXT_PCIE_STATS_ENTRY(pcie_equalization_time), - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]), - BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]), - BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram), -}; - #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_STATS_PRI \ @@ -479,7 +458,6 @@ static const struct { ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) -#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr) static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp) { @@ -526,9 +504,6 @@ 
static int bnxt_get_num_stats(struct bnxt *bp) num_stats += BNXT_NUM_STATS_PRI; } - if (bp->flags & BNXT_FLAG_PCIE_STATS) - num_stats += BNXT_NUM_PCIE_STATS; - return num_stats; } @@ -674,14 +649,6 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, } } } - if (bp->flags & BNXT_FLAG_PCIE_STATS) { - __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats; - - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) { - buf[j] = le64_to_cpu(*(pcie_stats + - bnxt_pcie_stats_arr[i].offset)); - } - } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -782,12 +749,6 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) } } } - if (bp->flags & BNXT_FLAG_PCIE_STATS) { - for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) { - strcpy(buf, bnxt_pcie_stats_arr[i].string); - buf += ETH_GSTRING_LEN; - } - } break; case ETH_SS_TEST: if (bp->num_tests) From bfc6e5fbcbbf75967a7f673ad67fa227d77f2541 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:37 -0400 Subject: [PATCH 1647/2056] bnxt_en: Update firmware interface to 1.10.1.54. Main changes are 200G support and fixing the definitions of discard and error counters to match the hardware definitions. Because the HWRM_PORT_PHY_QCFG message size has now exceeded the max. encapsulated response message size of 96 bytes from the PF to the VF, we now need to cap this message to 96 bytes for forwarding. The forwarded response only needs to contain the basic link status and speed information and can be capped without adding the new information. v2: Fix bnxt_re compile error. Cc: Selvin Xavier Reviewed-by: Vasundhara Volam Reviewed-by: Edwin Peer Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/infiniband/hw/bnxt_re/hw_counters.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 44 ++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 468 +++++++++++++----- .../net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 +- 6 files changed, 391 insertions(+), 131 deletions(-) diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 3421a0b15983..5f5408cdf008 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, stats->value[BNXT_RE_RECOVERABLE_ERRORS] = le64_to_cpu(bnxt_re_stats->tx_bcast_pkts); stats->value[BNXT_RE_RX_DROPS] = - le64_to_cpu(bnxt_re_stats->rx_drop_pkts); + le64_to_cpu(bnxt_re_stats->rx_error_pkts); stats->value[BNXT_RE_RX_DISCARDS] = le64_to_cpu(bnxt_re_stats->rx_discard_pkts); stats->value[BNXT_RE_RX_PKTS] = diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 47f58f95d5c4..a08ada1a6949 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9605,7 +9605,7 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); + stats->tx_dropped += le64_to_cpu(hw_stats->tx_error_pkts); } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 10286cbc3120..306636d9566a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1135,6 +1135,50 @@ struct bnxt_ntuple_filter { #define BNXT_FLTR_UPDATE 1 }; +struct 
hwrm_port_phy_qcfg_output_compat { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 link; + u8 link_signal_mode; + __le16 link_speed; + u8 duplex_cfg; + u8 pause; + __le16 support_speeds; + __le16 force_link_speed; + u8 auto_mode; + u8 auto_pause; + __le16 auto_link_speed; + __le16 auto_link_speed_mask; + u8 wirespeed; + u8 lpbk; + u8 force_pause; + u8 module_status; + __le32 preemphasis; + u8 phy_maj; + u8 phy_min; + u8 phy_bld; + u8 phy_type; + u8 media_type; + u8 xcvr_pkg_type; + u8 eee_config_phy_addr; + u8 parallel_detect; + __le16 link_partner_adv_speeds; + u8 link_partner_adv_auto_mode; + u8 link_partner_adv_pause; + __le16 adv_eee_link_speed_mask; + __le16 link_partner_adv_eee_link_speed_mask; + __le32 xcvr_identifier_type_tx_lpi_timer; + __le16 fec_cfg; + u8 duplex_state; + u8 option_flags; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + u8 unused_0[7]; + u8 valid; +}; + struct bnxt_link_info { u8 phy_type; u8 media_type; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 5e5c746f7d53..be48727be6ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -142,7 +142,7 @@ static const char * const bnxt_ring_rx_stats_str[] = { "rx_mcast_packets", "rx_bcast_packets", "rx_discards", - "rx_drops", + "rx_errors", "rx_ucast_bytes", "rx_mcast_bytes", "rx_bcast_bytes", @@ -152,8 +152,8 @@ static const char * const bnxt_ring_tx_stats_str[] = { "tx_ucast_packets", "tx_mcast_packets", "tx_bcast_packets", + "tx_errors", "tx_discards", - "tx_drops", "tx_ucast_bytes", "tx_mcast_bytes", "tx_bcast_bytes", diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7e9235c8d21e..c4af6bf15e36 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -169,9 +169,14 @@ struct cmd_nums { #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL #define HWRM_RING_AGGINT_QCAPS 0x54UL + #define HWRM_RING_SCHQ_ALLOC 0x55UL + #define HWRM_RING_SCHQ_CFG 0x56UL + #define HWRM_RING_SCHQ_FREE 0x57UL #define HWRM_RING_RESET 0x5eUL #define HWRM_RING_GRP_ALLOC 0x60UL #define HWRM_RING_GRP_FREE 0x61UL + #define HWRM_RING_CFG 0x62UL + #define HWRM_RING_QCFG 0x63UL #define HWRM_RESERVED5 0x64UL #define HWRM_RESERVED6 0x65UL #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL @@ -224,6 +229,7 @@ struct cmd_nums { #define HWRM_FW_IPC_MAILBOX 0xccUL #define HWRM_FW_ECN_CFG 0xcdUL #define HWRM_FW_ECN_QCFG 0xceUL + #define HWRM_FW_SECURE_CFG 0xcfUL #define HWRM_EXEC_FWD_RESP 0xd0UL #define HWRM_REJECT_FWD_RESP 0xd1UL #define HWRM_FWD_RESP 0xd2UL @@ -337,6 +343,7 @@ struct cmd_nums { #define HWRM_FUNC_VF_BW_QCFG 0x196UL #define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL #define HWRM_FUNC_QSTATS_EXT 0x198UL + #define HWRM_STAT_EXT_CTX_QUERY 0x199UL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -353,24 +360,30 @@ struct cmd_nums { #define HWRM_TF_VERSION_GET 0x2bdUL #define HWRM_TF_SESSION_OPEN 0x2c6UL #define HWRM_TF_SESSION_ATTACH 0x2c7UL - #define HWRM_TF_SESSION_CLOSE 0x2c8UL - #define HWRM_TF_SESSION_QCFG 0x2c9UL - #define HWRM_TF_SESSION_RESC_QCAPS 0x2caUL - #define HWRM_TF_SESSION_RESC_ALLOC 0x2cbUL - #define HWRM_TF_SESSION_RESC_FREE 0x2ccUL - #define HWRM_TF_SESSION_RESC_FLUSH 0x2cdUL - #define HWRM_TF_TBL_TYPE_GET 0x2d0UL - #define 
HWRM_TF_TBL_TYPE_SET 0x2d1UL - #define HWRM_TF_CTXT_MEM_RGTR 0x2daUL - #define HWRM_TF_CTXT_MEM_UNRGTR 0x2dbUL - #define HWRM_TF_EXT_EM_QCAPS 0x2dcUL - #define HWRM_TF_EXT_EM_OP 0x2ddUL - #define HWRM_TF_EXT_EM_CFG 0x2deUL - #define HWRM_TF_EXT_EM_QCFG 0x2dfUL - #define HWRM_TF_TCAM_SET 0x2eeUL - #define HWRM_TF_TCAM_GET 0x2efUL - #define HWRM_TF_TCAM_MOVE 0x2f0UL - #define HWRM_TF_TCAM_FREE 0x2f1UL + #define HWRM_TF_SESSION_REGISTER 0x2c8UL + #define HWRM_TF_SESSION_UNREGISTER 0x2c9UL + #define HWRM_TF_SESSION_CLOSE 0x2caUL + #define HWRM_TF_SESSION_QCFG 0x2cbUL + #define HWRM_TF_SESSION_RESC_QCAPS 0x2ccUL + #define HWRM_TF_SESSION_RESC_ALLOC 0x2cdUL + #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL + #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL + #define HWRM_TF_TBL_TYPE_GET 0x2daUL + #define HWRM_TF_TBL_TYPE_SET 0x2dbUL + #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL + #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL + #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL + #define HWRM_TF_EXT_EM_OP 0x2e7UL + #define HWRM_TF_EXT_EM_CFG 0x2e8UL + #define HWRM_TF_EXT_EM_QCFG 0x2e9UL + #define HWRM_TF_EM_INSERT 0x2eaUL + #define HWRM_TF_EM_DELETE 0x2ebUL + #define HWRM_TF_TCAM_SET 0x2f8UL + #define HWRM_TF_TCAM_GET 0x2f9UL + #define HWRM_TF_TCAM_MOVE 0x2faUL + #define HWRM_TF_TCAM_FREE 0x2fbUL + #define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL + #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -391,6 +404,7 @@ struct cmd_nums { #define HWRM_DBG_QCAPS 0xff20UL #define HWRM_DBG_QCFG 0xff21UL #define HWRM_DBG_CRASHDUMP_MEDIUM_CFG 0xff22UL + #define HWRM_NVM_REQ_ARBITRATION 0xffedUL #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL #define HWRM_NVM_VALIDATE_OPTION 0xffefUL #define HWRM_NVM_FLUSH 0xfff0UL @@ -464,8 +478,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 1 -#define HWRM_VERSION_RSVD 33 -#define HWRM_VERSION_STR "1.10.1.33" +#define HWRM_VERSION_RSVD 54 +#define HWRM_VERSION_STR "1.10.1.54" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1094,6 +1108,8 @@ struct hwrm_func_vf_cfg_input { #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x100UL + #define FUNC_VF_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x200UL __le16 num_rsscos_ctxs; __le16 num_cmpl_rings; __le16 num_tx_rings; @@ -1189,10 +1205,16 @@ struct hwrm_func_qcaps_output { __le16 max_sp_tx_rings; u8 unused_0[2]; __le32 flags_ext; - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL - #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL - u8 unused_1[3]; + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_MARK_SUPPORTED 0x1UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_ECN_STATS_SUPPORTED 0x2UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PROXY_MODE_SUPPORT 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL + u8 max_schqs; + u8 unused_1[2]; u8 valid; }; @@ -1226,6 +1248,8 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 
0x80UL #define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL + #define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL + #define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1321,7 +1345,7 @@ struct hwrm_func_qcfg_output { u8 valid; }; -/* hwrm_func_cfg_input (size:704b/88B) */ +/* hwrm_func_cfg_input (size:768b/96B) */ struct hwrm_func_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1352,30 +1376,35 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST 0x800000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE 0x1000000UL #define FUNC_CFG_REQ_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x2000000UL + #define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL + #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL __le32 enables; - #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL - #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL - #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL - #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL - #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL - #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL - #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL - #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL - #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL - #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL - #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL - #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL - #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL - #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL - #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL - #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL - #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL - #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL - #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL - #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL - #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL - #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL - #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL + #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL + #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL + #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL + #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL + #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL + #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL + #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL + #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL + #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL + #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL + #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL + #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL + #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL + #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL + #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL + #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL + #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL + #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL + #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL + #define FUNC_CFG_REQ_ENABLES_NUM_MSIX 0x200000UL + #define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL + #define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL + #define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL __le16 mtu; __le16 mru; __le16 num_rsscos_ctxs; @@ -1449,6 +1478,8 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xf0UL #define 
FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4 __le16 num_mcast_filters; + __le16 schq_id; + u8 unused_0[6]; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -1507,7 +1538,7 @@ struct hwrm_func_qstats_output { u8 valid; }; -/* hwrm_func_qstats_ext_input (size:192b/24B) */ +/* hwrm_func_qstats_ext_input (size:256b/32B) */ struct hwrm_func_qstats_ext_input { __le16 req_type; __le16 cmpl_ring; @@ -1520,7 +1551,12 @@ struct hwrm_func_qstats_ext_input { #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK - u8 unused_0[5]; + u8 unused_0[1]; + __le32 enables; + #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL + __le16 schq_id; + __le16 traffic_class; + u8 unused_1[4]; }; /* hwrm_func_qstats_ext_output (size:1472b/184B) */ @@ -1533,15 +1569,15 @@ struct hwrm_func_qstats_ext_output { __le64 rx_mcast_pkts; __le64 rx_bcast_pkts; __le64 rx_discard_pkts; - __le64 rx_drop_pkts; + __le64 rx_error_pkts; __le64 rx_ucast_bytes; __le64 rx_mcast_bytes; __le64 rx_bcast_bytes; __le64 tx_ucast_pkts; __le64 tx_mcast_pkts; __le64 tx_bcast_pkts; + __le64 tx_error_pkts; __le64 tx_discard_pkts; - __le64 tx_drop_pkts; __le64 tx_ucast_bytes; __le64 tx_mcast_bytes; __le64 tx_bcast_bytes; @@ -2376,33 +2412,39 @@ struct hwrm_port_phy_cfg_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL - #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL - #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL - #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL - #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL - #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL - #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL - #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL - #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL - #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL + #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL + #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL + #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL __le32 enables; - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL - #define 
PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL - #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL - #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL - #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL - #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL - #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL - #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL - #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL + #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL + #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL + #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL + #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL + #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL __le16 port_id; __le16 force_link_speed; #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL @@ -2415,7 +2457,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -2446,7 +2487,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -2464,7 +2504,6 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL - #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL @@ -2488,11 +2527,19 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL - u8 unused_2[2]; + __le16 force_pam4_link_speed; + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB __le32 tx_lpi_timer; #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 - __le32 unused_3; + __le16 auto_link_pam4_speed_mask; + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL + u8 unused_2[2]; }; /* hwrm_port_phy_cfg_output (size:128b/16B) */ @@ 
-2526,7 +2573,7 @@ struct hwrm_port_phy_qcfg_input { u8 unused_0[6]; }; -/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +/* hwrm_port_phy_qcfg_output (size:832b/104B) */ struct hwrm_port_phy_qcfg_output { __le16 error_code; __le16 req_type; @@ -2537,7 +2584,10 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK - u8 unused_0; + u8 link_signal_mode; + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 __le16 link_speed; #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL @@ -2574,7 +2624,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL - #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_200GB 0x4000UL __le16 force_link_speed; #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL @@ -2586,7 +2635,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB u8 auto_mode; @@ -2611,7 +2659,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_200GB 0x7d0UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB __le16 auto_link_speed_mask; @@ -2629,7 +2676,6 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL - #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_200GB 0x4000UL u8 wirespeed; #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL @@ -2763,13 +2809,21 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 __le16 fec_cfg; - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL - #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define 
PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL u8 duplex_state; #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL @@ -2778,7 +2832,24 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - u8 unused_2[7]; + __le16 support_pam4_speeds; + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL + #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL + __le16 force_pam4_link_speed; + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL + #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB + __le16 auto_pam4_link_speed_mask; + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL + __le16 link_partner_pam4_adv_speeds; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL + u8 unused_0[7]; u8 valid; }; @@ -3304,19 +3375,20 @@ struct hwrm_port_phy_qcaps_input { u8 unused_0[6]; }; -/* hwrm_port_phy_qcaps_output (size:192b/24B) */ +/* hwrm_port_phy_qcaps_output (size:256b/32B) */ struct hwrm_port_phy_qcaps_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; u8 flags; - #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL - #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL - #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xf0UL - #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 4 + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL + #define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL + #define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5 u8 port_cnt; #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL @@ -3339,7 +3411,6 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL - #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_200GB 0x4000UL __le16 supported_speeds_auto_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL @@ -3355,7 +3426,6 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL - #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_200GB 0x4000UL __le16 supported_speeds_eee_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL @@ -3372,8 +3442,18 @@ struct hwrm_port_phy_qcaps_output { __le32 valid_tx_lpi_timer_high; #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 - #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL - #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 + #define PORT_PHY_QCAPS_RESP_RSVD_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD_SFT 24 + __le16 supported_pam4_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL + __le16 supported_pam4_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL + u8 unused_0[3]; + u8 valid; }; /* hwrm_port_phy_i2c_read_input (size:320b/40B) */ @@ -3812,7 +3892,7 @@ struct hwrm_queue_qportcfg_input { u8 unused_0; }; -/* hwrm_queue_qportcfg_output (size:256b/32B) */ +/* hwrm_queue_qportcfg_output (size:1344b/168B) */ struct hwrm_queue_qportcfg_output { __le16 error_code; __le16 req_type; @@ -3898,6 +3978,49 @@ struct hwrm_queue_qportcfg_output { #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + u8 unused_0; + char qid0_name[16]; + char qid1_name[16]; + char qid2_name[16]; + char qid3_name[16]; + char qid4_name[16]; + char qid5_name[16]; + char qid6_name[16]; + char qid7_name[16]; + u8 unused_1[7]; + u8 valid; +}; + +/* hwrm_queue_qcfg_input (size:192b/24B) */ +struct hwrm_queue_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define QUEUE_QCFG_REQ_FLAGS_PATH 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_TX 0x0UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_RX 0x1UL + #define QUEUE_QCFG_REQ_FLAGS_PATH_LAST QUEUE_QCFG_REQ_FLAGS_PATH_RX + __le32 queue_id; +}; + +/* hwrm_queue_qcfg_output (size:128b/16B) */ +struct hwrm_queue_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 queue_len; + u8 service_profile; + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LOSSLESS 0x1UL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QCFG_RESP_SERVICE_PROFILE_LAST QUEUE_QCFG_RESP_SERVICE_PROFILE_UNKNOWN + u8 queue_cfg_info; + #define QUEUE_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL + u8 unused_0; u8 valid; }; @@ -4938,6 +5061,7 @@ struct hwrm_vnic_cfg_input 
{ #define VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID 0x20UL #define VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID 0x40UL #define VNIC_CFG_REQ_ENABLES_QUEUE_ID 0x80UL + #define VNIC_CFG_REQ_ENABLES_RX_CSUM_V2_MODE 0x100UL __le16 vnic_id; __le16 dflt_ring_grp; __le16 rss_rule; @@ -4947,7 +5071,12 @@ struct hwrm_vnic_cfg_input { __le16 default_rx_ring_id; __le16 default_cmpl_ring_id; __le16 queue_id; - u8 unused0[6]; + u8 rx_csum_v2_mode; + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_DEFAULT 0x0UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_ALL_OK 0x1UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX 0x2UL + #define VNIC_CFG_REQ_RX_CSUM_V2_MODE_LAST VNIC_CFG_REQ_RX_CSUM_V2_MODE_MAX + u8 unused0[5]; }; /* hwrm_vnic_cfg_output (size:128b/16B) */ @@ -4989,6 +5118,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL #define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL + #define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -5155,15 +5285,18 @@ struct hwrm_vnic_plcmodes_cfg_input { #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL + #define VNIC_PLCMODES_CFG_REQ_FLAGS_VIRTIO_PLACEMENT 0x40UL __le32 enables; #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL + #define VNIC_PLCMODES_CFG_REQ_ENABLES_MAX_BDS_VALID 0x8UL __le32 vnic_id; __le16 jumbo_thresh; __le16 hds_offset; __le16 hds_threshold; - u8 unused_0[6]; + __le16 max_bds; + u8 unused_0[4]; }; /* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */ @@ -5231,6 +5364,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL + #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL u8 ring_type; #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL @@ -5246,7 +5380,7 @@ struct hwrm_ring_alloc_input { __le32 fbo; u8 page_size; u8 page_tbl_depth; - u8 unused_1[2]; + __le16 schq_id; __le32 length; __le16 logical_id; __le16 cmpl_ring_id; @@ -5344,11 +5478,12 @@ struct hwrm_ring_reset_input { __le16 target_id; __le64 resp_addr; u8 ring_type; - #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL - #define RING_RESET_REQ_RING_TYPE_TX 0x1UL - #define RING_RESET_REQ_RING_TYPE_RX 0x2UL - #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL - #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL + #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL + #define RING_RESET_REQ_RING_TYPE_TX 0x1UL + #define RING_RESET_REQ_RING_TYPE_RX 0x2UL + #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL + #define RING_RESET_REQ_RING_TYPE_RX_RING_GRP 0x6UL + #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_RX_RING_GRP u8 unused_0; __le16 ring_id; u8 unused_1[4]; @@ -5529,6 +5664,7 @@ struct hwrm_ring_grp_free_output { u8 unused_0[7]; u8 valid; }; + #define DEFAULT_FLOW_ID 0xFFFFFFFFUL #define ROCEV1_FLOW_ID 0xFFFFFFFEUL #define ROCEV2_FLOW_ID 0xFFFFFFFDUL @@ -6816,15 +6952,15 @@ struct ctx_hw_stats { __le64 rx_mcast_pkts; __le64 rx_bcast_pkts; __le64 rx_discard_pkts; - __le64 rx_drop_pkts; + __le64 rx_error_pkts; __le64 rx_ucast_bytes; __le64 rx_mcast_bytes; __le64 rx_bcast_bytes; __le64 tx_ucast_pkts; __le64 
tx_mcast_pkts; __le64 tx_bcast_pkts; + __le64 tx_error_pkts; __le64 tx_discard_pkts; - __le64 tx_drop_pkts; __le64 tx_ucast_bytes; __le64 tx_mcast_bytes; __le64 tx_bcast_bytes; @@ -6840,15 +6976,15 @@ struct ctx_hw_stats_ext { __le64 rx_mcast_pkts; __le64 rx_bcast_pkts; __le64 rx_discard_pkts; - __le64 rx_drop_pkts; + __le64 rx_error_pkts; __le64 rx_ucast_bytes; __le64 rx_mcast_bytes; __le64 rx_bcast_bytes; __le64 tx_ucast_pkts; __le64 tx_mcast_pkts; __le64 tx_bcast_pkts; + __le64 tx_error_pkts; __le64 tx_discard_pkts; - __le64 tx_drop_pkts; __le64 tx_ucast_bytes; __le64 tx_mcast_bytes; __le64 tx_bcast_bytes; @@ -6915,7 +7051,9 @@ struct hwrm_stat_ctx_query_input { __le16 target_id; __le64 resp_addr; __le32 stat_ctx_id; - u8 unused_0[4]; + u8 flags; + #define STAT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; }; /* hwrm_stat_ctx_query_output (size:1408b/176B) */ @@ -6948,6 +7086,50 @@ struct hwrm_stat_ctx_query_output { u8 valid; }; +/* hwrm_stat_ext_ctx_query_input (size:192b/24B) */ +struct hwrm_stat_ext_ctx_query_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 stat_ctx_id; + u8 flags; + #define STAT_EXT_CTX_QUERY_REQ_FLAGS_COUNTER_MASK 0x1UL + u8 unused_0[3]; +}; + +/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */ +struct hwrm_stat_ext_ctx_query_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le64 rx_ucast_pkts; + __le64 rx_mcast_pkts; + __le64 rx_bcast_pkts; + __le64 rx_discard_pkts; + __le64 rx_error_pkts; + __le64 rx_ucast_bytes; + __le64 rx_mcast_bytes; + __le64 rx_bcast_bytes; + __le64 tx_ucast_pkts; + __le64 tx_mcast_pkts; + __le64 tx_bcast_pkts; + __le64 tx_error_pkts; + __le64 tx_discard_pkts; + __le64 tx_ucast_bytes; + __le64 tx_mcast_bytes; + __le64 tx_bcast_bytes; + __le64 rx_tpa_eligible_pkt; + __le64 rx_tpa_eligible_bytes; + __le64 rx_tpa_pkt; + __le64 rx_tpa_bytes; + __le64 rx_tpa_errors; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */ struct hwrm_stat_ctx_clr_stats_input { __le16 req_type; @@ -7497,6 +7679,29 @@ struct hwrm_wol_reason_qcfg_output { u8 valid; }; +/* hwrm_dbg_read_direct_input (size:256b/32B) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* hwrm_dbg_read_direct_output (size:128b/16B) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 crc32; + u8 unused_0[3]; + u8 valid; +}; + /* coredump_segment_record (size:128b/16B) */ struct coredump_segment_record { __le16 component_id; @@ -7507,7 +7712,8 @@ struct coredump_segment_record { u8 seg_flags; u8 compress_flags; #define SFLAG_COMPRESSED_ZLIB 0x1UL - u8 unused_0[6]; + u8 unused_0[2]; + __le32 segment_len; }; /* hwrm_dbg_coredump_list_input (size:256b/32B) */ @@ -7620,7 +7826,8 @@ struct hwrm_dbg_ring_info_get_input { #define DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL 0x0UL #define DBG_RING_INFO_GET_REQ_RING_TYPE_TX 0x1UL #define DBG_RING_INFO_GET_REQ_RING_TYPE_RX 0x2UL - #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_RX + #define DBG_RING_INFO_GET_REQ_RING_TYPE_NQ 0x3UL + #define DBG_RING_INFO_GET_REQ_RING_TYPE_LAST DBG_RING_INFO_GET_REQ_RING_TYPE_NQ u8 unused_0[3]; __le32 fw_ring_id; }; @@ -7633,7 +7840,8 @@ struct hwrm_dbg_ring_info_get_output { __le16 resp_len; __le32 producer_index; 
__le32 consumer_index; - u8 unused_0[7]; + __le32 cag_vector_ctrl; + u8 unused_0[3]; u8 valid; }; @@ -7922,6 +8130,7 @@ struct hwrm_nvm_install_update_input { #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE 0x1UL #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG 0x2UL #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG 0x4UL + #define NVM_INSTALL_UPDATE_REQ_FLAGS_VERIFY_ONLY 0x8UL u8 unused_0[2]; }; @@ -8101,7 +8310,14 @@ struct hwrm_selftest_qlist_output { char test5_name[32]; char test6_name[32]; char test7_name[32]; - u8 unused_2[7]; + u8 eyescope_target_BER_support; + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED 0x0UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED 0x1UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E10_SUPPORTED 0x2UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E11_SUPPORTED 0x3UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED 0x4UL + #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_LAST SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E12_SUPPORTED + u8 unused_2[6]; u8 valid; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 392e32c7122a..cc2ee4d0bd18 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) rc = bnxt_hwrm_exec_fwd_resp( bp, vf, sizeof(struct hwrm_port_phy_qcfg_input)); } else { - struct hwrm_port_phy_qcfg_output phy_qcfg_resp; + struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0}; struct hwrm_port_phy_qcfg_input *phy_qcfg_req; phy_qcfg_req = From 24c93443fe21ca3ec87b7b121548daa3b57a34af Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:38 -0400 Subject: [PATCH 1648/2056] bnxt_en: Use macros to define port statistics size and offset. The port statistics structures have hard coded padding and offset. Define macros to make this look cleaner. Reviewed-by: Pavan Chebbi Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
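The bnxt_en patch described above replaces the hard coded "+ 512" and "+ 1024" padding arithmetic with macros built from sizeof(); the actual macro definitions appear in the bnxt.h hunk that follows. A toy illustration of the same idea, using small stand-in structs (the real rx_port_stats/tx_port_stats layouts are defined in bnxt_hsi.h and are much larger):

#include <stdio.h>

/* Stand-ins for the hardware stats layouts, for illustration only. */
struct rx_port_stats { unsigned long long rx_bytes, rx_pkts; };
struct tx_port_stats { unsigned long long tx_bytes, tx_pkts; };

/* Same shape as the macros added in bnxt.h: the TX block starts 512 bytes
 * after the RX block, and the whole buffer carries 1024 bytes of padding. */
#define BNXT_TX_PORT_STATS_BYTE_OFFSET (sizeof(struct rx_port_stats) + 512)
#define BNXT_PORT_STATS_SIZE \
        (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)

int main(void)
{
        printf("tx offset: %zu bytes, total: %zu bytes\n",
               (size_t)BNXT_TX_PORT_STATS_BYTE_OFFSET,
               (size_t)BNXT_PORT_STATS_SIZE);
        return 0;
}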
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8 +++++++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a08ada1a6949..9d0800c56e7b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3788,8 +3788,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (bp->hw_rx_port_stats) goto alloc_ext_stats; - bp->hw_port_stats_size = sizeof(struct rx_port_stats) + - sizeof(struct tx_port_stats) + 1024; + bp->hw_port_stats_size = BNXT_PORT_STATS_SIZE; bp->hw_rx_port_stats = dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, @@ -3798,9 +3797,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (!bp->hw_rx_port_stats) return -ENOMEM; - bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512; + bp->hw_tx_port_stats = (void *)bp->hw_rx_port_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET; bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - sizeof(struct rx_port_stats) + 512; + BNXT_TX_PORT_STATS_BYTE_OFFSET; bp->flags |= BNXT_FLAG_PORT_STATS; alloc_ext_stats: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 306636d9566a..df6289722721 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1926,12 +1926,18 @@ struct bnxt { struct device *hwmon_dev; }; +#define BNXT_PORT_STATS_SIZE \ + (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024) + +#define BNXT_TX_PORT_STATS_BYTE_OFFSET \ + (sizeof(struct rx_port_stats) + 512) + #define BNXT_RX_STATS_OFFSET(counter) \ (offsetof(struct rx_port_stats, counter) / 8) #define BNXT_TX_STATS_OFFSET(counter) \ ((offsetof(struct tx_port_stats, counter) + \ - sizeof(struct rx_port_stats) + 512) / 8) + BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8) #define BNXT_RX_STATS_EXT_OFFSET(counter) \ (offsetof(struct rx_port_stats_ext, counter) / 8) From 177a6cde47fcf23e5826c6224fc53038480451c2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:39 -0400 Subject: [PATCH 1649/2056] bnxt_en: Refactor statistics code and structures. The driver manages multiple statistics structures of different sizes. They are all allocated, freed, and handled practically the same. Define a new bnxt_stats_mem structure and common allocation and free functions for all staistics memory blocks. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
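A minimal userspace sketch of the consolidation described above: one bnxt_stats_mem-style descriptor plus a shared alloc/free pair. Plain calloc()/free() stand in for dma_alloc_coherent()/dma_free_coherent(), so the DMA mapping handle is omitted and the example size is arbitrary:

#include <stdio.h>
#include <stdlib.h>

/* Same idea as struct bnxt_stats_mem: one descriptor per statistics block. */
struct stats_mem {
        void *hw_stats;   /* CPU address of the stats buffer */
        int len;          /* size in bytes */
};

static int stats_mem_alloc(struct stats_mem *stats)
{
        stats->hw_stats = calloc(1, stats->len);
        return stats->hw_stats ? 0 : -1;  /* the driver returns -ENOMEM here */
}

static void stats_mem_free(struct stats_mem *stats)
{
        if (stats->hw_stats) {
                free(stats->hw_stats);
                stats->hw_stats = NULL;
        }
}

int main(void)
{
        struct stats_mem port_stats = { .len = 1536 };  /* arbitrary example size */

        if (stats_mem_alloc(&port_stats) == 0) {
                printf("allocated %d bytes for port stats\n", port_stats.len);
                stats_mem_free(&port_stats);
        }
        return 0;
}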
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 129 ++++++++---------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 21 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 2 +- .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 13 +- 4 files changed, 76 insertions(+), 89 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9d0800c56e7b..e08d636b7d91 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3711,61 +3711,55 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) return 0; } +static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) +{ + if (stats->hw_stats) { + dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, + stats->hw_stats_map); + stats->hw_stats = NULL; + } +} + +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) +{ + stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, + &stats->hw_stats_map, GFP_KERNEL); + if (!stats->hw_stats) + return -ENOMEM; + + memset(stats->hw_stats, 0, stats->len); + return 0; +} + static void bnxt_free_port_stats(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - bp->flags &= ~BNXT_FLAG_PORT_STATS; bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; - if (bp->hw_rx_port_stats) { - dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, - bp->hw_rx_port_stats, - bp->hw_rx_port_stats_map); - bp->hw_rx_port_stats = NULL; - } - - if (bp->hw_tx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext), - bp->hw_tx_port_stats_ext, - bp->hw_tx_port_stats_ext_map); - bp->hw_tx_port_stats_ext = NULL; - } - - if (bp->hw_rx_port_stats_ext) { - dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - bp->hw_rx_port_stats_ext, - bp->hw_rx_port_stats_ext_map); - bp->hw_rx_port_stats_ext = NULL; - } + bnxt_free_stats_mem(bp, &bp->port_stats); + bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext); + bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext); } static void bnxt_free_ring_stats(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - int size, i; + int i; if (!bp->bnapi) return; - size = bp->hw_ring_stats_size; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - if (cpr->hw_stats) { - dma_free_coherent(&pdev->dev, size, cpr->hw_stats, - cpr->hw_stats_map); - cpr->hw_stats = NULL; - } + bnxt_free_stats_mem(bp, &cpr->stats); } } static int bnxt_alloc_stats(struct bnxt *bp) { u32 size, i; - struct pci_dev *pdev = bp->pdev; + int rc; size = bp->hw_ring_stats_size; @@ -3773,11 +3767,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size, - &cpr->hw_stats_map, - GFP_KERNEL); - if (!cpr->hw_stats) - return -ENOMEM; + cpr->stats.len = size; + rc = bnxt_alloc_stats_mem(bp, &cpr->stats); + if (rc) + return rc; cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } @@ -3785,22 +3778,14 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700) return 0; - if (bp->hw_rx_port_stats) + if (bp->port_stats.hw_stats) goto alloc_ext_stats; - bp->hw_port_stats_size = BNXT_PORT_STATS_SIZE; + bp->port_stats.len = BNXT_PORT_STATS_SIZE; + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats); + if (rc) + return rc; - bp->hw_rx_port_stats = - dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, - &bp->hw_rx_port_stats_map, 
- GFP_KERNEL); - if (!bp->hw_rx_port_stats) - return -ENOMEM; - - bp->hw_tx_port_stats = (void *)bp->hw_rx_port_stats + - BNXT_TX_PORT_STATS_BYTE_OFFSET; - bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + - BNXT_TX_PORT_STATS_BYTE_OFFSET; bp->flags |= BNXT_FLAG_PORT_STATS; alloc_ext_stats: @@ -3809,26 +3794,26 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) return 0; - if (bp->hw_rx_port_stats_ext) + if (bp->rx_port_stats_ext.hw_stats) goto alloc_tx_ext_stats; - bp->hw_rx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), - &bp->hw_rx_port_stats_ext_map, GFP_KERNEL); - if (!bp->hw_rx_port_stats_ext) + bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext); + /* Extended stats are optional */ + if (rc) return 0; alloc_tx_ext_stats: - if (bp->hw_tx_port_stats_ext) + if (bp->tx_port_stats_ext.hw_stats) return 0; if (bp->hwrm_spec_code >= 0x10902 || (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { - bp->hw_tx_port_stats_ext = - dma_alloc_coherent(&pdev->dev, - sizeof(struct tx_port_stats_ext), - &bp->hw_tx_port_stats_ext_map, - GFP_KERNEL); + bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext); + /* Extended stats are optional */ + if (rc) + return 0; } bp->flags |= BNXT_FLAG_PORT_STATS_EXT; return 0; @@ -6439,7 +6424,7 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map); + req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -7480,8 +7465,9 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); + req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + BNXT_TX_PORT_STATS_BYTE_OFFSET); + req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } @@ -7500,11 +7486,11 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); - tx_stat_size = bp->hw_tx_port_stats_ext ? - sizeof(*bp->hw_tx_port_stats_ext) : 0; + req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + tx_stat_size = bp->tx_port_stats_ext.hw_stats ? 
+ sizeof(struct tx_port_stats_ext) : 0; req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map); + req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { @@ -9582,7 +9568,7 @@ static void bnxt_get_ring_stats(struct bnxt *bp, for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw_stats = cpr->hw_stats; + struct ctx_hw_stats *hw_stats = cpr->stats.hw_stats; stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); @@ -9643,8 +9629,9 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { - struct rx_port_stats *rx = bp->hw_rx_port_stats; - struct tx_port_stats *tx = bp->hw_tx_port_stats; + struct rx_port_stats *rx = bp->port_stats.hw_stats; + struct tx_port_stats *tx = bp->port_stats.hw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET; stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index df6289722721..7e9fe1fe33a9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -919,6 +919,12 @@ struct bnxt_sw_stats { struct bnxt_cmn_sw_stats cmn; }; +struct bnxt_stats_mem { + void *hw_stats; + dma_addr_t hw_stats_map; + int len; +}; + struct bnxt_cp_ring_info { struct bnxt_napi *bnapi; u32 cp_raw_cons; @@ -943,8 +949,7 @@ struct bnxt_cp_ring_info { dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; - struct ctx_hw_stats *hw_stats; - dma_addr_t hw_stats_map; + struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; struct bnxt_sw_stats sw_stats; @@ -1776,15 +1781,9 @@ struct bnxt { dma_addr_t hwrm_cmd_kong_resp_dma_addr; struct rtnl_link_stats64 net_stats_prev; - struct rx_port_stats *hw_rx_port_stats; - struct tx_port_stats *hw_tx_port_stats; - struct rx_port_stats_ext *hw_rx_port_stats_ext; - struct tx_port_stats_ext *hw_tx_port_stats_ext; - dma_addr_t hw_rx_port_stats_map; - dma_addr_t hw_tx_port_stats_map; - dma_addr_t hw_rx_port_stats_ext_map; - dma_addr_t hw_tx_port_stats_ext_map; - int hw_port_stats_size; + struct bnxt_stats_mem port_stats; + struct bnxt_stats_mem rx_port_stats_ext; + struct bnxt_stats_mem tx_port_stats_ext; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; u16 hw_ring_stats_size; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 02b27551d34d..8e90224c43a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -544,7 +544,7 @@ static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct bnxt *bp = netdev_priv(dev); - __le64 *stats = (__le64 *)bp->hw_rx_port_stats; + __le64 *stats = bp->port_stats.hw_stats; struct ieee_pfc *my_pfc = bp->ieee_pfc; long rx_off, tx_off; int i, rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index be48727be6ca..ae89f507677d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -559,7 +559,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - __le64 *hw_stats = (__le64 *)cpr->hw_stats; + struct ctx_hw_stats *hw = cpr->stats.hw_stats; + __le64 *hw_stats = cpr->stats.hw_stats; u64 *sw; int k; @@ -593,9 +594,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = sw[k]; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += - le64_to_cpu(cpr->hw_stats->rx_discard_pkts); + le64_to_cpu(hw->rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += - le64_to_cpu(cpr->hw_stats->tx_discard_pkts); + le64_to_cpu(hw->tx_discard_pkts); } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -603,7 +604,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS) { - __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; + __le64 *port_stats = bp->port_stats.hw_stats; for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) { buf[j] = le64_to_cpu(*(port_stats + @@ -611,8 +612,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, } } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - __le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext; - __le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext; + __le64 *rx_port_stats_ext = bp->rx_port_stats_ext.hw_stats; + __le64 *tx_port_stats_ext = bp->tx_port_stats_ext.hw_stats; for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { buf[j] = le64_to_cpu(*(rx_port_stats_ext + From a37120b22e540b4eb068addf9be3f63b64dca690 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:40 -0400 Subject: [PATCH 1650/2056] bnxt_en: Allocate additional memory for all statistics blocks. Some of these DMAed hardware counters are not full 64-bit counters and so we need to accumulate them as they overflow. Allocate copies of these DMA statistics memory blocks with the same size for accumulation. The hardware counter widths are also counter specific so we allocate memory for masks that correspond to each counter. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 29 +++++++++++++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e08d636b7d91..b676d78e5c14 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3713,6 +3713,10 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) { + kfree(stats->hw_masks); + stats->hw_masks = NULL; + kfree(stats->sw_stats); + stats->sw_stats = NULL; if (stats->hw_stats) { dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats, stats->hw_stats_map); @@ -3720,7 +3724,8 @@ static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) } } -static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) +static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, + bool alloc_masks) { stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len, &stats->hw_stats_map, GFP_KERNEL); @@ -3728,7 +3733,21 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats) return -ENOMEM; memset(stats->hw_stats, 0, stats->len); + + stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); + if (!stats->sw_stats) + goto stats_mem_err; + + if (alloc_masks) { + stats->hw_masks = kzalloc(stats->len, GFP_KERNEL); + if (!stats->hw_masks) + goto stats_mem_err; + } return 0; + +stats_mem_err: + bnxt_free_stats_mem(bp, stats); + return -ENOMEM; } static void bnxt_free_port_stats(struct bnxt *bp) @@ -3768,7 +3787,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; cpr->stats.len = size; - rc = bnxt_alloc_stats_mem(bp, &cpr->stats); + rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i); if (rc) return rc; @@ -3782,7 +3801,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) goto alloc_ext_stats; bp->port_stats.len = BNXT_PORT_STATS_SIZE; - rc = bnxt_alloc_stats_mem(bp, &bp->port_stats); + rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true); if (rc) return rc; @@ -3798,7 +3817,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) goto alloc_tx_ext_stats; bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext); - rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true); /* Extended stats are optional */ if (rc) return 0; @@ -3810,7 +3829,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (bp->hwrm_spec_code >= 0x10902 || (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) { bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext); - rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext); + rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true); /* Extended stats are optional */ if (rc) return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7e9fe1fe33a9..69672ec56430 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -920,6 +920,8 @@ struct bnxt_sw_stats { }; struct bnxt_stats_mem { + u64 *sw_stats; + u64 *hw_masks; void *hw_stats; dma_addr_t hw_stats_map; int len; From d752d0536c979068084e0c60ff506c07e1cf613e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:41 -0400 Subject: [PATCH 1651/2056] bnxt_en: Retrieve hardware counter masks from firmware if available. 
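Context for the counter masks allocated above and queried from firmware below: each mask records how many bits of a given hardware counter are actually implemented, so a full 64-bit software total can be carried across rollovers. A minimal standalone sketch of that accumulation scheme (plain C, illustrative names, mirroring the bnxt_add_one_ctr() helper added later in this series; not code taken from any patch here):

#include <stdint.h>

/* Fold a hardware counter that is only 'mask' bits wide (e.g.
 * mask = (1ULL << 48) - 1 for a 48-bit counter) into a 64-bit
 * software accumulator, accounting for wrap-around.
 */
static void accumulate_counter(uint64_t hw, uint64_t *sw, uint64_t mask)
{
	uint64_t tmp = (*sw & ~mask) | hw;	/* splice in the new low bits */

	if (hw < (*sw & mask))			/* low bits went backwards: counter wrapped */
		tmp += mask + 1;
	*sw = tmp;
}

For example, with a 48-bit mask a raw read that drops from 0xFFFFFFFFFFFE to 0x0A is treated as one wrap, so the accumulated software total keeps increasing monotonically instead of jumping backwards.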
Newer firmware has a new call HWRM_FUNC_QSTATS_EXT to retrieve the masks of all ring counters. Make this call when supported to initialize the hardware masks of all ring counters. If the call is not available, assume 48-bit ring counter masks on P5 chips. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 64 +++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b676d78e5c14..58792fddf510 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3750,6 +3750,69 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, return -ENOMEM; } +static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = mask; +} + +static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) +{ + int i; + + for (i = 0; i < count; i++) + mask_arr[i] = le64_to_cpu(hw_mask_arr[i]); +} + +static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, + struct bnxt_stats_mem *stats) +{ + struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qstats_ext_input req = {0}; + __le64 *hw_masks; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) || + !(bp->flags & BNXT_FLAG_CHIP_P5)) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); + req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto qstat_exit; + + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + +qstat_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static void bnxt_init_stats(struct bnxt *bp) +{ + struct bnxt_napi *bnapi = bp->bnapi[0]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + u64 mask; + int rc; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + rc = bnxt_hwrm_func_qstat_ext(bp, stats); + if (rc) { + if (bp->flags & BNXT_FLAG_CHIP_P5) + mask = (1ULL << 48) - 1; + else + mask = -1ULL; + bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); + } +} + static void bnxt_free_port_stats(struct bnxt *bp) { bp->flags &= ~BNXT_FLAG_PORT_STATS; @@ -4037,6 +4100,7 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) rc = bnxt_alloc_stats(bp); if (rc) goto alloc_mem_err; + bnxt_init_stats(bp); rc = bnxt_alloc_ntp_fltrs(bp); if (rc) From 531d1d269c1d432d691026c82d04b7bb5f1ae318 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:42 -0400 Subject: [PATCH 1652/2056] bnxt_en: Retrieve hardware masks for port counters. If supported by newer firmware, make the firmware call to query all the port counter masks. If not supported, assume 40-bit port counter masks. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 75 +++++++++++++++++++++-- 1 file changed, 70 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 58792fddf510..1327f0782076 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3793,13 +3793,19 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, return rc; } +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags); +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags); + static void bnxt_init_stats(struct bnxt *bp) { struct bnxt_napi *bnapi = bp->bnapi[0]; struct bnxt_cp_ring_info *cpr; struct bnxt_stats_mem *stats; + __le64 *rx_stats, *tx_stats; + int rc, rx_count, tx_count; + u64 *rx_masks, *tx_masks; u64 mask; - int rc; + u8 flags; cpr = &bnapi->cp_ring; stats = &cpr->stats; @@ -3811,6 +3817,54 @@ static void bnxt_init_stats(struct bnxt *bp) mask = -1ULL; bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8); } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + stats = &bp->port_stats; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats) / 8; + tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + tx_count = sizeof(struct tx_port_stats) / 8; + + flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count); + bnxt_hwrm_port_qstats(bp, 0); + } + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + stats = &bp->rx_port_stats_ext; + rx_stats = stats->hw_stats; + rx_masks = stats->hw_masks; + rx_count = sizeof(struct rx_port_stats_ext) / 8; + stats = &bp->tx_port_stats_ext; + tx_stats = stats->hw_stats; + tx_masks = stats->hw_masks; + tx_count = sizeof(struct tx_port_stats_ext) / 8; + + flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + rc = bnxt_hwrm_port_qstats_ext(bp, flags); + if (rc) { + mask = (1ULL << 40) - 1; + + bnxt_fill_masks(rx_masks, mask, rx_count); + if (tx_stats) + bnxt_fill_masks(tx_masks, mask, tx_count); + } else { + bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count); + if (tx_stats) + bnxt_copy_hw_masks(tx_masks, tx_stats, + tx_count); + bnxt_hwrm_port_qstats_ext(bp, 0); + } + } } static void bnxt_free_port_stats(struct bnxt *bp) @@ -7538,7 +7592,7 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats(struct bnxt *bp) +static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { struct bnxt_pf_info *pf = &bp->pf; struct hwrm_port_qstats_input req = {0}; @@ -7546,6 +7600,10 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + + req.flags = flags; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); req.port_id = cpu_to_le16(pf->port_id); req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + @@ -7554,7 +7612,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { 
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; @@ -7566,7 +7624,11 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) return 0; + if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) + return -EOPNOTSUPP; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); + req.flags = flags; req.port_id = cpu_to_le16(pf->port_id); req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); @@ -7584,6 +7646,9 @@ bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + if (flags) + goto qstats_done; + if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { mutex_unlock(&bp->hwrm_cmd_lock); @@ -10502,8 +10567,8 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event)) bnxt_hwrm_exec_fwd_req(bp); if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { - bnxt_hwrm_port_qstats(bp); - bnxt_hwrm_port_qstats_ext(bp); + bnxt_hwrm_port_qstats(bp, 0); + bnxt_hwrm_port_qstats_ext(bp, 0); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { From fea6b3335527f41bc729466fb2a95e48a98827ac Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:43 -0400 Subject: [PATCH 1653/2056] bnxt_en: Accumulate all counters. Now that we have the infrastructure in place, add the new function bnxt_accumulate_all_stats() to periodically accumulate and check for counter rollover of all ring stats and port stats. A chip bug was also discovered that could cause some ring counters to become 0 during DMA. Work around this by ignoring zeros on the affected chips. Some older firmware will reset port counters during ifdown. We need to check for that and free the accumulated port counters during ifdown to prevent bogus counter overflow detection during ifup. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 91 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 90 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1327f0782076..44ef3ba1b49e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4051,6 +4051,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); + if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET)) + bnxt_free_port_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); kfree(bp->tx_ring_map); @@ -7592,6 +7594,88 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) +{ + u64 sw_tmp; + + sw_tmp = (*sw & ~mask) | hw; + if (hw < (*sw & mask)) + sw_tmp += mask + 1; + WRITE_ONCE(*sw, sw_tmp); +} + +static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks, + int count, bool ignore_zero) +{ + int i; + + for (i = 0; i < count; i++) { + u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i])); + + if (ignore_zero && !hw) + continue; + + if (masks[i] == -1ULL) + sw_stats[i] = hw; + else + bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]); + } +} + +static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats) +{ + if (!stats->hw_stats) + return; + + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + stats->hw_masks, stats->len / 8, false); +} + +static void bnxt_accumulate_all_stats(struct bnxt *bp) +{ + struct bnxt_stats_mem *ring0_stats; + bool ignore_zero = false; + int i; + + /* Chip bug. Counter intermittently becomes 0. 
*/ + if (bp->flags & BNXT_FLAG_CHIP_P5) + ignore_zero = true; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; + struct bnxt_stats_mem *stats; + + cpr = &bnapi->cp_ring; + stats = &cpr->stats; + if (!i) + ring0_stats = stats; + __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats, + ring0_stats->hw_masks, + ring0_stats->len / 8, ignore_zero); + } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct bnxt_stats_mem *stats = &bp->port_stats; + __le64 *hw_stats = stats->hw_stats; + u64 *sw_stats = stats->sw_stats; + u64 *masks = stats->hw_masks; + int cnt; + + cnt = sizeof(struct rx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + + hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; + cnt = sizeof(struct tx_port_stats) / 8; + __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false); + } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + bnxt_accumulate_stats(&bp->rx_port_stats_ext); + bnxt_accumulate_stats(&bp->tx_port_stats_ext); + } +} + static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { struct bnxt_pf_info *pf = &bp->pf; @@ -8710,6 +8794,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (BNXT_PF(bp)) bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG; } + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET) + bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET; + if (resp->supported_speeds_auto_mode) link_info->support_auto_speeds = le16_to_cpu(resp->supported_speeds_auto_mode); @@ -10280,8 +10367,7 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && - bp->stats_coal_ticks) { + if (bp->link_info.link_up && bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } @@ -10569,6 +10655,7 @@ static void bnxt_sp_task(struct work_struct *work) if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp, 0); bnxt_hwrm_port_qstats_ext(bp, 0); + bnxt_accumulate_all_stats(bp); } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 69672ec56430..44c7812c525c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1769,6 +1769,7 @@ struct bnxt { #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 + #define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; From a0c30621c28c6d8e9c572cd6139881f15c806792 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 27 Jul 2020 05:40:44 -0400 Subject: [PATCH 1654/2056] bnxt_en: Switch over to use the 64-bit software accumulated counters. Now we can report all the full 64-bit CPU endian software accumulated counters instead of the hw counters, some of which may be less than 64-bit wide. Define the necessary macros to access the software counters. Reviewed-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 65 ++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9 +++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 41 ++++++------ 3 files changed, 63 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 44ef3ba1b49e..1fc263ba34b1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9799,34 +9799,33 @@ static void bnxt_get_ring_stats(struct bnxt *bp, { int i; - for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw_stats = cpr->stats.hw_stats; + u64 *sw = cpr->stats.sw_stats; - stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); + stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts); - stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts); + stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes); - stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes); + stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes); - stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes); + stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes); stats->rx_missed_errors += - le64_to_cpu(hw_stats->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw, rx_discard_pkts); - stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); + stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_error_pkts); + stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); } } @@ -9864,20 +9863,26 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { - struct rx_port_stats *rx = bp->port_stats.hw_stats; - struct tx_port_stats *tx = bp->port_stats.hw_stats + - BNXT_TX_PORT_STATS_BYTE_OFFSET; + u64 *rx = bp->port_stats.sw_stats; + u64 *tx = bp->port_stats.sw_stats + + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8; - stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); - stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); - stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + - le64_to_cpu(rx->rx_ovrsz_frames) + - le64_to_cpu(rx->rx_runt_frames); - stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + - le64_to_cpu(rx->rx_jbr_frames); - stats->collisions = le64_to_cpu(tx->tx_total_collisions); - stats->tx_fifo_errors = 
le64_to_cpu(tx->tx_fifo_underruns); - stats->tx_errors = le64_to_cpu(tx->tx_err); + stats->rx_crc_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames); + stats->rx_frame_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames); + stats->rx_length_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames); + stats->rx_errors = + BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) + + BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames); + stats->collisions = + BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions); + stats->tx_fifo_errors = + BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns); + stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err); } clear_bit(BNXT_STATE_READ_STATS, &bp->state); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 44c7812c525c..0c9b79b765ae 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1928,6 +1928,15 @@ struct bnxt { struct device *hwmon_dev; }; +#define BNXT_GET_RING_STATS64(sw, counter) \ + (*((sw) + offsetof(struct ctx_hw_stats, counter) / 8)) + +#define BNXT_GET_RX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct rx_port_stats, counter) / 8)) + +#define BNXT_GET_TX_PORT_STATS64(sw, counter) \ + (*((sw) + offsetof(struct tx_port_stats, counter) / 8)) + #define BNXT_PORT_STATS_SIZE \ (sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index ae89f507677d..5fd3e92fad85 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -559,20 +559,19 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - struct ctx_hw_stats *hw = cpr->stats.hw_stats; - __le64 *hw_stats = cpr->stats.hw_stats; + u64 *sw_stats = cpr->stats.sw_stats; u64 *sw; int k; if (is_rx_ring(bp, i)) { for (k = 0; k < NUM_RING_RX_HW_STATS; j++, k++) - buf[j] = le64_to_cpu(hw_stats[k]); + buf[j] = sw_stats[k]; } if (is_tx_ring(bp, i)) { k = NUM_RING_RX_HW_STATS; for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; j++, k++) - buf[j] = le64_to_cpu(hw_stats[k]); + buf[j] = sw_stats[k]; } if (!tpa_stats || !is_rx_ring(bp, i)) goto skip_tpa_ring_stats; @@ -580,7 +579,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, k = NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS; for (; k < NUM_RING_RX_HW_STATS + NUM_RING_TX_HW_STATS + tpa_stats; j++, k++) - buf[j] = le64_to_cpu(hw_stats[k]); + buf[j] = sw_stats[k]; skip_tpa_ring_stats: sw = (u64 *)&cpr->sw_stats.rx; @@ -594,9 +593,9 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = sw[k]; bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += - le64_to_cpu(hw->rx_discard_pkts); + BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += - le64_to_cpu(hw->tx_discard_pkts); + BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -604,49 +603,47 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS) { - __le64 *port_stats = bp->port_stats.hw_stats; + u64 *port_stats = bp->port_stats.sw_stats; - for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) { - 
buf[j] = le64_to_cpu(*(port_stats + - bnxt_port_stats_arr[i].offset)); - } + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) + buf[j] = *(port_stats + bnxt_port_stats_arr[i].offset); } if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { - __le64 *rx_port_stats_ext = bp->rx_port_stats_ext.hw_stats; - __le64 *tx_port_stats_ext = bp->tx_port_stats_ext.hw_stats; + u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats; + u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats; for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) { - buf[j] = le64_to_cpu(*(rx_port_stats_ext + - bnxt_port_stats_ext_arr[i].offset)); + buf[j] = *(rx_port_stats_ext + + bnxt_port_stats_ext_arr[i].offset); } for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) { - buf[j] = le64_to_cpu(*(tx_port_stats_ext + - bnxt_tx_port_stats_ext_arr[i].offset)); + buf[j] = *(tx_port_stats_ext + + bnxt_tx_port_stats_ext_arr[i].offset); } if (bp->pri2cos_valid) { for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_bytes_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + buf[j] = *(rx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_rx_pkts_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + buf[j] = *(rx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_bytes_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + buf[j] = *(tx_port_stats_ext + n); } for (i = 0; i < 8; i++, j++) { long n = bnxt_tx_pkts_pri_arr[i].base_off + bp->pri2cos_idx[i]; - buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + buf[j] = *(tx_port_stats_ext + n); } } } From b5d600b027eb2733a1d7d253b84efb96c40f6a9d Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Mon, 27 Jul 2020 05:40:45 -0400 Subject: [PATCH 1655/2056] bnxt_en: Add support for 'ethtool -d' Add support to dump PXP registers and PCIe statistics. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 32 +++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 ++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 55 +++++++++++++++++++ .../net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2 + 4 files changed, 94 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1fc263ba34b1..2622d3c2d766 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -10228,6 +10228,38 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) return rc; } +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf) +{ + struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_dbg_read_direct_input req = {0}; + __le32 *dbg_reg_buf; + dma_addr_t mapping; + int rc, i; + + dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, + &mapping, GFP_KERNEL); + if (!dbg_reg_buf) + return -ENOMEM; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); + req.host_dest_addr = cpu_to_le64(mapping); + req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req.read_len32 = cpu_to_le32(num_words); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc || resp->error_code) { + rc = -EIO; + goto dbg_rd_reg_exit; + } + for (i = 0; i < num_words; i++) + reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); + +dbg_rd_reg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + return rc; +} + static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 0c9b79b765ae..5a13eb66beda 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1304,6 +1304,9 @@ struct bnxt_test_info { char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; }; +#define CHIMP_REG_VIEW_ADDR \ + ((bp->flags & BNXT_FLAG_CHIP_P5) ? 
0x80000000 : 0xb1000000) + #define BNXT_GRCPF_REG_CHIMP_COMM 0x0 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 @@ -2117,6 +2120,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); +int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, + u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); void bnxt_fw_reset(struct bnxt *bp); int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 5fd3e92fad85..64da654f1038 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1324,6 +1324,59 @@ static void bnxt_get_drvinfo(struct net_device *dev, info->regdump_len = 0; } +static int bnxt_get_regs_len(struct net_device *dev) +{ + struct bnxt *bp = netdev_priv(dev); + int reg_len; + + reg_len = BNXT_PXP_REG_LEN; + + if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) + reg_len += sizeof(struct pcie_ctx_hw_stats); + + return reg_len; +} + +static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *_p) +{ + struct pcie_ctx_hw_stats *hw_pcie_stats; + struct hwrm_pcie_qstats_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + dma_addr_t hw_pcie_stats_addr; + int rc; + + regs->version = 0; + bnxt_dbg_hwrm_rd_reg(bp, 0, BNXT_PXP_REG_LEN / 4, _p); + + if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) + return; + + hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, + sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr, GFP_KERNEL); + if (!hw_pcie_stats) + return; + + regs->version = 1; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); + req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + __le64 *src = (__le64 *)hw_pcie_stats; + u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); + int i; + + for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) + dst[i] = le64_to_cpu(src[i]); + } + mutex_unlock(&bp->hwrm_cmd_lock); + dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, + hw_pcie_stats_addr); +} + static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct bnxt *bp = netdev_priv(dev); @@ -3599,6 +3652,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, + .get_regs_len = bnxt_get_regs_len, + .get_regs = bnxt_get_regs, .get_wol = bnxt_get_wol, .set_wol = bnxt_set_wol, .get_coalesce = bnxt_get_coalesce, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index dddbca1d052c..34f44ddfad79 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -84,6 +84,8 @@ struct hwrm_dbg_cmn_output { ETH_RESET_PHY | ETH_RESET_RAM) \ << ETH_RESET_SHARED_SHIFT) +#define BNXT_PXP_REG_LEN 0x3110 + extern const struct ethtool_ops bnxt_ethtool_ops; u32 bnxt_get_rxfh_indir_size(struct net_device *dev); From 1775da47c34a520108bea02dd4cbc21ee2e1b6c2 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Mon, 27 Jul 2020 14:51:33 +0300 Subject: [PATCH 
1656/2056] qed: fix the allocation of the chains with an external PBL Dan reports static checker warning: "The patch 9b6ee3cf95d3: "qed: sanitize PBL chains allocation" from Jul 23, 2020, leads to the following static checker warning: drivers/net/ethernet/qlogic/qed/qed_chain.c:299 qed_chain_alloc_pbl() error: uninitialized symbol 'pbl_virt'. drivers/net/ethernet/qlogic/qed/qed_chain.c 249 static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) 250 { 251 struct device *dev = &cdev->pdev->dev; 252 struct addr_tbl_entry *addr_tbl; 253 dma_addr_t phys, pbl_phys; 254 __le64 *pbl_virt; ^^^^^^^^^^^^^^^^ [...] 271 if (chain->b_external_pbl) 272 goto alloc_pages; ^^^^^^^^^^^^^^^^ uninitialized [...] 298 /* Fill the PBL table with the physical address of the page */ 299 pbl_virt[i] = cpu_to_le64(phys); ^^^^^^^^^^^ [...] " This issue was introduced with commit c3a321b06a80 ("qed: simplify initialization of the chains with an external PBL"), when chain->pbl_sp.table_virt initialization was moved up to qed_chain_init_params(). Fix it by initializing pbl_virt with an already filled chain struct field. Fixes: c3a321b06a80 ("qed: simplify initialization of the chains with an external PBL") Reported-by: Dan Carpenter Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_chain.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c index f8efd36d66e0..b83d17b14e85 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_chain.c +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -268,8 +268,10 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) chain->pbl.pp_addr_tbl = addr_tbl; - if (chain->b_external_pbl) + if (chain->b_external_pbl) { + pbl_virt = chain->pbl_sp.table_virt; goto alloc_pages; + } size = array_size(page_cnt, sizeof(*pbl_virt)); if (unlikely(size == SIZE_MAX)) From 8f4c0e01789c18674acdf17cae3822b3dc3db715 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:16 -0400 Subject: [PATCH 1657/2056] hsr: enhance netlink socket interface to support PRP Parallel Redundancy Protocol (PRP) is another redundancy protocol introduced by IEC 63439 standard. It is similar to HSR in many aspects:- - Use a pair of Ethernet interfaces to created the PRP device - Use a 6 byte redundancy protocol part (RCT, Redundancy Check Trailer) similar to HSR Tag. - Has Link Redundancy Entity (LRE) that works with RCT to implement redundancy. Key difference is that the protocol unit is a trailer instead of a prefix as in HSR. That makes it inter-operable with tradition network components such as bridges/switches which treat it as pad bytes, whereas HSR nodes requires some kind of translators (Called redbox) to talk to regular network devices. This features allows regular linux box to be converted to a DAN-P box. DAN-P stands for Dual Attached Node - PRP similar to DAN-H (Dual Attached Node - HSR). Add a comment at the header/source code to explicitly state that the driver files also handles PRP protocol as well. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- include/uapi/linux/hsr_netlink.h | 2 +- include/uapi/linux/if_link.h | 12 +++++++++- net/hsr/Kconfig | 37 ++++++++++++++++++++----------- net/hsr/hsr_debugfs.c | 2 +- net/hsr/hsr_device.c | 7 ++++-- net/hsr/hsr_device.h | 2 ++ net/hsr/hsr_forward.c | 2 ++ net/hsr/hsr_forward.h | 2 ++ net/hsr/hsr_framereg.c | 1 + net/hsr/hsr_framereg.h | 2 ++ net/hsr/hsr_main.c | 2 ++ net/hsr/hsr_main.h | 11 ++++++++- net/hsr/hsr_netlink.c | 38 ++++++++++++++++++++++++++------ net/hsr/hsr_netlink.h | 2 ++ net/hsr/hsr_slave.c | 2 ++ net/hsr/hsr_slave.h | 2 ++ 16 files changed, 100 insertions(+), 26 deletions(-) diff --git a/include/uapi/linux/hsr_netlink.h b/include/uapi/linux/hsr_netlink.h index c218ef9c35dd..d540ea9bbef4 100644 --- a/include/uapi/linux/hsr_netlink.h +++ b/include/uapi/linux/hsr_netlink.h @@ -17,7 +17,7 @@ /* Generic Netlink HSR family definition */ -/* attributes */ +/* attributes for HSR or PRP node */ enum { HSR_A_UNSPEC, HSR_A_NODE_ADDR, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index af8f31987526..63af64646358 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -907,7 +907,14 @@ enum { #define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1) -/* HSR section */ +/* HSR/PRP section, both uses same interface */ + +/* Different redundancy protocols for hsr device */ +enum { + HSR_PROTOCOL_HSR, + HSR_PROTOCOL_PRP, + HSR_PROTOCOL_MAX, +}; enum { IFLA_HSR_UNSPEC, @@ -917,6 +924,9 @@ enum { IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */ IFLA_HSR_SEQ_NR, IFLA_HSR_VERSION, /* HSR version */ + IFLA_HSR_PROTOCOL, /* Indicate different protocol than + * HSR. For example PRP. + */ __IFLA_HSR_MAX, }; diff --git a/net/hsr/Kconfig b/net/hsr/Kconfig index 8095b034e76e..1b048c17b6c8 100644 --- a/net/hsr/Kconfig +++ b/net/hsr/Kconfig @@ -4,24 +4,35 @@ # config HSR - tristate "High-availability Seamless Redundancy (HSR)" + tristate "High-availability Seamless Redundancy (HSR & PRP)" help - If you say Y here, then your Linux box will be able to act as a - DANH ("Doubly attached node implementing HSR"). For this to work, - your Linux box needs (at least) two physical Ethernet interfaces, - and it must be connected as a node in a ring network together with - other HSR capable nodes. + This enables IEC 62439 defined High-availability Seamless + Redundancy (HSR) and Parallel Redundancy Protocol (PRP). - All Ethernet frames sent over the hsr device will be sent in both - directions on the ring (over both slave ports), giving a redundant, - instant fail-over network. Each HSR node in the ring acts like a - bridge for HSR frames, but filters frames that have been forwarded - earlier. + If you say Y here, then your Linux box will be able to act as a + DANH ("Doubly attached node implementing HSR") or DANP ("Doubly + attached node implementing PRP"). For this to work, your Linux box + needs (at least) two physical Ethernet interfaces. + + For DANH, it must be connected as a node in a ring network together + with other HSR capable nodes. All Ethernet frames sent over the HSR + device will be sent in both directions on the ring (over both slave + ports), giving a redundant, instant fail-over network. Each HSR node + in the ring acts like a bridge for HSR frames, but filters frames + that have been forwarded earlier. + + For DANP, it must be connected as a node connecting to two + separate networks over the two slave interfaces. 
Like HSR, Ethernet + frames sent over the PRP device will be sent to both networks giving + a redundant, instant fail-over network. Unlike HSR, PRP networks + can have Singly Attached Nodes (SAN) such as PC, printer, bridges + etc and will be able to communicate with DANP nodes. This code is a "best effort" to comply with the HSR standard as described in IEC 62439-3:2010 (HSRv0) and IEC 62439-3:2012 (HSRv1), - but no compliancy tests have been made. Use iproute2 to select - the version you desire. + and PRP standard described in IEC 62439-4:2012 (PRP), but no + compliancy tests have been made. Use iproute2 to select the protocol + you would like to use. You need to perform any and all necessary tests yourself before relying on this code in a safety critical system! diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 9787ef11ca71..c1932c0a15be 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -1,5 +1,5 @@ /* - * hsr_debugfs code + * debugfs code for HSR & PRP * Copyright (C) 2019 Texas Instruments Incorporated * * Author(s): diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 8a927b647829..40ac45123a62 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -3,9 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se - * * This file contains device methods for creating, using and destroying - * virtual HSR devices. + * virtual HSR or PRP devices. */ #include @@ -427,6 +426,10 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); + /* currently PRP is not supported */ + if (protocol_version == PRP_V1) + return -EPROTONOSUPPORT; + /* Make sure we recognize frames from ourselves in hsr_rcv() */ res = hsr_create_self_node(hsr, hsr_dev->dev_addr, slave[1]->dev_addr); diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h index b8f9262ed101..868373822ee4 100644 --- a/net/hsr/hsr_device.h +++ b/net/hsr/hsr_device.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_DEVICE_H diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index ab8dca0c0b65..55adb4dbd235 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Frame router for HSR and PRP. */ #include "hsr_forward.h" diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h index 51a69295566c..b2a6fa319d94 100644 --- a/net/hsr/hsr_forward.h +++ b/net/hsr/hsr_forward.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_FORWARD_H diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 530de24b1fb5..13b2190e6556 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -8,6 +8,7 @@ * interface. A frame is identified by its source MAC address and its HSR * sequence number. This code keeps track of senders and their sequence numbers * to allow filtering of duplicate frames, and to detect HSR ring errors. + * Same code handles filtering of duplicates for PRP as well. */ #include diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 0f0fa12b4329..c06447780d05 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. 
*/ #ifndef __HSR_FRAMEREG_H diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index 144da15f0a81..2fd1976e5b1c 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Event handling for HSR and PRP devices. */ #include diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index f74193465bf5..8cf10d67d5f9 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_PRIVATE_H @@ -131,6 +133,13 @@ struct hsr_port { enum hsr_port_type type; }; +/* used by driver internally to differentiate various protocols */ +enum hsr_version { + HSR_V0 = 0, + HSR_V1, + PRP_V1, +}; + struct hsr_priv { struct rcu_head rcu_head; struct list_head ports; @@ -141,7 +150,7 @@ struct hsr_priv { int announce_count; u16 sequence_nr; u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ - u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ + enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */ spinlock_t seqnr_lock; /* locking for sequence_nr */ spinlock_t list_lock; /* locking for node list */ unsigned char sup_multicast_addr[ETH_ALEN]; diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 6e14b7d22639..06c3cd988760 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -4,7 +4,7 @@ * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se * - * Routines for handling Netlink messages for HSR. + * Routines for handling Netlink messages for HSR and PRP. */ #include "hsr_netlink.h" @@ -22,6 +22,7 @@ static const struct nla_policy hsr_policy[IFLA_HSR_MAX + 1] = { [IFLA_HSR_VERSION] = { .type = NLA_U8 }, [IFLA_HSR_SUPERVISION_ADDR] = { .len = ETH_ALEN }, [IFLA_HSR_SEQ_NR] = { .type = NLA_U16 }, + [IFLA_HSR_PROTOCOL] = { .type = NLA_U8 }, }; /* Here, it seems a netdevice has already been allocated for us, and the @@ -31,8 +32,10 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + enum hsr_version proto_version; + unsigned char multicast_spec; + u8 proto = HSR_PROTOCOL_HSR; struct net_device *link[2]; - unsigned char multicast_spec, hsr_version; if (!data) { NL_SET_ERR_MSG_MOD(extack, "No slave devices specified"); @@ -69,18 +72,34 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, else multicast_spec = nla_get_u8(data[IFLA_HSR_MULTICAST_SPEC]); + if (data[IFLA_HSR_PROTOCOL]) + proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]); + + if (proto >= HSR_PROTOCOL_MAX) { + NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol\n"); + return -EINVAL; + } + if (!data[IFLA_HSR_VERSION]) { - hsr_version = 0; + proto_version = HSR_V0; } else { - hsr_version = nla_get_u8(data[IFLA_HSR_VERSION]); - if (hsr_version > 1) { + if (proto == HSR_PROTOCOL_PRP) { + NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported\n"); + return -EINVAL; + } + + proto_version = nla_get_u8(data[IFLA_HSR_VERSION]); + if (proto_version > HSR_V1) { NL_SET_ERR_MSG_MOD(extack, - "Only versions 0..1 are supported"); + "Only HSR version 0/1 supported\n"); return -EINVAL; } } - return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack); + if (proto == HSR_PROTOCOL_PRP) + proto_version = PRP_V1; + + return hsr_dev_finalize(dev, link, multicast_spec, proto_version, extack); } static void hsr_dellink(struct net_device *dev, struct list_head *head) @@ -102,6 +121,7 @@ static void 
hsr_dellink(struct net_device *dev, struct list_head *head) static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct hsr_priv *hsr = netdev_priv(dev); + u8 proto = HSR_PROTOCOL_HSR; struct hsr_port *port; port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A); @@ -120,6 +140,10 @@ static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev) hsr->sup_multicast_addr) || nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr)) goto nla_put_failure; + if (hsr->prot_version == PRP_V1) + proto = HSR_PROTOCOL_PRP; + if (nla_put_u8(skb, IFLA_HSR_PROTOCOL, proto)) + goto nla_put_failure; return 0; diff --git a/net/hsr/hsr_netlink.h b/net/hsr/hsr_netlink.h index 1121bb192a18..501552d9753b 100644 --- a/net/hsr/hsr_netlink.h +++ b/net/hsr/hsr_netlink.h @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_NETLINK_H diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index 25b6ffba26cd..b5c0834de338 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -3,6 +3,8 @@ * * Author(s): * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * Frame handler other utility functions for HSR and PRP. */ #include "hsr_slave.h" diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h index 8953ea279ce9..9708a4f0ec09 100644 --- a/net/hsr/hsr_slave.h +++ b/net/hsr/hsr_slave.h @@ -2,6 +2,8 @@ /* Copyright 2011-2014 Autronica Fire and Security AS * * 2011-2014 Arvid Brodin, arvid.brodin@alten.se + * + * include file for HSR and PRP. */ #ifndef __HSR_SLAVE_H From 121c33b07b3127f501b366bc23d2a590e2f2b8ef Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:17 -0400 Subject: [PATCH 1658/2056] net: hsr: introduce common code for skb initialization As a preparatory patch to introduce PRP protocol support in the driver, refactor the skb init code to a separate function. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- net/hsr/hsr_device.c | 41 ++++++++++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 40ac45123a62..bfe2d1449dbd 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -230,15 +230,11 @@ static const struct header_ops hsr_header_ops = { .parse = eth_header_parse, }; -static void send_hsr_supervision_frame(struct hsr_port *master, - u8 type, u8 hsr_ver) +static struct sk_buff *hsr_init_skb(struct hsr_port *master, u8 hsr_ver) { + struct hsr_priv *hsr = master->hsr; struct sk_buff *skb; int hlen, tlen; - struct hsr_tag *hsr_tag; - struct hsr_sup_tag *hsr_stag; - struct hsr_sup_payload *hsr_sp; - unsigned long irqflags; hlen = LL_RESERVED_SPACE(master->dev); tlen = master->dev->needed_tailroom; @@ -247,22 +243,44 @@ static void send_hsr_supervision_frame(struct hsr_port *master, sizeof(struct hsr_sup_payload) + hlen + tlen); if (!skb) - return; + return skb; skb_reserve(skb, hlen); - skb->dev = master->dev; skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP); skb->priority = TC_PRIO_CONTROL; if (dev_hard_header(skb, skb->dev, (hsr_ver ? 
ETH_P_HSR : ETH_P_PRP), - master->hsr->sup_multicast_addr, + hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) goto out; + skb_reset_mac_header(skb); skb_reset_network_header(skb); skb_reset_transport_header(skb); + return skb; +out: + kfree_skb(skb); + + return NULL; +} + +static void send_hsr_supervision_frame(struct hsr_port *master, + u8 type, u8 hsr_ver) +{ + struct hsr_sup_payload *hsr_sp; + struct hsr_sup_tag *hsr_stag; + struct hsr_tag *hsr_tag; + unsigned long irqflags; + struct sk_buff *skb; + + skb = hsr_init_skb(master, hsr_ver); + if (!skb) { + WARN_ONCE(1, "HSR: Could not send supervision frame\n"); + return; + } + if (hsr_ver > 0) { hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); hsr_tag->encap_proto = htons(ETH_P_PRP); @@ -299,11 +317,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, return; hsr_forward_skb(skb, master); - return; -out: - WARN_ONCE(1, "HSR: Could not send supervision frame\n"); - kfree_skb(skb); + return; } /* Announce (supervision frame) timer function From 28e458e097f32af4f89f02e078a6d6b4ea7a52ed Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:18 -0400 Subject: [PATCH 1659/2056] net: hsr: introduce protocol specific function pointers As a preparatory patch to introduce support for PRP protocol, add a protocol ops ptr in the private hsr structure to hold function pointers as some of the functions at protocol level packet handling is different for HSR vs PRP. It is expected that PRP will add its of set of functions for protocol handling. Modify existing hsr_announce() function to call proto_ops->send_sv_frame() to send supervision frame for HSR. This is expected to be different for PRP. So introduce a ops function ptr, send_sv_frame() for the same and initialize it to send_hsr_supervsion_frame(). Modify hsr_announce() to call proto_ops->send_sv_frame(). Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- net/hsr/hsr_device.c | 70 ++++++++++++++++++++++++-------------------- net/hsr/hsr_main.h | 6 ++++ 2 files changed, 45 insertions(+), 31 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index bfe2d1449dbd..006e715eccb6 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -230,7 +230,7 @@ static const struct header_ops hsr_header_ops = { .parse = eth_header_parse, }; -static struct sk_buff *hsr_init_skb(struct hsr_port *master, u8 hsr_ver) +static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto) { struct hsr_priv *hsr = master->hsr; struct sk_buff *skb; @@ -247,10 +247,10 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u8 hsr_ver) skb_reserve(skb, hlen); skb->dev = master->dev; - skb->protocol = htons(hsr_ver ? ETH_P_HSR : ETH_P_PRP); + skb->protocol = htons(proto); skb->priority = TC_PRIO_CONTROL; - if (dev_hard_header(skb, skb->dev, (hsr_ver ? 
ETH_P_HSR : ETH_P_PRP), + if (dev_hard_header(skb, skb->dev, proto, hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) goto out; @@ -267,47 +267,62 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u8 hsr_ver) } static void send_hsr_supervision_frame(struct hsr_port *master, - u8 type, u8 hsr_ver) + unsigned long *interval) { + struct hsr_priv *hsr = master->hsr; + __u8 type = HSR_TLV_LIFE_CHECK; + struct hsr_tag *hsr_tag = NULL; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tag *hsr_stag; - struct hsr_tag *hsr_tag; unsigned long irqflags; struct sk_buff *skb; + u16 proto; - skb = hsr_init_skb(master, hsr_ver); + *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); + if (hsr->announce_count < 3 && hsr->prot_version == 0) { + type = HSR_TLV_ANNOUNCE; + *interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); + hsr->announce_count++; + } + + if (!hsr->prot_version) + proto = ETH_P_PRP; + else + proto = ETH_P_HSR; + + skb = hsr_init_skb(master, proto); if (!skb) { WARN_ONCE(1, "HSR: Could not send supervision frame\n"); return; } - if (hsr_ver > 0) { + if (hsr->prot_version > 0) { hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); hsr_tag->encap_proto = htons(ETH_P_PRP); set_hsr_tag_LSDU_size(hsr_tag, HSR_V1_SUP_LSDUSIZE); } hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag)); - set_hsr_stag_path(hsr_stag, (hsr_ver ? 0x0 : 0xf)); - set_hsr_stag_HSR_ver(hsr_stag, hsr_ver); + set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf)); + set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version); /* From HSRv1 on we have separate supervision sequence numbers. */ spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); - if (hsr_ver > 0) { - hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr); - hsr_tag->sequence_nr = htons(master->hsr->sequence_nr); - master->hsr->sup_sequence_nr++; - master->hsr->sequence_nr++; + if (hsr->prot_version > 0) { + hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); + hsr->sup_sequence_nr++; + hsr_tag->sequence_nr = htons(hsr->sequence_nr); + hsr->sequence_nr++; } else { - hsr_stag->sequence_nr = htons(master->hsr->sequence_nr); - master->hsr->sequence_nr++; + hsr_stag->sequence_nr = htons(hsr->sequence_nr); + hsr->sequence_nr++; } spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); hsr_stag->HSR_TLV_type = type; /* TODO: Why 12 in HSRv0? */ - hsr_stag->HSR_TLV_length = - hsr_ver ? sizeof(struct hsr_sup_payload) : 12; + hsr_stag->HSR_TLV_length = hsr->prot_version ? 
+ sizeof(struct hsr_sup_payload) : 12; /* Payload: MacAddressA */ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); @@ -333,19 +348,7 @@ static void hsr_announce(struct timer_list *t) rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); - - if (hsr->announce_count < 3 && hsr->prot_version == 0) { - send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE, - hsr->prot_version); - hsr->announce_count++; - - interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); - } else { - send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK, - hsr->prot_version); - - interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); - } + hsr->proto_ops->send_sv_frame(master, &interval); if (is_admin_up(master->dev)) mod_timer(&hsr->announce_timer, jiffies + interval); @@ -382,6 +385,10 @@ static struct device_type hsr_type = { .name = "hsr", }; +static struct hsr_proto_ops hsr_ops = { + .send_sv_frame = send_hsr_supervision_frame, +}; + void hsr_dev_setup(struct net_device *dev) { eth_hw_addr_random(dev); @@ -445,6 +452,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], if (protocol_version == PRP_V1) return -EPROTONOSUPPORT; + hsr->proto_ops = &hsr_ops; /* Make sure we recognize frames from ourselves in hsr_rcv() */ res = hsr_create_self_node(hsr, hsr_dev->dev_addr, slave[1]->dev_addr); diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 8cf10d67d5f9..671270115a50 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -140,6 +140,11 @@ enum hsr_version { PRP_V1, }; +struct hsr_proto_ops { + /* format and send supervision frame */ + void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval); +}; + struct hsr_priv { struct rcu_head rcu_head; struct list_head ports; @@ -153,6 +158,7 @@ struct hsr_priv { enum hsr_version prot_version; /* Indicate if HSRv0, HSRv1 or PRPv1 */ spinlock_t seqnr_lock; /* locking for sequence_nr */ spinlock_t list_lock; /* locking for node list */ + struct hsr_proto_ops *proto_ops; unsigned char sup_multicast_addr[ETH_ALEN]; #ifdef CONFIG_DEBUG_FS struct dentry *node_tbl_root; From c643ff0383c858b9af09f582ed2947810e4cf407 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:19 -0400 Subject: [PATCH 1660/2056] net: prp: add supervision frame generation utility function Add support for generation of PRP supervision frames. For PRP, supervision frame format is similar to HSR version 0, but have a PRP Redundancy Control Trailer (RCT) added and uses a different message type, PRP_TLV_LIFE_CHECK_DD. Also update is_supervision_frame() to include the new message type used for PRP supervision frame. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- net/hsr/hsr_device.c | 64 ++++++++++++++++++++++++++++++++++++++++++- net/hsr/hsr_forward.c | 4 ++- net/hsr/hsr_main.h | 22 +++++++++++++++ 3 files changed, 88 insertions(+), 2 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 006e715eccb6..74eaf28743a4 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -238,6 +238,10 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master, u16 proto) hlen = LL_RESERVED_SPACE(master->dev); tlen = master->dev->needed_tailroom; + /* skb size is same for PRP/HSR frames, only difference + * being, for PRP it is a trailer and for HSR it is a + * header + */ skb = dev_alloc_skb(sizeof(struct hsr_tag) + sizeof(struct hsr_sup_tag) + sizeof(struct hsr_sup_payload) + hlen + tlen); @@ -336,6 +340,55 @@ static void send_hsr_supervision_frame(struct hsr_port *master, return; } +static void send_prp_supervision_frame(struct hsr_port *master, + unsigned long *interval) +{ + struct hsr_priv *hsr = master->hsr; + struct hsr_sup_payload *hsr_sp; + struct hsr_sup_tag *hsr_stag; + unsigned long irqflags; + struct sk_buff *skb; + struct prp_rct *rct; + u8 *tail; + + skb = hsr_init_skb(master, ETH_P_PRP); + if (!skb) { + WARN_ONCE(1, "PRP: Could not send supervision frame\n"); + return; + } + + *interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL); + hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag)); + set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf)); + set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0)); + + /* From HSRv1 on we have separate supervision sequence numbers. */ + spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags); + hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr); + hsr->sup_sequence_nr++; + hsr_stag->HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD; + hsr_stag->HSR_TLV_length = sizeof(struct hsr_sup_payload); + + /* Payload: MacAddressA */ + hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); + ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); + + if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) { + spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); + return; + } + + tail = skb_tail_pointer(skb) - HSR_HLEN; + rct = (struct prp_rct *)tail; + rct->PRP_suffix = htons(ETH_P_PRP); + set_prp_LSDU_size(rct, HSR_V1_SUP_LSDUSIZE); + rct->sequence_nr = htons(hsr->sequence_nr); + hsr->sequence_nr++; + spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags); + + hsr_forward_skb(skb, master); +} + /* Announce (supervision frame) timer function */ static void hsr_announce(struct timer_list *t) @@ -389,6 +442,10 @@ static struct hsr_proto_ops hsr_ops = { .send_sv_frame = send_hsr_supervision_frame, }; +struct hsr_proto_ops prp_ops = { + .send_sv_frame = send_prp_supervision_frame, +}; + void hsr_dev_setup(struct net_device *dev) { eth_hw_addr_random(dev); @@ -452,7 +509,12 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], if (protocol_version == PRP_V1) return -EPROTONOSUPPORT; - hsr->proto_ops = &hsr_ops; + /* initialize protocol specific functions */ + if (protocol_version == PRP_V1) + hsr->proto_ops = &prp_ops; + else + hsr->proto_ops = &hsr_ops; + /* Make sure we recognize frames from ourselves in hsr_rcv() */ res = hsr_create_self_node(hsr, hsr_dev->dev_addr, slave[1]->dev_addr); diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 55adb4dbd235..dedf8ac6f992 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -76,7 +76,9 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) } if 
(hsr_sup_tag->HSR_TLV_type != HSR_TLV_ANNOUNCE && - hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK) + hsr_sup_tag->HSR_TLV_type != HSR_TLV_LIFE_CHECK && + hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD && + hsr_sup_tag->HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA) return false; if (hsr_sup_tag->HSR_TLV_length != 12 && hsr_sup_tag->HSR_TLV_length != sizeof(struct hsr_sup_payload)) diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 671270115a50..58e1ad21b66f 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -35,6 +35,10 @@ #define HSR_TLV_ANNOUNCE 22 #define HSR_TLV_LIFE_CHECK 23 +/* PRP V1 life check for Duplicate discard */ +#define PRP_TLV_LIFE_CHECK_DD 20 +/* PRP V1 life check for Duplicate Accept */ +#define PRP_TLV_LIFE_CHECK_DA 21 /* HSR Tag. * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, @@ -126,6 +130,24 @@ enum hsr_port_type { HSR_PT_PORTS, /* This must be the last item in the enum */ }; +/* PRP Redundancy Control Trailer (RCT). + * As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr, + * Lan identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }. + * + * Field names as defined in the IEC:2012 standard for PRP. + */ +struct prp_rct { + __be16 sequence_nr; + __be16 lan_id_and_LSDU_size; + __be16 PRP_suffix; +} __packed; + +static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size) +{ + rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) & + 0xF000) | (LSDU_size & 0x0FFF)); +} + struct hsr_port { struct list_head port_list; struct net_device *dev; From fa4dc89531360de760359bf94086b04dada98d4e Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:20 -0400 Subject: [PATCH 1661/2056] net: hsr: define and use proto_ops ptrs to handle hsr specific frames As a preparatory patch to introduce PRP, refactor the code specific to handling HSR frames into separate functions and call them through proto_ops function pointers. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- net/hsr/hsr_device.c | 5 +++- net/hsr/hsr_forward.c | 63 +++++++++++++++++++++++++------------------ net/hsr/hsr_forward.h | 7 ++++- net/hsr/hsr_main.h | 8 ++++++ 4 files changed, 55 insertions(+), 28 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 74eaf28743a4..022393bed40a 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -440,9 +440,12 @@ static struct device_type hsr_type = { static struct hsr_proto_ops hsr_ops = { .send_sv_frame = send_hsr_supervision_frame, + .create_tagged_frame = hsr_create_tagged_frame, + .get_untagged_frame = hsr_get_untagged_frame, + .fill_frame_info = hsr_fill_frame_info, }; -struct hsr_proto_ops prp_ops = { +static struct hsr_proto_ops prp_ops = { .send_sv_frame = send_prp_supervision_frame, }; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index dedf8ac6f992..33e8136891bc 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -116,8 +116,8 @@ static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in, return skb; } -static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame, - struct hsr_port *port) +struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port) { if (!frame->skb_std) frame->skb_std = create_stripped_skb(frame->skb_hsr, frame); @@ -192,8 +192,8 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, /* If the original frame was an HSR tagged frame, just clone it to be sent * unchanged.
Otherwise, create a private frame especially tagged for 'port'. */ -static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame, - struct hsr_port *port) +struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port) { if (frame->skb_hsr) return skb_clone(frame->skb_hsr, GFP_ATOMIC); @@ -257,6 +257,7 @@ static void hsr_forward_do(struct hsr_frame_info *frame) struct sk_buff *skb; hsr_for_each_port(frame->port_rcv->hsr, port) { + struct hsr_priv *hsr = port->hsr; /* Don't send frame back the way it came */ if (port == frame->port_rcv) continue; @@ -282,9 +283,10 @@ static void hsr_forward_do(struct hsr_frame_info *frame) } if (port->type != HSR_PT_MASTER) - skb = frame_get_tagged_skb(frame, port); + skb = hsr->proto_ops->create_tagged_frame(frame, port); else - skb = frame_get_stripped_skb(frame, port); + skb = hsr->proto_ops->get_untagged_frame(frame, port); + if (!skb) { /* FIXME: Record the dropped frame? */ continue; @@ -317,12 +319,34 @@ static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, } } -static int hsr_fill_frame_info(struct hsr_frame_info *frame, - struct sk_buff *skb, struct hsr_port *port) +void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame) { - struct ethhdr *ethhdr; + struct hsr_priv *hsr = frame->port_rcv->hsr; unsigned long irqflags; + if (proto == htons(ETH_P_PRP) || proto == htons(ETH_P_HSR)) { + frame->skb_std = NULL; + frame->skb_hsr = skb; + frame->sequence_nr = hsr_get_skb_sequence_nr(skb); + } else { + frame->skb_std = skb; + frame->skb_hsr = NULL; + /* Sequence nr for the master node */ + spin_lock_irqsave(&hsr->seqnr_lock, irqflags); + frame->sequence_nr = hsr->sequence_nr; + hsr->sequence_nr++; + spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags); + } +} + +static int fill_frame_info(struct hsr_frame_info *frame, + struct sk_buff *skb, struct hsr_port *port) +{ + struct hsr_priv *hsr = port->hsr; + struct ethhdr *ethhdr; + __be16 proto; + frame->is_supervision = is_supervision_frame(port->hsr, skb); frame->node_src = hsr_get_node(port, skb, frame->is_supervision); if (!frame->node_src) @@ -335,23 +359,10 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, /* FIXME: */ netdev_warn_once(skb->dev, "VLAN not yet supported"); } - if (ethhdr->h_proto == htons(ETH_P_PRP) || - ethhdr->h_proto == htons(ETH_P_HSR)) { - frame->skb_std = NULL; - frame->skb_hsr = skb; - frame->sequence_nr = hsr_get_skb_sequence_nr(skb); - } else { - frame->skb_std = skb; - frame->skb_hsr = NULL; - /* Sequence nr for the master node */ - spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags); - frame->sequence_nr = port->hsr->sequence_nr; - port->hsr->sequence_nr++; - spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags); - } - + proto = ethhdr->h_proto; frame->port_rcv = port; - check_local_dest(port->hsr, skb, frame); + hsr->proto_ops->fill_frame_info(proto, skb, frame); + check_local_dest(hsr, skb, frame); return 0; } @@ -367,7 +378,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) goto out_drop; } - if (hsr_fill_frame_info(&frame, skb, port) < 0) + if (fill_frame_info(&frame, skb, port) < 0) goto out_drop; hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); hsr_forward_do(&frame); diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h index b2a6fa319d94..893207792d56 100644 --- a/net/hsr/hsr_forward.h +++ b/net/hsr/hsr_forward.h @@ -14,5 +14,10 @@ #include "hsr_main.h" void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port); - 
+struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port); +struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port); +void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame); #endif /* __HSR_FORWARD_H */ diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 58e1ad21b66f..14f442c57a84 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -162,9 +162,17 @@ enum hsr_version { PRP_V1, }; +struct hsr_frame_info; + struct hsr_proto_ops { /* format and send supervision frame */ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval); + struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame, + struct hsr_port *port); + struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame, + struct hsr_port *port); + void (*fill_frame_info)(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame); }; struct hsr_priv { From 451d8123f89791bb628277c0bdb4cae34a3563e6 Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:21 -0400 Subject: [PATCH 1662/2056] net: prp: add packet handling support DAN-P (Dual Attached Nodes PRP) nodes are expected to receive traditional IP packets as well as PRP (Parallel Redundancy Protocol) tagged (trailer) packets. The PRP trailer is a 6-byte PRP protocol unit called the Redundancy Control Trailer (RCT), similar to the HSR tag. A PRP network can have traditional devices such as bridges/switches or PCs attached to it, and these should be able to communicate. Regular Ethernet devices treat the RCT as padding. This patch adds logic to format L2 frames from the network stack with a trailer (RCT) and send them as duplicates over the slave interfaces when the protocol is PRP, as per IEC 62439-3. At ingress, it strips the trailer, does duplicate detection and rejection, and forwards the stripped frame up the network stack. A PRP device should accept frames from Singly Attached Nodes (SANs), so the driver marks the link the frame came from in the node table. Signed-off-by: Murali Karicheri Signed-off-by: David S. 
Miller --- net/hsr/hsr_device.c | 20 ++- net/hsr/hsr_forward.c | 272 ++++++++++++++++++++++++++++++++--------- net/hsr/hsr_forward.h | 7 ++ net/hsr/hsr_framereg.c | 94 +++++++++++--- net/hsr/hsr_framereg.h | 29 ++++- net/hsr/hsr_main.h | 73 ++++++++++- net/hsr/hsr_slave.c | 24 +++- net/hsr/hsr_slave.h | 2 + 8 files changed, 433 insertions(+), 88 deletions(-) diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 022393bed40a..ab953a1a0d6c 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -443,10 +443,17 @@ static struct hsr_proto_ops hsr_ops = { .create_tagged_frame = hsr_create_tagged_frame, .get_untagged_frame = hsr_get_untagged_frame, .fill_frame_info = hsr_fill_frame_info, + .invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame, }; static struct hsr_proto_ops prp_ops = { .send_sv_frame = send_prp_supervision_frame, + .create_tagged_frame = prp_create_tagged_frame, + .get_untagged_frame = prp_get_untagged_frame, + .drop_frame = prp_drop_frame, + .fill_frame_info = prp_fill_frame_info, + .handle_san_frame = prp_handle_san_frame, + .update_san_info = prp_update_san_info, }; void hsr_dev_setup(struct net_device *dev) @@ -508,15 +515,16 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); - /* currently PRP is not supported */ - if (protocol_version == PRP_V1) - return -EPROTONOSUPPORT; - /* initialize protocol specific functions */ - if (protocol_version == PRP_V1) + if (protocol_version == PRP_V1) { + /* For PRP, lan_id has most significant 3 bits holding + * the net_id of PRP_LAN_ID + */ + hsr->net_id = PRP_LAN_ID << 1; hsr->proto_ops = &prp_ops; - else + } else { hsr->proto_ops = &hsr_ops; + } /* Make sure we recognize frames from ourselves in hsr_rcv() */ res = hsr_create_self_node(hsr, hsr_dev->dev_addr, diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 33e8136891bc..cadfccd7876e 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -17,18 +17,6 @@ struct hsr_node; -struct hsr_frame_info { - struct sk_buff *skb_std; - struct sk_buff *skb_hsr; - struct hsr_port *port_rcv; - struct hsr_node *node_src; - u16 sequence_nr; - bool is_supervision; - bool is_vlan; - bool is_local_dest; - bool is_local_exclusive; -}; - /* The uses I can see for these HSR supervision frames are: * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type = * 22") to reset any sequence_nr counters belonging to that node. 
Useful if @@ -87,8 +75,8 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) return true; } -static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in, - struct hsr_frame_info *frame) +static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in, + struct hsr_frame_info *frame) { struct sk_buff *skb; int copylen; @@ -119,35 +107,120 @@ static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in, struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { - if (!frame->skb_std) - frame->skb_std = create_stripped_skb(frame->skb_hsr, frame); + if (!frame->skb_std) { + if (frame->skb_hsr) { + frame->skb_std = + create_stripped_skb_hsr(frame->skb_hsr, frame); + } else { + /* Unexpected */ + WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", + __FILE__, __LINE__, port->dev->name); + return NULL; + } + } + return skb_clone(frame->skb_std, GFP_ATOMIC); } +struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port) +{ + if (!frame->skb_std) { + if (frame->skb_prp) { + /* trim the skb by len - HSR_HLEN to exclude RCT */ + skb_trim(frame->skb_prp, + frame->skb_prp->len - HSR_HLEN); + frame->skb_std = + __pskb_copy(frame->skb_prp, + skb_headroom(frame->skb_prp), + GFP_ATOMIC); + } else { + /* Unexpected */ + WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n", + __FILE__, __LINE__, port->dev->name); + return NULL; + } + } + + return skb_clone(frame->skb_std, GFP_ATOMIC); +} + +static void prp_set_lan_id(struct prp_rct *trailer, + struct hsr_port *port) +{ + int lane_id; + + if (port->type == HSR_PT_SLAVE_A) + lane_id = 0; + else + lane_id = 1; + + /* Add net_id in the upper 3 bits of lane_id */ + lane_id |= port->hsr->net_id; + set_prp_lan_id(trailer, lane_id); +} + +/* Tailroom for PRP rct should have been created before calling this */ +static struct sk_buff *prp_fill_rct(struct sk_buff *skb, + struct hsr_frame_info *frame, + struct hsr_port *port) +{ + struct prp_rct *trailer; + int min_size = ETH_ZLEN; + int lsdu_size; + + if (!skb) + return skb; + + if (frame->is_vlan) + min_size = VLAN_ETH_ZLEN; + + if (skb_put_padto(skb, min_size)) + return NULL; + + trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN); + lsdu_size = skb->len - 14; + if (frame->is_vlan) + lsdu_size -= 4; + prp_set_lan_id(trailer, port); + set_prp_LSDU_size(trailer, lsdu_size); + trailer->sequence_nr = htons(frame->sequence_nr); + trailer->PRP_suffix = htons(ETH_P_PRP); + + return skb; +} + +static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr, + struct hsr_port *port) +{ + int path_id; + + if (port->type == HSR_PT_SLAVE_A) + path_id = 0; + else + path_id = 1; + + set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id); +} + static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame, struct hsr_port *port, u8 proto_version) { struct hsr_ethhdr *hsr_ethhdr; - int lane_id; int lsdu_size; /* pad to minimum packet size which is 60 + 6 (HSR tag) */ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN)) return NULL; - if (port->type == HSR_PT_SLAVE_A) - lane_id = 0; - else - lane_id = 1; - lsdu_size = skb->len - 14; if (frame->is_vlan) lsdu_size -= 4; hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb); - set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id); + hsr_set_path_id(hsr_ethhdr, port); set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size); hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr); hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto; @@ -157,16 
+230,28 @@ static struct sk_buff *hsr_fill_tag(struct sk_buff *skb, return skb; } -static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, - struct hsr_frame_info *frame, - struct hsr_port *port) +/* If the original frame was an HSR tagged frame, just clone it to be sent + * unchanged. Otherwise, create a private frame especially tagged for 'port'. + */ +struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port) { - int movelen; unsigned char *dst, *src; struct sk_buff *skb; + int movelen; + + if (frame->skb_hsr) { + struct hsr_ethhdr *hsr_ethhdr = + (struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr); + + /* set the lane id properly */ + hsr_set_path_id(hsr_ethhdr, port); + return skb_clone(frame->skb_hsr, GFP_ATOMIC); + } /* Create the new skb with enough headroom to fit the HSR tag */ - skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC); + skb = __pskb_copy(frame->skb_std, + skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC); if (!skb) return NULL; skb_reset_mac_header(skb); @@ -189,21 +274,29 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o, return hsr_fill_tag(skb, frame, port, port->hsr->prot_version); } -/* If the original frame was an HSR tagged frame, just clone it to be sent - * unchanged. Otherwise, create a private frame especially tagged for 'port'. - */ -struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame, +struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame, struct hsr_port *port) { - if (frame->skb_hsr) - return skb_clone(frame->skb_hsr, GFP_ATOMIC); + struct sk_buff *skb; - if (port->type != HSR_PT_SLAVE_A && port->type != HSR_PT_SLAVE_B) { - WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port"); - return NULL; + if (frame->skb_prp) { + struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp); + + if (trailer) { + prp_set_lan_id(trailer, port); + } else { + WARN_ONCE(!trailer, "errored PRP skb"); + return NULL; + } + return skb_clone(frame->skb_prp, GFP_ATOMIC); } - return create_tagged_skb(frame->skb_std, frame, port); + skb = skb_copy_expand(frame->skb_std, 0, + skb_tailroom(frame->skb_std) + HSR_HLEN, + GFP_ATOMIC); + prp_fill_rct(skb, frame, port); + + return skb; } static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev, @@ -240,9 +333,18 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port, return dev_queue_xmit(skb); } +bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port) +{ + return ((frame->port_rcv->type == HSR_PT_SLAVE_A && + port->type == HSR_PT_SLAVE_B) || + (frame->port_rcv->type == HSR_PT_SLAVE_B && + port->type == HSR_PT_SLAVE_A)); +} + /* Forward the frame through all devices except: * - Back through the receiving device * - If it's a HSR frame: through a device where it has passed before + * - if it's a PRP frame: through another PRP slave device (no bridge) * - To the local HSR master only if the frame is directly addressed to it, or * a non-supervision multicast or broadcast frame. * @@ -270,25 +372,33 @@ static void hsr_forward_do(struct hsr_frame_info *frame) if (port->type != HSR_PT_MASTER && frame->is_local_exclusive) continue; - /* Don't send frame over port where it has been sent before */ - if (hsr_register_frame_out(port, frame->node_src, + /* Don't send frame over port where it has been sent before. + * Also fro SAN, this shouldn't be done. 
+ */ + if (!frame->is_from_san && + hsr_register_frame_out(port, frame->node_src, frame->sequence_nr)) continue; if (frame->is_supervision && port->type == HSR_PT_MASTER) { - hsr_handle_sup_frame(frame->skb_hsr, - frame->node_src, - frame->port_rcv); + hsr_handle_sup_frame(frame); continue; } + /* Check if frame is to be dropped. Eg. for PRP no forward + * between ports. + */ + if (hsr->proto_ops->drop_frame && + hsr->proto_ops->drop_frame(frame, port)) + continue; + if (port->type != HSR_PT_MASTER) skb = hsr->proto_ops->create_tagged_frame(frame, port); else skb = hsr->proto_ops->get_untagged_frame(frame, port); if (!skb) { - /* FIXME: Record the dropped frame? */ + frame->port_rcv->dev->stats.rx_dropped++; continue; } @@ -319,19 +429,20 @@ static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb, } } -void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, - struct hsr_frame_info *frame) +static void handle_std_frame(struct sk_buff *skb, + struct hsr_frame_info *frame) { - struct hsr_priv *hsr = frame->port_rcv->hsr; + struct hsr_port *port = frame->port_rcv; + struct hsr_priv *hsr = port->hsr; unsigned long irqflags; - if (proto == htons(ETH_P_PRP) || proto == htons(ETH_P_HSR)) { - frame->skb_std = NULL; - frame->skb_hsr = skb; - frame->sequence_nr = hsr_get_skb_sequence_nr(skb); + frame->skb_hsr = NULL; + frame->skb_prp = NULL; + frame->skb_std = skb; + + if (port->type != HSR_PT_MASTER) { + frame->is_from_san = true; } else { - frame->skb_std = skb; - frame->skb_hsr = NULL; /* Sequence nr for the master node */ spin_lock_irqsave(&hsr->seqnr_lock, irqflags); frame->sequence_nr = hsr->sequence_nr; @@ -340,29 +451,74 @@ void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, } } +void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame) +{ + if (proto == htons(ETH_P_PRP) || + proto == htons(ETH_P_HSR)) { + /* HSR tagged frame :- Data or Supervision */ + frame->skb_std = NULL; + frame->skb_prp = NULL; + frame->skb_hsr = skb; + frame->sequence_nr = hsr_get_skb_sequence_nr(skb); + return; + } + + /* Standard frame or PRP from master port */ + handle_std_frame(skb, frame); +} + +void prp_fill_frame_info(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame) +{ + /* Supervision frame */ + struct prp_rct *rct = skb_get_PRP_rct(skb); + + if (rct && + prp_check_lsdu_size(skb, rct, frame->is_supervision)) { + frame->skb_hsr = NULL; + frame->skb_std = NULL; + frame->skb_prp = skb; + frame->sequence_nr = prp_get_skb_sequence_nr(rct); + return; + } + handle_std_frame(skb, frame); +} + static int fill_frame_info(struct hsr_frame_info *frame, struct sk_buff *skb, struct hsr_port *port) { struct hsr_priv *hsr = port->hsr; + struct hsr_vlan_ethhdr *vlan_hdr; struct ethhdr *ethhdr; __be16 proto; + memset(frame, 0, sizeof(*frame)); frame->is_supervision = is_supervision_frame(port->hsr, skb); - frame->node_src = hsr_get_node(port, skb, frame->is_supervision); + frame->node_src = hsr_get_node(port, &hsr->node_db, skb, + frame->is_supervision, + port->type); if (!frame->node_src) return -1; /* Unknown node and !is_supervision, or no mem */ ethhdr = (struct ethhdr *)skb_mac_header(skb); frame->is_vlan = false; - if (ethhdr->h_proto == htons(ETH_P_8021Q)) { + proto = ethhdr->h_proto; + + if (proto == htons(ETH_P_8021Q)) frame->is_vlan = true; + + if (frame->is_vlan) { + vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr; + proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto; /* FIXME: */ netdev_warn_once(skb->dev, "VLAN not yet supported"); } - 
proto = ethhdr->h_proto; + + frame->is_from_san = false; frame->port_rcv = port; hsr->proto_ops->fill_frame_info(proto, skb, frame); - check_local_dest(hsr, skb, frame); + check_local_dest(port->hsr, skb, frame); return 0; } @@ -380,6 +536,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) if (fill_frame_info(&frame, skb, port) < 0) goto out_drop; + hsr_register_frame_in(frame.node_src, port, frame.sequence_nr); hsr_forward_do(&frame); /* Gets called for ingress frames as well as egress from master port. @@ -391,6 +548,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) } kfree_skb(frame.skb_hsr); + kfree_skb(frame.skb_prp); kfree_skb(frame.skb_std); return; diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h index 893207792d56..618140d484ad 100644 --- a/net/hsr/hsr_forward.h +++ b/net/hsr/hsr_forward.h @@ -14,10 +14,17 @@ #include "hsr_main.h" void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port); +struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port); struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame, struct hsr_port *port); struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame, struct hsr_port *port); +struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame, + struct hsr_port *port); +bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port); +void prp_fill_frame_info(__be16 proto, struct sk_buff *skb, + struct hsr_frame_info *frame); void hsr_fill_frame_info(__be16 proto, struct sk_buff *skb, struct hsr_frame_info *frame); #endif /* __HSR_FORWARD_H */ diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 13b2190e6556..5c97de459905 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -127,6 +127,19 @@ void hsr_del_nodes(struct list_head *node_db) kfree(node); } +void prp_handle_san_frame(bool san, enum hsr_port_type port, + struct hsr_node *node) +{ + /* Mark if the SAN node is over LAN_A or LAN_B */ + if (port == HSR_PT_SLAVE_A) { + node->san_a = true; + return; + } + + if (port == HSR_PT_SLAVE_B) + node->san_b = true; +} + /* Allocate an hsr_node and add it to node_db. 'addr' is the node's address_A; * seq_out is used to initialize filtering of outgoing duplicate frames * originating from the newly added node. @@ -134,7 +147,8 @@ void hsr_del_nodes(struct list_head *node_db) static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, struct list_head *node_db, unsigned char addr[], - u16 seq_out) + u16 seq_out, bool san, + enum hsr_port_type rx_port) { struct hsr_node *new_node, *node; unsigned long now; @@ -155,6 +169,9 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, for (i = 0; i < HSR_PT_PORTS; i++) new_node->seq_out[i] = seq_out; + if (san && hsr->proto_ops->handle_san_frame) + hsr->proto_ops->handle_san_frame(san, rx_port, new_node); + spin_lock_bh(&hsr->list_lock); list_for_each_entry_rcu(node, node_db, mac_list, lockdep_is_held(&hsr->list_lock)) { @@ -172,15 +189,26 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, return node; } +void prp_update_san_info(struct hsr_node *node, bool is_sup) +{ + if (!is_sup) + return; + + node->san_a = false; + node->san_b = false; +} + /* Get the hsr_node from which 'skb' was sent. 
*/ -struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, - bool is_sup) +struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db, + struct sk_buff *skb, bool is_sup, + enum hsr_port_type rx_port) { - struct list_head *node_db = &port->hsr->node_db; struct hsr_priv *hsr = port->hsr; struct hsr_node *node; struct ethhdr *ethhdr; + struct prp_rct *rct; + bool san = false; u16 seq_out; if (!skb_mac_header_was_set(skb)) @@ -189,14 +217,21 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, ethhdr = (struct ethhdr *)skb_mac_header(skb); list_for_each_entry_rcu(node, node_db, mac_list) { - if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) + if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) { + if (hsr->proto_ops->update_san_info) + hsr->proto_ops->update_san_info(node, is_sup); return node; - if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) + } + if (ether_addr_equal(node->macaddress_B, ethhdr->h_source)) { + if (hsr->proto_ops->update_san_info) + hsr->proto_ops->update_san_info(node, is_sup); return node; + } } - /* Everyone may create a node entry, connected node to a HSR device. */ - + /* Everyone may create a node entry, connected node to a HSR/PRP + * device. + */ if (ethhdr->h_proto == htons(ETH_P_PRP) || ethhdr->h_proto == htons(ETH_P_HSR)) { /* Use the existing sequence_nr from the tag as starting point @@ -204,31 +239,47 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, */ seq_out = hsr_get_skb_sequence_nr(skb) - 1; } else { - /* this is called also for frames from master port and - * so warn only for non master ports - */ - if (port->type != HSR_PT_MASTER) - WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); - seq_out = HSR_SEQNR_START; + rct = skb_get_PRP_rct(skb); + if (rct && prp_check_lsdu_size(skb, rct, is_sup)) { + seq_out = prp_get_skb_sequence_nr(rct); + } else { + if (rx_port != HSR_PT_MASTER) + san = true; + seq_out = HSR_SEQNR_START; + } } - return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out); + return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out, + san, rx_port); } /* Use the Supervision frame's info about an eventual macaddress_B for merging * nodes that has previously had their macaddress_B registered as a separate * node. */ -void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, - struct hsr_port *port_rcv) +void hsr_handle_sup_frame(struct hsr_frame_info *frame) { + struct hsr_node *node_curr = frame->node_src; + struct hsr_port *port_rcv = frame->port_rcv; struct hsr_priv *hsr = port_rcv->hsr; struct hsr_sup_payload *hsr_sp; struct hsr_node *node_real; + struct sk_buff *skb = NULL; struct list_head *node_db; struct ethhdr *ethhdr; int i; + /* Here either frame->skb_hsr or frame->skb_prp should be + * valid as supervision frame always will have protocol + * header info. + */ + if (frame->skb_hsr) + skb = frame->skb_hsr; + else if (frame->skb_prp) + skb = frame->skb_prp; + if (!skb) + return; + ethhdr = (struct ethhdr *)skb_mac_header(skb); /* Leave the ethernet header. 
*/ @@ -249,7 +300,8 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, if (!node_real) /* No frame received from AddrA of this node yet */ node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, - HSR_SEQNR_START - 1); + HSR_SEQNR_START - 1, true, + port_rcv->type); if (!node_real) goto done; /* No mem */ if (node_real == node_curr) @@ -275,7 +327,11 @@ void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, kfree_rcu(node_curr, rcu_head); done: - skb_push(skb, sizeof(struct hsrv1_ethhdr_sp)); + /* PRP uses v0 header */ + if (ethhdr->h_proto == htons(ETH_P_HSR)) + skb_push(skb, sizeof(struct hsrv1_ethhdr_sp)); + else + skb_push(skb, sizeof(struct hsrv0_ethhdr_sp)); } /* 'skb' is a frame meant for this host, that is to be passed to upper layers. diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index c06447780d05..86b43f539f2c 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -14,12 +14,26 @@ struct hsr_node; +struct hsr_frame_info { + struct sk_buff *skb_std; + struct sk_buff *skb_hsr; + struct sk_buff *skb_prp; + struct hsr_port *port_rcv; + struct hsr_node *node_src; + u16 sequence_nr; + bool is_supervision; + bool is_vlan; + bool is_local_dest; + bool is_local_exclusive; + bool is_from_san; +}; + void hsr_del_self_node(struct hsr_priv *hsr); void hsr_del_nodes(struct list_head *node_db); -struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, - bool is_sup); -void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, - struct hsr_port *port); +struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db, + struct sk_buff *skb, bool is_sup, + enum hsr_port_type rx_port); +void hsr_handle_sup_frame(struct hsr_frame_info *frame); bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr); void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb); @@ -49,6 +63,10 @@ int hsr_get_node_data(struct hsr_priv *hsr, int *if2_age, u16 *if2_seq); +void prp_handle_san_frame(bool san, enum hsr_port_type port, + struct hsr_node *node); +void prp_update_san_info(struct hsr_node *node, bool is_sup); + struct hsr_node { struct list_head mac_list; unsigned char macaddress_A[ETH_ALEN]; @@ -57,6 +75,9 @@ struct hsr_node { enum hsr_port_type addr_B_port; unsigned long time_in[HSR_PT_PORTS]; bool time_in_stale[HSR_PT_PORTS]; + /* if the node is a SAN */ + bool san_a; + bool san_b; u16 seq_out[HSR_PT_PORTS]; struct rcu_head rcu_head; }; diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 14f442c57a84..7dc92ce5a134 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -12,6 +12,7 @@ #include #include +#include /* Time constants as specified in the HSR specification (IEC-62439-3 2010) * Table 8. @@ -86,7 +87,12 @@ struct hsr_ethhdr { struct hsr_tag hsr_tag; } __packed; -/* HSR Supervision Frame data types. +struct hsr_vlan_ethhdr { + struct vlan_ethhdr vlanhdr; + struct hsr_tag hsr_tag; +} __packed; + +/* HSR/PRP Supervision Frame data types. * Field names as defined in the IEC:2010 standard for HSR. 
*/ struct hsr_sup_tag { @@ -142,6 +148,16 @@ struct prp_rct { __be16 PRP_suffix; } __packed; +static inline u16 get_prp_LSDU_size(struct prp_rct *rct) +{ + return ntohs(rct->lan_id_and_LSDU_size) & 0x0FFF; +} + +static inline void set_prp_lan_id(struct prp_rct *rct, u16 lan_id) +{ + rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) & + 0x0FFF) | (lan_id << 12)); +} static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size) { rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) & @@ -163,16 +179,22 @@ enum hsr_version { }; struct hsr_frame_info; +struct hsr_node; struct hsr_proto_ops { /* format and send supervision frame */ void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval); + void (*handle_san_frame)(bool san, enum hsr_port_type port, + struct hsr_node *node); + bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port); struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame, struct hsr_port *port); struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame, struct hsr_port *port); void (*fill_frame_info)(__be16 proto, struct sk_buff *skb, struct hsr_frame_info *frame); + bool (*invalid_dan_ingress_frame)(__be16 protocol); + void (*update_san_info)(struct hsr_node *node, bool is_sup); }; struct hsr_priv { @@ -189,6 +211,12 @@ struct hsr_priv { spinlock_t seqnr_lock; /* locking for sequence_nr */ spinlock_t list_lock; /* locking for node list */ struct hsr_proto_ops *proto_ops; +#define PRP_LAN_ID 0x5 /* 0x1010 for A and 0x1011 for B. Bit 0 is set + * based on SLAVE_A or SLAVE_B + */ + u8 net_id; /* for PRP, it occupies most significant 3 bits + * of lan_id + */ unsigned char sup_multicast_addr[ETH_ALEN]; #ifdef CONFIG_DEBUG_FS struct dentry *node_tbl_root; @@ -209,6 +237,49 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb) return ntohs(hsr_ethhdr->hsr_tag.sequence_nr); } +static inline struct prp_rct *skb_get_PRP_rct(struct sk_buff *skb) +{ + unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN; + + struct prp_rct *rct = (struct prp_rct *)tail; + + if (rct->PRP_suffix == htons(ETH_P_PRP)) + return rct; + + return NULL; +} + +/* Assume caller has confirmed this skb is PRP suffixed */ +static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct) +{ + return ntohs(rct->sequence_nr); +} + +static inline u16 get_prp_lan_id(struct prp_rct *rct) +{ + return ntohs(rct->lan_id_and_LSDU_size) >> 12; +} + +/* assume there is a valid rct */ +static inline bool prp_check_lsdu_size(struct sk_buff *skb, + struct prp_rct *rct, + bool is_sup) +{ + struct ethhdr *ethhdr; + int expected_lsdu_size; + + if (is_sup) { + expected_lsdu_size = HSR_V1_SUP_LSDUSIZE; + } else { + ethhdr = (struct ethhdr *)skb_mac_header(skb); + expected_lsdu_size = skb->len - 14; + if (ethhdr->h_proto == htons(ETH_P_8021Q)) + expected_lsdu_size -= 4; + } + + return (expected_lsdu_size == get_prp_LSDU_size(rct)); +} + #if IS_ENABLED(CONFIG_DEBUG_FS) void hsr_debugfs_rename(struct net_device *dev); void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index b5c0834de338..36d5fcf09c61 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -16,12 +16,22 @@ #include "hsr_forward.h" #include "hsr_framereg.h" +bool hsr_invalid_dan_ingress_frame(__be16 protocol) +{ + return (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR)); +} + static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; 
struct hsr_port *port; + struct hsr_priv *hsr; __be16 protocol; + /* Packets from dev_loopback_xmit() do not have L2 header, bail out */ + if (unlikely(skb->pkt_type == PACKET_LOOPBACK)) + return RX_HANDLER_PASS; + if (!skb_mac_header_was_set(skb)) { WARN_ONCE(1, "%s: skb invalid", __func__); return RX_HANDLER_PASS; @@ -30,6 +40,7 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) port = hsr_port_get_rcu(skb->dev); if (!port) goto finish_pass; + hsr = port->hsr; if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) { /* Directly kill frames sent by ourselves */ @@ -37,12 +48,23 @@ static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb) goto finish_consume; } + /* For HSR, only tagged frames are expected, but for PRP + * there could be non tagged frames as well from Single + * attached nodes (SANs). + */ protocol = eth_hdr(skb)->h_proto; - if (protocol != htons(ETH_P_PRP) && protocol != htons(ETH_P_HSR)) + if (hsr->proto_ops->invalid_dan_ingress_frame && + hsr->proto_ops->invalid_dan_ingress_frame(protocol)) goto finish_pass; skb_push(skb, ETH_HLEN); + if (skb_mac_header(skb) != skb->data) { + WARN_ONCE(1, "%s:%d: Malformed frame at source port %s)\n", + __func__, __LINE__, port->dev->name); + goto finish_consume; + } + hsr_forward_skb(skb, port); finish_consume: diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h index 9708a4f0ec09..edc4612bb009 100644 --- a/net/hsr/hsr_slave.h +++ b/net/hsr/hsr_slave.h @@ -32,4 +32,6 @@ static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev) rcu_dereference(dev->rx_handler_data) : NULL; } +bool hsr_invalid_dan_ingress_frame(__be16 protocol); + #endif /* __HSR_SLAVE_H */ From 795ec4f572509979442b9f2d269487dd58e3595c Mon Sep 17 00:00:00 2001 From: Murali Karicheri Date: Wed, 22 Jul 2020 10:40:22 -0400 Subject: [PATCH 1663/2056] net: prp: enhance debugfs to display PRP info Print PRP specific information from node table as part of debugfs node table display. Also display the node as DAN-H or DAN-P depending on the info from node table. Signed-off-by: Murali Karicheri Signed-off-by: David S. Miller --- net/hsr/hsr_debugfs.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index c1932c0a15be..3b6f675bd55a 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -24,7 +24,7 @@ static struct dentry *hsr_debugfs_root_dir; static void print_mac_address(struct seq_file *sfp, unsigned char *mac) { - seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:", + seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x ", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); } @@ -35,20 +35,32 @@ hsr_node_table_show(struct seq_file *sfp, void *data) struct hsr_priv *priv = (struct hsr_priv *)sfp->private; struct hsr_node *node; - seq_puts(sfp, "Node Table entries\n"); - seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], "); - seq_puts(sfp, "time_in[B], Address-B port\n"); + seq_printf(sfp, "Node Table entries for (%s) device\n", + (priv->prot_version == PRP_V1 ? 
"PRP" : "HSR")); + seq_puts(sfp, "MAC-Address-A, MAC-Address-B, time_in[A], "); + seq_puts(sfp, "time_in[B], Address-B port, "); + if (priv->prot_version == PRP_V1) + seq_puts(sfp, "SAN-A, SAN-B, DAN-P\n"); + else + seq_puts(sfp, "DAN-H\n"); + rcu_read_lock(); list_for_each_entry_rcu(node, &priv->node_db, mac_list) { /* skip self node */ if (hsr_addr_is_self(priv, node->macaddress_A)) continue; print_mac_address(sfp, &node->macaddress_A[0]); - seq_puts(sfp, " "); print_mac_address(sfp, &node->macaddress_B[0]); - seq_printf(sfp, "0x%lx, ", node->time_in[HSR_PT_SLAVE_A]); - seq_printf(sfp, "0x%lx ", node->time_in[HSR_PT_SLAVE_B]); - seq_printf(sfp, "0x%x\n", node->addr_B_port); + seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]); + seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]); + seq_printf(sfp, "%14x, ", node->addr_B_port); + + if (priv->prot_version == PRP_V1) + seq_printf(sfp, "%5x, %5x, %5x\n", + node->san_a, node->san_b, + (node->san_a == 0 && node->san_b == 0)); + else + seq_printf(sfp, "%5x\n", 1); } rcu_read_unlock(); return 0; @@ -57,7 +69,8 @@ hsr_node_table_show(struct seq_file *sfp, void *data) /* hsr_node_table_open - Open the node_table file * * Description: - * This routine opens a debugfs file node_table of specific hsr device + * This routine opens a debugfs file node_table of specific hsr + * or prp device */ static int hsr_node_table_open(struct inode *inode, struct file *filp) From 0ccf267e3477157a91766ca5175b5624936e7491 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:54:55 +0100 Subject: [PATCH 1664/2056] sfc: remove efx_ethtool_nway_reset() An MDIO-based n-way restart does not make sense for any of the NICs supported by this driver, nor for the coming EF100. Unlike on Falcon (which was already split off into a separate driver), the PHY on all of Siena, EF10 and EF100 is managed by MC firmware. While Siena can talk to the PHY over MDIO, doing so for anything other than debugging purposes (mdio_mii_ioctl) is likely to confuse the firmware. (According to the SFC firmware team, this support was originally added to the Siena driver early in the development of that product, before it was decided to have firmware manage the PHY.) Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ethtool.c | 1 - drivers/net/ethernet/sfc/ethtool_common.c | 8 -------- drivers/net/ethernet/sfc/ethtool_common.h | 1 - 3 files changed, 10 deletions(-) diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 9828516bd82d..038c08d2d7aa 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -232,7 +232,6 @@ const struct ethtool_ops efx_ethtool_ops = { .get_regs = efx_ethtool_get_regs, .get_msglevel = efx_ethtool_get_msglevel, .set_msglevel = efx_ethtool_set_msglevel, - .nway_reset = efx_ethtool_nway_reset, .get_link = ethtool_op_get_link, .get_coalesce = efx_ethtool_get_coalesce, .set_coalesce = efx_ethtool_set_coalesce, diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index e9a5a66529bf..fb06097b70d8 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -173,14 +173,6 @@ void efx_ethtool_self_test(struct net_device *net_dev, test->flags |= ETH_TEST_FL_FAILED; } -/* Restart autonegotiation */ -int efx_ethtool_nway_reset(struct net_device *net_dev) -{ - struct efx_nic *efx = netdev_priv(net_dev); - - return mdio45_nway_restart(&efx->mdio); -} - void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause) { diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index 3f3aaa92fbb5..0c0ea9ac4d08 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -19,7 +19,6 @@ u32 efx_ethtool_get_msglevel(struct net_device *net_dev); void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable); void efx_ethtool_self_test(struct net_device *net_dev, struct ethtool_test *test, u64 *data); -int efx_ethtool_nway_reset(struct net_device *net_dev); void efx_ethtool_get_pauseparam(struct net_device *net_dev, struct ethtool_pauseparam *pause); int efx_ethtool_set_pauseparam(struct net_device *net_dev, From adf72ee3f741d4188d1d731b878601ca11fbda5a Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:55:28 +0100 Subject: [PATCH 1665/2056] sfc_ef100: add EF100 register definitions Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_regs.h | 693 ++++++++++++++++++++++++++ 1 file changed, 693 insertions(+) create mode 100644 drivers/net/ethernet/sfc/ef100_regs.h diff --git a/drivers/net/ethernet/sfc/ef100_regs.h b/drivers/net/ethernet/sfc/ef100_regs.h new file mode 100644 index 000000000000..710bbdb19885 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_regs.h @@ -0,0 +1,693 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_EF100_REGS_H +#define EFX_EF100_REGS_H + +/* EF100 hardware architecture definitions have a name prefix following + * the format: + * + * E__ + * + * The following strings are used: + * + * MMIO register Host memory structure + * ------------------------------------------------------------- + * Address R + * Bitfield RF SF + * Enumerator FE SE + * + * is the first revision to which the definition applies: + * + * G: Riverhead + * + * If the definition has been changed or removed in later revisions + * then is the last revision to which the definition applies; + * otherwise it is "Z". + */ + +/************************************************************************** + * + * EF100 registers and descriptors + * + ************************************************************************** + */ + +/* HW_REV_ID_REG: Hardware revision info register */ +#define ER_GZ_HW_REV_ID 0x00000000 + +/* NIC_REV_ID: SoftNIC revision info register */ +#define ER_GZ_NIC_REV_ID 0x00000004 + +/* NIC_MAGIC: Signature register that should contain a well-known value */ +#define ER_GZ_NIC_MAGIC 0x00000008 +#define ERF_GZ_NIC_MAGIC_LBN 0 +#define ERF_GZ_NIC_MAGIC_WIDTH 32 +#define EFE_GZ_NIC_MAGIC_EXPECTED 0xEF100FCB + +/* MC_SFT_STATUS: MC soft status */ +#define ER_GZ_MC_SFT_STATUS 0x00000010 +#define ER_GZ_MC_SFT_STATUS_STEP 4 +#define ER_GZ_MC_SFT_STATUS_ROWS 2 + +/* MC_DB_LWRD_REG: MC doorbell register, low word */ +#define ER_GZ_MC_DB_LWRD 0x00000020 + +/* MC_DB_HWRD_REG: MC doorbell register, high word */ +#define ER_GZ_MC_DB_HWRD 0x00000024 + +/* EVQ_INT_PRIME: Prime EVQ */ +#define ER_GZ_EVQ_INT_PRIME 0x00000040 +#define ERF_GZ_IDX_LBN 16 +#define ERF_GZ_IDX_WIDTH 16 +#define ERF_GZ_EVQ_ID_LBN 0 +#define ERF_GZ_EVQ_ID_WIDTH 16 + +/* INT_AGG_RING_PRIME: Prime interrupt aggregation ring. */ +#define ER_GZ_INT_AGG_RING_PRIME 0x00000048 +/* defined as ERF_GZ_IDX_LBN 16; access=WO reset=0x0 */ +/* defined as ERF_GZ_IDX_WIDTH 16 */ +#define ERF_GZ_RING_ID_LBN 0 +#define ERF_GZ_RING_ID_WIDTH 16 + +/* EVQ_TMR: EVQ timer control */ +#define ER_GZ_EVQ_TMR 0x00000104 +#define ER_GZ_EVQ_TMR_STEP 65536 +#define ER_GZ_EVQ_TMR_ROWS 1024 + +/* EVQ_UNSOL_CREDIT_GRANT_SEQ: Grant credits for unsolicited events. */ +#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ 0x00000108 +#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_STEP 65536 +#define ER_GZ_EVQ_UNSOL_CREDIT_GRANT_SEQ_ROWS 1024 + +/* EVQ_DESC_CREDIT_GRANT_SEQ: Grant credits for descriptor proxy events. */ +#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ 0x00000110 +#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_STEP 65536 +#define ER_GZ_EVQ_DESC_CREDIT_GRANT_SEQ_ROWS 1024 + +/* RX_RING_DOORBELL: Ring Rx doorbell. */ +#define ER_GZ_RX_RING_DOORBELL 0x00000180 +#define ER_GZ_RX_RING_DOORBELL_STEP 65536 +#define ER_GZ_RX_RING_DOORBELL_ROWS 1024 +#define ERF_GZ_RX_RING_PIDX_LBN 16 +#define ERF_GZ_RX_RING_PIDX_WIDTH 16 + +/* TX_RING_DOORBELL: Ring Tx doorbell. */ +#define ER_GZ_TX_RING_DOORBELL 0x00000200 +#define ER_GZ_TX_RING_DOORBELL_STEP 65536 +#define ER_GZ_TX_RING_DOORBELL_ROWS 1024 +#define ERF_GZ_TX_RING_PIDX_LBN 16 +#define ERF_GZ_TX_RING_PIDX_WIDTH 16 + +/* TX_DESC_PUSH: Tx ring descriptor push. Reserved for future use. 
*/ +#define ER_GZ_TX_DESC_PUSH 0x00000210 +#define ER_GZ_TX_DESC_PUSH_STEP 65536 +#define ER_GZ_TX_DESC_PUSH_ROWS 1024 + +/* THE_TIME: NIC hardware time */ +#define ER_GZ_THE_TIME 0x00000280 +#define ER_GZ_THE_TIME_STEP 65536 +#define ER_GZ_THE_TIME_ROWS 1024 +#define ERF_GZ_THE_TIME_SECS_LBN 32 +#define ERF_GZ_THE_TIME_SECS_WIDTH 32 +#define ERF_GZ_THE_TIME_NANOS_LBN 2 +#define ERF_GZ_THE_TIME_NANOS_WIDTH 30 +#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_LBN 1 +#define ERF_GZ_THE_TIME_CLOCK_IN_SYNC_WIDTH 1 +#define ERF_GZ_THE_TIME_CLOCK_IS_SET_LBN 0 +#define ERF_GZ_THE_TIME_CLOCK_IS_SET_WIDTH 1 + +/* PARAMS_TLV_LEN: Size of design parameters area in bytes */ +#define ER_GZ_PARAMS_TLV_LEN 0x00000c00 +#define ER_GZ_PARAMS_TLV_LEN_STEP 65536 +#define ER_GZ_PARAMS_TLV_LEN_ROWS 1024 + +/* PARAMS_TLV: Design parameters */ +#define ER_GZ_PARAMS_TLV 0x00000c04 +#define ER_GZ_PARAMS_TLV_STEP 65536 +#define ER_GZ_PARAMS_TLV_ROWS 1024 + +/* EW_EMBEDDED_EVENT */ +#define ESF_GZ_EV_256_EVENT_LBN 0 +#define ESF_GZ_EV_256_EVENT_WIDTH 64 +#define ESE_GZ_EW_EMBEDDED_EVENT_STRUCT_SIZE 64 + +/* NMMU_PAGESZ_2M_ADDR */ +#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_LBN 59 +#define ESF_GZ_NMMU_2M_PAGE_SIZE_ID_WIDTH 5 +#define ESE_GZ_NMMU_PAGE_SIZE_2M 9 +#define ESF_GZ_NMMU_2M_PAGE_ID_LBN 21 +#define ESF_GZ_NMMU_2M_PAGE_ID_WIDTH 38 +#define ESF_GZ_NMMU_2M_PAGE_OFFSET_LBN 0 +#define ESF_GZ_NMMU_2M_PAGE_OFFSET_WIDTH 21 +#define ESE_GZ_NMMU_PAGESZ_2M_ADDR_STRUCT_SIZE 64 + +/* PARAM_TLV */ +#define ESF_GZ_TLV_VALUE_LBN 16 +#define ESF_GZ_TLV_VALUE_WIDTH 8 +#define ESE_GZ_TLV_VALUE_LENMIN 8 +#define ESE_GZ_TLV_VALUE_LENMAX 2040 +#define ESF_GZ_TLV_LEN_LBN 8 +#define ESF_GZ_TLV_LEN_WIDTH 8 +#define ESF_GZ_TLV_TYPE_LBN 0 +#define ESF_GZ_TLV_TYPE_WIDTH 8 +#define ESE_GZ_DP_NMMU_GROUP_SIZE 5 +#define ESE_GZ_DP_EVQ_UNSOL_CREDIT_SEQ_BITS 4 +#define ESE_GZ_DP_TX_EV_NUM_DESCS_BITS 3 +#define ESE_GZ_DP_RX_EV_NUM_PACKETS_BITS 2 +#define ESE_GZ_DP_PARTIAL_TSTAMP_SUB_NANO_BITS 1 +#define ESE_GZ_DP_PAD 0 +#define ESE_GZ_PARAM_TLV_STRUCT_SIZE 24 + +/* PCI_EXPRESS_XCAP_HDR */ +#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_LBN 20 +#define ESF_GZ_PCI_EXPRESS_XCAP_NEXT_WIDTH 12 +#define ESF_GZ_PCI_EXPRESS_XCAP_VER_LBN 16 +#define ESF_GZ_PCI_EXPRESS_XCAP_VER_WIDTH 4 +#define ESE_GZ_PCI_EXPRESS_XCAP_VER_VSEC 1 +#define ESF_GZ_PCI_EXPRESS_XCAP_ID_LBN 0 +#define ESF_GZ_PCI_EXPRESS_XCAP_ID_WIDTH 16 +#define ESE_GZ_PCI_EXPRESS_XCAP_ID_VNDR 0xb +#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_STRUCT_SIZE 32 + +/* RHEAD_BASE_EVENT */ +#define ESF_GZ_E_TYPE_LBN 60 +#define ESF_GZ_E_TYPE_WIDTH 4 +#define ESE_GZ_EF100_EV_DRIVER 5 +#define ESE_GZ_EF100_EV_MCDI 4 +#define ESE_GZ_EF100_EV_CONTROL 3 +#define ESE_GZ_EF100_EV_TX_TIMESTAMP 2 +#define ESE_GZ_EF100_EV_TX_COMPLETION 1 +#define ESE_GZ_EF100_EV_RX_PKTS 0 +#define ESF_GZ_EV_EVQ_PHASE_LBN 59 +#define ESF_GZ_EV_EVQ_PHASE_WIDTH 1 +#define ESE_GZ_RHEAD_BASE_EVENT_STRUCT_SIZE 64 + +/* RHEAD_EW_EVENT */ +#define ESF_GZ_EV_256_EV32_PHASE_LBN 255 +#define ESF_GZ_EV_256_EV32_PHASE_WIDTH 1 +#define ESF_GZ_EV_256_EV32_TYPE_LBN 251 +#define ESF_GZ_EV_256_EV32_TYPE_WIDTH 4 +#define ESE_GZ_EF100_EVEW_VIRTQ_DESC 2 +#define ESE_GZ_EF100_EVEW_TXQ_DESC 1 +#define ESE_GZ_EF100_EVEW_64BIT 0 +#define ESE_GZ_RHEAD_EW_EVENT_STRUCT_SIZE 256 + +/* RX_DESC */ +#define ESF_GZ_RX_BUF_ADDR_LBN 0 +#define ESF_GZ_RX_BUF_ADDR_WIDTH 64 +#define ESE_GZ_RX_DESC_STRUCT_SIZE 64 + +/* TXQ_DESC_PROXY_EVENT */ +#define ESF_GZ_EV_TXQ_DP_VI_ID_LBN 128 +#define ESF_GZ_EV_TXQ_DP_VI_ID_WIDTH 16 +#define ESF_GZ_EV_TXQ_DP_TXQ_DESC_LBN 0 +#define 
ESF_GZ_EV_TXQ_DP_TXQ_DESC_WIDTH 128 +#define ESE_GZ_TXQ_DESC_PROXY_EVENT_STRUCT_SIZE 144 + +/* TX_DESC_TYPE */ +#define ESF_GZ_TX_DESC_TYPE_LBN 124 +#define ESF_GZ_TX_DESC_TYPE_WIDTH 4 +#define ESE_GZ_TX_DESC_TYPE_DESC2CMPT 7 +#define ESE_GZ_TX_DESC_TYPE_MEM2MEM 4 +#define ESE_GZ_TX_DESC_TYPE_SEG 3 +#define ESE_GZ_TX_DESC_TYPE_TSO 2 +#define ESE_GZ_TX_DESC_TYPE_PREFIX 1 +#define ESE_GZ_TX_DESC_TYPE_SEND 0 +#define ESE_GZ_TX_DESC_TYPE_STRUCT_SIZE 128 + +/* VIRTQ_DESC_PROXY_EVENT */ +#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_LBN 144 +#define ESF_GZ_EV_VQ_DP_AVAIL_ENTRY_WIDTH 16 +#define ESF_GZ_EV_VQ_DP_VI_ID_LBN 128 +#define ESF_GZ_EV_VQ_DP_VI_ID_WIDTH 16 +#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_LBN 0 +#define ESF_GZ_EV_VQ_DP_VIRTQ_DESC_WIDTH 128 +#define ESE_GZ_VIRTQ_DESC_PROXY_EVENT_STRUCT_SIZE 160 + +/* XIL_CFGBAR_TBL_ENTRY */ +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_LBN 96 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_HI_WIDTH 32 +#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_LBN 68 +#define ESF_GZ_CFGBAR_CONT_CAP_OFFSET_WIDTH 60 +#define ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT 4 +#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_LBN 67 +#define ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF_WIDTH 29 +#define ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT 4 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_LBN 68 +#define ESF_GZ_CFGBAR_CONT_CAP_OFF_LO_WIDTH 28 +#define ESF_GZ_CFGBAR_CONT_CAP_RSV_LBN 67 +#define ESF_GZ_CFGBAR_CONT_CAP_RSV_WIDTH 1 +#define ESF_GZ_CFGBAR_EF100_BAR_LBN 64 +#define ESF_GZ_CFGBAR_EF100_BAR_WIDTH 3 +#define ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID 7 +#define ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_CFGBAR_CONT_CAP_BAR_LBN 64 +#define ESF_GZ_CFGBAR_CONT_CAP_BAR_WIDTH 3 +#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID 7 +#define ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_CFGBAR_ENTRY_SIZE_LBN 32 +#define ESF_GZ_CFGBAR_ENTRY_SIZE_WIDTH 32 +#define ESE_GZ_CFGBAR_ENTRY_SIZE_EF100 12 +#define ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE 8 +#define ESF_GZ_CFGBAR_ENTRY_LAST_LBN 28 +#define ESF_GZ_CFGBAR_ENTRY_LAST_WIDTH 1 +#define ESF_GZ_CFGBAR_ENTRY_REV_LBN 20 +#define ESF_GZ_CFGBAR_ENTRY_REV_WIDTH 8 +#define ESE_GZ_CFGBAR_ENTRY_REV_EF100 0 +#define ESF_GZ_CFGBAR_ENTRY_FORMAT_LBN 0 +#define ESF_GZ_CFGBAR_ENTRY_FORMAT_WIDTH 20 +#define ESE_GZ_CFGBAR_ENTRY_LAST 0xfffff +#define ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR 0xffffe +#define ESE_GZ_CFGBAR_ENTRY_EF100 0xef100 +#define ESE_GZ_XIL_CFGBAR_TBL_ENTRY_STRUCT_SIZE 128 + +/* XIL_CFGBAR_VSEC */ +#define ESF_GZ_VSEC_TBL_OFF_HI_LBN 64 +#define ESF_GZ_VSEC_TBL_OFF_HI_WIDTH 32 +#define ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT 32 +#define ESF_GZ_VSEC_TBL_OFF_LO_LBN 36 +#define ESF_GZ_VSEC_TBL_OFF_LO_WIDTH 28 +#define ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT 4 +#define ESF_GZ_VSEC_TBL_BAR_LBN 32 +#define ESF_GZ_VSEC_TBL_BAR_WIDTH 4 +#define ESE_GZ_VSEC_BAR_NUM_INVALID 7 +#define ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM 6 +#define ESF_GZ_VSEC_LEN_LBN 20 +#define ESF_GZ_VSEC_LEN_WIDTH 12 +#define ESE_GZ_VSEC_LEN_HIGH_OFFT 16 +#define ESE_GZ_VSEC_LEN_MIN 12 +#define ESF_GZ_VSEC_VER_LBN 16 +#define ESF_GZ_VSEC_VER_WIDTH 4 +#define ESE_GZ_VSEC_VER_XIL_CFGBAR 0 +#define ESF_GZ_VSEC_ID_LBN 0 +#define ESF_GZ_VSEC_ID_WIDTH 16 +#define ESE_GZ_XILINX_VSEC_ID 0x20 +#define ESE_GZ_XIL_CFGBAR_VSEC_STRUCT_SIZE 96 + +/* rh_egres_hclass */ +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_LBN 15 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_LBN 13 +#define ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS_WIDTH 2 +#define 
ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_LBN 12 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_LBN 10 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN 8 +#define ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_LBN 5 +#define ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS_WIDTH 3 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_LBN 3 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN_WIDTH 2 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_LBN 2 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS_WIDTH 1 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_LBN 0 +#define ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS_WIDTH 2 +#define ESE_GZ_RH_EGRES_HCLASS_STRUCT_SIZE 16 + +/* sf_driver */ +#define ESF_GZ_DRIVER_E_TYPE_LBN 60 +#define ESF_GZ_DRIVER_E_TYPE_WIDTH 4 +#define ESF_GZ_DRIVER_PHASE_LBN 59 +#define ESF_GZ_DRIVER_PHASE_WIDTH 1 +#define ESF_GZ_DRIVER_DATA_LBN 0 +#define ESF_GZ_DRIVER_DATA_WIDTH 59 +#define ESE_GZ_SF_DRIVER_STRUCT_SIZE 64 + +/* sf_ev_rsvd */ +#define ESF_GZ_EV_RSVD_TBD_NEXT_LBN 34 +#define ESF_GZ_EV_RSVD_TBD_NEXT_WIDTH 3 +#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_LBN 30 +#define ESF_GZ_EV_RSVD_EVENT_GEN_FLAGS_WIDTH 4 +#define ESF_GZ_EV_RSVD_SRC_QID_LBN 18 +#define ESF_GZ_EV_RSVD_SRC_QID_WIDTH 12 +#define ESF_GZ_EV_RSVD_SEQ_NUM_LBN 2 +#define ESF_GZ_EV_RSVD_SEQ_NUM_WIDTH 16 +#define ESF_GZ_EV_RSVD_TBD_LBN 0 +#define ESF_GZ_EV_RSVD_TBD_WIDTH 2 +#define ESE_GZ_SF_EV_RSVD_STRUCT_SIZE 37 + +/* sf_flush_evnt */ +#define ESF_GZ_EV_FLSH_E_TYPE_LBN 60 +#define ESF_GZ_EV_FLSH_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_FLSH_PHASE_LBN 59 +#define ESF_GZ_EV_FLSH_PHASE_WIDTH 1 +#define ESF_GZ_EV_FLSH_SUB_TYPE_LBN 53 +#define ESF_GZ_EV_FLSH_SUB_TYPE_WIDTH 6 +#define ESF_GZ_EV_FLSH_RSVD_LBN 10 +#define ESF_GZ_EV_FLSH_RSVD_WIDTH 43 +#define ESF_GZ_EV_FLSH_LABEL_LBN 4 +#define ESF_GZ_EV_FLSH_LABEL_WIDTH 6 +#define ESF_GZ_EV_FLSH_FLUSH_TYPE_LBN 0 +#define ESF_GZ_EV_FLSH_FLUSH_TYPE_WIDTH 4 +#define ESE_GZ_SF_FLUSH_EVNT_STRUCT_SIZE 64 + +/* sf_rx_pkts */ +#define ESF_GZ_EV_RXPKTS_E_TYPE_LBN 60 +#define ESF_GZ_EV_RXPKTS_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_RXPKTS_PHASE_LBN 59 +#define ESF_GZ_EV_RXPKTS_PHASE_WIDTH 1 +#define ESF_GZ_EV_RXPKTS_RSVD_LBN 22 +#define ESF_GZ_EV_RXPKTS_RSVD_WIDTH 37 +#define ESF_GZ_EV_RXPKTS_Q_LABEL_LBN 16 +#define ESF_GZ_EV_RXPKTS_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_RXPKTS_NUM_PKT_LBN 0 +#define ESF_GZ_EV_RXPKTS_NUM_PKT_WIDTH 16 +#define ESE_GZ_SF_RX_PKTS_STRUCT_SIZE 64 + +/* sf_rx_prefix */ +#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_LBN 160 +#define ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI_WIDTH 16 +#define ESF_GZ_RX_PREFIX_CSUM_FRAME_LBN 144 +#define ESF_GZ_RX_PREFIX_CSUM_FRAME_WIDTH 16 +#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_LBN 128 +#define ESF_GZ_RX_PREFIX_INGRESS_VPORT_WIDTH 16 +#define ESF_GZ_RX_PREFIX_USER_MARK_LBN 96 +#define ESF_GZ_RX_PREFIX_USER_MARK_WIDTH 32 +#define ESF_GZ_RX_PREFIX_RSS_HASH_LBN 64 +#define ESF_GZ_RX_PREFIX_RSS_HASH_WIDTH 32 +#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN 32 +#define ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_WIDTH 32 +#define ESF_GZ_RX_PREFIX_CLASS_LBN 16 +#define ESF_GZ_RX_PREFIX_CLASS_WIDTH 16 +#define ESF_GZ_RX_PREFIX_USER_FLAG_LBN 15 +#define ESF_GZ_RX_PREFIX_USER_FLAG_WIDTH 1 +#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN 14 +#define ESF_GZ_RX_PREFIX_RSS_HASH_VALID_WIDTH 1 +#define ESF_GZ_RX_PREFIX_LENGTH_LBN 0 +#define ESF_GZ_RX_PREFIX_LENGTH_WIDTH 14 +#define ESE_GZ_SF_RX_PREFIX_STRUCT_SIZE 176 + +/* sf_rxtx_generic */ 
+#define ESF_GZ_EV_BARRIER_LBN 167 +#define ESF_GZ_EV_BARRIER_WIDTH 1 +#define ESF_GZ_EV_RSVD_LBN 130 +#define ESF_GZ_EV_RSVD_WIDTH 37 +#define ESF_GZ_EV_DPRXY_LBN 129 +#define ESF_GZ_EV_DPRXY_WIDTH 1 +#define ESF_GZ_EV_VIRTIO_LBN 128 +#define ESF_GZ_EV_VIRTIO_WIDTH 1 +#define ESF_GZ_EV_COUNT_LBN 0 +#define ESF_GZ_EV_COUNT_WIDTH 128 +#define ESE_GZ_SF_RXTX_GENERIC_STRUCT_SIZE 168 + +/* sf_ts_stamp */ +#define ESF_GZ_EV_TS_E_TYPE_LBN 60 +#define ESF_GZ_EV_TS_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_TS_PHASE_LBN 59 +#define ESF_GZ_EV_TS_PHASE_WIDTH 1 +#define ESF_GZ_EV_TS_RSVD_LBN 56 +#define ESF_GZ_EV_TS_RSVD_WIDTH 3 +#define ESF_GZ_EV_TS_STATUS_LBN 54 +#define ESF_GZ_EV_TS_STATUS_WIDTH 2 +#define ESF_GZ_EV_TS_Q_LABEL_LBN 48 +#define ESF_GZ_EV_TS_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_TS_DESC_ID_LBN 32 +#define ESF_GZ_EV_TS_DESC_ID_WIDTH 16 +#define ESF_GZ_EV_TS_PARTIAL_STAMP_LBN 0 +#define ESF_GZ_EV_TS_PARTIAL_STAMP_WIDTH 32 +#define ESE_GZ_SF_TS_STAMP_STRUCT_SIZE 64 + +/* sf_tx_cmplt */ +#define ESF_GZ_EV_TXCMPL_E_TYPE_LBN 60 +#define ESF_GZ_EV_TXCMPL_E_TYPE_WIDTH 4 +#define ESF_GZ_EV_TXCMPL_PHASE_LBN 59 +#define ESF_GZ_EV_TXCMPL_PHASE_WIDTH 1 +#define ESF_GZ_EV_TXCMPL_RSVD_LBN 22 +#define ESF_GZ_EV_TXCMPL_RSVD_WIDTH 37 +#define ESF_GZ_EV_TXCMPL_Q_LABEL_LBN 16 +#define ESF_GZ_EV_TXCMPL_Q_LABEL_WIDTH 6 +#define ESF_GZ_EV_TXCMPL_NUM_DESC_LBN 0 +#define ESF_GZ_EV_TXCMPL_NUM_DESC_WIDTH 16 +#define ESE_GZ_SF_TX_CMPLT_STRUCT_SIZE 64 + +/* sf_tx_desc2cmpt_dsc_fmt */ +#define ESF_GZ_D2C_TGT_VI_ID_LBN 108 +#define ESF_GZ_D2C_TGT_VI_ID_WIDTH 16 +#define ESF_GZ_D2C_CMPT2_LBN 107 +#define ESF_GZ_D2C_CMPT2_WIDTH 1 +#define ESF_GZ_D2C_ABS_VI_ID_LBN 106 +#define ESF_GZ_D2C_ABS_VI_ID_WIDTH 1 +#define ESF_GZ_D2C_ORDERED_LBN 105 +#define ESF_GZ_D2C_ORDERED_WIDTH 1 +#define ESF_GZ_D2C_SKIP_N_LBN 97 +#define ESF_GZ_D2C_SKIP_N_WIDTH 8 +#define ESF_GZ_D2C_RSVD_LBN 64 +#define ESF_GZ_D2C_RSVD_WIDTH 33 +#define ESF_GZ_D2C_COMPLETION_LBN 0 +#define ESF_GZ_D2C_COMPLETION_WIDTH 64 +#define ESE_GZ_SF_TX_DESC2CMPT_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_mem2mem_dsc_fmt */ +#define ESF_GZ_M2M_ADDR_SPC_EN_LBN 123 +#define ESF_GZ_M2M_ADDR_SPC_EN_WIDTH 1 +#define ESF_GZ_M2M_TRANSLATE_ADDR_LBN 122 +#define ESF_GZ_M2M_TRANSLATE_ADDR_WIDTH 1 +#define ESF_GZ_M2M_RSVD_LBN 120 +#define ESF_GZ_M2M_RSVD_WIDTH 2 +#define ESF_GZ_M2M_ADDR_SPC_LBN 108 +#define ESF_GZ_M2M_ADDR_SPC_WIDTH 12 +#define ESF_GZ_M2M_ADDR_SPC_PASID_LBN 86 +#define ESF_GZ_M2M_ADDR_SPC_PASID_WIDTH 22 +#define ESF_GZ_M2M_ADDR_SPC_MODE_LBN 84 +#define ESF_GZ_M2M_ADDR_SPC_MODE_WIDTH 2 +#define ESF_GZ_M2M_LEN_MINUS_1_LBN 64 +#define ESF_GZ_M2M_LEN_MINUS_1_WIDTH 20 +#define ESF_GZ_M2M_ADDR_LBN 0 +#define ESF_GZ_M2M_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_MEM2MEM_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_ovr_dsc_fmt */ +#define ESF_GZ_TX_PREFIX_MARK_EN_LBN 123 +#define ESF_GZ_TX_PREFIX_MARK_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_LBN 122 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_LBN 121 +#define ESF_GZ_TX_PREFIX_INLINE_CAPSULE_META_WIDTH 1 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_LBN 120 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_EN_WIDTH 1 +#define ESF_GZ_TX_PREFIX_RSRVD_LBN 64 +#define ESF_GZ_TX_PREFIX_RSRVD_WIDTH 56 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_LBN 48 +#define ESF_GZ_TX_PREFIX_EGRESS_MPORT_WIDTH 16 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_LBN 32 +#define ESF_GZ_TX_PREFIX_INGRESS_MPORT_WIDTH 16 +#define ESF_GZ_TX_PREFIX_MARK_LBN 0 +#define ESF_GZ_TX_PREFIX_MARK_WIDTH 32 +#define ESE_GZ_SF_TX_OVR_DSC_FMT_STRUCT_SIZE 
124 + +/* sf_tx_seg_dsc_fmt */ +#define ESF_GZ_TX_SEG_ADDR_SPC_EN_LBN 123 +#define ESF_GZ_TX_SEG_ADDR_SPC_EN_WIDTH 1 +#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_LBN 122 +#define ESF_GZ_TX_SEG_TRANSLATE_ADDR_WIDTH 1 +#define ESF_GZ_TX_SEG_RSVD2_LBN 120 +#define ESF_GZ_TX_SEG_RSVD2_WIDTH 2 +#define ESF_GZ_TX_SEG_ADDR_SPC_LBN 108 +#define ESF_GZ_TX_SEG_ADDR_SPC_WIDTH 12 +#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_LBN 86 +#define ESF_GZ_TX_SEG_ADDR_SPC_PASID_WIDTH 22 +#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_LBN 84 +#define ESF_GZ_TX_SEG_ADDR_SPC_MODE_WIDTH 2 +#define ESF_GZ_TX_SEG_RSVD_LBN 80 +#define ESF_GZ_TX_SEG_RSVD_WIDTH 4 +#define ESF_GZ_TX_SEG_LEN_LBN 64 +#define ESF_GZ_TX_SEG_LEN_WIDTH 16 +#define ESF_GZ_TX_SEG_ADDR_LBN 0 +#define ESF_GZ_TX_SEG_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_SEG_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_std_dsc_fmt */ +#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_LBN 108 +#define ESF_GZ_TX_SEND_VLAN_INSERT_TCI_WIDTH 16 +#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_LBN 107 +#define ESF_GZ_TX_SEND_VLAN_INSERT_EN_WIDTH 1 +#define ESF_GZ_TX_SEND_TSTAMP_REQ_LBN 106 +#define ESF_GZ_TX_SEND_TSTAMP_REQ_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_OUTER_L4_LBN 105 +#define ESF_GZ_TX_SEND_CSO_OUTER_L4_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_OUTER_L3_LBN 104 +#define ESF_GZ_TX_SEND_CSO_OUTER_L3_WIDTH 1 +#define ESF_GZ_TX_SEND_CSO_INNER_L3_LBN 101 +#define ESF_GZ_TX_SEND_CSO_INNER_L3_WIDTH 3 +#define ESF_GZ_TX_SEND_RSVD_LBN 99 +#define ESF_GZ_TX_SEND_RSVD_WIDTH 2 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_LBN 97 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_EN_WIDTH 2 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_LBN 92 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W_WIDTH 5 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_LBN 83 +#define ESF_GZ_TX_SEND_CSO_PARTIAL_START_W_WIDTH 9 +#define ESF_GZ_TX_SEND_NUM_SEGS_LBN 78 +#define ESF_GZ_TX_SEND_NUM_SEGS_WIDTH 5 +#define ESF_GZ_TX_SEND_LEN_LBN 64 +#define ESF_GZ_TX_SEND_LEN_WIDTH 14 +#define ESF_GZ_TX_SEND_ADDR_LBN 0 +#define ESF_GZ_TX_SEND_ADDR_WIDTH 64 +#define ESE_GZ_SF_TX_STD_DSC_FMT_STRUCT_SIZE 124 + +/* sf_tx_tso_dsc_fmt */ +#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_LBN 108 +#define ESF_GZ_TX_TSO_VLAN_INSERT_TCI_WIDTH 16 +#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_LBN 107 +#define ESF_GZ_TX_TSO_VLAN_INSERT_EN_WIDTH 1 +#define ESF_GZ_TX_TSO_TSTAMP_REQ_LBN 106 +#define ESF_GZ_TX_TSO_TSTAMP_REQ_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_OUTER_L4_LBN 105 +#define ESF_GZ_TX_TSO_CSO_OUTER_L4_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_OUTER_L3_LBN 104 +#define ESF_GZ_TX_TSO_CSO_OUTER_L3_WIDTH 1 +#define ESF_GZ_TX_TSO_CSO_INNER_L3_LBN 101 +#define ESF_GZ_TX_TSO_CSO_INNER_L3_WIDTH 3 +#define ESF_GZ_TX_TSO_RSVD_LBN 94 +#define ESF_GZ_TX_TSO_RSVD_WIDTH 7 +#define ESF_GZ_TX_TSO_CSO_INNER_L4_LBN 93 +#define ESF_GZ_TX_TSO_CSO_INNER_L4_WIDTH 1 +#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_LBN 85 +#define ESF_GZ_TX_TSO_INNER_L4_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_LBN 77 +#define ESF_GZ_TX_TSO_INNER_L3_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_LBN 69 +#define ESF_GZ_TX_TSO_OUTER_L4_OFF_W_WIDTH 8 +#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_LBN 64 +#define ESF_GZ_TX_TSO_OUTER_L3_OFF_W_WIDTH 5 +#define ESF_GZ_TX_TSO_PAYLOAD_LEN_LBN 42 +#define ESF_GZ_TX_TSO_PAYLOAD_LEN_WIDTH 22 +#define ESF_GZ_TX_TSO_HDR_LEN_W_LBN 34 +#define ESF_GZ_TX_TSO_HDR_LEN_W_WIDTH 8 +#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_LBN 33 +#define ESF_GZ_TX_TSO_ED_OUTER_UDP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_LBN 32 +#define ESF_GZ_TX_TSO_ED_INNER_IP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_LBN 31 +#define 
ESF_GZ_TX_TSO_ED_OUTER_IP_LEN_WIDTH 1 +#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_LBN 29 +#define ESF_GZ_TX_TSO_ED_INNER_IP4_ID_WIDTH 2 +#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_LBN 27 +#define ESF_GZ_TX_TSO_ED_OUTER_IP4_ID_WIDTH 2 +#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_LBN 17 +#define ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS_WIDTH 10 +#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_LBN 14 +#define ESF_GZ_TX_TSO_HDR_NUM_SEGS_WIDTH 3 +#define ESF_GZ_TX_TSO_MSS_LBN 0 +#define ESF_GZ_TX_TSO_MSS_WIDTH 14 +#define ESE_GZ_SF_TX_TSO_DSC_FMT_STRUCT_SIZE 124 + + +/* Enum DESIGN_PARAMS */ +#define ESE_EF100_DP_GZ_RX_MAX_RUNT 17 +#define ESE_EF100_DP_GZ_VI_STRIDES 16 +#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES 15 +#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS 14 +#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN 13 +#define ESE_EF100_DP_GZ_COMPAT 12 +#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES 11 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS 10 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN 9 +#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY 8 +#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY 7 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS 6 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN 5 +#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS 4 +#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE 3 +#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS 2 +#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS 1 +#define ESE_EF100_DP_GZ_PAD 0 + +/* Enum DESIGN_PARAM_DEFAULTS */ +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT 0x3fffff +#define ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT 8192 +#define ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN_DEFAULT 8192 +#define ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS_DEFAULT 0x1106 +#define ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT 0x3ff +#define ESE_EF100_DP_GZ_RX_MAX_RUNT_DEFAULT 640 +#define ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS_DEFAULT 512 +#define ESE_EF100_DP_GZ_NMMU_PAGE_SIZES_DEFAULT 512 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT 192 +#define ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY_DEFAULT 64 +#define ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY_DEFAULT 64 +#define ESE_EF100_DP_GZ_NMMU_GROUP_SIZE_DEFAULT 32 +#define ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT 16 +#define ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS_DEFAULT 7 +#define ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT 4 +#define ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS_DEFAULT 2 +#define ESE_EF100_DP_GZ_COMPAT_DEFAULT 0 + +/* Enum HOST_IF_CONSTANTS */ +#define ESE_GZ_FCW_LEN 0x4C +#define ESE_GZ_RX_PKT_PREFIX_LEN 22 + +/* Enum PCI_CONSTANTS */ +#define ESE_GZ_PCI_BASE_CONFIG_SPACE_SIZE 256 +#define ESE_GZ_PCI_EXPRESS_XCAP_HDR_SIZE 4 + +/* Enum RH_HCLASS_L2_CLASS */ +#define ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN 1 +#define ESE_GZ_RH_HCLASS_L2_CLASS_OTHER 0 + +/* Enum RH_HCLASS_L2_STATUS */ +#define ESE_GZ_RH_HCLASS_L2_STATUS_RESERVED 3 +#define ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR 2 +#define ESE_GZ_RH_HCLASS_L2_STATUS_LEN_ERR 1 +#define ESE_GZ_RH_HCLASS_L2_STATUS_OK 0 + +/* Enum RH_HCLASS_L3_CLASS */ +#define ESE_GZ_RH_HCLASS_L3_CLASS_OTHER 3 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP6 2 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD 1 +#define ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD 0 + +/* Enum RH_HCLASS_L4_CLASS */ +#define ESE_GZ_RH_HCLASS_L4_CLASS_OTHER 3 +#define ESE_GZ_RH_HCLASS_L4_CLASS_FRAG 2 +#define ESE_GZ_RH_HCLASS_L4_CLASS_UDP 1 +#define ESE_GZ_RH_HCLASS_L4_CLASS_TCP 0 + +/* Enum RH_HCLASS_L4_CSUM */ +#define ESE_GZ_RH_HCLASS_L4_CSUM_GOOD 1 +#define ESE_GZ_RH_HCLASS_L4_CSUM_BAD_OR_UNKNOWN 0 + +/* Enum RH_HCLASS_TUNNEL_CLASS */ +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_7 7 +#define 
ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_6 6 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_5 5 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_RESERVED_4 4 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE 3 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE 2 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN 1 +#define ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE 0 + +/* Enum TX_DESC_CSO_PARTIAL_EN */ +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_TCP 2 +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_UDP 1 +#define ESE_GZ_TX_DESC_CSO_PARTIAL_EN_OFF 0 + +/* Enum TX_DESC_CS_INNER_L3 */ +#define ESE_GZ_TX_DESC_CS_INNER_L3_GENEVE 3 +#define ESE_GZ_TX_DESC_CS_INNER_L3_NVGRE 2 +#define ESE_GZ_TX_DESC_CS_INNER_L3_VXLAN 1 +#define ESE_GZ_TX_DESC_CS_INNER_L3_OFF 0 + +/* Enum TX_DESC_IP4_ID */ +#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD16 2 +#define ESE_GZ_TX_DESC_IP4_ID_INC_MOD15 1 +#define ESE_GZ_TX_DESC_IP4_ID_NO_OP 0 +/**************************************************************************/ + +#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_LBN 44 +#define ESF_GZ_EV_DEBUG_EVENT_GEN_FLAGS_WIDTH 4 +#define ESF_GZ_EV_DEBUG_SRC_QID_LBN 32 +#define ESF_GZ_EV_DEBUG_SRC_QID_WIDTH 12 +#define ESF_GZ_EV_DEBUG_SEQ_NUM_LBN 16 +#define ESF_GZ_EV_DEBUG_SEQ_NUM_WIDTH 16 + +#endif /* EFX_EF100_REGS_H */ From 61060c5dc5c5734942528f31c094606539fffb8b Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:55:41 +0100 Subject: [PATCH 1666/2056] sfc_ef100: register accesses on EF100 EF100 adds a few new valid addresses for efx_writed_page(), as well as a Function Control Window in the BAR whose location is variable. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/io.h | 16 +++++++++++++--- drivers/net/ethernet/sfc/net_driver.h | 2 ++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h index c3c011bc6a68..30439cc83a89 100644 --- a/drivers/net/ethernet/sfc/io.h +++ b/drivers/net/ethernet/sfc/io.h @@ -75,6 +75,11 @@ #endif #endif +static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg) +{ + return efx->reg_base + reg; +} + #ifdef EFX_USE_QWORD_IO static inline void _efx_writeq(struct efx_nic *efx, __le64 value, unsigned int reg) @@ -217,8 +222,11 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); } -/* default VI stride (step between per-VI registers) is 8K */ -#define EFX_DEFAULT_VI_STRIDE 0x2000 +/* default VI stride (step between per-VI registers) is 8K on EF10 and + * 64K on EF100 + */ +#define EFX_DEFAULT_VI_STRIDE 0x2000 +#define EF100_DEFAULT_VI_STRIDE 0x10000 /* Calculate offset to page-mapped register */ static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page, @@ -265,7 +273,9 @@ _efx_writed_page(struct efx_nic *efx, const efx_dword_t *value, #define efx_writed_page(efx, value, reg, page) \ _efx_writed_page(efx, value, \ reg + \ - BUILD_BUG_ON_ZERO((reg) != 0x400 && \ + BUILD_BUG_ON_ZERO((reg) != 0x180 && \ + (reg) != 0x200 && \ + (reg) != 0x400 && \ (reg) != 0x420 && \ (reg) != 0x830 && \ (reg) != 0x83c && \ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 786fda559976..db35beabdcff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -964,6 +964,7 @@ struct efx_async_filter_insertion { * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their * xdp_rxq_info structures? 
* @mem_bar: The BAR that is mapped into membase. + * @reg_base: Offset from the start of the bar to the function control window. * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. This @@ -1142,6 +1143,7 @@ struct efx_nic { bool xdp_rxq_info_failed; unsigned int mem_bar; + u32 reg_base; /* The following fields may be written more often */ From 51b35a454efdcd86f578e61ec8bf7596299c5f80 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:55:55 +0100 Subject: [PATCH 1667/2056] sfc: skeleton EF100 PF driver No TX or RX path, no MCDI, not even an ifup/down handler. Besides stubs, the bulk of the patch deals with reading the Xilinx extended PCIe capability, which tells us where to find our BAR. Though in the same module, EF100 has its own struct pci_driver, which is named sfc_ef100. A small number of additional nic_type methods are added; those in the TX (tx_enqueue) and RX (rx_packet) paths are called through indirect call wrappers to minimise the performance impact. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/Kconfig | 5 +- drivers/net/ethernet/sfc/Makefile | 4 +- drivers/net/ethernet/sfc/ef10.c | 7 + drivers/net/ethernet/sfc/ef100.c | 541 ++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100.h | 12 + drivers/net/ethernet/sfc/ef100_ethtool.c | 24 + drivers/net/ethernet/sfc/ef100_ethtool.h | 12 + drivers/net/ethernet/sfc/ef100_netdev.c | 129 ++++++ drivers/net/ethernet/sfc/ef100_netdev.h | 17 + drivers/net/ethernet/sfc/ef100_nic.c | 174 +++++++ drivers/net/ethernet/sfc/ef100_nic.h | 26 ++ drivers/net/ethernet/sfc/ef100_rx.c | 25 + drivers/net/ethernet/sfc/ef100_rx.h | 19 + drivers/net/ethernet/sfc/ef100_tx.c | 32 ++ drivers/net/ethernet/sfc/ef100_tx.h | 18 + drivers/net/ethernet/sfc/efx.c | 8 + drivers/net/ethernet/sfc/efx.h | 16 +- drivers/net/ethernet/sfc/ethtool.c | 2 - drivers/net/ethernet/sfc/ethtool_common.c | 2 +- drivers/net/ethernet/sfc/ethtool_common.h | 2 - drivers/net/ethernet/sfc/mcdi.c | 2 +- drivers/net/ethernet/sfc/net_driver.h | 16 + drivers/net/ethernet/sfc/nic_common.h | 6 + drivers/net/ethernet/sfc/siena.c | 3 + drivers/net/ethernet/sfc/tx.c | 4 +- drivers/net/ethernet/sfc/tx_common.h | 2 + 26 files changed, 1095 insertions(+), 13 deletions(-) create mode 100644 drivers/net/ethernet/sfc/ef100.c create mode 100644 drivers/net/ethernet/sfc/ef100.h create mode 100644 drivers/net/ethernet/sfc/ef100_ethtool.c create mode 100644 drivers/net/ethernet/sfc/ef100_ethtool.h create mode 100644 drivers/net/ethernet/sfc/ef100_netdev.c create mode 100644 drivers/net/ethernet/sfc/ef100_netdev.h create mode 100644 drivers/net/ethernet/sfc/ef100_nic.c create mode 100644 drivers/net/ethernet/sfc/ef100_nic.h create mode 100644 drivers/net/ethernet/sfc/ef100_rx.c create mode 100644 drivers/net/ethernet/sfc/ef100_rx.h create mode 100644 drivers/net/ethernet/sfc/ef100_tx.c create mode 100644 drivers/net/ethernet/sfc/ef100_tx.h diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 81b0f7d3a025..5e37c8313725 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -17,7 +17,7 @@ config NET_VENDOR_SOLARFLARE if NET_VENDOR_SOLARFLARE config SFC - tristate "Solarflare SFC9000/SFC9100-family support" + tristate "Solarflare SFC9000/SFC9100/EF100-family support" depends on PCI select MDIO select CRC32 @@ -26,6 +26,9 @@ config SFC This driver supports 10/40-gigabit Ethernet cards based on 
the Solarflare SFC9000-family and SFC9100-family controllers. + It also supports 10/25/40/100-gigabit Ethernet cards based + on the Solarflare EF100 networking IP in Xilinx FPGAs. + To compile this driver as a module, choose M here. The module will be called sfc. config SFC_MTD diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index 87d093da22ca..8bd01c429f91 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -4,7 +4,9 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \ tx.o tx_common.o tx_tso.o rx.o rx_common.o \ selftest.o ethtool.o ethtool_common.o ptp.o \ mcdi.o mcdi_port.o mcdi_port_common.o \ - mcdi_functions.o mcdi_filters.o mcdi_mon.o + mcdi_functions.o mcdi_filters.o mcdi_mon.o \ + ef100.o ef100_nic.o ef100_netdev.o \ + ef100_ethtool.o ef100_rx.o ef100_tx.o sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += sriov.o siena_sriov.o ef10_sriov.o diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index fa7229fff2ff..4b0b2cf026a5 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6,6 +6,7 @@ #include "net_driver.h" #include "rx_common.h" +#include "tx_common.h" #include "ef10_regs.h" #include "io.h" #include "mcdi.h" @@ -3977,6 +3978,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .tx_remove = efx_mcdi_tx_remove, .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, .rx_push_rss_config = efx_mcdi_vf_rx_push_rss_config, .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, .rx_probe = efx_mcdi_rx_probe, @@ -3984,6 +3986,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = efx_ef10_rx_write, .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, .ev_probe = efx_mcdi_ev_probe, .ev_init = efx_ef10_ev_init, .ev_fini = efx_mcdi_ev_fini, @@ -4038,6 +4041,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .rx_hash_key_size = 40, .check_caps = ef10_check_caps, .print_additional_fwver = efx_ef10_print_additional_fwver, + .sensor_event = efx_mcdi_sensor_event, }; const struct efx_nic_type efx_hunt_a0_nic_type = { @@ -4087,6 +4091,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .tx_remove = efx_mcdi_tx_remove, .tx_write = efx_ef10_tx_write, .tx_limit_len = efx_ef10_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, @@ -4097,6 +4102,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = efx_ef10_rx_write, .rx_defer_refill = efx_ef10_rx_defer_refill, + .rx_packet = __efx_rx_packet, .ev_probe = efx_mcdi_ev_probe, .ev_init = efx_ef10_ev_init, .ev_fini = efx_mcdi_ev_fini, @@ -4172,4 +4178,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .rx_hash_key_size = 40, .check_caps = ef10_check_caps, .print_additional_fwver = efx_ef10_print_additional_fwver, + .sensor_event = efx_mcdi_sensor_event, }; diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c new file mode 100644 index 000000000000..de611c0f94e7 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100.c @@ -0,0 +1,541 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2018 
Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include +#include +#include "efx_common.h" +#include "efx_channels.h" +#include "io.h" +#include "ef100_nic.h" +#include "ef100_netdev.h" +#include "ef100_regs.h" +#include "ef100.h" + +#define EFX_EF100_PCI_DEFAULT_BAR 2 + +/* Number of bytes at start of vendor specified extended capability that indicate + * that the capability is vendor specified. i.e. offset from value returned by + * pci_find_next_ext_capability() to beginning of vendor specified capability + * header. + */ +#define PCI_EXT_CAP_HDR_LENGTH 4 + +/* Expected size of a Xilinx continuation address table entry. */ +#define ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH 16 + +struct ef100_func_ctl_window { + bool valid; + unsigned int bar; + u64 offset; +}; + +static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset, + struct ef100_func_ctl_window *result); + +/* Number of bytes to offset when reading bit position x with dword accessors. */ +#define ROUND_DOWN_TO_DWORD(x) (((x) & (~31)) >> 3) + +#define EXTRACT_BITS(x, lbn, width) \ + (((x) >> ((lbn) & 31)) & ((1ull << (width)) - 1)) + +static u32 _ef100_pci_get_bar_bits_with_width(struct efx_nic *efx, + int structure_start, + int lbn, int width) +{ + efx_dword_t dword; + + efx_readd(efx, &dword, structure_start + ROUND_DOWN_TO_DWORD(lbn)); + + return EXTRACT_BITS(le32_to_cpu(dword.u32[0]), lbn, width); +} + +#define ef100_pci_get_bar_bits(efx, entry_location, bitdef) \ + _ef100_pci_get_bar_bits_with_width(efx, entry_location, \ + ESF_GZ_CFGBAR_ ## bitdef ## _LBN, \ + ESF_GZ_CFGBAR_ ## bitdef ## _WIDTH) + +static int ef100_pci_parse_ef100_entry(struct efx_nic *efx, int entry_location, + struct ef100_func_ctl_window *result) +{ + u64 offset = ef100_pci_get_bar_bits(efx, entry_location, EF100_FUNC_CTL_WIN_OFF) << + ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT; + u32 bar = ef100_pci_get_bar_bits(efx, entry_location, EF100_BAR); + + netif_dbg(efx, probe, efx->net_dev, + "Found EF100 function control window bar=%d offset=0x%llx\n", + bar, offset); + + if (result->valid) { + netif_err(efx, probe, efx->net_dev, + "Duplicated EF100 table entry.\n"); + return -EINVAL; + } + + if (bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_EXPANSION_ROM || + bar == ESE_GZ_CFGBAR_EF100_BAR_NUM_INVALID) { + netif_err(efx, probe, efx->net_dev, + "Bad BAR value of %d in Xilinx capabilities EF100 entry.\n", + bar); + return -EINVAL; + } + + result->bar = bar; + result->offset = offset; + result->valid = true; + return 0; +} + +static bool ef100_pci_does_bar_overflow(struct efx_nic *efx, int bar, + u64 next_entry) +{ + return next_entry + ESE_GZ_CFGBAR_ENTRY_HEADER_SIZE > + pci_resource_len(efx->pci_dev, bar); +} + +/* Parse a Xilinx capabilities table entry describing a continuation to a new + * sub-table. 
+ */ +static int ef100_pci_parse_continue_entry(struct efx_nic *efx, int entry_location, + struct ef100_func_ctl_window *result) +{ + unsigned int previous_bar; + efx_oword_t entry; + u64 offset; + int rc = 0; + u32 bar; + + efx_reado(efx, &entry, entry_location); + + bar = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_CONT_CAP_BAR); + + offset = EFX_OWORD_FIELD64(entry, ESF_GZ_CFGBAR_CONT_CAP_OFFSET) << + ESE_GZ_CONT_CAP_OFFSET_BYTES_SHIFT; + + previous_bar = efx->mem_bar; + + if (bar == ESE_GZ_VSEC_BAR_NUM_EXPANSION_ROM || + bar == ESE_GZ_VSEC_BAR_NUM_INVALID) { + netif_err(efx, probe, efx->net_dev, + "Bad BAR value of %d in Xilinx capabilities sub-table.\n", + bar); + return -EINVAL; + } + + if (bar != previous_bar) { + efx_fini_io(efx); + + if (ef100_pci_does_bar_overflow(efx, bar, offset)) { + netif_err(efx, probe, efx->net_dev, + "Xilinx table will overrun BAR[%d] offset=0x%llx\n", + bar, offset); + return -EINVAL; + } + + /* Temporarily map new BAR. */ + rc = efx_init_io(efx, bar, + DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + pci_resource_len(efx->pci_dev, bar)); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Mapping new BAR for Xilinx table failed, rc=%d\n", rc); + return rc; + } + } + + rc = ef100_pci_walk_xilinx_table(efx, offset, result); + if (rc) + return rc; + + if (bar != previous_bar) { + efx_fini_io(efx); + + /* Put old BAR back. */ + rc = efx_init_io(efx, previous_bar, + DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + pci_resource_len(efx->pci_dev, previous_bar)); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Putting old BAR back failed, rc=%d\n", rc); + return rc; + } + } + + return 0; +} + +/* Iterate over the Xilinx capabilities table in the currently mapped BAR and + * call ef100_pci_parse_ef100_entry() on any EF100 entries and + * ef100_pci_parse_continue_entry() on any table continuations. + */ +static int ef100_pci_walk_xilinx_table(struct efx_nic *efx, u64 offset, + struct ef100_func_ctl_window *result) +{ + u64 current_entry = offset; + int rc = 0; + + while (true) { + u32 id = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_FORMAT); + u32 last = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_LAST); + u32 rev = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_REV); + u32 entry_size; + + if (id == ESE_GZ_CFGBAR_ENTRY_LAST) + return 0; + + entry_size = ef100_pci_get_bar_bits(efx, current_entry, ENTRY_SIZE); + + netif_dbg(efx, probe, efx->net_dev, + "Seen Xilinx table entry 0x%x size 0x%x at 0x%llx in BAR[%d]\n", + id, entry_size, current_entry, efx->mem_bar); + + if (entry_size < sizeof(u32) * 2) { + netif_err(efx, probe, efx->net_dev, + "Xilinx table entry too short len=0x%x\n", entry_size); + return -EINVAL; + } + + switch (id) { + case ESE_GZ_CFGBAR_ENTRY_EF100: + if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 || + entry_size < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) { + netif_err(efx, probe, efx->net_dev, + "Bad length or rev for EF100 entry in Xilinx capabilities table. entry_size=%d rev=%d.\n", + entry_size, rev); + return -EINVAL; + } + + rc = ef100_pci_parse_ef100_entry(efx, current_entry, + result); + if (rc) + return rc; + break; + case ESE_GZ_CFGBAR_ENTRY_CONT_CAP_ADDR: + if (rev != 0 || entry_size < ESE_GZ_CFGBAR_CONT_CAP_MIN_LENGTH) { + netif_err(efx, probe, efx->net_dev, + "Bad length or rev for continue entry in Xilinx capabilities table. entry_size=%d rev=%d.\n", + entry_size, rev); + return -EINVAL; + } + + rc = ef100_pci_parse_continue_entry(efx, current_entry, result); + if (rc) + return rc; + break; + default: + /* Ignore unknown table entries. 
*/ + break; + } + + if (last) + return 0; + + current_entry += entry_size; + + if (ef100_pci_does_bar_overflow(efx, efx->mem_bar, current_entry)) { + netif_err(efx, probe, efx->net_dev, + "Xilinx table overrun at position=0x%llx.\n", + current_entry); + return -EINVAL; + } + } +} + +static int _ef100_pci_get_config_bits_with_width(struct efx_nic *efx, + int structure_start, int lbn, + int width, u32 *result) +{ + int rc, pos = structure_start + ROUND_DOWN_TO_DWORD(lbn); + u32 temp; + + rc = pci_read_config_dword(efx->pci_dev, pos, &temp); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read PCI config dword at %d\n", + pos); + return rc; + } + + *result = EXTRACT_BITS(temp, lbn, width); + + return 0; +} + +#define ef100_pci_get_config_bits(efx, entry_location, bitdef, result) \ + _ef100_pci_get_config_bits_with_width(efx, entry_location, \ + ESF_GZ_VSEC_ ## bitdef ## _LBN, \ + ESF_GZ_VSEC_ ## bitdef ## _WIDTH, result) + +/* Call ef100_pci_walk_xilinx_table() for the Xilinx capabilities table pointed + * to by this PCI_EXT_CAP_ID_VNDR. + */ +static int ef100_pci_parse_xilinx_cap(struct efx_nic *efx, int vndr_cap, + bool has_offset_hi, + struct ef100_func_ctl_window *result) +{ + u32 offset_high = 0; + u32 offset_lo = 0; + u64 offset = 0; + u32 bar = 0; + int rc = 0; + + rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_BAR, &bar); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_TBL_BAR, rc=%d\n", + rc); + return rc; + } + + if (bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_EXPANSION_ROM || + bar == ESE_GZ_CFGBAR_CONT_CAP_BAR_NUM_INVALID) { + netif_err(efx, probe, efx->net_dev, + "Bad BAR value of %d in Xilinx capabilities sub-table.\n", + bar); + return -EINVAL; + } + + rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_LO, &offset_lo); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_TBL_OFF_LO, rc=%d\n", + rc); + return rc; + } + + /* Get optional extension to 64bit offset. */ + if (has_offset_hi) { + rc = ef100_pci_get_config_bits(efx, vndr_cap, TBL_OFF_HI, &offset_high); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_TBL_OFF_HI, rc=%d\n", + rc); + return rc; + } + } + + offset = (((u64)offset_lo) << ESE_GZ_VSEC_TBL_OFF_LO_BYTES_SHIFT) | + (((u64)offset_high) << ESE_GZ_VSEC_TBL_OFF_HI_BYTES_SHIFT); + + if (offset > pci_resource_len(efx->pci_dev, bar) - sizeof(u32) * 2) { + netif_err(efx, probe, efx->net_dev, + "Xilinx table will overrun BAR[%d] offset=0x%llx\n", + bar, offset); + return -EINVAL; + } + + /* Temporarily map BAR. */ + rc = efx_init_io(efx, bar, + DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + pci_resource_len(efx->pci_dev, bar)); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "efx_init_io failed, rc=%d\n", rc); + return rc; + } + + rc = ef100_pci_walk_xilinx_table(efx, offset, result); + + /* Unmap temporarily mapped BAR. */ + efx_fini_io(efx); + return rc; +} + +/* Call ef100_pci_parse_ef100_entry() for each Xilinx PCI_EXT_CAP_ID_VNDR + * capability. 
+ */ +static int ef100_pci_find_func_ctrl_window(struct efx_nic *efx, + struct ef100_func_ctl_window *result) +{ + int num_xilinx_caps = 0; + int cap = 0; + + result->valid = false; + + while ((cap = pci_find_next_ext_capability(efx->pci_dev, cap, PCI_EXT_CAP_ID_VNDR)) != 0) { + int vndr_cap = cap + PCI_EXT_CAP_HDR_LENGTH; + u32 vsec_ver = 0; + u32 vsec_len = 0; + u32 vsec_id = 0; + int rc = 0; + + num_xilinx_caps++; + + rc = ef100_pci_get_config_bits(efx, vndr_cap, ID, &vsec_id); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_ID, rc=%d\n", + rc); + return rc; + } + + rc = ef100_pci_get_config_bits(efx, vndr_cap, VER, &vsec_ver); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_VER, rc=%d\n", + rc); + return rc; + } + + /* Get length of whole capability - i.e. starting at cap */ + rc = ef100_pci_get_config_bits(efx, vndr_cap, LEN, &vsec_len); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to read ESF_GZ_VSEC_LEN, rc=%d\n", + rc); + return rc; + } + + if (vsec_id == ESE_GZ_XILINX_VSEC_ID && + vsec_ver == ESE_GZ_VSEC_VER_XIL_CFGBAR && + vsec_len >= ESE_GZ_VSEC_LEN_MIN) { + bool has_offset_hi = (vsec_len >= ESE_GZ_VSEC_LEN_HIGH_OFFT); + + rc = ef100_pci_parse_xilinx_cap(efx, vndr_cap, + has_offset_hi, result); + if (rc) + return rc; + } + } + + if (num_xilinx_caps && !result->valid) { + netif_err(efx, probe, efx->net_dev, + "Seen %d Xilinx tables, but no EF100 entry.\n", + num_xilinx_caps); + return -EINVAL; + } + + return 0; +} + +/* Final NIC shutdown + * This is called only at module unload (or hotplug removal). A PF can call + * this on its VFs to ensure they are unbound first. + */ +static void ef100_pci_remove(struct pci_dev *pci_dev) +{ + struct efx_nic *efx; + + efx = pci_get_drvdata(pci_dev); + if (!efx) + return; + + rtnl_lock(); + dev_close(efx->net_dev); + rtnl_unlock(); + + /* Unregistering our netdev notifier triggers unbinding of TC indirect + * blocks, so we have to do it before PCI removal. + */ + unregister_netdevice_notifier(&efx->netdev_notifier); + ef100_remove(efx); + efx_fini_io(efx); + netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); + + pci_set_drvdata(pci_dev, NULL); + efx_fini_struct(efx); + free_netdev(efx->net_dev); + + pci_disable_pcie_error_reporting(pci_dev); +}; + +static int ef100_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *entry) +{ + struct ef100_func_ctl_window fcw = { 0 }; + struct net_device *net_dev; + struct efx_nic *efx; + int rc; + + /* Allocate and initialise a struct net_device and struct efx_nic */ + net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES); + if (!net_dev) + return -ENOMEM; + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *)entry->driver_data; + + pci_set_drvdata(pci_dev, efx); + SET_NETDEV_DEV(net_dev, &pci_dev->dev); + rc = efx_init_struct(efx, pci_dev, net_dev); + if (rc) + goto fail; + + efx->vi_stride = EF100_DEFAULT_VI_STRIDE; + netif_info(efx, probe, efx->net_dev, + "Solarflare EF100 NIC detected\n"); + + rc = ef100_pci_find_func_ctrl_window(efx, &fcw); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Error looking for ef100 function control window, rc=%d\n", + rc); + goto fail; + } + + if (!fcw.valid) { + /* Extended capability not found - use defaults. 
*/ + fcw.bar = EFX_EF100_PCI_DEFAULT_BAR; + fcw.offset = 0; + fcw.valid = true; + } + + if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) { + netif_err(efx, probe, efx->net_dev, + "Func control window overruns BAR\n"); + goto fail; + } + + /* Set up basic I/O (BAR mappings etc) */ + rc = efx_init_io(efx, fcw.bar, + DMA_BIT_MASK(ESF_GZ_TX_SEND_ADDR_WIDTH), + pci_resource_len(efx->pci_dev, fcw.bar)); + if (rc) + goto fail; + + efx->reg_base = fcw.offset; + + efx->netdev_notifier.notifier_call = ef100_netdev_event; + rc = register_netdevice_notifier(&efx->netdev_notifier); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Failed to register netdevice notifier, rc=%d\n", rc); + goto fail; + } + + rc = efx->type->probe(efx); + if (rc) + goto fail; + + netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); + + return 0; + +fail: + ef100_pci_remove(pci_dev); + return rc; +} + +/* PCI device ID table */ +static const struct pci_device_id ef100_pci_table[] = { + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ + .driver_data = (unsigned long) &ef100_pf_nic_type }, + {0} /* end of list */ +}; + +struct pci_driver ef100_pci_driver = { + .name = "sfc_ef100", + .id_table = ef100_pci_table, + .probe = ef100_pci_probe, + .remove = ef100_pci_remove, + .err_handler = &efx_err_handlers, +}; + +MODULE_DEVICE_TABLE(pci, ef100_pci_table); diff --git a/drivers/net/ethernet/sfc/ef100.h b/drivers/net/ethernet/sfc/ef100.h new file mode 100644 index 000000000000..a63f0ff6641b --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +extern struct pci_driver ef100_pci_driver; diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c new file mode 100644 index 000000000000..729c425d0f78 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ +#include +#include +#include "net_driver.h" +#include "efx.h" +#include "mcdi_port_common.h" +#include "ethtool_common.h" +#include "ef100_ethtool.h" +#include "mcdi_functions.h" + +/* Ethtool options available + */ +const struct ethtool_ops ef100_ethtool_ops = { + .get_drvinfo = efx_ethtool_get_drvinfo, +}; diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.h b/drivers/net/ethernet/sfc/ef100_ethtool.h new file mode 100644 index 000000000000..6efda72dfc6c --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_ethtool.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +extern const struct ethtool_ops ef100_ethtool_ops; diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c new file mode 100644 index 000000000000..f900b375d9b1 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ +#include "net_driver.h" +#include "mcdi_port_common.h" +#include "mcdi_functions.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "tx_common.h" +#include "ef100_netdev.h" +#include "ef100_ethtool.h" +#include "efx_common.h" +#include "nic_common.h" +#include "ef100_nic.h" +#include "ef100_tx.h" +#include "ef100_regs.h" +#include "mcdi_filters.h" +#include "rx_common.h" + +static void ef100_update_name(struct efx_nic *efx) +{ + strcpy(efx->name, efx->net_dev->name); +} + +/* Initiate a packet transmission. We use one channel per CPU + * (sharing when we have more CPUs than channels). + * + * Context: non-blocking. + * Note that returning anything other than NETDEV_TX_OK will cause the + * OS to free the skb. 
+ */ +static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_tx_queue *tx_queue; + struct efx_channel *channel; + int rc; + + channel = efx_get_tx_channel(efx, skb_get_queue_mapping(skb)); + netif_vdbg(efx, tx_queued, efx->net_dev, + "%s len %d data %d channel %d\n", __func__, + skb->len, skb->data_len, channel->channel); + if (!efx->n_channels || !efx->n_tx_channels || !channel) { + netif_stop_queue(net_dev); + goto err; + } + + tx_queue = &channel->tx_queue[0]; + rc = ef100_enqueue_skb(tx_queue, skb); + if (rc == 0) + return NETDEV_TX_OK; + +err: + net_dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +static const struct net_device_ops ef100_netdev_ops = { + .ndo_start_xmit = ef100_hard_start_xmit, +}; + +/* Netdev registration + */ +int ef100_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct efx_nic *efx = container_of(this, struct efx_nic, netdev_notifier); + struct net_device *net_dev = netdev_notifier_info_to_dev(ptr); + + if (netdev_priv(net_dev) == efx && event == NETDEV_CHANGENAME) + ef100_update_name(efx); + + return NOTIFY_DONE; +} + +int ef100_register_netdev(struct efx_nic *efx) +{ + struct net_device *net_dev = efx->net_dev; + int rc; + + net_dev->watchdog_timeo = 5 * HZ; + net_dev->irq = efx->pci_dev->irq; + net_dev->netdev_ops = &ef100_netdev_ops; + net_dev->min_mtu = EFX_MIN_MTU; + net_dev->max_mtu = EFX_MAX_MTU; + net_dev->ethtool_ops = &ef100_ethtool_ops; + + rtnl_lock(); + + rc = dev_alloc_name(net_dev, net_dev->name); + if (rc < 0) + goto fail_locked; + ef100_update_name(efx); + + rc = register_netdevice(net_dev); + if (rc) + goto fail_locked; + + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + + efx->state = STATE_READY; + rtnl_unlock(); + efx_init_mcdi_logging(efx); + + return 0; + +fail_locked: + rtnl_unlock(); + netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); + return rc; +} + +void ef100_unregister_netdev(struct efx_nic *efx) +{ + if (efx_dev_registered(efx)) { + efx_fini_mcdi_logging(efx); + efx->state = STATE_UNINIT; + unregister_netdev(efx->net_dev); + } +} diff --git a/drivers/net/ethernet/sfc/ef100_netdev.h b/drivers/net/ethernet/sfc/ef100_netdev.h new file mode 100644 index 000000000000..d40abb7cc086 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_netdev.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include + +int ef100_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr); +int ef100_register_netdev(struct efx_nic *efx); +void ef100_unregister_netdev(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c new file mode 100644 index 000000000000..7bb304bc148e --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "ef100_nic.h" +#include "efx_common.h" +#include "efx_channels.h" +#include "io.h" +#include "selftest.h" +#include "ef100_regs.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "mcdi_port_common.h" +#include "mcdi_functions.h" +#include "mcdi_filters.h" +#include "ef100_rx.h" +#include "ef100_tx.h" +#include "ef100_netdev.h" + +#define EF100_MAX_VIS 4096 + +/* MCDI + */ +static int ef100_get_warm_boot_count(struct efx_nic *efx) +{ + efx_dword_t reg; + + efx_readd(efx, ®, efx_reg(efx, ER_GZ_MC_SFT_STATUS)); + + if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) { + netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n"); + efx->state = STATE_DISABLED; + return -ENETDOWN; + } else { + return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ? + EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO; + } +} + +/* Event handling + */ +static int ef100_ev_probe(struct efx_channel *channel) +{ + /* Allocate an extra descriptor for the QMDA status completion entry */ + return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf, + (channel->eventq_mask + 2) * + sizeof(efx_qword_t), + GFP_KERNEL); +} + +static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) +{ + struct efx_msi_context *context = dev_id; + struct efx_nic *efx = context->efx; + + netif_vdbg(efx, intr, efx->net_dev, + "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); + + if (likely(READ_ONCE(efx->irq_soft_enabled))) { + /* Note test interrupts */ + if (context->index == efx->irq_level) + efx->last_irq_cpu = raw_smp_processor_id(); + + /* Schedule processing of the channel */ + efx_schedule_channel_irq(efx->channel[context->index]); + } + + return IRQ_HANDLED; +} + +/* NIC level access functions + */ +const struct efx_nic_type ef100_pf_nic_type = { + .revision = EFX_REV_EF100, + .is_vf = false, + .probe = ef100_probe_pf, + .mcdi_max_ver = 2, + .irq_enable_master = efx_port_dummy_op_void, + .irq_disable_non_ev = efx_port_dummy_op_void, + .push_irq_moderation = efx_channel_dummy_op_void, + .min_interrupt_mode = EFX_INT_MODE_MSIX, + + .ev_probe = ef100_ev_probe, + .irq_handle_msi = ef100_msi_interrupt, + + /* Per-type bar/size configuration not used on ef100. Location of + * registers is defined by extended capabilities. 
+ */ + .mem_bar = NULL, + .mem_map_size = NULL, + +}; + +/* NIC probe and remove + */ +static int ef100_probe_main(struct efx_nic *efx) +{ + unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]); + struct net_device *net_dev = efx->net_dev; + struct ef100_nic_data *nic_data; + int i, rc; + + if (WARN_ON(bar_size == 0)) + return -EIO; + + nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); + if (!nic_data) + return -ENOMEM; + efx->nic_data = nic_data; + nic_data->efx = efx; + net_dev->features |= efx->type->offload_features; + net_dev->hw_features |= efx->type->offload_features; + + /* Get the MC's warm boot count. In case it's rebooting right + * now, be prepared to retry. + */ + i = 0; + for (;;) { + rc = ef100_get_warm_boot_count(efx); + if (rc >= 0) + break; + if (++i == 5) + goto fail; + ssleep(1); + } + nic_data->warm_boot_count = rc; + + /* In case we're recovering from a crash (kexec), we want to + * cancel any outstanding request by the previous user of this + * function. We send a special message using the least + * significant bits of the 'high' (doorbell) register. + */ + _efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD)); + + /* Post-IO section. */ + + efx->max_vis = EF100_MAX_VIS; + + rc = efx_init_channels(efx); + if (rc) + goto fail; + + rc = ef100_register_netdev(efx); + if (rc) + goto fail; + + return 0; +fail: + return rc; +} + +int ef100_probe_pf(struct efx_nic *efx) +{ + return ef100_probe_main(efx); +} + +void ef100_remove(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + ef100_unregister_netdev(efx); + efx_fini_channels(efx); + kfree(efx->phy_data); + efx->phy_data = NULL; + kfree(nic_data); + efx->nic_data = NULL; +} diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h new file mode 100644 index 000000000000..643111aebba5 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "nic_common.h" + +extern const struct efx_nic_type ef100_pf_nic_type; + +int ef100_probe_pf(struct efx_nic *efx); +void ef100_remove(struct efx_nic *efx); + +struct ef100_nic_data { + struct efx_nic *efx; + u16 warm_boot_count; +}; + +#define efx_ef100_has_cap(caps, flag) \ + (!!((caps) & BIT_ULL(MC_CMD_GET_CAPABILITIES_V4_OUT_ ## flag ## _LBN))) diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c new file mode 100644 index 000000000000..d6b62f980463 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_rx.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2005-2019 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#include "net_driver.h" +#include "ef100_rx.h" +#include "rx_common.h" +#include "efx.h" + +void __ef100_rx_packet(struct efx_channel *channel) +{ + /* Stub. No RX path yet. Discard the buffer. */ + struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, + channel->rx_pkt_index); + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + + efx_free_rx_buffers(rx_queue, rx_buf, 1); + channel->rx_pkt_n_frags = 0; +} diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h new file mode 100644 index 000000000000..a6083a98f362 --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_rx.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef EFX_EF100_RX_H +#define EFX_EF100_RX_H + +#include "net_driver.h" + +void __ef100_rx_packet(struct efx_channel *channel); + +#endif diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c new file mode 100644 index 000000000000..b12295413c0d --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0-only +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2018 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#include "net_driver.h" +#include "tx_common.h" +#include "nic_common.h" +#include "ef100_tx.h" + +/* Add a socket buffer to a TX queue + * + * You must hold netif_tx_lock() to call this function. + * + * Returns 0 on success, error code otherwise. In case of an error this + * function will free the SKB. + */ +int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + /* Stub. No TX path yet. */ + struct efx_nic *efx = tx_queue->efx; + + netif_stop_queue(efx->net_dev); + dev_kfree_skb_any(skb); + return -ENODEV; +} diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h new file mode 100644 index 000000000000..a7895efca4ff --- /dev/null +++ b/drivers/net/ethernet/sfc/ef100_tx.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/**************************************************************************** + * Driver for Solarflare network controllers and boards + * Copyright 2019 Solarflare Communications Inc. + * Copyright 2019-2020 Xilinx Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. 
+ */ + +#ifndef EFX_EF100_TX_H +#define EFX_EF100_TX_H + +#include "net_driver.h" + +netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +#endif diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index f16b4f236031..f5aa1bd02f19 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -25,6 +25,7 @@ #include "efx.h" #include "efx_common.h" #include "efx_channels.h" +#include "ef100.h" #include "rx_common.h" #include "tx_common.h" #include "nic.h" @@ -1360,8 +1361,14 @@ static int __init efx_init_module(void) if (rc < 0) goto err_pci; + rc = pci_register_driver(&ef100_pci_driver); + if (rc < 0) + goto err_pci_ef100; + return 0; + err_pci_ef100: + pci_unregister_driver(&efx_pci_driver); err_pci: efx_destroy_reset_workqueue(); err_reset: @@ -1378,6 +1385,7 @@ static void __exit efx_exit_module(void) { printk(KERN_INFO "Solarflare NET driver unloading\n"); + pci_unregister_driver(&ef100_pci_driver); pci_unregister_driver(&efx_pci_driver); efx_destroy_reset_workqueue(); #ifdef CONFIG_SFC_SRIOV diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index e7e7d8d1a07b..a9808e86068d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -8,7 +8,10 @@ #ifndef EFX_EFX_H #define EFX_EFX_H +#include #include "net_driver.h" +#include "ef100_rx.h" +#include "ef100_tx.h" #include "filter.h" int efx_net_open(struct net_device *net_dev); @@ -18,13 +21,18 @@ int efx_net_stop(struct net_device *net_dev); void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue); netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); -netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); +static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue, + ef100_enqueue_skb, __efx_enqueue_skb, + tx_queue, skb); +} void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_xmit_done_single(struct efx_tx_queue *tx_queue); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data); extern unsigned int efx_piobuf_size; -extern bool efx_separate_tx_channels; /* RX */ void __efx_rx_packet(struct efx_channel *channel); @@ -33,7 +41,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, static inline void efx_rx_flush_packet(struct efx_channel *channel) { if (channel->rx_pkt_n_frags) - __efx_rx_packet(channel); + INDIRECT_CALL_2(channel->efx->type->rx_packet, + __ef100_rx_packet, __efx_rx_packet, + channel); } /* Maximum number of TCP segments we support for soft-TSO */ diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 038c08d2d7aa..4ffda7782f68 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -221,8 +221,6 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev, return 0; } -const char *efx_driver_name = KBUILD_MODNAME; - const struct ethtool_ops efx_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USECS_IRQ | diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index fb06097b70d8..05ac87807929 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -104,7 +104,7 @@ void 
efx_ethtool_get_drvinfo(struct net_device *net_dev, { struct efx_nic *efx = netdev_priv(net_dev); - strlcpy(info->driver, efx_driver_name, sizeof(info->driver)); + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); efx_mcdi_print_fwver(efx, info->fw_version, sizeof(info->fw_version)); diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index 0c0ea9ac4d08..659491932101 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -11,8 +11,6 @@ #ifndef EFX_ETHTOOL_COMMON_H #define EFX_ETHTOOL_COMMON_H -extern const char *efx_driver_name; - void efx_ethtool_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info); u32 efx_ethtool_get_msglevel(struct net_device *net_dev); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 6c49740a178e..5467819aef6e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -1337,7 +1337,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, efx_mcdi_process_link_change(efx, event); break; case MCDI_EVENT_CODE_SENSOREVT: - efx_mcdi_sensor_event(efx, event); + efx_sensor_event(efx, event); break; case MCDI_EVENT_CODE_SCHEDERR: netif_dbg(efx, hw, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index db35beabdcff..9791fac0b649 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -963,6 +963,7 @@ struct efx_async_filter_insertion { * @vpd_sn: Serial number read from VPD * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their * xdp_rxq_info structures? + * @netdev_notifier: Netdevice notifier. * @mem_bar: The BAR that is mapped into membase. * @reg_base: Offset from the start of the bar to the function control window. * @monitor_work: Hardware monitor workitem @@ -1142,6 +1143,8 @@ struct efx_nic { char *vpd_sn; bool xdp_rxq_info_failed; + struct notifier_block netdev_notifier; + unsigned int mem_bar; u32 reg_base; @@ -1246,6 +1249,7 @@ struct efx_udp_tunnel { * @tx_init: Initialise TX queue on the NIC * @tx_remove: Free resources for TX queue * @tx_write: Write TX descriptors and doorbell + * @tx_enqueue: Add an SKB to TX queue * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC * @rx_push_rss_context_config: Write RSS hash key and indirection table for @@ -1257,6 +1261,7 @@ struct efx_udp_tunnel { * @rx_remove: Free resources for RX queue * @rx_write: Write RX descriptors and doorbell * @rx_defer_refill: Generate a refill reminder event + * @rx_packet: Receive the queued RX buffer on a channel * @ev_probe: Allocate resources for event queue * @ev_init: Initialise event queue on the NIC * @ev_fini: Deinitialise event queue on the NIC @@ -1301,6 +1306,7 @@ struct efx_udp_tunnel { * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required. 
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel * @print_additional_fwver: Dump NIC-specific additional FW version info + * @sensor_event: Handle a sensor event from MCDI * @revision: Hardware architecture revision * @txd_ptr_tbl_base: TX descriptor ring base address * @rxd_ptr_tbl_base: RX descriptor ring base address @@ -1381,6 +1387,7 @@ struct efx_nic_type { void (*tx_init)(struct efx_tx_queue *tx_queue); void (*tx_remove)(struct efx_tx_queue *tx_queue); void (*tx_write)(struct efx_tx_queue *tx_queue); + netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb); unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned int len); int (*rx_push_rss_config)(struct efx_nic *efx, bool user, @@ -1398,6 +1405,7 @@ struct efx_nic_type { void (*rx_remove)(struct efx_rx_queue *rx_queue); void (*rx_write)(struct efx_rx_queue *rx_queue); void (*rx_defer_refill)(struct efx_rx_queue *rx_queue); + void (*rx_packet)(struct efx_channel *channel); int (*ev_probe)(struct efx_channel *channel); int (*ev_init)(struct efx_channel *channel); void (*ev_fini)(struct efx_channel *channel); @@ -1472,6 +1480,7 @@ struct efx_nic_type { bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port); size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf, size_t len); + void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev); int revision; unsigned int txd_ptr_tbl_base; @@ -1523,6 +1532,13 @@ efx_get_channel(struct efx_nic *efx, unsigned index) _channel = _channel->channel ? \ (_efx)->channel[_channel->channel - 1] : NULL) +static inline struct efx_channel * +efx_get_tx_channel(struct efx_nic *efx, unsigned int index) +{ + EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels); + return efx->channel[efx->tx_channel_offset + index]; +} + static inline struct efx_tx_queue * efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) { diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h index e04b6817cde3..974107354087 100644 --- a/drivers/net/ethernet/sfc/nic_common.h +++ b/drivers/net/ethernet/sfc/nic_common.h @@ -225,6 +225,12 @@ void efx_nic_event_test_start(struct efx_channel *channel); bool efx_nic_event_present(struct efx_channel *channel); +static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev) +{ + if (efx->type->sensor_event) + efx->type->sensor_event(efx, ev); +} + /* Some statistics are computed as A - B where A and B each increase * linearly with some hardware counter(s) and the counters are read * asynchronously. 
If the counters contributing to B are always read diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 219fb3a0c9d0..a7ea630bb5e6 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -1018,6 +1018,7 @@ const struct efx_nic_type siena_a0_nic_type = { .tx_remove = efx_farch_tx_remove, .tx_write = efx_farch_tx_write, .tx_limit_len = efx_farch_tx_limit_len, + .tx_enqueue = __efx_enqueue_skb, .rx_push_rss_config = siena_rx_push_rss_config, .rx_pull_rss_config = siena_rx_pull_rss_config, .rx_probe = efx_farch_rx_probe, @@ -1025,6 +1026,7 @@ const struct efx_nic_type siena_a0_nic_type = { .rx_remove = efx_farch_rx_remove, .rx_write = efx_farch_rx_write, .rx_defer_refill = efx_farch_rx_defer_refill, + .rx_packet = __efx_rx_packet, .ev_probe = efx_farch_ev_probe, .ev_init = efx_farch_ev_init, .ev_fini = efx_farch_ev_fini, @@ -1096,4 +1098,5 @@ const struct efx_nic_type siena_a0_nic_type = { 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT), .rx_hash_key_size = 16, .check_caps = siena_check_caps, + .sensor_event = efx_mcdi_sensor_event, }; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 1bcf50ab95d9..727201d5eb24 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -284,7 +284,7 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, * Returns NETDEV_TX_OK. * You must hold netif_tx_lock() to call this function. */ -netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { unsigned int old_insert_count = tx_queue->insert_count; bool xmit_more = netdev_xmit_more(); @@ -503,7 +503,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, } tx_queue = efx_get_tx_queue(efx, index, type); - return efx_enqueue_skb(tx_queue, skb); + return __efx_enqueue_skb(tx_queue, skb); } void efx_xmit_done_single(struct efx_tx_queue *tx_queue) diff --git a/drivers/net/ethernet/sfc/tx_common.h b/drivers/net/ethernet/sfc/tx_common.h index cbe995b024a6..bbab7f248250 100644 --- a/drivers/net/ethernet/sfc/tx_common.h +++ b/drivers/net/ethernet/sfc/tx_common.h @@ -40,4 +40,6 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); + +extern bool efx_separate_tx_channels; #endif From c027f2a72a3149a28387a67a58a6cfc585bbcd53 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:56:11 +0100 Subject: [PATCH 1668/2056] sfc_ef100: reset-handling stub We don't actually do the efx_mcdi_reset() because we don't have MCDI yet. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 50 ++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 7bb304bc148e..875bbbdf606c 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -26,6 +26,8 @@ #define EF100_MAX_VIS 4096 +#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT) + /* MCDI */ static int ef100_get_warm_boot_count(struct efx_nic *efx) @@ -75,6 +77,51 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +/* Other + */ + +static enum reset_type ef100_map_reset_reason(enum reset_type reason) +{ + if (reason == RESET_TYPE_TX_WATCHDOG) + return reason; + return RESET_TYPE_DISABLE; +} + +static int ef100_map_reset_flags(u32 *flags) +{ + /* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */ + if ((*flags & EF100_RESET_PORT)) { + *flags &= ~EF100_RESET_PORT; + return RESET_TYPE_ALL; + } + if (*flags & ETH_RESET_MGMT) { + *flags &= ~ETH_RESET_MGMT; + return RESET_TYPE_DISABLE; + } + + return -EINVAL; +} + +static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) +{ + int rc; + + dev_close(efx->net_dev); + + if (reset_type == RESET_TYPE_TX_WATCHDOG) { + netif_device_attach(efx->net_dev); + __clear_bit(reset_type, &efx->reset_pending); + rc = dev_open(efx->net_dev, NULL); + } else if (reset_type == RESET_TYPE_ALL) { + netif_device_attach(efx->net_dev); + + rc = dev_open(efx->net_dev, NULL); + } else { + rc = 1; /* Leave the device closed */ + } + return rc; +} + /* NIC level access functions */ const struct efx_nic_type ef100_pf_nic_type = { @@ -86,6 +133,9 @@ const struct efx_nic_type ef100_pf_nic_type = { .irq_disable_non_ev = efx_port_dummy_op_void, .push_irq_moderation = efx_channel_dummy_op_void, .min_interrupt_mode = EFX_INT_MODE_MSIX, + .map_reset_reason = ef100_map_reset_reason, + .map_reset_flags = ef100_map_reset_flags, + .reset = ef100_reset, .ev_probe = ef100_ev_probe, .irq_handle_msi = ef100_msi_interrupt, From aa86a75fed03bca1372cbaf0c3587f6707453a52 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:56:26 +0100 Subject: [PATCH 1669/2056] sfc_ef100: PHY probe stub We can't actually do the MCDI to probe it fully until we have working MCDI, which comes later, but we need efx->phy_data to be allocated so that when we get MCDI events the link-state change handler doesn't NULL-dereference. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 875bbbdf606c..802ab0ce00fe 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -77,6 +77,16 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } +static int ef100_phy_probe(struct efx_nic *efx) +{ + /* stub: allocate the phy_data */ + efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL); + if (!efx->phy_data) + return -ENOMEM; + + return 0; +} + /* Other */ @@ -193,6 +203,10 @@ static int ef100_probe_main(struct efx_nic *efx) efx->max_vis = EF100_MAX_VIS; + rc = ef100_phy_probe(efx); + if (rc) + goto fail; + rc = efx_init_channels(efx); if (rc) goto fail; From 35a36af88f65c2ea0558a2456ccb42fc99c5229f Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:56:40 +0100 Subject: [PATCH 1670/2056] sfc_ef100: don't call efx_reset_down()/up() on EF100 We handle everything ourselves in ef100_reset(), rather than relying on the generic down/up routines. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_common.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 5667694c6514..26b05ec4e712 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -814,14 +814,18 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) */ int efx_reset(struct efx_nic *efx, enum reset_type method) { + int rc, rc2 = 0; bool disabled; - int rc, rc2; netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", RESET_TYPE(method)); efx_device_detach_sync(efx); - efx_reset_down(efx, method); + /* efx_reset_down() grabs locks that prevent recovery on EF100. + * EF100 reset is handled in the efx_nic_type callback below. + */ + if (efx_nic_rev(efx) != EFX_REV_EF100) + efx_reset_down(efx, method); rc = efx->type->reset(efx, method); if (rc) { @@ -849,7 +853,8 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) disabled = rc || method == RESET_TYPE_DISABLE || method == RESET_TYPE_RECOVER_OR_DISABLE; - rc2 = efx_reset_up(efx, method, !disabled); + if (efx_nic_rev(efx) != EFX_REV_EF100) + rc2 = efx_reset_up(efx, method, !disabled); if (rc2) { disabled = true; if (!rc) From 2200e6d92e05ed00adda1a58bc438dbfc5a7b8e2 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:57:09 +0100 Subject: [PATCH 1671/2056] sfc_ef100: implement MCDI transport Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 106 +++++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_nic.h | 1 + 2 files changed, 107 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 802ab0ce00fe..189a8a706c0e 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -25,11 +25,23 @@ #include "ef100_netdev.h" #define EF100_MAX_VIS 4096 +#define EF100_NUM_MCDI_BUFFERS 1 +#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX) #define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT) /* MCDI */ +static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, dma_addr_t *dma_addr) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + if (dma_addr) + *dma_addr = nic_data->mcdi_buf.dma_addr + + bufid * ALIGN(MCDI_BUF_LEN, 256); + return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256); +} + static int ef100_get_warm_boot_count(struct efx_nic *efx) { efx_dword_t reg; @@ -46,6 +58,72 @@ static int ef100_get_warm_boot_count(struct efx_nic *efx) } } +static void ef100_mcdi_request(struct efx_nic *efx, + const efx_dword_t *hdr, size_t hdr_len, + const efx_dword_t *sdu, size_t sdu_len) +{ + dma_addr_t dma_addr; + u8 *pdu = ef100_mcdi_buf(efx, 0, &dma_addr); + + memcpy(pdu, hdr, hdr_len); + memcpy(pdu + hdr_len, sdu, sdu_len); + wmb(); + + /* The hardware provides 'low' and 'high' (doorbell) registers + * for passing the 64-bit address of an MCDI request to + * firmware. However the dwords are swapped by firmware. The + * least significant bits of the doorbell are then 0 for all + * MCDI requests due to alignment. + */ + _efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD)); + _efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD)); +} + +static bool ef100_mcdi_poll_response(struct efx_nic *efx) +{ + const efx_dword_t hdr = + *(const efx_dword_t *)(ef100_mcdi_buf(efx, 0, NULL)); + + rmb(); + return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE); +} + +static void ef100_mcdi_read_response(struct efx_nic *efx, + efx_dword_t *outbuf, size_t offset, + size_t outlen) +{ + const u8 *pdu = ef100_mcdi_buf(efx, 0, NULL); + + memcpy(outbuf, pdu + offset, outlen); +} + +static int ef100_mcdi_poll_reboot(struct efx_nic *efx) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + int rc; + + rc = ef100_get_warm_boot_count(efx); + if (rc < 0) { + /* The firmware is presumably in the process of + * rebooting. However, we are supposed to report each + * reboot just once, so we must only do that once we + * can read and store the updated warm boot count. 
+ */ + return 0; + } + + if (rc == nic_data->warm_boot_count) + return 0; + + nic_data->warm_boot_count = rc; + + return -EIO; +} + +static void ef100_mcdi_reboot_detected(struct efx_nic *efx) +{ +} + /* Event handling */ static int ef100_ev_probe(struct efx_channel *channel) @@ -139,6 +217,11 @@ const struct efx_nic_type ef100_pf_nic_type = { .is_vf = false, .probe = ef100_probe_pf, .mcdi_max_ver = 2, + .mcdi_request = ef100_mcdi_request, + .mcdi_poll_response = ef100_mcdi_poll_response, + .mcdi_read_response = ef100_mcdi_read_response, + .mcdi_poll_reboot = ef100_mcdi_poll_reboot, + .mcdi_reboot_detected = ef100_mcdi_reboot_detected, .irq_enable_master = efx_port_dummy_op_void, .irq_disable_non_ev = efx_port_dummy_op_void, .push_irq_moderation = efx_channel_dummy_op_void, @@ -178,6 +261,15 @@ static int ef100_probe_main(struct efx_nic *efx) net_dev->features |= efx->type->offload_features; net_dev->hw_features |= efx->type->offload_features; + /* we assume later that we can copy from this buffer in dwords */ + BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); + + /* MCDI buffers must be 256 byte aligned. */ + rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN, + GFP_KERNEL); + if (rc) + goto fail; + /* Get the MC's warm boot count. In case it's rebooting right * now, be prepared to retry. */ @@ -201,6 +293,16 @@ static int ef100_probe_main(struct efx_nic *efx) /* Post-IO section. */ + rc = efx_mcdi_init(efx); + if (!rc && efx->mcdi->fn_flags & + (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) { + netif_info(efx, probe, efx->net_dev, + "No network port on this PCI function"); + rc = -ENODEV; + } + if (rc) + goto fail; + efx->max_vis = EF100_MAX_VIS; rc = ef100_phy_probe(efx); @@ -233,6 +335,10 @@ void ef100_remove(struct efx_nic *efx) efx_fini_channels(efx); kfree(efx->phy_data); efx->phy_data = NULL; + efx_mcdi_detach(efx); + efx_mcdi_fini(efx); + if (nic_data) + efx_nic_free_buffer(efx, &nic_data->mcdi_buf); kfree(nic_data); efx->nic_data = NULL; } diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 643111aebba5..a4290d183879 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -19,6 +19,7 @@ void ef100_remove(struct efx_nic *efx); struct ef100_nic_data { struct efx_nic *efx; + struct efx_buffer mcdi_buf; u16 warm_boot_count; }; From 965b549f3c20dba97c206937f163379ee44c9c04 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:57:34 +0100 Subject: [PATCH 1672/2056] sfc_ef100: implement ndo_open/close and EVQ probing Channels are probed, but actual event handling is still stubbed out. Stub implementation of check_caps is needed because ptp.c will call into it from efx_ptp_use_mac_tx_timestamps() to decide if it wants TXQs. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_netdev.c | 143 ++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_nic.c | 51 +++++++++ drivers/net/ethernet/sfc/ef100_nic.h | 1 + drivers/net/ethernet/sfc/ef100_rx.c | 6 + drivers/net/ethernet/sfc/ef100_rx.h | 1 + drivers/net/ethernet/sfc/ef100_tx.c | 19 ++++ drivers/net/ethernet/sfc/ef100_tx.h | 4 + 7 files changed, 225 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index f900b375d9b1..d1753ed7aaca 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -29,6 +29,147 @@ static void ef100_update_name(struct efx_nic *efx) strcpy(efx->name, efx->net_dev->name); } +static int ef100_alloc_vis(struct efx_nic *efx, unsigned int *allocated_vis) +{ + /* EF100 uses a single TXQ per channel, as all checksum offloading + * is configured in the TX descriptor, and there is no TX Pacer for + * HIGHPRI queues. + */ + unsigned int tx_vis = efx->n_tx_channels + efx->n_extra_tx_channels; + unsigned int rx_vis = efx->n_rx_channels; + unsigned int min_vis, max_vis; + + EFX_WARN_ON_PARANOID(efx->tx_queues_per_channel != 1); + + tx_vis += efx->n_xdp_channels * efx->xdp_tx_per_channel; + + max_vis = max(rx_vis, tx_vis); + /* Currently don't handle resource starvation and only accept + * our maximum needs and no less. + */ + min_vis = max_vis; + + return efx_mcdi_alloc_vis(efx, min_vis, max_vis, + NULL, allocated_vis); +} + +static int ef100_remap_bar(struct efx_nic *efx, int max_vis) +{ + unsigned int uc_mem_map_size; + void __iomem *membase; + + efx->max_vis = max_vis; + uc_mem_map_size = PAGE_ALIGN(max_vis * efx->vi_stride); + + /* Extend the original UC mapping of the memory BAR */ + membase = ioremap(efx->membase_phys, uc_mem_map_size); + if (!membase) { + netif_err(efx, probe, efx->net_dev, + "could not extend memory BAR to %x\n", + uc_mem_map_size); + return -ENOMEM; + } + iounmap(efx->membase); + efx->membase = membase; + return 0; +} + +/* Context: process, rtnl_lock() held. + * Note that the kernel will ignore our return code; this method + * should really be a void. + */ +static int ef100_net_stop(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", + raw_smp_processor_id()); + + netif_stop_queue(net_dev); + efx_stop_all(efx); + efx_disable_interrupts(efx); + efx_clear_interrupt_affinity(efx); + efx_nic_fini_interrupt(efx); + efx_fini_napi(efx); + efx_remove_channels(efx); + efx_mcdi_free_vis(efx); + efx_remove_interrupts(efx); + + return 0; +} + +/* Context: process, rtnl_lock() held. 
*/ +static int ef100_net_open(struct net_device *net_dev) +{ + struct efx_nic *efx = netdev_priv(net_dev); + unsigned int allocated_vis; + int rc; + + ef100_update_name(efx); + netif_dbg(efx, ifup, net_dev, "opening device on CPU %d\n", + raw_smp_processor_id()); + + rc = efx_check_disabled(efx); + if (rc) + goto fail; + + rc = efx_probe_interrupts(efx); + if (rc) + goto fail; + + rc = efx_set_channels(efx); + if (rc) + goto fail; + + rc = efx_mcdi_free_vis(efx); + if (rc) + goto fail; + + rc = ef100_alloc_vis(efx, &allocated_vis); + if (rc) + goto fail; + + rc = efx_probe_channels(efx); + if (rc) + return rc; + + rc = ef100_remap_bar(efx, allocated_vis); + if (rc) + goto fail; + + efx_init_napi(efx); + + rc = efx_nic_init_interrupt(efx); + if (rc) + goto fail; + efx_set_interrupt_affinity(efx); + + rc = efx_enable_interrupts(efx); + if (rc) + goto fail; + + /* in case the MC rebooted while we were stopped, consume the change + * to the warm reboot count + */ + (void) efx_mcdi_poll_reboot(efx); + + efx_start_all(efx); + + /* Link state detection is normally event-driven; we have + * to poll now because we could have missed a change + */ + mutex_lock(&efx->mac_lock); + if (efx_mcdi_phy_poll(efx)) + efx_link_status_changed(efx); + mutex_unlock(&efx->mac_lock); + + return 0; + +fail: + ef100_net_stop(net_dev); + return rc; +} + /* Initiate a packet transmission. We use one channel per CPU * (sharing when we have more CPUs than channels). * @@ -64,6 +205,8 @@ static netdev_tx_t ef100_hard_start_xmit(struct sk_buff *skb, } static const struct net_device_ops ef100_netdev_ops = { + .ndo_open = ef100_net_open, + .ndo_stop = ef100_net_stop, .ndo_start_xmit = ef100_hard_start_xmit, }; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 189a8a706c0e..be37055743c3 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -135,6 +135,34 @@ static int ef100_ev_probe(struct efx_channel *channel) GFP_KERNEL); } +static int ef100_ev_init(struct efx_channel *channel) +{ + struct ef100_nic_data *nic_data = channel->efx->nic_data; + + /* initial phase is 0 */ + clear_bit(channel->channel, nic_data->evq_phases); + + return efx_mcdi_ev_init(channel, false, false); +} + +static void ef100_ev_read_ack(struct efx_channel *channel) +{ + efx_dword_t evq_prime; + + EFX_POPULATE_DWORD_2(evq_prime, + ERF_GZ_EVQ_ID, channel->channel, + ERF_GZ_IDX, channel->eventq_read_ptr & + channel->eventq_mask); + + efx_writed(channel->efx, &evq_prime, + efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME)); +} + +static int ef100_ev_process(struct efx_channel *channel, int quota) +{ + return 0; +} + static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) { struct efx_msi_context *context = dev_id; @@ -210,6 +238,13 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) return rc; } +static unsigned int ef100_check_caps(const struct efx_nic *efx, + u8 flag, u32 offset) +{ + /* stub */ + return 0; +} + /* NIC level access functions */ const struct efx_nic_type ef100_pf_nic_type = { @@ -230,8 +265,24 @@ const struct efx_nic_type ef100_pf_nic_type = { .map_reset_flags = ef100_map_reset_flags, .reset = ef100_reset, + .check_caps = ef100_check_caps, + .ev_probe = ef100_ev_probe, + .ev_init = ef100_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, .irq_handle_msi = ef100_msi_interrupt, + .ev_process = ef100_ev_process, + .ev_read_ack = ef100_ev_read_ack, + .tx_probe = ef100_tx_probe, + .tx_init = ef100_tx_init, + 
.tx_write = ef100_tx_write, + .tx_enqueue = ef100_enqueue_skb, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_mcdi_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = ef100_rx_write, + .rx_packet = __ef100_rx_packet, /* Per-type bar/size configuration not used on ef100. Location of * registers is defined by extended capabilities. diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index a4290d183879..5d376e2d98a7 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -21,6 +21,7 @@ struct ef100_nic_data { struct efx_nic *efx; struct efx_buffer mcdi_buf; u16 warm_boot_count; + DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); }; #define efx_ef100_has_cap(caps, flag) \ diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c index d6b62f980463..4223a38f46d3 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.c +++ b/drivers/net/ethernet/sfc/ef100_rx.c @@ -13,6 +13,12 @@ #include "rx_common.h" #include "efx.h" +/* RX stubs */ + +void ef100_rx_write(struct efx_rx_queue *rx_queue) +{ +} + void __ef100_rx_packet(struct efx_channel *channel) { /* Stub. No RX path yet. Discard the buffer. */ diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h index a6083a98f362..b5dadf741aa0 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.h +++ b/drivers/net/ethernet/sfc/ef100_rx.h @@ -14,6 +14,7 @@ #include "net_driver.h" +void ef100_rx_write(struct efx_rx_queue *rx_queue); void __ef100_rx_packet(struct efx_channel *channel); #endif diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index b12295413c0d..15e646f8c3e0 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -14,6 +14,25 @@ #include "nic_common.h" #include "ef100_tx.h" +/* TX queue stubs */ +int ef100_tx_probe(struct efx_tx_queue *tx_queue) +{ + return 0; +} + +void ef100_tx_init(struct efx_tx_queue *tx_queue) +{ + /* must be the inverse of lookup in efx_get_tx_channel */ + tx_queue->core_txq = + netdev_get_tx_queue(tx_queue->efx->net_dev, + tx_queue->channel->channel - + tx_queue->efx->tx_channel_offset); +} + +void ef100_tx_write(struct efx_tx_queue *tx_queue) +{ +} + /* Add a socket buffer to a TX queue * * You must hold netif_tx_lock() to call this function. diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h index a7895efca4ff..9a472f7aff43 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.h +++ b/drivers/net/ethernet/sfc/ef100_tx.h @@ -14,5 +14,9 @@ #include "net_driver.h" +int ef100_tx_probe(struct efx_tx_queue *tx_queue); +void ef100_tx_init(struct efx_tx_queue *tx_queue); +void ef100_tx_write(struct efx_tx_queue *tx_queue); + netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); #endif From 5e4ef67346ee8f64b3cac4cbc1c866fd8f18dcd7 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:58:02 +0100 Subject: [PATCH 1673/2056] sfc_ef100: process events for MCDI completions Currently RX and TX-completion events are unhandled, as neither the RX nor the TX path has been implemented yet. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 57 +++++++++++++++++++++++++++- 1 file changed, 56 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index be37055743c3..c6703527bbe9 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -160,7 +160,62 @@ static void ef100_ev_read_ack(struct efx_channel *channel) static int ef100_ev_process(struct efx_channel *channel, int quota) { - return 0; + struct efx_nic *efx = channel->efx; + struct ef100_nic_data *nic_data; + bool evq_phase, old_evq_phase; + unsigned int read_ptr; + efx_qword_t *p_event; + int spent = 0; + bool ev_phase; + int ev_type; + + if (unlikely(!channel->enabled)) + return 0; + + nic_data = efx->nic_data; + evq_phase = test_bit(channel->channel, nic_data->evq_phases); + old_evq_phase = evq_phase; + read_ptr = channel->eventq_read_ptr; + BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN); + + while (spent < quota) { + p_event = efx_event(channel, read_ptr); + + ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE); + if (ev_phase != evq_phase) + break; + + netif_vdbg(efx, drv, efx->net_dev, + "processing event on %d " EFX_QWORD_FMT "\n", + channel->channel, EFX_QWORD_VAL(*p_event)); + + ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE); + + switch (ev_type) { + case ESE_GZ_EF100_EV_MCDI: + efx_mcdi_process_event(channel, p_event); + break; + case ESE_GZ_EF100_EV_DRIVER: + netif_info(efx, drv, efx->net_dev, + "Driver initiated event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*p_event)); + break; + default: + netif_info(efx, drv, efx->net_dev, + "Unhandled event " EFX_QWORD_FMT "\n", + EFX_QWORD_VAL(*p_event)); + } + + ++read_ptr; + if ((read_ptr & channel->eventq_mask) == 0) + evq_phase = !evq_phase; + } + + channel->eventq_read_ptr = read_ptr; + if (evq_phase != old_evq_phase) + change_bit(channel->channel, nic_data->evq_phases); + + return spent; } static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) From f65731207d99c0858e38563a17b480adbd45e4a2 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:58:26 +0100 Subject: [PATCH 1674/2056] sfc_ef100: read datapath caps, implement check_caps Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 58 +++++++++++++++++++++++++++- drivers/net/ethernet/sfc/ef100_nic.h | 2 + 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index c6703527bbe9..3fb81d6e8df3 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -124,6 +124,49 @@ static void ef100_mcdi_reboot_detected(struct efx_nic *efx) { } +/* MCDI calls + */ +static int efx_ef100_init_datapath_caps(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); + struct ef100_nic_data *nic_data = efx->nic_data; + u8 vi_window_mode; + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { + netif_err(efx, drv, efx->net_dev, + "unable to read datapath firmware capabilities\n"); + return -EIO; + } + + nic_data->datapath_caps = MCDI_DWORD(outbuf, + GET_CAPABILITIES_OUT_FLAGS1); + nic_data->datapath_caps2 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V2_OUT_FLAGS2); + + vi_window_mode = MCDI_BYTE(outbuf, + GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); + rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode); + if (rc) + return rc; + + if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3)) + efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6; + efx->num_mac_stats = MCDI_WORD(outbuf, + GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); + netif_dbg(efx, probe, efx->net_dev, + "firmware reports num_mac_stats = %u\n", + efx->num_mac_stats); + return 0; +} + /* Event handling */ static int ef100_ev_probe(struct efx_channel *channel) @@ -296,8 +339,16 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) static unsigned int ef100_check_caps(const struct efx_nic *efx, u8 flag, u32 offset) { - /* stub */ - return 0; + const struct ef100_nic_data *nic_data = efx->nic_data; + + switch (offset) { + case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST: + return nic_data->datapath_caps & BIT_ULL(flag); + case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST: + return nic_data->datapath_caps2 & BIT_ULL(flag); + default: + return 0; + } } /* NIC level access functions @@ -408,6 +459,9 @@ static int ef100_probe_main(struct efx_nic *efx) } if (rc) goto fail; + rc = efx_ef100_init_datapath_caps(efx); + if (rc < 0) + goto fail; efx->max_vis = EF100_MAX_VIS; diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 5d376e2d98a7..392611cc33b5 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -20,6 +20,8 @@ void ef100_remove(struct efx_nic *efx); struct ef100_nic_data { struct efx_nic *efx; struct efx_buffer mcdi_buf; + u32 datapath_caps; + u32 datapath_caps2; u16 warm_boot_count; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); }; From d802b0ae652fb8aa917205c3ad29998e0e097b61 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 12:59:18 +0100 Subject: [PATCH 1675/2056] sfc_ef100: extend ef100_check_caps to cover datapath_caps3 MC_CMD_GET_CAPABILITIES now has a third word of flags; extend the efx_has_cap() machinery to cover it. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 9 ++++++++- drivers/net/ethernet/sfc/ef100_nic.h | 1 + drivers/net/ethernet/sfc/mcdi.h | 4 ++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 3fb81d6e8df3..bb246acca574 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -128,7 +128,7 @@ static void ef100_mcdi_reboot_detected(struct efx_nic *efx) */ static int efx_ef100_init_datapath_caps(struct efx_nic *efx) { - MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V4_OUT_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN); struct ef100_nic_data *nic_data = efx->nic_data; u8 vi_window_mode; size_t outlen; @@ -150,6 +150,11 @@ static int efx_ef100_init_datapath_caps(struct efx_nic *efx) GET_CAPABILITIES_OUT_FLAGS1); nic_data->datapath_caps2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2); + if (outlen < MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) + nic_data->datapath_caps3 = 0; + else + nic_data->datapath_caps3 = MCDI_DWORD(outbuf, + GET_CAPABILITIES_V7_OUT_FLAGS3); vi_window_mode = MCDI_BYTE(outbuf, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); @@ -346,6 +351,8 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx, return nic_data->datapath_caps & BIT_ULL(flag); case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST: return nic_data->datapath_caps2 & BIT_ULL(flag); + case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST: + return nic_data->datapath_caps3 & BIT_ULL(flag); default: return 0; } diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 392611cc33b5..7744ec85bec6 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -22,6 +22,7 @@ struct ef100_nic_data { struct efx_buffer mcdi_buf; u32 datapath_caps; u32 datapath_caps2; + u32 datapath_caps3; u16 warm_boot_count; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); }; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index e053adfe82b0..658cf345420d 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -327,10 +327,10 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev); EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) #define MCDI_CAPABILITY(field) \ - MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _LBN + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN #define MCDI_CAPABILITY_OFST(field) \ - MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST + MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST #define efx_has_cap(efx, field) \ efx->type->check_caps(efx, \ From 4e5675bbabd69f1f30feef56f670527f3330b6df Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 13:04:57 +0100 Subject: [PATCH 1676/2056] sfc_ef100: actually perform resets In ef100_reset(), make the MCDI call to do the reset. Also, do a reset at start-of-day during probe, to put the function in a clean state. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index bb246acca574..5e6a8337a336 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -332,6 +332,10 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) __clear_bit(reset_type, &efx->reset_pending); rc = dev_open(efx->net_dev, NULL); } else if (reset_type == RESET_TYPE_ALL) { + rc = efx_mcdi_reset(efx, reset_type); + if (rc) + return rc; + netif_device_attach(efx->net_dev); rc = dev_open(efx->net_dev, NULL); @@ -466,6 +470,11 @@ static int ef100_probe_main(struct efx_nic *efx) } if (rc) goto fail; + /* Reset (most) configuration for this function */ + rc = efx_mcdi_reset(efx, RESET_TYPE_ALL); + if (rc) + goto fail; + rc = efx_ef100_init_datapath_caps(efx); if (rc < 0) goto fail; From 99a23c1168b7a6b8d1d1b228788b32749b06cbfe Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 13:05:08 +0100 Subject: [PATCH 1677/2056] sfc_ef100: probe the PHY and configure the MAC Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 42 +++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 5e6a8337a336..dc7d4aaa2b04 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -288,16 +288,54 @@ static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id) static int ef100_phy_probe(struct efx_nic *efx) { - /* stub: allocate the phy_data */ + struct efx_mcdi_phy_data *phy_data; + int rc; + + /* Probe for the PHY */ efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL); if (!efx->phy_data) return -ENOMEM; + rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data); + if (rc) + return rc; + + /* Populate driver and ethtool settings */ + phy_data = efx->phy_data; + mcdi_to_ethtool_linkset(phy_data->media, phy_data->supported_cap, + efx->link_advertising); + efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap, + false); + + /* Default to Autonegotiated flow control if the PHY supports it */ + efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; + if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) + efx->wanted_fc |= EFX_FC_AUTO; + efx_link_set_wanted_fc(efx, efx->wanted_fc); + + /* Push settings to the PHY. Failure is not fatal, the user can try to + * fix it using ethtool. + */ + rc = efx_mcdi_port_reconfigure(efx); + if (rc && rc != -EPERM) + netif_warn(efx, drv, efx->net_dev, + "could not initialise PHY settings\n"); + return 0; } /* Other */ +static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only) +{ + WARN_ON(!mutex_is_locked(&efx->mac_lock)); + + efx_mcdi_filter_sync_rx_mode(efx); + + if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED)) + return efx_mcdi_set_mtu(efx); + return efx_mcdi_set_mac(efx); +} static enum reset_type ef100_map_reset_reason(enum reset_type reason) { @@ -401,6 +439,8 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .reconfigure_mac = ef100_reconfigure_mac, + /* Per-type bar/size configuration not used on ef100. Location of * registers is defined by extended capabilities. 
*/ From 29ec1b27e73990cf32aedd9208aa6eff2d4a1e5e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 13:05:31 +0100 Subject: [PATCH 1678/2056] sfc_ef100: read device MAC address at probe time Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 40 +++++++++++++++++++++++++++- drivers/net/ethernet/sfc/ef100_nic.h | 1 + 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index dc7d4aaa2b04..1161190391b1 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -126,6 +126,26 @@ static void ef100_mcdi_reboot_detected(struct efx_nic *efx) /* MCDI calls */ +static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address) +{ + MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); + size_t outlen; + int rc; + + BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0); + + rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0, + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) + return -EIO; + + ether_addr_copy(mac_address, + MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE)); + return 0; +} + static int efx_ef100_init_datapath_caps(struct efx_nic *efx) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN); @@ -540,7 +560,25 @@ static int ef100_probe_main(struct efx_nic *efx) int ef100_probe_pf(struct efx_nic *efx) { - return ef100_probe_main(efx); + struct net_device *net_dev = efx->net_dev; + struct ef100_nic_data *nic_data; + int rc = ef100_probe_main(efx); + + if (rc) + goto fail; + + nic_data = efx->nic_data; + rc = ef100_get_mac_address(efx, net_dev->perm_addr); + if (rc) + goto fail; + /* Assign MAC address */ + memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN); + memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN); + + return 0; + +fail: + return rc; } void ef100_remove(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 7744ec85bec6..6367bbb2c9b3 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -24,6 +24,7 @@ struct ef100_nic_data { u32 datapath_caps2; u32 datapath_caps3; u16 warm_boot_count; + u8 port_id[ETH_ALEN]; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); }; From 1c74884387e5b6d599316eff7d51d484b3f6dcca Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 27 Jul 2020 13:05:48 +0100 Subject: [PATCH 1679/2056] sfc_ef100: implement ndo_get_phys_port_{id,name} Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_netdev.c | 2 ++ drivers/net/ethernet/sfc/ef100_nic.c | 21 +++++++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index d1753ed7aaca..4c3caac2c8cc 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -208,6 +208,8 @@ static const struct net_device_ops ef100_netdev_ops = { .ndo_open = ef100_net_open, .ndo_stop = ef100_net_stop, .ndo_start_xmit = ef100_hard_start_xmit, + .ndo_get_phys_port_id = efx_get_phys_port_id, + .ndo_get_phys_port_name = efx_get_phys_port_name, }; /* Netdev registration diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 1161190391b1..6a00f2a2dc2b 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -403,6 +403,20 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) return rc; } +static int efx_ef100_get_phys_port_id(struct efx_nic *efx, + struct netdev_phys_item_id *ppid) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + if (!is_valid_ether_addr(nic_data->port_id)) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + memcpy(ppid->id, nic_data->port_id, ppid->id_len); + + return 0; +} + static unsigned int ef100_check_caps(const struct efx_nic *efx, u8 flag, u32 offset) { @@ -459,6 +473,8 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .get_phys_port_id = efx_ef100_get_phys_port_id, + .reconfigure_mac = ef100_reconfigure_mac, /* Per-type bar/size configuration not used on ef100. Location of @@ -541,6 +557,11 @@ static int ef100_probe_main(struct efx_nic *efx) efx->max_vis = EF100_MAX_VIS; + rc = efx_mcdi_port_get_number(efx); + if (rc < 0) + goto fail; + efx->port_num = rc; + rc = ef100_phy_probe(efx); if (rc) goto fail; From 3540669761b8a26cd5a4ffb9b6c737bc8b941f00 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 27 Jul 2020 15:17:12 +0100 Subject: [PATCH 1680/2056] qed: fix assignment of n_rq_elems to incorrect params field Currently n_rq_elems is being assigned to params.elem_size instead of the field params.num_elems. Coverity is detecting this as a double assingment to params.elem_size and reporting this as an usused value on the first assignment. Fix this. Addresses-Coverity: ("Unused value") Fixes: b6db3f71c976 ("qed: simplify chain allocation with init params struct") Signed-off-by: Colin Ian King Acked-by: Alexander Lobakin Signed-off-by: David S. Miller --- drivers/infiniband/hw/qedr/verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 49b8a43e3fa2..f5aa85a5377c 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1923,7 +1923,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev, in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE; - params.elem_size = n_rq_elems; + params.num_elems = n_rq_elems; params.elem_size = QEDR_RQE_ELEMENT_SIZE; rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, ¶ms); From 5e619d73e6797ed9f2554a1bf996d52d8c91ca50 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Mon, 27 Jul 2020 12:55:26 -0500 Subject: [PATCH 1681/2056] net/mlx4: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. [1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/eq.c | 2 +- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 8a10285b0e10..b50c567ef508 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -806,10 +806,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud goto xdp_drop_no_cnt; /* Drop on xmit failure */ default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: trace_xdp_exception(dev, xdp_prog, act); - /* fall through */ + fallthrough; case XDP_DROP: ring->xdp_drop++; xdp_drop_no_cnt: diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index c790a5fcea73..ae305c2e9225 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -558,7 +558,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n", __func__, be32_to_cpu(eqe->event.srq.srqn), eq->eqn); - /* fall through */ + fallthrough; case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 9486caecfbdc..f1b4ad9c66d2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1412,7 +1412,7 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; - /* fall through */ + fallthrough; case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) @@ -1442,7 +1442,7 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], case MLX4_STEERING_MODE_A0: if (prot == MLX4_PROT_ETH) return 0; - /* fall through */ + fallthrough; case MLX4_STEERING_MODE_B0: if (prot == MLX4_PROT_ETH) From 70cfab1d871c771bd1963dd898f4f9b2731590c2 Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 24 Jul 2020 10:06:17 +0100 Subject: [PATCH 1682/2056] tools, bpftool: Skip type probe if name is not found For probing program and map types, bpftool loops on type values and uses the relevant type name in prog_type_name[] or map_type_name[]. To ensure the name exists, we exit from the loop if we go over the size of the array. However, this is not enough in the case where the arrays have "holes" in them, program or map types for which they have no name, but not at the end of the list. This is currently the case for BPF_PROG_TYPE_LSM, not known to bpftool and which name is a null string. When probing for features, bpftool attempts to strlen() that name and segfaults. Let's fix it by skipping probes for "unknown" program and map types, with an informational message giving the numeral value in that case. 
Fixes: 93a3545d812a ("tools/bpftool: Add name mappings for SK_LOOKUP prog and attach type") Reported-by: Paul Chaignon Signed-off-by: Quentin Monnet Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200724090618.16378-2-quentin@isovalent.com --- tools/bpf/bpftool/feature.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c index 1cd75807673e..a43a6f10b564 100644 --- a/tools/bpf/bpftool/feature.c +++ b/tools/bpf/bpftool/feature.c @@ -504,6 +504,10 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types, supported_types[prog_type] |= res; + if (!prog_type_name[prog_type]) { + p_info("program type name not found (type %d)", prog_type); + return; + } maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; if (strlen(prog_type_name[prog_type]) > maxlen) { p_info("program type name too long"); @@ -533,6 +537,10 @@ probe_map_type(enum bpf_map_type map_type, const char *define_prefix, * check required for unprivileged users */ + if (!map_type_name[map_type]) { + p_info("map type name not found (type %d)", map_type); + return; + } maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1; if (strlen(map_type_name[map_type]) > maxlen) { p_info("map type name too long"); From 9a97c9d2af5ca798377342debf7f0f44281d050e Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Fri, 24 Jul 2020 10:06:18 +0100 Subject: [PATCH 1683/2056] tools, bpftool: Add LSM type to array of prog names Assign "lsm" as a printed name for BPF_PROG_TYPE_LSM in bpftool, so that it can use it when listing programs loaded on the system or when probing features. Signed-off-by: Quentin Monnet Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200724090618.16378-3-quentin@isovalent.com --- tools/bpf/bpftool/prog.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 3e6ecc6332e2..158995d853b0 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -59,6 +59,7 @@ const char * const prog_type_name[] = { [BPF_PROG_TYPE_TRACING] = "tracing", [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops", [BPF_PROG_TYPE_EXT] = "ext", + [BPF_PROG_TYPE_LSM] = "lsm", [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup", }; From cde1a8a992875a7479c4321b2a4a190c2e92ec2a Mon Sep 17 00:00:00 2001 From: Ismael Ferreras Morezuelas Date: Sun, 26 Jul 2020 23:12:28 +0200 Subject: [PATCH 1684/2056] Bluetooth: btusb: Fix and detect most of the Chinese Bluetooth controllers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For some reason they tend to squat on the very first CSR/ Cambridge Silicon Radio VID/PID instead of paying fees. This is an extremely common problem; the issue goes as back as 2013 and these devices are only getting more popular, even rebranded by reputable vendors and sold by retailers everywhere. So, at this point in time there are hundreds of modern dongles reusing the ID of what originally was an early Bluetooth 1.1 controller. Linux is the only place where they don't work due to spotty checks in our detection code. It only covered a minimum subset. So what's the big idea? Take advantage of the fact that all CSR chips report the same internal version as both the LMP sub-version and HCI revision number. It always matches, couple that with the manufacturer code, that rarely lies, and we now have a good idea of who is who. 
Additionally, by compiling a list of user-reported HCI/lsusb dumps, and searching around for legit CSR dongles in similar product ranges we can find what CSR BlueCore firmware supported which Bluetooth versions. That way we can narrow down ranges of fakes for each of them. e.g. Real CSR dongles with LMP subversion 0x73 are old enough that support BT 1.1 only; so it's a dead giveaway when some third-party BT 4.0 dongle reuses it. So, to sum things up; there are multiple classes of fake controllers reusing the same 0A12:0001 VID/PID. This has been broken for a while. Known 'fake' bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 IC markings on 0x7558: FR3191AHAL 749H15143 (???) https://bugzilla.kernel.org/show_bug.cgi?id=60824 Fixes: 81cac64ba258ae (Deal with USB devices that are faking CSR vendor) Reported-by: Michał Wiśniewski Tested-by: Mike Johnson Tested-by: Ricardo Rodrigues Tested-by: M.Hanny Sabbagh Tested-by: Oussama BEN BRAHIM Tested-by: Ismael Ferreras Morezuelas Signed-off-by: Ismael Ferreras Morezuelas Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 76 ++++++++++++++++++++++++++----- include/net/bluetooth/bluetooth.h | 2 + include/net/bluetooth/hci.h | 11 +++++ net/bluetooth/hci_core.c | 6 ++- 4 files changed, 82 insertions(+), 13 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index aa0bc9942afd..1f51494f5818 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1742,6 +1742,7 @@ static int btusb_setup_csr(struct hci_dev *hdev) { struct hci_rp_read_local_version *rp; struct sk_buff *skb; + bool is_fake = false; BT_DBG("%s", hdev->name); @@ -1761,18 +1762,69 @@ static int btusb_setup_csr(struct hci_dev *hdev) rp = (struct hci_rp_read_local_version *)skb->data; - /* Detect controllers which aren't real CSR ones. */ + /* Detect a wide host of Chinese controllers that aren't CSR. + * + * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891 + * + * The main thing they have in common is that these are really popular low-cost + * options that support newer Bluetooth versions but rely on heavy VID/PID + * squatting of this poor old Bluetooth 1.1 device. Even sold as such. + * + * We detect actual CSR devices by checking that the HCI manufacturer code + * is Cambridge Silicon Radio (10) and ensuring that LMP sub-version and + * HCI rev values always match. As they both store the firmware number. + */ if (le16_to_cpu(rp->manufacturer) != 10 || - le16_to_cpu(rp->lmp_subver) == 0x0c5c) { + le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver)) + is_fake = true; + + /* Known legit CSR firmware build numbers and their supported BT versions: + * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e + * - 1.2 (0x2) -> 0x04d9, 0x0529 + * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c + * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External) + * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb + * + * e.g. Real CSR dongles with LMP subversion 0x73 are old enough that + * support BT 1.1 only; so it's a dead giveaway when some + * third-party BT 4.0 dongle reuses it. 
+ */ + else if (le16_to_cpu(rp->lmp_subver) <= 0x034e && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_1_2) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_0) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_2_1) + is_fake = true; + + else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb && + le16_to_cpu(rp->hci_ver) > BLUETOOTH_VER_4_0) + is_fake = true; + + if (is_fake) { + bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds..."); + + /* Generally these clones have big discrepancies between + * advertised features and what's actually supported. + * Probably will need to be expanded in the future; + * without these the controller will lock up. + */ + set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); + /* Clear the reset quirk since this is not an actual * early Bluetooth 1.1 device from CSR. */ clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); - - /* These fake CSR controllers have all a broken - * stored link key handling and so just disable it. - */ - set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); + clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); } kfree_skb(skb); @@ -4070,11 +4122,13 @@ static int btusb_probe(struct usb_interface *intf, if (bcdDevice < 0x117) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); - /* Fake CSR devices with broken commands */ - if (bcdDevice <= 0x100 || bcdDevice == 0x134) - hdev->setup = btusb_setup_csr; - + /* This must be set first in case we disable it for fakes */ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); + + /* Fake CSR devices with broken commands */ + if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 && + le16_to_cpu(udev->descriptor.idProduct) == 0x0001) + hdev->setup = btusb_setup_csr; } if (id->driver_info & BTUSB_SNIFFER) { diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 7ee8041af803..9125effbf448 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -41,6 +41,8 @@ #define BLUETOOTH_VER_1_1 1 #define BLUETOOTH_VER_1_2 2 #define BLUETOOTH_VER_2_0 3 +#define BLUETOOTH_VER_2_1 4 +#define BLUETOOTH_VER_4_0 6 /* Reserv for core and drivers use */ #define BT_SKB_RESERVE 8 diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 1f18f71363e9..1317dfd8f962 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -227,6 +227,17 @@ enum { * supported. */ HCI_QUIRK_VALID_LE_STATES, + + /* When this quirk is set, then erroneous data reporting + * is ignored. This is mainly due to the fact that the HCI + * Read Default Erroneous Data Reporting command is advertised, + * but not supported; these controllers often reply with unknown + * command and tend to lock up randomly. Needing a hard reset. + * + * This quirk can be set before hci_register_dev is called or + * during the hdev->setup vendor callback. 
+ */ + HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, }; /* HCI device flags */ diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 6509f785dd14..2891e16c1cc1 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -605,7 +605,8 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) if (hdev->commands[8] & 0x01) hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); - if (hdev->commands[18] & 0x04) + if (hdev->commands[18] & 0x04 && + !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); /* Some older Broadcom based Bluetooth 1.2 controllers do not @@ -850,7 +851,8 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt) /* Set erroneous data reporting if supported to the wideband speech * setting value */ - if (hdev->commands[18] & 0x08) { + if (hdev->commands[18] & 0x08 && + !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) { bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); From a7ad4b6119d740b1ec5788f1b98be0fd1c1b5a5a Mon Sep 17 00:00:00 2001 From: Nicolas Boichat Date: Tue, 21 Jul 2020 10:37:15 +0800 Subject: [PATCH 1685/2056] Bluetooth: hci_h5: Set HCI_UART_RESET_ON_INIT to correct flags HCI_UART_RESET_ON_INIT belongs in hdev_flags, not flags. Fixes: ce945552fde4a09 ("Bluetooth: hci_h5: Add support for serdev enumerated devices") Signed-off-by: Nicolas Boichat Reviewed-by: Hans de Goede Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_h5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index e60b2e0773db..e41854e0d79a 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -793,7 +793,7 @@ static int h5_serdev_probe(struct serdev_device *serdev) if (!h5) return -ENOMEM; - set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags); + set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags); h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; From 24b065727ceba53cc5bec0e725672417154df24f Mon Sep 17 00:00:00 2001 From: Max Chou Date: Thu, 23 Jul 2020 18:47:42 +0800 Subject: [PATCH 1686/2056] Bluetooth: Return NOTIFY_DONE for hci_suspend_notifier The original return is NOTIFY_STOP, but notifier_call_chain would stop the future call for register_pm_notifier even registered on other Kernel modules with the same priority which value is zero. Signed-off-by: Max Chou Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 2891e16c1cc1..5394ab56c915 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3515,7 +3515,7 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", action, ret); - return NOTIFY_STOP; + return NOTIFY_DONE; } /* Alloc HCI device */ From 202798db9570104728dce8bb57dfeed47ce764bc Mon Sep 17 00:00:00 2001 From: Nicolas Boichat Date: Tue, 21 Jul 2020 10:37:16 +0800 Subject: [PATCH 1687/2056] Bluetooth: hci_serdev: Only unregister device if it was registered We should not call hci_unregister_dev if the device was not successfully registered. 
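Illustration (not part of this patch): a tiny standalone C sketch of the pattern being applied -- record whether registration succeeded and make teardown check that flag, so a failed probe path never unregisters something that was never registered. The names below are invented; only the flag-guarded cleanup mirrors the HCI_UART_REGISTERED test in the diff.

  #include <stdbool.h>
  #include <stdio.h>

  struct fake_dev {
          bool registered;        /* set only after a successful register */
  };

  static int register_dev(struct fake_dev *d, bool simulate_failure)
  {
          if (simulate_failure)
                  return -1;      /* registration never happened */
          d->registered = true;
          return 0;
  }

  static void unregister_device(struct fake_dev *d)
  {
          /* only undo what was actually done */
          if (d->registered) {
                  d->registered = false;
                  printf("unregistered\n");
          } else {
                  printf("never registered, skipping unregister\n");
          }
  }

  int main(void)
  {
          struct fake_dev ok = { .registered = false };
          struct fake_dev failed = { .registered = false };

          register_dev(&ok, false);
          register_dev(&failed, true);

          unregister_device(&ok);
          unregister_device(&failed);
          return 0;
  }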
Fixes: c34dc3bfa7642fd ("Bluetooth: hci_serdev: Introduce hci_uart_unregister_device()") Signed-off-by: Nicolas Boichat Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_serdev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index 599855e4c57c..7b233312e723 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -355,7 +355,8 @@ void hci_uart_unregister_device(struct hci_uart *hu) struct hci_dev *hdev = hu->hdev; clear_bit(HCI_UART_PROTO_READY, &hu->flags); - hci_unregister_dev(hdev); + if (test_bit(HCI_UART_REGISTERED, &hu->flags)) + hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); From d6945242f45d4745a8169fdab7afeb40f4e36f06 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Mon, 18 May 2020 11:13:52 +0300 Subject: [PATCH 1688/2056] net/mlx5: Hold pages RB tree per VF Per page request event, FW request to allocated or release pages for a single function. Driver maintains FW pages object per function, so there is no need to hold one global page data-base. Instead, have a page data-base per function, which will improve performance release flow in all cases, especially for "release all pages". As the range of function IDs is large and not sequential, use xarray to store a per function ID page data-base, where the function ID is the key. Upon first allocation of a page to a function ID, create the page data-base per function. This data-base will be released only at pagealloc mechanism cleanup. NIC: ConnectX-4 Lx CPU: Intel(R) Xeon(R) CPU E5-2650 v2 @ 2.60GHz Test case: 32 VFs, measure release pages on one VF as part of FLR Before: 0.021 Sec After: 0.014 Sec The improvement depends on amount of VFs and memory utilization by them. Time measurements above were taken from idle system. 
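Illustration (not part of this patch): a minimal userspace C sketch of the lazily created per-function page database. A linked list keyed by function ID stands in for the kernel xarray, and a plain page counter stands in for the RB tree of fw_page entries; the point is that "release all pages of function N" touches only that function's entry instead of walking one global tree and filtering on func_id. The driver itself uses xa_load()/xa_insert() with an RB root per entry, as the diff shows.

  #include <stdio.h>
  #include <stdlib.h>

  struct func_pages {
          unsigned int func_id;
          unsigned long npages;
          struct func_pages *next;
  };

  static struct func_pages *db;

  /* Lookup-or-create: the per-function entry is created on the first
   * allocation for that function ID, like page_root_per_func_id().
   */
  static struct func_pages *pages_for_func(unsigned int func_id)
  {
          struct func_pages *e;

          for (e = db; e; e = e->next)
                  if (e->func_id == func_id)
                          return e;

          e = calloc(1, sizeof(*e));
          if (!e)
                  return NULL;
          e->func_id = func_id;
          e->next = db;
          db = e;
          return e;
  }

  int main(void)
  {
          struct func_pages *vf1 = pages_for_func(1);
          struct func_pages *vf2 = pages_for_func(2);

          if (!vf1 || !vf2)
                  return 1;

          vf1->npages += 64;
          vf2->npages += 32;
          vf1->npages += 16;

          /* releasing one function's pages never scans the others */
          printf("func 1 holds %lu pages\n", vf1->npages);
          vf1->npages = 0;
          printf("func 1 holds %lu pages after release\n", vf1->npages);
          return 0;      /* entries intentionally leaked at exit for brevity */
  }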
Signed-off-by: Eran Ben Elisha Reviewed-by: Mark Bloch Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/pagealloc.c | 140 +++++++++++++----- include/linux/mlx5/driver.h | 2 +- 2 files changed, 104 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 5ddd18639a1e..1b20e3397dde 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mlx5_core.h" #include "lib/eq.h" @@ -73,15 +74,45 @@ enum { MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; +static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id) +{ + struct rb_root *root; + int err; + + root = xa_load(&dev->priv.page_root_xa, func_id); + if (root) + return root; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + + err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL); + if (err) { + kfree(root); + return ERR_PTR(err); + } + + *root = RB_ROOT; + + return root; +} + static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) { - struct rb_root *root = &dev->priv.page_root; - struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; + struct rb_root *root; + struct rb_node **new; struct fw_page *nfp; struct fw_page *tfp; int i; + root = page_root_per_func_id(dev, func_id); + if (IS_ERR(root)) + return PTR_ERR(root); + + new = &root->rb_node; + while (*new) { parent = *new; tfp = rb_entry(parent, struct fw_page, rb_node); @@ -111,13 +142,20 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u return 0; } -static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr) +static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr, + u32 func_id) { - struct rb_root *root = &dev->priv.page_root; - struct rb_node *tmp = root->rb_node; struct fw_page *result = NULL; + struct rb_root *root; + struct rb_node *tmp; struct fw_page *tfp; + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return NULL; + + tmp = root->rb_node; + while (tmp) { tfp = rb_entry(tmp, struct fw_page, rb_node); if (tfp->addr < addr) { @@ -191,7 +229,13 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id) static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, bool in_free_list) { - rb_erase(&fwp->rb_node, &dev->priv.page_root); + struct rb_root *root; + + root = xa_load(&dev->priv.page_root_xa, fwp->func_id); + if (WARN_ON_ONCE(!root)) + return; + + rb_erase(&fwp->rb_node, root); if (in_free_list) list_del(&fwp->list); dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK, @@ -200,12 +244,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, kfree(fwp); } -static void free_4k(struct mlx5_core_dev *dev, u64 addr) +static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id) { struct fw_page *fwp; int n; - fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK); + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id); if (!fwp) { mlx5_core_warn_rl(dev, "page not found\n"); return; @@ -340,7 +384,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, out_4k: for (i--; i >= 0; i--) - free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id); out_free: 
kvfree(in); if (notify_fail) @@ -351,16 +395,19 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, bool ec_function) { + struct rb_root *root; struct rb_node *p; int npages = 0; - p = rb_first(&dev->priv.page_root); + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return; + + p = rb_first(root); while (p) { struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node); p = rb_next(p); - if (fwp->func_id != func_id) - continue; npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count); free_fwp(dev, fwp, fwp->free_count); } @@ -378,6 +425,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, static int reclaim_pages_cmd(struct mlx5_core_dev *dev, u32 *in, int in_size, u32 *out, int out_size) { + struct rb_root *root; struct fw_page *fwp; struct rb_node *p; u32 func_id; @@ -391,12 +439,14 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, npages = MLX5_GET(manage_pages_in, in, input_num_entries); func_id = MLX5_GET(manage_pages_in, in, function_id); - p = rb_first(&dev->priv.page_root); + root = xa_load(&dev->priv.page_root_xa, func_id); + if (WARN_ON_ONCE(!root)) + return -EEXIST; + + p = rb_first(root); while (p && i < npages) { fwp = rb_entry(p, struct fw_page, rb_node); p = rb_next(p); - if (fwp->func_id != func_id) - continue; MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr); i++; @@ -446,7 +496,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } for (i = 0; i < num_claimed; i++) - free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i])); + free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id); if (nclaimed) *nclaimed = num_claimed; @@ -560,35 +610,49 @@ static int optimal_reclaimed_pages(void) return ret; } -int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, + struct rb_root *root, u16 func_id) { unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); - struct fw_page *fwp; - struct rb_node *p; - int nclaimed = 0; - int err = 0; - do { - p = rb_first(&dev->priv.page_root); - if (p) { - fwp = rb_entry(p, struct fw_page, rb_node); - err = reclaim_pages(dev, fwp->func_id, - optimal_reclaimed_pages(), - &nclaimed, mlx5_core_is_ecpf(dev)); + while (!RB_EMPTY_ROOT(root)) { + int nclaimed; + int err; - if (err) { - mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", - err); - return err; - } - if (nclaimed) - end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), + &nclaimed, mlx5_core_is_ecpf(dev)); + if (err) { + mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", + err, func_id); + return err; } + + if (nclaimed) + end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS); + if (time_after(jiffies, end)) { mlx5_core_warn(dev, "FW did not return all pages. 
giving up...\n"); break; } - } while (p); + } + + return 0; +} + +int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) +{ + struct rb_root *root; + unsigned long id; + void *entry; + + xa_for_each(&dev->priv.page_root_xa, id, entry) { + root = entry; + mlx5_reclaim_root_pages(dev, root, id); + xa_erase(&dev->priv.page_root_xa, id); + kfree(root); + } + + WARN_ON(!xa_empty(&dev->priv.page_root_xa)); WARN(dev->priv.fw_pages, "FW pages counter is %d after reclaiming all pages\n", @@ -605,17 +669,19 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) int mlx5_pagealloc_init(struct mlx5_core_dev *dev) { - dev->priv.page_root = RB_ROOT; INIT_LIST_HEAD(&dev->priv.free_list); dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); if (!dev->priv.pg_wq) return -ENOMEM; + xa_init(&dev->priv.page_root_xa); + return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { + xa_destroy(&dev->priv.page_root_xa); destroy_workqueue(dev->priv.pg_wq); } diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6a97ad601991..a0fcc4d13e93 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -541,7 +541,7 @@ struct mlx5_priv { /* pages stuff */ struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct rb_root page_root; + struct xarray page_root_xa; int fw_pages; atomic_t reg_pages; struct list_head free_list; From c1a0969ee829154cfc3e3721d8a94607636ce4a6 Mon Sep 17 00:00:00 2001 From: Avihu Hagag Date: Mon, 8 Jun 2020 13:53:09 +0300 Subject: [PATCH 1689/2056] net/mlx5: Add function ID to reclaim pages debug log Add function ID to reclaim pages debug log for better user visibility. Signed-off-by: Avihu Hagag Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 1b20e3397dde..a4a23a27c368 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -480,7 +480,8 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, MLX5_SET(manage_pages_in, in, input_num_entries, npages); MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function); - mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); + mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n", + func_id, npages, outlen); err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen); if (err) { mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err); From 8b95bda47c73625a06561657c7b21e5874833567 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 20 Jul 2020 14:02:15 +0300 Subject: [PATCH 1690/2056] net/mlx5: E-switch, Consider maximum vf vports for steering init When eswitch is enabled, VFs might not be enabled. Hence, consider maximum number of VFs. This further closes the gap between handling VF vports between ECPF and PF. 
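Illustration (not part of this patch): a short standalone C sketch of the sizing rule the patch moves to -- steering objects created at init time are dimensioned for the maximum number of vports the device can expose, so enabling more VFs later never outgrows them. The macro names mirror those in the diff below; the numeric values and the vport count are placeholders.

  #include <stdio.h>

  #define MAX_SQ_NVPORTS  32      /* placeholder value */
  #define MAX_PF_SQ       256     /* placeholder value */
  #define MISS_FLOWS      2       /* placeholder value */

  /* Same shape as the FDB sizing in esw_create_offloads_fdb_tables():
   * driven by the total vport capacity, not the currently enabled VFs.
   */
  static unsigned int fdb_table_size(unsigned int total_vports)
  {
          return total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
                 MISS_FLOWS + total_vports;
  }

  int main(void)
  {
          unsigned int total_vports = 67; /* arbitrary example capacity */

          printf("fdb table size: %u entries\n", fdb_table_size(total_vports));
          return 0;
  }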
Fixes: ea2128fd632c ("net/mlx5: E-switch, Reduce dependency on num_vfs during mode set") Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Reviewed-by: Bodong Wang Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index db856d70c4f8..a21b00d6a37d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1986,15 +1986,9 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) static int esw_offloads_steering_init(struct mlx5_eswitch *esw) { - int num_vfs = esw->esw_funcs.num_vfs; - int total_vports; + int total_vports = esw->total_vports; int err; - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) - total_vports = esw->total_vports; - else - total_vports = num_vfs + MLX5_SPECIAL_VPORTS(esw->dev); - memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); mutex_init(&esw->fdb_table.offloads.vports.lock); hash_init(esw->fdb_table.offloads.vports.table); From 0da3c12dd6fc08e40802fc102b00a37b5b415eeb Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 20 Jul 2020 14:10:16 +0300 Subject: [PATCH 1691/2056] net/mlx5: E-switch, Reuse total_vports and avoid duplicate nvports Total e-switch vports are already stored in mlx5_eswitch total_vports. Avoid copy of it in nvports and reuse existing total_vports calculation. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Reviewed-by: Bodong Wang Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1 - .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 13 ++++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index b68e02ad65e2..1f52b329e915 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -271,7 +271,6 @@ struct mlx5_eswitch { struct mlx5_esw_offload offloads; int mode; - int nvports; u16 manager_vport; u16 first_host_vport; struct mlx5_esw_functions esw_funcs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index a21b00d6a37d..6097f9aac938 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1132,7 +1132,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw, } } -static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) +static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_table_attr ft_attr = {}; @@ -1165,7 +1165,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) goto ns_err; } - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + + table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + MLX5_ESW_MISS_FLOWS + esw->total_vports; /* create the slow path fdb with encap set, so further table instances @@ -1202,7 +1202,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); MLX5_SET_TO_ONES(fte_match_param, match_criteria, 
misc_parameters.source_port); - ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ; + ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ; MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); @@ -1270,7 +1270,6 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) if (err) goto miss_rule_err; - esw->nvports = nvports; kvfree(flow_group_in); return 0; @@ -2005,7 +2004,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) if (err) goto create_restore_err; - err = esw_create_offloads_fdb_tables(esw, total_vports); + err = esw_create_offloads_fdb_tables(esw); if (err) goto create_fdb_err; @@ -2459,13 +2458,13 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, esw->offloads.encap = encap; - err = esw_create_offloads_fdb_tables(esw, esw->nvports); + err = esw_create_offloads_fdb_tables(esw); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed re-creating fast FDB table"); esw->offloads.encap = !encap; - (void)esw_create_offloads_fdb_tables(esw, esw->nvports); + (void)esw_create_offloads_fdb_tables(esw); } unlock: From 8d6bd3c339a723ff2b21c432b2054f7dd96205ae Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Mon, 20 Jul 2020 14:27:54 +0300 Subject: [PATCH 1692/2056] net/mlx5: E-switch, Use eswitch total_vports Currently steering table and rx group initialization helper routines works on the total_vports passed as input parameter. Both eswitch helpers work on the mlx5_eswitch and thereby have access to esw->total_vports. Hence use it directly instead of passing it via function input arguments. Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Reviewed-by: Bodong Wang Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 6097f9aac938..54de53daf1c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1310,7 +1310,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) MLX5_FLOW_STEERING_MODE_DMFS); } -static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) +static int esw_create_offloads_table(struct mlx5_eswitch *esw) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_core_dev *dev = esw->dev; @@ -1324,7 +1324,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports) return -EOPNOTSUPP; } - ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS; + ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS; ft_attr.prio = 1; ft_offloads = mlx5_create_flow_table(ns, &ft_attr); @@ -1345,14 +1345,15 @@ static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) mlx5_destroy_flow_table(offloads->ft_offloads); } -static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports) +static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *g; u32 *flow_group_in; + int nvports; int err = 0; - nvports = nvports + MLX5_ESW_MISS_FLOWS; + nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; @@ -1985,7 +1986,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) static int 
esw_offloads_steering_init(struct mlx5_eswitch *esw) { - int total_vports = esw->total_vports; int err; memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb)); @@ -1996,7 +1996,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) if (err) goto create_acl_err; - err = esw_create_offloads_table(esw, total_vports); + err = esw_create_offloads_table(esw); if (err) goto create_offloads_err; @@ -2008,7 +2008,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw) if (err) goto create_fdb_err; - err = esw_create_vport_rx_group(esw, total_vports); + err = esw_create_vport_rx_group(esw); if (err) goto create_fg_err; From 123f0f53dd64b67e34142485fe866a8a581f12f1 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Fri, 12 Jun 2020 12:16:51 +0300 Subject: [PATCH 1693/2056] net/mlx5e: Link non uplink representors to PCI device Currently PF and VF representors are exposed as virtual device. They are not linked to its parent PCI device like how uplink representor is linked. Due to this, PF and VF representors cannot benefit of the systemd defined naming scheme. This requires special handling by the users. Hence, link the PF and VF representors to their parent PCI device similar to existing uplink representor netdevice. Example: udevadm output before linking to PCI device: $ udevadm test-builtin net_id /sys/class/net/eth6 Load module index Network interface NamePolicy= disabled on kernel command line, ignoring. Parsed configuration file /usr/lib/systemd/network/99-default.link Created link configuration context. Using default interface naming scheme 'v243'. ID_NET_NAMING_SCHEME=v243 Unload module index Unloaded link configuration context. udevadm output after linking to PCI device: $ udevadm test-builtin net_id /sys/class/net/eth6 Load module index Network interface NamePolicy= disabled on kernel command line, ignoring. Parsed configuration file /usr/lib/systemd/network/99-default.link Created link configuration context. Using default interface naming scheme 'v243'. ID_NET_NAMING_SCHEME=v243 ID_NET_NAME_PATH=enp0s8f0npf0vf0 Unload module index Unloaded link configuration context. In past there was little concern over seeing 10,000 lines output showing up at thread [1] is not applicable as ndo ops for VF handling is not exposed for all the 100 repesentors for mlx5 devices. Additionally alternative device naming [2] to overcome shorter device naming is also part of the latest systemd release v245. 
[1] https://marc.info/?l=linux-netdev&m=152657949117904&w=2 [2] https://lwn.net/Articles/814068/ Signed-off-by: Parav Pandit Reviewed-by: Roi Dayan Acked-by: Jiri Pirko Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c300729fb498..a7a74748c948 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -699,8 +699,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_core_dev *mdev = priv->mdev; + SET_NETDEV_DEV(netdev, mdev->device); if (rep->vport == MLX5_VPORT_UPLINK) { - SET_NETDEV_DEV(netdev, mdev->device); netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; /* we want a persistent mac for the uplink rep */ mlx5_query_mac_address(mdev, netdev->dev_addr); From 5adf4c475aaa037d8f787d08c7699954cf78b19b Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 30 Apr 2020 15:54:17 +0300 Subject: [PATCH 1694/2056] net/mlx5e: RX, Re-work initializaiton of RX function pointers Instead of exposing the RQ datapath handlers (from en_rx.c) so that they are set in the control path (in en_main.c), wrap this logic in a single function in en_rx.c and expose it alone. Every profile will now have a pointer to the new mlx5e_rx_handlers structure, instead of directly pointing to the previously-exposed RQ handlers. This significantly improves locality and modularity of the driver, and allows many functions in en_rx.c to become static. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 31 ++---- .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 1 - .../net/ethernet/mellanox/mlx5/core/en_main.c | 53 +-------- .../net/ethernet/mellanox/mlx5/core/en_rep.c | 6 +- .../net/ethernet/mellanox/mlx5/core/en_rep.h | 6 +- .../net/ethernet/mellanox/mlx5/core/en_rx.c | 103 +++++++++++++++--- .../ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3 +- .../ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 2 +- .../mellanox/mlx5/core/ipoib/ipoib_vlan.c | 3 +- 9 files changed, 112 insertions(+), 96 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c44669102626..878714f949a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -530,6 +530,8 @@ typedef struct sk_buff * typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq); typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16); +int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk); + enum mlx5e_rq_flag { MLX5E_RQ_FLAG_XDP_XMIT, MLX5E_RQ_FLAG_XDP_REDIRECT, @@ -812,6 +814,13 @@ struct mlx5e_priv { struct mlx5e_scratchpad scratchpad; }; +struct mlx5e_rx_handlers { + mlx5e_fp_handle_rx_cqe handle_rx_cqe; + mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; +}; + +extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic; + struct mlx5e_profile { int (*init)(struct mlx5_core_dev *mdev, struct net_device *netdev, @@ -828,10 +837,7 @@ struct mlx5e_profile { void (*update_carrier)(struct mlx5e_priv *priv); unsigned int (*stats_grps_num)(struct mlx5e_priv *priv); mlx5e_stats_grp_t *stats_grps; - struct { - mlx5e_fp_handle_rx_cqe handle_rx_cqe; - mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; - } rx_handlers; + const 
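Illustration (not part of this patch): a standalone C sketch of the structure the patch moves to -- each profile carries a pointer to a const struct of RX handler function pointers, and a single setup helper in the datapath code picks the handler, instead of the control path assigning individual pointers. All names below are invented; only the ops-struct-per-profile shape mirrors mlx5e_rx_handlers.

  #include <stdio.h>

  struct rx_handlers {
          void (*handle_cqe)(const char *who);
          void (*handle_cqe_mpwqe)(const char *who); /* NULL if unsupported */
  };

  static void nic_handle_cqe(const char *who)
  {
          printf("%s: legacy RQ completion\n", who);
  }

  static void nic_handle_cqe_mpwqe(const char *who)
  {
          printf("%s: striding RQ completion\n", who);
  }

  /* one const instance per profile, defined next to the datapath code */
  static const struct rx_handlers nic_rx_handlers = {
          .handle_cqe       = nic_handle_cqe,
          .handle_cqe_mpwqe = nic_handle_cqe_mpwqe,
  };

  struct profile {
          const char *name;
          const struct rx_handlers *rx_handlers;
  };

  /* the control path only stores the pointer; selection happens here */
  static int rq_set_handler(const struct profile *p, int striding_rq,
                            void (**out)(const char *))
  {
          *out = striding_rq ? p->rx_handlers->handle_cqe_mpwqe
                             : p->rx_handlers->handle_cqe;
          return *out ? 0 : -1;   /* e.g. MPWQE not supported by this profile */
  }

  int main(void)
  {
          struct profile nic = { "nic", &nic_rx_handlers };
          void (*handler)(const char *);

          if (!rq_set_handler(&nic, 1, &handler))
                  handler(nic.name);
          return 0;
  }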
struct mlx5e_rx_handlers *rx_handlers; int max_tc; u8 rq_groups; }; @@ -860,26 +866,9 @@ void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); -void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); -void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); -void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); -struct sk_buff * -mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, - u16 cqe_bcnt, u32 head_offset, u32 page_idx); -struct sk_buff * -mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, - u16 cqe_bcnt, u32 head_offset, u32 page_idx); -struct sk_buff * -mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); -struct sk_buff * -mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, - struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); - void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 2a47673da5a4..f96e786db158 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -47,7 +47,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb, u32 *cqe_bcnt); -void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_ipsec_inverse_table_init(void); bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9d5d8b28bcd8..ac91504be8e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -45,7 +45,6 @@ #include "en_tc.h" #include "en_rep.h" #include "en_accel/ipsec.h" -#include "en_accel/ipsec_rxtx.h" #include "en_accel/en_accel.h" #include "en_accel/tls.h" #include "accel/ipsec.h" @@ -65,7 +64,6 @@ #include "en/hv_vhca_stats.h" #include "en/devlink.h" #include "lib/mlx5.h" -#include "fpga/ipsec.h" bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) { @@ -428,29 +426,6 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, pool_size = MLX5_MPWRQ_PAGES_PER_WQE << mlx5e_mpwqe_get_log_rq_size(params, xsk); - rq->post_wqes = mlx5e_post_rx_mpwqes; - rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; - - rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe; -#ifdef CONFIG_MLX5_EN_IPSEC - if (MLX5_IPSEC_DEV(mdev)) { - err = -EINVAL; - netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); - goto err_rq_wq_destroy; - } -#endif - if (!rq->handle_rx_cqe) { - err = -EINVAL; - netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err); - goto err_rq_wq_destroy; - } - - rq->mpwqe.skb_from_cqe_mpwrq = xsk ? - mlx5e_xsk_skb_from_cqe_mpwrq_linear : - mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ? 
- mlx5e_skb_from_cqe_mpwrq_linear : - mlx5e_skb_from_cqe_mpwrq_nonlinear; - rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); rq->mpwqe.num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); @@ -492,30 +467,13 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, if (err) goto err_free; - rq->post_wqes = mlx5e_post_rx_wqes; - rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; - -#ifdef CONFIG_MLX5_EN_IPSEC - if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && - c->priv->ipsec) - rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; - else -#endif - rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe; - if (!rq->handle_rx_cqe) { - err = -EINVAL; - netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err); - goto err_free; - } - - rq->wqe.skb_from_cqe = xsk ? - mlx5e_xsk_skb_from_cqe_linear : - mlx5e_rx_is_linear_skb(params, NULL) ? - mlx5e_skb_from_cqe_linear : - mlx5e_skb_from_cqe_nonlinear; rq->mkey_be = c->mkey_be; } + err = mlx5e_rq_set_handlers(rq, params, xsk); + if (err) + goto err_free; + if (xsk) { err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL); @@ -5288,8 +5246,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .update_rx = mlx5e_update_nic_rx, .update_stats = mlx5e_update_ndo_stats, .update_carrier = mlx5e_update_carrier, - .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe, - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, + .rx_handlers = &mlx5e_rx_handlers_nic, .max_tc = MLX5E_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK), .stats_grps = mlx5e_nic_stats_grps, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a7a74748c948..ea6b99b8bcd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1143,8 +1143,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .enable = mlx5e_rep_enable, .update_rx = mlx5e_update_rep_rx, .update_stats = mlx5e_update_ndo_stats, - .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, + .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = 1, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5e_rep_stats_grps, @@ -1163,8 +1162,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .update_rx = mlx5e_update_rep_rx, .update_stats = mlx5e_update_ndo_stats, .update_carrier = mlx5e_update_carrier, - .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, + .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = MLX5E_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5e_ul_rep_stats_grps, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 1d5669801484..622c27ae4ac7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -41,6 +41,8 @@ #include "lib/port_tun.h" #ifdef CONFIG_MLX5_ESWITCH +extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep; + struct mlx5e_neigh_update_table { struct rhashtable neigh_ht; /* Save the neigh hash entries in a list in addition to the hash table @@ -223,10 +225,6 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); -void mlx5e_handle_rx_cqe_rep(struct 
mlx5e_rq *rq, struct mlx5_cqe64 *cqe); -void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe); - void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); bool mlx5e_eswitch_vf_rep(struct net_device *netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 74860f3827b1..9c0ef6e5da88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -44,12 +44,29 @@ #include "en_rep.h" #include "en/rep/tc.h" #include "ipoib/ipoib.h" +#include "accel/ipsec.h" +#include "fpga/ipsec.h" #include "en_accel/ipsec_rxtx.h" #include "en_accel/tls_rxtx.h" #include "lib/clock.h" #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/health.h" +#include "en/params.h" + +static struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); +static struct sk_buff * +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, + u16 cqe_bcnt, u32 head_offset, u32 page_idx); +static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); + +const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { + .handle_rx_cqe = mlx5e_handle_rx_cqe, + .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, +}; static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { @@ -370,7 +387,7 @@ static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, mlx5e_put_rx_frag(rq, wi, recycle); } -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) +static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); @@ -537,7 +554,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) return err; } -void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) +static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) { struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; /* Don't recycle, this function is called on rq/netdev close */ @@ -1106,7 +1123,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, xdp->frame_sz = rq->buff.frame0_sz; } -struct sk_buff * +static struct sk_buff * mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { @@ -1146,7 +1163,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, return skb; } -struct sk_buff * +static struct sk_buff * mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) { @@ -1201,7 +1218,7 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } } -void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; @@ -1244,7 +1261,7 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } #ifdef CONFIG_MLX5_ESWITCH -void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct net_device *netdev = rq->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1299,8 +1316,7 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5_wq_cyc_pop(wq); } -void 
mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, - struct mlx5_cqe64 *cqe) +static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); @@ -1358,9 +1374,14 @@ void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, mlx5e_free_rx_mpwqe(rq, wi, true); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); } + +const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { + .handle_rx_cqe = mlx5e_handle_rx_cqe_rep, + .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, +}; #endif -struct sk_buff * +static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) { @@ -1406,7 +1427,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w return skb; } -struct sk_buff * +static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, u16 cqe_bcnt, u32 head_offset, u32 page_idx) { @@ -1456,7 +1477,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return skb; } -void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); @@ -1652,7 +1673,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, stats->bytes += cqe_bcnt; } -void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; @@ -1688,11 +1709,15 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5_wq_cyc_pop(wq); } +const struct mlx5e_rx_handlers mlx5i_rx_handlers = { + .handle_rx_cqe = mlx5i_handle_rx_cqe, + .handle_rx_cqe_mpwqe = NULL, /* Not supported */ +}; #endif /* CONFIG_MLX5_CORE_IPOIB */ #ifdef CONFIG_MLX5_EN_IPSEC -void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; struct mlx5e_wqe_frag_info *wi; @@ -1729,3 +1754,55 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) } #endif /* CONFIG_MLX5_EN_IPSEC */ + +int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) +{ + struct mlx5_core_dev *mdev = rq->mdev; + struct mlx5e_channel *c = rq->channel; + + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + rq->mpwqe.skb_from_cqe_mpwrq = xsk ? + mlx5e_xsk_skb_from_cqe_mpwrq_linear : + mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ? + mlx5e_skb_from_cqe_mpwrq_linear : + mlx5e_skb_from_cqe_mpwrq_nonlinear; + rq->post_wqes = mlx5e_post_rx_mpwqes; + rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; + + rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe; +#ifdef CONFIG_MLX5_EN_IPSEC + if (MLX5_IPSEC_DEV(mdev)) { + netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); + return -EINVAL; + } +#endif + if (!rq->handle_rx_cqe) { + netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n"); + return -EINVAL; + } + break; + default: /* MLX5_WQ_TYPE_CYCLIC */ + rq->wqe.skb_from_cqe = xsk ? + mlx5e_xsk_skb_from_cqe_linear : + mlx5e_rx_is_linear_skb(params, NULL) ? 
+ mlx5e_skb_from_cqe_linear : + mlx5e_skb_from_cqe_nonlinear; + rq->post_wqes = mlx5e_post_rx_wqes; + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; + +#ifdef CONFIG_MLX5_EN_IPSEC + if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && + c->priv->ipsec) + rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; + else +#endif + rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe; + if (!rq->handle_rx_cqe) { + netdev_err(c->netdev, "RX handler of RQ is not set\n"); + return -EINVAL; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 690b822c6152..5763965d5ef3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -464,8 +464,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = { .update_rx = mlx5i_update_nic_rx, .update_stats = NULL, /* mlx5i_update_stats */ .update_carrier = NULL, /* no HW update in IB link */ - .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, - .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ + .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), .stats_grps = mlx5i_stats_grps, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 79071a15c4ca..b79dc1e28c41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -42,6 +42,7 @@ extern const struct ethtool_ops mlx5i_ethtool_ops; extern const struct ethtool_ops mlx5i_pkey_ethtool_ops; +extern const struct mlx5e_rx_handlers mlx5i_rx_handlers; #define MLX5_IB_GRH_BYTES 40 #define MLX5_IPOIB_ENCAP_LEN 4 @@ -117,7 +118,6 @@ struct mlx5i_tx_wqe { void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more); -void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* CONFIG_MLX5_CORE_IPOIB */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index f70367018862..7163d9f6c4a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -349,8 +349,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = { .disable = NULL, .update_rx = mlx5i_update_nic_rx, .update_stats = NULL, - .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, - .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ + .rx_handlers = &mlx5i_rx_handlers, .max_tc = MLX5I_MAX_NUM_TC, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), }; From b307f7f163e351833cf6aa41585be8f5a1617901 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Thu, 30 Apr 2020 17:02:56 +0300 Subject: [PATCH 1695/2056] net/mlx5e: Move exposure of datapath function to txrx header Move them from the generic header file "en.h", to the datapath header file "txrx.h". 
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 23 ---------------- .../net/ethernet/mellanox/mlx5/core/en/txrx.h | 27 +++++++++++++++++++ .../mellanox/mlx5/core/en/xsk/setup.c | 1 + .../net/ethernet/mellanox/mlx5/core/en_rep.c | 1 + 4 files changed, 29 insertions(+), 23 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 878714f949a5..0fb30fe93207 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -844,31 +844,10 @@ struct mlx5e_profile { void mlx5e_build_ptys2ethtool_map(void); -u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); -netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); -void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, - struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more); - -void mlx5e_trigger_irq(struct mlx5e_icosq *sq); -void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe); -void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); -int mlx5e_napi_poll(struct napi_struct *napi, int budget); -bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); -int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); -void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); - bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, struct mlx5e_params *params); -void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); -void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, - struct mlx5e_dma_info *dma_info, - bool recycle); -bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); -int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); -bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); @@ -971,8 +950,6 @@ void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); -void mlx5e_free_rx_descs(struct mlx5e_rq *rq); -void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); void mlx5e_activate_icosq(struct mlx5e_icosq *icosq); void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index cf425a60cddc..b25e4ec75228 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -18,6 +18,33 @@ enum mlx5e_icosq_wqe_type { #endif }; +/* General */ +void mlx5e_trigger_irq(struct mlx5e_icosq *sq); +void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe); +void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); +int mlx5e_napi_poll(struct napi_struct *napi, int budget); +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq); + +/* RX */ +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); +void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, + struct mlx5e_dma_info *dma_info, + bool recycle); +bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); +int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int 
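Background illustration (not part of this patch): a standalone C sketch of what the indirect-call wrappers buy -- when the likely target of a function pointer is known at the call site, compare against it and call it directly, so the indirect branch (and its retpoline cost) is avoided in the common case. The macro below is a simplified stand-in for the kernel's INDIRECT_CALL_1(); the DECLARE/SCOPE macros used in the patch exist so the candidate functions remain visible to such call sites even when they are otherwise static.

  #include <stdio.h>

  /* simplified stand-in for include/linux/indirect_call_wrapper.h:
   * if the pointer matches the expected function, emit a direct call.
   */
  #define INDIRECT_CALL_1(f, f1, ...) \
          ((f) == (f1) ? f1(__VA_ARGS__) : (f)(__VA_ARGS__))

  static int post_rx_wqes(int budget)
  {
          printf("posting %d legacy RX WQEs\n", budget);
          return budget;
  }

  static int post_rx_mpwqes(int budget)
  {
          printf("posting %d multi-packet RX WQEs\n", budget);
          return budget;
  }

  int main(void)
  {
          int (*post_wqes)(int) = post_rx_mpwqes; /* chosen once at RQ setup */

          /* hot path: direct call when the common target matches */
          INDIRECT_CALL_1(post_wqes, post_rx_mpwqes, 8);

          post_wqes = post_rx_wqes;
          INDIRECT_CALL_1(post_wqes, post_rx_mpwqes, 8); /* falls back to indirect */
          return 0;
  }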
budget); +void mlx5e_free_rx_descs(struct mlx5e_rq *rq); +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); + +/* TX */ +u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev); +netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); +void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, + struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); +void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); + static inline bool mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index cc46414773b5..dd9df519d383 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -3,6 +3,7 @@ #include "setup.h" #include "en/params.h" +#include "en/txrx.h" /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may * change unexpectedly, and mlx5e has a minimum valid stride size for striding diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ea6b99b8bcd8..111477086f66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -42,6 +42,7 @@ #include "esw/chains.h" #include "en.h" #include "en_rep.h" +#include "en/txrx.h" #include "en_tc.h" #include "en/rep/tc.h" #include "en/rep/neigh.h" From 5d0b84769477fd647a491e882d5aea402c5fafa8 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Sun, 12 Jul 2020 18:22:39 +0300 Subject: [PATCH 1696/2056] net/mlx5e: Use indirect call wrappers for RX post WQEs functions Use the indirect call wrapper API macros for declaration and scope of the RX post WQEs functions. 
Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 5 +++-- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index b25e4ec75228..f7fd2ed32279 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -5,6 +5,7 @@ #define __MLX5_EN_TXRX_H___ #include "en.h" +#include #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) @@ -30,8 +31,8 @@ void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, bool recycle); -bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); -bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); +INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)); +INDIRECT_CALLABLE_DECLARE(bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index e0c1b010d41a..90db221e31df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -34,7 +34,6 @@ #include #include "en/xdp.h" #include "en/params.h" -#include int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c index 0dfbc96e952a..4d892f6cecb3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c @@ -6,7 +6,6 @@ #include "en/xdp.h" #include "en/params.h" #include -#include int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 9c0ef6e5da88..65828af120b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -34,11 +34,11 @@ #include #include #include -#include #include #include #include #include "en.h" +#include "en/txrx.h" #include "en_tc.h" #include "eswitch.h" #include "en_rep.h" @@ -561,7 +561,7 @@ static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) mlx5e_free_rx_mpwqe(rq, wi, false); } -bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) +INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_cyc *wq = &rq->wqe.wq; u8 wqe_bulk; @@ -702,7 +702,7 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) return i; } -bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) +INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) { struct mlx5e_icosq *sq = &rq->channel->icosq; struct mlx5_wq_ll *wq = &rq->mpwqe.wq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index e3dbab2a294c..de10b06bade5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -31,8 +31,8 @@ */ #include -#include #include "en.h" +#include "en/txrx.h" #include "en/xdp.h" #include "en/xsk/rx.h" #include "en/xsk/tx.h" From 17347d5430c4e4e1a3c58ffa2732746bd26a9c02 Mon Sep 17 00:00:00 2001 From: Aya Levin Date: Thu, 26 Mar 2020 14:03:19 +0200 Subject: [PATCH 1697/2056] net/mlx5e: Add support for PCI relaxed ordering The concept of Relaxed Ordering in the PCI Express environment allows switches in the path between the Requester and Completer to reorder some transactions just received before others that were previously enqueued. In ETH driver, there is no question of write integrity since each memory segment is written only once per cycle. In addition, the driver doesn't access the memory shared with the hardware until the corresponding CQE arrives indicating all PCI transactions are done. Running TCP single stream over ConnectX-4 LX, ARM CPU on remote-numa has 300% improvement in the bandwidth. With relaxed ordering turned off: BW:10 [GB/s] With relaxed ordering turned on: BW:40 [GB/s] The driver turns relaxed ordering with respect to the firmware capabilities and the return value from pcie_relaxed_ordering_enabled(). Signed-off-by: Aya Levin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 12 +++++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0fb30fe93207..f2fa1307e90c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -974,6 +974,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, bool enable_mc_lb); +void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc); /* common netdev helpers */ void mlx5e_create_q_counters(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 1e42c7ae621b..a6cf008057b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -60,6 +60,16 @@ void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, mutex_unlock(&mdev->mlx5e_res.td.list_lock); } +void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) +{ + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); + bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); + + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); +} + static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey) { @@ -76,7 +86,7 @@ static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - + mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, pd, pdn); MLX5_SET(mkc, mkc, length64, 1); MLX5_SET(mkc, mkc, qpn, 0xffffff); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index ac91504be8e8..f374348fa810 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -274,7 +274,7 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); - + mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn); MLX5_SET64(mkc, mkc, len, npages << page_shift); From ffdc8ec0b79f499c21284f4229de02d70204bf6a Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Mon, 6 Jul 2020 16:32:11 +0300 Subject: [PATCH 1698/2056] net/mlx5: DR, Reduce print level for matcher print There is no need to print on each unsuccessful matcher ip_version combination since it probably will happen when trying to create all the possible combinations. On a real failure we have a print in the calling function. Signed-off-by: Alex Vesker Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 31abcbb95ca2..6960aedd33cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -395,7 +395,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, /* Check that all mask fields were consumed */ for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) { if (((u8 *)&mask)[i] != 0) { - mlx5dr_err(dmn, "Mask contains unsupported parameters\n"); + mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n"); return -EOPNOTSUPP; } } From c8b838d108bca05d759f0131db960ddf305cc255 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 27 Jul 2020 13:03:56 -0500 Subject: [PATCH 1699/2056] net/mlx5: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/v5.7/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4 ++-- .../net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 2 +- .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index f7fd2ed32279..9334c9c3e208 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -388,7 +388,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, switch (swp_spec->tun_l4_proto) { case IPPROTO_UDP: eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; - /* fall through */ + fallthrough; case IPPROTO_TCP: eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 90db221e31df..0e6946fc121f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -152,11 +152,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, return true; default: bpf_warn_invalid_xdp_action(act); - /* fall through */ + fallthrough; case XDP_ABORTED: xdp_abort: trace_xdp_exception(rq->netdev, prog, act); - /* fall through */ + fallthrough; case XDP_DROP: rq->stats->xdp_drop++; return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 0e6698d1b4ca..f4861545b236 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -470,7 +470,7 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s if (likely(!skb->decrypted)) goto out; WARN_ON_ONCE(1); - /* fall-through */ + fallthrough; case MLX5E_KTLS_SYNC_FAIL: goto err_out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index af849bc83c30..08270987c506 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -243,7 +243,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) return MLX5E_NUM_PFLAGS; case ETH_SS_TEST: return mlx5e_self_test_num(priv); - /* fallthrough */ + fallthrough; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 54de53daf1c0..be610d40749a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2346,7 +2346,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) goto out; - /* fall through */ + fallthrough; case MLX5_CAP_INLINE_MODE_L2: NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); err = -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 
b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 182d3ac3e73f..831d2c39e153 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -339,14 +339,14 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, switch (opcode) { case MLX5_CQE_REQ_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; - /* Fall through */ + fallthrough; case MLX5_CQE_REQ: mlx5_fpga_conn_sq_cqe(conn, cqe, status); break; case MLX5_CQE_RESP_ERR: status = ((struct mlx5_err_cqe *)cqe)->syndrome; - /* Fall through */ + fallthrough; case MLX5_CQE_RESP_SEND: mlx5_fpga_conn_rq_cqe(conn, cqe, status); break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index e9089a793632..9e68f5926ab6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -198,13 +198,13 @@ static void mlx5_lag_fib_update(struct work_struct *work) /* Protect internal structures from changes */ rtnl_lock(); switch (fib_work->event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: mlx5_lag_fib_route_event(ldev, fib_work->event, fib_work->fen_info.fi); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: fib_nh = fib_work->fnh_info.fib_nh; mlx5_lag_fib_nexthop_event(ldev, @@ -255,7 +255,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, return NOTIFY_DONE; switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_REPLACE: case FIB_EVENT_ENTRY_DEL: fen_info = container_of(info, struct fib_entry_notifier_info, info); @@ -278,7 +278,7 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_ADD: case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 88cdb9bb4c4a..bdafc85fd874 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -110,7 +110,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT: if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode)) break; - /* fall through */ + fallthrough; case MLX5_CAP_INLINE_MODE_L2: *min_inline_mode = MLX5_INLINE_MODE_L2; break; From 22f9d2f4ee810e6442185ba7ecab37e24de1b413 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 26 Jul 2020 12:58:29 +0200 Subject: [PATCH 1700/2056] net/mlx5: drop unnecessary list_empty list_for_each_entry is able to handle an empty list. The only effect of avoiding the loop is not initializing the index variable. Drop list_empty tests in cases where these variables are not used. Note that list_for_each_entry is defined in terms of list_first_entry, which indicates that it should not be used on an empty list. But in list_for_each_entry, the element obtained by list_first_entry is not really accessed, only the address of its list_head field is compared to the address of the list head, so the list_first_entry is safe. 
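As a self-contained illustration of that point (hypothetical structure, not taken from the patched files):

#include <linux/list.h>
#include <linux/printk.h>

struct demo_entry {
	struct list_head node;
	int prio;
};

static void demo_walk(struct list_head *head)
{
	struct demo_entry *cur;

	/* On an empty list head->next == head, so the loop condition
	 * &cur->node != head fails on the very first check: the body
	 * runs zero times, and 'cur' (which briefly points at the
	 * container of 'head' itself) is never dereferenced.
	 */
	list_for_each_entry(cur, head, node)
		pr_info("prio=%d\n", cur->prio);
}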
The semantic patch that makes this change is as follows (with another variant for the no brace case): (http://coccinelle.lip6.fr/) @@ expression x,e; iterator name list_for_each_entry; statement S; identifier i; @@ -if (!(list_empty(x))) { list_for_each_entry(i,x,...) S - } ... when != i ? i = e Signed-off-by: Julia Lawall Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/dr_matcher.c | 13 ++++++------- .../ethernet/mellanox/mlx5/core/steering/dr_rule.c | 5 ++--- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 6960aedd33cb..c63f727273d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -474,14 +474,13 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher) int ret; next_matcher = NULL; - if (!list_empty(&tbl->matcher_list)) - list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) { - if (tmp_matcher->prio >= matcher->prio) { - next_matcher = tmp_matcher; - break; - } - first = false; + list_for_each_entry(tmp_matcher, &tbl->matcher_list, matcher_list) { + if (tmp_matcher->prio >= matcher->prio) { + next_matcher = tmp_matcher; + break; } + first = false; + } prev_matcher = NULL; if (next_matcher && !first) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index cd708dcc2e3a..6ec5106bc472 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -574,9 +574,8 @@ void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, { struct mlx5dr_rule_member *rule_mem; - if (!list_empty(&ste->rule_list)) - list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list) - rule_mem->ste = new_ste; + list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list) + rule_mem->ste = new_ste; } static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, From e1613b5714ee6c186c9628e9958edf65e9d9cddd Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Jul 2020 15:47:15 -0700 Subject: [PATCH 1701/2056] bpf: Fix bpf_ringbuf_output() signature to return long Due to bpf tree fix merge, bpf_ringbuf_output() signature ended up with int as a return type, while all other helpers got converted to returning long. So fix it in bpf-next now. Fixes: b0659d8a950d ("bpf: Fix definition of bpf_ringbuf_output() helper in UAPI comments") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200727224715.652037-1-andriin@fb.com --- include/uapi/linux/bpf.h | 2 +- tools/include/uapi/linux/bpf.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e1ba4ae6a916..eb5e0c38eb2c 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3241,7 +3241,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. 
* If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e1ba4ae6a916..eb5e0c38eb2c 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3241,7 +3241,7 @@ union bpf_attr { * Return * The id is returned or 0 in case the id could not be retrieved. * - * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) * Description * Copy *size* bytes from *data* into a ring buffer *ringbuf*. * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification From 363885d7c62e293fb093c7c355bf5f05fa0a25a9 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 27 Jul 2020 16:33:45 -0700 Subject: [PATCH 1702/2056] selftests/bpf: Add new bpf_iter context structs to fix build on old kernels Add bpf_iter__bpf_map_elem and bpf_iter__bpf_sk_storage_map to bpf_iter.h. Fixes: 3b1c420bd882 ("selftests/bpf: Add a test for bpf sk_storage_map iterator") Fixes: 2a7c2fff7dd6 ("selftests/bpf: Add test for bpf hash map iterators") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200727233345.1686358-1-andriin@fb.com --- tools/testing/selftests/bpf/progs/bpf_iter.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h index 17db3bac518b..c196280df90d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter.h +++ b/tools/testing/selftests/bpf/progs/bpf_iter.h @@ -11,6 +11,8 @@ #define tcp6_sock tcp6_sock___not_used #define bpf_iter__udp bpf_iter__udp___not_used #define udp6_sock udp6_sock___not_used +#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used +#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used #include "vmlinux.h" #undef bpf_iter_meta #undef bpf_iter__bpf_map @@ -22,6 +24,8 @@ #undef tcp6_sock #undef bpf_iter__udp #undef udp6_sock +#undef bpf_iter__bpf_map_elem +#undef bpf_iter__bpf_sk_storage_map struct bpf_iter_meta { struct seq_file *seq; @@ -78,3 +82,17 @@ struct udp6_sock { struct udp_sock udp; struct ipv6_pinfo inet6; } __attribute__((preserve_access_index)); + +struct bpf_iter__bpf_map_elem { + struct bpf_iter_meta *meta; + struct bpf_map *map; + void *key; + void *value; +}; + +struct bpf_iter__bpf_sk_storage_map { + struct bpf_iter_meta *meta; + struct bpf_map *map; + struct sock *sk; + void *value; +}; From f6dfbe31e8fa5cbd5bc89df9d7f0fa0af7e69981 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 27 Jul 2020 18:54:11 +0100 Subject: [PATCH 1703/2056] bpf: Fix swapped arguments in calls to check_buffer_access There are a couple of arguments of the boolean flag zero_size_allowed and the char pointer buf_info when calling to function check_buffer_access that are swapped by mistake. Fix these by swapping them to correct the argument ordering. 
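A small stand-alone model of why the swapped calls still compiled cleanly; the two-parameter prototype below is a simplification invented for this note (the real check_buffer_access() in kernel/bpf/verifier.c takes more arguments) and is kept only to show the implicit conversions involved:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in: only the two swapped parameters are modelled. */
static void check_buffer_access(bool zero_size_allowed, const char *buf_info)
{
	printf("zero_size_allowed=%d buf_info=%s\n",
	       zero_size_allowed, buf_info ? buf_info : "(null)");
}

int main(void)
{
	check_buffer_access(false, "rdonly");	/* intended order */

	/* Swapped order, as in the buggy calls: a C17 compiler accepts
	 * this silently, since the string literal converts to a true
	 * bool and false (0) is a valid null pointer constant -- which
	 * is why only static analysis flagged the mistake.
	 */
	check_buffer_access("rdonly", false);
	return 0;
}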
Fixes: afbf21dce668 ("bpf: Support readonly/readwrite buffers in verifier") Addresses-Coverity: ("Array compared to 0") Signed-off-by: Colin Ian King Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20200727175411.155179-1-colin.king@canonical.com --- kernel/bpf/verifier.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cd14e70f2d07..88bb25d08bf8 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3477,14 +3477,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn regno, reg_type_str[reg->type]); return -EACCES; } - err = check_buffer_access(env, reg, regno, off, size, "rdonly", - false, + err = check_buffer_access(env, reg, regno, off, size, false, + "rdonly", &env->prog->aux->max_rdonly_access); if (!err && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_RDWR_BUF) { - err = check_buffer_access(env, reg, regno, off, size, "rdwr", - false, + err = check_buffer_access(env, reg, regno, off, size, false, + "rdwr", &env->prog->aux->max_rdwr_access); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); From 3c4f850e8441ac8b3b6dbaa6107604c4199ef01f Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Tue, 28 Jul 2020 01:36:04 -0400 Subject: [PATCH 1704/2056] xdp: Prevent kernel-infoleak in xsk_getsockopt() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit xsk_getsockopt() is copying uninitialized stack memory to userspace when 'extra_stats' is 'false'. Fix it. Doing '= {};' is sufficient since currently 'struct xdp_statistics' is defined as follows: struct xdp_statistics { __u64 rx_dropped; __u64 rx_invalid_descs; __u64 tx_invalid_descs; __u64 rx_ring_full; __u64 rx_fill_ring_empty_descs; __u64 tx_ring_empty_descs; }; When being copied to the userspace, 'stats' will not contain any uninitialized 'holes' between struct fields. Fixes: 8aa5a33578e9 ("xsk: Add new statistics") Suggested-by: Dan Carpenter Signed-off-by: Peilin Ye Signed-off-by: Daniel Borkmann Acked-by: Björn Töpel Acked-by: Song Liu Acked-by: Arnd Bergmann Link: https://lore.kernel.org/bpf/20200728053604.404631-1-yepeilin.cs@gmail.com --- net/xdp/xsk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 2e94a7e94671..c3231620d210 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -840,7 +840,7 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname, switch (optname) { case XDP_STATISTICS: { - struct xdp_statistics stats; + struct xdp_statistics stats = {}; bool extra_stats = true; size_t stats_size; From 4e8c36c3b0d73d46aa27cfd4308aaa445a1067df Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Tue, 28 Jul 2020 09:58:07 -0700 Subject: [PATCH 1705/2056] Bluetooth: Fix suspend notifier race Unregister from suspend notifications and cancel suspend preparations before running hci_dev_do_close. Otherwise, the suspend notifier may race with unregister and cause cmd_timeout even after hdev has been freed. 
Below is the trace from when this panic was seen: [ 832.578518] Bluetooth: hci_core.c:hci_cmd_timeout() hci0: command 0x0c05 tx timeout [ 832.586200] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 832.586203] #PF: supervisor read access in kernel mode [ 832.586205] #PF: error_code(0x0000) - not-present page [ 832.586206] PGD 0 P4D 0 [ 832.586210] PM: suspend exit [ 832.608870] Oops: 0000 [#1] PREEMPT SMP NOPTI [ 832.613232] CPU: 3 PID: 10755 Comm: kworker/3:7 Not tainted 5.4.44-04894-g1e9dbb96a161 #1 [ 832.630036] Workqueue: events hci_cmd_timeout [bluetooth] [ 832.630046] RIP: 0010:__queue_work+0xf0/0x374 [ 832.630051] RSP: 0018:ffff9b5285f1fdf8 EFLAGS: 00010046 [ 832.674033] RAX: ffff8a97681bac00 RBX: 0000000000000000 RCX: ffff8a976a000600 [ 832.681162] RDX: 0000000000000000 RSI: 0000000000000009 RDI: ffff8a976a000748 [ 832.688289] RBP: ffff9b5285f1fe38 R08: 0000000000000000 R09: ffff8a97681bac00 [ 832.695418] R10: 0000000000000002 R11: ffff8a976a0006d8 R12: ffff8a9745107600 [ 832.698045] usb 1-6: new full-speed USB device number 119 using xhci_hcd [ 832.702547] R13: ffff8a9673658850 R14: 0000000000000040 R15: 000000000000001e [ 832.702549] FS: 0000000000000000(0000) GS:ffff8a976af80000(0000) knlGS:0000000000000000 [ 832.702550] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 832.702550] CR2: 0000000000000000 CR3: 000000010415a000 CR4: 00000000003406e0 [ 832.702551] Call Trace: [ 832.702558] queue_work_on+0x3f/0x68 [ 832.702562] process_one_work+0x1db/0x396 [ 832.747397] worker_thread+0x216/0x375 [ 832.751147] kthread+0x138/0x140 [ 832.754377] ? pr_cont_work+0x58/0x58 [ 832.758037] ? kthread_blkcg+0x2e/0x2e [ 832.761787] ret_from_fork+0x22/0x40 [ 832.846191] ---[ end trace fa93f466da517212 ]--- Fixes: 9952d90ea2885 ("Bluetooth: Handle PM_SUSPEND_PREPARE and PM_POST_SUSPEND") Signed-off-by: Abhishek Pandit-Subedi Reviewed-by: Miao-chen Chou Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 5394ab56c915..4ba23b821cbf 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3767,9 +3767,10 @@ void hci_unregister_dev(struct hci_dev *hdev) cancel_work_sync(&hdev->power_on); - hci_dev_do_close(hdev); - unregister_pm_notifier(&hdev->suspend_notifier); + cancel_work_sync(&hdev->suspend_prepare); + + hci_dev_do_close(hdev); if (!test_bit(HCI_INIT, &hdev->flags) && !hci_dev_test_flag(hdev, HCI_SETUP) && From f21bbd63308f677f0f32471cfa7dcdae17fd14da Mon Sep 17 00:00:00 2001 From: Venkata Lakshmi Narayana Gubba Date: Tue, 28 Jul 2020 21:23:00 +0530 Subject: [PATCH 1706/2056] Bluetooth: hci_qca: Stop collecting memdump again for command timeout during SSR The memdump state was being set to idle before the command timeout callback pointer was cleared (set to NULL), which caused the issue. The initialisation of the memdump state is now moved to qca_setup(). Fixes: d841502c79e3 ("Bluetooth: hci_qca: Collect controller memory dump during SSR") Signed-off-by: Venkata Lakshmi Narayana Gubba Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_qca.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 3d1300286993..20e1dedbc58c 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1668,6 +1668,8 @@ static int qca_setup(struct hci_uart *hu) bt_dev_info(hdev, "setting up %s", qca_is_wcn399x(soc_type) ?
"wcn399x" : "ROME/QCA6390"); + qca->memdump_state = QCA_MEMDUMP_IDLE; + retry: ret = qca_power_on(hdev); if (ret) @@ -1817,9 +1819,6 @@ static void qca_power_shutdown(struct hci_uart *hu) qca_flush(hu); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); - hu->hdev->hw_error = NULL; - hu->hdev->cmd_timeout = NULL; - /* Non-serdev device usually is powered by external power * and don't need additional action in driver for power down */ @@ -1841,6 +1840,9 @@ static int qca_power_off(struct hci_dev *hdev) struct qca_data *qca = hu->priv; enum qca_btsoc_type soc_type = qca_soc_type(hu); + hu->hdev->hw_error = NULL; + hu->hdev->cmd_timeout = NULL; + /* Stop sending shutdown command if soc crashes. */ if (soc_type != QCA_ROME && qca->memdump_state == QCA_MEMDUMP_IDLE) { @@ -1848,7 +1850,6 @@ static int qca_power_off(struct hci_dev *hdev) usleep_range(8000, 10000); } - qca->memdump_state = QCA_MEMDUMP_IDLE; qca_power_shutdown(hu); return 0; } From f21bbd63308f677f0f32471cfa7dcdae17fd14da Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 28 Jul 2020 09:58:10 +0530 Subject: [PATCH 1707/2056] farsync: use generic power management The .suspend() and .resume() callbacks are not defined for this driver. Still, their power management structure follows the legacy framework. To bring it under the generic framework, simply remove the binding of callbacks from "struct pci_driver". Change code indentation from space to tab in "struct pci_driver". Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/wan/farsync.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 0196b7fd130d..fe4650260bc6 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2636,12 +2636,10 @@ fst_remove_one(struct pci_dev *pdev) } static struct pci_driver fst_driver = { - .name = FST_NAME, - .id_table = fst_pci_dev_id, - .probe = fst_add_one, - .remove = fst_remove_one, - .suspend = NULL, - .resume = NULL, + .name = FST_NAME, + .id_table = fst_pci_dev_id, + .probe = fst_add_one, + .remove = fst_remove_one, }; static int __init From 6af496adcbb8d4656b90a85401eeceb88d520c0d Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 28 Jul 2020 13:20:15 +0300 Subject: [PATCH 1708/2056] mlxsw: core: Add ethtool support for QSFP-DD transceivers The Quad Small Form Factor Pluggable Double Density (QSFP-DD) hardware specification defines a form factor that supports up to 400 Gbps in aggregate over an 8x50-Gbps electrical interface. The QSFP-DD supports both optical and copper interfaces. Implementation is based on Common Management Interface Specification; Rev 4.0 May 8, 2019. Table 8-2 "Identifier and Status Summary (Lower Page)" from this spec defines "Id and Status" fields located at offsets 00h - 02h. Bit 2 at offset 02h ("Flat_mem") specifies QSFP EEPROM memory mode, which could be "upper memory flat" or "paged". Flat memory mode is coded "1", and indicates that only page 00h is implemented in EEPROM. Paged memory is coded "0" and indicates that pages 00h, 01h, 02h, 10h and 11h are implemented. Pages 10h and 11h are currently not supported by the driver. "Flat" memory mode is used for the passive copper transceivers. For this type only page 00h (256 bytes) is available. "Paged" memory is used for the optical transceivers. For this type pages 00h (256 bytes), 01h (128 bytes) and 02h (128 bytes) are available. 
Upper page 01h contains static advertising field, while upper page 02h contains the module-defined thresholds and lane-specific monitors. Extend enumerator 'mlxsw_reg_mcia_eeprom_module_info_id' with additional field 'MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID'. This field is used to indicate for QSFP-DD transceiver type which memory mode is to be used. Expose 256 bytes buffer for QSFP-DD passive copper transceiver and 512 bytes buffer for optical. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/core_env.c | 21 +++++++++++++++++-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 ++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index a7d86df7123f..1828dc569524 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -70,8 +70,9 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module, if (qsfp) { /* When reading upper pages 1, 2 and 3 the offset * starts at 128. Please refer to "QSFP+ Memory Map" - * figure in SFF-8436 specification for graphical - * depiction. + * figure in SFF-8436 specification and to "CMIS Module + * Memory Map" figure in CMIS specification for + * graphical depiction. */ page = MLXSW_REG_MCIA_PAGE_GET(offset); offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page; @@ -221,6 +222,22 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module, else modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2; break; + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: + /* Use SFF_8636 as base type. ethtool should recognize specific + * type through the identifier value. + */ + modinfo->type = ETH_MODULE_SFF_8636; + /* Verify if module EEPROM is a flat memory. In case of flat + * memory only page 00h (0-255 bytes) can be read. Otherwise + * upper pages 01h and 02h can also be read. Upper pages 10h + * and 11h are currently not supported by the driver. + */ + if (module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID] & + MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY) + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + else + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; default: return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 3c5b25495751..0638dfb23b7e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -8607,6 +8607,7 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16); #define MLXSW_REG_MCIA_TH_PAGE_NUM 3 #define MLXSW_REG_MCIA_PAGE0_LO 0 #define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80 +#define MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY BIT(7) enum mlxsw_reg_mcia_eeprom_module_info_rev_id { MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00, @@ -8625,6 +8626,7 @@ enum mlxsw_reg_mcia_eeprom_module_info_id { enum mlxsw_reg_mcia_eeprom_module_info { MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID, MLXSW_REG_MCIA_EEPROM_MODULE_INFO_REV_ID, + MLXSW_REG_MCIA_EEPROM_MODULE_INFO_TYPE_ID, MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE, }; From f152b41ba6cf43a3a5a87c7b45e6a2722a6195dc Mon Sep 17 00:00:00 2001 From: Vadim Pasternak Date: Tue, 28 Jul 2020 13:20:16 +0300 Subject: [PATCH 1709/2056] mlxsw: core: Add support for temperature thresholds reading for QSFP-DD transceivers Allow QSFP-DD transceivers temperature thresholds reading for hardware monitoring and thermal control. 
For this type, the thresholds are located in page 02h according to the "Module and Lane Thresholds" description from Common Management Interface Specification. Signed-off-by: Vadim Pasternak Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/core_env.c | 32 +++++++++++++------ drivers/net/ethernet/mellanox/mlxsw/reg.h | 1 + 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 1828dc569524..44fa02cbb683 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -11,7 +11,7 @@ #include "reg.h" static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, - bool *qsfp) + bool *qsfp, bool *cmis) { char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE]; char mcia_pl[MLXSW_REG_MCIA_LEN]; @@ -25,15 +25,19 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id, return err; mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp); ident = eeprom_tmp[0]; + *cmis = false; switch (ident) { case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: *qsfp = false; break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP: /* fall-through */ case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_PLUS: /* fall-through */ - case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: /* fall-through */ + case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP28: + *qsfp = true; + break; case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_QSFP_DD: *qsfp = true; + *cmis = true; break; default: return -EINVAL; @@ -117,7 +121,8 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, char mcia_pl[MLXSW_REG_MCIA_LEN] = {0}; char mtmp_pl[MLXSW_REG_MTMP_LEN]; unsigned int module_temp; - bool qsfp; + bool qsfp, cmis; + int page; int err; mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module, @@ -141,21 +146,28 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module, */ /* Validate module identifier value. */ - err = mlxsw_env_validate_cable_ident(core, module, &qsfp); + err = mlxsw_env_validate_cable_ident(core, module, &qsfp, &cmis); if (err) return err; - if (qsfp) - mlxsw_reg_mcia_pack(mcia_pl, module, 0, - MLXSW_REG_MCIA_TH_PAGE_NUM, + if (qsfp) { + /* For QSFP/CMIS module-defined thresholds are located in page + * 02h, otherwise in page 03h. + */ + if (cmis) + page = MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM; + else + page = MLXSW_REG_MCIA_TH_PAGE_NUM; + mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, MLXSW_REG_MCIA_TH_PAGE_OFF + off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_LOW); - else + } else { mlxsw_reg_mcia_pack(mcia_pl, module, 0, MLXSW_REG_MCIA_PAGE0_LO, off, MLXSW_REG_MCIA_TH_ITEM_SIZE, MLXSW_REG_MCIA_I2C_ADDR_HIGH); + } err = mlxsw_reg_query(core, MLXSW_REG(mcia), mcia_pl); if (err) @@ -252,8 +264,8 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, { int offset = ee->offset; unsigned int read_size; + bool qsfp, cmis; int i = 0; - bool qsfp; int err; if (!ee->len) @@ -261,7 +273,7 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev, memset(data, 0, ee->len); /* Validate module identifier value. 
*/ - err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp); + err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp, &cmis); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0638dfb23b7e..1d23ad70e83d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -8605,6 +8605,7 @@ MLXSW_ITEM32(reg, mcia, size, 0x08, 0, 16); #define MLXSW_REG_MCIA_PAGE0_LO_OFF 0xa0 #define MLXSW_REG_MCIA_TH_ITEM_SIZE 2 #define MLXSW_REG_MCIA_TH_PAGE_NUM 3 +#define MLXSW_REG_MCIA_TH_PAGE_CMIS_NUM 2 #define MLXSW_REG_MCIA_PAGE0_LO 0 #define MLXSW_REG_MCIA_TH_PAGE_OFF 0x80 #define MLXSW_REG_MCIA_EEPROM_CMIS_FLAT_MEMORY BIT(7) From a3ad434ad782085670d74e1a35c4c3f6043749f0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:33 +0200 Subject: [PATCH 1710/2056] netfilter: arp_tables: restore a SPDX identifier This was accidentally removed in an unrelated commit. Fixes: c2f12630c60f ("netfilter: switch nf_setsockopt to sockptr_t") Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- net/ipv4/netfilter/arp_tables.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index f5b26ef17820..9a1567dbc022 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1,4 +1,4 @@ - +// SPDX-License-Identifier: GPL-2.0-only /* * Packet matching code for ARP packets. * From 035bfd051eae5b365368be915dfaf916aa501a52 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:34 +0200 Subject: [PATCH 1711/2056] net: make sockptr_is_null strict aliasing safe While the kernel in general is not strict aliasing safe we can trivially do that in sockptr_is_null without affecting code generation, so always check the actually assigned union member. Reported-by: Jan Engelhardt Signed-off-by: Christoph Hellwig Signed-off-by: David S. Miller --- include/linux/sockptr.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 7d5cdb2b30b5..b13ea1422f93 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -64,7 +64,9 @@ static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) static inline bool sockptr_is_null(sockptr_t sockptr) { - return !sockptr.user && !sockptr.kernel; + if (sockptr_is_kernel(sockptr)) + return !sockptr.kernel; + return !sockptr.user; } static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) From d3c48151512922dd35f1f393b30b9138e4441d14 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:35 +0200 Subject: [PATCH 1712/2056] net: remove sockptr_advance sockptr_advance never properly worked. Replace it with _offset variants of copy_from_sockptr and copy_to_sockptr. Fixes: ba423fdaa589 ("net: add a new sockptr_t type") Reported-by: Jason A. Donenfeld Reported-by: Ido Schimmel Signed-off-by: Christoph Hellwig Acked-by: Jason A. Donenfeld Tested-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_main.c | 12 +++++----- include/linux/sockptr.h | 27 +++++++++++------------ net/dccp/proto.c | 5 ++--- net/ipv4/netfilter/arp_tables.c | 8 +++---- net/ipv4/netfilter/ip_tables.c | 8 +++---- net/ipv4/tcp.c | 5 +++-- net/ipv6/ip6_flowlabel.c | 11 ++++----- net/ipv6/netfilter/ip6_tables.c | 8 +++---- net/netfilter/x_tables.c | 7 +++--- net/tls/tls_main.c | 6 ++--- 10 files changed, 49 insertions(+), 48 deletions(-) diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c index c3058dcdb33c..66d247efd561 100644 --- a/drivers/crypto/chelsio/chtls/chtls_main.c +++ b/drivers/crypto/chelsio/chtls/chtls_main.c @@ -525,9 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, /* Obtain version and type from previous copy */ crypto_info[0] = tmp_crypto_info; /* Now copy the following data */ - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), - optval, + rc = copy_from_sockptr_offset((char *)crypto_info + + sizeof(*crypto_info), + optval, sizeof(*crypto_info), sizeof(struct tls12_crypto_info_aes_gcm_128) - sizeof(*crypto_info)); @@ -542,9 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname, } case TLS_CIPHER_AES_GCM_256: { crypto_info[0] = tmp_crypto_info; - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info), - optval, + rc = copy_from_sockptr_offset((char *)crypto_info + + sizeof(*crypto_info), + optval, sizeof(*crypto_info), sizeof(struct tls12_crypto_info_aes_gcm_256) - sizeof(*crypto_info)); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index b13ea1422f93..9e6c81d474cb 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -69,19 +69,26 @@ static inline bool sockptr_is_null(sockptr_t sockptr) return !sockptr.user; } -static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, + size_t offset, size_t size) { if (!sockptr_is_kernel(src)) - return copy_from_user(dst, src.user, size); - memcpy(dst, src.kernel, size); + return copy_from_user(dst, src.user + offset, size); + memcpy(dst, src.kernel + offset, size); return 0; } -static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + return copy_from_sockptr_offset(dst, src, 0, size); +} + +static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, + const void *src, size_t size) { if (!sockptr_is_kernel(dst)) - return copy_to_user(dst.user, src, size); - memcpy(dst.kernel, src, size); + return copy_to_user(dst.user + offset, src, size); + memcpy(dst.kernel + offset, src, size); return 0; } @@ -112,14 +119,6 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) return p; } -static inline void sockptr_advance(sockptr_t sockptr, size_t len) -{ - if (sockptr_is_kernel(sockptr)) - sockptr.kernel += len; - else - sockptr.user += len; -} - static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) { if (sockptr_is_kernel(src)) { diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 2e9e8449698f..d148ab1530e5 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -426,9 +426,8 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service, return -ENOMEM; sl->dccpsl_nr = optlen / sizeof(u32) - 1; - sockptr_advance(optval, 
sizeof(service)); - if (copy_from_sockptr(sl->dccpsl_list, optval, - optlen - sizeof(service)) || + if (copy_from_sockptr_offset(sl->dccpsl_list, optval, + sizeof(service), optlen - sizeof(service)) || dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) { kfree(sl); return -EFAULT; diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 9a1567dbc022..d1e04d2b5170 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -971,8 +971,8 @@ static int do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1267,8 +1267,8 @@ static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f2a9680303d8..f15bc21d7301 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1126,8 +1126,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1508,8 +1508,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 27de9380ed14..4afec552f211 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2801,12 +2801,13 @@ static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, { struct tcp_sock *tp = tcp_sk(sk); struct tcp_repair_opt opt; + size_t offset = 0; while (len >= sizeof(opt)) { - if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) + if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) return -EFAULT; - sockptr_advance(optbuf, sizeof(opt)); + offset += sizeof(opt); len -= sizeof(opt); switch (opt.opt_code) { diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 215b6f5e733e..2d655260dedc 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -401,8 +401,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(fl->opt, 0, sizeof(*fl->opt)); fl->opt->tot_len = sizeof(*fl->opt) + olen; err = -EFAULT; - sockptr_advance(optval, CMSG_ALIGN(sizeof(*freq))); - if (copy_from_sockptr(fl->opt + 1, optval, olen)) + if (copy_from_sockptr_offset(fl->opt + 1, optval, + CMSG_ALIGN(sizeof(*freq)), olen)) goto done; msg.msg_controllen = olen; @@ -703,9 +703,10 @@ static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq, goto recheck; if (!freq->flr_label) { - sockptr_advance(optval, - offsetof(struct in6_flowlabel_req, flr_label)); - if (copy_to_sockptr(optval, &fl->label, 
sizeof(fl->label))) { + size_t offset = offsetof(struct in6_flowlabel_req, flr_label); + + if (copy_to_sockptr_offset(optval, offset, &fl->label, + sizeof(fl->label))) { /* Intentionally ignore fault. */ } } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1d52957a413f..2e2119bfcf13 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1143,8 +1143,8 @@ do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } @@ -1517,8 +1517,8 @@ compat_do_replace(struct net *net, sockptr_t arg, unsigned int len) return -ENOMEM; loc_cpu_entry = newinfo->entries; - sockptr_advance(arg, sizeof(tmp)); - if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) { + if (copy_from_sockptr_offset(loc_cpu_entry, arg, sizeof(tmp), + tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index b97eb4b538fd..91bf6635ea9e 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1050,6 +1050,7 @@ EXPORT_SYMBOL_GPL(xt_check_target); void *xt_copy_counters(sockptr_t arg, unsigned int len, struct xt_counters_info *info) { + size_t offset; void *mem; u64 size; @@ -1067,7 +1068,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); info->num_counters = compat_tmp.num_counters; - sockptr_advance(arg, sizeof(compat_tmp)); + offset = sizeof(compat_tmp); } else #endif { @@ -1078,7 +1079,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, if (copy_from_sockptr(info, arg, sizeof(*info)) != 0) return ERR_PTR(-EFAULT); - sockptr_advance(arg, sizeof(*info)); + offset = sizeof(*info); } info->name[sizeof(info->name) - 1] = '\0'; @@ -1092,7 +1093,7 @@ void *xt_copy_counters(sockptr_t arg, unsigned int len, if (!mem) return ERR_PTR(-ENOMEM); - if (copy_from_sockptr(mem, arg, len) == 0) + if (copy_from_sockptr_offset(mem, arg, offset, len) == 0) return mem; vfree(mem); diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index d77f7d821130..bbc52b088d29 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -522,9 +522,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, goto err_crypto_info; } - sockptr_advance(optval, sizeof(*crypto_info)); - rc = copy_from_sockptr(crypto_info + 1, optval, - optlen - sizeof(*crypto_info)); + rc = copy_from_sockptr_offset(crypto_info + 1, optval, + sizeof(*crypto_info), + optlen - sizeof(*crypto_info)); if (rc) { rc = -EFAULT; goto err_crypto_info; From a31edb2059ed4e498f9aa8230c734b59d0ad797a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Jul 2020 18:38:36 +0200 Subject: [PATCH 1713/2056] net: improve the user pointer check in init_user_sockptr Make sure not just the pointer itself but the whole range lies in the user address space. For that pass the length and then use the access_ok helper to do the check. Fixes: 6d04fe15f78a ("net: optimize the sockptr_t for unified kernel/user address spaces") Reported-by: David Laight Signed-off-by: Christoph Hellwig Signed-off-by: David S. 
Miller --- include/linux/sockptr.h | 18 ++++++------------ net/ipv4/bpfilter/sockopt.c | 2 +- net/socket.c | 2 +- 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 9e6c81d474cb..96840def9d69 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -27,14 +27,6 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p }; } - -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) -{ - if ((unsigned long)p >= TASK_SIZE) - return -EFAULT; - sp->user = p; - return 0; -} #else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ typedef struct { union { @@ -53,14 +45,16 @@ static inline sockptr_t KERNEL_SOCKPTR(void *p) { return (sockptr_t) { .kernel = p, .is_kernel = true }; } +#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ -static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p) +static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p, + size_t size) { - sp->user = p; - sp->is_kernel = false; + if (!access_ok(p, size)) + return -EFAULT; + *sp = (sockptr_t) { .user = p }; return 0; } -#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */ static inline bool sockptr_is_null(sockptr_t sockptr) { diff --git a/net/ipv4/bpfilter/sockopt.c b/net/ipv4/bpfilter/sockopt.c index 94f18d2352d0..545b2640f019 100644 --- a/net/ipv4/bpfilter/sockopt.c +++ b/net/ipv4/bpfilter/sockopt.c @@ -65,7 +65,7 @@ int bpfilter_ip_get_sockopt(struct sock *sk, int optname, if (get_user(len, optlen)) return -EFAULT; - err = init_user_sockptr(&optval, user_optval); + err = init_user_sockptr(&optval, user_optval, len); if (err) return err; return bpfilter_mbox_request(sk, optname, optval, len, false); diff --git a/net/socket.c b/net/socket.c index 94ca4547cd7c..aff52e81653c 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2105,7 +2105,7 @@ int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval, if (optlen < 0) return -EINVAL; - err = init_user_sockptr(&optval, user_optval); + err = init_user_sockptr(&optval, user_optval, optlen); if (err) return err; From ca5cd355b7f0372da0d50fce5b12a3367e417290 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 28 Jul 2020 07:39:02 -0700 Subject: [PATCH 1714/2056] bpf, selftests: use :: 1 for localhost in tcp_server.py Using localhost requires the host to have a /etc/hosts file with that specific line in it. By default my dev box did not, they used ip6-localhost, so the test was failing. To fix remove the need for any /etc/hosts and use ::1. I could just add the line, but this seems easier. 
Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Acked-by: Song Liu Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159594714197.21431.10113693935099326445.stgit@john-Precision-5820-Tower --- tools/testing/selftests/bpf/tcp_client.py | 2 +- tools/testing/selftests/bpf/tcp_server.py | 2 +- tools/testing/selftests/bpf/test_netcnt.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py index a53ed58528d6..bfff82be3fc1 100755 --- a/tools/testing/selftests/bpf/tcp_client.py +++ b/tools/testing/selftests/bpf/tcp_client.py @@ -34,7 +34,7 @@ serverPort = int(sys.argv[1]) # create active socket sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) try: - sock.connect(('localhost', serverPort)) + sock.connect(('::1', serverPort)) except socket.error as e: sys.exit(1) diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py index 0ca60d193bed..42ab8882f00f 100755 --- a/tools/testing/selftests/bpf/tcp_server.py +++ b/tools/testing/selftests/bpf/tcp_server.py @@ -38,7 +38,7 @@ serverSocket = None # create passive socket serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) -try: serverSocket.bind(('localhost', 0)) +try: serverSocket.bind(('::1', 0)) except socket.error as msg: print('bind fails: ' + str(msg)) diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c index c1da5404454a..7a68c9069639 100644 --- a/tools/testing/selftests/bpf/test_netcnt.c +++ b/tools/testing/selftests/bpf/test_netcnt.c @@ -82,9 +82,9 @@ int main(int argc, char **argv) } if (system("which ping6 &>/dev/null") == 0) - assert(!system("ping6 localhost -c 10000 -f -q > /dev/null")); + assert(!system("ping6 ::1 -c 10000 -f -q > /dev/null")); else - assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null")); + assert(!system("ping -6 ::1 -c 10000 -f -q > /dev/null")); if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL, &prog_cnt)) { From 310ad7970a0dec847563dc6dba9e7e587d545622 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 28 Jul 2020 12:05:27 -0700 Subject: [PATCH 1715/2056] bpf: Fix build without CONFIG_NET when using BPF XDP link Entire net/core subsystem is not built without CONFIG_NET. linux/netdevice.h just assumes that it's always there, so the easiest way to fix this is to conditionally compile out bpf_xdp_link_attach() use in bpf/syscall.c. 
Fixes: aa8d3a716b59 ("bpf, xdp: Add bpf_link-based XDP attachment API") Reported-by: Randy Dunlap Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Randy Dunlap # build-tested Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200728190527.110830-1-andriin@fb.com --- kernel/bpf/syscall.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0e8c88db7e7a..cd3d599e9e90 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3923,9 +3923,11 @@ static int link_create(union bpf_attr *attr) case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; +#ifdef CONFIG_NET case BPF_PROG_TYPE_XDP: ret = bpf_xdp_link_attach(attr, prog); break; +#endif default: ret = -EINVAL; } From 0bac966a1f2ae0e3cbc259c5bb10aab7bbcf8f4b Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:11:59 -0700 Subject: [PATCH 1716/2056] mptcp: Allow DATA_FIN in headers without TCP FIN RFC 8684-compliant DATA_FIN needs to be sent and ack'd before subflows are closed with TCP FIN, so write DATA_FIN DSS headers whenever their transmission has been enabled by the MPTCP connection-level socket. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 3bc56eb608d8..0b122b2a9c69 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -482,17 +482,10 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, struct mptcp_sock *msk; unsigned int ack_size; bool ret = false; - u8 tcp_fin; - if (skb) { - mpext = mptcp_get_ext(skb); - tcp_fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; - } else { - mpext = NULL; - tcp_fin = 0; - } + mpext = skb ? mptcp_get_ext(skb) : NULL; - if (!skb || (mpext && mpext->use_map) || tcp_fin) { + if (!skb || (mpext && mpext->use_map) || subflow->data_fin_tx_enable) { unsigned int map_size; map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64; @@ -502,7 +495,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, if (mpext) opts->ext_copy = *mpext; - if (skb && tcp_fin && subflow->data_fin_tx_enable) + if (skb && subflow->data_fin_tx_enable) mptcp_write_data_fin(subflow, skb, &opts->ext_copy); ret = true; } From 57baaf2875404b555587391608da1625863086fa Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:00 -0700 Subject: [PATCH 1717/2056] mptcp: Return EPIPE if sending is shut down during a sendmsg A MPTCP socket where sending has been shut down should not attempt to send additional data, since DATA_FIN has already been sent. Signed-off-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/protocol.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 2891ae8a1028..b3c3dbc89b3f 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -748,6 +748,11 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) restart: mptcp_clean_una(sk); + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { + ret = -EPIPE; + goto out; + } + wait_for_sndbuf: __mptcp_flush_join_list(msk); ssk = mptcp_subflow_get_send(msk); From 242e63f651e94da5fa3cbe6ae0a62dd219226418 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:01 -0700 Subject: [PATCH 1718/2056] mptcp: Remove outdated and incorrect comment mptcp_close() acquires the msk lock, so it clearly should not be held before the function is called. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index b3c3dbc89b3f..7d7e0fa17219 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1421,7 +1421,6 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how, release_sock(ssk); } -/* Called with msk lock held, releases such lock before returning */ static void mptcp_close(struct sock *sk, long timeout) { struct mptcp_subflow_context *subflow, *tmp; From 7279da6145bbb2e41a61def5d9bca5b65f12de9d Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:02 -0700 Subject: [PATCH 1719/2056] mptcp: Use MPTCP-level flag for sending DATA_FIN Since DATA_FIN information is the same for every subflow, store it only in the mptcp_sock. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 18 ++++++++++++------ net/mptcp/protocol.c | 21 +++++---------------- net/mptcp/protocol.h | 3 +-- 3 files changed, 18 insertions(+), 24 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 0b122b2a9c69..f157cb7e14c0 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -451,6 +451,8 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb, static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, struct sk_buff *skb, struct mptcp_ext *ext) { + u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq); + if (!ext->use_map || !skb->len) { /* RFC6824 requires a DSS mapping with specific values * if DATA_FIN is set but no data payload is mapped @@ -458,10 +460,13 @@ static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, ext->data_fin = 1; ext->use_map = 1; ext->dsn64 = 1; - ext->data_seq = subflow->data_fin_tx_seq; + /* The write_seq value has already been incremented, so + * the actual sequence number for the DATA_FIN is one less. + */ + ext->data_seq = data_fin_tx_seq - 1; ext->subflow_seq = 0; ext->data_len = 1; - } else if (ext->data_seq + ext->data_len == subflow->data_fin_tx_seq) { + } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) { /* If there's an existing DSS mapping and it is the * final mapping, DATA_FIN consumes 1 additional byte of * mapping space. @@ -477,15 +482,17 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, struct mptcp_out_options *opts) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); + struct mptcp_sock *msk = mptcp_sk(subflow->conn); unsigned int dss_size = 0; + u64 snd_data_fin_enable; struct mptcp_ext *mpext; - struct mptcp_sock *msk; unsigned int ack_size; bool ret = false; mpext = skb ? 
mptcp_get_ext(skb) : NULL; + snd_data_fin_enable = READ_ONCE(msk->snd_data_fin_enable); - if (!skb || (mpext && mpext->use_map) || subflow->data_fin_tx_enable) { + if (!skb || (mpext && mpext->use_map) || snd_data_fin_enable) { unsigned int map_size; map_size = TCPOLEN_MPTCP_DSS_BASE + TCPOLEN_MPTCP_DSS_MAP64; @@ -495,7 +502,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, if (mpext) opts->ext_copy = *mpext; - if (skb && subflow->data_fin_tx_enable) + if (skb && snd_data_fin_enable) mptcp_write_data_fin(subflow, skb, &opts->ext_copy); ret = true; } @@ -504,7 +511,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, * if the first subflow may have the already the remote key handy */ opts->ext_copy.use_ack = 0; - msk = mptcp_sk(subflow->conn); if (!READ_ONCE(msk->can_ack)) { *size = ALIGN(dss_size, 4); return ret; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 7d7e0fa17219..dd403ba3679a 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1391,8 +1391,7 @@ static void mptcp_cancel_work(struct sock *sk) sock_put(sk); } -static void mptcp_subflow_shutdown(struct sock *ssk, int how, - bool data_fin_tx_enable, u64 data_fin_tx_seq) +static void mptcp_subflow_shutdown(struct sock *ssk, int how) { lock_sock(ssk); @@ -1405,14 +1404,6 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how, tcp_disconnect(ssk, O_NONBLOCK); break; default: - if (data_fin_tx_enable) { - struct mptcp_subflow_context *subflow; - - subflow = mptcp_subflow_ctx(ssk); - subflow->data_fin_tx_seq = data_fin_tx_seq; - subflow->data_fin_tx_enable = 1; - } - ssk->sk_shutdown |= how; tcp_shutdown(ssk, how); break; @@ -1426,7 +1417,6 @@ static void mptcp_close(struct sock *sk, long timeout) struct mptcp_subflow_context *subflow, *tmp; struct mptcp_sock *msk = mptcp_sk(sk); LIST_HEAD(conn_list); - u64 data_fin_tx_seq; lock_sock(sk); @@ -1440,7 +1430,7 @@ static void mptcp_close(struct sock *sk, long timeout) spin_unlock_bh(&msk->join_list_lock); list_splice_init(&msk->conn_list, &conn_list); - data_fin_tx_seq = msk->write_seq; + msk->snd_data_fin_enable = 1; __mptcp_clear_xmit(sk); @@ -1448,9 +1438,6 @@ static void mptcp_close(struct sock *sk, long timeout) list_for_each_entry_safe(subflow, tmp, &conn_list, node) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); - - subflow->data_fin_tx_seq = data_fin_tx_seq; - subflow->data_fin_tx_enable = 1; __mptcp_close_ssk(sk, ssk, subflow, timeout); } @@ -2146,10 +2133,12 @@ static int mptcp_shutdown(struct socket *sock, int how) } __mptcp_flush_join_list(msk); + msk->snd_data_fin_enable = 1; + mptcp_for_each_subflow(msk, subflow) { struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); - mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq); + mptcp_subflow_shutdown(tcp_sk, how); } /* Wake up anyone sleeping in poll. 
*/ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 67634b595466..3f49cc105772 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -199,6 +199,7 @@ struct mptcp_sock { unsigned long flags; bool can_ack; bool fully_established; + bool snd_data_fin_enable; spinlock_t join_list_lock; struct work_struct work; struct list_head conn_list; @@ -291,10 +292,8 @@ struct mptcp_subflow_context { backup : 1, data_avail : 1, rx_eof : 1, - data_fin_tx_enable : 1, use_64bit_ack : 1, /* Set when we received a 64-bit DSN */ can_ack : 1; /* only after processing the remote a key */ - u64 data_fin_tx_seq; u32 remote_nonce; u64 thmac; u32 local_nonce; From 3721b9b64676b3377a966f3d96acafd70bb32dd9 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:03 -0700 Subject: [PATCH 1720/2056] mptcp: Track received DATA_FIN sequence number and add related helpers Incoming DATA_FIN headers need to propagate the presence of the DATA_FIN bit and the associated sequence number to the MPTCP layer, even when arriving on a bare ACK that does not get added to the receive queue. Add structure members to store the DATA_FIN information and helpers to set and check those values. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 16 +++++++ net/mptcp/protocol.c | 106 +++++++++++++++++++++++++++++++++++++++---- net/mptcp/protocol.h | 3 ++ 3 files changed, 115 insertions(+), 10 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index f157cb7e14c0..38583d1b9b5f 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -782,6 +782,22 @@ static void update_una(struct mptcp_sock *msk, } } +bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq) +{ + /* Skip if DATA_FIN was already received. + * If updating simultaneously with the recvmsg loop, values + * should match. If they mismatch, the peer is misbehaving and + * we will prefer the most recent information. + */ + if (READ_ONCE(msk->rcv_data_fin) || !READ_ONCE(msk->first)) + return false; + + WRITE_ONCE(msk->rcv_data_fin_seq, data_fin_seq); + WRITE_ONCE(msk->rcv_data_fin, 1); + + return true; +} + static bool add_addr_hmac_valid(struct mptcp_sock *msk, struct mptcp_options_received *mp_opt) { diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index dd403ba3679a..e1c71bfd61a3 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -16,6 +16,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_MPTCP_IPV6) #include #endif @@ -163,6 +164,101 @@ static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk, return mptcp_subflow_data_available(ssk); } +static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + + if (READ_ONCE(msk->rcv_data_fin) && + ((1 << sk->sk_state) & + (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) { + u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); + + if (msk->ack_seq == rcv_data_fin_seq) { + if (seq) + *seq = rcv_data_fin_seq; + + return true; + } + } + + return false; +} + +static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk) +{ + long tout = ssk && inet_csk(ssk)->icsk_pending ? + inet_csk(ssk)->icsk_timeout - jiffies : 0; + + if (tout <= 0) + tout = mptcp_sk(sk)->timer_ival; + mptcp_sk(sk)->timer_ival = tout > 0 ? 
tout : TCP_RTO_MIN; +} + +static void mptcp_check_data_fin(struct sock *sk) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + u64 rcv_data_fin_seq; + + if (__mptcp_check_fallback(msk) || !msk->first) + return; + + /* Need to ack a DATA_FIN received from a peer while this side + * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2. + * msk->rcv_data_fin was set when parsing the incoming options + * at the subflow level and the msk lock was not held, so this + * is the first opportunity to act on the DATA_FIN and change + * the msk state. + * + * If we are caught up to the sequence number of the incoming + * DATA_FIN, send the DATA_ACK now and do state transition. If + * not caught up, do nothing and let the recv code send DATA_ACK + * when catching up. + */ + + if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { + struct mptcp_subflow_context *subflow; + + msk->ack_seq++; + WRITE_ONCE(msk->rcv_data_fin, 0); + + sk->sk_shutdown |= RCV_SHUTDOWN; + + switch (sk->sk_state) { + case TCP_ESTABLISHED: + inet_sk_state_store(sk, TCP_CLOSE_WAIT); + break; + case TCP_FIN_WAIT1: + inet_sk_state_store(sk, TCP_CLOSING); + break; + case TCP_FIN_WAIT2: + inet_sk_state_store(sk, TCP_CLOSE); + // @@ Close subflows now? + break; + default: + /* Other states not expected */ + WARN_ON_ONCE(1); + break; + } + + mptcp_set_timeout(sk, NULL); + mptcp_for_each_subflow(msk, subflow) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + + lock_sock(ssk); + tcp_send_ack(ssk); + release_sock(ssk); + } + + sk->sk_state_change(sk); + + if (sk->sk_shutdown == SHUTDOWN_MASK || + sk->sk_state == TCP_CLOSE) + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); + else + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + } +} + static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, struct sock *ssk, unsigned int *bytes) @@ -303,16 +399,6 @@ static void __mptcp_flush_join_list(struct mptcp_sock *msk) spin_unlock_bh(&msk->join_list_lock); } -static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk) -{ - long tout = ssk && inet_csk(ssk)->icsk_pending ? - inet_csk(ssk)->icsk_timeout - jiffies : 0; - - if (tout <= 0) - tout = mptcp_sk(sk)->timer_ival; - mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; -} - static bool mptcp_timer_pending(struct sock *sk) { return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 3f49cc105772..beb34b8a5363 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -193,12 +193,14 @@ struct mptcp_sock { u64 remote_key; u64 write_seq; u64 ack_seq; + u64 rcv_data_fin_seq; atomic64_t snd_una; unsigned long timer_ival; u32 token; unsigned long flags; bool can_ack; bool fully_established; + bool rcv_data_fin; bool snd_data_fin_enable; spinlock_t join_list_lock; struct work_struct work; @@ -385,6 +387,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk); bool mptcp_finish_join(struct sock *sk); void mptcp_data_acked(struct sock *sk); void mptcp_subflow_eof(struct sock *sk); +bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq); void __init mptcp_token_init(void); static inline void mptcp_token_init_request(struct request_sock *req) From 6920b851584cc69a61ebf2cff3948bb153bcef20 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:04 -0700 Subject: [PATCH 1721/2056] mptcp: Add mptcp_close_state() helper This will be used to transition to the appropriate state on close and determine if a DATA_FIN needs to be sent for that state transition. 
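The helper mirrors tcp_close_state() on the TCP side and is wired up by a later patch in this series roughly as in the sketch below: the return value tells the caller that a DATA_FIN must be emitted for the state transition just performed.

  if (mptcp_close_state(sk)) {
          /* The DATA_FIN consumes one unit of data sequence space, so
           * advance write_seq and tell the DSS option writer to start
           * emitting DATA_FIN on the subflows.
           */
          WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
          WRITE_ONCE(msk->snd_data_fin_enable, 1);
  }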
Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index e1c71bfd61a3..51370b69e30b 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1498,6 +1498,33 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how) release_sock(ssk); } +static const unsigned char new_state[16] = { + /* current state: new state: action: */ + [0 /* (Invalid) */] = TCP_CLOSE, + [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, + [TCP_SYN_SENT] = TCP_CLOSE, + [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, + [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, + [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, + [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen ! */ + [TCP_CLOSE] = TCP_CLOSE, + [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, + [TCP_LAST_ACK] = TCP_LAST_ACK, + [TCP_LISTEN] = TCP_CLOSE, + [TCP_CLOSING] = TCP_CLOSING, + [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ +}; + +static int mptcp_close_state(struct sock *sk) +{ + int next = (int)new_state[sk->sk_state]; + int ns = next & TCP_STATE_MASK; + + inet_sk_state_store(sk, ns); + + return next & TCP_ACTION_FIN; +} + static void mptcp_close(struct sock *sk, long timeout) { struct mptcp_subflow_context *subflow, *tmp; From 16a9a9da17234797b01ca05024d33269872a0ae0 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:05 -0700 Subject: [PATCH 1722/2056] mptcp: Add helper to process acks of DATA_FIN After DATA_FIN has been sent, the peer will acknowledge it. An ack of the relevant MPTCP-level sequence number will update the MPTCP connection state appropriately. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 54 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 46 insertions(+), 8 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 51370b69e30b..b3350830e14d 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -143,6 +143,14 @@ static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, MPTCP_SKB_CB(skb)->offset = offset; } +static void mptcp_stop_timer(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + sk_stop_timer(sk, &icsk->icsk_retransmit_timer); + mptcp_sk(sk)->timer_ival = 0; +} + /* both sockets must be locked */ static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk, struct sock *ssk) @@ -164,6 +172,42 @@ static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk, return mptcp_subflow_data_available(ssk); } +static void mptcp_check_data_fin_ack(struct sock *sk) +{ + struct mptcp_sock *msk = mptcp_sk(sk); + + if (__mptcp_check_fallback(msk)) + return; + + /* Look for an acknowledged DATA_FIN */ + if (((1 << sk->sk_state) & + (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) && + msk->write_seq == atomic64_read(&msk->snd_una)) { + mptcp_stop_timer(sk); + + WRITE_ONCE(msk->snd_data_fin_enable, 0); + + switch (sk->sk_state) { + case TCP_FIN_WAIT1: + inet_sk_state_store(sk, TCP_FIN_WAIT2); + sk->sk_state_change(sk); + break; + case TCP_CLOSING: + fallthrough; + case TCP_LAST_ACK: + inet_sk_state_store(sk, TCP_CLOSE); + sk->sk_state_change(sk); + break; + } + + if (sk->sk_shutdown == SHUTDOWN_MASK || + sk->sk_state == TCP_CLOSE) + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); + else + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + } +} + static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) { struct mptcp_sock *msk = mptcp_sk(sk); @@ 
-222,6 +266,8 @@ static void mptcp_check_data_fin(struct sock *sk) WRITE_ONCE(msk->rcv_data_fin, 0); sk->sk_shutdown |= RCV_SHUTDOWN; + smp_mb__before_atomic(); /* SHUTDOWN must be visible first */ + set_bit(MPTCP_DATA_READY, &msk->flags); switch (sk->sk_state) { case TCP_ESTABLISHED: @@ -455,14 +501,6 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk) } } -static void mptcp_stop_timer(struct sock *sk) -{ - struct inet_connection_sock *icsk = inet_csk(sk); - - sk_stop_timer(sk, &icsk->icsk_retransmit_timer); - mptcp_sk(sk)->timer_ival = 0; -} - static bool mptcp_ext_cache_refill(struct mptcp_sock *msk) { const struct sock *sk = (const struct sock *)msk; From 43b54c6ee382f026fc93babf5301ec79e1c9614a Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:06 -0700 Subject: [PATCH 1723/2056] mptcp: Use full MPTCP-level disconnect state machine RFC 8684 appendix D describes the connection state machine for MPTCP. This patch implements the DATA_FIN / DATA_ACK exchanges and MPTCP-level socket state changes described in that appendix, rather than simply sending DATA_FIN along with TCP FIN when disconnecting subflows. DATA_FIN is now sent and acknowledged before shutting down the subflows. Received DATA_FIN information (if not part of a data packet) is written to the MPTCP socket when the incoming DSS option is parsed by the subflow, and the MPTCP worker is scheduled to process the flag. DATA_FIN received as part of a full DSS mapping will be handled when the mapping is processed. The DATA_FIN is acknowledged by the worker if the reader is caught up. If there is still data to be moved to the MPTCP-level queue, ack_seq will be incremented to account for the DATA_FIN when it reaches the end of the stream and a DATA_ACK will be sent to the peer. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 11 ++++++ net/mptcp/protocol.c | 87 +++++++++++++++++++++++++++++++++++++------- net/mptcp/subflow.c | 11 ++++-- 3 files changed, 92 insertions(+), 17 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 38583d1b9b5f..b4458ecd01f8 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -868,6 +868,17 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb, if (mp_opt.use_ack) update_una(msk, &mp_opt); + /* Zero-length packets, like bare ACKs carrying a DATA_FIN, are + * dropped by the caller and not propagated to the MPTCP layer. + * Copy the DATA_FIN information now. + */ + if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { + if (mp_opt.data_fin && mp_opt.data_len == 1 && + mptcp_update_rcv_data_fin(msk, mp_opt.data_seq) && + schedule_work(&msk->work)) + sock_hold(subflow->conn); + } + mpext = skb_ext_add(skb, SKB_EXT_MPTCP); if (!mpext) return; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index b3350830e14d..f264ea15e081 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -381,6 +381,15 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, *bytes = moved; + /* If the moves have caught up with the DATA_FIN sequence number + * it's time to ack the DATA_FIN and change socket state, but + * this is not a good place to change state. Let the workqueue + * do it. 
+ */ + if (mptcp_pending_data_fin(sk, NULL) && + schedule_work(&msk->work)) + sock_hold(sk); + return done; } @@ -466,7 +475,8 @@ void mptcp_data_acked(struct sock *sk) { mptcp_reset_timer(sk); - if (!sk_stream_is_writeable(sk) && + if ((!sk_stream_is_writeable(sk) || + (inet_sk_state_load(sk) != TCP_ESTABLISHED)) && schedule_work(&mptcp_sk(sk)->work)) sock_hold(sk); } @@ -1384,6 +1394,7 @@ static void mptcp_worker(struct work_struct *work) lock_sock(sk); mptcp_clean_una(sk); + mptcp_check_data_fin_ack(sk); __mptcp_flush_join_list(msk); __mptcp_move_skbs(msk); @@ -1393,6 +1404,8 @@ static void mptcp_worker(struct work_struct *work) if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags)) mptcp_check_for_eof(msk); + mptcp_check_data_fin(sk); + if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) goto unlock; @@ -1515,7 +1528,7 @@ static void mptcp_cancel_work(struct sock *sk) sock_put(sk); } -static void mptcp_subflow_shutdown(struct sock *ssk, int how) +static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) { lock_sock(ssk); @@ -1528,8 +1541,15 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how) tcp_disconnect(ssk, O_NONBLOCK); break; default: - ssk->sk_shutdown |= how; - tcp_shutdown(ssk, how); + if (__mptcp_check_fallback(mptcp_sk(sk))) { + pr_debug("Fallback"); + ssk->sk_shutdown |= how; + tcp_shutdown(ssk, how); + } else { + pr_debug("Sending DATA_FIN on subflow %p", ssk); + mptcp_set_timeout(sk, ssk); + tcp_send_ack(ssk); + } break; } @@ -1570,9 +1590,35 @@ static void mptcp_close(struct sock *sk, long timeout) LIST_HEAD(conn_list); lock_sock(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + if (sk->sk_state == TCP_LISTEN) { + inet_sk_state_store(sk, TCP_CLOSE); + goto cleanup; + } else if (sk->sk_state == TCP_CLOSE) { + goto cleanup; + } + + if (__mptcp_check_fallback(msk)) { + goto update_state; + } else if (mptcp_close_state(sk)) { + pr_debug("Sending DATA_FIN sk=%p", sk); + WRITE_ONCE(msk->write_seq, msk->write_seq + 1); + WRITE_ONCE(msk->snd_data_fin_enable, 1); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); + + mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK); + } + } + + sk_stream_wait_close(sk, timeout); + +update_state: inet_sk_state_store(sk, TCP_CLOSE); +cleanup: /* be sure to always acquire the join list lock, to sync vs * mptcp_finish_join(). */ @@ -1581,8 +1627,6 @@ static void mptcp_close(struct sock *sk, long timeout) spin_unlock_bh(&msk->join_list_lock); list_splice_init(&msk->conn_list, &conn_list); - msk->snd_data_fin_enable = 1; - __mptcp_clear_xmit(sk); release_sock(sk); @@ -2265,11 +2309,8 @@ static int mptcp_shutdown(struct socket *sock, int how) pr_debug("sk=%p, how=%d", msk, how); lock_sock(sock->sk); - if (how == SHUT_WR || how == SHUT_RDWR) - inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); how++; - if ((how & ~SHUTDOWN_MASK) || !how) { ret = -EINVAL; goto out_unlock; @@ -2283,13 +2324,31 @@ static int mptcp_shutdown(struct socket *sock, int how) sock->state = SS_CONNECTED; } - __mptcp_flush_join_list(msk); - msk->snd_data_fin_enable = 1; + /* If we've already sent a FIN, or it's a closed state, skip this. 
*/ + if (__mptcp_check_fallback(msk)) { + if (how == SHUT_WR || how == SHUT_RDWR) + inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); - mptcp_for_each_subflow(msk, subflow) { - struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); + mptcp_for_each_subflow(msk, subflow) { + struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); - mptcp_subflow_shutdown(tcp_sk, how); + mptcp_subflow_shutdown(sock->sk, tcp_sk, how); + } + } else if ((how & SEND_SHUTDOWN) && + ((1 << sock->sk->sk_state) & + (TCPF_ESTABLISHED | TCPF_SYN_SENT | + TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) && + mptcp_close_state(sock->sk)) { + __mptcp_flush_join_list(msk); + + WRITE_ONCE(msk->write_seq, msk->write_seq + 1); + WRITE_ONCE(msk->snd_data_fin_enable, 1); + + mptcp_for_each_subflow(msk, subflow) { + struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); + + mptcp_subflow_shutdown(sock->sk, tcp_sk, how); + } } /* Wake up anyone sleeping in poll. */ diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index e645483d1200..7ab2a52ad150 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -598,7 +598,8 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb) return true; } -static enum mapping_status get_mapping_status(struct sock *ssk) +static enum mapping_status get_mapping_status(struct sock *ssk, + struct mptcp_sock *msk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct mptcp_ext *mpext; @@ -648,7 +649,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk) if (mpext->data_fin == 1) { if (data_len == 1) { - pr_debug("DATA_FIN with no payload"); + mptcp_update_rcv_data_fin(msk, mpext->data_seq); + pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq); if (subflow->map_valid) { /* A DATA_FIN might arrive in a DSS * option before the previous mapping @@ -660,6 +662,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk) } else { return MAPPING_DATA_FIN; } + } else { + mptcp_update_rcv_data_fin(msk, mpext->data_seq + data_len); + pr_debug("DATA_FIN with mapping seq=%llu", mpext->data_seq + data_len); } /* Adjust for DATA_FIN using 1 byte of sequence space */ @@ -748,7 +753,7 @@ static bool subflow_check_data_avail(struct sock *ssk) u64 ack_seq; u64 old_ack; - status = get_mapping_status(ssk); + status = get_mapping_status(ssk, msk); pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status); if (status == MAPPING_INVALID) { ssk->sk_err = EBADMSG; From 067a0b3dc52f0f79b9fe64ff8d9bcbb0ffbcf8fc Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:07 -0700 Subject: [PATCH 1724/2056] mptcp: Only use subflow EOF signaling on fallback connections The MPTCP state machine handles disconnections on non-fallback connections, but the mptcp_sock still needs to get notified when fallback subflows disconnect. Signed-off-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/subflow.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 7ab2a52ad150..1c8482bc2ce5 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1159,7 +1159,8 @@ static void subflow_state_change(struct sock *sk) if (mptcp_subflow_data_available(sk)) mptcp_data_ready(parent, sk); - if (!(parent->sk_shutdown & RCV_SHUTDOWN) && + if (__mptcp_check_fallback(mptcp_sk(parent)) && + !(parent->sk_shutdown & RCV_SHUTDOWN) && !subflow->rx_eof && subflow_is_done(sk)) { subflow->rx_eof = 1; mptcp_subflow_eof(parent); From 06827b348b1d43850a63c3e490fe9712c124fa0c Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:08 -0700 Subject: [PATCH 1725/2056] mptcp: Skip unnecessary skb extension allocation for bare acks Bare TCP ack skbs are freed right after MPTCP sees them, so the work to allocate, zero, and populate the MPTCP skb extension is wasted. Detect these skbs and do not add skb extensions to them. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/options.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/mptcp/options.c b/net/mptcp/options.c index b4458ecd01f8..7fa822b55c34 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -868,15 +868,18 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb, if (mp_opt.use_ack) update_una(msk, &mp_opt); - /* Zero-length packets, like bare ACKs carrying a DATA_FIN, are - * dropped by the caller and not propagated to the MPTCP layer. - * Copy the DATA_FIN information now. + /* Zero-data-length packets are dropped by the caller and not + * propagated to the MPTCP layer, so the skb extension does not + * need to be allocated or populated. DATA_FIN information, if + * present, needs to be updated here before the skb is freed. */ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { if (mp_opt.data_fin && mp_opt.data_len == 1 && mptcp_update_rcv_data_fin(msk, mp_opt.data_seq) && schedule_work(&msk->work)) sock_hold(subflow->conn); + + return; } mpext = skb_ext_add(skb, SKB_EXT_MPTCP); From c75293925f24630326abdf79751d980ec3878f65 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:09 -0700 Subject: [PATCH 1726/2056] mptcp: Safely read sequence number when lock isn't held The MPTCP socket's write_seq member should be read with READ_ONCE() when the msk lock is not held. Signed-off-by: Mat Martineau Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f264ea15e081..f2455a68d231 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -1269,7 +1269,7 @@ static void mptcp_retransmit_handler(struct sock *sk) { struct mptcp_sock *msk = mptcp_sk(sk); - if (atomic64_read(&msk->snd_una) == msk->write_seq) { + if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) { mptcp_stop_timer(sk); } else { set_bit(MPTCP_WORK_RTX, &msk->flags); From 721e9089905ab7aebd5364b86b5f068f632a0e49 Mon Sep 17 00:00:00 2001 From: Mat Martineau Date: Tue, 28 Jul 2020 15:12:10 -0700 Subject: [PATCH 1727/2056] mptcp: Safely store sequence number when sending data The MPTCP socket's write_seq member can be read without the msk lock held, so use WRITE_ONCE() to store it. Signed-off-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/protocol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f2455a68d231..687f0bea2b35 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -793,7 +793,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, out: if (!retransmission) pfrag->offset += frag_truesize; - *write_seq += ret; + WRITE_ONCE(*write_seq, *write_seq + ret); mptcp_subflow_ctx(ssk)->rel_write_seq += ret; return ret; From b8265621f4888af9494e1d685620871ec81bc33d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 23 Jul 2020 17:21:59 -0700 Subject: [PATCH 1728/2056] Add pldmfw library for PLDM firmware update The pldmfw library is used to implement common logic needed to flash devices based on firmware files using the format described by the PLDM for Firmware Update standard. This library consists of logic to parse the PLDM file format from a firmware file object, as well as common logic for sending the relevant PLDM header data to the device firmware. A simple ops table is provided so that device drivers can implement device specific hardware interactions while keeping the common logic to the pldmfw library. This library will be used by the Intel ice networking driver as part of implementing device flash update via devlink. The library aims to be vendor and device agnostic. For this reason, it has been placed in lib/pldmfw, in the hopes that other devices which use the PLDM firmware file format may benefit from it in the future. However, do note that not all features defined in the PLDM standard have been implemented. Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- Documentation/driver-api/index.rst | 1 + .../driver-api/pldmfw/driver-ops.rst | 56 ++ .../driver-api/pldmfw/file-format.rst | 203 ++++ Documentation/driver-api/pldmfw/index.rst | 72 ++ MAINTAINERS | 7 + include/linux/pldmfw.h | 165 ++++ lib/Kconfig | 4 + lib/Makefile | 3 + lib/pldmfw/Makefile | 2 + lib/pldmfw/pldmfw.c | 879 ++++++++++++++++++ lib/pldmfw/pldmfw_private.h | 238 +++++ 11 files changed, 1630 insertions(+) create mode 100644 Documentation/driver-api/pldmfw/driver-ops.rst create mode 100644 Documentation/driver-api/pldmfw/file-format.rst create mode 100644 Documentation/driver-api/pldmfw/index.rst create mode 100644 include/linux/pldmfw.h create mode 100644 lib/pldmfw/Makefile create mode 100644 lib/pldmfw/pldmfw.c create mode 100644 lib/pldmfw/pldmfw_private.h diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6567187e7687..7fc1e0cccae7 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -95,6 +95,7 @@ available subsections can be seen below. phy/index pti_intel_mid pwm + pldmfw/index rfkill serial/index sm501 diff --git a/Documentation/driver-api/pldmfw/driver-ops.rst b/Documentation/driver-api/pldmfw/driver-ops.rst new file mode 100644 index 000000000000..f0654783d3b3 --- /dev/null +++ b/Documentation/driver-api/pldmfw/driver-ops.rst @@ -0,0 +1,56 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +========================= +Driver-specific callbacks +========================= + +The ``pldmfw`` module relies on the device driver for implementing device +specific behavior using the following operations. + +``.match_record`` +----------------- + +The ``.match_record`` operation is used to determine whether a given PLDM +record matches the device being updated. 
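For PCI devices the library provides ``pldmfw_op_pci_match_record()``, which can be plugged into the ops table directly. A driver that needs custom matching would walk the descriptor TLVs itself; a rough sketch follows (the ``my_dev`` structure, its embedded ``struct pldmfw`` member ``fwu``, and its ``pdev`` member are hypothetical)::

  static bool my_match_record(struct pldmfw *context, struct pldmfw_record *record)
  {
          struct my_dev *priv = container_of(context, struct my_dev, fwu);
          struct pldmfw_desc_tlv *desc;
          bool vendor_ok = false, device_ok = false;

          /* Compare the record's PCI descriptors against the device */
          list_for_each_entry(desc, &record->descs, entry) {
                  u16 value;

                  if (desc->size != sizeof(value))
                          continue;
                  value = get_unaligned_le16(desc->data);

                  if (desc->type == PLDM_DESC_ID_PCI_VENDOR_ID)
                          vendor_ok = (value == priv->pdev->vendor);
                  else if (desc->type == PLDM_DESC_ID_PCI_DEVICE_ID)
                          device_ok = (value == priv->pdev->device);
          }

          return vendor_ok && device_ok;
  }
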
This requires comparing the record +descriptors in the record with information from the device. Many record +descriptors are defined by the PLDM standard, but it is also allowed for +devices to implement their own descriptors. + +The ``.match_record`` operation should return true if a given record matches +the device. + +``.send_package_data`` +---------------------- + +The ``.send_package_data`` operation is used to send the device-specific +package data in a record to the device firmware. If the matching record +provides package data, ``pldmfw`` will call the ``.send_package_data`` +function with a pointer to the package data and with the package data +length. The device driver should send this data to firmware. + +``.send_component_table`` +------------------------- + +The ``.send_component_table`` operation is used to forward component +information to the device. It is called once for each applicable component, +that is, for each component indicated by the matching record. The +device driver should send the component information to the device firmware, +and wait for a response. The provided transfer flag indicates whether this +is the first, last, or a middle component, and is expected to be forwarded +to firmware as part of the component table information. The driver should an +error in the case when the firmware indicates that the component cannot be +updated, or return zero if the component can be updated. + +``.flash_component`` +-------------------- + +The ``.flash_component`` operation is used to inform the device driver to +flash a given component. The driver must perform any steps necessary to send +the component data to the device. + +``.finalize_update`` +-------------------- + +The ``.finalize_update`` operation is used by the ``pldmfw`` library in +order to allow the device driver to perform any remaining device specific +logic needed to finish the update. diff --git a/Documentation/driver-api/pldmfw/file-format.rst b/Documentation/driver-api/pldmfw/file-format.rst new file mode 100644 index 000000000000..b7a9cebe09c6 --- /dev/null +++ b/Documentation/driver-api/pldmfw/file-format.rst @@ -0,0 +1,203 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +================================== +PLDM Firmware file format overview +================================== + +A PLDM firmware package is a binary file which contains a header that +describes the contents of the firmware package. This includes an initial +package header, one or more firmware records, and one or more components +describing the actual flash contents to program. + +This diagram provides an overview of the file format:: + + overall file layout + +----------------------+ + | | + | Package Header | + | | + +----------------------+ + | | + | Device Records | + | | + +----------------------+ + | | + | Component Info | + | | + +----------------------+ + | | + | Package Header CRC | + | | + +----------------------+ + | | + | Component Image 1 | + | | + +----------------------+ + | | + | Component Image 2 | + | | + +----------------------+ + | | + | ... | + | | + +----------------------+ + | | + | Component Image N | + | | + +----------------------+ + +Package Header +============== + +The package header begins with the UUID of the PLDM file format, and +contains information about the version of the format that the file uses. It +also includes the total header size, a release date, the size of the +component bitmap, and an overall package version. 
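Because the header is packed binary with most multi-byte fields stored little-endian, the parser never dereferences those fields directly; it goes through unaligned accessors instead. The following is condensed from ``pldm_parse_header()`` later in this patch (offset tracking and most error handling trimmed)::

  const struct __pldm_header *header = (const struct __pldm_header *)fw->data;

  if (!uuid_equal(&header->id, &pldm_firmware_header_id))
          return -EINVAL;         /* not a PLDM firmware package */

  if (header->revision != PACKAGE_HEADER_FORMAT_REVISION)
          return -EOPNOTSUPP;     /* unsupported format revision */

  total_header_size    = get_unaligned_le16(&header->size);
  component_bitmap_len = get_unaligned_le16(&header->component_bitmap_len);
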
+ +The following diagram provides an overview of the package header:: + + header layout + +-------------------------+ + | PLDM UUID | + +-------------------------+ + | Format Revision | + +-------------------------+ + | Header Size | + +-------------------------+ + | Release Date | + +-------------------------+ + | Component Bitmap Length | + +-------------------------+ + | Package Version Info | + +-------------------------+ + +Device Records +============== + +The device firmware records area starts with a count indicating the total +number of records in the file, followed by each record. A single device +record describes what device matches this record. All valid PLDM firmware +files must contain at least one record, but optionally may contain more than +one record if they support multiple devices. + +Each record will identify the device it supports via TLVs that describe the +device, such as the PCI device and vendor information. It will also indicate +which set of components that are used by this device. It is possible that +only subset of provided components will be used by a given record. A record +may also optionally contain device-specific package data that will be used +by the device firmware during the update process. + +The following diagram provides an overview of the device record area:: + + area layout + +---------------+ + | | + | Record Count | + | | + +---------------+ + | | + | Record 1 | + | | + +---------------+ + | | + | Record 2 | + | | + +---------------+ + | | + | ... | + | | + +---------------+ + | | + | Record N | + | | + +---------------+ + + record layout + +-----------------------+ + | Record Length | + +-----------------------+ + | Descriptor Count | + +-----------------------+ + | Option Flags | + +-----------------------+ + | Version Settings | + +-----------------------+ + | Package Data Length | + +-----------------------+ + | Applicable Components | + +-----------------------+ + | Version String | + +-----------------------+ + | Descriptor TLVs | + +-----------------------+ + | Package Data | + +-----------------------+ + +Component Info +============== + +The component information area begins with a count of the number of +components. Following this count is a description for each component. The +component information points to the location in the file where the component +data is stored, and includes version data used to identify the version of +the component. + +The following diagram provides an overview of the component area:: + + area layout + +-----------------+ + | | + | Component Count | + | | + +-----------------+ + | | + | Component 1 | + | | + +-----------------+ + | | + | Component 2 | + | | + +-----------------+ + | | + | ... | + | | + +-----------------+ + | | + | Component N | + | | + +-----------------+ + + component layout + +------------------------+ + | Classification | + +------------------------+ + | Component Identifier | + +------------------------+ + | Comparison Stamp | + +------------------------+ + | Component Options | + +------------------------+ + | Activation Method | + +------------------------+ + | Location Offset | + +------------------------+ + | Component Size | + +------------------------+ + | Component Version Info | + +------------------------+ + | Package Data | + +------------------------+ + + +Package Header CRC +================== + +Following the component information is a short 4-byte CRC calculated over +the contents of all of the header information. 
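Assuming the conventional CRC-32 variant (the one used by zlib and Ethernet), such a checksum can be verified with the kernel's ``crc32_le()`` helper. A minimal sketch; ``header_size`` stands for the total header size taken from the package header, and the seed handling and error code here are illustrative, the real validation lives in ``pldmfw.c``::

  /* The CRC covers every header byte up to, but not including,
   * the CRC field itself.
   */
  u32 calculated = crc32_le(~0, fw->data, header_size - sizeof(u32)) ^ ~0;

  if (calculated != get_unaligned_le32(fw->data + header_size - sizeof(u32)))
          return -EBADMSG;
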
+ +Component Images +================ + +The component images follow the package header information in the PLDM +firmware file. Each of these is simply a binary chunk with its start and +size defined by the matching component structure in the component info area. diff --git a/Documentation/driver-api/pldmfw/index.rst b/Documentation/driver-api/pldmfw/index.rst new file mode 100644 index 000000000000..ad2c33ece30f --- /dev/null +++ b/Documentation/driver-api/pldmfw/index.rst @@ -0,0 +1,72 @@ +.. SPDX-License-Identifier: GPL-2.0-only + +================================== +PLDM Firmware Flash Update Library +================================== + +``pldmfw`` implements functionality for updating the flash on a device using +the PLDM for Firmware Update standard +. + +.. toctree:: + :maxdepth: 1 + + file-format + driver-ops + +================================== +Overview of the ``pldmfw`` library +================================== + +The ``pldmfw`` library is intended to be used by device drivers for +implementing device flash update based on firmware files following the PLDM +firwmare file format. + +It is implemented using an ops table that allows device drivers to provide +the underlying device specific functionality. + +``pldmfw`` implements logic to parse the packed binary format of the PLDM +firmware file into data structures, and then uses the provided function +operations to determine if the firmware file is a match for the device. If +so, it sends the record and component data to the firmware using the device +specific implementations provided by device drivers. Once the device +firmware indicates that the update may be performed, the firmware data is +sent to the device for programming. + +Parsing the PLDM file +===================== + +The PLDM file format uses packed binary data, with most multi-byte fields +stored in the Little Endian format. Several pieces of data are variable +length, including version strings and the number of records and components. +Due to this, it is not straight forward to index the record, record +descriptors, or components. + +To avoid proliferating access to the packed binary data, the ``pldmfw`` +library parses and extracts this data into simpler structures for ease of +access. + +In order to safely process the firmware file, care is taken to avoid +unaligned access of multi-byte fields, and to properly convert from Little +Endian to CPU host format. Additionally the records, descriptors, and +components are stored in linked lists. + +Performing a flash update +========================= + +To perform a flash update, the ``pldmfw`` module performs the following +steps + +1. Parse the firmware file for record and component information +2. Scan through the records and determine if the device matches any record + in the file. The first matched record will be used. +3. If the matching record provides package data, send this package data to + the device. +4. For each component that the record indicates, send the component data to + the device. For each component, the firmware may respond with an + indication of whether the update is suitable or not. If any component is + not suitable, the update is canceled. +5. For each component, send the binary data to the device firmware for + updating. +6. After all components are programmed, perform any final device-specific + actions to finalize the update. 
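From a device driver's perspective the sequence above is driven by a single call: embed ``struct pldmfw`` in the driver's private structure, fill in an ops table, and pass the firmware object to ``pldmfw_flash_image()``. A minimal sketch (the ``my_dev`` structure and the ``my_*`` op implementations are hypothetical; PCI drivers can reuse ``pldmfw_op_pci_match_record()`` as shown)::

  static const struct pldmfw_ops my_fwu_ops = {
          .match_record         = &pldmfw_op_pci_match_record,
          .send_package_data    = &my_send_package_data,
          .send_component_table = &my_send_component_table,
          .flash_component      = &my_flash_component,
          .finalize_update      = &my_finalize_update,
  };

  static int my_flash_fw(struct my_dev *priv, const struct firmware *fw)
  {
          priv->fwu.ops = &my_fwu_ops;
          priv->fwu.dev = priv->dev;

          /* Parses the package, matches a record against the device and
           * programs each applicable component through the ops above.
           */
          return pldmfw_flash_image(&priv->fwu, fw);
  }
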
diff --git a/MAINTAINERS b/MAINTAINERS index b61f9063faf2..7bc4360b592f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -13604,6 +13604,13 @@ S: Maintained F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml F: drivers/iio/chemical/pms7003.c +PLDMFW LIBRARY +M: Jacob Keller +S: Maintained +F: Documentation/driver-api/pldmfw/ +F: include/linux/pldmfw.h +F: lib/pldmfw/ + PLX DMA DRIVER M: Logan Gunthorpe S: Maintained diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h new file mode 100644 index 000000000000..0fc831338226 --- /dev/null +++ b/include/linux/pldmfw.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#ifndef _PLDMFW_H_ +#define _PLDMFW_H_ + +#include +#include + +#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0) + +#define PLDM_STRING_TYPE_UNKNOWN 0 +#define PLDM_STRING_TYPE_ASCII 1 +#define PLDM_STRING_TYPE_UTF8 2 +#define PLDM_STRING_TYPE_UTF16 3 +#define PLDM_STRING_TYPE_UTF16LE 4 +#define PLDM_STRING_TYPE_UTF16BE 5 + +struct pldmfw_record { + struct list_head entry; + + /* List of descriptor TLVs */ + struct list_head descs; + + /* Component Set version string*/ + const u8 *version_string; + u8 version_type; + u8 version_len; + + /* Package Data length */ + u16 package_data_len; + + /* Bitfield of Device Update Flags */ + u32 device_update_flags; + + /* Package Data block */ + const u8 *package_data; + + /* Bitmap of components applicable to this record */ + unsigned long *component_bitmap; + u16 component_bitmap_len; +}; + +/* Standard descriptor TLV identifiers */ +#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000 +#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001 +#define PLDM_DESC_ID_UUID 0x0002 +#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003 +#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004 +#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100 +#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101 +#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102 +#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103 +#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104 +#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105 +#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF + +struct pldmfw_desc_tlv { + struct list_head entry; + + const u8 *data; + u16 type; + u16 size; +}; + +#define PLDM_CLASSIFICATION_UNKNOWN 0x0000 +#define PLDM_CLASSIFICATION_OTHER 0x0001 +#define PLDM_CLASSIFICATION_DRIVER 0x0002 +#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003 +#define PLDM_CLASSIFICATION_APP_SW 0x0004 +#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005 +#define PLDM_CLASSIFICATION_BIOS 0x0006 +#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007 +#define PLDM_CLASSIFICATION_OS 0x0008 +#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009 +#define PLDM_CLASSIFICATION_FIRMWARE 0x000A +#define PLDM_CLASSIFICATION_CODE 0x000B +#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C +#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D + +#define PLDM_ACTIVATION_METHOD_AUTO BIT(0) +#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1) +#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2) +#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3) +#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4) +#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5) + +#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0) +#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1) + +struct pldmfw_component { + struct list_head entry; + + /* component identifier */ + u16 classification; + u16 identifier; + + u16 options; + u16 activation_method; + + u32 comparison_stamp; + + u32 component_size; + const u8 *component_data; + + /* Component version string */ 
+ const u8 *version_string; + u8 version_type; + u8 version_len; + + /* component index */ + u8 index; + +}; + +/* Transfer flag used for sending components to the firmware */ +#define PLDM_TRANSFER_FLAG_START BIT(0) +#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1) +#define PLDM_TRANSFER_FLAG_END BIT(2) + +struct pldmfw_ops; + +/* Main entry point to the PLDM firmware update engine. Device drivers + * should embed this in a private structure and use container_of to obtain + * a pointer to their own data, used to implement the device specific + * operations. + */ +struct pldmfw { + const struct pldmfw_ops *ops; + struct device *dev; +}; + +bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record); + +/* Operations invoked by the generic PLDM firmware update engine. Used to + * implement device specific logic. + * + * @match_record: check if the device matches the given record. For + * convenience, a standard implementation is provided for PCI devices. + * + * @send_package_data: send the package data associated with the matching + * record to firmware. + * + * @send_component_table: send the component data associated with a given + * component to firmware. Called once for each applicable component. + * + * @flash_component: Flash the data for a given component to the device. + * Called once for each applicable component, after all component tables have + * been sent. + * + * @finalize_update: (optional) Finish the update. Called after all components + * have been flashed. + */ +struct pldmfw_ops { + bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record); + int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length); + int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flag); + int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component); + int (*finalize_update)(struct pldmfw *context); +}; + +int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw); + +#endif diff --git a/lib/Kconfig b/lib/Kconfig index df3f3da95990..3ffbca6998e5 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -676,3 +676,7 @@ config GENERIC_LIB_CMPDI2 config GENERIC_LIB_UCMPDI2 bool + +config PLDMFW + bool + default n diff --git a/lib/Makefile b/lib/Makefile index b1c42c10073b..281888ff713b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -315,6 +315,9 @@ obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o obj-$(CONFIG_OBJAGG) += objagg.o +# pldmfw library +obj-$(CONFIG_PLDMFW) += pldmfw/ + # KUnit tests obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/pldmfw/Makefile b/lib/pldmfw/Makefile new file mode 100644 index 000000000000..99ad10711abe --- /dev/null +++ b/lib/pldmfw/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_PLDMFW) += pldmfw.o diff --git a/lib/pldmfw/pldmfw.c b/lib/pldmfw/pldmfw.c new file mode 100644 index 000000000000..e5d4b3b2af81 --- /dev/null +++ b/lib/pldmfw/pldmfw.c @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pldmfw_private.h" + +/* Internal structure used to store details about the PLDM image file as it is + * being validated and processed. 
+ */ +struct pldmfw_priv { + struct pldmfw *context; + const struct firmware *fw; + + /* current offset of firmware image */ + size_t offset; + + struct list_head records; + struct list_head components; + + /* PLDM Firmware Package Header */ + const struct __pldm_header *header; + u16 total_header_size; + + /* length of the component bitmap */ + u16 component_bitmap_len; + u16 bitmap_size; + + /* Start of the component image information */ + u16 component_count; + const u8 *component_start; + + /* Start pf the firmware device id records */ + const u8 *record_start; + u8 record_count; + + /* The CRC at the end of the package header */ + u32 header_crc; + + struct pldmfw_record *matching_record; +}; + +/** + * pldm_check_fw_space - Verify that the firmware image has space left + * @data: pointer to private data + * @offset: offset to start from + * @length: length to check for + * + * Verify that the firmware data can hold a chunk of bytes with the specified + * offset and length. + * + * Returns: zero on success, or -EFAULT if the image does not have enough + * space left to fit the expected length. + */ +static int +pldm_check_fw_space(struct pldmfw_priv *data, size_t offset, size_t length) +{ + size_t expected_size = offset + length; + struct device *dev = data->context->dev; + + if (data->fw->size < expected_size) { + dev_dbg(dev, "Firmware file size smaller than expected. Got %zu bytes, needed %zu bytes\n", + data->fw->size, expected_size); + return -EFAULT; + } + + return 0; +} + +/** + * pldm_move_fw_offset - Move the current firmware offset forward + * @data: pointer to private data + * @bytes_to_move: number of bytes to move the offset forward by + * + * Check that there is enough space past the current offset, and then move the + * offset forward by this ammount. + * + * Returns: zero on success, or -EFAULT if the image is too small to fit the + * expected length. + */ +static int +pldm_move_fw_offset(struct pldmfw_priv *data, size_t bytes_to_move) +{ + int err; + + err = pldm_check_fw_space(data, data->offset, bytes_to_move); + if (err) + return err; + + data->offset += bytes_to_move; + + return 0; +} + +/** + * pldm_parse_header - Validate and extract details about the PLDM header + * @data: pointer to private data + * + * Performs initial basic verification of the PLDM image, up to the first + * firmware record. + * + * This includes the following checks and extractions + * + * * Verify that the UUID at the start of the header matches the expected + * value as defined in the DSP0267 PLDM specification + * * Check that the revision is 0x01 + * * Extract the total header_size and verify that the image is large enough + * to contain at least the length of this header + * * Extract the size of the component bitmap length + * * Extract a pointer to the start of the record area + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_header(struct pldmfw_priv *data) +{ + const struct __pldmfw_record_area *record_area; + struct device *dev = data->context->dev; + const struct __pldm_header *header; + size_t header_size; + int err; + + err = pldm_move_fw_offset(data, sizeof(*header)); + if (err) + return err; + + header = (const struct __pldm_header *)data->fw->data; + data->header = header; + + if (!uuid_equal(&header->id, &pldm_firmware_header_id)) { + dev_dbg(dev, "Invalid package header identifier. 
Expected UUID %pUB, but got %pUB\n", + &pldm_firmware_header_id, &header->id); + return -EINVAL; + } + + if (header->revision != PACKAGE_HEADER_FORMAT_REVISION) { + dev_dbg(dev, "Invalid package header revision. Expected revision %u but got %u\n", + PACKAGE_HEADER_FORMAT_REVISION, header->revision); + return -EOPNOTSUPP; + } + + data->total_header_size = get_unaligned_le16(&header->size); + header_size = data->total_header_size - sizeof(*header); + + err = pldm_check_fw_space(data, data->offset, header_size); + if (err) + return err; + + data->component_bitmap_len = + get_unaligned_le16(&header->component_bitmap_len); + + if (data->component_bitmap_len % 8 != 0) { + dev_dbg(dev, "Invalid component bitmap length. The length is %u, which is not a multiple of 8\n", + data->component_bitmap_len); + return -EINVAL; + } + + data->bitmap_size = data->component_bitmap_len / 8; + + err = pldm_move_fw_offset(data, header->version_len); + if (err) + return err; + + /* extract a pointer to the record area, which just follows the main + * PLDM header data. + */ + record_area = (const struct __pldmfw_record_area *)(data->fw->data + + data->offset); + + err = pldm_move_fw_offset(data, sizeof(*record_area)); + if (err) + return err; + + data->record_count = record_area->record_count; + data->record_start = record_area->records; + + return 0; +} + +/** + * pldm_check_desc_tlv_len - Check that the length matches expectation + * @data: pointer to image details + * @type: the descriptor type + * @size: the length from the descriptor header + * + * If the descriptor type is one of the documented descriptor types according + * to the standard, verify that the provided length matches. + * + * If the type is not recognized or is VENDOR_DEFINED, return zero. + * + * Returns: zero on success, or -EINVAL if the specified size of a standard + * TLV does not match the expected value defined for that TLV. + */ +static int +pldm_check_desc_tlv_len(struct pldmfw_priv *data, u16 type, u16 size) +{ + struct device *dev = data->context->dev; + u16 expected_size; + + switch (type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + case PLDM_DESC_ID_PCI_DEVICE_ID: + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + case PLDM_DESC_ID_PCI_SUBDEV_ID: + expected_size = 2; + break; + case PLDM_DESC_ID_PCI_REVISION_ID: + expected_size = 1; + break; + case PLDM_DESC_ID_PNP_VENDOR_ID: + expected_size = 3; + break; + case PLDM_DESC_ID_IANA_ENTERPRISE_ID: + case PLDM_DESC_ID_ACPI_VENDOR_ID: + case PLDM_DESC_ID_PNP_PRODUCT_ID: + case PLDM_DESC_ID_ACPI_PRODUCT_ID: + expected_size = 4; + break; + case PLDM_DESC_ID_UUID: + expected_size = 16; + break; + case PLDM_DESC_ID_VENDOR_DEFINED: + return 0; + default: + /* Do not report an error on an unexpected TLV */ + dev_dbg(dev, "Found unrecognized TLV type 0x%04x\n", type); + return 0; + } + + if (size != expected_size) { + dev_dbg(dev, "Found TLV type 0x%04x with unexpected length. Got %u bytes, but expected %u bytes\n", + type, size, expected_size); + return -EINVAL; + } + + return 0; +} + +/** + * pldm_parse_desc_tlvs - Check and skip past a number of TLVs + * @data: pointer to private data + * @record: pointer to the record this TLV belongs too + * @desc_count: descriptor count + * + * From the current offset, read and extract the descriptor TLVs, updating the + * current offset each time. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +pldm_parse_desc_tlvs(struct pldmfw_priv *data, struct pldmfw_record *record, u8 desc_count) +{ + const struct __pldmfw_desc_tlv *__desc; + const u8 *desc_start; + u8 i; + + desc_start = data->fw->data + data->offset; + + pldm_for_each_desc_tlv(i, __desc, desc_start, desc_count) { + struct pldmfw_desc_tlv *desc; + int err; + u16 type, size; + + err = pldm_move_fw_offset(data, sizeof(*__desc)); + if (err) + return err; + + type = get_unaligned_le16(&__desc->type); + + /* According to DSP0267, this only includes the data field */ + size = get_unaligned_le16(&__desc->size); + + err = pldm_check_desc_tlv_len(data, type, size); + if (err) + return err; + + /* check that we have space and move the offset forward */ + err = pldm_move_fw_offset(data, size); + if (err) + return err; + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + desc->type = type; + desc->size = size; + desc->data = __desc->data; + + list_add_tail(&desc->entry, &record->descs); + } + + return 0; +} + +/** + * pldm_parse_one_record - Verify size of one PLDM record + * @data: pointer to image details + * @__record: pointer to the record to check + * + * This function checks that the record size does not exceed either the size + * of the firmware file or the total length specified in the header section. + * + * It also verifies that the recorded length of the start of the record + * matches the size calculated by adding the static structure length, the + * component bitmap length, the version string length, the length of all + * descriptor TLVs, and the length of the package data. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +pldm_parse_one_record(struct pldmfw_priv *data, + const struct __pldmfw_record_info *__record) +{ + struct pldmfw_record *record; + size_t measured_length; + int err; + const u8 *bitmap_ptr; + u16 record_len; + int i; + + /* Make a copy and insert it into the record list */ + record = kzalloc(sizeof(*record), GFP_KERNEL); + if (!record) + return -ENOMEM; + + INIT_LIST_HEAD(&record->descs); + list_add_tail(&record->entry, &data->records); + + /* Then check that we have space and move the offset */ + err = pldm_move_fw_offset(data, sizeof(*__record)); + if (err) + return err; + + record_len = get_unaligned_le16(&__record->record_len); + record->package_data_len = get_unaligned_le16(&__record->package_data_len); + record->version_len = __record->version_len; + record->version_type = __record->version_type; + + bitmap_ptr = data->fw->data + data->offset; + + /* check that we have space for the component bitmap length */ + err = pldm_move_fw_offset(data, data->bitmap_size); + if (err) + return err; + + record->component_bitmap_len = data->component_bitmap_len; + record->component_bitmap = bitmap_zalloc(record->component_bitmap_len, + GFP_KERNEL); + if (!record->component_bitmap) + return -ENOMEM; + + for (i = 0; i < data->bitmap_size; i++) + bitmap_set_value8(record->component_bitmap, bitmap_ptr[i], i * 8); + + record->version_string = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, record->version_len); + if (err) + return err; + + /* Scan through the descriptor TLVs and find the end */ + err = pldm_parse_desc_tlvs(data, record, __record->descriptor_count); + if (err) + return err; + + record->package_data = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, record->package_data_len); + if (err) + return err; + + measured_length = data->offset - ((const u8 *)__record - data->fw->data); + 
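+ /* The number of bytes consumed while walking this record (fixed header,
+ * component bitmap, version string, descriptor TLVs, and package data)
+ * must equal the RecordLength reported in the record header; a mismatch
+ * means the record is malformed or truncated.
+ */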
if (measured_length != record_len) { + dev_dbg(data->context->dev, "Unexpected record length. Measured record length is %zu bytes, expected length is %u bytes\n", + measured_length, record_len); + return -EFAULT; + } + + return 0; +} + +/** + * pldm_parse_records - Locate the start of the component area + * @data: pointer to private data + * + * Extract the record count, and loop through each record, searching for the + * component area. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_records(struct pldmfw_priv *data) +{ + const struct __pldmfw_component_area *component_area; + const struct __pldmfw_record_info *record; + int err; + u8 i; + + pldm_for_each_record(i, record, data->record_start, data->record_count) { + err = pldm_parse_one_record(data, record); + if (err) + return err; + } + + /* Extract a pointer to the component area, which just follows the + * PLDM device record data. + */ + component_area = (const struct __pldmfw_component_area *)(data->fw->data + data->offset); + + err = pldm_move_fw_offset(data, sizeof(*component_area)); + if (err) + return err; + + data->component_count = + get_unaligned_le16(&component_area->component_image_count); + data->component_start = component_area->components; + + return 0; +} + +/** + * pldm_parse_components - Locate the CRC header checksum + * @data: pointer to private data + * + * Extract the component count, and find the pointer to the component area. + * Scan through each component searching for the end, which should point to + * the package header checksum. + * + * Extract the package header CRC and save it for verification. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_components(struct pldmfw_priv *data) +{ + const struct __pldmfw_component_info *__component; + struct device *dev = data->context->dev; + const u8 *header_crc_ptr; + int err; + u8 i; + + pldm_for_each_component(i, __component, data->component_start, data->component_count) { + struct pldmfw_component *component; + u32 offset, size; + + err = pldm_move_fw_offset(data, sizeof(*__component)); + if (err) + return err; + + err = pldm_move_fw_offset(data, __component->version_len); + if (err) + return err; + + offset = get_unaligned_le32(&__component->location_offset); + size = get_unaligned_le32(&__component->size); + + err = pldm_check_fw_space(data, offset, size); + if (err) + return err; + + component = kzalloc(sizeof(*component), GFP_KERNEL); + if (!component) + return -ENOMEM; + + component->index = i; + component->classification = get_unaligned_le16(&__component->classification); + component->identifier = get_unaligned_le16(&__component->identifier); + component->comparison_stamp = get_unaligned_le32(&__component->comparison_stamp); + component->options = get_unaligned_le16(&__component->options); + component->activation_method = get_unaligned_le16(&__component->activation_method); + component->version_type = __component->version_type; + component->version_len = __component->version_len; + component->version_string = __component->version_string; + component->component_data = data->fw->data + offset; + component->component_size = size; + + list_add_tail(&component->entry, &data->components); + } + + header_crc_ptr = data->fw->data + data->offset; + + err = pldm_move_fw_offset(data, sizeof(data->header_crc)); + if (err) + return err; + + /* Make sure that we reached the expected offset */ + if (data->offset != data->total_header_size) { + dev_dbg(dev, "Invalid firmware 
header size. Expected %u but got %zu\n", + data->total_header_size, data->offset); + return -EFAULT; + } + + data->header_crc = get_unaligned_le32(header_crc_ptr); + + return 0; +} + +/** + * pldm_verify_header_crc - Verify that the CRC in the header matches + * @data: pointer to private data + * + * Calculates the 32-bit CRC using the standard IEEE 802.3 CRC polynomial and + * compares it to the value stored in the header. + * + * Returns: zero on success if the CRC matches, or -EBADMSG on an invalid CRC. + */ +static int pldm_verify_header_crc(struct pldmfw_priv *data) +{ + struct device *dev = data->context->dev; + u32 calculated_crc; + size_t length; + + /* Calculate the 32-bit CRC of the header header contents up to but + * not including the checksum. Note that the Linux crc32_le function + * does not perform an expected final XOR. + */ + length = data->offset - sizeof(data->header_crc); + calculated_crc = crc32_le(~0, data->fw->data, length) ^ ~0; + + if (calculated_crc != data->header_crc) { + dev_dbg(dev, "Invalid CRC in firmware header. Got 0x%08x but expected 0x%08x\n", + calculated_crc, data->header_crc); + return -EBADMSG; + } + + return 0; +} + +/** + * pldmfw_free_priv - Free memory allocated while parsing the PLDM image + * @data: pointer to the PLDM data structure + * + * Loops through and clears all allocated memory associated with each + * allocated descriptor, record, and component. + */ +static void pldmfw_free_priv(struct pldmfw_priv *data) +{ + struct pldmfw_component *component, *c_safe; + struct pldmfw_record *record, *r_safe; + struct pldmfw_desc_tlv *desc, *d_safe; + + list_for_each_entry_safe(component, c_safe, &data->components, entry) { + list_del(&component->entry); + kfree(component); + } + + list_for_each_entry_safe(record, r_safe, &data->records, entry) { + list_for_each_entry_safe(desc, d_safe, &record->descs, entry) { + list_del(&desc->entry); + kfree(desc); + } + + if (record->component_bitmap) { + bitmap_free(record->component_bitmap); + record->component_bitmap = NULL; + } + + list_del(&record->entry); + kfree(record); + } +} + +/** + * pldm_parse_image - parse and extract details from PLDM image + * @data: pointer to private data + * + * Verify that the firmware file contains valid data for a PLDM firmware + * file. Extract useful pointers and data from the firmware file and store + * them in the data structure. + * + * The PLDM firmware file format is defined in DMTF DSP0267 1.0.0. Care + * should be taken to use get_unaligned_le* when accessing data from the + * pointers in data. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_parse_image(struct pldmfw_priv *data) +{ + int err; + + if (WARN_ON(!(data->context->dev && data->fw->data && data->fw->size))) + return -EINVAL; + + err = pldm_parse_header(data); + if (err) + return err; + + err = pldm_parse_records(data); + if (err) + return err; + + err = pldm_parse_components(data); + if (err) + return err; + + return pldm_verify_header_crc(data); +} + +/* these are u32 so that we can store PCI_ANY_ID */ +struct pldm_pci_record_id { + int vendor; + int device; + int subsystem_vendor; + int subsystem_device; +}; + +/** + * pldmfw_op_pci_match_record - Check if a PCI device matches the record + * @context: PLDM fw update structure + * @record: list of records extracted from the PLDM image + * + * Determine of the PCI device associated with this device matches the record + * data provided. 
+ * + * Searches the descriptor TLVs and extracts the relevant descriptor data into + * a pldm_pci_record_id. This is then compared against the PCI device ID + * information. + * + * Returns: true if the device matches the record, false otherwise. + */ +bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ + struct pci_dev *pdev = to_pci_dev(context->dev); + struct pldm_pci_record_id id = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subsystem_vendor = PCI_ANY_ID, + .subsystem_device = PCI_ANY_ID, + }; + struct pldmfw_desc_tlv *desc; + + list_for_each_entry(desc, &record->descs, entry) { + u16 value; + int *ptr; + + switch (desc->type) { + case PLDM_DESC_ID_PCI_VENDOR_ID: + ptr = &id.vendor; + break; + case PLDM_DESC_ID_PCI_DEVICE_ID: + ptr = &id.device; + break; + case PLDM_DESC_ID_PCI_SUBVENDOR_ID: + ptr = &id.subsystem_vendor; + break; + case PLDM_DESC_ID_PCI_SUBDEV_ID: + ptr = &id.subsystem_device; + break; + default: + /* Skip unrelated TLVs */ + continue; + } + + value = get_unaligned_le16(desc->data); + /* A value of zero for one of the descriptors is sometimes + * used when the record should ignore this field when matching + * device. For example if the record applies to any subsystem + * device or vendor. + */ + if (value) + *ptr = (int)value; + else + *ptr = PCI_ANY_ID; + } + + if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && + (id.device == PCI_ANY_ID || id.device == pdev->device) && + (id.subsystem_vendor == PCI_ANY_ID || id.subsystem_vendor == pdev->subsystem_vendor) && + (id.subsystem_device == PCI_ANY_ID || id.subsystem_device == pdev->subsystem_device)) + return true; + else + return false; +} +EXPORT_SYMBOL(pldmfw_op_pci_match_record); + +/** + * pldm_find_matching_record - Find the first matching PLDM record + * @data: pointer to private data + * + * Search through PLDM records and find the first matching entry. It is + * expected that only one entry matches. + * + * Store a pointer to the matching record, if found. + * + * Returns: zero on success, or -ENOENT if no matching record is found. + */ +static int pldm_find_matching_record(struct pldmfw_priv *data) +{ + struct pldmfw_record *record; + + list_for_each_entry(record, &data->records, entry) { + if (data->context->ops->match_record(data->context, record)) { + data->matching_record = record; + return 0; + } + } + + return -ENOENT; +} + +/** + * pldm_send_package_data - Send firmware the package data for the record + * @data: pointer to private data + * + * Send the package data associated with the matching record to the firmware, + * using the send_pkg_data operation. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +pldm_send_package_data(struct pldmfw_priv *data) +{ + struct pldmfw_record *record = data->matching_record; + const struct pldmfw_ops *ops = data->context->ops; + + return ops->send_package_data(data->context, record->package_data, + record->package_data_len); +} + +/** + * pldm_send_component_tables - Send component table information to firmware + * @data: pointer to private data + * + * Loop over each component, sending the applicable components to the firmware + * via the send_component_table operation. + * + * Returns: zero on success, or a negative error code on failure. 
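+ *
+ * Each table is sent with a transfer flag marking whether it is the first,
+ * the last, or an intermediate entry in the sequence (first and last are
+ * combined when only a single applicable component exists).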
+ */ +static int +pldm_send_component_tables(struct pldmfw_priv *data) +{ + unsigned long *bitmap = data->matching_record->component_bitmap; + struct pldmfw_component *component; + int err; + + list_for_each_entry(component, &data->components, entry) { + u8 index = component->index, transfer_flag = 0; + + /* Skip components which are not intended for this device */ + if (!test_bit(index, bitmap)) + continue; + + /* determine whether this is the start, middle, end, or both + * the start and end of the component tables + */ + if (index == find_first_bit(bitmap, data->component_bitmap_len)) + transfer_flag |= PLDM_TRANSFER_FLAG_START; + if (index == find_last_bit(bitmap, data->component_bitmap_len)) + transfer_flag |= PLDM_TRANSFER_FLAG_END; + if (!transfer_flag) + transfer_flag = PLDM_TRANSFER_FLAG_MIDDLE; + + err = data->context->ops->send_component_table(data->context, + component, + transfer_flag); + if (err) + return err; + } + + return 0; +} + +/** + * pldm_flash_components - Program each component to device flash + * @data: pointer to private data + * + * Loop through each component that is active for the matching device record, + * and send it to the device driver for flashing. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int pldm_flash_components(struct pldmfw_priv *data) +{ + unsigned long *bitmap = data->matching_record->component_bitmap; + struct pldmfw_component *component; + int err; + + list_for_each_entry(component, &data->components, entry) { + u8 index = component->index; + + /* Skip components which are not intended for this device */ + if (!test_bit(index, bitmap)) + continue; + + err = data->context->ops->flash_component(data->context, component); + if (err) + return err; + } + + return 0; +} + +/** + * pldm_finalize_update - Finalize the device flash update + * @data: pointer to private data + * + * Tell the device driver to perform any remaining logic to complete the + * device update. + * + * Returns: zero on success, or a PLFM_FWU error indicating the reason for + * failure. + */ +static int pldm_finalize_update(struct pldmfw_priv *data) +{ + if (data->context->ops->finalize_update) + return data->context->ops->finalize_update(data->context); + + return 0; +} + +/** + * pldmfw_flash_image - Write a PLDM-formatted firmware image to the device + * @context: ops and data for firmware update + * @fw: firmware object pointing to the relevant firmware file to program + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the device firmware. Extract and write the flash data for each of the + * components indicated in the firmware file. + * + * Returns: zero on success, or a negative error code on failure. 
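+ *
+ * Any memory allocated while parsing the image is released before
+ * returning, whether the update succeeds or fails.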
+ */ +int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw) +{ + struct pldmfw_priv *data; + int err; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + INIT_LIST_HEAD(&data->records); + INIT_LIST_HEAD(&data->components); + + data->fw = fw; + data->context = context; + + err = pldm_parse_image(data); + if (err) + goto out_release_data; + + err = pldm_find_matching_record(data); + if (err) + goto out_release_data; + + err = pldm_send_package_data(data); + if (err) + goto out_release_data; + + err = pldm_send_component_tables(data); + if (err) + goto out_release_data; + + err = pldm_flash_components(data); + if (err) + goto out_release_data; + + err = pldm_finalize_update(data); + +out_release_data: + pldmfw_free_priv(data); + kfree(data); + + return err; +} +EXPORT_SYMBOL(pldmfw_flash_image); + +MODULE_AUTHOR("Jacob Keller "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("PLDM firmware flash update library"); diff --git a/lib/pldmfw/pldmfw_private.h b/lib/pldmfw/pldmfw_private.h new file mode 100644 index 000000000000..687ef2200692 --- /dev/null +++ b/lib/pldmfw/pldmfw_private.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#ifndef _PLDMFW_PRIVATE_H_ +#define _PLDMFW_PRIVATE_H_ + +/* The following data structures define the layout of a firmware binary + * following the "PLDM For Firmware Update Specification", DMTF standard + * #DSP0267. + * + * pldmfw.c uses these structures to implement a simple engine that will parse + * a fw binary file in this format and perform a firmware update for a given + * device. + * + * Due to the variable sized data layout, alignment of fields within these + * structures is not guaranteed when reading. For this reason, all multi-byte + * field accesses should be done using the unaligned access macros. + * Additionally, the standard specifies that multi-byte fields are in + * LittleEndian format. + * + * The structure definitions are not made public, in order to keep direct + * accesses within code that is prepared to deal with the limitation of + * unaligned access. + */ + +/* UUID for PLDM firmware packages: f018878c-cb7d-4943-9800-a02f059aca02 */ +static const uuid_t pldm_firmware_header_id = + UUID_INIT(0xf018878c, 0xcb7d, 0x4943, + 0x98, 0x00, 0xa0, 0x2f, 0x05, 0x9a, 0xca, 0x02); + +/* Revision number of the PLDM header format this code supports */ +#define PACKAGE_HEADER_FORMAT_REVISION 0x01 + +/* timestamp104 structure defined in PLDM Base specification */ +#define PLDM_TIMESTAMP_SIZE 13 +struct __pldm_timestamp { + u8 b[PLDM_TIMESTAMP_SIZE]; +} __packed __aligned(1); + +/* Package Header Information */ +struct __pldm_header { + uuid_t id; /* PackageHeaderIdentifier */ + u8 revision; /* PackageHeaderFormatRevision */ + __le16 size; /* PackageHeaderSize */ + struct __pldm_timestamp release_date; /* PackageReleaseDateTime */ + __le16 component_bitmap_len; /* ComponentBitmapBitLength */ + u8 version_type; /* PackageVersionStringType */ + u8 version_len; /* PackageVersionStringLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * PackageVersionString, length is version_len. 
+ * + * The total size of this section is + * sizeof(pldm_header) + version_len; + */ + u8 version_string[]; /* PackageVersionString */ +} __packed __aligned(1); + +/* Firmware Device ID Record */ +struct __pldmfw_record_info { + __le16 record_len; /* RecordLength */ + u8 descriptor_count; /* DescriptorCount */ + __le32 device_update_flags; /* DeviceUpdateOptionFlags */ + u8 version_type; /* ComponentImageSetVersionType */ + u8 version_len; /* ComponentImageSetVersionLength */ + __le16 package_data_len; /* FirmwareDevicePackageDataLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * ApplicableComponents, length is component_bitmap_len from header + * ComponentImageSetVersionString, length is version_len + * RecordDescriptors, a series of TLVs with 16bit type and length + * FirmwareDevicePackageData, length is package_data_len + * + * The total size of each record is + * sizeof(pldmfw_record_info) + + * component_bitmap_len (converted to bytes!) + + * version_len + + * + + * package_data_len + */ + u8 variable_record_data[]; +} __packed __aligned(1); + +/* Firmware Descriptor Definition */ +struct __pldmfw_desc_tlv { + __le16 type; /* DescriptorType */ + __le16 size; /* DescriptorSize */ + u8 data[]; /* DescriptorData */ +} __aligned(1); + +/* Firmware Device Identification Area */ +struct __pldmfw_record_area { + u8 record_count; /* DeviceIDRecordCount */ + /* This is not a struct type because the size of each record varies */ + u8 records[]; +} __aligned(1); + +/* Individual Component Image Information */ +struct __pldmfw_component_info { + __le16 classification; /* ComponentClassfication */ + __le16 identifier; /* ComponentIdentifier */ + __le32 comparison_stamp; /* ComponentComparisonStamp */ + __le16 options; /* componentOptions */ + __le16 activation_method; /* RequestedComponentActivationMethod */ + __le32 location_offset; /* ComponentLocationOffset */ + __le32 size; /* ComponentSize */ + u8 version_type; /* ComponentVersionStringType */ + u8 version_len; /* ComponentVersionStringLength */ + + /* + * DSP0267 also includes the following variable length fields at the + * end of this structure: + * + * ComponentVersionString, length is version_len + * + * The total size of this section is + * sizeof(pldmfw_component_info) + version_len; + */ + u8 version_string[]; /* ComponentVersionString */ +} __packed __aligned(1); + +/* Component Image Information Area */ +struct __pldmfw_component_area { + __le16 component_image_count; + /* This is not a struct type because the component size varies */ + u8 components[]; +} __aligned(1); + +/** + * pldm_first_desc_tlv + * @start: byte offset of the start of the descriptor TLVs + * + * Converts the starting offset of the descriptor TLVs into a pointer to the + * first descriptor. + */ +#define pldm_first_desc_tlv(start) \ + ((const struct __pldmfw_desc_tlv *)(start)) + +/** + * pldm_next_desc_tlv + * @desc: pointer to a descriptor TLV + * + * Finds the pointer to the next descriptor following a given descriptor + */ +#define pldm_next_desc_tlv(desc) \ + ((const struct __pldmfw_desc_tlv *)((desc)->data + \ + get_unaligned_le16(&(desc)->size))) + +/** + * pldm_for_each_desc_tlv + * @i: variable to store descriptor index + * @desc: variable to store descriptor pointer + * @start: byte offset of the start of the descriptors + * @count: the number of descriptors + * + * for loop macro to iterate over all of the descriptors of a given PLDM + * record. 
+ */ +#define pldm_for_each_desc_tlv(i, desc, start, count) \ + for ((i) = 0, (desc) = pldm_first_desc_tlv(start); \ + (i) < (count); \ + (i)++, (desc) = pldm_next_desc_tlv(desc)) + +/** + * pldm_first_record + * @start: byte offset of the start of the PLDM records + * + * Converts a starting offset of the PLDM records into a pointer to the first + * record. + */ +#define pldm_first_record(start) \ + ((const struct __pldmfw_record_info *)(start)) + +/** + * pldm_next_record + * @record: pointer to a PLDM record + * + * Finds a pointer to the next record following a given record + */ +#define pldm_next_record(record) \ + ((const struct __pldmfw_record_info *) \ + ((const u8 *)(record) + get_unaligned_le16(&(record)->record_len))) + +/** + * pldm_for_each_record + * @i: variable to store record index + * @record: variable to store record pointer + * @start: byte offset of the start of the records + * @count: the number of records + * + * for loop macro to iterate over all of the records of a PLDM file. + */ +#define pldm_for_each_record(i, record, start, count) \ + for ((i) = 0, (record) = pldm_first_record(start); \ + (i) < (count); \ + (i)++, (record) = pldm_next_record(record)) + +/** + * pldm_first_component + * @start: byte offset of the start of the PLDM components + * + * Convert a starting offset of the PLDM components into a pointer to the + * first component + */ +#define pldm_first_component(start) \ + ((const struct __pldmfw_component_info *)(start)) + +/** + * pldm_next_component + * @component: pointer to a PLDM component + * + * Finds a pointer to the next component following a given component + */ +#define pldm_next_component(component) \ + ((const struct __pldmfw_component_info *)((component)->version_string + \ + (component)->version_len)) + +/** + * pldm_for_each_component + * @i: variable to store component index + * @component: variable to store component pointer + * @start: byte offset to the start of the first component + * @count: the number of components + * + * for loop macro to iterate over all of the components of a PLDM file. + */ +#define pldm_for_each_component(i, component, start, count) \ + for ((i) = 0, (component) = pldm_first_component(start); \ + (i) < (count); \ + (i)++, (component) = pldm_next_component(component)) + +#endif From de9b277ee0321100ee3299a79e6f09dec56aa05e Mon Sep 17 00:00:00 2001 From: Jacek Naczyk Date: Thu, 23 Jul 2020 17:22:00 -0700 Subject: [PATCH 1729/2056] ice: Add support for unified NVM update flow capability Extends function parsing response from Discover Device Capability AQC to check if the device supports unified NVM update flow. Signed-off-by: Jacek Naczyk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 1 + drivers/net/ethernet/intel/ice/ice_common.c | 7 +++++++ drivers/net/ethernet/intel/ice/ice_type.h | 3 +++ 3 files changed, 11 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b363e0223670..b97f50e60feb 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -109,6 +109,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_MSIX 0x0043 #define ICE_AQC_CAPS_FD 0x0045 #define ICE_AQC_CAPS_MAX_MTU 0x0047 +#define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; u8 minor_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index c72cc77b8d67..249526f217ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1857,6 +1857,13 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; + case ICE_AQC_CAPS_NVM_MGMT: + caps->nvm_unified_update = + (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? + true : false; + ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix, + caps->nvm_unified_update); + break; case ICE_AQC_CAPS_MAX_MTU: caps->max_mtu = number; ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n", diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 08c616d9fffd..8eb1f184a886 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -244,6 +244,9 @@ struct ice_hw_common_caps { u8 rss_table_entry_width; /* RSS Entry width in bits */ u8 dcb; + + bool nvm_unified_update; +#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) }; /* Function specific capabilities */ From 544cd2ac1328451fdbbd60e5cce52aa1b6fe11a8 Mon Sep 17 00:00:00 2001 From: "Cudzilo, Szymon T" Date: Thu, 23 Jul 2020 17:22:01 -0700 Subject: [PATCH 1730/2056] ice: Add AdminQ commands for FW update Add structures, identifiers, and helper functions for several AdminQ commands related to performing a firmware update for the ice hardware. These will be used in future code for implementing the devlink .flash_update handler. Signed-off-by: Cudzilo, Szymon T Signed-off-by: Jacob Keller Signed-off-by: David S. 
Miller --- .../net/ethernet/intel/ice/ice_adminq_cmd.h | 76 +++++++ drivers/net/ethernet/intel/ice/ice_common.h | 2 - drivers/net/ethernet/intel/ice/ice_nvm.c | 186 ++++++++++++++++++ drivers/net/ethernet/intel/ice/ice_nvm.h | 16 ++ drivers/net/ethernet/intel/ice/ice_type.h | 3 + 5 files changed, 281 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b97f50e60feb..b2ec792e6052 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1299,7 +1299,14 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) +#define ICE_AQC_NVM_FACTORY_DEFAULT (2 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */ +#define ICE_AQC_NVM_ACTIV_SEL_OROM BIT(4) +#define ICE_AQC_NVM_ACTIV_SEL_NETLIST BIT(5) +#define ICE_AQC_NVM_SPECIAL_UPDATE BIT(6) +#define ICE_AQC_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */ +#define ICE_AQC_NVM_ACTIV_SEL_MASK ICE_M(0x7, 3) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) __le16 module_typeid; __le16 length; @@ -1348,6 +1355,67 @@ struct ice_aqc_nvm_checksum { #define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA #define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F +/* Used for NVM Set Package Data command - 0x070A */ +struct ice_aqc_nvm_pkg_data { + u8 reserved[3]; + u8 cmd_flags; +#define ICE_AQC_NVM_PKG_DELETE BIT(0) /* used for command call */ +#define ICE_AQC_NVM_PKG_SKIPPED BIT(0) /* used for command response */ + + u32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +/* Used for Pass Component Table command - 0x070B */ +struct ice_aqc_nvm_pass_comp_tbl { + u8 component_response; /* Response only */ +#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 +#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 +#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 + u8 component_response_code; /* Response only */ +#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 +#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 +#define ICE_AQ_NVM_PASS_COMP_STAMP_LOWER 0x2 +#define ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE 0x3 +#define ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE 0x4 +#define ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE 0x5 +#define ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE 0x6 +#define ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE 0x7 +#define ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE 0x8 +#define ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE 0xA +#define ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE 0xB + u8 reserved; + u8 transfer_flag; +#define ICE_AQ_NVM_PASS_COMP_TBL_START 0x1 +#define ICE_AQ_NVM_PASS_COMP_TBL_MIDDLE 0x2 +#define ICE_AQ_NVM_PASS_COMP_TBL_END 0x4 +#define ICE_AQ_NVM_PASS_COMP_TBL_START_AND_END 0x5 + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_nvm_comp_tbl { + __le16 comp_class; +#define NVM_COMP_CLASS_ALL_FW 0x000A + + __le16 comp_id; +#define NVM_COMP_ID_OROM 0x5 +#define NVM_COMP_ID_NVM 0x6 +#define NVM_COMP_ID_NETLIST 0x8 + + u8 comp_class_idx; +#define FWU_COMP_CLASS_IDX_NOT_USE 0x0 + + __le32 comp_cmp_stamp; + u8 cvs_type; +#define NVM_CVS_TYPE_ASCII 0x1 + + u8 cvs_len; + u8 cvs[]; /* Component Version String */ +} __packed; + /** * Send to PF command (indirect 0x0801) ID is only used by PF * @@ -1795,6 +1863,8 @@ struct 
ice_aq_desc { struct ice_aqc_rl_profile rl_profile; struct ice_aqc_nvm nvm; struct ice_aqc_nvm_checksum nvm_checksum; + struct ice_aqc_nvm_pkg_data pkg_data; + struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl; struct ice_aqc_pf_vf_msg virt; struct ice_aqc_lldp_get_mib lldp_get_mib; struct ice_aqc_lldp_set_mib_change lldp_set_event; @@ -1923,7 +1993,13 @@ enum ice_adminq_opc { /* NVM commands */ ice_aqc_opc_nvm_read = 0x0701, + ice_aqc_opc_nvm_erase = 0x0702, + ice_aqc_opc_nvm_write = 0x0703, ice_aqc_opc_nvm_checksum = 0x0706, + ice_aqc_opc_nvm_write_activate = 0x0707, + ice_aqc_opc_nvm_update_empr = 0x0709, + ice_aqc_opc_nvm_pkg_data = 0x070A, + ice_aqc_opc_nvm_pass_component_tbl = 0x070B, /* PF/VF mailbox commands */ ice_mbx_opc_send_msg_to_pf = 0x0801, diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 33a681a75439..d8b33d7285c0 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -11,8 +11,6 @@ #include "ice_switch.h" #include -enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); - enum ice_status ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 7c2a06892bbb..5903a36763de 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -107,6 +107,76 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, return status; } +/** + * ice_aq_update_nvm + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @offset: byte offset from the module beginning + * @length: length of the section to be written (in bytes from the offset) + * @data: command buffer (size [bytes] = length) + * @last_command: tells if this is the last command in a series + * @command_flags: command parameters + * @cd: pointer to command details structure or NULL + * + * Update the NVM using the admin queue commands (0x0703) + */ +enum ice_status +ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, u8 command_flags, + struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + cmd = &desc.params.nvm; + + /* In offset the highest byte must be zeroed. */ + if (offset & 0xFF000000) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write); + + cmd->cmd_flags |= command_flags; + + /* If this is the last command in a series, set the proper flag. 
*/ + if (last_command) + cmd->cmd_flags |= ICE_AQC_NVM_LAST_CMD; + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->offset_low = cpu_to_le16(offset & 0xFFFF); + cmd->offset_high = (offset >> 16) & 0xFF; + cmd->length = cpu_to_le16(length); + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/** + * ice_aq_erase_nvm + * @hw: pointer to the HW struct + * @module_typeid: module pointer location in words from the NVM beginning + * @cd: pointer to command details structure or NULL + * + * Erase the NVM sector using the admin queue commands (0x0702) + */ +enum ice_status +ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd) +{ + struct ice_aq_desc desc; + struct ice_aqc_nvm *cmd; + + cmd = &desc.params.nvm; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase); + + cmd->module_typeid = cpu_to_le16(module_typeid); + cmd->length = cpu_to_le16(ICE_AQC_NVM_ERASE_LEN); + cmd->offset_low = 0; + cmd->offset_high = 0; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); +} + /** * ice_read_sr_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure @@ -634,3 +704,119 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw) return status; } + +/** + * ice_nvm_write_activate + * @hw: pointer to the HW struct + * @cmd_flags: NVM activate admin command bits (banks to be validated) + * + * Update the control word with the required banks' validity bits + * and dumps the Shadow RAM to flash (0x0707) + */ +enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags) +{ + struct ice_aqc_nvm *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.nvm; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate); + + cmd->cmd_flags = cmd_flags; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** + * ice_aq_nvm_update_empr + * @hw: pointer to the HW struct + * + * Update empr (0x0709). This command allows SW to + * request an EMPR to activate new FW. + */ +enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw) +{ + struct ice_aq_desc desc; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr); + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/* ice_nvm_set_pkg_data + * @hw: pointer to the HW struct + * @del_pkg_data_flag: If is set then the current pkg_data store by FW + * is deleted. + * If bit is set to 1, then buffer should be size 0. + * @data: pointer to buffer + * @length: length of the buffer + * @cd: pointer to command details structure or NULL + * + * Set package data (0x070A). This command is equivalent to the reception + * of a PLDM FW Update GetPackageData cmd. This command should be sent + * as part of the NVM update as the first cmd in the flow. + */ + +enum ice_status +ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, + u16 length, struct ice_sq_cd *cd) +{ + struct ice_aqc_nvm_pkg_data *cmd; + struct ice_aq_desc desc; + + if (length != 0 && !data) + return ICE_ERR_PARAM; + + cmd = &desc.params.pkg_data; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + if (del_pkg_data_flag) + cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE; + + return ice_aq_send_cmd(hw, &desc, data, length, cd); +} + +/* ice_nvm_pass_component_tbl + * @hw: pointer to the HW struct + * @data: pointer to buffer + * @length: length of the buffer + * @transfer_flag: parameter for determining stage of the update + * @comp_response: a pointer to the response from the 0x070B AQC. 
+ * @comp_response_code: a pointer to the response code from the 0x070B AQC. + * @cd: pointer to command details structure or NULL + * + * Pass component table (0x070B). This command is equivalent to the reception + * of a PLDM FW Update PassComponentTable cmd. This command should be sent once + * per component. It can be only sent after Set Package Data cmd and before + * actual update. FW will assume these commands are going to be sent until + * the TransferFlag is set to End or StartAndEnd. + */ + +enum ice_status +ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code, struct ice_sq_cd *cd) +{ + struct ice_aqc_nvm_pass_comp_tbl *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + if (!data || !comp_response || !comp_response_code) + return ICE_ERR_PARAM; + + cmd = &desc.params.pass_comp_tbl; + + ice_fill_dflt_direct_cmd_desc(&desc, + ice_aqc_opc_nvm_pass_component_tbl); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + + cmd->transfer_flag = transfer_flag; + status = ice_aq_send_cmd(hw, &desc, data, length, cd); + + if (!status) { + *comp_response = cmd->component_response; + *comp_response_code = cmd->component_response_code; + } + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 999f273ba6ad..8d430909f846 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -17,4 +17,20 @@ enum ice_status ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data); +enum ice_status +ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, u8 command_flags, + struct ice_sq_cd *cd); +enum ice_status +ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd); +enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw); +enum ice_status ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags); +enum ice_status ice_aq_nvm_update_empr(struct ice_hw *hw); +enum ice_status +ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data, + u16 length, struct ice_sq_cd *cd); +enum ice_status +ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length, + u8 transfer_flag, u8 *comp_response, + u8 *comp_response_code, struct ice_sq_cd *cd); #endif /* _ICE_NVM_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 8eb1f184a886..1022086e8920 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -774,6 +774,9 @@ struct ice_hw_port_stats { #define ICE_OROM_VER_SHIFT 24 #define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT) #define ICE_SR_PFA_PTR 0x40 +#define ICE_SR_1ST_NVM_BANK_PTR 0x42 +#define ICE_SR_1ST_OROM_BANK_PTR 0x44 +#define ICE_SR_NETLIST_BANK_PTR 0x46 #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* Link override related */ From 2ab560a78e3b2f6b19626dcd47790f35b28cc5db Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 23 Jul 2020 17:22:02 -0700 Subject: [PATCH 1731/2056] ice: add flags indicating pending update of firmware module After a flash update, the pending status of the update can be determined from the device capabilities. Read the appropriate device capability and store whether there is a pending update awaiting a reboot. 
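For illustration only (this sketch is not part of the patch), a caller that has the capability flags populated could combine them before deciding whether a new update should be attempted; the helper below and its name are assumptions, not driver code:

static bool ice_example_update_pending(struct ice_hw_common_caps *caps)
{
	/* Illustrative helper, not part of this patch: any pending NVM,
	 * Option ROM, or netlist image means the device is still waiting
	 * for an EMP reset to activate a previous update.
	 */
	return caps->nvm_update_pending_nvm ||
	       caps->nvm_update_pending_orom ||
	       caps->nvm_update_pending_netlist;
}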
Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 6 ++++++ drivers/net/ethernet/intel/ice/ice_common.c | 12 ++++++++++++ drivers/net/ethernet/intel/ice/ice_type.h | 6 ++++++ 3 files changed, 24 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index b2ec792e6052..7fec9309d379 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -109,6 +109,12 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_MSIX 0x0043 #define ICE_AQC_CAPS_FD 0x0045 #define ICE_AQC_CAPS_MAX_MTU 0x0047 +#define ICE_AQC_CAPS_NVM_VER 0x0048 +#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049 +#define ICE_AQC_CAPS_OROM_VER 0x004A +#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B +#define ICE_AQC_CAPS_NET_VER 0x004C +#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D #define ICE_AQC_CAPS_NVM_MGMT 0x0080 u8 major_ver; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 249526f217ff..e5d7f3c3c0d7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1857,6 +1857,18 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, "%s: msix_vector_first_id = %d\n", prefix, caps->msix_vector_first_id); break; + case ICE_AQC_CAPS_PENDING_NVM_VER: + caps->nvm_update_pending_nvm = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix); + break; + case ICE_AQC_CAPS_PENDING_OROM_VER: + caps->nvm_update_pending_orom = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix); + break; + case ICE_AQC_CAPS_PENDING_NET_VER: + caps->nvm_update_pending_netlist = true; + ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix); + break; case ICE_AQC_CAPS_NVM_MGMT: caps->nvm_unified_update = (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ? diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 1022086e8920..e4349475790f 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -245,6 +245,12 @@ struct ice_hw_common_caps { u8 dcb; + bool nvm_update_pending_nvm; + bool nvm_update_pending_orom; + bool nvm_update_pending_netlist; +#define ICE_NVM_PENDING_NVM_IMAGE BIT(0) +#define ICE_NVM_PENDING_OROM BIT(1) +#define ICE_NVM_PENDING_NETLIST BIT(2) bool nvm_unified_update; #define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3) }; From d69ea414c9b498064d4cd3ebbe2fd4b8298568bc Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 23 Jul 2020 17:22:03 -0700 Subject: [PATCH 1732/2056] ice: implement device flash update via devlink Use the newly added pldmfw library to implement device flash update for the Intel ice networking device driver. This support uses the devlink flash update interface. The main parts of the flash include the Option ROM, the netlist module, and the main NVM data. The PLDM firmware file contains modules for each of these components. Using the pldmfw library, the provided firmware file will be scanned for the three major components, "fw.undi" for the Option ROM, "fw.mgmt" for the main NVM module containing the primary device firmware, and "fw.netlist" containing the netlist module. The flash is separated into two banks, the active bank containing the running firmware, and the inactive bank which we use for update. Each module is updated in a staged process. 
First, the inactive bank is erased, preparing the device for update. Second, the contents of the component are copied to the inactive portion of the flash. After all components are updated, the driver signals the device to switch the active bank during the next EMP reset (which would usually occur during the next reboot). Although the firmware AdminQ interface does report an immediate status for each command, the NVM erase and NVM write commands receive status asynchronously. The driver must not continue writing until previous erase and write commands have finished. The real status of the NVM commands is returned over the receive AdminQ. Implement a simple interface that uses a wait queue so that the main update thread can sleep until the completion status is reported by firmware. For erasing the inactive banks, this can take quite a while in practice. To help visualize the process to the devlink application and other applications based on the devlink netlink interface, status is reported via the devlink_flash_update_status_notify. While we do report status after each 4k block when writing, there is no real status we can report during erasing. We simply must wait for the complete module erasure to finish. With this implementation, basic flash update for the ice hardware is supported. Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/Kconfig | 1 + drivers/net/ethernet/intel/ice/Makefile | 1 + drivers/net/ethernet/intel/ice/ice.h | 9 + drivers/net/ethernet/intel/ice/ice_common.c | 2 +- drivers/net/ethernet/intel/ice/ice_common.h | 2 + drivers/net/ethernet/intel/ice/ice_devlink.c | 54 ++ .../net/ethernet/intel/ice/ice_fw_update.c | 773 ++++++++++++++++++ .../net/ethernet/intel/ice/ice_fw_update.h | 12 + drivers/net/ethernet/intel/ice/ice_main.c | 154 ++++ 9 files changed, 1007 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/intel/ice/ice_fw_update.c create mode 100644 drivers/net/ethernet/intel/ice/ice_fw_update.h diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3cd13fd55011..5aa86318ed3e 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -295,6 +295,7 @@ config ICE default n depends on PCI_MSI select NET_DEVLINK + select PLDMFW help This driver supports Intel(R) Ethernet Connection E800 Series of devices. 
For more information on how to identify your adapter, go diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile index 980bbcc64b4b..6da4f43f2348 100644 --- a/drivers/net/ethernet/intel/ice/Makefile +++ b/drivers/net/ethernet/intel/ice/Makefile @@ -23,6 +23,7 @@ ice-y := ice_main.o \ ice_flex_pipe.o \ ice_flow.o \ ice_devlink.o \ + ice_fw_update.o \ ice_ethtool.o ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index c665220bb637..7d194d2a031a 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -412,6 +413,12 @@ struct ice_pf { struct mutex sw_mutex; /* lock for protecting VSI alloc flow */ struct mutex tc_mutex; /* lock to protect TC changes */ u32 msg_enable; + + /* spinlock to protect the AdminQ wait list */ + spinlock_t aq_wait_lock; + struct hlist_head aq_wait_list; + wait_queue_head_t aq_wait_queue; + u32 hw_csum_rx_error; u16 oicr_idx; /* Other interrupt cause MSIX vector index */ u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */ @@ -593,6 +600,8 @@ void ice_fdir_release_flows(struct ice_hw *hw); void ice_fdir_replay_flows(struct ice_hw *hw); void ice_fdir_replay_fltrs(struct ice_pf *pf); int ice_fdir_create_dflt_rules(struct ice_pf *pf); +int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, + struct ice_rq_event_info *event); int ice_open(struct net_device *netdev); int ice_stop(struct net_device *netdev); void ice_service_task_schedule(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index e5d7f3c3c0d7..50939ad0e760 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -2223,7 +2223,7 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, * Read the device capabilities and extract them into the dev_caps structure * for later use. 
*/ -static enum ice_status +enum ice_status ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps) { enum ice_status status; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index d8b33d7285c0..906113db6be6 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -88,6 +88,8 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, enum ice_status ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count, enum ice_adminq_opc opc, struct ice_sq_cd *cd); +enum ice_status +ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps); void ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high, u16 link_speeds_bitmap); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 43da2dcb0cbc..dbbd8b6f9d1a 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -4,6 +4,7 @@ #include "ice.h" #include "ice_lib.h" #include "ice_devlink.h" +#include "ice_fw_update.h" static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len) { @@ -229,8 +230,61 @@ static int ice_devlink_info_get(struct devlink *devlink, return 0; } +/** + * ice_devlink_flash_update - Update firmware stored in flash on the device + * @devlink: pointer to devlink associated with device to update + * @path: the path of the firmware file to use via request_firmware + * @component: name of the component to update, or NULL + * @extack: netlink extended ACK structure + * + * Perform a device flash update. The bulk of the update logic is contained + * within the ice_flash_pldm_image function. + * + * Returns: zero on success, or an error code on failure. + */ +static int +ice_devlink_flash_update(struct devlink *devlink, const char *path, + const char *component, struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = &pf->pdev->dev; + struct ice_hw *hw = &pf->hw; + const struct firmware *fw; + int err; + + /* individual component update is not yet supported */ + if (component) + return -EOPNOTSUPP; + + if (!hw->dev_caps.common_cap.nvm_unified_update) { + NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); + return -EOPNOTSUPP; + } + + err = ice_check_for_pending_update(pf, component, extack); + if (err) + return err; + + err = request_firmware(&fw, path, dev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk"); + return err; + } + + devlink_flash_update_begin_notify(devlink); + devlink_flash_update_status_notify(devlink, "Preparing to flash", + component, 0, 0); + err = ice_flash_pldm_image(pf, fw, extack); + devlink_flash_update_end_notify(devlink); + + release_firmware(fw); + + return err; +} + static const struct devlink_ops ice_devlink_ops = { .info_get = ice_devlink_info_get, + .flash_update = ice_devlink_flash_update, }; static void ice_devlink_free(void *devlink_ptr) diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c new file mode 100644 index 000000000000..deaefe00c9c0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2018-2019, Intel Corporation. 
*/ + +#include +#include +#include +#include +#include "ice.h" +#include "ice_fw_update.h" + +struct ice_fwu_priv { + struct pldmfw context; + + struct ice_pf *pf; + struct netlink_ext_ack *extack; + + /* Track which NVM banks to activate at the end of the update */ + u8 activate_flags; +}; + +/** + * ice_send_package_data - Send record package data to firmware + * @context: PLDM fw update structure + * @data: pointer to the package data + * @length: length of the package data + * + * Send a copy of the package data associated with the PLDM record matching + * this device to the firmware. + * + * Note that this function sends an AdminQ command that will fail unless the + * NVM resource has been acquired. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct device *dev = context->dev; + struct ice_pf *pf = priv->pf; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 *package_data; + + package_data = kmemdup(data, length, GFP_KERNEL); + if (!package_data) + return -ENOMEM; + + status = ice_nvm_set_pkg_data(hw, false, package_data, length, NULL); + + kfree(package_data); + + if (status) { + dev_err(dev, "Failed to send record package data to firmware, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware"); + return -EIO; + } + + return 0; +} + +/** + * ice_check_component_response - Report firmware response to a component + * @pf: device private data structure + * @id: component id being checked + * @response: indicates whether this component can be updated + * @code: code indicating reason for response + * @extack: netlink extended ACK structure + * + * Check whether firmware indicates if this component can be updated. Report + * a suitable error message over the netlink extended ACK if the component + * cannot be updated. + * + * Returns: zero if the component can be updated, or -ECANCELED of the + * firmware indicates the component cannot be updated. + */ +static int +ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + const char *component; + + switch (id) { + case NVM_COMP_ID_OROM: + component = "fw.undi"; + break; + case NVM_COMP_ID_NVM: + component = "fw.mgmt"; + break; + case NVM_COMP_ID_NETLIST: + component = "fw.netlist"; + break; + default: + WARN(1, "Unexpected unknown component identifier 0x%02x", id); + return -EINVAL; + } + + dev_dbg(dev, "%s: firmware response 0x%x, code 0x%x\n", + component, response, code); + + switch (response) { + case ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED: + /* firmware indicated this update is good to proceed */ + return 0; + case ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE: + dev_warn(dev, "firmware recommends not updating %s, as it may result in a downgrade. 
continuing anyways\n", component); + return 0; + case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: + dev_info(dev, "firmware has rejected updating %s\n", component); + break; + } + + switch (code) { + case ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE: + dev_err(dev, "Component comparison stamp for %s is identical to the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is identical to running image"); + break; + case ICE_AQ_NVM_PASS_COMP_STAMP_LOWER: + dev_err(dev, "Component comparison stamp for %s is lower than the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is lower than running image"); + break; + case ICE_AQ_NVM_PASS_COMP_INVALID_STAMP_CODE: + dev_err(dev, "Component comparison stamp for %s is invalid\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component comparison stamp is invalid"); + break; + case ICE_AQ_NVM_PASS_COMP_CONFLICT_CODE: + dev_err(dev, "%s conflicts with a previous component table\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component table conflict occurred"); + break; + case ICE_AQ_NVM_PASS_COMP_PRE_REQ_NOT_MET_CODE: + dev_err(dev, "Pre-requisites for component %s have not been met\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component pre-requisites not met"); + break; + case ICE_AQ_NVM_PASS_COMP_NOT_SUPPORTED_CODE: + dev_err(dev, "%s is not a supported component\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component not supported"); + break; + case ICE_AQ_NVM_PASS_COMP_CANNOT_DOWNGRADE_CODE: + dev_err(dev, "Security restrictions prevent %s from being downgraded\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component cannot be downgraded"); + break; + case ICE_AQ_NVM_PASS_COMP_INCOMPLETE_IMAGE_CODE: + dev_err(dev, "Received an incomplete component image for %s\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Incomplete component image"); + break; + case ICE_AQ_NVM_PASS_COMP_VER_STR_IDENTICAL_CODE: + dev_err(dev, "Component version for %s is identical to the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component version is identical to running image"); + break; + case ICE_AQ_NVM_PASS_COMP_VER_STR_LOWER_CODE: + dev_err(dev, "Component version for %s is lower than the running image\n", + component); + NL_SET_ERR_MSG_MOD(extack, "Component version is lower than the running image"); + break; + default: + dev_err(dev, "Unexpected response code 0x02%x for %s\n", + code, component); + NL_SET_ERR_MSG_MOD(extack, "Received unexpected response code from firmware"); + break; + } + + return -ECANCELED; +} + +/** + * ice_send_component_table - Send PLDM component table to firmware + * @context: PLDM fw update structure + * @component: the component to process + * @transfer_flag: relative transfer order of this component + * + * Read relevant data from the component and forward it to the device + * firmware. Check the response to determine if the firmware indicates that + * the update can proceed. + * + * This function sends AdminQ commands related to the NVM, and assumes that + * the NVM resource has been acquired. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_send_component_table(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flag) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_aqc_nvm_comp_tbl *comp_tbl; + u8 comp_response, comp_response_code; + struct device *dev = context->dev; + struct ice_pf *pf = priv->pf; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + size_t length; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + case NVM_COMP_ID_NVM: + case NVM_COMP_ID_NETLIST: + break; + default: + dev_err(dev, "Unable to update due to a firmware component with unknown ID %u\n", + component->identifier); + NL_SET_ERR_MSG_MOD(extack, "Unable to update due to unknown firmware component"); + return -EOPNOTSUPP; + } + + length = struct_size(comp_tbl, cvs, component->version_len); + comp_tbl = kzalloc(length, GFP_KERNEL); + if (!comp_tbl) + return -ENOMEM; + + comp_tbl->comp_class = cpu_to_le16(component->classification); + comp_tbl->comp_id = cpu_to_le16(component->identifier); + comp_tbl->comp_class_idx = FWU_COMP_CLASS_IDX_NOT_USE; + comp_tbl->comp_cmp_stamp = cpu_to_le32(component->comparison_stamp); + comp_tbl->cvs_type = component->version_type; + comp_tbl->cvs_len = component->version_len; + memcpy(comp_tbl->cvs, component->version_string, component->version_len); + + status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length, + transfer_flag, &comp_response, + &comp_response_code, NULL); + + kfree(comp_tbl); + + if (status) { + dev_err(dev, "Failed to transfer component table to firmware, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware"); + return -EIO; + } + + return ice_check_component_response(pf, component->identifier, comp_response, + comp_response_code, extack); +} + +/** + * ice_write_one_nvm_block - Write an NVM block and await completion response + * @pf: the PF data structure + * @module: the module to write to + * @offset: offset in bytes + * @block_size: size of the block to write, up to 4k + * @block: pointer to block of data to write + * @last_cmd: whether this is the last command + * @extack: netlink extended ACK structure + * + * Write a block of data to a flash module, and await for the completion + * response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. 
+ */ +static int +ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + struct netlink_ext_ack *extack) +{ + u16 completion_module, completion_retval; + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u32 completion_offset; + int err; + + memset(&event, 0, sizeof(event)); + + status = ice_aq_update_nvm(hw, module, offset, block_size, block, + last_cmd, 0, NULL); + if (status) { + dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n", + module, offset, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module"); + return -EIO; + } + + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, HZ, &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n", + module, err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + return -EIO; + } + + completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); + completion_retval = le16_to_cpu(event.desc.retval); + + completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low); + completion_offset |= event.desc.params.nvm.offset_high << 16; + + if (completion_module != module) { + dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n", + completion_module, module); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + return -EIO; + } + + if (completion_offset != offset) { + dev_err(dev, "Unexpected offset in write completion: got %u, expected %u\n", + completion_offset, offset); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + return -EIO; + } + + if (completion_retval) { + dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n", + module, offset, + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module"); + return -EIO; + } + + return 0; +} + +/** + * ice_write_nvm_module - Write data to an NVM module + * @pf: the PF driver structure + * @module: the module id to program + * @component: the name of the component being updated + * @image: buffer of image data to write to the NVM + * @length: length of the buffer + * @extack: netlink extended ACK structure + * + * Loop over the data for a given NVM module and program it in 4 Kb + * blocks. Notify devlink core of progress after each block is programmed. + * Loops over a block of data and programs the NVM in 4k block chunks. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component, + const u8 *image, u32 length, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink; + u32 offset = 0; + bool last_cmd; + u8 *block; + int err; + + devlink = priv_to_devlink(pf); + + devlink_flash_update_status_notify(devlink, "Flashing", + component, 0, length); + + block = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!block) + return -ENOMEM; + + do { + u32 block_size; + + block_size = min_t(u32, ICE_AQ_MAX_BUF_LEN, length - offset); + last_cmd = !(offset + block_size < length); + + /* ice_aq_update_nvm may copy the firmware response into the + * buffer, so we must make a copy since the source data is + * constant. 
+ */ + memcpy(block, image + offset, block_size); + + err = ice_write_one_nvm_block(pf, module, offset, block_size, + block, last_cmd, extack); + if (err) + break; + + offset += block_size; + + devlink_flash_update_status_notify(devlink, "Flashing", + component, offset, length); + } while (!last_cmd); + + if (err) + devlink_flash_update_status_notify(devlink, "Flashing failed", + component, length, length); + else + devlink_flash_update_status_notify(devlink, "Flashing done", + component, length, length); + + kfree(block); + return err; +} + +/** + * ice_erase_nvm_module - Erase an NVM module and await firmware completion + * @pf: the PF data structure + * @module: the module to erase + * @component: name of the component being updated + * @extack: netlink extended ACK structure + * + * Erase the inactive NVM bank associated with this module, and await for + * a completion response message from firmware. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component, + struct netlink_ext_ack *extack) +{ + u16 completion_module, completion_retval; + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + struct devlink *devlink; + enum ice_status status; + int err; + + memset(&event, 0, sizeof(event)); + + devlink = priv_to_devlink(pf); + + devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0); + + status = ice_aq_erase_nvm(hw, module, NULL); + if (status) { + dev_err(dev, "Failed to erase %s (module 0x%02x), err %s aq_err %s\n", + component, module, ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module"); + err = -EIO; + goto out_notify_devlink; + } + + /* Yes, this really can take minutes to complete */ + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n", + component, module, err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + goto out_notify_devlink; + } + + completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid); + completion_retval = le16_to_cpu(event.desc.retval); + + if (completion_module != module) { + dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n", + component, completion_module, module); + NL_SET_ERR_MSG_MOD(extack, "Unexpected firmware response"); + err = -EIO; + goto out_notify_devlink; + } + + if (completion_retval) { + dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n", + component, module, + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash"); + err = -EIO; + goto out_notify_devlink; + } + +out_notify_devlink: + if (err) + devlink_flash_update_status_notify(devlink, "Erasing failed", + component, 0, 0); + else + devlink_flash_update_status_notify(devlink, "Erasing done", + component, 0, 0); + + return err; +} + +/** + * ice_switch_flash_banks - Tell firmware to switch NVM banks + * @pf: Pointer to the PF data structure + * @activate_flags: flags used for the activation command + * @extack: netlink extended ACK structure + * + * Notify firmware to activate the newly written flash banks, and wait for the + * firmware response. 
+ * + * Returns: zero on success or an error code on failure. + */ +static int ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_rq_event_info event; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u16 completion_retval; + int err; + + memset(&event, 0, sizeof(event)); + + status = ice_nvm_write_activate(hw, activate_flags); + if (status) { + dev_err(dev, "Failed to switch active flash banks, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks"); + return -EIO; + } + + err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, HZ, + &event); + if (err) { + dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n", + err); + NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware"); + return err; + } + + completion_retval = le16_to_cpu(event.desc.retval); + if (completion_retval) { + dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n", + ice_aq_str((enum ice_aq_err)completion_retval)); + NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks"); + return -EIO; + } + + return 0; +} + +/** + * ice_flash_component - Flash a component of the NVM + * @context: PLDM fw update structure + * @component: the component table to program + * + * Program the flash contents for a given component. First, determine the + * module id. Then, erase the secondary bank for this module. Finally, write + * the contents of the component to the NVM. + * + * Note this function assumes the caller has acquired the NVM resource. + * + * Returns: zero on success, or a negative error code on failure. + */ +static int +ice_flash_component(struct pldmfw *context, struct pldmfw_component *component) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_pf *pf = priv->pf; + const char *name; + u16 module; + u8 flag; + int err; + + switch (component->identifier) { + case NVM_COMP_ID_OROM: + module = ICE_SR_1ST_OROM_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_OROM; + name = "fw.undi"; + break; + case NVM_COMP_ID_NVM: + module = ICE_SR_1ST_NVM_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_NVM; + name = "fw.mgmt"; + break; + case NVM_COMP_ID_NETLIST: + module = ICE_SR_NETLIST_BANK_PTR; + flag = ICE_AQC_NVM_ACTIV_SEL_NETLIST; + name = "fw.netlist"; + break; + default: + /* This should not trigger, since we check the id before + * sending the component table to firmware. + */ + WARN(1, "Unexpected unknown component identifier 0x%02x", + component->identifier); + return -EINVAL; + } + + /* Mark this component for activating at the end */ + priv->activate_flags |= flag; + + err = ice_erase_nvm_module(pf, module, name, extack); + if (err) + return err; + + return ice_write_nvm_module(pf, module, name, component->component_data, + component->component_size, extack); +} + +/** + * ice_finalize_update - Perform last steps to complete device update + * @context: PLDM fw update structure + * + * Called as the last step of the update process. Complete the update by + * telling the firmware to switch active banks, and perform a reset of + * configured. + * + * Returns: 0 on success, or an error code on failure. 
+ */ +static int ice_finalize_update(struct pldmfw *context) +{ + struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context); + struct netlink_ext_ack *extack = priv->extack; + struct ice_pf *pf = priv->pf; + int err; + + /* Finally, notify firmware to activate the written NVM banks */ + err = ice_switch_flash_banks(pf, priv->activate_flags, extack); + if (err) + return err; + + return 0; +} + +static const struct pldmfw_ops ice_fwu_ops = { + .match_record = &pldmfw_op_pci_match_record, + .send_package_data = &ice_send_package_data, + .send_component_table = &ice_send_component_table, + .flash_component = &ice_flash_component, + .finalize_update = &ice_finalize_update, +}; + +/** + * ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device + * @pf: private device driver structure + * @fw: firmware object pointing to the relevant firmware file + * @extack: netlink extended ACK structure + * + * Parse the data for a given firmware file, verifying that it is a valid PLDM + * formatted image that matches this device. + * + * Extract the device record Package Data and Component Tables and send them + * to the firmware. Extract and write the flash data for each of the three + * main flash components, "fw.mgmt", "fw.undi", and "fw.netlist". Notify + * firmware once the data is written to the inactive banks. + * + * Returns: zero on success or a negative error code on failure. + */ +int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw *hw = &pf->hw; + struct ice_fwu_priv priv; + enum ice_status status; + int err; + + memset(&priv, 0, sizeof(priv)); + + priv.context.ops = &ice_fwu_ops; + priv.context.dev = dev; + priv.extack = extack; + priv.pf = pf; + priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL; + + status = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (status) { + dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return -EIO; + } + + err = pldmfw_flash_image(&priv.context, fw); + + ice_release_nvm(hw); + + return err; +} + +/** + * ice_check_for_pending_update - Check for a pending flash update + * @pf: the PF driver structure + * @component: if not NULL, the name of the component being updated + * @extack: Netlink extended ACK structure + * + * Check whether the device already has a pending flash update. If such an + * update is found, cancel it so that the requested update may proceed. + * + * Returns: zero on success, or a negative error code on failure. + */ +int ice_check_for_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct device *dev = ice_pf_to_dev(pf); + struct ice_hw_dev_caps *dev_caps; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + u8 pending = 0; + int err; + + dev_caps = kzalloc(sizeof(*dev_caps), GFP_KERNEL); + if (!dev_caps) + return -ENOMEM; + + /* Read the most recent device capabilities from firmware. Do not use + * the cached values in hw->dev_caps, because the pending update flag + * may have changed, e.g. if an update was previously completed and + * the system has not yet rebooted. 
+ */ + status = ice_discover_dev_caps(hw, dev_caps); + if (status) { + NL_SET_ERR_MSG_MOD(extack, "Unable to read device capabilities"); + kfree(dev_caps); + return -EIO; + } + + if (dev_caps->common_cap.nvm_update_pending_nvm) { + dev_info(dev, "The fw.mgmt flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_NVM; + } + + if (dev_caps->common_cap.nvm_update_pending_orom) { + dev_info(dev, "The fw.undi flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_OROM; + } + + if (dev_caps->common_cap.nvm_update_pending_netlist) { + dev_info(dev, "The fw.netlist flash component has a pending update\n"); + pending |= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + } + + kfree(dev_caps); + + /* If the flash_update request is for a specific component, ignore all + * of the other components. + */ + if (component) { + if (strcmp(component, "fw.mgmt") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_NVM; + else if (strcmp(component, "fw.undi") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_OROM; + else if (strcmp(component, "fw.netlist") == 0) + pending &= ICE_AQC_NVM_ACTIV_SEL_NETLIST; + else + WARN(1, "Unexpected flash component %s", component); + } + + /* There is no previous pending update, so this request may continue */ + if (!pending) + return 0; + + /* In order to allow overwriting a previous pending update, notify + * firmware to cancel that update by issuing the appropriate command. + */ + devlink_flash_update_status_notify(devlink, + "Canceling previous pending update", + component, 0, 0); + + status = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (status) { + dev_err(dev, "Failed to acquire device flash lock, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock"); + return -EIO; + } + + pending |= ICE_AQC_NVM_REVERT_LAST_ACTIV; + err = ice_switch_flash_banks(pf, pending, extack); + + ice_release_nvm(hw); + + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h new file mode 100644 index 000000000000..79472cc618b4 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. */ + +#ifndef _ICE_FW_UPDATE_H_ +#define _ICE_FW_UPDATE_H_ + +int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw, + struct netlink_ext_ack *extack); +int ice_check_for_pending_update(struct ice_pf *pf, const char *component, + struct netlink_ext_ack *extack); + +#endif diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 16a4096bb780..646c2cbc6904 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -914,6 +914,151 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event) return status; } +enum ice_aq_task_state { + ICE_AQ_TASK_WAITING = 0, + ICE_AQ_TASK_COMPLETE, + ICE_AQ_TASK_CANCELED, +}; + +struct ice_aq_task { + struct hlist_node entry; + + u16 opcode; + struct ice_rq_event_info *event; + enum ice_aq_task_state state; +}; + +/** + * ice_wait_for_aq_event - Wait for an AdminQ event from firmware + * @pf: pointer to the PF private structure + * @opcode: the opcode to wait for + * @timeout: how long to wait, in jiffies + * @event: storage for the event info + * + * Waits for a specific AdminQ completion event on the ARQ for a given PF. 
The + * current thread will be put to sleep until the specified event occurs or + * until the given timeout is reached. + * + * To obtain only the descriptor contents, pass an event without an allocated + * msg_buf. If the complete data buffer is desired, allocate the + * event->msg_buf with enough space ahead of time. + * + * Returns: zero on success, or a negative error code on failure. + */ +int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout, + struct ice_rq_event_info *event) +{ + struct ice_aq_task *task; + long ret; + int err; + + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return -ENOMEM; + + INIT_HLIST_NODE(&task->entry); + task->opcode = opcode; + task->event = event; + task->state = ICE_AQ_TASK_WAITING; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_add_head(&task->entry, &pf->aq_wait_list); + spin_unlock_bh(&pf->aq_wait_lock); + + ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state, + timeout); + switch (task->state) { + case ICE_AQ_TASK_WAITING: + err = ret < 0 ? ret : -ETIMEDOUT; + break; + case ICE_AQ_TASK_CANCELED: + err = ret < 0 ? ret : -ECANCELED; + break; + case ICE_AQ_TASK_COMPLETE: + err = ret < 0 ? ret : 0; + break; + default: + WARN(1, "Unexpected AdminQ wait task state %u", task->state); + err = -EINVAL; + break; + } + + spin_lock_bh(&pf->aq_wait_lock); + hlist_del(&task->entry); + spin_unlock_bh(&pf->aq_wait_lock); + kfree(task); + + return err; +} + +/** + * ice_aq_check_events - Check if any thread is waiting for an AdminQ event + * @pf: pointer to the PF private structure + * @opcode: the opcode of the event + * @event: the event to check + * + * Loops over the current list of pending threads waiting for an AdminQ event. + * For each matching task, copy the contents of the event into the task + * structure and wake up the thread. + * + * If multiple threads wait for the same opcode, they will all be woken up. + * + * Note that event->msg_buf will only be duplicated if the event has a buffer + * with enough space already allocated. Otherwise, only the descriptor and + * message length will be copied. + * + * Returns: true if an event was found, false otherwise + */ +static void ice_aq_check_events(struct ice_pf *pf, u16 opcode, + struct ice_rq_event_info *event) +{ + struct ice_aq_task *task; + bool found = false; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_for_each_entry(task, &pf->aq_wait_list, entry) { + if (task->state || task->opcode != opcode) + continue; + + memcpy(&task->event->desc, &event->desc, sizeof(event->desc)); + task->event->msg_len = event->msg_len; + + /* Only copy the data buffer if a destination was set */ + if (task->event->msg_buf && + task->event->buf_len > event->buf_len) { + memcpy(task->event->msg_buf, event->msg_buf, + event->buf_len); + task->event->buf_len = event->buf_len; + } + + task->state = ICE_AQ_TASK_COMPLETE; + found = true; + } + spin_unlock_bh(&pf->aq_wait_lock); + + if (found) + wake_up(&pf->aq_wait_queue); +} + +/** + * ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks + * @pf: the PF private structure + * + * Set all waiting tasks to ICE_AQ_TASK_CANCELED, and wake up their threads. + * This will then cause ice_aq_wait_for_event to exit with -ECANCELED. 
+ */ +static void ice_aq_cancel_waiting_tasks(struct ice_pf *pf) +{ + struct ice_aq_task *task; + + spin_lock_bh(&pf->aq_wait_lock); + hlist_for_each_entry(task, &pf->aq_wait_list, entry) + task->state = ICE_AQ_TASK_CANCELED; + spin_unlock_bh(&pf->aq_wait_lock); + + wake_up(&pf->aq_wait_queue); +} + /** * __ice_clean_ctrlq - helper function to clean controlq rings * @pf: ptr to struct ice_pf @@ -1010,6 +1155,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) opcode = le16_to_cpu(event.desc.opcode); + /* Notify any thread that might be waiting for this event */ + ice_aq_check_events(pf, opcode, &event); + switch (opcode) { case ice_aqc_opc_get_link_status: if (ice_handle_link_event(pf, &event)) @@ -3089,6 +3237,10 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->sw_mutex); mutex_init(&pf->tc_mutex); + INIT_HLIST_HEAD(&pf->aq_wait_list); + spin_lock_init(&pf->aq_wait_lock); + init_waitqueue_head(&pf->aq_wait_queue); + /* setup service timer and periodic service task */ timer_setup(&pf->serv_tmr, ice_service_timer, 0); pf->serv_tmr_period = HZ; @@ -4017,6 +4169,8 @@ static void ice_remove(struct pci_dev *pdev) set_bit(__ICE_DOWN, pf->state); ice_service_task_stop(pf); + ice_aq_cancel_waiting_tasks(pf); + mutex_destroy(&(&pf->hw)->fdir_fltr_lock); if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); From c15850c709eb5c7771785c9174e4908d1806a0f0 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sat, 25 Jul 2020 15:11:18 +0800 Subject: [PATCH 1733/2056] hinic: add support to handle hw abnormal event add support to handle hw abnormal event such as hardware failure, cable unplugged,link error Signed-off-by: Luo bin Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 4 + .../net/ethernet/huawei/hinic/hinic_devlink.c | 286 +++++++++++++++++- .../net/ethernet/huawei/hinic/hinic_devlink.h | 8 +- .../net/ethernet/huawei/hinic/hinic_ethtool.c | 20 ++ .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 152 +++++++++- .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 148 ++++++++- .../net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 10 + .../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 4 + .../net/ethernet/huawei/hinic/hinic_main.c | 87 ++++-- .../net/ethernet/huawei/hinic/hinic_port.h | 25 ++ 10 files changed, 706 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index befd925c03dc..0a1e20edf7cf 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -98,10 +98,14 @@ struct hinic_dev { int lb_pkt_len; u8 *lb_test_rx_buf; struct devlink *devlink; + bool cable_unplugged; + bool module_unrecognized; }; struct hinic_devlink_priv { struct hinic_hwdev *hwdev; + struct devlink_health_reporter *hw_fault_reporter; + struct devlink_health_reporter *fw_fault_reporter; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index a40a10ac1ee9..c6adc776f3c8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -16,9 +16,9 @@ #include #include -#include "hinic_dev.h" #include "hinic_port.h" #include "hinic_devlink.h" +#include "hinic_hw_dev.h" static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf, u32 image_size, struct host_image_st *host_image) @@ -317,12 +317,292 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int 
hinic_devlink_register(struct devlink *devlink, struct device *dev) +int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev) { + struct devlink *devlink = priv_to_devlink(priv); + return devlink_register(devlink, dev); } -void hinic_devlink_unregister(struct devlink *devlink) +void hinic_devlink_unregister(struct hinic_devlink_priv *priv) { + struct devlink *devlink = priv_to_devlink(priv); + devlink_unregister(devlink); } + +static int chip_fault_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) +{ + char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "fatal", "reset", "flr", "general", "suggestion"}; + char level_str[FAULT_SHOW_STR_LEN + 1] = {0}; + u8 level; + int err; + + level = event->event.chip.err_level; + if (level < FAULT_LEVEL_MAX) + strncpy(level_str, fault_level[level], strlen(fault_level[level])); + else + strncpy(level_str, "Unknown", strlen("Unknown")); + + if (level == FAULT_LEVEL_SERIOUS_FLR) { + err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", + (u32)event->event.chip.func_id); + if (err) + return err; + } + + err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type); + if (err) + return err; + + err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr", + event->event.chip.err_csr_addr); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value", + event->event.chip.err_csr_value); + if (err) + return err; + + return 0; +} + +static int fault_report_show(struct devlink_fmsg *fmsg, + struct hinic_fault_event *event) +{ + char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + "chip", "ucode", "mem rd timeout", "mem wr timeout", + "reg rd timeout", "reg wr timeout", "phy fault"}; + char type_str[FAULT_SHOW_STR_LEN + 1] = {0}; + int err; + + if (event->type < FAULT_TYPE_MAX) + strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type])); + else + strncpy(type_str, "Unknown", strlen("Unknown")); + + err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str); + if (err) + return err; + + err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", + event->event.val, sizeof(event->event.val)); + if (err) + return err; + + switch (event->type) { + case FAULT_TYPE_CHIP: + err = chip_fault_show(fmsg, event); + if (err) + return err; + break; + case FAULT_TYPE_UCODE: + err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc); + if (err) + return err; + break; + case FAULT_TYPE_MEM_RD_TIMEOUT: + case FAULT_TYPE_MEM_WR_TIMEOUT: + err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl", + event->event.mem_timeout.err_csr_ctrl); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data", + event->event.mem_timeout.err_csr_data); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab", + event->event.mem_timeout.ctrl_tab); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "mem_index", + event->event.mem_timeout.mem_index); + if (err) + return err; + break; + case 
FAULT_TYPE_REG_RD_TIMEOUT: + case FAULT_TYPE_REG_WR_TIMEOUT: + err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr); + if (err) + return err; + break; + case FAULT_TYPE_PHY_FAULT: + err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id); + if (err) + return err; + err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr); + if (err) + return err; + err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data); + if (err) + return err; + break; + default: + break; + } + + return 0; +} + +static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + if (priv_ctx) + return fault_report_show(fmsg, priv_ctx); + + return 0; +} + +static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg, + struct hinic_mgmt_watchdog_info *watchdog_info) +{ + int err; + + err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr); + if (err) + return err; + + err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr); + if (err) + return err; + + err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", + watchdog_info->reg, sizeof(watchdog_info->reg)); + if (err) + return err; + + err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)", + watchdog_info->data, sizeof(watchdog_info->data)); + if (err) + return err; + + return 0; +} + +static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + if (priv_ctx) + return mgmt_watchdog_report_show(fmsg, priv_ctx); + + return 0; +} + +static const struct devlink_health_reporter_ops hinic_hw_fault_reporter_ops = { + .name = "hw", + .dump = hinic_hw_reporter_dump, +}; + +static const struct devlink_health_reporter_ops hinic_fw_fault_reporter_ops = { + .name = "fw", + .dump = hinic_fw_reporter_dump, +}; + +int hinic_health_reporters_create(struct hinic_devlink_priv *priv) +{ + struct devlink *devlink = priv_to_devlink(priv); + + priv->hw_fault_reporter = + devlink_health_reporter_create(devlink, &hinic_hw_fault_reporter_ops, + 0, priv); + if 
(IS_ERR(priv->hw_fault_reporter)) { + dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create hw fault reporter, err: %ld\n", + PTR_ERR(priv->hw_fault_reporter)); + return PTR_ERR(priv->hw_fault_reporter); + } + + priv->fw_fault_reporter = + devlink_health_reporter_create(devlink, &hinic_fw_fault_reporter_ops, + 0, priv); + if (IS_ERR(priv->fw_fault_reporter)) { + dev_warn(&priv->hwdev->hwif->pdev->dev, "Failed to create fw fault reporter, err: %ld\n", + PTR_ERR(priv->fw_fault_reporter)); + devlink_health_reporter_destroy(priv->hw_fault_reporter); + priv->hw_fault_reporter = NULL; + return PTR_ERR(priv->fw_fault_reporter); + } + + return 0; +} + +void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv) +{ + if (!IS_ERR_OR_NULL(priv->fw_fault_reporter)) { + devlink_health_reporter_destroy(priv->fw_fault_reporter); + priv->fw_fault_reporter = NULL; + } + + if (!IS_ERR_OR_NULL(priv->hw_fault_reporter)) { + devlink_health_reporter_destroy(priv->hw_fault_reporter); + priv->hw_fault_reporter = NULL; + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index 604e95a7c5ce..a090ebcfaabb 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -7,6 +7,7 @@ #define __HINIC_DEVLINK_H__ #include +#include "hinic_dev.h" #define MAX_FW_TYPE_NUM 30 #define HINIC_MAGIC_NUM 0x18221100 @@ -109,7 +110,10 @@ struct host_image_st { struct devlink *hinic_devlink_alloc(void); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct devlink *devlink, struct device *dev); -void hinic_devlink_unregister(struct devlink *devlink); +int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev); +void hinic_devlink_unregister(struct hinic_devlink_priv *priv); + +int hinic_health_reporters_create(struct hinic_devlink_priv *priv); +void hinic_health_reporters_destroy(struct hinic_devlink_priv *priv); #endif /* __HINIC_DEVLINK_H__ */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index cb5ebae54f73..6bb65ade1d77 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -1766,6 +1766,25 @@ static int hinic_get_module_eeprom(struct net_device *netdev, return 0; } +static int +hinic_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *link_ext_state_info) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (nic_dev->cable_unplugged) + link_ext_state_info->link_ext_state = + ETHTOOL_LINK_EXT_STATE_NO_CABLE; + else if (nic_dev->module_unrecognized) + link_ext_state_info->link_ext_state = + ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH; + + return 0; +} + static const struct ethtool_ops hinic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_RX_MAX_FRAMES | @@ -1776,6 +1795,7 @@ static const struct ethtool_ops hinic_ethtool_ops = { .set_link_ksettings = hinic_set_link_ksettings, .get_drvinfo = hinic_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ext_state = hinic_get_link_ext_state, .get_ringparam = hinic_get_ringparam, .set_ringparam = hinic_set_ringparam, .get_coalesce = hinic_get_coalesce, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 9831c14324e6..55672ee18aab 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -16,8 +16,11 @@ #include #include #include +#include +#include "hinic_devlink.h" #include "hinic_sriov.h" +#include "hinic_dev.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" @@ -621,6 +624,113 @@ static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, nic_cb->cb_state &= ~HINIC_CB_RUNNING; } +static void hinic_comm_recv_mgmt_self_cmd_reg(struct hinic_pfhwdev *pfhwdev, + u8 cmd, + comm_mgmt_self_msg_proc proc) +{ + u8 cmd_idx; + + cmd_idx = pfhwdev->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + dev_err(&pfhwdev->hwdev.hwif->pdev->dev, + "Register recv mgmt process failed, cmd: 0x%x\n", cmd); + return; + } + + pfhwdev->proc.info[cmd_idx].cmd = cmd; + pfhwdev->proc.info[cmd_idx].proc = proc; + pfhwdev->proc.cmd_num++; +} + +static void hinic_comm_recv_mgmt_self_cmd_unreg(struct hinic_pfhwdev *pfhwdev, + u8 cmd) +{ + u8 cmd_idx; + + cmd_idx = pfhwdev->proc.cmd_num; + if (cmd_idx >= HINIC_COMM_SELF_CMD_MAX) { + dev_err(&pfhwdev->hwdev.hwif->pdev->dev, "Unregister recv mgmt process failed, cmd: 0x%x\n", + cmd); + return; + } + + for (cmd_idx = 0; cmd_idx < HINIC_COMM_SELF_CMD_MAX; cmd_idx++) { + if (cmd == pfhwdev->proc.info[cmd_idx].cmd) { + pfhwdev->proc.info[cmd_idx].cmd = 0; + pfhwdev->proc.info[cmd_idx].proc = NULL; + pfhwdev->proc.cmd_num--; + } + } +} + +static void comm_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_pfhwdev *pfhwdev = handle; + u8 cmd_idx; + + for (cmd_idx = 0; cmd_idx < pfhwdev->proc.cmd_num; cmd_idx++) { + if (cmd == pfhwdev->proc.info[cmd_idx].cmd) { + if (!pfhwdev->proc.info[cmd_idx].proc) { + dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, + "PF recv mgmt comm msg handle null, cmd: 0x%x\n", + cmd); + } else { + pfhwdev->proc.info[cmd_idx].proc + (&pfhwdev->hwdev, buf_in, in_size, + buf_out, out_size); + } + + return; + } + } + + dev_warn(&pfhwdev->hwdev.hwif->pdev->dev, "Received unknown mgmt cpu event: 0x%x\n", + cmd); + + *out_size = 0; +} + +/* pf fault report event */ +static void pf_fault_event_handler(void *dev, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cmd_fault_event *fault_event = buf_in; + struct hinic_hwdev *hwdev = dev; + + if (in_size != sizeof(*fault_event)) { + dev_err(&hwdev->hwif->pdev->dev, "Invalid fault event report, length: %d, should be %zu\n", + in_size, sizeof(*fault_event)); + return; + } + + if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->hw_fault_reporter)) + return; + + devlink_health_report(hwdev->devlink_dev->hw_fault_reporter, + "HW fatal error reported", &fault_event->event); +} + +static void mgmt_watchdog_timeout_event_handler(void *dev, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_mgmt_watchdog_info *watchdog_info = buf_in; + struct hinic_hwdev *hwdev = dev; + + if (in_size != sizeof(*watchdog_info)) { + dev_err(&hwdev->hwif->pdev->dev, "Invalid mgmt watchdog report, length: %d, should be %zu\n", + in_size, sizeof(*watchdog_info)); + return; + } + + if (!hwdev->devlink_dev || IS_ERR_OR_NULL(hwdev->devlink_dev->fw_fault_reporter)) + return; + + devlink_health_report(hwdev->devlink_dev->fw_fault_reporter, + "FW fatal error reported", watchdog_info); +} + /** * init_pfhwdev - Initialize the extended components of PF * @pfhwdev: the HW device for PF @@ -640,20 +750,37 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) 
return err; } - err = hinic_func_to_func_init(hwdev); + err = hinic_devlink_register(hwdev->devlink_dev, &pdev->dev); if (err) { - dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); + dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); return err; } - if (!HINIC_IS_VF(hwif)) + err = hinic_func_to_func_init(hwdev); + if (err) { + dev_err(&hwif->pdev->dev, "Failed to init mailbox\n"); + hinic_devlink_unregister(hwdev->devlink_dev); + hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); + return err; + } + + if (!HINIC_IS_VF(hwif)) { hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, pfhwdev, nic_mgmt_msg_handler); - else + hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + pfhwdev, comm_mgmt_msg_handler); + hinic_comm_recv_mgmt_self_cmd_reg(pfhwdev, + HINIC_COMM_CMD_FAULT_REPORT, + pf_fault_event_handler); + hinic_comm_recv_mgmt_self_cmd_reg + (pfhwdev, HINIC_COMM_CMD_WATCHDOG_INFO, + mgmt_watchdog_timeout_event_handler); + } else { hinic_register_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC, nic_mgmt_msg_handler); + } hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); @@ -670,14 +797,23 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); - if (!HINIC_IS_VF(hwdev->hwif)) + if (!HINIC_IS_VF(hwdev->hwif)) { + hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev, + HINIC_COMM_CMD_WATCHDOG_INFO); + hinic_comm_recv_mgmt_self_cmd_unreg(pfhwdev, + HINIC_COMM_CMD_FAULT_REPORT); + hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, + HINIC_MOD_COMM); hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); - else + } else { hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); + } hinic_func_to_func_free(hwdev); + hinic_devlink_unregister(hwdev->devlink_dev); + hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } @@ -777,7 +913,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, * * Initialize the NIC HW device and return a pointer to it **/ -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink) { struct hinic_pfhwdev *pfhwdev; struct hinic_hwdev *hwdev; @@ -802,6 +938,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) hwdev = &pfhwdev->hwdev; hwdev->hwif = hwif; + hwdev->devlink_dev = devlink_priv(devlink); + hwdev->devlink_dev->hwdev = hwdev; err = init_msix(hwdev); if (err) { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 94593a8ad667..2fb5f784f116 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" @@ -164,9 +165,12 @@ enum hinic_ucode_cmd { #define NIC_RSS_CMD_TEMP_FREE 0x02 enum hinic_mgmt_msg_cmd { - HINIC_MGMT_MSG_CMD_BASE = 160, + HINIC_MGMT_MSG_CMD_BASE = 0xA0, - HINIC_MGMT_MSG_CMD_LINK_STATUS = 160, + HINIC_MGMT_MSG_CMD_LINK_STATUS = 0xA0, + + HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT = 0xE5, + HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT = 0xE6, HINIC_MGMT_MSG_CMD_MAX, }; @@ -348,6 +352,7 @@ struct hinic_hwdev { struct hinic_cap nic_cap; u8 port_id; + struct hinic_devlink_priv *devlink_dev; }; struct hinic_nic_cb { @@ -359,12 +364,29 @@ struct hinic_nic_cb { unsigned long cb_state; }; +#define HINIC_COMM_SELF_CMD_MAX 4 + +typedef void (*comm_mgmt_self_msg_proc)(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + 
+struct comm_mgmt_self_msg_sub_info { + u8 cmd; + comm_mgmt_self_msg_proc proc; +}; + +struct comm_mgmt_self_msg_info { + u8 cmd_num; + struct comm_mgmt_self_msg_sub_info info[HINIC_COMM_SELF_CMD_MAX]; +}; + struct hinic_pfhwdev { struct hinic_hwdev hwdev; struct hinic_pf_to_mgmt pf_to_mgmt; struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD]; + + struct comm_mgmt_self_msg_info proc; }; struct hinic_dev_cap { @@ -386,6 +408,126 @@ struct hinic_dev_cap { u8 rsvd3[204]; }; +union hinic_fault_hw_mgmt { + u32 val[4]; + /* valid only type == FAULT_TYPE_CHIP */ + struct { + u8 node_id; + u8 err_level; + u16 err_type; + u32 err_csr_addr; + u32 err_csr_value; + /* func_id valid only if err_level == FAULT_LEVEL_SERIOUS_FLR */ + u16 func_id; + u16 rsvd2; + } chip; + + /* valid only if type == FAULT_TYPE_UCODE */ + struct { + u8 cause_id; + u8 core_id; + u8 c_id; + u8 rsvd3; + u32 epc; + u32 rsvd4; + u32 rsvd5; + } ucode; + + /* valid only if type == FAULT_TYPE_MEM_RD_TIMEOUT || + * FAULT_TYPE_MEM_WR_TIMEOUT + */ + struct { + u32 err_csr_ctrl; + u32 err_csr_data; + u32 ctrl_tab; + u32 mem_index; + } mem_timeout; + + /* valid only if type == FAULT_TYPE_REG_RD_TIMEOUT || + * FAULT_TYPE_REG_WR_TIMEOUT + */ + struct { + u32 err_csr; + u32 rsvd6; + u32 rsvd7; + u32 rsvd8; + } reg_timeout; + + struct { + /* 0: read; 1: write */ + u8 op_type; + u8 port_id; + u8 dev_ad; + u8 rsvd9; + u32 csr_addr; + u32 op_data; + u32 rsvd10; + } phy_fault; +}; + +struct hinic_fault_event { + u8 type; + u8 fault_level; + u8 rsvd0[2]; + union hinic_fault_hw_mgmt event; +}; + +struct hinic_cmd_fault_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + struct hinic_fault_event event; +}; + +enum hinic_fault_type { + FAULT_TYPE_CHIP, + FAULT_TYPE_UCODE, + FAULT_TYPE_MEM_RD_TIMEOUT, + FAULT_TYPE_MEM_WR_TIMEOUT, + FAULT_TYPE_REG_RD_TIMEOUT, + FAULT_TYPE_REG_WR_TIMEOUT, + FAULT_TYPE_PHY_FAULT, + FAULT_TYPE_MAX, +}; + +#define FAULT_SHOW_STR_LEN 16 + +enum hinic_fault_err_level { + FAULT_LEVEL_FATAL, + FAULT_LEVEL_SERIOUS_RESET, + FAULT_LEVEL_SERIOUS_FLR, + FAULT_LEVEL_GENERAL, + FAULT_LEVEL_SUGGESTION, + FAULT_LEVEL_MAX +}; + +struct hinic_mgmt_watchdog_info { + u8 status; + u8 version; + u8 rsvd0[6]; + + u32 curr_time_h; + u32 curr_time_l; + u32 task_id; + u32 rsv; + + u32 reg[13]; + u32 pc; + u32 lr; + u32 cpsr; + + u32 stack_top; + u32 stack_bottom; + u32 sp; + u32 curr_used; + u32 peak_used; + u32 is_overflow; + + u32 stack_actlen; + u8 data[1024]; +}; + void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, enum hinic_mgmt_msg_cmd cmd, void *handle, void (*handler)(void *handle, void *buf_in, @@ -407,7 +549,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth); void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev); -struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devlink); void hinic_free_hwdev(struct hinic_hwdev *hwdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index e0f5a81d8620..f5a46d5bb007 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -12,8 +12,10 @@ #include #include #include +#include #include +#include "hinic_devlink.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_api_cmd.h" @@ -617,10 +619,15 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, if (HINIC_IS_VF(hwif)) return 0; + err = 
hinic_health_reporters_create(hwdev->devlink_dev); + if (err) + return err; + sema_init(&pf_to_mgmt->sync_msg_lock, 1); pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt"); if (!pf_to_mgmt->workq) { dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n"); + hinic_health_reporters_destroy(hwdev->devlink_dev); return -ENOMEM; } pf_to_mgmt->sync_msg_id = 0; @@ -628,12 +635,14 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, err = alloc_msg_buf(pf_to_mgmt); if (err) { dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); + hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); + hinic_health_reporters_destroy(hwdev->devlink_dev); return err; } @@ -658,4 +667,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); hinic_api_cmd_free(pf_to_mgmt->cmd_chain); destroy_workqueue(pf_to_mgmt->workq); + hinic_health_reporters_destroy(hwdev->devlink_dev); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index 21b93b654d6b..f626100b85c1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -81,6 +81,8 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, + HINIC_COMM_CMD_FAULT_REPORT = 0x37, + HINIC_COMM_CMD_SET_LED_STATUS = 0x4a, HINIC_COMM_CMD_L2NIC_RESET = 0x4b, @@ -89,6 +91,8 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_GET_BOARD_INFO = 0x52, + HINIC_COMM_CMD_WATCHDOG_INFO = 0x56, + HINIC_COMM_CMD_MAX, }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index c4c6f9c29f0e..501056fd32ee 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -444,8 +444,11 @@ int hinic_open(struct net_device *netdev) if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) hinic_notify_all_vfs_link_changed(nic_dev->hwdev, link_state); - if (link_state == HINIC_LINK_STATE_UP) + if (link_state == HINIC_LINK_STATE_UP) { nic_dev->flags |= HINIC_LINK_UP; + nic_dev->cable_unplugged = false; + nic_dev->module_unrecognized = false; + } nic_dev->flags |= HINIC_INTF_UP; @@ -935,6 +938,8 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, down(&nic_dev->mgmt_lock); nic_dev->flags |= HINIC_LINK_UP; + nic_dev->cable_unplugged = false; + nic_dev->module_unrecognized = false; if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == (HINIC_LINK_UP | HINIC_INTF_UP)) { @@ -971,6 +976,39 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, *out_size = sizeof(*ret_link_status); } +static void cable_plug_event(void *handle, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_cable_plug_event *plug_event = buf_in; + struct hinic_dev *nic_dev = handle; + + nic_dev->cable_unplugged = plug_event->plugged ? 
false : true; + + *out_size = sizeof(*plug_event); + plug_event = buf_out; + plug_event->status = 0; +} + +static void link_err_event(void *handle, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_link_err_event *link_err = buf_in; + struct hinic_dev *nic_dev = handle; + + if (link_err->err_type >= LINK_ERR_NUM) + netif_info(nic_dev, link, nic_dev->netdev, + "Link failed, Unknown error type: 0x%x\n", + link_err->err_type); + else + nic_dev->module_unrecognized = true; + + *out_size = sizeof(*link_err); + link_err = buf_out; + link_err->status = 0; +} + static int set_features(struct hinic_dev *nic_dev, netdev_features_t pre_features, netdev_features_t features, bool force_change) @@ -1077,28 +1115,24 @@ static int nic_dev_init(struct pci_dev *pdev) struct hinic_rx_mode_work *rx_mode_work; struct hinic_txq_stats *tx_stats; struct hinic_rxq_stats *rx_stats; - struct hinic_devlink_priv *priv; struct hinic_dev *nic_dev; struct net_device *netdev; struct hinic_hwdev *hwdev; struct devlink *devlink; int err, num_qps; - hwdev = hinic_init_hwdev(pdev); - if (IS_ERR(hwdev)) { - dev_err(&pdev->dev, "Failed to initialize HW device\n"); - return PTR_ERR(hwdev); - } - devlink = hinic_devlink_alloc(); if (!devlink) { dev_err(&pdev->dev, "Hinic devlink alloc failed\n"); - err = -ENOMEM; - goto err_devlink_alloc; + return -ENOMEM; } - priv = devlink_priv(devlink); - priv->hwdev = hwdev; + hwdev = hinic_init_hwdev(pdev, devlink); + if (IS_ERR(hwdev)) { + dev_err(&pdev->dev, "Failed to initialize HW device\n"); + hinic_devlink_free(devlink); + return PTR_ERR(hwdev); + } num_qps = hinic_hwdev_num_qps(hwdev); if (num_qps <= 0) { @@ -1161,10 +1195,6 @@ static int nic_dev_init(struct pci_dev *pdev) goto err_workq; } - err = hinic_devlink_register(devlink, &pdev->dev); - if (err) - goto err_devlink_reg; - pci_set_drvdata(pdev, netdev); err = hinic_port_get_mac(nic_dev, netdev->dev_addr); @@ -1206,6 +1236,12 @@ static int nic_dev_init(struct pci_dev *pdev) hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, nic_dev, link_status_event_handler); + hinic_hwdev_cb_register(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT, + nic_dev, cable_plug_event); + hinic_hwdev_cb_register(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT, + nic_dev, link_err_event); err = set_features(nic_dev, 0, nic_dev->netdev->features, true); if (err) @@ -1237,6 +1273,10 @@ static int nic_dev_init(struct pci_dev *pdev) err_init_intr: err_set_pfc: err_set_features: + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT); + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); cancel_work_sync(&rx_mode_work->work); @@ -1246,17 +1286,15 @@ static int nic_dev_init(struct pci_dev *pdev) err_add_mac: err_get_mac: pci_set_drvdata(pdev, NULL); -err_devlink_reg: destroy_workqueue(nic_dev->workq); - err_workq: err_vlan_bitmap: free_netdev(netdev); err_alloc_etherdev: err_num_qps: -err_devlink_alloc: hinic_free_hwdev(hwdev); + hinic_devlink_free(devlink); return err; } @@ -1343,6 +1381,7 @@ static void hinic_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct hinic_dev *nic_dev = netdev_priv(netdev); + struct devlink *devlink = nic_dev->devlink; struct hinic_rx_mode_work *rx_mode_work; if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) { @@ -1356,6 +1395,10 @@ static void hinic_remove(struct pci_dev *pdev) hinic_port_del_mac(nic_dev, 
netdev->dev_addr, 0); + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_ERR_EVENT); + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_CABLE_PLUG_EVENT); hinic_hwdev_cb_unregister(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS); @@ -1364,16 +1407,14 @@ static void hinic_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); - hinic_devlink_unregister(nic_dev->devlink); - destroy_workqueue(nic_dev->workq); - hinic_devlink_free(nic_dev->devlink); - hinic_free_hwdev(nic_dev->hwdev); free_netdev(netdev); + hinic_devlink_free(devlink); + pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 14931adaffb8..9c3cbe45c9ec 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -189,6 +189,31 @@ struct hinic_port_link_status { u8 port_id; }; +struct hinic_cable_plug_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 plugged; /* 0: unplugged, 1: plugged */ + u8 port_id; +}; + +enum link_err_type { + LINK_ERR_MODULE_UNRECOGENIZED, + LINK_ERR_NUM, +}; + +struct hinic_link_err_event { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u8 err_type; + u8 port_id; +}; + struct hinic_port_func_state_cmd { u8 status; u8 version; From 90f86b8a36c065286b743eed29661fc5cd00d342 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Sat, 25 Jul 2020 15:11:19 +0800 Subject: [PATCH 1734/2056] hinic: add log in exception handling processes improve the error message when functions return failure and dump relevant registers in some exception handling processes Signed-off-by: Luo bin Signed-off-by: David S. Miller --- .../ethernet/huawei/hinic/hinic_hw_api_cmd.c | 27 +++++++- .../ethernet/huawei/hinic/hinic_hw_api_cmd.h | 4 ++ .../net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 2 + .../net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 2 + .../net/ethernet/huawei/hinic/hinic_hw_dev.c | 12 ++-- .../net/ethernet/huawei/hinic/hinic_hw_eqs.c | 39 ++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_eqs.h | 6 +- .../net/ethernet/huawei/hinic/hinic_hw_if.c | 23 +++++++ .../net/ethernet/huawei/hinic/hinic_hw_if.h | 10 ++- .../net/ethernet/huawei/hinic/hinic_hw_mbox.c | 2 + .../net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 1 + .../net/ethernet/huawei/hinic/hinic_port.c | 62 +++++++++---------- .../net/ethernet/huawei/hinic/hinic_sriov.c | 6 +- 13 files changed, 151 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 583fd24c29cf..29e88e25a4a4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -112,6 +112,26 @@ static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); } +static void dump_api_chain_reg(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwif, addr); + + dev_err(&chain->hwif->pdev->dev, "Chain type: 0x%x, cpld error: 0x%x, check error: 0x%x, current fsm: 0x%x\n", + chain->chain_type, HINIC_API_CMD_STATUS_GET(val, CPLD_ERR), + HINIC_API_CMD_STATUS_GET(val, CHKSUM_ERR), + HINIC_API_CMD_STATUS_GET(val, FSM)); + + dev_err(&chain->hwif->pdev->dev, "Chain hw current ci: 0x%x\n", + HINIC_API_CMD_STATUS_GET(val, CONS_IDX)); + + addr = 
HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwif, addr); + dev_err(&chain->hwif->pdev->dev, "Chain hw current pi: 0x%x\n", val); +} + /** * chain_busy - check if the chain is still processing last requests * @chain: chain to check @@ -131,8 +151,10 @@ static int chain_busy(struct hinic_api_cmd_chain *chain) /* check for a space for a new command */ if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { - dev_err(&pdev->dev, "API CMD chain %d is busy\n", - chain->chain_type); + dev_err(&pdev->dev, "API CMD chain %d is busy, cons_idx: %d, prod_idx: %d\n", + chain->chain_type, chain->cons_idx, + chain->prod_idx); + dump_api_chain_reg(chain); return -EBUSY; } break; @@ -332,6 +354,7 @@ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) err = wait_for_status_poll(chain); if (err) { dev_err(&pdev->dev, "API CMD Poll status timeout\n"); + dump_api_chain_reg(chain); break; } break; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h index 0ba00fd828df..6d1654b050ad 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -103,10 +103,14 @@ HINIC_API_CMD_STATUS_HEADER_##member##_MASK) #define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 +#define HINIC_API_CMD_STATUS_FSM_SHIFT 24 #define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 +#define HINIC_API_CMD_STATUS_CPLD_ERR_SHIFT 30 #define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF +#define HINIC_API_CMD_STATUS_FSM_MASK 0xFU #define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3 +#define HINIC_API_CMD_STATUS_CPLD_ERR_MASK 0x1U #define HINIC_API_CMD_STATUS_GET(val, member) \ (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index cb5b6e5f787f..e0eb294779ec 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -401,6 +401,7 @@ static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, spin_unlock_bh(&cmdq->cmdq_lock); + hinic_dump_ceq_info(cmdq->hwdev); return -ETIMEDOUT; } @@ -807,6 +808,7 @@ static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, cmdq_type = HINIC_CMDQ_SYNC; for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + cmdqs->cmdq[cmdq_type].hwdev = hwdev; err = init_cmdq(&cmdqs->cmdq[cmdq_type], &cmdqs->saved_wqs[cmdq_type], cmdq_type, db_area[cmdq_type]); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h index 3e4b0aef9fe6..f40c31e1879f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -130,6 +130,8 @@ struct hinic_cmdq_ctxt { }; struct hinic_cmdq { + struct hinic_hwdev *hwdev; + struct hinic_wq *wq; enum hinic_cmdq_type cmdq_type; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 55672ee18aab..0c737765d113 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -258,9 +258,9 @@ static int init_fw_ctxt(struct hinic_hwdev *hwdev) &fw_ctxt, sizeof(fw_ctxt), &fw_ctxt, &out_size); if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) { - dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n", - fw_ctxt.status); - return -EFAULT; + dev_err(&pdev->dev, "Failed to init FW ctxt, err: %d, status: 
0x%x, out size: 0x%x\n", + err, fw_ctxt.status, out_size); + return -EIO; } return 0; @@ -425,9 +425,9 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) &cmd_base_qpn, sizeof(cmd_base_qpn), &cmd_base_qpn, &out_size); if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) { - dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n", - cmd_base_qpn.status); - return -EFAULT; + dev_err(&pdev->dev, "Failed to get base qpn, err: %d, status: 0x%x, out size: 0x%x\n", + err, cmd_base_qpn.status, out_size); + return -EIO; } *base_qpn = cmd_base_qpn.qpn; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 397936cac304..ca8cb68a8d20 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -953,3 +953,42 @@ void hinic_ceqs_free(struct hinic_ceqs *ceqs) for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) remove_eq(&ceqs->ceq[q_id]); } + +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_eq *eq = NULL; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->func_to_io.ceqs.num_ceqs; q_id++) { + eq = &hwdev->func_to_io.ceqs.ceq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + dev_err(&hwdev->hwif->pdev->dev, "Ceq id: %d, ci: 0x%08x, sw_ci: 0x%08x, pi: 0x%x, tasklet_state: 0x%lx, wrap: %d, ceqe: 0x%x\n", + q_id, ci, eq->cons_idx, pi, + eq->ceq_tasklet.state, + eq->wrapped, be32_to_cpu(*(__be32 *)(GET_CURR_CEQ_ELEM(eq)))); + } +} + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev) +{ + struct hinic_aeq_elem *aeqe_pos = NULL; + struct hinic_eq *eq = NULL; + u32 addr, ci, pi; + int q_id; + + for (q_id = 0; q_id < hwdev->aeqs.num_aeqs; q_id++) { + eq = &hwdev->aeqs.aeq[q_id]; + addr = EQ_CONS_IDX_REG_ADDR(eq); + ci = hinic_hwif_read_reg(hwdev->hwif, addr); + addr = EQ_PROD_IDX_REG_ADDR(eq); + pi = hinic_hwif_read_reg(hwdev->hwif, addr); + aeqe_pos = GET_CURR_AEQ_ELEM(eq); + dev_err(&hwdev->hwif->pdev->dev, "Aeq id: %d, ci: 0x%08x, pi: 0x%x, work_state: 0x%x, wrap: %d, desc: 0x%x\n", + q_id, ci, pi, work_busy(&eq->aeq_work.work), + eq->wrapped, be32_to_cpu(aeqe_pos->desc)); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h index 74b9ff90640c..43065fc70869 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -162,7 +162,7 @@ enum hinic_eqe_state { struct hinic_aeq_elem { u8 data[HINIC_AEQE_DATA_SIZE]; - u32 desc; + __be32 desc; }; struct hinic_eq_work { @@ -254,4 +254,8 @@ int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, void hinic_ceqs_free(struct hinic_ceqs *ceqs); +void hinic_dump_ceq_info(struct hinic_hwdev *hwdev); + +void hinic_dump_aeq_info(struct hinic_hwdev *hwdev); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index cf127d896ba6..bc8925c0c982 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -21,6 +21,8 @@ #define WAIT_HWIF_READY_TIMEOUT 10000 +#define HINIC_SELFTEST_RESULT 0x883C + /** * hinic_msix_attr_set - set message attribute for msix entry * @hwif: the HW interface of a pci function device @@ -369,6 +371,26 @@ u16 hinic_pf_id_of_vf_hw(struct hinic_hwif *hwif) return HINIC_FA0_GET(attr0, 
PF_IDX); } +static void __print_selftest_reg(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + if (attr1 == HINIC_PCIE_LINK_DOWN) { + dev_err(&hwif->pdev->dev, "PCIE is link down\n"); + return; + } + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + if (HINIC_FA0_GET(attr0, FUNC_TYPE) != HINIC_VF && + !HINIC_FA0_GET(attr0, PCI_INTF_IDX)) + dev_err(&hwif->pdev->dev, "Selftest reg: 0x%08x\n", + hinic_hwif_read_reg(hwif, HINIC_SELFTEST_RESULT)); +} + /** * hinic_init_hwif - initialize the hw interface * @hwif: the HW interface of a pci function device @@ -398,6 +420,7 @@ int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) err = wait_hwif_ready(hwif); if (err) { dev_err(&pdev->dev, "HW interface is not ready\n"); + __print_selftest_reg(hwif); goto err_hwif_ready; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 0872e035faa1..c06f2253151e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -12,6 +12,8 @@ #include #include +#define HINIC_PCIE_LINK_DOWN 0xFFFFFFFF + #define HINIC_DMA_ATTR_ST_SHIFT 0 #define HINIC_DMA_ATTR_AT_SHIFT 8 #define HINIC_DMA_ATTR_PH_SHIFT 10 @@ -249,13 +251,17 @@ struct hinic_hwif { static inline u32 hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) { - return be32_to_cpu(readl(hwif->cfg_regs_bar + reg)); + u32 out = readl(hwif->cfg_regs_bar + reg); + + return be32_to_cpu(*(__be32 *)&out); } static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, u32 val) { - writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); + __be32 in = cpu_to_be32(val); + + writel(*(u32 *)&in, hwif->cfg_regs_bar + reg); } int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c index bc2f87e6cb5d..47c93f946b94 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c @@ -650,6 +650,7 @@ wait_for_mbox_seg_completion(struct hinic_mbox_func_to_func *func_to_func, if (!wait_for_completion_timeout(done, jif)) { dev_err(&hwdev->hwif->pdev->dev, "Send mailbox segment timeout\n"); dump_mox_reg(hwdev); + hinic_dump_aeq_info(hwdev); return -ETIMEDOUT; } @@ -897,6 +898,7 @@ int hinic_mbox_to_func(struct hinic_mbox_func_to_func *func_to_func, set_mbox_to_func_event(func_to_func, EVENT_TIMEOUT); dev_err(&func_to_func->hwif->pdev->dev, "Send mbox msg timeout, msg_id: %d\n", msg_info.msg_id); + hinic_dump_aeq_info(func_to_func->hwdev); err = -ETIMEDOUT; goto err_send_mbox; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index f5a46d5bb007..c6ce5966284c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -276,6 +276,7 @@ static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, if (!wait_for_completion_timeout(recv_done, timeo)) { dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + hinic_dump_aeq_info(pf_to_mgmt->hwdev); err = -ETIMEDOUT; goto unlock_sync_msg; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index ba358bbb74a2..02cd635d6914 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -61,8 +61,8 @@ static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, (port_mac_cmd.status && port_mac_cmd.status != HINIC_PF_SET_VF_ALREADY && port_mac_cmd.status != HINIC_MGMT_STATUS_EXIST)) { - dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n", - port_mac_cmd.status); + dev_err(&pdev->dev, "Failed to change MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_mac_cmd.status, out_size); return -EFAULT; } @@ -129,8 +129,8 @@ int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) &port_mac_cmd, sizeof(port_mac_cmd), &port_mac_cmd, &out_size); if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { - dev_err(&pdev->dev, "Failed to get mac, ret = %d\n", - port_mac_cmd.status); + dev_err(&pdev->dev, "Failed to get mac, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_mac_cmd.status, out_size); return -EFAULT; } @@ -172,9 +172,9 @@ int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, &port_mtu_cmd, sizeof(port_mtu_cmd), &port_mtu_cmd, &out_size); - if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) { - dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n", - port_mtu_cmd.status); + if (err || out_size != sizeof(port_mtu_cmd) || port_mtu_cmd.status) { + dev_err(&pdev->dev, "Failed to set mtu, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_mtu_cmd.status, out_size); return -EFAULT; } @@ -264,8 +264,8 @@ int hinic_port_link_state(struct hinic_dev *nic_dev, &link_cmd, sizeof(link_cmd), &link_cmd, &out_size); if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) { - dev_err(&pdev->dev, "Failed to get link state, ret = %d\n", - link_cmd.status); + dev_err(&pdev->dev, "Failed to get link state, err: %d, status: 0x%x, out size: 0x%x\n", + err, link_cmd.status, out_size); return -EINVAL; } @@ -298,8 +298,8 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state) &port_state, sizeof(port_state), &port_state, &out_size); if (err || (out_size != sizeof(port_state)) || port_state.status) { - dev_err(&pdev->dev, "Failed to set port state, ret = %d\n", - port_state.status); + dev_err(&pdev->dev, "Failed to set port state, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_state.status, out_size); return -EFAULT; } @@ -330,8 +330,8 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev, &func_state, sizeof(func_state), &func_state, &out_size); if (err || (out_size != sizeof(func_state)) || func_state.status) { - dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n", - func_state.status); + dev_err(&pdev->dev, "Failed to set port func state, err: %d, status: 0x%x, out size: 0x%x\n", + err, func_state.status, out_size); return -EFAULT; } @@ -361,9 +361,9 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev, port_cap, &out_size); if (err || (out_size != sizeof(*port_cap)) || port_cap->status) { dev_err(&pdev->dev, - "Failed to get port capabilities, ret = %d\n", - port_cap->status); - return -EINVAL; + "Failed to get port capabilities, err: %d, status: 0x%x, out size: 0x%x\n", + err, port_cap->status, out_size); + return -EIO; } return 0; @@ -393,9 +393,9 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state) &tso_cfg, &out_size); if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) { dev_err(&pdev->dev, - "Failed to set port tso, ret = %d\n", - tso_cfg.status); - return -EINVAL; + "Failed to set port tso, err: %d, status: 0x%x, out size: 
0x%x\n", + err, tso_cfg.status, out_size); + return -EIO; } return 0; @@ -423,9 +423,9 @@ int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en) &rx_csum_cfg, &out_size); if (err || !out_size || rx_csum_cfg.status) { dev_err(&pdev->dev, - "Failed to set rx csum offload, ret = %d\n", - rx_csum_cfg.status); - return -EINVAL; + "Failed to set rx csum offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, rx_csum_cfg.status, out_size); + return -EIO; } return 0; @@ -480,9 +480,9 @@ int hinic_set_max_qnum(struct hinic_dev *nic_dev, u8 num_rqs) &rq_num, &out_size); if (err || !out_size || rq_num.status) { dev_err(&pdev->dev, - "Failed to rxq number, ret = %d\n", - rq_num.status); - return -EINVAL; + "Failed to set rxq number, err: %d, status: 0x%x, out size: 0x%x\n", + err, rq_num.status, out_size); + return -EIO; } return 0; @@ -508,9 +508,9 @@ static int hinic_set_rx_lro(struct hinic_dev *nic_dev, u8 ipv4_en, u8 ipv6_en, &lro_cfg, &out_size); if (err || !out_size || lro_cfg.status) { dev_err(&pdev->dev, - "Failed to set lro offload, ret = %d\n", - lro_cfg.status); - return -EINVAL; + "Failed to set lro offload, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_cfg.status, out_size); + return -EIO; } return 0; @@ -542,10 +542,10 @@ static int hinic_set_rx_lro_timer(struct hinic_dev *nic_dev, u32 timer_value) if (err || !out_size || lro_timer.status) { dev_err(&pdev->dev, - "Failed to set lro timer, ret = %d\n", - lro_timer.status); + "Failed to set lro timer, err: %d, status: 0x%x, out size: 0x%x\n", + err, lro_timer.status, out_size); - return -EINVAL; + return -EIO; } return 0; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index caf7e81e3f62..141206917e4d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -40,9 +40,9 @@ static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr, if (err || out_size != sizeof(mac_info) || (mac_info.status && mac_info.status != HINIC_PF_SET_VF_ALREADY && mac_info.status != HINIC_MGMT_STATUS_EXIST)) { - dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to change MAC, ret = %d\n", - mac_info.status); - return -EFAULT; + dev_err(&hwdev->func_to_io.hwif->pdev->dev, "Failed to set MAC, err: %d, status: 0x%x, out size: 0x%x\n", + err, mac_info.status, out_size); + return -EIO; } return 0; From 608b4adab17889576c6636311f495b1a52418c69 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Sat, 25 Jul 2020 13:17:07 -0700 Subject: [PATCH 1735/2056] net_sched: initialize timer earlier in red_init() When red_init() fails, red_destroy() is called to clean up. If the timer is not initialized yet, del_timer_sync() will complain. So we have to move timer_setup() before any failure. Reported-and-tested-by: syzbot+6e95a4fabf88dc217145@syzkaller.appspotmail.com Fixes: aee9caa03fc3 ("net: sched: sch_red: Add qevents "early_drop" and "mark"") Cc: Petr Machata Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Reviewed-by: Petr Machata Signed-off-by: David S. 
Miller --- net/sched/sch_red.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 4cc0ad0b1189..deac82f3ad7b 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c @@ -333,6 +333,10 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, struct nlattr *tb[TCA_RED_MAX + 1]; int err; + q->qdisc = &noop_qdisc; + q->sch = sch; + timer_setup(&q->adapt_timer, red_adaptative_timer, 0); + if (!opt) return -EINVAL; @@ -341,10 +345,6 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt, if (err < 0) return err; - q->qdisc = &noop_qdisc; - q->sch = sch; - timer_setup(&q->adapt_timer, red_adaptative_timer, 0); - err = __red_change(sch, tb, extack); if (err) return err; From b9aaec8f0be518096d1377082e0abe6a85e86ff3 Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Sun, 26 Jul 2020 15:48:16 -0700 Subject: [PATCH 1736/2056] fib: use indirect call wrappers in the most common fib_rules_ops This avoids another indirect call per RX packet which saves us around 20-40 ns. Changelog: v1 -> v2: - Move declarations to fib_rules.h to remove warnings Reported-by: kernel test robot Signed-off-by: Brian Vazquez Signed-off-by: David S. Miller --- include/net/fib_rules.h | 18 ++++++++++++++++++ net/core/fib_rules.c | 16 +++++++++++++--- net/ipv4/fib_rules.c | 12 ++++++++---- net/ipv6/fib6_rules.c | 12 ++++++++---- 4 files changed, 47 insertions(+), 11 deletions(-) diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index a259050f84af..4b10676c69d1 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -10,6 +10,7 @@ #include #include #include +#include struct fib_kuid_range { kuid_t start; @@ -203,4 +204,21 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); + +INDIRECT_CALLABLE_DECLARE(int fib6_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags)); +INDIRECT_CALLABLE_DECLARE(int fib4_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags)); + +INDIRECT_CALLABLE_DECLARE(int fib6_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg)); +INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg)); + +INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg)); +INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg)); #endif diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index bd7eba9066f8..e7a8f87b0bb2 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -14,6 +14,7 @@ #include #include #include +#include static const struct fib_kuid_range fib_kuid_range_unset = { KUIDT_INIT(0), @@ -267,7 +268,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, uid_gt(fl->flowi_uid, rule->uid_range.end)) goto out; - ret = ops->match(rule, fl, flags); + ret = INDIRECT_CALL_INET(ops->match, + fib6_rule_match, + fib4_rule_match, + rule, fl, flags); out: return (rule->flags & FIB_RULE_INVERT) ? 
!ret : ret; } @@ -298,9 +302,15 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, } else if (rule->action == FR_ACT_NOP) continue; else - err = ops->action(rule, fl, flags, arg); + err = INDIRECT_CALL_INET(ops->action, + fib6_rule_action, + fib4_rule_action, + rule, fl, flags, arg); - if (!err && ops->suppress && ops->suppress(rule, arg)) + if (!err && ops->suppress && INDIRECT_CALL_INET(ops->suppress, + fib6_rule_suppress, + fib4_rule_suppress, + rule, arg)) continue; if (err != -EAGAIN) { diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index f99e3bac5cab..ce54a30c2ef1 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -29,6 +29,7 @@ #include #include #include +#include struct fib4_rule { struct fib_rule common; @@ -103,8 +104,9 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, } EXPORT_SYMBOL_GPL(__fib_lookup); -static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, - int flags, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg) { int err = -EAGAIN; struct fib_table *tbl; @@ -138,7 +140,8 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp, return err; } -static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg) { struct fib_result *result = (struct fib_result *) arg->result; struct net_device *dev = NULL; @@ -169,7 +172,8 @@ static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg return true; } -static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +INDIRECT_CALLABLE_SCOPE int fib4_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags) { struct fib4_rule *r = (struct fib4_rule *) rule; struct flowi4 *fl4 = &fl->u.ip4; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 6053ef851555..8f9a83314de7 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -255,8 +256,9 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp, return err; } -static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, - int flags, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule, + struct flowi *flp, int flags, + struct fib_lookup_arg *arg) { if (arg->lookup_ptr == fib6_table_lookup) return fib6_rule_action_alt(rule, flp, flags, arg); @@ -264,7 +266,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, return __fib6_rule_action(rule, flp, flags, arg); } -static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg) +INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule, + struct fib_lookup_arg *arg) { struct fib6_result *res = arg->result; struct rt6_info *rt = res->rt6; @@ -296,7 +299,8 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg return true; } -static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) +INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule, + struct flowi *fl, int flags) { struct fib6_rule *r = (struct fib6_rule *) rule; struct flowi6 *fl6 = &fl->u.ip6; From 15ab7906cc9290afb006df1bb1074907fbcc7061 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Jul 2020 01:34:39 +0200 Subject: [PATCH 1737/2056] net: dsa: rtl8366: Fix 
VLAN semantics The RTL8366 would not handle adding new members (ports) to a VLAN: the code assumed that ->port_vlan_add() was only called once for a single port. When initializing the switch with .configure_vlan_while_not_filtering set to true, the function is called numerous times for adding all ports to VLAN1, which was something the code could not handle. Alter rtl8366_set_vlan() to just |= new members and untagged flags to 4k and MC VLAN table entries alike. This makes it possible to just add new ports to a VLAN. Put in some helpful debug code that can be used to find any further bugs here. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Florian Fainelli Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver") Signed-off-by: Linus Walleij Signed-off-by: David S. Miller --- drivers/net/dsa/rtl8366.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 993cf3ac59d9..2997abeecc4a 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -43,18 +43,26 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, int ret; int i; + dev_dbg(smi->dev, + "setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, member, untag); + /* Update the 4K table */ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k); if (ret) return ret; - vlan4k.member = member; - vlan4k.untag = untag; + vlan4k.member |= member; + vlan4k.untag |= untag; vlan4k.fid = fid; ret = smi->ops->set_vlan_4k(smi, &vlan4k); if (ret) return ret; + dev_dbg(smi->dev, + "resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n", + vid, vlan4k.member, vlan4k.untag); + /* Try to find an existing MC entry for this VID */ for (i = 0; i < smi->num_vlan_mc; i++) { struct rtl8366_vlan_mc vlanmc; @@ -65,11 +73,16 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member, if (vid == vlanmc.vid) { /* update the MC entry */ - vlanmc.member = member; - vlanmc.untag = untag; + vlanmc.member |= member; + vlanmc.untag |= untag; vlanmc.fid = fid; ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); + + dev_dbg(smi->dev, + "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n", + vid, vlanmc.member, vlanmc.untag); + break; } } From 788abc6d9d278ed6fa1fa94db2098481a04152b7 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 27 Jul 2020 01:34:40 +0200 Subject: [PATCH 1738/2056] net: dsa: rtl8366: Fix VLAN set-up Alter rtl8366_vlan_add() to call rtl8366_set_vlan() inside the loop that goes over all VIDs since we now properly support calling that function more than once. Augment the loop to postincrement as this is more intuitive. The loop moved past the last VID but called rtl8366_set_vlan() with the port number instead of the VID, assuming a 1-to-1 correspondence between ports and VIDs. This was also a bug. Cc: DENG Qingfang Cc: Mauri Sandberg Reviewed-by: Florian Fainelli Fixes: d8652956cf37 ("net: dsa: realtek-smi: Add Realtek SMI driver") Signed-off-by: Linus Walleij Signed-off-by: David S. 
Miller --- drivers/net/dsa/rtl8366.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 2997abeecc4a..8f40fbf70a82 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -397,7 +397,7 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) dev_err(smi->dev, "port is DSA or CPU port\n"); - for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { int pvid_val = 0; dev_info(smi->dev, "add VLAN %04x\n", vid); @@ -420,13 +420,13 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port, if (ret < 0) return; } - } - ret = rtl8366_set_vlan(smi, port, member, untag, 0); - if (ret) - dev_err(smi->dev, - "failed to set up VLAN %04x", - vid); + ret = rtl8366_set_vlan(smi, vid, member, untag, 0); + if (ret) + dev_err(smi->dev, + "failed to set up VLAN %04x", + vid); + } } EXPORT_SYMBOL_GPL(rtl8366_vlan_add); From 1057d685c6eb4ef5dbc03f97ff25da8ca2418a86 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 27 Jul 2020 19:01:48 +0800 Subject: [PATCH 1739/2056] net: stmmac: Remove WAKE_MAGIC if HW shows no pmt_magic_frame Remove WAKE_MAGIC from supported modes if the HW capability register shows no support for pmt_magic_frame. Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index eae11c585025..9e0af626a24a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -603,6 +603,8 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; + if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame) + wol->supported &= ~WAKE_MAGIC; wol->wolopts = priv->wolopts; } mutex_unlock(&priv->lock); From 2f45f7a13e99ef2d05ef3afecaee30f314c9b8b2 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 27 Jul 2020 19:02:13 +0800 Subject: [PATCH 1740/2056] net: stmmac: Move device_can_wakeup() check earlier in set_wol If !device_can_wakeup(), there's no need to check further. And return -EOPNOTSUPP rather than -EINVAL if !device_can_wakeup(). Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9e0af626a24a..79795bebd3a2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -615,15 +615,15 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) struct stmmac_priv *priv = netdev_priv(dev); u32 support = WAKE_MAGIC | WAKE_UCAST; + if (!device_can_wakeup(priv->device)) + return -EOPNOTSUPP; + /* By default almost all GMAC devices support the WoL via * magic frame but we can disable it if the HW capability * register shows no support for pmt_magic_frame. 
*/ if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame)) wol->wolopts &= ~WAKE_MAGIC; - if (!device_can_wakeup(priv->device)) - return -EINVAL; - if (wol->wolopts & ~support) return -EINVAL; From e8377e7a29efbe80668b638eccd58f8db3c9ecb7 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 27 Jul 2020 19:02:48 +0800 Subject: [PATCH 1741/2056] net: stmmac: only call pmt() during suspend/resume if HW enables PMT This is to prepare WOL support with phy. Compared with WOL implementation which relies on the MAC's PMT features, in phy supported WOL case, device_may_wakeup() may also be true, but we should not call mac's pmt() function if HW doesn't enable PMT. And during resume, we should call phylink_start() if PMT is disabled. Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 73677c3b33b6..358fd3bf9ef5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5085,7 +5085,7 @@ int stmmac_suspend(struct device *dev) priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); /* Enable Power down mode by programming the PMT regs */ - if (device_may_wakeup(priv->device)) { + if (device_may_wakeup(priv->device) && priv->plat->pmt) { stmmac_pmt(priv, priv->hw, priv->wolopts); priv->irq_wake = 1; } else { @@ -5157,7 +5157,7 @@ int stmmac_resume(struct device *dev) * this bit because it can generate problems while resuming * from another devices (e.g. serial console). */ - if (device_may_wakeup(priv->device)) { + if (device_may_wakeup(priv->device) && priv->plat->pmt) { mutex_lock(&priv->lock); stmmac_pmt(priv, priv->hw, 0); mutex_unlock(&priv->lock); @@ -5200,7 +5200,7 @@ int stmmac_resume(struct device *dev) mutex_unlock(&priv->lock); - if (!device_may_wakeup(priv->device)) { + if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { rtnl_lock(); phylink_start(priv->phylink); rtnl_unlock(); From 1d8e5b0f3f2c6d05697f8192aac7255e6be1e715 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 27 Jul 2020 19:03:09 +0800 Subject: [PATCH 1742/2056] net: stmmac: Support WOL with phy Currently, the stmmac driver WOL implementation relies on MAC's PMT feature. We have a case: the MAC HW doesn't enable PMT, instead, we rely on the phy to support WOL. Implement the support for this case. Signed-off-by: Jisheng Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 11 +++++++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 79795bebd3a2..05d63963fdb7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -600,6 +600,9 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct stmmac_priv *priv = netdev_priv(dev); + if (!priv->plat->pmt) + return phylink_ethtool_get_wol(priv->phylink, wol); + mutex_lock(&priv->lock); if (device_can_wakeup(priv->device)) { wol->supported = WAKE_MAGIC | WAKE_UCAST; @@ -618,6 +621,14 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (!device_can_wakeup(priv->device)) return -EOPNOTSUPP; + if (!priv->plat->pmt) { + int ret = phylink_ethtool_set_wol(priv->phylink, wol); + + if (!ret) + device_set_wakeup_enable(&dev->dev, !!wol->wolopts); + return ret; + } + /* By default almost all GMAC devices support the WoL via * magic frame but we can disable it if the HW capability * register shows no support for pmt_magic_frame. */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 358fd3bf9ef5..32c0c9647b87 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1075,6 +1075,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) */ static int stmmac_init_phy(struct net_device *dev) { + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; struct stmmac_priv *priv = netdev_priv(dev); struct device_node *node; int ret; @@ -1100,6 +1101,9 @@ static int stmmac_init_phy(struct net_device *dev) ret = phylink_connect_phy(priv->phylink, phydev); } + phylink_ethtool_get_wol(priv->phylink, &wol); + device_set_wakeup_capable(priv->device, !!wol.supported); + return ret; } From 77b2898394e3bb843228ee6f90066a23cf67e8a1 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 27 Jul 2020 19:05:20 +0800 Subject: [PATCH 1743/2056] net: stmmac: Speed down the PHY if WoL to save energy When WoL is enabled and the machine is powered off, the PHY remains waiting for wakeup events at max speed, which is a waste of energy. Slow down the PHY speed before stopping the ethernet if WoL is enabled, Signed-off-by: Jisheng Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 32c0c9647b87..89b2b3472852 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2823,6 +2823,8 @@ static int stmmac_open(struct net_device *dev) stmmac_init_coalesce(priv); phylink_start(priv->phylink); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); /* Request the IRQ lines */ ret = request_irq(dev->irq, stmmac_interrupt, @@ -2896,6 +2898,8 @@ static int stmmac_release(struct net_device *dev) if (priv->eee_enabled) del_timer_sync(&priv->eee_ctrl_timer); + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); /* Stop and disconnect the PHY */ phylink_stop(priv->phylink); phylink_disconnect_phy(priv->phylink); @@ -5095,6 +5099,8 @@ int stmmac_suspend(struct device *dev) } else { mutex_unlock(&priv->lock); rtnl_lock(); + if (device_may_wakeup(priv->device)) + phylink_speed_down(priv->phylink, false); phylink_stop(priv->phylink); rtnl_unlock(); mutex_lock(&priv->lock); @@ -5207,6 +5213,8 @@ int stmmac_resume(struct device *dev) if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { rtnl_lock(); phylink_start(priv->phylink); + /* We may have called phylink_speed_down before */ + phylink_speed_up(priv->phylink); rtnl_unlock(); } From 7d9c9b791f9e275b49870b6b10a1ea4d49209de8 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 13 Jul 2020 13:53:04 -0700 Subject: [PATCH 1744/2056] ice: Implement LFC workaround There is a bug where the LFC settings are not being preserved through a link event. The registers in question are the ones that are touched (and restored) when a set_local_mib AQ command is performed. On a link-up event, make sure that a set_local_mib is being performed. Move the function ice_aq_set_lldp_mib() from the DCB specific ice_dcb.c to ice_common.c so that the driver always has access to this AQ command. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 33 ++++++ drivers/net/ethernet/intel/ice/ice_common.h | 3 + drivers/net/ethernet/intel/ice/ice_dcb.c | 33 ------ drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 4 - drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 11 ++ drivers/net/ethernet/intel/ice/ice_main.c | 102 ++++++++++++++++++- 6 files changed, 148 insertions(+), 38 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 50939ad0e760..fd9534568bf3 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4357,3 +4357,36 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) return false; } + +/** + * ice_aq_set_lldp_mib - Set the LLDP MIB + * @hw: pointer to the HW struct + * @mib_type: Local, Remote or both Local and Remote MIBs + * @buf: pointer to the caller-supplied buffer to store the MIB block + * @buf_size: size of the buffer (in bytes) + * @cd: pointer to command details structure or NULL + * + * Set the LLDP MIB. 
(0x0A08) + */ +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd) +{ + struct ice_aqc_lldp_set_local_mib *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.lldp_set_mib; + + if (buf_size == 0 || !buf) + return ICE_ERR_PARAM; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); + + desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); + desc.datalen = cpu_to_le16(buf_size); + + cmd->type = mib_type; + cmd->length = cpu_to_le16(buf_size); + + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); +} diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 906113db6be6..3ebb973878c7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -172,4 +172,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, enum ice_status ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); +enum ice_status +ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, + struct ice_sq_cd *cd); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 2cecc9d08005..2a3147ee0bbb 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -134,39 +134,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd) return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } -/** - * ice_aq_set_lldp_mib - Set the LLDP MIB - * @hw: pointer to the HW struct - * @mib_type: Local, Remote or both Local and Remote MIBs - * @buf: pointer to the caller-supplied buffer to store the MIB block - * @buf_size: size of the buffer (in bytes) - * @cd: pointer to command details structure or NULL - * - * Set the LLDP MIB. 
(0x0A08) - */ -static enum ice_status -ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, - struct ice_sq_cd *cd) -{ - struct ice_aqc_lldp_set_local_mib *cmd; - struct ice_aq_desc desc; - - cmd = &desc.params.lldp_set_mib; - - if (buf_size == 0 || !buf) - return ICE_ERR_PARAM; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); - - desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); - desc.datalen = cpu_to_le16(buf_size); - - cmd->type = mib_type; - cmd->length = cpu_to_le16(buf_size); - - return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); -} - /** * ice_get_dcbx_status * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 979af197f8a3..3b3a2789eb55 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf) goto dcb_error; } - /* If DCB was not enabled previously, we are done */ - if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags)) - return; - mutex_lock(&pf->tc_mutex); if (!pf->hw.port_info->is_sw_lldp) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 323238669572..35c21d9ae009 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { tlan_ctx->cgd_num = ring->dcb_tc; } + +static inline bool ice_is_dcb_active(struct ice_pf *pf) +{ + return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) || + test_bit(ICE_FLAG_DCB_ENA, pf->flags)); +} #else #define ice_dcb_rebuild(pf) do {} while (0) @@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring, return 0; } +static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf) +{ + return false; +} + static inline bool ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf, unsigned int __always_unused txqueue) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 646c2cbc6904..fe0982d0717d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -766,6 +766,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up) } } +/** + * ice_set_dflt_mib - send a default config MIB to the FW + * @pf: private PF struct + * + * This function sends a default configuration MIB to the FW. + * + * If this function errors out at any point, the driver is still able to + * function. The main impact is that LFC may not operate as expected. + * Therefore an error state in this function should be treated with a DBG + * message and continue on with driver rebuild/reenable. 
+ */ +static void ice_set_dflt_mib(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + u8 mib_type, *buf, *lldpmib = NULL; + u16 len, typelen, offset = 0; + struct ice_lldp_org_tlv *tlv; + struct ice_hw *hw; + u32 ouisubtype; + + if (!pf) { + dev_dbg(dev, "%s NULL pf pointer\n", __func__); + return; + } + + hw = &pf->hw; + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; + lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL); + if (!lldpmib) { + dev_dbg(dev, "%s Failed to allocate MIB memory\n", + __func__); + return; + } + + /* Add ETS CFG TLV */ + tlv = (struct ice_lldp_org_tlv *)lldpmib; + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_ETS_TLV_LEN); + tlv->typelen = htons(typelen); + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + buf = tlv->tlvinfo; + buf[0] = 0; + + /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0. + * Octets 5 - 12 are BW values, set octet 5 to 100% BW. + * Octets 13 - 20 are TSA values - leave as zeros + */ + buf[5] = 0x64; + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + offset += len + 2; + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + + /* Add ETS REC TLV */ + buf = tlv->tlvinfo; + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_ETS_REC); + tlv->ouisubtype = htonl(ouisubtype); + + /* First octet of buf is reserved + * Octets 1 - 4 map UP to TC - all UPs map to zero + * Octets 5 - 12 are BW values - set TC 0 to 100%. + * Octets 13 - 20 are TSA value - leave as zeros + */ + buf[5] = 0x64; + offset += len + 2; + tlv = (struct ice_lldp_org_tlv *) + ((char *)tlv + sizeof(tlv->typelen) + len); + + /* Add PFC CFG TLV */ + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | + ICE_IEEE_PFC_TLV_LEN); + tlv->typelen = htons(typelen); + + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | + ICE_IEEE_SUBTYPE_PFC_CFG); + tlv->ouisubtype = htonl(ouisubtype); + + /* Octet 1 left as all zeros - PFC disabled */ + buf[0] = 0x08; + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; + offset += len + 2; + + if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL)) + dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__); + + kfree(lldpmib); +} + /** * ice_link_event - process the link event * @pf: PF that the link event is associated with @@ -821,7 +915,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return result; - ice_dcb_rebuild(pf); + if (ice_is_dcb_active(pf)) { + if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) + ice_dcb_rebuild(pf); + } else { + if (link_up) + ice_set_dflt_mib(pf); + } ice_vsi_link_event(vsi, link_up); ice_print_link_msg(vsi, link_up); From b767ca650f9e9cc3308d8714137264f4a6d908ce Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 13 Jul 2020 13:53:05 -0700 Subject: [PATCH 1745/2056] ice: Fix link broken after GLOBR reset After a GLOBR, the link was broken so that a link up situation was being seen as a link down. The problem was that the rebuild process was updating the port_info link status without doing any of the other things that need to be done when link changes. This was causing the port_info struct to have current "UP" information so that any further UP interrupts were skipped as redundant. 
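A minimal user-space sketch of that failure pattern, using hypothetical names rather than the driver's actual structures: when the cached link state is refreshed outside the event handler, the handler's old-versus-new comparison sees no change and silently drops the event.

#include <stdbool.h>
#include <stdio.h>

static bool cached_link_up;	/* stands in for the cached port_info link state */

static void handle_link_event(bool link_up)
{
	if (link_up == cached_link_up) {
		printf("event ignored as redundant\n");
		return;		/* none of the link-up work ever runs */
	}
	cached_link_up = link_up;
	printf("link %s handled\n", link_up ? "up" : "down");
}

int main(void)
{
	cached_link_up = true;		/* rebuild path refreshed the cache early */
	handle_link_event(true);	/* the real UP event is now dropped */
	return 0;
}
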
The rebuild flow should *not* be updating the port_info struct link information, so eliminate this and leave it to the link event handling code. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fe0982d0717d..7b70ab3703a7 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5913,10 +5913,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) if (err) goto err_sched_init_port; - err = ice_update_link_info(hw->port_info); - if (err) - dev_err(dev, "Get link status error %d\n", err); - /* start misc vector */ err = ice_req_irq_msix_misc(pf); if (err) { From 0ce6c34a8f6f5b63c9ed5f2ee9be52ece1428350 Mon Sep 17 00:00:00 2001 From: Dave Ertman Date: Mon, 13 Jul 2020 13:53:06 -0700 Subject: [PATCH 1746/2056] ice: fix link event handling timing When the driver experiences a link event (especially link up) there can be multiple events generated. Some of these are link fault and still have a state of DOWN set. The problem happens when the link comes UP during the PF driver handling one of the LINK DOWN events. The status of the link is updated and is now seen as UP, so when the actual LINK UP event comes, the port information has already been updated to be seen as UP, even though none of the UP activities have been completed. After the link information has been updated in the link handler and evaluated for MEDIA PRESENT, if the state of the link has been changed to UP, treat the DOWN event as an UP event since the link is now UP. Signed-off-by: Dave Ertman Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 2 +- drivers/net/ethernet/intel/ice/ice_main.c | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 3b3a2789eb55..36abd6b7280c 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -463,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) } } - dev_info(dev, "DCB restored after reset\n"); + dev_info(dev, "DCB info restored\n"); ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); if (ret) { dev_err(dev, "Query Port ETS failed\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 7b70ab3703a7..e441cafaf23b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -894,6 +894,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n", pi->lport); + /* Check if the link state is up after updating link info, and treat + * this event as an UP event since the link is actually UP now. + */ + if (phy_info->link_info.link_info & ICE_AQ_LINK_UP) + link_up = true; + vsi = ice_get_main_vsi(pf); if (!vsi || !vsi->port_info) return -EINVAL; From a54a0b24f4f5a361abaf1c0e6591c57816d4cfa7 Mon Sep 17 00:00:00 2001 From: Nick Nunley Date: Mon, 13 Jul 2020 13:53:07 -0700 Subject: [PATCH 1747/2056] ice: restore VF MSI-X state during PCI reset During a PCI FLR the MSI-X Enable flag in the VF PCI MSI-X capability register will be cleared. 
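For reference, the flag in question is the MSI-X Enable bit in the Message Control word of the VF's MSI-X capability. A minimal sketch of how a driver could probe it (illustrative only; the patch below restores the saved state wholesale with pci_restore_msi_state() rather than checking the bit itself):

#include <linux/pci.h>

/* Illustrative helper, not the driver's actual code path: report whether
 * a VF still has MSI-X enabled after a reset.
 */
static bool vf_msix_enabled(struct pci_dev *vfdev)
{
	u16 ctrl;
	int pos = pci_find_capability(vfdev, PCI_CAP_ID_MSIX);

	if (!pos)
		return false;

	pci_read_config_word(vfdev, pos + PCI_MSIX_FLAGS, &ctrl);
	return !!(ctrl & PCI_MSIX_FLAGS_ENABLE);
}
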
This can lead to issues when a VF is assigned to a VM because in these cases the VF driver receives no indication of the PF PCI error/reset and additionally it is incapable of restoring the cleared flag in the hypervisor configuration space without fully reinitializing the driver interrupt functionality. Since the VF driver is unable to easily resolve this condition on its own, restore the VF MSI-X flag during the PF PCI reset handling. Signed-off-by: Nick Nunley Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 2 ++ .../net/ethernet/intel/ice/ice_virtchnl_pf.c | 30 +++++++++++++++++++ .../net/ethernet/intel/ice/ice_virtchnl_pf.h | 2 ++ 3 files changed, 34 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e441cafaf23b..9b9e30a7d690 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4623,6 +4623,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev) return; } + ice_restore_all_vfs_msi_state(pdev); + ice_do_reset(pf, ICE_RESET_PFR); ice_service_task_restart(pf); mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 9df5ceb26ab9..7061730f0f37 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -4071,3 +4071,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) } } } + +/** + * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR + * @pdev: pointer to a pci_dev structure + * + * Called when recovering from a PF FLR to restore interrupt capability to + * the VFs. + */ +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) +{ + struct pci_dev *vfdev; + u16 vf_id; + int pos; + + if (!pci_num_vf(pdev)) + return; + + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, + &vf_id); + vfdev = pci_get_device(pdev->vendor, vf_id, NULL); + while (vfdev) { + if (vfdev->is_virtfn && vfdev->physfn == pdev) + pci_restore_msi_state(vfdev); + vfdev = pci_get_device(pdev->vendor, vf_id, + vfdev); + } + } +} diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 67aa9110fdd1..049e0b583383 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -114,6 +114,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf); void ice_vc_notify_reset(struct ice_pf *pf); bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr); bool ice_reset_vf(struct ice_vf *vf, bool is_vflr); +void ice_restore_all_vfs_msi_state(struct pci_dev *pdev); int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, @@ -146,6 +147,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf); #define ice_vf_lan_overflow_event(pf, event) do {} while (0) #define ice_print_vfs_mdd_events(pf) do {} while (0) #define ice_print_vf_rx_mdd_event(vf) do {} while (0) +#define ice_restore_all_vfs_msi_state(pdev) do {} while (0) static inline bool ice_reset_all_vfs(struct ice_pf __always_unused *pf, From ca1fdb885e5fe794a2f6171f9b7c7c064a461486 Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Mon, 13 Jul 2020 13:53:08 -0700 Subject: [PATCH 1748/2056] ice: return correct error code from ice_aq_sw_rules Return ICE_ERR_DOES_NOT_EXIST return code if admin 
command error code is ICE_AQ_RC_ENOENT (not exist). ice_aq_sw_rules is used when switch rule is getting added/deleted/updated. In case of delete/update switch rule, admin command can return ICE_AQ_RC_ENOENT error code if such rule does not exist, hence return ICE_ERR_DOES_NOT_EXIST error code from ice_aq_sw_rule, so that caller of this function can decide how to handle ICE_ERR_DOES_NOT_EXIST. Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_switch.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index ccbe1cc64295..c3a6c41385ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -495,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd) { struct ice_aq_desc desc; + enum ice_status status; if (opc != ice_aqc_opc_add_sw_rules && opc != ice_aqc_opc_update_sw_rules && @@ -506,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); desc.params.sw_rules.num_rules_fltr_entry_index = cpu_to_le16(num_rules); - return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); + status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd); + if (opc != ice_aqc_opc_add_sw_rules && + hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) + status = ICE_ERR_DOES_NOT_EXIST; + + return status; } /* ice_init_port_info - Initialize port_info with switch configuration data From a02016de00f84a9e8a1a8ab1d8b9720c97fb3fdc Mon Sep 17 00:00:00 2001 From: Paul M Stillwell Jr Date: Mon, 13 Jul 2020 13:53:09 -0700 Subject: [PATCH 1749/2056] ice: fix overwriting TX/RX descriptor values when rebuilding VSI If a user sets the value of the TX or RX descriptors to some non-default value using 'ethtool -G' then we need to not overwrite the values when we rebuild the VSI. The VSI rebuild could happen as a result of a user setting the number of queues via the 'ethtool -L' command. Fix this by checking to see if the value we have stored is non-zero and if it is then don't change the value. Signed-off-by: Paul M Stillwell Jr Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_lib.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 8f6a191839f1..84202c814c3b 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -127,8 +127,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) case ICE_VSI_PF: case ICE_VSI_CTRL: case ICE_VSI_LB: - vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; - vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; + /* a user could change the values of num_[tr]x_desc using + * ethtool -G so we should keep those values instead of + * overwriting them with the defaults. 
+ */ + if (!vsi->num_rx_desc) + vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC; + if (!vsi->num_tx_desc) + vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC; break; default: dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n", From b3b93d6ce1bd5f42c49dd8e0b059e1dca7fa8f61 Mon Sep 17 00:00:00 2001 From: Tarun Singh Date: Mon, 13 Jul 2020 13:53:10 -0700 Subject: [PATCH 1750/2056] ice: Add RL profile bit mask check Mask bits before accessing the profile type field. Signed-off-by: Tarun Singh Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_sched.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 1c29cfa1cf33..ac5b05bb978f 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -2153,8 +2153,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, hw = pi->hw; list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) - if (rl_prof_elem->profile.flags == profile_type && - rl_prof_elem->bw == bw) + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == + profile_type && rl_prof_elem->bw == bw) /* Return existing profile ID info */ return rl_prof_elem; @@ -2384,7 +2384,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) - if (rl_prof_elem->profile.flags == profile_type && + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == + profile_type && le16_to_cpu(rl_prof_elem->profile.profile_id) == profile_id) { if (rl_prof_elem->prof_id_ref) @@ -2546,8 +2547,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, return 0; return ice_sched_rm_rl_profile(pi, layer_num, - rl_prof_info->profile.flags, - old_id); + rl_prof_info->profile.flags & + ICE_AQC_RL_PROFILE_TYPE_M, old_id); } /** From 984824a210d4b21868d2f96219637587bdb37719 Mon Sep 17 00:00:00 2001 From: Tarun Singh Date: Mon, 13 Jul 2020 13:53:11 -0700 Subject: [PATCH 1751/2056] ice: Adjust scheduler default BW weight By default the queues are configured in legacy mode. The default BW settings for legacy/advanced modes are different. The existing code was using the advanced mode default value of 1 which was incorrect. This caused the unbalanced BW sharing among siblings. The recommended default value is applied. Signed-off-by: Tarun Singh Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 13 ++++++++++++- drivers/net/ethernet/intel/ice/ice_type.h | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index fd9534568bf3..35644fe6235a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -3907,7 +3907,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle, * Without setting the generic section as valid in valid_sections, the * Admin queue command will fail with error code ICE_AQ_RC_EINVAL. 
*/ - buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC; + buf->txqs[0].info.valid_sections = + ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR | + ICE_AQC_ELEM_VALID_EIR; + buf->txqs[0].info.generic = 0; + buf->txqs[0].info.cir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->txqs[0].info.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); + buf->txqs[0].info.eir_bw.bw_profile_idx = + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->txqs[0].info.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); /* add the LAN queue */ status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index e4349475790f..1eb83d9b0546 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -409,7 +409,7 @@ enum ice_rl_type { #define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */ #define ICE_SCHED_DFLT_RL_PROF_ID 0 #define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF -#define ICE_SCHED_DFLT_BW_WT 1 +#define ICE_SCHED_DFLT_BW_WT 4 #define ICE_SCHED_INVAL_PROF_ID 0xFFFF #define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */ From 4043818c13ef5e610e6555b02d69dfd8d0a6b32d Mon Sep 17 00:00:00 2001 From: Victor Raj Date: Mon, 13 Jul 2020 13:53:12 -0700 Subject: [PATCH 1752/2056] ice: distribute Tx queues evenly Distribute the Tx queues evenly across all queue groups. This will help the queues to get more equal sharing among the queues when all are in use. In the previous algorithm, the next queue group node will be picked up only after the previous one filled with max children. For example: if VSI is configured with 9 queues, the first 8 queues will be assigned to queue group 1 and the 9th queue will be assigned to queue group 2. The 2 queue groups split the bandwidth between them equally (50:50). The first queue group node will share the 50% bandwidth with all of its children (8 queues). And the second queue group node will share the entire 50% bandwidth with its only children. The new algorithm will fix this issue. Signed-off-by: Victor Raj Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_sched.c | 55 ++++++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index ac5b05bb978f..355f727563e4 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -1275,6 +1275,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base, return false; } +/** + * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node + * @pi: port information structure + * @vsi_node: software VSI handle + * @qgrp_node: first queue group node identified for scanning + * @owner: LAN or RDMA + * + * This function retrieves a free LAN or RDMA queue group node by scanning + * qgrp_node and its siblings for the queue group with the fewest number + * of queues currently assigned. 
+ */ +static struct ice_sched_node * +ice_sched_get_free_qgrp(struct ice_port_info *pi, + struct ice_sched_node *vsi_node, + struct ice_sched_node *qgrp_node, u8 owner) +{ + struct ice_sched_node *min_qgrp; + u8 min_children; + + if (!qgrp_node) + return qgrp_node; + min_children = qgrp_node->num_children; + if (!min_children) + return qgrp_node; + min_qgrp = qgrp_node; + /* scan all queue groups until find a node which has less than the + * minimum number of children. This way all queue group nodes get + * equal number of shares and active. The bandwidth will be equally + * distributed across all queues. + */ + while (qgrp_node) { + /* make sure the qgroup node is part of the VSI subtree */ + if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) + if (qgrp_node->num_children < min_children && + qgrp_node->owner == owner) { + /* replace the new min queue group node */ + min_qgrp = qgrp_node; + min_children = min_qgrp->num_children; + /* break if it has no children, */ + if (!min_children) + break; + } + qgrp_node = qgrp_node->sibling; + } + return min_qgrp; +} + /** * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node * @pi: port information structure @@ -1288,7 +1335,7 @@ struct ice_sched_node * ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 owner) { - struct ice_sched_node *vsi_node, *qgrp_node = NULL; + struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; u16 max_children; u8 qgrp_layer; @@ -1302,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, vsi_node = vsi_ctx->sched.vsi_node[tc]; /* validate invalid VSI ID */ if (!vsi_node) - goto lan_q_exit; + return NULL; /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); @@ -1315,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, qgrp_node = qgrp_node->sibling; } -lan_q_exit: - return qgrp_node; + /* Select the best queue group */ + return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); } /** From 682dfedcee2fa9a50cb53e3b30ff1ebe1f7dd659 Mon Sep 17 00:00:00 2001 From: Krzysztof Kazimierczak Date: Mon, 13 Jul 2020 13:53:13 -0700 Subject: [PATCH 1753/2056] ice: need_wakeup flag might not be set for Tx This is a port of i40e commit 705639572e8c ("i40e: need_wakeup flag might not be set for Tx"). Quoting the original commit message: "The need_wakeup flag for Tx might not be set for AF_XDP sockets that are only used to send packets. This happens if there is at least one outstanding packet that has not been completed by the hardware and we get that corresponding completion (which will not generate an interrupt since interrupts are disabled in the napi poll loop) between the time we stopped processing the Tx completions and interrupts are enabled again. In this case, the need_wakeup flag will have been cleared at the end of the Tx completion processing as we believe we will get an interrupt from the outstanding completion at a later point in time. But if this completion interrupt occurs before interrupts are enable, we lose it and should at that point really have set the need_wakeup flag since there are no more outstanding completions that can generate an interrupt to continue the processing. When this happens, user space will see a Tx queue need_wakeup of 0 and skip issuing a syscall, which means will never get into the Tx processing again and we have a deadlock." As a result, packet processing stops. 
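As a concrete illustration of why a stale flag stalls the queue, the sketch below shows the usual AF_XDP Tx kick loop on the application side. This is not code from this patch; it assumes the libbpf xsk helpers and a standard socket kick, and the function name is illustrative. If the driver never sets the flag again, the sendto() kick is skipped indefinitely.

/* Illustrative AF_XDP Tx kick, assuming libbpf's xsk helper API. */
#include <bpf/xsk.h>
#include <sys/socket.h>

static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	/* User space only pokes the kernel when the need_wakeup flag is
	 * set.  If the driver clears it and the final completion arrives
	 * while interrupts are still disabled, this branch is never taken
	 * and Tx processing stops, as described above.
	 */
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}
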
This patch introduces a fix for this issue, by always setting the need_wakeup flag at the end of an interrupt processing. This ensures that the deadlock will not happen. Signed-off-by: Krzysztof Kazimierczak Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_xsk.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 6badfd62dc63..87862918bc7a 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -706,8 +706,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget) if (tx_desc) { ice_xdp_ring_update_tail(xdp_ring); xsk_umem_consume_tx_done(xdp_ring->xsk_umem); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); } return budget > 0 && work_done; @@ -783,12 +781,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget) if (xsk_frames) xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames); - if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) { - if (xdp_ring->next_to_clean == xdp_ring->next_to_use) - xsk_set_tx_need_wakeup(xdp_ring->xsk_umem); - else - xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); - } + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) + xsk_set_tx_need_wakeup(xdp_ring->xsk_umem); ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); From cd1f56f4291a1bc949bc922e862a45d8cf6b8990 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Mon, 13 Jul 2020 13:53:14 -0700 Subject: [PATCH 1754/2056] ice: Allow all VLANs in safe mode Currently the PF VSI's context parameters are left in a bad state when going into safe mode. This is causing VLAN traffic to not pass. Fix this by configuring the PF VSI to allow all VLAN tagged traffic. Also, remove redundant comment explaining the safe mode flow in ice_probe(). Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 59 ++++++++++++++++++++++- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9b9e30a7d690..a68371fc0a75 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3583,6 +3583,60 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx) return err; } +/** + * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode + * @pf: PF to configure + * + * No VLAN offloads/filtering are advertised in safe mode so make sure the PF + * VSI can still Tx/Rx VLAN tagged packets. 
+ */ +static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf) +{ + struct ice_vsi *vsi = ice_get_main_vsi(pf); + struct ice_vsi_ctx *ctxt; + enum ice_status status; + struct ice_hw *hw; + + if (!vsi) + return; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return; + + hw = &pf->hw; + ctxt->info = vsi->info; + + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + /* disable VLAN anti-spoof */ + ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + + /* disable VLAN pruning and keep all other settings */ + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + + /* allow all VLANs on Tx and don't strip on Rx */ + ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL | + ICE_AQ_VSI_VLAN_EMOD_NOTHING; + + status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (status) { + dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n", + ice_stat_str(status), + ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + vsi->info.vlan_flags = ctxt->info.vlan_flags; + } + + kfree(ctxt); +} + /** * ice_log_pkg_init - log result of DDP package load * @hw: pointer to hardware info @@ -4139,9 +4193,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) /* Disable WoL at init, wait for user to enable */ device_set_wakeup_enable(dev, false); - /* If no DDP driven features have to be setup, we are done with probe */ - if (ice_is_safe_mode(pf)) + if (ice_is_safe_mode(pf)) { + ice_set_safe_mode_vlan_cfg(pf); goto probe_done; + } /* initialize DDP driven features */ From 78116e979ddc4928398420aa986fd283db23c361 Mon Sep 17 00:00:00 2001 From: Marcin Szycik Date: Mon, 13 Jul 2020 13:53:15 -0700 Subject: [PATCH 1755/2056] ice: cleanup VSI on probe fail As part of ice_setup_pf_sw() a PF VSI is setup; release the VSI in case of failure. Signed-off-by: Marcin Szycik Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a68371fc0a75..c0bde24ab344 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4134,7 +4134,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) if (err) { dev_err(dev, "probe failed sending driver version %s. 
error: %d\n", UTS_RELEASE, err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } /* since everything is good, start the service timer */ @@ -4143,19 +4143,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_link_events(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_link_events failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } err = ice_init_nvm_phy_type(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } err = ice_update_link_info(pf->hw.port_info); if (err) { dev_err(dev, "ice_update_link_info failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } ice_init_link_dflt_override(pf->hw.port_info); @@ -4166,7 +4166,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) err = ice_init_phy_user_cfg(pf->hw.port_info); if (err) { dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err); - goto err_alloc_sw_unroll; + goto err_send_version_unroll; } if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { @@ -4220,6 +4220,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) clear_bit(__ICE_DOWN, pf->state); return 0; +err_send_version_unroll: + ice_vsi_release_all(pf); err_alloc_sw_unroll: ice_devlink_destroy_port(pf); set_bit(__ICE_SERVICE_DIS, pf->state); From e923f04d660dd1dde872be8d63527fc2fd257874 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Mon, 13 Jul 2020 13:53:16 -0700 Subject: [PATCH 1756/2056] ice: reduce scope of variable The scope of the macro local variable 'i' can be reduced. Do so to avoid static analysis tools from complaining. Signed-off-by: Bruce Allan Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_controlq.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 1e18021aa073..1f46a7828be8 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) #define ICE_FREE_CQ_BUFS(hw, qi, ring) \ do { \ - int i; \ /* free descriptors */ \ - if ((qi)->ring.r.ring##_bi) \ + if ((qi)->ring.r.ring##_bi) { \ + int i; \ + \ for (i = 0; i < (qi)->num_##ring##_entries; i++) \ if ((qi)->ring.r.ring##_bi[i].pa) { \ dmam_free_coherent(ice_hw_to_dev(hw), \ @@ -325,6 +326,7 @@ do { \ (qi)->ring.r.ring##_bi[i].pa = 0;\ (qi)->ring.r.ring##_bi[i].size = 0;\ } \ + } \ /* free the buffer info list */ \ if ((qi)->ring.cmd_buf) \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ From 7dfff9ffe8df7181c2840de2dc861795cf8d8147 Mon Sep 17 00:00:00 2001 From: Ben Shelton Date: Mon, 13 Jul 2020 13:53:17 -0700 Subject: [PATCH 1757/2056] ice: disable no longer needed workaround for FW logging For the FW logging info AQ command, we currently set the ICE_AQ_FLAG_RD in order to work around a FW issue. This issue has been fixed so remove the workaround. 
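For context, the change below can be reduced to the sketch that follows. The struct and the flag value are simplified stand-ins for the driver's admin queue definitions, and the "firmware reads the attached buffer" reading of the RD flag is the usual one for this family of admin queues; treat both as assumptions, not the real code.

/* Reduced sketch of the descriptor setup touched by this hunk; the
 * struct and flag value are simplified stand-ins, not the driver's
 * real definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define AQ_FLAG_RD	0x0400	/* "firmware reads the attached buffer" */

struct aq_desc {
	uint16_t flags;
};

static void prep_fw_log_cfg_desc(struct aq_desc *desc, bool fw_needs_workaround)
{
	desc->flags = 0;

	/* The get-config buffer is only written by the firmware, so the
	 * read flag is not logically needed; the old code forced it to
	 * paper over a firmware bug that has since been fixed.
	 */
	if (fw_needs_workaround)
		desc->flags |= AQ_FLAG_RD;
}
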
Signed-off-by: Ben Shelton Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 35644fe6235a..ad5941ec7b11 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -611,8 +611,6 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info); - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); - status = ice_aq_send_cmd(hw, &desc, config, size, NULL); if (!status) { u16 i; From 6221595fc5fd55a457d0439a381c388d967f2011 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Mon, 13 Jul 2020 13:53:18 -0700 Subject: [PATCH 1758/2056] ice: fix unused parameter warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Depending on PAGE_SIZE, the following unused parameter warning can be reported: drivers/net/ethernet/intel/ice/ice_txrx.c: In function ‘ice_rx_frame_truesize’: drivers/net/ethernet/intel/ice/ice_txrx.c:513:21: warning: unused parameter ‘size’ [-Wunused-parameter] unsigned int size) The 'size' variable is used only when PAGE_SIZE >= 8192. Add __maybe_unused to remove the warning. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers --- drivers/net/ethernet/intel/ice/ice_txrx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index abdb137c8bb7..a508d4f463e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring) return 0; } -static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring, - unsigned int size) +static unsigned int +ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size) { unsigned int truesize; From 50935339c394adfb3d7253055e3bc10ee70264b0 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sat, 25 Jul 2020 19:02:25 +0200 Subject: [PATCH 1759/2056] netfilter: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/xt_connmark.h | 2 +- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/netfilter/Kconfig | 2 +- net/netfilter/nfnetlink_acct.c | 2 +- net/netfilter/nft_set_pipapo.c | 4 ++-- net/netfilter/xt_CONNSECMARK.c | 2 +- net/netfilter/xt_connmark.c | 2 +- net/netfilter/xt_nfacct.c | 2 +- net/netfilter/xt_time.c | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h index 1aa5c955ee1e..f01c19b83a2b 100644 --- a/include/uapi/linux/netfilter/xt_connmark.h +++ b/include/uapi/linux/netfilter/xt_connmark.h @@ -4,7 +4,7 @@ #include -/* Copyright (C) 2002,2004 MARA Systems AB +/* Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * * This program is free software; you can redistribute it and/or modify diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index dc705769acc9..26a9193df783 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -6,7 +6,7 @@ * * DECnet Routing Message Grabulator * - * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/ + * (C) 2000 ChyGwyn Limited - https://www.chygwyn.com/ * * Author: Steven Whitehouse */ diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0ffe2b8723c4..25313c29d799 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -447,7 +447,7 @@ config NF_TABLES replace the existing {ip,ip6,arp,eb}_tables infrastructure. It provides a pseudo-state machine with an extensible instruction-set (also known as expressions) that the userspace 'nft' utility - (http://www.netfilter.org/projects/nftables) uses to build the + (https://www.netfilter.org/projects/nftables) uses to build the rule-set. It also comes with the generic set infrastructure that allows you to construct mappings between matchings and actions for performance lookups. diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 5827117f2635..5bfec829c12f 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG + * (C) 2011 Intra2net AG */ #include #include diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index cc6082a5f7ad..9944523f5c2c 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -312,7 +312,7 @@ * Jay Ligatti, Josh Kuhn, and Chris Gage. * Proceedings of the IEEE International Conference on Computer * Communication Networks (ICCCN), August 2010. - * http://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf + * https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf * * [Rottenstreich 2010] * Worst-Case TCAM Rule Expansion @@ -325,7 +325,7 @@ * Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane, * and Patrick Eugster. * Proceedings of the 2014 ACM conference on SIGCOMM, August 2014. - * http://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf + * https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf */ #include diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index a5c8b653476a..76acecf3e757 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c @@ -6,7 +6,7 @@ * with the SECMARK target and state match. 
* * Based somewhat on CONNMARK: - * Copyright (C) 2002,2004 MARA Systems AB + * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * * (C) 2006,2008 Red Hat, Inc., James Morris diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index eec2f3a88d73..e5ebc0810675 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -2,7 +2,7 @@ /* * xt_connmark - Netfilter module to operate on connection marks * - * Copyright (C) 2002,2004 MARA Systems AB + * Copyright (C) 2002,2004 MARA Systems AB * by Henrik Nordstrom * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * Jan Engelhardt diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index 5aab6df74e0f..a97c2259bbc8 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * (C) 2011 Pablo Neira Ayuso - * (C) 2011 Intra2net AG + * (C) 2011 Intra2net AG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index 67cb98489415..6aa12d0f54e2 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c @@ -5,7 +5,7 @@ * based on ipt_time by Fabrice MARIE * This is a module which is used for time matching * It is using some modified code from dietlibc (localtime() function) - * that you can find at http://www.fefe.de/dietlibc/ + * that you can find at https://www.fefe.de/dietlibc/ * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from gnu.org/gpl. */ From 42f36eba71c4f0c532d6de761c154d00f9f1900d Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Sun, 26 Jul 2020 21:52:05 -0400 Subject: [PATCH 1760/2056] netfilter: ip6tables: Remove redundant null checks Remove superfluous check for NULL pointer to header. 
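The checks being dropped follow the pattern sketched below: the header pointer is validated (and the packet dropped) right after it is fetched, and it is already dereferenced by the debug output a few lines above the removed term, so a trailing "ptr != NULL" test cannot change the result. The sketch is a self-contained illustration with made-up names, not the module code itself.

/* Self-contained illustration of the redundant-check pattern removed
 * in the hunks below (types and names are illustrative).
 */
#include <stdbool.h>
#include <stddef.h>

struct hdr {
	unsigned int spi;
};

static bool match_spi(const struct hdr *h, unsigned int lo, unsigned int hi)
{
	if (h == NULL)		/* the real matches already bail out here */
		return false;

	/* h is provably non-NULL from this point on, so a later
	 * "(h != NULL) &&" term, like the ones deleted below, is dead code.
	 */
	return h->spi >= lo && h->spi <= hi;
}
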
Signed-off-by: Gaurav Singh Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/ip6t_ah.c | 3 +-- net/ipv6/netfilter/ip6t_frag.c | 3 +-- net/ipv6/netfilter/ip6t_hbh.c | 3 +-- net/ipv6/netfilter/ip6t_rt.c | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index 4e15a14435e4..70da2f2ce064 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c @@ -74,8 +74,7 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par) ahinfo->hdrres, ah->reserved, !(ahinfo->hdrres && ah->reserved)); - return (ah != NULL) && - spi_match(ahinfo->spis[0], ahinfo->spis[1], + return spi_match(ahinfo->spis[0], ahinfo->spis[1], ntohl(ah->spi), !!(ahinfo->invflags & IP6T_AH_INV_SPI)) && (!ahinfo->hdrlen || diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index fb91eeee4a1e..3aad6439386b 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c @@ -85,8 +85,7 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) !((fraginfo->flags & IP6T_FRAG_NMF) && (ntohs(fh->frag_off) & IP6_MF))); - return (fh != NULL) && - id_match(fraginfo->ids[0], fraginfo->ids[1], + return id_match(fraginfo->ids[0], fraginfo->ids[1], ntohl(fh->identification), !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) && !((fraginfo->flags & IP6T_FRAG_RES) && diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 467b2a86031b..e7a3fb9355ee 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -86,8 +86,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) ((optinfo->hdrlen == hdrlen) ^ !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); - ret = (oh != NULL) && - (!(optinfo->flags & IP6T_OPTS_LEN) || + ret = (!(optinfo->flags & IP6T_OPTS_LEN) || ((optinfo->hdrlen == hdrlen) ^ !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index f633dc84ca3f..733c83d38b30 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -89,8 +89,7 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) !((rtinfo->flags & IP6T_RT_RES) && (((const struct rt0_hdr *)rh)->reserved))); - ret = (rh != NULL) && - (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], + ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], rh->segments_left, !!(rtinfo->invflags & IP6T_RT_INV_SGS))) && (!(rtinfo->flags & IP6T_RT_LEN) || From 5ba2254b04f9f606bafb8b0091bea2d0cd9f5dd0 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 29 Jul 2020 17:49:09 +0800 Subject: [PATCH 1761/2056] net: mvneta: fix comment about phylink_speed_down mvneta has switched to phylink, so the comment should look like "We may have called phylink_speed_down before". Signed-off-by: Jisheng Zhang Reviewed-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2c9277e73cef..c9b6b0f85bb0 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3637,7 +3637,7 @@ static void mvneta_start_dev(struct mvneta_port *pp) phylink_start(pp->phylink); - /* We may have called phy_speed_down before */ + /* We may have called phylink_speed_down before */ phylink_speed_up(pp->phylink); netif_tx_start_all_queues(pp->dev); From 41d707b7332f1386642c47eb078110ca368a46f5 Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Wed, 29 Jul 2020 11:10:18 -0700 Subject: [PATCH 1762/2056] fib: fix fib_rules_ops indirect calls wrappers This patch fixes: commit b9aaec8f0be5 ("fib: use indirect call wrappers in the most common fib_rules_ops") which didn't consider the case when CONFIG_IPV6_MULTIPLE_TABLES is not set. Reported-by: Stephen Rothwell Fixes: b9aaec8f0be5 ("fib: use indirect call wrappers in the most common fib_rules_ops") Signed-off-by: Brian Vazquez Signed-off-by: David S. Miller --- net/core/fib_rules.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index e7a8f87b0bb2..fce645f6b9b1 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -16,6 +16,13 @@ #include #include +#ifdef CONFIG_IPV6_MULTIPLE_TABLES +#define INDIRECT_CALL_MT(f, f2, f1, ...) \ + INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__) +#else +#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) +#endif + static const struct fib_kuid_range fib_kuid_range_unset = { KUIDT_INIT(0), KUIDT_INIT(~0), @@ -268,10 +275,10 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, uid_gt(fl->flowi_uid, rule->uid_range.end)) goto out; - ret = INDIRECT_CALL_INET(ops->match, - fib6_rule_match, - fib4_rule_match, - rule, fl, flags); + ret = INDIRECT_CALL_MT(ops->match, + fib6_rule_match, + fib4_rule_match, + rule, fl, flags); out: return (rule->flags & FIB_RULE_INVERT) ? !ret : ret; } @@ -302,15 +309,15 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl, } else if (rule->action == FR_ACT_NOP) continue; else - err = INDIRECT_CALL_INET(ops->action, - fib6_rule_action, - fib4_rule_action, - rule, fl, flags, arg); + err = INDIRECT_CALL_MT(ops->action, + fib6_rule_action, + fib4_rule_action, + rule, fl, flags, arg); - if (!err && ops->suppress && INDIRECT_CALL_INET(ops->suppress, - fib6_rule_suppress, - fib4_rule_suppress, - rule, arg)) + if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress, + fib6_rule_suppress, + fib4_rule_suppress, + rule, arg)) continue; if (err != -EAGAIN) { From 6540351e6f27ef718e3cf5b46349633f3ec57859 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 23 Jul 2020 18:08:56 +0530 Subject: [PATCH 1763/2056] Bluetooth: Translate additional address type correctly When using controller based address resolution, then the new address types 0x02 and 0x03 are used. These types need to be converted back into either public address or random address types. 
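A minimal sketch of the translation added by the hunk below; the helper name is illustrative (the patch open-codes the switch inside hci_pend_le_action_lookup()), but the type values match the new defines.

/* Normalize controller-resolved LE address types back to the two
 * host-visible types.  Helper name is illustrative only.
 */
#define ADDR_LE_DEV_PUBLIC		0x00
#define ADDR_LE_DEV_RANDOM		0x01
#define ADDR_LE_DEV_PUBLIC_RESOLVED	0x02
#define ADDR_LE_DEV_RANDOM_RESOLVED	0x03

static unsigned char le_addr_type_normalize(unsigned char addr_type)
{
	switch (addr_type) {
	case ADDR_LE_DEV_PUBLIC_RESOLVED:
		return ADDR_LE_DEV_PUBLIC;	/* RPA resolved to a public identity */
	case ADDR_LE_DEV_RANDOM_RESOLVED:
		return ADDR_LE_DEV_RANDOM;	/* RPA resolved to a static random identity */
	default:
		return addr_type;		/* 0x00 and 0x01 pass through */
	}
}
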
Signed-off-by: Marcel Holtmann Signed-off-by: Sathish Narsimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 6 ++++-- net/bluetooth/hci_core.c | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 1317dfd8f962..c36dccd6718e 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -2279,8 +2279,10 @@ struct hci_ev_le_conn_complete { #define LE_EXT_ADV_SCAN_RSP 0x0008 #define LE_EXT_ADV_LEGACY_PDU 0x0010 -#define ADDR_LE_DEV_PUBLIC 0x00 -#define ADDR_LE_DEV_RANDOM 0x01 +#define ADDR_LE_DEV_PUBLIC 0x00 +#define ADDR_LE_DEV_RANDOM 0x01 +#define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02 +#define ADDR_LE_DEV_RANDOM_RESOLVED 0x03 #define HCI_EV_LE_ADVERTISING_REPORT 0x02 struct hci_ev_le_advertising_info { diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 4ba23b821cbf..3f89bd639860 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3292,6 +3292,15 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, { struct hci_conn_params *param; + switch (addr_type) { + case ADDR_LE_DEV_PUBLIC_RESOLVED: + addr_type = ADDR_LE_DEV_PUBLIC; + break; + case ADDR_LE_DEV_RANDOM_RESOLVED: + addr_type = ADDR_LE_DEV_RANDOM; + break; + } + list_for_each_entry(param, list, action) { if (bacmp(¶m->addr, addr) == 0 && param->addr_type == addr_type) From e1d572357599d142df5764b39731b6eb55a22beb Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 23 Jul 2020 18:08:57 +0530 Subject: [PATCH 1764/2056] Bluetooth: Configure controller address resolution if available When the LL Privacy support is available, then as part of enabling or disabling passive background scanning, it is required to set up the controller based address resolution as well. Since only passive background scanning is utilizing the whitelist, the address resolution is now bound to the whitelist and passive background scanning. All other resolution can be easily done by the host stack. 
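The capability gate this patch introduces can be reduced to the self-contained check below. The trimmed struct stands in for the real hci_dev, and the feature bit value is assumed from the standard LE feature mask layout (LL Privacy in octet 0); consult the patch itself for the authoritative definition.

/* Reduced sketch of the use_ll_privacy() gate added below; the struct
 * is trimmed and the bit value is assumed from the LE feature mask.
 */
#include <stdbool.h>
#include <stdint.h>

#define HCI_LE_LL_PRIVACY	0x40	/* LL Privacy, octet 0 bit 6 (assumed) */

struct le_dev {
	uint8_t le_features[8];
};

static bool use_ll_privacy_sketch(const struct le_dev *dev)
{
	/* Controller-based address resolution is only programmed when the
	 * controller advertises LL Privacy; it is then enabled before the
	 * whitelist-based passive scan starts and disabled when that scan
	 * is torn down, as the hunks below show.
	 */
	return dev->le_features[0] & HCI_LE_LL_PRIVACY;
}
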
Signed-off-by: Marcel Holtmann Signed-off-by: Sathish Narsimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci_core.h | 3 +++ net/bluetooth/hci_request.c | 26 +++++++++++++++++++++++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index bee1b4778ccc..8caac20556b4 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1359,6 +1359,9 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) +/* Use LL Privacy based address resolution if supported */ +#define use_ll_privacy(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY) + /* Use ext scanning if set ext scan param and ext scan enable is supported */ #define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \ ((dev)->commands[37] & 0x40)) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 7c0c2fda04ad..7d0ba53ffed0 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -675,6 +675,12 @@ void hci_req_add_le_scan_disable(struct hci_request *req) cp.enable = LE_SCAN_DISABLE; hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } + + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + __u8 enable = 0x00; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); + } } static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, @@ -816,7 +822,8 @@ static bool scan_use_rpa(struct hci_dev *hdev) } static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, - u16 window, u8 own_addr_type, u8 filter_policy) + u16 window, u8 own_addr_type, u8 filter_policy, + bool addr_resolv) { struct hci_dev *hdev = req->hdev; @@ -825,6 +832,11 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } + if (use_ll_privacy(hdev) && addr_resolv) { + u8 enable = 0x01; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); + } + /* Use ext scanning if set ext scan param and ext scan enable is * supported */ @@ -898,12 +910,18 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, } } +/* Ensure to call hci_req_add_le_scan_disable() first to disable the + * controller based address resolution to be able to reconfigure + * resolving list. 
+ */ void hci_req_add_le_passive_scan(struct hci_request *req) { struct hci_dev *hdev = req->hdev; u8 own_addr_type; u8 filter_policy; u16 window, interval; + /* Background scanning should run with address resolution */ + bool addr_resolv = true; if (hdev->scanning_paused) { bt_dev_dbg(hdev, "Scanning is paused for suspend"); @@ -949,7 +967,7 @@ void hci_req_add_le_passive_scan(struct hci_request *req) bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy); hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, - own_addr_type, filter_policy); + own_addr_type, filter_policy, addr_resolv); } static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance) @@ -2789,6 +2807,8 @@ static int active_scan(struct hci_request *req, unsigned long opt) u8 own_addr_type; /* White list is not used for discovery */ u8 filter_policy = 0x00; + /* Discovery doesn't require controller address resolution */ + bool addr_resolv = false; int err; BT_DBG("%s", hdev->name); @@ -2811,7 +2831,7 @@ static int active_scan(struct hci_request *req, unsigned long opt) hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, hdev->le_scan_window_discovery, own_addr_type, - filter_policy); + filter_policy, addr_resolv); return 0; } From 0eee35bdfa3b472cc986ecc6ad76293fdcda59e2 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 23 Jul 2020 18:08:58 +0530 Subject: [PATCH 1765/2056] Bluetooth: Update resolving list when updating whitelist When the whitelist is updated, then also update the entries of the resolving list for devices where IRKs are available. Signed-off-by: Marcel Holtmann Signed-off-by: Sathish Narsimman Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 41 +++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 7d0ba53ffed0..85de1f356610 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -694,6 +694,21 @@ static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr, bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp); + + if (use_ll_privacy(req->hdev)) { + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); + if (irk) { + struct hci_cp_le_del_from_resolv_list cp; + + cp.bdaddr_type = bdaddr_type; + bacpy(&cp.bdaddr, bdaddr); + + hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, + sizeof(cp), &cp); + } + } } /* Adds connection to white list if needed. On error, returns -1. 
*/ @@ -714,7 +729,7 @@ static int add_to_white_list(struct hci_request *req, return -1; /* White list can not be used with RPAs */ - if (!allow_rpa && + if (!allow_rpa && !use_ll_privacy(hdev) && hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { return -1; } @@ -732,6 +747,28 @@ static int add_to_white_list(struct hci_request *req, cp.bdaddr_type); hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); + if (use_ll_privacy(hdev)) { + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(hdev, ¶ms->addr, + params->addr_type); + if (irk) { + struct hci_cp_le_add_to_resolv_list cp; + + cp.bdaddr_type = params->addr_type; + bacpy(&cp.bdaddr, ¶ms->addr); + memcpy(cp.peer_irk, irk->val, 16); + + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) + memcpy(cp.local_irk, hdev->irk, 16); + else + memset(cp.local_irk, 0, 16); + + hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, + sizeof(cp), &cp); + } + } + return 0; } @@ -772,7 +809,7 @@ static u8 update_white_list(struct hci_request *req) } /* White list can not be used with RPAs */ - if (!allow_rpa && + if (!allow_rpa && !use_ll_privacy(hdev) && hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { return 0x00; } From b31bc00bfe3a4881e48e196b93cec1efb491ef2b Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:08:59 +0530 Subject: [PATCH 1766/2056] Bluetooth: Translate additional address type during le_conn When using controller based address resolution, then the new address types 0x02 and 0x03 are used. These types need to be converted back into either public address or random address types. This patch is specially during LE_CREATE_CONN if using own_add_type as 0x02 or 0x03. Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_event.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 61f8c4d12028..6388fb55b4d2 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -2296,6 +2296,22 @@ static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, if (!conn) return; + /* When using controller based address resolution, then the new + * address types 0x02 and 0x03 are used. These types need to be + * converted back into either public address or random address type + */ + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + switch (own_address_type) { + case ADDR_LE_DEV_PUBLIC_RESOLVED: + own_address_type = ADDR_LE_DEV_PUBLIC; + break; + case ADDR_LE_DEV_RANDOM_RESOLVED: + own_address_type = ADDR_LE_DEV_RANDOM; + break; + } + } + /* Store the initiator and responder address information which * is needed for SMP. These values will not change during the * lifetime of the connection. 
From d03c759e391901ed8584117abd52ca4381a652c9 Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:00 +0530 Subject: [PATCH 1767/2056] Bluetooth: Let controller creates RPA during le create conn When address resolution is enabled and set_privacy is enabled let's use own address type as 0x03 Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 85de1f356610..e48f0945a417 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -2242,7 +2242,13 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy, if (use_rpa) { int to; - *own_addr_type = ADDR_LE_DEV_RANDOM; + /* If Controller supports LL Privacy use own address type is + * 0x03 + */ + if (use_ll_privacy(hdev)) + *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; + else + *own_addr_type = ADDR_LE_DEV_RANDOM; if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && !bacmp(&hdev->random_addr, &hdev->rpa)) From 5c49bcce5c124406920843af65574104aaaa3309 Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:01 +0530 Subject: [PATCH 1768/2056] Bluetooth: Enable/Disable address resolution during le create conn In this patch if le_create_conn process is started restrict to disable address resolution and same is disabled during le_enh_connection_complete Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_conn.c | 7 +++++- net/bluetooth/hci_event.c | 4 ++++ net/bluetooth/hci_request.c | 45 ++++++++++++++++++++++++++++--------- net/bluetooth/hci_request.h | 3 ++- net/bluetooth/mgmt.c | 2 +- 5 files changed, 47 insertions(+), 14 deletions(-) diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index be67361ff2f0..9832f8445d43 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1003,6 +1003,11 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, struct hci_request req; int err; + /* This ensures that during disable le_scan address resolution + * will not be disabled if it is followed by le_create_conn + */ + bool rpa_le_conn = true; + /* Let's make sure that le is enabled.*/ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { if (lmp_le_capable(hdev)) @@ -1103,7 +1108,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, * state. 
*/ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - hci_req_add_le_scan_disable(&req); + hci_req_add_le_scan_disable(&req, rpa_le_conn); hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6388fb55b4d2..628831b15c0a 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5228,6 +5228,10 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->interval), le16_to_cpu(ev->latency), le16_to_cpu(ev->supervision_timeout)); + + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + hci_req_disable_address_resolution(hdev); } static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index e48f0945a417..70e077cc7dfa 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -428,7 +428,7 @@ static void __hci_update_background_scan(struct hci_request *req) if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) return; - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); BT_DBG("%s stopping background scanning", hdev->name); } else { @@ -447,7 +447,7 @@ static void __hci_update_background_scan(struct hci_request *req) * don't miss any advertising (due to duplicates filter). */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); hci_req_add_le_passive_scan(req); @@ -652,7 +652,7 @@ void __hci_req_update_eir(struct hci_request *req) hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); } -void hci_req_add_le_scan_disable(struct hci_request *req) +void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) { struct hci_dev *hdev = req->hdev; @@ -676,8 +676,9 @@ void hci_req_add_le_scan_disable(struct hci_request *req) hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } + /* Disable address resolution */ if (use_ll_privacy(hdev) && - hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } @@ -1072,7 +1073,7 @@ static void hci_req_config_le_suspend_scan(struct hci_request *req) { /* Before changing params disable scan if enabled */ if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); /* Configure params and enable scanning */ hci_req_add_le_passive_scan(req); @@ -1140,7 +1141,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next) /* Disable LE passive scan if enabled */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(&req); + hci_req_add_le_scan_disable(&req, false); /* Mark task needing completion */ set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks); @@ -1696,6 +1697,28 @@ int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) return hci_req_run(&req, NULL); } +static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + BT_DBG("%s status %u", hdev->name, status); +} + +void hci_req_disable_address_resolution(struct hci_dev *hdev) +{ + struct hci_request req; + __u8 enable = 0x00; + + if (!use_ll_privacy(hdev) && + !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return; + + hci_req_init(&req, hdev); + + hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); + + hci_req_run(&req, enable_addr_resolution_complete); +} + static void 
adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) { BT_DBG("%s status %u", hdev->name, status); @@ -2667,7 +2690,7 @@ static void bg_scan_update(struct work_struct *work) static int le_scan_disable(struct hci_request *req, unsigned long opt) { - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); return 0; } @@ -2770,7 +2793,7 @@ static int le_scan_restart(struct hci_request *req, unsigned long opt) return 0; } - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); if (use_ext_scan(hdev)) { struct hci_cp_le_set_ext_scan_enable ext_enable_cp; @@ -2861,7 +2884,7 @@ static int active_scan(struct hci_request *req, unsigned long opt) * discovery scanning parameters. */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); /* All active scans will be done with either a resolvable private * address (when privacy feature has been enabled) or non-resolvable @@ -2976,14 +2999,14 @@ bool hci_req_stop_discovery(struct hci_request *req) if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { cancel_delayed_work(&hdev->le_scan_disable); - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); } ret = true; } else { /* Passive scanning */ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { - hci_req_add_le_scan_disable(req); + hci_req_add_le_scan_disable(req, false); ret = true; } } diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index bbe892ab078a..6a12e84c66c4 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -65,11 +65,12 @@ void __hci_req_write_fast_connectable(struct hci_request *req, bool enable); void __hci_req_update_name(struct hci_request *req); void __hci_req_update_eir(struct hci_request *req); -void hci_req_add_le_scan_disable(struct hci_request *req); +void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); void hci_req_add_le_passive_scan(struct hci_request *req); void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); +void hci_req_disable_address_resolution(struct hci_dev *hdev); void hci_req_reenable_advertising(struct hci_dev *hdev); void __hci_req_enable_advertising(struct hci_request *req); void __hci_req_disable_advertising(struct hci_request *req); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index f45105d2de77..47bcfe2fb14c 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -5226,7 +5226,7 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev, hci_req_init(&req, hdev); - hci_req_add_le_scan_disable(&req); + hci_req_add_le_scan_disable(&req, false); hci_req_add_le_passive_scan(&req); hci_req_run(&req, NULL); From b2cc23398e8166b38f8715026273503b081c2a7a Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:02 +0530 Subject: [PATCH 1769/2056] Bluetooth: Enable RPA Timeout Enable RPA timeout during bluetooth initialization. 
The RPA timeout value is used from hdev, which initialized from debug_fs Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 2 ++ net/bluetooth/hci_core.c | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index c36dccd6718e..dd82cce77a7a 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -1648,6 +1648,8 @@ struct hci_rp_le_read_resolv_list_size { #define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d +#define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e + #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f struct hci_rp_le_read_max_data_len { __u8 status; diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 3f89bd639860..68bfe57b6625 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -763,6 +763,14 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt) hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); } + if (hdev->commands[35] & 0x40) { + __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); + + /* Set RPA timeout */ + hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2, + &rpa_timeout); + } + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { /* Read LE Maximum Data Length */ hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); From cbbdfa6f331980c6786b4ca5df53c37b90df3246 Mon Sep 17 00:00:00 2001 From: Sathish Narasimman Date: Thu, 23 Jul 2020 18:09:03 +0530 Subject: [PATCH 1770/2056] Bluetooth: Enable controller RPA resolution using Experimental feature This patch adds support to enable the use of RPA Address resolution using expermental feature mgmt command. Signed-off-by: Sathish Narasimman Signed-off-by: Marcel Holtmann --- include/net/bluetooth/hci.h | 1 + net/bluetooth/hci_event.c | 1 + net/bluetooth/hci_request.c | 7 +- net/bluetooth/mgmt.c | 142 +++++++++++++++++++++++++++++++++++- 4 files changed, 148 insertions(+), 3 deletions(-) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index dd82cce77a7a..c8e67042a3b1 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -318,6 +318,7 @@ enum { HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, + HCI_ENABLE_LL_PRIVACY, HCI_CMD_PENDING, HCI_FORCE_NO_MITM, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 628831b15c0a..33d8458fdd4a 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -5230,6 +5230,7 @@ static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, le16_to_cpu(ev->supervision_timeout)); if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) hci_req_disable_address_resolution(hdev); } diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 70e077cc7dfa..435400a43a78 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -678,8 +678,10 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) /* Disable address resolution */ if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { __u8 enable = 0x00; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } } @@ -870,8 +872,11 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, return; } - if (use_ll_privacy(hdev) && addr_resolv) { + if (use_ll_privacy(hdev) && + hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + addr_resolv) { u8 enable 
= 0x01; + hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 47bcfe2fb14c..4ec0fee80344 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -795,10 +795,15 @@ static u32 get_supported_settings(struct hci_dev *hdev) if (lmp_le_capable(hdev)) { settings |= MGMT_SETTING_LE; - settings |= MGMT_SETTING_ADVERTISING; settings |= MGMT_SETTING_SECURE_CONN; settings |= MGMT_SETTING_PRIVACY; settings |= MGMT_SETTING_STATIC_ADDRESS; + + /* When the experimental feature for LL Privacy support is + * enabled, then advertising is no longer supported. + */ + if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + settings |= MGMT_SETTING_ADVERTISING; } if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || @@ -3759,10 +3764,16 @@ static const u8 simult_central_periph_uuid[16] = { 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, }; +/* 15c0a148-c273-11ea-b3de-0242ac130004 */ +static const u8 rpa_resolution_uuid[16] = { + 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, + 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, +}; + static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, void *data, u16 data_len) { - char buf[44]; + char buf[62]; /* Enough space for 3 features */ struct mgmt_rp_read_exp_features_info *rp = (void *)buf; u16 idx = 0; u32 flags; @@ -3795,6 +3806,17 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, idx++; } + if (hdev && use_ll_privacy(hdev)) { + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + flags = BIT(0) | BIT(1); + else + flags = BIT(1); + + memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + rp->feature_count = cpu_to_le16(idx); /* After reading the experimental features information, enable @@ -3807,6 +3829,21 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, 0, rp, sizeof(*rp) + (20 * idx)); } +static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, + struct sock *skip) +{ + struct mgmt_ev_exp_feature_changed ev; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, rpa_resolution_uuid, 16); + ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); + +} + #ifdef CONFIG_BT_FEATURE_DEBUG static int exp_debug_feature_changed(bool enabled, struct sock *skip) { @@ -3845,6 +3882,16 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, } #endif + if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { + bool changed = hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + + if (changed) + exp_ll_privacy_feature_changed(false, hdev, sk); + } + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); return mgmt_cmd_complete(sk, hdev ? 
hdev->id : MGMT_INDEX_NONE, @@ -3895,6 +3942,69 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, } #endif + if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) { + bool val, changed; + int err; + u32 flags; + + /* Command requires to use the controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Changes can only be made when controller is powered down */ + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_POWERED); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + + if (val) { + changed = !hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + + /* Enable LL privacy + supported settings changed */ + flags = BIT(0) | BIT(1); + } else { + changed = hci_dev_test_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY); + + /* Disable LL privacy + supported settings changed */ + flags = BIT(1); + } + + memcpy(rp.uuid, rpa_resolution_uuid, 16); + rp.flags = cpu_to_le32(flags); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_ll_privacy_feature_changed(val, hdev, sk); + + return err; + } + return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE, MGMT_STATUS_NOT_SUPPORTED); @@ -5040,6 +5150,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, status); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -7112,6 +7229,13 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, MGMT_STATUS_REJECTED); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + hci_dev_lock(hdev); rp_len = sizeof(*rp) + hdev->adv_instance_cnt; @@ -7315,6 +7439,13 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev, return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, status); + /* Enabling the experimental LL Privay support disables support for + * advertising. 
+ */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES) return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, MGMT_STATUS_INVALID_PARAMS); @@ -7479,6 +7610,13 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev, bt_dev_dbg(hdev, "sock %p", sk); + /* Enabling the experimental LL Privay support disables support for + * advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_NOT_SUPPORTED); + hci_dev_lock(hdev); if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { From 34ec58b9fd1c0a353892e962fb0d4834d0d7283d Mon Sep 17 00:00:00 2001 From: Abhishek Pandit-Subedi Date: Wed, 29 Jul 2020 16:17:43 -0700 Subject: [PATCH 1771/2056] Revert "Bluetooth: btusb: Disable runtime suspend on Realtek devices" This reverts commit 7ecacafc240638148567742cca41aa7144b4fe1e. Testing this change on a board with RTL8822CE, I found that enabling autosuspend has no effect on the stability of the system. The board continued working after autosuspend, suspend and reboot. The original commit makes it impossible to enable autosuspend on working systems so it should be reverted. Disabling autosuspend should be done via module param or udev in userspace instead. Signed-off-by: Abhishek Pandit-Subedi Acked-by: Kai-Heng Feng Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1f51494f5818..8d2608ddfd08 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -4086,10 +4086,6 @@ static int btusb_probe(struct usb_interface *intf, set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags); else bt_dev_err(hdev, "Device does not support ALT setting 1"); - - err = usb_autopm_get_interface(intf); - if (err < 0) - goto out_free_dev; } if (!reset) From bc5cbd73eb493944b8665dc517f684c40eb18a4a Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 14:59:39 +0530 Subject: [PATCH 1772/2056] iavf: use generic power management With the support of generic PM callbacks, drivers no longer need to use legacy .suspend() and .resume() in which they had to maintain PCI states changes and device's power state themselves. The required operations are done by PCI core. PCI drivers are not expected to invoke PCI helper functions like pci_save/restore_state(), pci_enable/disable_device(), pci_set_power_state(), etc. Their tasks are completed by PCI core itself. Compile-tested only. Signed-off-by: Vaibhav Gupta Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/iavf/iavf_main.c | 45 ++++++--------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 48c956d90b90..d870343cf689 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3766,7 +3766,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; } -#ifdef CONFIG_PM /** * iavf_suspend - Power management suspend routine * @pdev: PCI device information struct @@ -3774,11 +3773,10 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * * Called when the system (VM) is entering sleep/suspend. 
**/ -static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused iavf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct iavf_adapter *adapter = netdev_priv(netdev); - int retval = 0; netif_device_detach(netdev); @@ -3796,12 +3794,6 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); - retval = pci_save_state(pdev); - if (retval) - return retval; - - pci_disable_device(pdev); - return 0; } @@ -3811,24 +3803,13 @@ static int iavf_suspend(struct pci_dev *pdev, pm_message_t state) * * Called when the system (VM) is resumed from sleep/suspend. **/ -static int iavf_resume(struct pci_dev *pdev) +static int __maybe_unused iavf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct iavf_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n"); - return err; - } pci_set_master(pdev); rtnl_lock(); @@ -3852,7 +3833,6 @@ static int iavf_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ /** * iavf_remove - Device Removal Routine * @pdev: PCI device information struct @@ -3954,16 +3934,15 @@ static void iavf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume); + static struct pci_driver iavf_driver = { - .name = iavf_driver_name, - .id_table = iavf_pci_tbl, - .probe = iavf_probe, - .remove = iavf_remove, -#ifdef CONFIG_PM - .suspend = iavf_suspend, - .resume = iavf_resume, -#endif - .shutdown = iavf_shutdown, + .name = iavf_driver_name, + .id_table = iavf_pci_tbl, + .probe = iavf_probe, + .remove = iavf_remove, + .driver.pm = &iavf_pm_ops, + .shutdown = iavf_shutdown, }; /** From e9c971bdabb0ab008f443a92bcc35a2ccce99256 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 14:59:40 +0530 Subject: [PATCH 1773/2056] igbvf: use generic power management Remove legacy PM callbacks and use generic operations. With legacy code, drivers were responsible for handling PCI PM operations like pci_save_state(). In generic code, all these are handled by PCI core. The generic suspend() and resume() are called at the same point the legacy ones were called. Thus, it does not affect the normal functioning of the driver. __maybe_unused attribute is used with .resume() but not with .suspend(), as .suspend() is called by .shutdown(). Compile-tested only. 
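For readers following these conversions, the shared shape of the change is sketched below. This is an illustrative fragment only, not code from the series: "foo" and its helpers are hypothetical stand-ins, and each real driver keeps its own device-specific suspend/resume work.

#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/netdevice.h>

static int __maybe_unused foo_suspend(struct device *dev_d)
{
	struct net_device *netdev = dev_get_drvdata(dev_d);

	netif_device_detach(netdev);
	/* Device-specific quiesce only; the PCI core now saves config
	 * space and picks the low-power state, so no pci_save_state()
	 * or pci_disable_device() here. */
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *netdev = pci_get_drvdata(pdev);

	/* Config space and D0 have already been restored by the core. */
	pci_set_master(pdev);
	netif_device_attach(netdev);
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	/* .name, .id_table, .probe, .remove, .shutdown as before */
	.driver.pm = &foo_pm_ops,	/* replaces the #ifdef CONFIG_PM .suspend/.resume pair */
};

Because SIMPLE_DEV_PM_OPS only references the callbacks when CONFIG_PM is enabled, the __maybe_unused annotations keep !CONFIG_PM builds free of unused-function warnings.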
Signed-off-by: Vaibhav Gupta Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igbvf/netdev.c | 37 +++++------------------ 1 file changed, 8 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 97a065928976..19269f5d52bc 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2457,13 +2457,10 @@ static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +static int igbvf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct igbvf_adapter *adapter = netdev_priv(netdev); -#ifdef CONFIG_PM - int retval = 0; -#endif netif_device_detach(netdev); @@ -2473,31 +2470,16 @@ static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) igbvf_free_irq(adapter); } -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; -#endif - - pci_disable_device(pdev); - return 0; } -#ifdef CONFIG_PM -static int igbvf_resume(struct pci_dev *pdev) +static int __maybe_unused igbvf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct igbvf_adapter *adapter = netdev_priv(netdev); u32 err; - pci_restore_state(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); if (netif_running(netdev)) { @@ -2515,11 +2497,10 @@ static int igbvf_resume(struct pci_dev *pdev) return 0; } -#endif static void igbvf_shutdown(struct pci_dev *pdev) { - igbvf_suspend(pdev, PMSG_SUSPEND); + igbvf_suspend(&pdev->dev); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2960,17 +2941,15 @@ static const struct pci_device_id igbvf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); +static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume); + /* PCI Device API Driver */ static struct pci_driver igbvf_driver = { .name = igbvf_driver_name, .id_table = igbvf_pci_tbl, .probe = igbvf_probe, .remove = igbvf_remove, -#ifdef CONFIG_PM - /* Power Management Hooks */ - .suspend = igbvf_suspend, - .resume = igbvf_resume, -#endif + .driver.pm = &igbvf_pm_ops, .shutdown = igbvf_shutdown, .err_handler = &igbvf_err_handler }; From 6f82b25587354ce7c9c42e0b53d8b0770b900847 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 14:59:41 +0530 Subject: [PATCH 1774/2056] ixgbe: use generic power management With legacy PM hooks, it was the responsibility of a driver to manage PCI states and also the device's power state. The generic approach is to let PCI core handle the work. ixgbe_suspend() calls __ixgbe_shutdown() to perform intermediate tasks. __ixgbe_shutdown() modifies the value of "wake" (device should be wakeup enabled or not), responsible for controlling the flow of legacy PM. Since, PCI core has no idea about the value of "wake", new code for generic PM may produce unexpected results. Thus, use "device_set_wakeup_enable()" to wakeup-enable the device accordingly. Compile-tested only. 
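To make the "wake" hand-off concrete, here is a minimal, hypothetical sketch (the foo_* names are placeholders, not the actual ixgbe code): the driver decides whether wake-up should stay armed, records that decision, and leaves PME arming and the D-state transition to the PCI core, which consults device_may_wakeup() later in the suspend sequence.

static int __maybe_unused foo_suspend(struct device *dev_d)
{
	bool wake;
	int err;

	/* Hypothetical helper: quiesces the device and reports whether
	 * wake-up (e.g. WoL) should remain armed. */
	err = foo_quiesce(to_pci_dev(dev_d), &wake);

	/* Record the policy instead of calling pci_prepare_to_sleep()
	 * or pci_wake_from_d3() directly; the PCI core acts on
	 * device_may_wakeup() when it powers the device down. */
	device_set_wakeup_enable(dev_d, wake);

	return err;
}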
Signed-off-by: Vaibhav Gupta Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 61 +++++-------------- 1 file changed, 15 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4d898ff21a46..e339edd0b593 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6877,32 +6877,20 @@ int ixgbe_close(struct net_device *netdev) return 0; } -#ifdef CONFIG_PM -static int ixgbe_resume(struct pci_dev *pdev) +static int __maybe_unused ixgbe_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; u32 err; adapter->hw.hw_addr = adapter->io_addr; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* - * pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - err = pci_enable_device_mem(pdev); - if (err) { - e_dev_err("Cannot enable PCI device from suspend\n"); - return err; - } smp_mb__before_atomic(); clear_bit(__IXGBE_DISABLED, &adapter->state); pci_set_master(pdev); - pci_wake_from_d3(pdev, false); + device_wakeup_disable(dev_d); ixgbe_reset(adapter); @@ -6920,7 +6908,6 @@ static int ixgbe_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { @@ -6929,9 +6916,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) struct ixgbe_hw *hw = &adapter->hw; u32 ctrl; u32 wufc = adapter->wol; -#ifdef CONFIG_PM - int retval = 0; -#endif rtnl_lock(); netif_device_detach(netdev); @@ -6942,12 +6926,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) ixgbe_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif if (hw->mac.ops.stop_link_on_d3) hw->mac.ops.stop_link_on_d3(hw); @@ -7002,26 +6980,18 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) return 0; } -#ifdef CONFIG_PM -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ixgbe_suspend(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); int retval; bool wake; retval = __ixgbe_shutdown(pdev, &wake); - if (retval) - return retval; - if (wake) { - pci_prepare_to_sleep(pdev); - } else { - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - } + device_set_wakeup_enable(dev_d, wake); - return 0; + return retval; } -#endif /* CONFIG_PM */ static void ixgbe_shutdown(struct pci_dev *pdev) { @@ -11383,16 +11353,15 @@ static const struct pci_error_handlers ixgbe_err_handler = { .resume = ixgbe_io_resume, }; +static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume); + static struct pci_driver ixgbe_driver = { - .name = ixgbe_driver_name, - .id_table = ixgbe_pci_tbl, - .probe = ixgbe_probe, - .remove = ixgbe_remove, -#ifdef CONFIG_PM - .suspend = ixgbe_suspend, - .resume = ixgbe_resume, -#endif - .shutdown = ixgbe_shutdown, + .name = ixgbe_driver_name, + .id_table = ixgbe_pci_tbl, + .probe = ixgbe_probe, + .remove = ixgbe_remove, + .driver.pm = &ixgbe_pm_ops, + .shutdown = ixgbe_shutdown, .sriov_configure = ixgbe_pci_sriov_configure, .err_handler = &ixgbe_err_handler }; From bac66317284350176b087ac44b0e4266770a659e Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 
Jun 2020 14:59:42 +0530 Subject: [PATCH 1775/2056] ixgbevf: use generic power management With legacy PM, drivers themselves were responsible for managing the device's power states and takes care of register states. After upgrading to the generic structure, PCI core will take care of required tasks and drivers should do only device-specific operations. The driver was invoking PCI helper functions like pci_save/restore_state(), and pci_enable/disable_device(), which is not recommended. Compile-tested only. Signed-off-by: Vaibhav Gupta Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ixgbevf/ixgbevf_main.c | 44 +++++-------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 6e9a397db583..c3d26cc0cf51 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4297,13 +4297,10 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused ixgbevf_suspend(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct ixgbevf_adapter *adapter = netdev_priv(netdev); -#ifdef CONFIG_PM - int retval = 0; -#endif rtnl_lock(); netif_device_detach(netdev); @@ -4314,37 +4311,16 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) ixgbevf_clear_interrupt_scheme(adapter); rtnl_unlock(); -#ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; - -#endif - if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) - pci_disable_device(pdev); - return 0; } -#ifdef CONFIG_PM -static int ixgbevf_resume(struct pci_dev *pdev) +static int __maybe_unused ixgbevf_resume(struct device *dev_d) { + struct pci_dev *pdev = to_pci_dev(dev_d); struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); u32 err; - pci_restore_state(pdev); - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. 
- */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - adapter->hw.hw_addr = adapter->io_addr; smp_mb__before_atomic(); clear_bit(__IXGBEVF_DISABLED, &adapter->state); @@ -4365,10 +4341,9 @@ static int ixgbevf_resume(struct pci_dev *pdev) return err; } -#endif /* CONFIG_PM */ static void ixgbevf_shutdown(struct pci_dev *pdev) { - ixgbevf_suspend(pdev, PMSG_SUSPEND); + ixgbevf_suspend(&pdev->dev); } static void ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, @@ -4888,16 +4863,17 @@ static const struct pci_error_handlers ixgbevf_err_handler = { .resume = ixgbevf_io_resume, }; +static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume); + static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = ixgbevf_remove, -#ifdef CONFIG_PM + /* Power Management Hooks */ - .suspend = ixgbevf_suspend, - .resume = ixgbevf_resume, -#endif + .driver.pm = &ixgbevf_pm_ops, + .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; From 69a74aef8a18eef20fb0044b5e164af41b84db21 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Mon, 29 Jun 2020 14:59:43 +0530 Subject: [PATCH 1776/2056] e100: use generic power management With legacy PM hooks, it was the responsibility of a driver to manage PCI states and also the device's power state. The generic approach is to let PCI core handle the work. e100_suspend() calls __e100_shutdown() to perform intermediate tasks. __e100_shutdown() calls pci_save_state() which is not recommended. e100_suspend() also calls __e100_power_off() which is calling PCI helper functions, pci_prepare_to_sleep(), pci_set_power_state(), along with pci_wake_from_d3(...,false). Hence, the functin call is removed and wol is disabled as earlier using device_wakeup_disable(). Compile-tested only. 
Signed-off-by: Vaibhav Gupta Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e100.c | 32 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 91c64f91a835..36da059388dc 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2993,8 +2993,6 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake) e100_down(nic); netif_device_detach(netdev); - pci_save_state(pdev); - if ((nic->flags & wol_magic) | e100_asf(nic)) { /* enable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { @@ -3024,24 +3022,22 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake) return 0; } -#ifdef CONFIG_PM -static int e100_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused e100_suspend(struct device *dev_d) { bool wake; - __e100_shutdown(pdev, &wake); - return __e100_power_off(pdev, wake); + + __e100_shutdown(to_pci_dev(dev_d), &wake); + + device_wakeup_disable(dev_d); + + return 0; } -static int e100_resume(struct pci_dev *pdev) +static int __maybe_unused e100_resume(struct device *dev_d) { - struct net_device *netdev = pci_get_drvdata(pdev); + struct net_device *netdev = dev_get_drvdata(dev_d); struct nic *nic = netdev_priv(netdev); - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* ack any pending wake events, disable PME */ - pci_enable_wake(pdev, PCI_D0, 0); - /* disable reverse auto-negotiation */ if (nic->phy == phy_82552_v) { u16 smartspeed = mdio_read(netdev, nic->mii.phy_id, @@ -3058,7 +3054,6 @@ static int e100_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ static void e100_shutdown(struct pci_dev *pdev) { @@ -3146,16 +3141,17 @@ static const struct pci_error_handlers e100_err_handler = { .resume = e100_io_resume, }; +static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume); + static struct pci_driver e100_driver = { .name = DRV_NAME, .id_table = e100_id_table, .probe = e100_probe, .remove = e100_remove, -#ifdef CONFIG_PM + /* Power Management hooks */ - .suspend = e100_suspend, - .resume = e100_resume, -#endif + .driver.pm = &e100_pm_ops, + .shutdown = e100_shutdown, .err_handler = &e100_err_handler, }; From 4b6bafb9e1d4d1eb16df0942abf455726c88c811 Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Wed, 15 Jul 2020 01:10:35 +0530 Subject: [PATCH 1777/2056] e1000: Remove unnecessary usages of memset Replace memsets of 1 byte with simple assignments. Issue reported by checkpatch.pl. 
Signed-off-by: Suraj Upadhyay Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 0b4196d2cdd4..f976e9daa3d8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1356,8 +1356,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size &= ~1; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; } static int e1000_check_lbtest_frame(const unsigned char *data, From c5b369651b582128c0d2aa74b934844270b13050 Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Wed, 15 Jul 2020 01:11:17 +0530 Subject: [PATCH 1778/2056] e1000e: Remove unnecessary usages of memset Replace memsets of 1 byte with simple assignments. Issue found with checkpatch.pl Signed-off-by: Suraj Upadhyay Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000e/ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 64f684dc6c7a..a8fc9208382c 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1608,8 +1608,8 @@ static void e1000_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size &= ~1; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); - memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); + skb->data[frame_size / 2 + 10] = 0xBE; + skb->data[frame_size / 2 + 12] = 0xAF; } static int e1000_check_lbtest_frame(struct sk_buff *skb, From 90105264a60d39f7a332b7acfe8038c2a01e1ae2 Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Wed, 15 Jul 2020 01:11:47 +0530 Subject: [PATCH 1779/2056] igb: Remove unnecessary usages of memset Replace memsets of 1 byte with simple assignment. Issue found with checkpatch.pl Signed-off-by: Suraj Upadhyay Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index c2cf414d126b..6e8231c1ddf0 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -1782,8 +1782,8 @@ static void igb_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size /= 2; memset(&skb->data[frame_size], 0xAA, frame_size - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - memset(&skb->data[frame_size + 12], 0xAF, 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; } static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer, From 7ba068d12844e8ad2615aa4c9ffbbf023c46a92c Mon Sep 17 00:00:00 2001 From: Suraj Upadhyay Date: Wed, 15 Jul 2020 01:12:13 +0530 Subject: [PATCH 1780/2056] ixgbe: Remove unnecessary usages of memset Replace memsets of 1 byte with simple assignment. 
Issue found with checkpatch.pl Signed-off-by: Suraj Upadhyay Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 6725d892336e..71ec908266a6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1951,8 +1951,8 @@ static void ixgbe_create_lbtest_frame(struct sk_buff *skb, memset(skb->data, 0xFF, frame_size); frame_size >>= 1; memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1); - memset(&skb->data[frame_size + 10], 0xBE, 1); - memset(&skb->data[frame_size + 12], 0xAF, 1); + skb->data[frame_size + 10] = 0xBE; + skb->data[frame_size + 12] = 0xAF; } static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, From 09a071f52bbedddef626e71c0fd210838532f347 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Thu, 23 Jul 2020 09:07:20 -0700 Subject: [PATCH 1781/2056] Documentation: intel: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Tony Nguyen --- .../networking/device_drivers/ethernet/intel/e100.rst | 4 ++-- .../networking/device_drivers/ethernet/intel/fm10k.rst | 2 +- .../networking/device_drivers/ethernet/intel/iavf.rst | 2 +- .../networking/device_drivers/ethernet/intel/igb.rst | 2 +- .../networking/device_drivers/ethernet/intel/igbvf.rst | 2 +- .../networking/device_drivers/ethernet/intel/ixgb.rst | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Documentation/networking/device_drivers/ethernet/intel/e100.rst b/Documentation/networking/device_drivers/ethernet/intel/e100.rst index 3ac21e7119a7..3d4a9ba21946 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/e100.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/e100.rst @@ -41,7 +41,7 @@ Identifying Your Adapter For information on how to identify your adapter, and for the latest Intel network drivers, refer to the Intel Support website: -http://www.intel.com/support +https://www.intel.com/support Driver Configuration Parameters =============================== @@ -179,7 +179,7 @@ filtering by Support ======= For general information, go to the Intel support website at: -http://www.intel.com/support/ +https://www.intel.com/support/ or the Intel Wired Networking project hosted by Sourceforge at: http://sourceforge.net/projects/e1000 diff --git a/Documentation/networking/device_drivers/ethernet/intel/fm10k.rst b/Documentation/networking/device_drivers/ethernet/intel/fm10k.rst index 4d279e64e221..9258ef6f515c 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/fm10k.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/fm10k.rst @@ -22,7 +22,7 @@ Ethernet Multi-host Controller. 
For information on how to identify your adapter, and for the latest Intel network drivers, refer to the Intel Support website: -http://www.intel.com/support +https://www.intel.com/support Flow Control diff --git a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst index 84ac7e75f363..52e037b11c97 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/iavf.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/iavf.rst @@ -43,7 +43,7 @@ device. For information on how to identify your adapter, and for the latest NVM/FW images and Intel network drivers, refer to the Intel Support website: -http://www.intel.com/support +https://www.intel.com/support Additional Features and Configurations diff --git a/Documentation/networking/device_drivers/ethernet/intel/igb.rst b/Documentation/networking/device_drivers/ethernet/intel/igb.rst index 87e560fe5eaa..d46289e182cf 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/igb.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/igb.rst @@ -20,7 +20,7 @@ Identifying Your Adapter ======================== For information on how to identify your adapter, and for the latest Intel network drivers, refer to the Intel Support website: -http://www.intel.com/support +https://www.intel.com/support Command Line Parameters diff --git a/Documentation/networking/device_drivers/ethernet/intel/igbvf.rst b/Documentation/networking/device_drivers/ethernet/intel/igbvf.rst index 557fc020ef31..40fa210c5e14 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/igbvf.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/igbvf.rst @@ -35,7 +35,7 @@ Identifying Your Adapter ======================== For information on how to identify your adapter, and for the latest Intel network drivers, refer to the Intel Support website: -http://www.intel.com/support +https://www.intel.com/support Additional Features and Configurations diff --git a/Documentation/networking/device_drivers/ethernet/intel/ixgb.rst b/Documentation/networking/device_drivers/ethernet/intel/ixgb.rst index ab624f1a44a8..c6a233e68ad6 100644 --- a/Documentation/networking/device_drivers/ethernet/intel/ixgb.rst +++ b/Documentation/networking/device_drivers/ethernet/intel/ixgb.rst @@ -203,7 +203,7 @@ With the 10 Gigabit server adapters, the default Linux configuration will very likely limit the total available throughput artificially. There is a set of configuration changes that, when applied together, will increase the ability of Linux to transmit and receive data. The following enhancements were -originally acquired from settings published at http://www.spec.org/web99/ for +originally acquired from settings published at https://www.spec.org/web99/ for various submitted results using Linux. NOTE: From 935f73bd51de35fa01f39098e970ecf4405b8593 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 20 Jul 2020 16:27:41 +0800 Subject: [PATCH 1782/2056] ixgbe: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). 
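For readers unfamiliar with the helper, eth_zero_addr() and its sibling eth_broadcast_addr() from <linux/etherdevice.h> are thin wrappers around memset() that make the intent and the ETH_ALEN length explicit; a short illustration:

u8 mac[ETH_ALEN];

eth_zero_addr(mac);		/* equivalent to memset(mac, 0x00, ETH_ALEN) */
eth_broadcast_addr(mac);	/* equivalent to memset(mac, 0xff, ETH_ALEN) */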
Signed-off-by: Miaohe Lin Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 23a92656821d..988db46bff0e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -783,7 +783,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); else - memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); + eth_zero_addr(adapter->vfinfo[vf].vf_mac_addresses); return retval; } From 8698fb64cc77bf103ab582970965ae851f5f4d72 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Mon, 20 Jul 2020 16:29:14 +0800 Subject: [PATCH 1783/2056] igb: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). Signed-off-by: Miaohe Lin Tested-by: Aaron Brown Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ae8d64324619..b97916b91aba 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7159,7 +7159,7 @@ static void igb_flush_mac_table(struct igb_adapter *adapter) for (i = 0; i < hw->mac.rar_entry_count; i++) { adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); adapter->mac_table[i].queue = 0; igb_rar_set_index(adapter, i); } @@ -7308,7 +7308,7 @@ static int igb_del_mac_filter_flags(struct igb_adapter *adapter, } else { adapter->mac_table[i].state = 0; adapter->mac_table[i].queue = 0; - memset(adapter->mac_table[i].addr, 0, ETH_ALEN); + eth_zero_addr(adapter->mac_table[i].addr); } igb_rar_set_index(adapter, i); From 800834285361dcf8e98b018e891df876472a4fac Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Tue, 28 Jul 2020 17:21:26 +0200 Subject: [PATCH 1784/2056] bpf, arm64: Add BPF exception tables When a tracing BPF program attempts to read memory without using the bpf_probe_read() helper, the verifier marks the load instruction with the BPF_PROBE_MEM flag. Since the arm64 JIT does not currently recognize this flag it falls back to the interpreter. Add support for BPF_PROBE_MEM, by appending an exception table to the BPF program. If the load instruction causes a data abort, the fixup infrastructure finds the exception table and fixes up the fault, by clearing the destination register and jumping over the faulting instruction. To keep the compact exception table entry format, inspect the pc in fixup_exception(). A more generic solution would add a "handler" field to the table entry, like on x86 and s390. 
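For context on what generates BPF_PROBE_MEM loads in the first place, the sketch below shows the kind of program that benefits. The hook and field chosen here are a common illustration, not taken from this patch: any BTF-enabled tracing program that dereferences a kernel pointer directly, without bpf_probe_read(), gets those loads marked BPF_PROBE_MEM by the verifier, and with this change a faulting load on arm64 is fixed up through the appended exception table instead of forcing a fallback to the interpreter.

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#define AF_INET6 10	/* not provided by vmlinux.h */

char _license[] SEC("license") = "GPL";

__u64 ipv6_closes = 0;

SEC("fentry/tcp_close")
int BPF_PROG(count_ipv6_closes, struct sock *sk, long timeout)
{
	/* Direct load through a BTF-typed kernel pointer, no
	 * bpf_probe_read(): the verifier marks it BPF_PROBE_MEM. If the
	 * access faults, the JIT's exception table entry zeroes the
	 * destination register and execution continues past the load. */
	if (sk->__sk_common.skc_family == AF_INET6)
		__sync_fetch_and_add(&ipv6_closes, 1);
	return 0;
}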
Signed-off-by: Jean-Philippe Brucker Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200728152122.1292756-2-jean-philippe@linaro.org --- arch/arm64/include/asm/extable.h | 12 +++++ arch/arm64/mm/extable.c | 12 +++-- arch/arm64/net/bpf_jit_comp.c | 93 +++++++++++++++++++++++++++++--- 3 files changed, 108 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 56a4f68b262e..840a35ed92ec 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -22,5 +22,17 @@ struct exception_table_entry #define ARCH_HAS_RELATIVE_EXTABLE +#ifdef CONFIG_BPF_JIT +int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, + struct pt_regs *regs); +#else /* !CONFIG_BPF_JIT */ +static inline +int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + return 0; +} +#endif /* !CONFIG_BPF_JIT */ + extern int fixup_exception(struct pt_regs *regs); #endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 81e694af5f8c..eee1732ab6cd 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -11,8 +11,14 @@ int fixup_exception(struct pt_regs *regs) const struct exception_table_entry *fixup; fixup = search_exception_tables(instruction_pointer(regs)); - if (fixup) - regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + if (!fixup) + return 0; - return fixup != NULL; + if (IS_ENABLED(CONFIG_BPF_JIT) && + regs->pc >= BPF_JIT_REGION_START && + regs->pc < BPF_JIT_REGION_END) + return arm64_bpf_fixup_exception(fixup, regs); + + regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + return 1; } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 3cb25b43b368..f8912e45be7a 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) "bpf_jit: " fmt +#include #include #include #include @@ -56,6 +57,7 @@ struct jit_ctx { int idx; int epilogue_offset; int *offset; + int exentry_idx; __le32 *image; u32 stack_size; }; @@ -351,6 +353,67 @@ static void build_epilogue(struct jit_ctx *ctx) emit(A64_RET(A64_LR), ctx); } +#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0) +#define BPF_FIXUP_REG_MASK GENMASK(31, 27) + +int arm64_bpf_fixup_exception(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup); + int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup); + + regs->regs[dst_reg] = 0; + regs->pc = (unsigned long)&ex->fixup - offset; + return 1; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + off_t offset; + unsigned long pc; + struct exception_table_entry *ex; + + if (!ctx->image) + /* First pass */ + return 0; + + if (BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (!ctx->prog->aux->extable || + WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->exentry_idx]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = pc - (long)&ex->insn; + if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN)) + return -ERANGE; + ex->insn = offset; + + /* + * Since the extable follows the program, the fixup offset is always + * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value + * to keep things simple, and put the destination register in the upper + * bits. 
We don't need to worry about buildtime or runtime sort + * modifying the upper bits because the table is already sorted, and + * isn't part of the main exception table. + */ + offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE); + if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset)) + return -ERANGE; + + ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | + FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg); + + ctx->exentry_idx++; + return 0; +} + /* JITs an eBPF instruction. * Returns: * 0 - successfully JITed an 8-byte eBPF instruction. @@ -375,6 +438,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, u8 jmp_cond, reg; s32 jmp_offset; u32 a64_insn; + int ret; #define check_imm(bits, imm) do { \ if ((((imm) > 0) && ((imm) >> (bits))) || \ @@ -694,7 +758,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, const u8 r0 = bpf2a64[BPF_REG_0]; bool func_addr_fixed; u64 func_addr; - int ret; ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &func_addr, &func_addr_fixed); @@ -738,6 +801,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, case BPF_LDX | BPF_MEM | BPF_H: case BPF_LDX | BPF_MEM | BPF_B: case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: emit_a64_mov_i(1, tmp, off, ctx); switch (BPF_SIZE(code)) { case BPF_W: @@ -753,6 +820,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit(A64_LDR64(dst, src, tmp), ctx); break; } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; break; /* ST: *(size *)(dst + off) = imm */ @@ -868,6 +939,9 @@ static int validate_code(struct jit_ctx *ctx) return -1; } + if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries)) + return -1; + return 0; } @@ -884,6 +958,7 @@ struct arm64_jit_data { struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { + int image_size, prog_size, extable_size; struct bpf_prog *tmp, *orig_prog = prog; struct bpf_binary_header *header; struct arm64_jit_data *jit_data; @@ -891,7 +966,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bool tmp_blinded = false; bool extra_pass = false; struct jit_ctx ctx; - int image_size; u8 *image_ptr; if (!prog->jit_requested) @@ -922,7 +996,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) image_ptr = jit_data->image; header = jit_data->header; extra_pass = true; - image_size = sizeof(u32) * ctx.idx; + prog_size = sizeof(u32) * ctx.idx; goto skip_init_ctx; } memset(&ctx, 0, sizeof(ctx)); @@ -950,8 +1024,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ctx.epilogue_offset = ctx.idx; build_epilogue(&ctx); + extable_size = prog->aux->num_exentries * + sizeof(struct exception_table_entry); + /* Now we know the actual image size. */ - image_size = sizeof(u32) * ctx.idx; + prog_size = sizeof(u32) * ctx.idx; + image_size = prog_size + extable_size; header = bpf_jit_binary_alloc(image_size, &image_ptr, sizeof(u32), jit_fill_hole); if (header == NULL) { @@ -962,8 +1040,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* 2. Now, the actual pass. */ ctx.image = (__le32 *)image_ptr; + if (extable_size) + prog->aux->extable = (void *)image_ptr + prog_size; skip_init_ctx: ctx.idx = 0; + ctx.exentry_idx = 0; build_prologue(&ctx, was_classic); @@ -984,7 +1065,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* And we're done. 
*/ if (bpf_jit_enable > 1) - bpf_jit_dump(prog->len, image_size, 2, ctx.image); + bpf_jit_dump(prog->len, prog_size, 2, ctx.image); bpf_flush_icache(header, ctx.image + ctx.idx); @@ -1005,7 +1086,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } prog->bpf_func = (void *)ctx.image; prog->jited = 1; - prog->jited_len = image_size; + prog->jited_len = prog_size; if (!prog->is_func || extra_pass) { bpf_prog_fill_jited_linfo(prog, ctx.offset); From 4fc00b79b85d4c34bef06ad49f109ad7cd9e5d83 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 28 Jul 2020 15:18:01 -0700 Subject: [PATCH 1785/2056] bpf: Add missing newline characters in verifier error messages Newline characters are added in two verifier error messages, refactored in Commit afbf21dce668 ("bpf: Support readonly/readwrite buffers in verifier"). This way, they do not mix with messages afterwards. Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200728221801.1090349-1-yhs@fb.com --- kernel/bpf/verifier.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 88bb25d08bf8..b6ccfce3bf4c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3069,7 +3069,7 @@ static int __check_buffer_access(struct bpf_verifier_env *env, { if (off < 0) { verbose(env, - "R%d invalid %s buffer access: off=%d, size=%d", + "R%d invalid %s buffer access: off=%d, size=%d\n", regno, buf_info, off, size); return -EACCES; } @@ -3078,7 +3078,7 @@ static int __check_buffer_access(struct bpf_verifier_env *env, tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, - "R%d invalid variable buffer offset: off=%d, var_off=%s", + "R%d invalid variable buffer offset: off=%d, var_off=%s\n", regno, off, tn_buf); return -EACCES; } From 12e6196fb15953605be54ac9320ac54371aecab7 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 28 Jul 2020 15:18:01 -0700 Subject: [PATCH 1786/2056] selftests/bpf: Test bpf_iter buffer access with negative offset Commit afbf21dce668 ("bpf: Support readonly/readwrite buffers in verifier") added readonly/readwrite buffer support which is currently used by bpf_iter tracing programs. It has a bug with incorrect parameter ordering which later fixed by Commit f6dfbe31e8fa ("bpf: Fix swapped arguments in calls to check_buffer_access"). This patch added a test case with a negative offset access which will trigger the error path. 
Without Commit f6dfbe31e8fa, running the test case in the patch, the error message looks like: R1_w=rdwr_buf(id=0,off=0,imm=0) R10=fp0 ; value_sum += *(__u32 *)(value - 4); 2: (61) r1 = *(u32 *)(r1 -4) R1 invalid (null) buffer access: off=-4, size=4 With the above commit, the error message looks like: R1_w=rdwr_buf(id=0,off=0,imm=0) R10=fp0 ; value_sum += *(__u32 *)(value - 4); 2: (61) r1 = *(u32 *)(r1 -4) R1 invalid rdwr buffer access: off=-4, size=4 Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200728221801.1090406-1-yhs@fb.com --- .../selftests/bpf/prog_tests/bpf_iter.c | 13 ++++++++++++ .../selftests/bpf/progs/bpf_iter_test_kern6.c | 21 +++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index d95de80b1851..4ffefdc1130f 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -21,6 +21,7 @@ #include "bpf_iter_bpf_percpu_array_map.skel.h" #include "bpf_iter_bpf_sk_storage_map.skel.h" #include "bpf_iter_test_kern5.skel.h" +#include "bpf_iter_test_kern6.skel.h" static int duration; @@ -885,6 +886,16 @@ static void test_rdonly_buf_out_of_bound(void) bpf_iter_test_kern5__destroy(skel); } +static void test_buf_neg_offset(void) +{ + struct bpf_iter_test_kern6 *skel; + + skel = bpf_iter_test_kern6__open_and_load(); + if (CHECK(skel, "bpf_iter_test_kern6__open_and_load", + "skeleton open_and_load unexpected success\n")) + bpf_iter_test_kern6__destroy(skel); +} + void test_bpf_iter(void) { if (test__start_subtest("btf_id_or_null")) @@ -933,4 +944,6 @@ void test_bpf_iter(void) test_bpf_sk_storage_map(); if (test__start_subtest("rdonly-buf-out-of-bound")) test_rdonly_buf_out_of_bound(); + if (test__start_subtest("buf-neg-offset")) + test_buf_neg_offset(); } diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c new file mode 100644 index 000000000000..1c7304f56b1e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2020 Facebook */ +#include "bpf_iter.h" +#include + +char _license[] SEC("license") = "GPL"; + +__u32 value_sum = 0; + +SEC("iter/bpf_map_elem") +int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx) +{ + void *value = ctx->value; + + if (value == (void *)0) + return 0; + + /* negative offset, verifier failure. */ + value_sum += *(__u32 *)(value - 4); + return 0; +} From f7c6cb1d9728dea9d9f131ef57303d6821afb0f8 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 Jul 2020 17:31:03 -0700 Subject: [PATCH 1787/2056] bpf: Expose socket storage to BPF_PROG_TYPE_CGROUP_SOCK This lets us use socket storage from the following hooks: * BPF_CGROUP_INET_SOCK_CREATE * BPF_CGROUP_INET_SOCK_RELEASE * BPF_CGROUP_INET4_POST_BIND * BPF_CGROUP_INET6_POST_BIND Using existing 'bpf_sk_storage_get_proto' doesn't work because second argument is ARG_PTR_TO_SOCKET. Even though BPF_PROG_TYPE_CGROUP_SOCK hooks operate on 'struct bpf_sock', the verifier still considers it as a PTR_TO_CTX. That's why I'm adding another 'bpf_sk_storage_get_cg_sock_proto' definition strictly for BPF_PROG_TYPE_CGROUP_SOCK which accepts ARG_PTR_TO_CTX which is really 'struct sock' for this program type. 
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729003104.1280813-1-sdf@google.com --- net/core/bpf_sk_storage.c | 10 ++++++++++ net/core/filter.c | 3 +++ 2 files changed, 13 insertions(+) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index eafcd15e7dfd..d3377c90a291 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -944,6 +944,16 @@ const struct bpf_func_proto bpf_sk_storage_get_proto = { .arg4_type = ARG_ANYTHING, }; +const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = { + .func = bpf_sk_storage_get, + .gpl_only = false, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */ + .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL, + .arg4_type = ARG_ANYTHING, +}; + const struct bpf_func_proto bpf_sk_storage_delete_proto = { .func = bpf_sk_storage_delete, .gpl_only = false, diff --git a/net/core/filter.c b/net/core/filter.c index 29e3455122f7..7124f0fe6974 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6187,6 +6187,7 @@ bool bpf_helper_changes_pkt_data(void *func) } const struct bpf_func_proto bpf_event_output_data_proto __weak; +const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak; static const struct bpf_func_proto * sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) @@ -6219,6 +6220,8 @@ sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_cgroup_classid: return &bpf_get_cgroup_classid_curr_proto; #endif + case BPF_FUNC_sk_storage_get: + return &bpf_sk_storage_get_cg_sock_proto; default: return bpf_base_func_proto(func_id); } From 4fb5f94911405b6d2645d4384c2ae1215bfc6a76 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Tue, 28 Jul 2020 17:31:04 -0700 Subject: [PATCH 1788/2056] selftests/bpf: Verify socket storage in cgroup/sock_{create, release} Augment udp_limit test to set and verify socket storage value. That should be enough to exercise the changes from the previous patch. 
Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729003104.1280813-2-sdf@google.com --- tools/testing/selftests/bpf/progs/udp_limit.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c index 8429b22525a7..165e3c2dd9a3 100644 --- a/tools/testing/selftests/bpf/progs/udp_limit.c +++ b/tools/testing/selftests/bpf/progs/udp_limit.c @@ -6,14 +6,28 @@ int invocations = 0, in_use = 0; +struct { + __uint(type, BPF_MAP_TYPE_SK_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, int); +} sk_map SEC(".maps"); + SEC("cgroup/sock_create") int sock(struct bpf_sock *ctx) { + int *sk_storage; __u32 key; if (ctx->type != SOCK_DGRAM) return 1; + sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0, + BPF_SK_STORAGE_GET_F_CREATE); + if (!sk_storage) + return 0; + *sk_storage = 0xdeadbeef; + __sync_fetch_and_add(&invocations, 1); if (in_use > 0) { @@ -31,11 +45,16 @@ int sock(struct bpf_sock *ctx) SEC("cgroup/sock_release") int sock_release(struct bpf_sock *ctx) { + int *sk_storage; __u32 key; if (ctx->type != SOCK_DGRAM) return 1; + sk_storage = bpf_sk_storage_get(&sk_map, ctx, 0, 0); + if (!sk_storage || *sk_storage != 0xdeadbeef) + return 0; + __sync_fetch_and_add(&invocations, 1); __sync_fetch_and_add(&in_use, -1); return 1; From dfdb0d93e5bc351af5b286ae9c630d3cf869b810 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 29 Jul 2020 16:56:58 +0800 Subject: [PATCH 1789/2056] selftests/bpf: Add xdpdrv mode for test_xdp_redirect This patch add xdpdrv mode for test_xdp_redirect.sh since veth has support native mode. After update here is the test result: # ./test_xdp_redirect.sh selftests: test_xdp_redirect xdpgeneric [PASS] selftests: test_xdp_redirect xdpdrv [PASS] Signed-off-by: Hangbin Liu Signed-off-by: Daniel Borkmann Acked-by: Song Liu Acked-by: William Tu Link: https://lore.kernel.org/bpf/20200729085658.403794-1-liuhangbin@gmail.com --- .../selftests/bpf/test_xdp_redirect.sh | 96 +++++++++++-------- 1 file changed, 58 insertions(+), 38 deletions(-) diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh index c4b17e08d431..dd80f0c84afb 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh @@ -10,52 +10,72 @@ # | xdp forwarding | # ------------------ +ret=0 + +setup() +{ + + local xdpmode=$1 + + ip netns add ns1 + ip netns add ns2 + + ip link add veth1 index 111 type veth peer name veth11 netns ns1 + ip link add veth2 index 222 type veth peer name veth22 netns ns2 + + ip link set veth1 up + ip link set veth2 up + ip -n ns1 link set dev veth11 up + ip -n ns2 link set dev veth22 up + + ip -n ns1 addr add 10.1.1.11/24 dev veth11 + ip -n ns2 addr add 10.1.1.22/24 dev veth22 +} + cleanup() { - if [ "$?" = "0" ]; then - echo "selftests: test_xdp_redirect [PASS]"; - else - echo "selftests: test_xdp_redirect [FAILED]"; - fi - - set +e ip link del veth1 2> /dev/null ip link del veth2 2> /dev/null ip netns del ns1 2> /dev/null ip netns del ns2 2> /dev/null } -ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null -if [ $? -ne 0 ];then - echo "selftests: [SKIP] Could not run test without the ip xdpgeneric support" - exit 0 -fi +test_xdp_redirect() +{ + local xdpmode=$1 + + setup + + ip link set dev veth1 $xdpmode off &> /dev/null + if [ $? 
-ne 0 ];then + echo "selftests: test_xdp_redirect $xdpmode [SKIP]" + return 0 + fi + + ip -n ns1 link set veth11 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null + ip -n ns2 link set veth22 $xdpmode obj xdp_dummy.o sec xdp_dummy &> /dev/null + ip link set dev veth1 $xdpmode obj test_xdp_redirect.o sec redirect_to_222 &> /dev/null + ip link set dev veth2 $xdpmode obj test_xdp_redirect.o sec redirect_to_111 &> /dev/null + + ip netns exec ns1 ping -c 1 10.1.1.22 &> /dev/null + local ret1=$? + ip netns exec ns2 ping -c 1 10.1.1.11 &> /dev/null + local ret2=$? + + if [ $ret1 -eq 0 -a $ret2 -eq 0 ]; then + echo "selftests: test_xdp_redirect $xdpmode [PASS]"; + else + ret=1 + echo "selftests: test_xdp_redirect $xdpmode [FAILED]"; + fi + + cleanup +} + set -e +trap cleanup 2 3 6 9 -ip netns add ns1 -ip netns add ns2 +test_xdp_redirect xdpgeneric +test_xdp_redirect xdpdrv -trap cleanup 0 2 3 6 9 - -ip link add veth1 index 111 type veth peer name veth11 -ip link add veth2 index 222 type veth peer name veth22 - -ip link set veth11 netns ns1 -ip link set veth22 netns ns2 - -ip link set veth1 up -ip link set veth2 up - -ip netns exec ns1 ip addr add 10.1.1.11/24 dev veth11 -ip netns exec ns2 ip addr add 10.1.1.22/24 dev veth22 - -ip netns exec ns1 ip link set dev veth11 up -ip netns exec ns2 ip link set dev veth22 up - -ip link set dev veth1 xdpgeneric obj test_xdp_redirect.o sec redirect_to_222 -ip link set dev veth2 xdpgeneric obj test_xdp_redirect.o sec redirect_to_111 - -ip netns exec ns1 ping -c 1 10.1.1.22 -ip netns exec ns2 ping -c 1 10.1.1.11 - -exit 0 +exit $ret From 80546ac4586c0bd326aa7ce80f076646db02bcd0 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 28 Jul 2020 21:50:56 -0700 Subject: [PATCH 1790/2056] selftests/bpf: Don't destroy failed link Check that link is NULL or proper pointer before invoking bpf_link__destroy(). Not doing this causes crash in test_progs, when cg_storage_multi selftest fails. 
Fixes: 3573f384014f ("selftests/bpf: Test CGROUP_STORAGE behavior on shared egress + ingress") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729045056.3363921-1-andriin@fb.com --- .../bpf/prog_tests/cg_storage_multi.c | 42 ++++++++++++------- 1 file changed, 28 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c index c67d8c076a34..643dfa35419c 100644 --- a/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c @@ -147,8 +147,10 @@ static void test_egress_only(int parent_cgroup_fd, int child_cgroup_fd) goto close_bpf_object; close_bpf_object: - bpf_link__destroy(parent_link); - bpf_link__destroy(child_link); + if (!IS_ERR(parent_link)) + bpf_link__destroy(parent_link); + if (!IS_ERR(child_link)) + bpf_link__destroy(child_link); cg_storage_multi_egress_only__destroy(obj); } @@ -262,12 +264,18 @@ static void test_isolated(int parent_cgroup_fd, int child_cgroup_fd) goto close_bpf_object; close_bpf_object: - bpf_link__destroy(parent_egress1_link); - bpf_link__destroy(parent_egress2_link); - bpf_link__destroy(parent_ingress_link); - bpf_link__destroy(child_egress1_link); - bpf_link__destroy(child_egress2_link); - bpf_link__destroy(child_ingress_link); + if (!IS_ERR(parent_egress1_link)) + bpf_link__destroy(parent_egress1_link); + if (!IS_ERR(parent_egress2_link)) + bpf_link__destroy(parent_egress2_link); + if (!IS_ERR(parent_ingress_link)) + bpf_link__destroy(parent_ingress_link); + if (!IS_ERR(child_egress1_link)) + bpf_link__destroy(child_egress1_link); + if (!IS_ERR(child_egress2_link)) + bpf_link__destroy(child_egress2_link); + if (!IS_ERR(child_ingress_link)) + bpf_link__destroy(child_ingress_link); cg_storage_multi_isolated__destroy(obj); } @@ -367,12 +375,18 @@ static void test_shared(int parent_cgroup_fd, int child_cgroup_fd) goto close_bpf_object; close_bpf_object: - bpf_link__destroy(parent_egress1_link); - bpf_link__destroy(parent_egress2_link); - bpf_link__destroy(parent_ingress_link); - bpf_link__destroy(child_egress1_link); - bpf_link__destroy(child_egress2_link); - bpf_link__destroy(child_ingress_link); + if (!IS_ERR(parent_egress1_link)) + bpf_link__destroy(parent_egress1_link); + if (!IS_ERR(parent_egress2_link)) + bpf_link__destroy(parent_egress2_link); + if (!IS_ERR(parent_ingress_link)) + bpf_link__destroy(parent_ingress_link); + if (!IS_ERR(child_egress1_link)) + bpf_link__destroy(child_egress1_link); + if (!IS_ERR(child_egress2_link)) + bpf_link__destroy(child_egress2_link); + if (!IS_ERR(child_ingress_link)) + bpf_link__destroy(child_ingress_link); cg_storage_multi_shared__destroy(obj); } From a6599abdeac30063baf89df166068b20758e0e86 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Thu, 30 Jul 2020 14:53:25 +0200 Subject: [PATCH 1791/2056] selftests/bpf: Omit nodad flag when adding addresses to loopback Setting IFA_F_NODAD flag for IPv6 addresses to add to loopback is unnecessary. Duplicate Address Detection does not happen on loopback device. Also, passing 'nodad' flag to 'ip address' breaks libbpf CI, which runs in an environment with BusyBox implementation of 'ip' command, that doesn't understand this flag. 
Fixes: 0ab5539f8584 ("selftests/bpf: Tests for BPF_SK_LOOKUP attach point") Reported-by: Andrii Nakryiko Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Tested-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200730125325.1869363-1-jakub@cloudflare.com --- tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 9bbd2b2b7630..379da6f10ee9 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -1290,8 +1290,8 @@ static void run_tests(struct test_sk_lookup *skel) static int switch_netns(void) { static const char * const setup_script[] = { - "ip -6 addr add dev lo " EXT_IP6 "/128 nodad", - "ip -6 addr add dev lo " INT_IP6 "/128 nodad", + "ip -6 addr add dev lo " EXT_IP6 "/128", + "ip -6 addr add dev lo " INT_IP6 "/128", "ip link set dev lo up", NULL, }; From 50450fc716c1a570ee8d8bfe198ef5d3cfca36e4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 29 Jul 2020 16:21:48 -0700 Subject: [PATCH 1792/2056] libbpf: Make destructors more robust by handling ERR_PTR(err) cases Most of libbpf "constructors" on failure return ERR_PTR(err) result encoded as a pointer. It's a common mistake to eventually pass such malformed pointers into xxx__destroy()/xxx__free() "destructors". So instead of fixing up clean up code in selftests and user programs, handle such error pointers in destructors themselves. This works beautifully for NULL pointers passed to destructors, so might as well just work for error pointers. Suggested-by: Song Liu Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200729232148.896125-1-andriin@fb.com --- tools/lib/bpf/btf.c | 4 ++-- tools/lib/bpf/btf_dump.c | 2 +- tools/lib/bpf/libbpf.c | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index c9e760e120dc..ded5b29965f9 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -386,7 +386,7 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name, void btf__free(struct btf *btf) { - if (!btf) + if (IS_ERR_OR_NULL(btf)) return; if (btf->fd >= 0) @@ -1025,7 +1025,7 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) void btf_ext__free(struct btf_ext *btf_ext) { - if (!btf_ext) + if (IS_ERR_OR_NULL(btf_ext)) return; free(btf_ext->data); free(btf_ext); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index e1c344504cae..cf711168d34a 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -183,7 +183,7 @@ void btf_dump__free(struct btf_dump *d) { int i, cnt; - if (!d) + if (IS_ERR_OR_NULL(d)) return; free(d->type_states); diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 54830d603fee..b9f11f854985 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6504,7 +6504,7 @@ void bpf_object__close(struct bpf_object *obj) { size_t i; - if (!obj) + if (IS_ERR_OR_NULL(obj)) return; if (obj->clear_priv) @@ -7690,7 +7690,7 @@ int bpf_link__destroy(struct bpf_link *link) { int err = 0; - if (!link) + if (IS_ERR_OR_NULL(link)) return 0; if (!link->disconnected && link->detach) @@ -8502,7 +8502,7 @@ void perf_buffer__free(struct perf_buffer *pb) { int i; - if (!pb) + if (IS_ERR_OR_NULL(pb)) return; if (pb->cpu_bufs) { for (i = 0; i < 
pb->cpu_cnt; i++) { @@ -9379,8 +9379,7 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) for (i = 0; i < s->prog_cnt; i++) { struct bpf_link **link = s->progs[i].link; - if (!IS_ERR_OR_NULL(*link)) - bpf_link__destroy(*link); + bpf_link__destroy(*link); *link = NULL; } } From d3239425351a219ec779488ffb125352449c4106 Mon Sep 17 00:00:00 2001 From: Jian Yang Date: Mon, 27 Jul 2020 14:14:38 -0700 Subject: [PATCH 1793/2056] selftests: txtimestamp: add flag for timestamp validation tolerance. The txtimestamp selftest sets a fixed 500us tolerance. This value was arrived at experimentally. Some platforms have higher variances. Make this adjustable by adding the following flag: -t N: tolerance (usec) for timestamp validation. Signed-off-by: Jian Yang Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- tools/testing/selftests/net/txtimestamp.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c index 011b0da6b033..490a8cca708a 100644 --- a/tools/testing/selftests/net/txtimestamp.c +++ b/tools/testing/selftests/net/txtimestamp.c @@ -64,6 +64,7 @@ static int cfg_payload_len = 10; static int cfg_poll_timeout = 100; static int cfg_delay_snd; static int cfg_delay_ack; +static int cfg_delay_tolerance_usec = 500; static bool cfg_show_payload; static bool cfg_do_pktinfo; static bool cfg_busy_poll; @@ -152,11 +153,12 @@ static void validate_key(int tskey, int tstype) static void validate_timestamp(struct timespec *cur, int min_delay) { - int max_delay = min_delay + 500 /* processing time upper bound */; int64_t cur64, start64; + int max_delay; cur64 = timespec_to_us64(cur); start64 = timespec_to_us64(&ts_usr); + max_delay = min_delay + cfg_delay_tolerance_usec; if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) { fprintf(stderr, "ERROR: %lu us expected between %d and %d\n", @@ -683,6 +685,7 @@ static void __attribute__((noreturn)) usage(const char *filepath) " -r: use raw\n" " -R: use raw (IP_HDRINCL)\n" " -S N: usec to sleep before reading error queue\n" + " -t N: tolerance (usec) for timestamp validation\n" " -u: use udp\n" " -v: validate SND delay (usec)\n" " -V: validate ACK delay (usec)\n" @@ -697,7 +700,7 @@ static void parse_opt(int argc, char **argv) int c; while ((c = getopt(argc, argv, - "46bc:CeEFhIl:LnNp:PrRS:uv:V:x")) != -1) { + "46bc:CeEFhIl:LnNp:PrRS:t:uv:V:x")) != -1) { switch (c) { case '4': do_ipv6 = 0; @@ -760,6 +763,9 @@ static void parse_opt(int argc, char **argv) case 'S': cfg_sleep_usec = strtoul(optarg, NULL, 10); break; + case 't': + cfg_delay_tolerance_usec = strtoul(optarg, NULL, 10); + break; case 'u': proto_count++; cfg_proto = SOCK_DGRAM; From b2aecfe8e49059a69379a521aa68b4f55516780f Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:28 +0100 Subject: [PATCH 1794/2056] l2tp: don't export __l2tp_session_unhash When __l2tp_session_unhash was first added it was used outside of l2tp_core.c, but that's no longer the case. As such, there's no longer a need to export the function. Make it private inside l2tp_core.c, and relocate it to avoid having to declare the function prototype in l2tp_core.h. Since the function is no longer used outside l2tp_core.c, remove the "__" prefix since we don't need to indicate anything special about its expected use to callers. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 57 ++++++++++++++++++++------------------------ net/l2tp/l2tp_core.h | 1 - 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index e723828e458b..7f4aef5a58ba 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1180,6 +1180,30 @@ static void l2tp_tunnel_destruct(struct sock *sk) return; } +/* Remove an l2tp session from l2tp_core's hash lists. */ +static void l2tp_session_unhash(struct l2tp_session *session) +{ + struct l2tp_tunnel *tunnel = session->tunnel; + + /* Remove the session from core hashes */ + if (tunnel) { + /* Remove from the per-tunnel hash */ + write_lock_bh(&tunnel->hlist_lock); + hlist_del_init(&session->hlist); + write_unlock_bh(&tunnel->hlist_lock); + + /* For L2TPv3 we have a per-net hash: remove from there, too */ + if (tunnel->version != L2TP_HDR_VER_2) { + struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + + spin_lock_bh(&pn->l2tp_session_hlist_lock); + hlist_del_init_rcu(&session->global_hlist); + spin_unlock_bh(&pn->l2tp_session_hlist_lock); + synchronize_rcu(); + } + } +} + /* When the tunnel is closed, all the attached sessions need to go too. */ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) @@ -1209,7 +1233,7 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) write_unlock_bh(&tunnel->hlist_lock); - __l2tp_session_unhash(session); + l2tp_session_unhash(session); l2tp_session_queue_purge(session); if (session->session_close) @@ -1574,35 +1598,6 @@ void l2tp_session_free(struct l2tp_session *session) } EXPORT_SYMBOL_GPL(l2tp_session_free); -/* Remove an l2tp session from l2tp_core's hash lists. - * Provides a tidyup interface for pseudowire code which can't just route all - * shutdown via. l2tp_session_delete and a pseudowire-specific session_close - * callback. - */ -void __l2tp_session_unhash(struct l2tp_session *session) -{ - struct l2tp_tunnel *tunnel = session->tunnel; - - /* Remove the session from core hashes */ - if (tunnel) { - /* Remove from the per-tunnel hash */ - write_lock_bh(&tunnel->hlist_lock); - hlist_del_init(&session->hlist); - write_unlock_bh(&tunnel->hlist_lock); - - /* For L2TPv3 we have a per-net hash: remove from there, too */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_del_init_rcu(&session->global_hlist); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); - synchronize_rcu(); - } - } -} -EXPORT_SYMBOL_GPL(__l2tp_session_unhash); - /* This function is used by the netlink SESSION_DELETE command and by * pseudowire modules. 
*/ @@ -1611,7 +1606,7 @@ int l2tp_session_delete(struct l2tp_session *session) if (test_and_set_bit(0, &session->dead)) return 0; - __l2tp_session_unhash(session); + l2tp_session_unhash(session); l2tp_session_queue_purge(session); if (session->session_close) (*session->session_close)(session); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 2d2dd219a176..f6dd74476d13 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -201,7 +201,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel); -void __l2tp_session_unhash(struct l2tp_session *session); int l2tp_session_delete(struct l2tp_session *session); void l2tp_session_free(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, From 52016e259bab98613453e29d42f2ffe456feee11 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:29 +0100 Subject: [PATCH 1795/2056] l2tp: don't export tunnel and session free functions Tunnel and session instances are reference counted, and shouldn't be directly freed by pseudowire code. Rather than exporting l2tp_tunnel_free and l2tp_session_free, make them private to l2tp_core.c, and export the refcount functions instead. In order to do this, the refcount functions cannot be declared as inline. Since the codepaths which take and drop tunnel and session references are not directly in the datapath this shouldn't cause performance issues. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 60 ++++++++++++++++++++++++++++++-------------- net/l2tp/l2tp_core.h | 33 ++++-------------------- 2 files changed, 46 insertions(+), 47 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 7f4aef5a58ba..f22fe34eb8fc 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -149,12 +149,51 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; } -void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) +static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) { sock_put(tunnel->sock); /* the tunnel is freed in the socket destructor */ } -EXPORT_SYMBOL(l2tp_tunnel_free); + +static void l2tp_session_free(struct l2tp_session *session) +{ + struct l2tp_tunnel *tunnel = session->tunnel; + + if (tunnel) { + if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC)) + goto out; + l2tp_tunnel_dec_refcount(tunnel); + } + +out: + kfree(session); +} + +void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) +{ + refcount_inc(&tunnel->ref_count); +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_inc_refcount); + +void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) +{ + if (refcount_dec_and_test(&tunnel->ref_count)) + l2tp_tunnel_free(tunnel); +} +EXPORT_SYMBOL_GPL(l2tp_tunnel_dec_refcount); + +void l2tp_session_inc_refcount(struct l2tp_session *session) +{ + refcount_inc(&session->ref_count); +} +EXPORT_SYMBOL_GPL(l2tp_session_inc_refcount); + +void l2tp_session_dec_refcount(struct l2tp_session *session) +{ + if (refcount_dec_and_test(&session->ref_count)) + l2tp_session_free(session); +} +EXPORT_SYMBOL_GPL(l2tp_session_dec_refcount); /* Lookup a tunnel. A new reference is held on the returned tunnel. */ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) @@ -1581,23 +1620,6 @@ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); -/* Really kill the session. 
- */ -void l2tp_session_free(struct l2tp_session *session) -{ - struct l2tp_tunnel *tunnel = session->tunnel; - - if (tunnel) { - if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC)) - goto out; - l2tp_tunnel_dec_refcount(tunnel); - } - -out: - kfree(session); -} -EXPORT_SYMBOL_GPL(l2tp_session_free); - /* This function is used by the netlink SESSION_DELETE command and by * pseudowire modules. */ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index f6dd74476d13..5c49e2762300 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -175,13 +175,16 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) return &session->priv[0]; } +void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel); +void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel); +void l2tp_session_inc_refcount(struct l2tp_session *session); +void l2tp_session_dec_refcount(struct l2tp_session *session); + struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, u32 session_id); -void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); - struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, @@ -202,7 +205,6 @@ int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel); int l2tp_session_delete(struct l2tp_session *session); -void l2tp_session_free(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length); @@ -217,31 +219,6 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); -static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel) -{ - refcount_inc(&tunnel->ref_count); -} - -static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel) -{ - if (refcount_dec_and_test(&tunnel->ref_count)) - l2tp_tunnel_free(tunnel); -} - -/* Session reference counts. Incremented when code obtains a reference - * to a session. - */ -static inline void l2tp_session_inc_refcount(struct l2tp_session *session) -{ - refcount_inc(&session->ref_count); -} - -static inline void l2tp_session_dec_refcount(struct l2tp_session *session) -{ - if (refcount_dec_and_test(&session->ref_count)) - l2tp_session_free(session); -} - static inline int l2tp_get_l2specific_len(struct l2tp_session *session) { switch (session->l2specific_type) { From 628703f59dcc832a0bd04b4fa41d856e5df98112 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:30 +0100 Subject: [PATCH 1796/2056] l2tp: return void from l2tp_session_delete l2tp_session_delete is used to schedule a session instance for deletion. The function itself always returns zero, and none of its direct callers check its return value, so have the function return void. This change de-facto changes the l2tp netlink session_delete callback prototype since all pseudowires currently use l2tp_session_delete for their implementation of that operation. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 9 ++------- net/l2tp/l2tp_core.h | 4 ++-- net/l2tp/l2tp_netlink.c | 2 +- 3 files changed, 5 insertions(+), 10 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index f22fe34eb8fc..690dcbc30472 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1620,13 +1620,10 @@ void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) } EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); -/* This function is used by the netlink SESSION_DELETE command and by - * pseudowire modules. - */ -int l2tp_session_delete(struct l2tp_session *session) +void l2tp_session_delete(struct l2tp_session *session) { if (test_and_set_bit(0, &session->dead)) - return 0; + return; l2tp_session_unhash(session); l2tp_session_queue_purge(session); @@ -1634,8 +1631,6 @@ int l2tp_session_delete(struct l2tp_session *session) (*session->session_close)(session); l2tp_session_dec_refcount(session); - - return 0; } EXPORT_SYMBOL_GPL(l2tp_session_delete); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 5c49e2762300..0c32981f0cd3 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -167,7 +167,7 @@ struct l2tp_nl_cmd_ops { int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); - int (*session_delete)(struct l2tp_session *session); + void (*session_delete)(struct l2tp_session *session); }; static inline void *l2tp_session_priv(struct l2tp_session *session) @@ -204,7 +204,7 @@ struct l2tp_session *l2tp_session_create(int priv_size, int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel); -int l2tp_session_delete(struct l2tp_session *session); +void l2tp_session_delete(struct l2tp_session *session); void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 35716a6e1e2c..def78eebca4c 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -670,7 +670,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf pw_type = session->pwtype; if (pw_type < __L2TP_PWTYPE_MAX) if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete) - ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session); + l2tp_nl_cmd_ops[pw_type]->session_delete(session); l2tp_session_dec_refcount(session); From 2dedab6ff57edd29848391fa917a8f96c4f82583 Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:31 +0100 Subject: [PATCH 1797/2056] l2tp: remove build_header callback in struct l2tp_session The structure of an L2TP data packet header varies depending on the version of the L2TP protocol being used. struct l2tp_session used to have a build_header callback to abstract this difference away. It's clearer to simply choose the correct function to use when building the data packet (and we save on the function pointer in the session structure). This approach does mean dereferencing the parent tunnel structure in order to determine the tunnel version, but we're doing that in the transmit path in any case. Signed-off-by: Tom Parkin Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_core.c | 10 ++++------ net/l2tp/l2tp_core.h | 1 - 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 690dcbc30472..3992af139479 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -1116,7 +1116,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len } /* Setup L2TP header */ - session->build_header(session, __skb_push(skb, hdr_len)); + if (tunnel->version == L2TP_HDR_VER_2) + l2tp_build_l2tpv2_header(session, __skb_push(skb, hdr_len)); + else + l2tp_build_l2tpv3_header(session, __skb_push(skb, hdr_len)); /* Reset skb netfilter state */ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); @@ -1700,11 +1703,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len); } - if (tunnel->version == L2TP_HDR_VER_2) - session->build_header = l2tp_build_l2tpv2_header; - else - session->build_header = l2tp_build_l2tpv3_header; - l2tp_session_set_header_len(session, tunnel->version); refcount_set(&session->ref_count, 1); diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 0c32981f0cd3..3dfd3ddd28fd 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -101,7 +101,6 @@ struct l2tp_session { struct l2tp_stats stats; struct hlist_node global_hlist; /* global hash list node */ - int (*build_header)(struct l2tp_session *session, void *buf); void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); void (*session_close)(struct l2tp_session *session); void (*show)(struct seq_file *m, void *priv); From ca7885dbcd899e63e47c33707c1615d9dd281c7f Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:32 +0100 Subject: [PATCH 1798/2056] l2tp: tweak exports for l2tp_recv_common and l2tp_ioctl All of the l2tp subsystem's exported symbols are exported using EXPORT_SYMBOL_GPL, except for l2tp_recv_common and l2tp_ioctl. These functions alone are not useful without the rest of the l2tp infrastructure, so there's no practical benefit to these symbols using a different export policy. Change these exports to use EXPORT_SYMBOL_GPL for consistency with the rest of l2tp. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.c | 2 +- net/l2tp/l2tp_ip.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 3992af139479..701fc72ad9f4 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -808,7 +808,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, atomic_long_inc(&session->stats.rx_errors); kfree_skb(skb); } -EXPORT_SYMBOL(l2tp_recv_common); +EXPORT_SYMBOL_GPL(l2tp_recv_common); /* Drop skbs from the session's reorder_q */ diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index a159cb2bf0f4..df2a35b5714a 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -597,7 +597,7 @@ int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg) return put_user(amount, (int __user *)arg); } -EXPORT_SYMBOL(l2tp_ioctl); +EXPORT_SYMBOL_GPL(l2tp_ioctl); static struct proto l2tp_ip_prot = { .name = "L2TP/IP", From 340bb1ac450ba21cc4dabd368791fa49b39c858a Mon Sep 17 00:00:00 2001 From: Tom Parkin Date: Tue, 28 Jul 2020 18:20:33 +0100 Subject: [PATCH 1799/2056] l2tp: improve API documentation in l2tp_core.h * Improve the description of the key l2tp subsystem data structures. 
* Add high-level description of the main APIs for interacting with l2tp core. * Add documentation for the l2tp netlink session command callbacks. * Document the session pseudowire callbacks. Signed-off-by: Tom Parkin Signed-off-by: David S. Miller --- net/l2tp/l2tp_core.h | 86 ++++++++++++++++++++++++++++++++++++-------- 1 file changed, 72 insertions(+), 14 deletions(-) diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 3dfd3ddd28fd..3468d6b177a0 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -15,15 +15,15 @@ #include #endif -/* Just some random numbers */ +/* Random numbers used for internal consistency checks of tunnel and session structures */ #define L2TP_TUNNEL_MAGIC 0x42114DDA #define L2TP_SESSION_MAGIC 0x0C04EB7D -/* Per tunnel, session hash table size */ +/* Per tunnel session hash table size */ #define L2TP_HASH_BITS 4 #define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS) -/* System-wide, session hash table size */ +/* System-wide session hash table size */ #define L2TP_HASH_BITS_2 8 #define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2) @@ -43,9 +43,7 @@ struct l2tp_stats { struct l2tp_tunnel; -/* Describes a session. Contains information to determine incoming - * packets and transmit outgoing ones. - */ +/* L2TP session configuration */ struct l2tp_session_cfg { enum l2tp_pwtype pw_type; unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */ @@ -63,6 +61,11 @@ struct l2tp_session_cfg { char *ifname; }; +/* Represents a session (pseudowire) instance. + * Tracks runtime state including cookies, dataplane packet sequencing, and IO statistics. + * Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into + * an additional per-net ("global") hashlist. + */ struct l2tp_session { int magic; /* should be L2TP_SESSION_MAGIC */ long dead; @@ -101,15 +104,32 @@ struct l2tp_session { struct l2tp_stats stats; struct hlist_node global_hlist; /* global hash list node */ + /* Session receive handler for data packets. + * Each pseudowire implementation should implement this callback in order to + * handle incoming packets. Packets are passed to the pseudowire handler after + * reordering, if data sequence numbers are enabled for the session. + */ void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len); + + /* Session close handler. + * Each pseudowire implementation may implement this callback in order to carry + * out pseudowire-specific shutdown actions. + * The callback is called by core after unhashing the session and purging its + * reorder queue. + */ void (*session_close)(struct l2tp_session *session); + + /* Session show handler. + * Pseudowire-specific implementation of debugfs session rendering. + * The callback is called by l2tp_debugfs.c after rendering core session + * information. + */ void (*show)(struct seq_file *m, void *priv); + u8 priv[]; /* private data */ }; -/* Describes the tunnel. It contains info to track all the associated - * sessions so incoming packets can be sorted out - */ +/* L2TP tunnel configuration */ struct l2tp_tunnel_cfg { int debug; /* bitmask of debug message categories */ enum l2tp_encap_type encap; @@ -128,6 +148,12 @@ struct l2tp_tunnel_cfg { udp6_zero_rx_checksums:1; }; +/* Represents a tunnel instance. + * Tracks runtime state including IO statistics. + * Holds the tunnel socket (either passed from userspace or directly created by the kernel). + * Maintains a hashlist of sessions belonging to the tunnel instance. + * Is linked into a per-net list of tunnels. 
+ */ struct l2tp_tunnel { int magic; /* Should be L2TP_TUNNEL_MAGIC */ @@ -162,10 +188,23 @@ struct l2tp_tunnel { struct work_struct del_work; }; +/* Pseudowire ops callbacks for use with the l2tp genetlink interface */ struct l2tp_nl_cmd_ops { + /* The pseudowire session create callback is responsible for creating a session + * instance for a specific pseudowire type. + * It must call l2tp_session_create and l2tp_session_register to register the + * session instance, as well as carry out any pseudowire-specific initialisation. + * It must return >= 0 on success, or an appropriate negative errno value on failure. + */ int (*session_create)(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); + + /* The pseudowire session delete callback is responsible for initiating the deletion + * of a session instance. + * It must call l2tp_session_delete, as well as carry out any pseudowire-specific + * teardown actions. + */ void (*session_delete)(struct l2tp_session *session); }; @@ -174,11 +213,16 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) return &session->priv[0]; } +/* Tunnel and session refcounts */ void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel); void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel); void l2tp_session_inc_refcount(struct l2tp_session *session); void l2tp_session_dec_refcount(struct l2tp_session *session); +/* Tunnel and session lookup. + * These functions take a reference on the instances they return, so + * the caller must ensure that the reference is dropped appropriately. + */ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, @@ -189,33 +233,47 @@ struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname); +/* Tunnel and session lifetime management. + * Creation of a new instance is a two-step process: create, then register. + * Destruction is triggered using the *_delete functions, and completes asynchronously. + */ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net, struct l2tp_tunnel_cfg *cfg); - void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); + struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel); - void l2tp_session_delete(struct l2tp_session *session); + +/* Receive path helpers. If data sequencing is enabled for the session these + * functions handle queuing and reordering prior to passing packets to the + * pseudowire code to be passed to userspace. + */ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length); int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); -void l2tp_session_set_header_len(struct l2tp_session *session, int version); +/* Transmit path helpers for sending packets over the tunnel socket. 
*/ +void l2tp_session_set_header_len(struct l2tp_session *session, int version); int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); -int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, - const struct l2tp_nl_cmd_ops *ops); +/* Pseudowire management. + * Pseudowires should register with l2tp core on module init, and unregister + * on module exit. + */ +int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops); void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type); + +/* IOCTL helper for IP encap modules. */ int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg); static inline int l2tp_get_l2specific_len(struct l2tp_session *session) From 7ea5fda2b1325e17528e2648b913244c8be0bcc7 Mon Sep 17 00:00:00 2001 From: Min Li Date: Tue, 28 Jul 2020 16:00:30 -0400 Subject: [PATCH 1800/2056] ptp: ptp_clockmatrix: update to support 4.8.7 firmware With 4.8.7 firmware, adjtime can change delta instead of absolute time, which greately increases snap accuracy. PPS alignment doesn't have to be set for every single TOD change. Other minor changes includes: adding more debug logs, increasing snap accuracy for pre 4.8.7 firmware and supporting new tcs2bin format. Signed-off-by: Min Li Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/ptp/idt8a340_reg.h | 48 ++ drivers/ptp/ptp_clockmatrix.c | 1145 +++++++++++++++++++++++++++------ drivers/ptp/ptp_clockmatrix.h | 61 +- 3 files changed, 1045 insertions(+), 209 deletions(-) diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h index 69eedda9f731..b297c4aba2ba 100644 --- a/drivers/ptp/idt8a340_reg.h +++ b/drivers/ptp/idt8a340_reg.h @@ -20,6 +20,10 @@ #define HW_DPLL_1 (0x8b00) #define HW_DPLL_2 (0x8c00) #define HW_DPLL_3 (0x8d00) +#define HW_DPLL_4 (0x8e00) +#define HW_DPLL_5 (0x8f00) +#define HW_DPLL_6 (0x9000) +#define HW_DPLL_7 (0x9100) #define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080) #define HW_DPLL_TOD_CTRL_1 (0x089) @@ -57,6 +61,43 @@ #define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1) #define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0) +#define HW_Q8_CTRL_SPARE (0xa7d4) +#define HW_Q11_CTRL_SPARE (0xa7ec) + +/** + * Select FOD5 as sync_trigger for Q8 divider. + * Transition from logic zero to one + * sets trigger to sync Q8 divider. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_SYNC_TRIG BIT(1) + +/** + * Enable FOD5 as driver for clock and sync for Q8 divider. + * Enable fanout buffer for FOD5. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + +/** + * Select FOD6 as sync_trigger for Q11 divider. + * Transition from logic zero to one + * sets trigger to sync Q11 divider. + * + * Unused when FOD7 is driving Q11 divider (normal operation). + */ +#define Q10_TO_Q11_SYNC_TRIG BIT(1) + +/** + * Enable FOD6 as driver for clock and sync for Q11 divider. + * Enable fanout buffer for FOD6. + * + * Unused when FOD7 is driving Q11 divider (normal operation). 
+ */ +#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + #define RESET_CTRL 0xc000 #define SM_RESET 0x0012 #define SM_RESET_CMD 0x5A @@ -191,6 +232,7 @@ #define DPLL_CTRL_0 0xc600 #define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001 +#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a #define DPLL_CTRL_1 0xc63c @@ -646,6 +688,9 @@ /* Bit definitions for the TOD_WRITE_CMD register */ #define TOD_WRITE_SELECTION_SHIFT (0) #define TOD_WRITE_SELECTION_MASK (0xf) +/* 4.8.7 */ +#define TOD_WRITE_TYPE_SHIFT (4) +#define TOD_WRITE_TYPE_MASK (0x3) /* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */ #define RD_PWM_DECODER_INDEX_SHIFT (4) @@ -658,4 +703,7 @@ #define TOD_READ_TRIGGER_SHIFT (0) #define TOD_READ_TRIGGER_MASK (0xf) +/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */ +#define COMBO_MASTER_HOLD BIT(0) + #endif diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c index ceb6bc58f3b4..73aaae5574ed 100644 --- a/drivers/ptp/ptp_clockmatrix.c +++ b/drivers/ptp/ptp_clockmatrix.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "ptp_private.h" #include "ptp_clockmatrix.h" @@ -23,6 +24,13 @@ MODULE_AUTHOR("IDT support-1588 "); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL"); +/* + * The name of the firmware file to be loaded + * over-rides any automatic selection + */ +static char *firmware; +module_param(firmware, charp, 0); + #define SETTIME_CORRECTION (0) static long set_write_phase_ready(struct ptp_clock_info *ptp) @@ -95,6 +103,45 @@ static int timespec_to_char_array(struct timespec64 const *ts, return 0; } +static int idtcm_strverscmp(const char *ver1, const char *ver2) +{ + u8 num1; + u8 num2; + int result = 0; + + /* loop through each level of the version string */ + while (result == 0) { + /* extract leading version numbers */ + if (kstrtou8(ver1, 10, &num1) < 0) + return -1; + + if (kstrtou8(ver2, 10, &num2) < 0) + return -1; + + /* if numbers differ, then set the result */ + if (num1 < num2) + result = -1; + else if (num1 > num2) + result = 1; + else { + /* if numbers are the same, go to next level */ + ver1 = strchr(ver1, '.'); + ver2 = strchr(ver2, '.'); + if (!ver1 && !ver2) + break; + else if (!ver1) + result = -1; + else if (!ver2) + result = 1; + else { + ver1++; + ver2++; + } + } + } + return result; +} + static int idtcm_xfer(struct idtcm *idtcm, u8 regaddr, u8 *buf, @@ -104,6 +151,7 @@ static int idtcm_xfer(struct idtcm *idtcm, struct i2c_client *client = idtcm->client; struct i2c_msg msg[2]; int cnt; + char *fmt = "i2c_transfer failed at %d in %s for %s, at addr: %04X!\n"; msg[0].addr = client->addr; msg[0].flags = 0; @@ -118,7 +166,12 @@ static int idtcm_xfer(struct idtcm *idtcm, cnt = i2c_transfer(client->adapter, msg, 2); if (cnt < 0) { - dev_err(&client->dev, "i2c_transfer returned %d\n", cnt); + dev_err(&client->dev, + fmt, + __LINE__, + __func__, + write ? 
"write" : "read", + regaddr); return cnt; } else if (cnt != 2) { dev_err(&client->dev, @@ -144,10 +197,12 @@ static int idtcm_page_offset(struct idtcm *idtcm, u8 val) err = idtcm_xfer(idtcm, PAGE_ADDR, buf, sizeof(buf), 1); - if (err) + if (err) { + idtcm->page_offset = 0xff; dev_err(&idtcm->client->dev, "failed to set page offset\n"); - else + } else { idtcm->page_offset = val; + } return err; } @@ -198,6 +253,7 @@ static int _idtcm_gettime(struct idtcm_channel *channel, { struct idtcm *idtcm = channel->idtcm; u8 buf[TOD_BYTE_COUNT]; + u8 timeout = 10; u8 trigger; int err; @@ -208,16 +264,29 @@ static int _idtcm_gettime(struct idtcm_channel *channel, trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT); trigger |= (1 << TOD_READ_TRIGGER_SHIFT); - trigger |= TOD_READ_TRIGGER_MODE; + trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */ err = idtcm_write(idtcm, channel->tod_read_primary, TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger)); - if (err) return err; - if (idtcm->calculate_overhead_flag) - idtcm->start_time = ktime_get_raw(); + /* wait trigger to be 0 */ + while (trigger & TOD_READ_TRIGGER_MASK) { + + if (idtcm->calculate_overhead_flag) + idtcm->start_time = ktime_get_raw(); + + err = idtcm_read(idtcm, channel->tod_read_primary, + TOD_READ_PRIMARY_CMD, &trigger, + sizeof(trigger)); + + if (err) + return err; + + if (--timeout == 0) + return -EIO; + } err = idtcm_read(idtcm, channel->tod_read_primary, TOD_READ_PRIMARY, buf, sizeof(buf)); @@ -240,6 +309,7 @@ static int _sync_pll_output(struct idtcm *idtcm, u8 val; u16 sync_ctrl0; u16 sync_ctrl1; + u8 temp; if ((qn == 0) && (qn_plus_1 == 0)) return 0; @@ -305,6 +375,50 @@ static int _sync_pll_output(struct idtcm *idtcm, if (err) return err; + /* PLL5 can have OUT8 as second additional output. */ + if ((pll == 5) && (qn_plus_1 != 0)) { + err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + temp &= ~(Q9_TO_Q8_SYNC_TRIG); + + err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + temp |= Q9_TO_Q8_SYNC_TRIG; + + err = idtcm_write(idtcm, 0, HW_Q8_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + } + + /* PLL6 can have OUT11 as second additional output. 
*/ + if ((pll == 6) && (qn_plus_1 != 0)) { + err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + temp &= ~(Q10_TO_Q11_SYNC_TRIG); + + err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + temp |= Q10_TO_Q11_SYNC_TRIG; + + err = idtcm_write(idtcm, 0, HW_Q11_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + } + /* Place master sync out of reset */ val &= ~(SYNCTRL1_MASTER_SYNC_RST); err = idtcm_write(idtcm, 0, sync_ctrl1, &val, sizeof(val)); @@ -312,6 +426,30 @@ static int _sync_pll_output(struct idtcm *idtcm, return err; } +static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src) +{ + int err = 0; + + switch (tod_addr) { + case TOD_0: + *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS; + break; + case TOD_1: + *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS; + break; + case TOD_2: + *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS; + break; + case TOD_3: + *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS; + break; + default: + err = -EINVAL; + } + + return err; +} + static int idtcm_sync_pps_output(struct idtcm_channel *channel) { struct idtcm *idtcm = channel->idtcm; @@ -321,37 +459,68 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel) u8 qn; u8 qn_plus_1; int err = 0; + u8 out8_mux = 0; + u8 out11_mux = 0; + u8 temp; u16 output_mask = channel->output_mask; - switch (channel->dpll_n) { - case DPLL_0: - sync_src = SYNC_SOURCE_DPLL0_TOD_PPS; - break; - case DPLL_1: - sync_src = SYNC_SOURCE_DPLL1_TOD_PPS; - break; - case DPLL_2: - sync_src = SYNC_SOURCE_DPLL2_TOD_PPS; - break; - case DPLL_3: - sync_src = SYNC_SOURCE_DPLL3_TOD_PPS; - break; - default: - return -EINVAL; - } + err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src); + if (err) + return err; + + err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == + Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) + out8_mux = 1; + + err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == + Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) + out11_mux = 1; for (pll = 0; pll < 8; pll++) { - - qn = output_mask & 0x1; - output_mask = output_mask >> 1; + qn = 0; + qn_plus_1 = 0; if (pll < 4) { /* First 4 pll has 2 outputs */ + qn = output_mask & 0x1; + output_mask = output_mask >> 1; qn_plus_1 = output_mask & 0x1; output_mask = output_mask >> 1; - } else { - qn_plus_1 = 0; + } else if (pll == 4) { + if (out8_mux == 0) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } + } else if (pll == 5) { + if (out8_mux) { + qn_plus_1 = output_mask & 0x1; + output_mask = output_mask >> 1; + } + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } else if (pll == 6) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + if (out11_mux) { + qn_plus_1 = output_mask & 0x1; + output_mask = output_mask >> 1; + } + } else if (pll == 7) { + if (out11_mux == 0) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } } if ((qn != 0) || (qn_plus_1 != 0)) @@ -365,7 +534,7 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel) return err; } -static int _idtcm_set_dpll_tod(struct idtcm_channel *channel, +static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel, struct timespec64 const *ts, enum hw_tod_write_trig_sel wr_trig) { @@ -439,17 +608,78 @@ static int _idtcm_set_dpll_tod(struct idtcm_channel *channel, return err; } +static int 
_idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel, + struct timespec64 const *ts, + enum scsr_tod_write_trig_sel wr_trig, + enum scsr_tod_write_type_sel wr_type) +{ + struct idtcm *idtcm = channel->idtcm; + unsigned char buf[TOD_BYTE_COUNT], cmd; + struct timespec64 local_ts = *ts; + int err, count = 0; + + timespec64_add_ns(&local_ts, SETTIME_CORRECTION); + + err = timespec_to_char_array(&local_ts, buf, sizeof(buf)); + + if (err) + return err; + + err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE, + buf, sizeof(buf)); + if (err) + return err; + + /* Trigger the write operation. */ + err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD, + &cmd, sizeof(cmd)); + if (err) + return err; + + cmd &= ~(TOD_WRITE_SELECTION_MASK << TOD_WRITE_SELECTION_SHIFT); + cmd &= ~(TOD_WRITE_TYPE_MASK << TOD_WRITE_TYPE_SHIFT); + cmd |= (wr_trig << TOD_WRITE_SELECTION_SHIFT); + cmd |= (wr_type << TOD_WRITE_TYPE_SHIFT); + + err = idtcm_write(idtcm, channel->tod_write, TOD_WRITE_CMD, + &cmd, sizeof(cmd)); + if (err) + return err; + + /* Wait for the operation to complete. */ + while (1) { + /* pps trigger takes up to 1 sec to complete */ + if (wr_trig == SCSR_TOD_WR_TRIG_SEL_TODPPS) + msleep(50); + + err = idtcm_read(idtcm, channel->tod_write, TOD_WRITE_CMD, + &cmd, sizeof(cmd)); + if (err) + return err; + + if (cmd == 0) + break; + + if (++count > 20) { + dev_err(&idtcm->client->dev, + "Timed out waiting for the write counter\n"); + return -EIO; + } + } + + return 0; +} + static int _idtcm_settime(struct idtcm_channel *channel, struct timespec64 const *ts, enum hw_tod_write_trig_sel wr_trig) { struct idtcm *idtcm = channel->idtcm; - s32 retval; int err; int i; u8 trig_sel; - err = _idtcm_set_dpll_tod(channel, ts, wr_trig); + err = _idtcm_set_dpll_hw_tod(channel, ts, wr_trig); if (err) return err; @@ -469,12 +699,24 @@ static int _idtcm_settime(struct idtcm_channel *channel, err = 1; } - if (err) + if (err) { + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); return err; + } - retval = idtcm_sync_pps_output(channel); + return idtcm_sync_pps_output(channel); +} - return retval; +static int _idtcm_settime_v487(struct idtcm_channel *channel, + struct timespec64 const *ts, + enum scsr_tod_write_type_sel wr_type) +{ + return _idtcm_set_dpll_scsr_tod(channel, ts, + SCSR_TOD_WR_TRIG_SEL_IMMEDIATE, + wr_type); } static int idtcm_set_phase_pull_in_offset(struct idtcm_channel *channel, @@ -565,6 +807,50 @@ static int idtcm_do_phase_pull_in(struct idtcm_channel *channel, return err; } +static int set_tod_write_overhead(struct idtcm_channel *channel) +{ + struct idtcm *idtcm = channel->idtcm; + s64 current_ns = 0; + s64 lowest_ns = 0; + int err; + u8 i; + + ktime_t start; + ktime_t stop; + + char buf[TOD_BYTE_COUNT] = {0}; + + /* Set page offset */ + idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0, + buf, sizeof(buf)); + + for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) { + + start = ktime_get_raw(); + + err = idtcm_write(idtcm, channel->hw_dpll_n, + HW_DPLL_TOD_OVR__0, buf, sizeof(buf)); + + if (err) + return err; + + stop = ktime_get_raw(); + + current_ns = ktime_to_ns(stop - start); + + if (i == 0) { + lowest_ns = current_ns; + } else { + if (current_ns < lowest_ns) + lowest_ns = current_ns; + } + } + + idtcm->tod_write_overhead_ns = lowest_ns; + + return err; +} + static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta) { int err; @@ -577,6 +863,11 @@ static int _idtcm_adjtime(struct idtcm_channel *channel, s64 delta) } else { 
idtcm->calculate_overhead_flag = 1; + err = set_tod_write_overhead(channel); + + if (err) + return err; + err = _idtcm_gettime(channel, &ts); if (err) @@ -656,93 +947,118 @@ static int idtcm_read_otp_scsr_config_select(struct idtcm *idtcm, config_select, sizeof(u8)); } -static int process_pll_mask(struct idtcm *idtcm, u32 addr, u8 val, u8 *mask) -{ - int err = 0; - - if (addr == PLL_MASK_ADDR) { - if ((val & 0xf0) || !(val & 0xf)) { - dev_err(&idtcm->client->dev, - "Invalid PLL mask 0x%hhx\n", val); - err = -EINVAL; - } - *mask = val; - } - - return err; -} - static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val) { int err = 0; switch (addr) { - case OUTPUT_MASK_PLL0_ADDR: + case TOD0_OUT_ALIGN_MASK_ADDR: SET_U16_LSB(idtcm->channel[0].output_mask, val); break; - case OUTPUT_MASK_PLL0_ADDR + 1: + case TOD0_OUT_ALIGN_MASK_ADDR + 1: SET_U16_MSB(idtcm->channel[0].output_mask, val); break; - case OUTPUT_MASK_PLL1_ADDR: + case TOD1_OUT_ALIGN_MASK_ADDR: SET_U16_LSB(idtcm->channel[1].output_mask, val); break; - case OUTPUT_MASK_PLL1_ADDR + 1: + case TOD1_OUT_ALIGN_MASK_ADDR + 1: SET_U16_MSB(idtcm->channel[1].output_mask, val); break; - case OUTPUT_MASK_PLL2_ADDR: + case TOD2_OUT_ALIGN_MASK_ADDR: SET_U16_LSB(idtcm->channel[2].output_mask, val); break; - case OUTPUT_MASK_PLL2_ADDR + 1: + case TOD2_OUT_ALIGN_MASK_ADDR + 1: SET_U16_MSB(idtcm->channel[2].output_mask, val); break; - case OUTPUT_MASK_PLL3_ADDR: + case TOD3_OUT_ALIGN_MASK_ADDR: SET_U16_LSB(idtcm->channel[3].output_mask, val); break; - case OUTPUT_MASK_PLL3_ADDR + 1: + case TOD3_OUT_ALIGN_MASK_ADDR + 1: SET_U16_MSB(idtcm->channel[3].output_mask, val); break; default: - err = -EINVAL; + err = -EFAULT; /* Bad address */; break; } return err; } +static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll) +{ + if (index >= MAX_TOD) { + dev_err(&idtcm->client->dev, "ToD%d not supported\n", index); + return -EINVAL; + } + + if (pll >= MAX_PLL) { + dev_err(&idtcm->client->dev, "Pll%d not supported\n", pll); + return -EINVAL; + } + + idtcm->channel[index].pll = pll; + + return 0; +} + static int check_and_set_masks(struct idtcm *idtcm, u16 regaddr, u8 val) { int err = 0; - if (set_pll_output_mask(idtcm, regaddr, val)) { - /* Not an output mask, check for pll mask */ - err = process_pll_mask(idtcm, regaddr, val, &idtcm->pll_mask); + switch (regaddr) { + case TOD_MASK_ADDR: + if ((val & 0xf0) || !(val & 0x0f)) { + dev_err(&idtcm->client->dev, + "Invalid TOD mask 0x%hhx\n", val); + err = -EINVAL; + } else { + idtcm->tod_mask = val; + } + break; + case TOD0_PTP_PLL_ADDR: + err = set_tod_ptp_pll(idtcm, 0, val); + break; + case TOD1_PTP_PLL_ADDR: + err = set_tod_ptp_pll(idtcm, 1, val); + break; + case TOD2_PTP_PLL_ADDR: + err = set_tod_ptp_pll(idtcm, 2, val); + break; + case TOD3_PTP_PLL_ADDR: + err = set_tod_ptp_pll(idtcm, 3, val); + break; + default: + err = set_pll_output_mask(idtcm, regaddr, val); + break; } return err; } -static void display_pll_and_output_masks(struct idtcm *idtcm) +static void display_pll_and_masks(struct idtcm *idtcm) { u8 i; u8 mask; - dev_dbg(&idtcm->client->dev, "pllmask = 0x%02x\n", idtcm->pll_mask); + dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x\n", idtcm->tod_mask); - for (i = 0; i < MAX_PHC_PLL; i++) { + for (i = 0; i < MAX_TOD; i++) { mask = 1 << i; - if (mask & idtcm->pll_mask) + if (mask & idtcm->tod_mask) dev_dbg(&idtcm->client->dev, - "PLL%d output_mask = 0x%04x\n", - i, idtcm->channel[i].output_mask); + "TOD%d pll = %d output_mask = 0x%04x\n", + i, idtcm->channel[i].pll, + 
idtcm->channel[i].output_mask); } } static int idtcm_load_firmware(struct idtcm *idtcm, struct device *dev) { + char fname[128] = FW_FILENAME; const struct firmware *fw; struct idtcm_fwrc *rec; u32 regaddr; @@ -751,12 +1067,20 @@ static int idtcm_load_firmware(struct idtcm *idtcm, u8 val; u8 loaddr; - dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", FW_FILENAME); + if (firmware) /* module parameter */ + snprintf(fname, sizeof(fname), "%s", firmware); - err = request_firmware(&fw, FW_FILENAME, dev); + dev_dbg(&idtcm->client->dev, "requesting firmware '%s'\n", fname); - if (err) + err = request_firmware(&fw, fname, dev); + + if (err) { + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); return err; + } dev_dbg(&idtcm->client->dev, "firmware size %zu bytes\n", fw->size); @@ -783,7 +1107,9 @@ static int idtcm_load_firmware(struct idtcm *idtcm, err = check_and_set_masks(idtcm, regaddr, val); } - if (err == 0) { + if (err != -EINVAL) { + err = 0; + /* Top (status registers) and bottom are read-only */ if ((regaddr < GPIO_USER_CONTROL) || (regaddr >= SCRATCH)) @@ -801,42 +1127,22 @@ static int idtcm_load_firmware(struct idtcm *idtcm, goto out; } - display_pll_and_output_masks(idtcm); + display_pll_and_masks(idtcm); out: release_firmware(fw); return err; } -static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable) +static int idtcm_output_enable(struct idtcm_channel *channel, + bool enable, unsigned int outn) { struct idtcm *idtcm = channel->idtcm; - u32 module; - u8 val; int err; + u8 val; - /* - * This assumes that the 1-PPS is on the second of the two - * output. But is this always true? - */ - switch (channel->dpll_n) { - case DPLL_0: - module = OUTPUT_1; - break; - case DPLL_1: - module = OUTPUT_3; - break; - case DPLL_2: - module = OUTPUT_5; - break; - case DPLL_3: - module = OUTPUT_7; - break; - default: - return -EINVAL; - } - - err = idtcm_read(idtcm, module, OUT_CTRL_1, &val, sizeof(val)); + err = idtcm_read(idtcm, OUTPUT_MODULE_FROM_INDEX(outn), + OUT_CTRL_1, &val, sizeof(val)); if (err) return err; @@ -846,14 +1152,50 @@ static int idtcm_pps_enable(struct idtcm_channel *channel, bool enable) else val &= ~SQUELCH_DISABLE; - err = idtcm_write(idtcm, module, OUT_CTRL_1, &val, sizeof(val)); + return idtcm_write(idtcm, OUTPUT_MODULE_FROM_INDEX(outn), + OUT_CTRL_1, &val, sizeof(val)); +} - if (err) - return err; +static int idtcm_output_mask_enable(struct idtcm_channel *channel, + bool enable) +{ + u16 mask; + int err; + u8 outn; + + mask = channel->output_mask; + outn = 0; + + while (mask) { + + if (mask & 0x1) { + + err = idtcm_output_enable(channel, enable, outn); + + if (err) + return err; + } + + mask >>= 0x1; + outn++; + } return 0; } +static int idtcm_perout_enable(struct idtcm_channel *channel, + bool enable, + struct ptp_perout_request *perout) +{ + unsigned int flags = perout->flags; + + if (flags == PEROUT_ENABLE_OUTPUT_MASK) + return idtcm_output_mask_enable(channel, enable); + + /* Enable/disable individual output instead */ + return idtcm_output_enable(channel, enable, perout->index); +} + static int idtcm_set_pll_mode(struct idtcm_channel *channel, enum pll_mode pll_mode) { @@ -940,10 +1282,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns) return err; } -static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm) { - struct idtcm_channel *channel = - container_of(ptp, struct idtcm_channel, caps); struct idtcm 
*idtcm = channel->idtcm; u8 i; bool neg_adj = 0; @@ -970,15 +1310,15 @@ static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb) * FCW = ------------- * 111 * 2^4 */ - if (ppb < 0) { + if (scaled_ppm < 0) { neg_adj = 1; - ppb = -ppb; + scaled_ppm = -scaled_ppm; } /* 2 ^ -53 = 1.1102230246251565404236316680908e-16 */ - fcw = ppb * 1000000000000ULL; + fcw = scaled_ppm * 244140625ULL; - fcw = div_u64(fcw, 111022); + fcw = div_u64(fcw, 1776); if (neg_adj) fcw = -fcw; @@ -988,12 +1328,9 @@ static int idtcm_adjfreq(struct ptp_clock_info *ptp, s32 ppb) fcw >>= 8; } - mutex_lock(&idtcm->reg_lock); - err = idtcm_write(idtcm, channel->dpll_freq, DPLL_WR_FREQ, buf, sizeof(buf)); - mutex_unlock(&idtcm->reg_lock); return err; } @@ -1008,6 +1345,12 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) err = _idtcm_gettime(channel, ts); + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + mutex_unlock(&idtcm->reg_lock); return err; @@ -1025,6 +1368,35 @@ static int idtcm_settime(struct ptp_clock_info *ptp, err = _idtcm_settime(channel, ts, HW_TOD_WR_TRIG_SEL_MSB); + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + + mutex_unlock(&idtcm->reg_lock); + + return err; +} + +static int idtcm_settime_v487(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct idtcm_channel *channel = + container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + int err; + + mutex_lock(&idtcm->reg_lock); + + err = _idtcm_settime_v487(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE); + + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + mutex_unlock(&idtcm->reg_lock); return err; @@ -1041,6 +1413,54 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta) err = _idtcm_adjtime(channel, delta); + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + + mutex_unlock(&idtcm->reg_lock); + + return err; +} + +static int idtcm_adjtime_v487(struct ptp_clock_info *ptp, s64 delta) +{ + struct idtcm_channel *channel = + container_of(ptp, struct idtcm_channel, caps); + struct idtcm *idtcm = channel->idtcm; + struct timespec64 ts; + enum scsr_tod_write_type_sel type; + int err; + + if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_V487) { + err = idtcm_do_phase_pull_in(channel, delta, 0); + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + return err; + } + + if (delta >= 0) { + ts = ns_to_timespec64(delta); + type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS; + } else { + ts = ns_to_timespec64(-delta); + type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS; + } + + mutex_lock(&idtcm->reg_lock); + + err = _idtcm_settime_v487(channel, &ts, type); + + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + mutex_unlock(&idtcm->reg_lock); return err; @@ -1059,6 +1479,36 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta) err = _idtcm_adjphase(channel, delta); + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + + mutex_unlock(&idtcm->reg_lock); + + return err; +} + +static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct idtcm_channel *channel = + container_of(ptp, struct idtcm_channel, caps); + + struct idtcm *idtcm = channel->idtcm; + + int err; + + mutex_lock(&idtcm->reg_lock); + + err = 
_idtcm_adjfine(channel, scaled_ppm); + + if (err) + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + mutex_unlock(&idtcm->reg_lock); return err; @@ -1067,20 +1517,35 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta) static int idtcm_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *rq, int on) { + int err; + struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps); switch (rq->type) { case PTP_CLK_REQ_PEROUT: - if (!on) - return idtcm_pps_enable(channel, false); + if (!on) { + err = idtcm_perout_enable(channel, false, &rq->perout); + if (err) + dev_err(&channel->idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + return err; + } /* Only accept a 1-PPS aligned to the second. */ if (rq->perout.start.nsec || rq->perout.period.sec != 1 || rq->perout.period.nsec) return -ERANGE; - return idtcm_pps_enable(channel, true); + err = idtcm_perout_enable(channel, true, &rq->perout); + if (err) + dev_err(&channel->idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + return err; default: break; } @@ -1088,6 +1553,230 @@ static int idtcm_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } +static int _enable_pll_tod_sync(struct idtcm *idtcm, + u8 pll, + u8 sync_src, + u8 qn, + u8 qn_plus_1) +{ + int err; + u8 val; + u16 dpll; + u16 out0 = 0, out1 = 0; + + if ((qn == 0) && (qn_plus_1 == 0)) + return 0; + + switch (pll) { + case 0: + dpll = DPLL_0; + if (qn) + out0 = OUTPUT_0; + if (qn_plus_1) + out1 = OUTPUT_1; + break; + case 1: + dpll = DPLL_1; + if (qn) + out0 = OUTPUT_2; + if (qn_plus_1) + out1 = OUTPUT_3; + break; + case 2: + dpll = DPLL_2; + if (qn) + out0 = OUTPUT_4; + if (qn_plus_1) + out1 = OUTPUT_5; + break; + case 3: + dpll = DPLL_3; + if (qn) + out0 = OUTPUT_6; + if (qn_plus_1) + out1 = OUTPUT_7; + break; + case 4: + dpll = DPLL_4; + if (qn) + out0 = OUTPUT_8; + break; + case 5: + dpll = DPLL_5; + if (qn) + out0 = OUTPUT_9; + if (qn_plus_1) + out1 = OUTPUT_8; + break; + case 6: + dpll = DPLL_6; + if (qn) + out0 = OUTPUT_10; + if (qn_plus_1) + out1 = OUTPUT_11; + break; + case 7: + dpll = DPLL_7; + if (qn) + out0 = OUTPUT_11; + break; + default: + return -EINVAL; + } + + /* + * Enable OUTPUT OUT_SYNC. + */ + if (out0) { + err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val)); + + if (err) + return err; + + val &= ~OUT_SYNC_DISABLE; + + err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val)); + + if (err) + return err; + } + + if (out1) { + err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val)); + + if (err) + return err; + + val &= ~OUT_SYNC_DISABLE; + + err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val)); + + if (err) + return err; + } + + /* enable dpll sync tod pps, must be set before dpll_mode */ + err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val)); + if (err) + return err; + + val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT); + val |= (sync_src << TOD_SYNC_SOURCE_SHIFT); + val |= TOD_SYNC_EN; + + return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val)); +} + +static int idtcm_enable_tod_sync(struct idtcm_channel *channel) +{ + struct idtcm *idtcm = channel->idtcm; + + u8 pll; + u8 sync_src; + u8 qn; + u8 qn_plus_1; + u8 cfg; + int err = 0; + u16 output_mask = channel->output_mask; + u8 out8_mux = 0; + u8 out11_mux = 0; + u8 temp; + + /* + * set tod_out_sync_enable to 0. 
+ */ + err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); + if (err) + return err; + + cfg &= ~TOD_OUT_SYNC_ENABLE; + + err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg)); + if (err) + return err; + + switch (channel->tod_n) { + case TOD_0: + sync_src = 0; + break; + case TOD_1: + sync_src = 1; + break; + case TOD_2: + sync_src = 2; + break; + case TOD_3: + sync_src = 3; + break; + default: + return -EINVAL; + } + + err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == + Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) + out8_mux = 1; + + err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, + &temp, sizeof(temp)); + if (err) + return err; + + if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) == + Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) + out11_mux = 1; + + for (pll = 0; pll < 8; pll++) { + qn = 0; + qn_plus_1 = 0; + + if (pll < 4) { + /* First 4 pll has 2 outputs */ + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + qn_plus_1 = output_mask & 0x1; + output_mask = output_mask >> 1; + } else if (pll == 4) { + if (out8_mux == 0) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } + } else if (pll == 5) { + if (out8_mux) { + qn_plus_1 = output_mask & 0x1; + output_mask = output_mask >> 1; + } + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } else if (pll == 6) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + if (out11_mux) { + qn_plus_1 = output_mask & 0x1; + output_mask = output_mask >> 1; + } + } else if (pll == 7) { + if (out11_mux == 0) { + qn = output_mask & 0x1; + output_mask = output_mask >> 1; + } + } + + if ((qn != 0) || (qn_plus_1 != 0)) + err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn, + qn_plus_1); + + if (err) + return err; + } + + return err; +} + static int idtcm_enable_tod(struct idtcm_channel *channel) { struct idtcm *idtcm = channel->idtcm; @@ -1095,10 +1784,6 @@ static int idtcm_enable_tod(struct idtcm_channel *channel) u8 cfg; int err; - err = idtcm_pps_enable(channel, false); - if (err) - return err; - /* * Start the TOD clock ticking. 
*/ @@ -1134,16 +1819,32 @@ static void idtcm_display_version_info(struct idtcm *idtcm) idtcm_read_otp_scsr_config_select(idtcm, &config_select); + snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u", + major, minor, hotfix); + dev_info(&idtcm->client->dev, fmt, major, minor, hotfix, product_id, hw_rev_id, config_select); } +static const struct ptp_clock_info idtcm_caps_v487 = { + .owner = THIS_MODULE, + .max_adj = 244000, + .n_per_out = 12, + .adjphase = &idtcm_adjphase, + .adjfine = &idtcm_adjfine, + .adjtime = &idtcm_adjtime_v487, + .gettime64 = &idtcm_gettime, + .settime64 = &idtcm_settime_v487, + .enable = &idtcm_enable, + .do_aux_work = &set_write_phase_ready, +}; + static const struct ptp_clock_info idtcm_caps = { .owner = THIS_MODULE, .max_adj = 244000, - .n_per_out = 1, + .n_per_out = 12, .adjphase = &idtcm_adjphase, - .adjfreq = &idtcm_adjfreq, + .adjfine = &idtcm_adjfine, .adjtime = &idtcm_adjtime, .gettime64 = &idtcm_gettime, .settime64 = &idtcm_settime, @@ -1151,24 +1852,14 @@ static const struct ptp_clock_info idtcm_caps = { .do_aux_work = &set_write_phase_ready, }; - -static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) +static int configure_channel_pll(struct idtcm_channel *channel) { - struct idtcm_channel *channel; - int err; + int err = 0; - if (!(index < MAX_PHC_PLL)) - return -EINVAL; - - channel = &idtcm->channel[index]; - - switch (index) { + switch (channel->pll) { case 0: channel->dpll_freq = DPLL_FREQ_0; channel->dpll_n = DPLL_0; - channel->tod_read_primary = TOD_READ_PRIMARY_0; - channel->tod_write = TOD_WRITE_0; - channel->tod_n = TOD_0; channel->hw_dpll_n = HW_DPLL_0; channel->dpll_phase = DPLL_PHASE_0; channel->dpll_ctrl_n = DPLL_CTRL_0; @@ -1177,9 +1868,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) case 1: channel->dpll_freq = DPLL_FREQ_1; channel->dpll_n = DPLL_1; - channel->tod_read_primary = TOD_READ_PRIMARY_1; - channel->tod_write = TOD_WRITE_1; - channel->tod_n = TOD_1; channel->hw_dpll_n = HW_DPLL_1; channel->dpll_phase = DPLL_PHASE_1; channel->dpll_ctrl_n = DPLL_CTRL_1; @@ -1188,9 +1876,6 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) case 2: channel->dpll_freq = DPLL_FREQ_2; channel->dpll_n = DPLL_2; - channel->tod_read_primary = TOD_READ_PRIMARY_2; - channel->tod_write = TOD_WRITE_2; - channel->tod_n = TOD_2; channel->hw_dpll_n = HW_DPLL_2; channel->dpll_phase = DPLL_PHASE_2; channel->dpll_ctrl_n = DPLL_CTRL_2; @@ -1199,31 +1884,129 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) case 3: channel->dpll_freq = DPLL_FREQ_3; channel->dpll_n = DPLL_3; - channel->tod_read_primary = TOD_READ_PRIMARY_3; - channel->tod_write = TOD_WRITE_3; - channel->tod_n = TOD_3; channel->hw_dpll_n = HW_DPLL_3; channel->dpll_phase = DPLL_PHASE_3; channel->dpll_ctrl_n = DPLL_CTRL_3; channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_3; break; + case 4: + channel->dpll_freq = DPLL_FREQ_4; + channel->dpll_n = DPLL_4; + channel->hw_dpll_n = HW_DPLL_4; + channel->dpll_phase = DPLL_PHASE_4; + channel->dpll_ctrl_n = DPLL_CTRL_4; + channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_4; + break; + case 5: + channel->dpll_freq = DPLL_FREQ_5; + channel->dpll_n = DPLL_5; + channel->hw_dpll_n = HW_DPLL_5; + channel->dpll_phase = DPLL_PHASE_5; + channel->dpll_ctrl_n = DPLL_CTRL_5; + channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_5; + break; + case 6: + channel->dpll_freq = DPLL_FREQ_6; + channel->dpll_n = DPLL_6; + channel->hw_dpll_n = HW_DPLL_6; + channel->dpll_phase = DPLL_PHASE_6; + channel->dpll_ctrl_n = 
DPLL_CTRL_6; + channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_6; + break; + case 7: + channel->dpll_freq = DPLL_FREQ_7; + channel->dpll_n = DPLL_7; + channel->hw_dpll_n = HW_DPLL_7; + channel->dpll_phase = DPLL_PHASE_7; + channel->dpll_ctrl_n = DPLL_CTRL_7; + channel->dpll_phase_pull_in = DPLL_PHASE_PULL_IN_7; + break; + default: + err = -EINVAL; + } + + return err; +} + +static int idtcm_enable_channel(struct idtcm *idtcm, u32 index) +{ + struct idtcm_channel *channel; + int err; + + if (!(index < MAX_TOD)) + return -EINVAL; + + channel = &idtcm->channel[index]; + + /* Set pll addresses */ + err = configure_channel_pll(channel); + if (err) + return err; + + /* Set tod addresses */ + switch (index) { + case 0: + channel->tod_read_primary = TOD_READ_PRIMARY_0; + channel->tod_write = TOD_WRITE_0; + channel->tod_n = TOD_0; + break; + case 1: + channel->tod_read_primary = TOD_READ_PRIMARY_1; + channel->tod_write = TOD_WRITE_1; + channel->tod_n = TOD_1; + break; + case 2: + channel->tod_read_primary = TOD_READ_PRIMARY_2; + channel->tod_write = TOD_WRITE_2; + channel->tod_n = TOD_2; + break; + case 3: + channel->tod_read_primary = TOD_READ_PRIMARY_3; + channel->tod_write = TOD_WRITE_3; + channel->tod_n = TOD_3; + break; default: return -EINVAL; } channel->idtcm = idtcm; - channel->caps = idtcm_caps; + if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) + channel->caps = idtcm_caps_v487; + else + channel->caps = idtcm_caps; + snprintf(channel->caps.name, sizeof(channel->caps.name), - "IDT CM PLL%u", index); + "IDT CM TOD%u", index); + + if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0) { + err = idtcm_enable_tod_sync(channel); + if (err) { + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); + return err; + } + } err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY); - if (err) + if (err) { + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); return err; + } err = idtcm_enable_tod(channel); - if (err) + if (err) { + dev_err(&idtcm->client->dev, + "Failed at line %d in func %s!\n", + __LINE__, + __func__); return err; + } channel->ptp_clock = ptp_clock_register(&channel->caps, NULL); @@ -1249,7 +2032,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm) u8 i; struct idtcm_channel *channel; - for (i = 0; i < MAX_PHC_PLL; i++) { + for (i = 0; i < MAX_TOD; i++) { channel = &idtcm->channel[i]; @@ -1260,7 +2043,12 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm) static void set_default_masks(struct idtcm *idtcm) { - idtcm->pll_mask = DEFAULT_PLL_MASK; + idtcm->tod_mask = DEFAULT_TOD_MASK; + + idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL; + idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL; + idtcm->channel[2].pll = DEFAULT_TOD2_PTP_PLL; + idtcm->channel[3].pll = DEFAULT_TOD3_PTP_PLL; idtcm->channel[0].output_mask = DEFAULT_OUTPUT_MASK_PLL0; idtcm->channel[1].output_mask = DEFAULT_OUTPUT_MASK_PLL1; @@ -1268,51 +2056,13 @@ static void set_default_masks(struct idtcm *idtcm) idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3; } -static int set_tod_write_overhead(struct idtcm *idtcm) -{ - int err; - u8 i; - - s64 total_ns = 0; - - ktime_t start; - ktime_t stop; - - char buf[TOD_BYTE_COUNT]; - - struct idtcm_channel *channel = &idtcm->channel[2]; - - /* Set page offset */ - idtcm_write(idtcm, channel->hw_dpll_n, HW_DPLL_TOD_OVR__0, - buf, sizeof(buf)); - - for (i = 0; i < TOD_WRITE_OVERHEAD_COUNT_MAX; i++) { - - start = ktime_get_raw(); - - err = idtcm_write(idtcm, channel->hw_dpll_n, - 
HW_DPLL_TOD_OVR__0, buf, sizeof(buf)); - - if (err) - return err; - - stop = ktime_get_raw(); - - total_ns += ktime_to_ns(stop - start); - } - - idtcm->tod_write_overhead_ns = div_s64(total_ns, - TOD_WRITE_OVERHEAD_COUNT_MAX); - - return err; -} - static int idtcm_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct idtcm *idtcm; int err; u8 i; + char *fmt = "Failed at %d in line %s with channel output %d!\n"; /* Unused for now */ (void)id; @@ -1333,25 +2083,24 @@ static int idtcm_probe(struct i2c_client *client, idtcm_display_version_info(idtcm); - err = set_tod_write_overhead(idtcm); - - if (err) { - mutex_unlock(&idtcm->reg_lock); - return err; - } - err = idtcm_load_firmware(idtcm, &client->dev); if (err) dev_warn(&idtcm->client->dev, "loading firmware failed with %d\n", err); - if (idtcm->pll_mask) { - for (i = 0; i < MAX_PHC_PLL; i++) { - if (idtcm->pll_mask & (1 << i)) { + if (idtcm->tod_mask) { + for (i = 0; i < MAX_TOD; i++) { + if (idtcm->tod_mask & (1 << i)) { err = idtcm_enable_channel(idtcm, i); - if (err) + if (err) { + dev_err(&idtcm->client->dev, + fmt, + __LINE__, + __func__, + i); break; + } } } } else { diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h index 3de0eb72889c..ffae56c5d97f 100644 --- a/drivers/ptp/ptp_clockmatrix.h +++ b/drivers/ptp/ptp_clockmatrix.h @@ -13,32 +13,48 @@ #include "idt8a340_reg.h" #define FW_FILENAME "idtcm.bin" -#define MAX_PHC_PLL 4 +#define MAX_TOD (4) +#define MAX_PLL (8) #define MAX_ABS_WRITE_PHASE_PICOSECONDS (107374182350LL) -#define PLL_MASK_ADDR (0xFFA5) -#define DEFAULT_PLL_MASK (0x04) +#define TOD_MASK_ADDR (0xFFA5) +#define DEFAULT_TOD_MASK (0x04) #define SET_U16_LSB(orig, val8) (orig = (0xff00 & (orig)) | (val8)) #define SET_U16_MSB(orig, val8) (orig = (0x00ff & (orig)) | (val8 << 8)) -#define OUTPUT_MASK_PLL0_ADDR (0xFFB0) -#define OUTPUT_MASK_PLL1_ADDR (0xFFB2) -#define OUTPUT_MASK_PLL2_ADDR (0xFFB4) -#define OUTPUT_MASK_PLL3_ADDR (0xFFB6) +#define TOD0_PTP_PLL_ADDR (0xFFA8) +#define TOD1_PTP_PLL_ADDR (0xFFA9) +#define TOD2_PTP_PLL_ADDR (0xFFAA) +#define TOD3_PTP_PLL_ADDR (0xFFAB) + +#define TOD0_OUT_ALIGN_MASK_ADDR (0xFFB0) +#define TOD1_OUT_ALIGN_MASK_ADDR (0xFFB2) +#define TOD2_OUT_ALIGN_MASK_ADDR (0xFFB4) +#define TOD3_OUT_ALIGN_MASK_ADDR (0xFFB6) #define DEFAULT_OUTPUT_MASK_PLL0 (0x003) #define DEFAULT_OUTPUT_MASK_PLL1 (0x00c) #define DEFAULT_OUTPUT_MASK_PLL2 (0x030) #define DEFAULT_OUTPUT_MASK_PLL3 (0x0c0) +#define DEFAULT_TOD0_PTP_PLL (0) +#define DEFAULT_TOD1_PTP_PLL (1) +#define DEFAULT_TOD2_PTP_PLL (2) +#define DEFAULT_TOD3_PTP_PLL (3) + #define POST_SM_RESET_DELAY_MS (3000) #define PHASE_PULL_IN_THRESHOLD_NS (150000) -#define TOD_WRITE_OVERHEAD_COUNT_MAX (5) +#define PHASE_PULL_IN_THRESHOLD_NS_V487 (15000) +#define TOD_WRITE_OVERHEAD_COUNT_MAX (2) #define TOD_BYTE_COUNT (11) #define WR_PHASE_SETUP_MS (5000) +#define OUTPUT_MODULE_FROM_INDEX(index) (OUTPUT_0 + (index) * 0x10) + +#define PEROUT_ENABLE_OUTPUT_MASK (0xdeadbeef) + /* Values of DPLL_N.DPLL_MODE.PLL_MODE */ enum pll_mode { PLL_MODE_MIN = 0, @@ -48,7 +64,8 @@ enum pll_mode { PLL_MODE_GPIO_INC_DEC = 3, PLL_MODE_SYNTHESIS = 4, PLL_MODE_PHASE_MEASUREMENT = 5, - PLL_MODE_MAX = PLL_MODE_PHASE_MEASUREMENT, + PLL_MODE_DISABLED = 6, + PLL_MODE_MAX = PLL_MODE_DISABLED, }; enum hw_tod_write_trig_sel { @@ -63,6 +80,26 @@ enum hw_tod_write_trig_sel { WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC, }; +/* 4.8.7 only */ +enum scsr_tod_write_trig_sel { + SCSR_TOD_WR_TRIG_SEL_DISABLE = 0, + SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1, + 
SCSR_TOD_WR_TRIG_SEL_REFCLK = 2, + SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3, + SCSR_TOD_WR_TRIG_SEL_TODPPS = 4, + SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5, + SCSR_TOD_WR_TRIG_SEL_GPIO = 6, + SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO, +}; + +/* 4.8.7 only */ +enum scsr_tod_write_type_sel { + SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0, + SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1, + SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2, + SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS, +}; + struct idtcm; struct idtcm_channel { @@ -79,15 +116,17 @@ struct idtcm_channel { u16 tod_n; u16 hw_dpll_n; enum pll_mode pll_mode; + u8 pll; u16 output_mask; int write_phase_ready; }; struct idtcm { - struct idtcm_channel channel[MAX_PHC_PLL]; + struct idtcm_channel channel[MAX_TOD]; struct i2c_client *client; u8 page_offset; - u8 pll_mask; + u8 tod_mask; + char version[16]; /* Overhead calculation for adjtime */ u8 calculate_overhead_flag; From b04e55d641c0d8c6b7305b9cfc7d000f2d305849 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 29 Jul 2020 02:19:50 +0000 Subject: [PATCH 1801/2056] sfc_ef100: remove duplicated include from ef100_netdev.c Remove duplicated include. Signed-off-by: YueHaibing Acked-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_netdev.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 4c3caac2c8cc..ec9ca81fed85 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -16,7 +16,6 @@ #include "tx_common.h" #include "ef100_netdev.h" #include "ef100_ethtool.h" -#include "efx_common.h" #include "nic_common.h" #include "ef100_nic.h" #include "ef100_tx.h" From c64c9c282a9a7ec0515b725d5aaed68c32e403a4 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sun, 26 Jul 2020 14:02:28 +0200 Subject: [PATCH 1802/2056] udp, bpf: Ignore connections in reuseport group after BPF sk lookup When BPF sk lookup invokes reuseport handling for the selected socket, it should ignore the fact that reuseport group can contain connected UDP sockets. With BPF sk lookup this is not relevant as we are not scoring sockets to find the best match, which might be a connected UDP socket. Fix it by unconditionally accepting the socket selected by reuseport. This fixes the following two failures reported by test_progs. # ./test_progs -t sk_lookup ... #73/14 UDP IPv4 redir and reuseport with conns:FAIL ... #73/20 UDP IPv6 redir and reuseport with conns:FAIL ... 
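For reference, a condensed sketch of the selection logic both lookup paths end up with after this change (taken from the udp4/udp6 hunks below; the miss/error handling is omitted here): the reuseport pick, when present, is accepted unconditionally, because the BPF program's verdict is final and connected UDP sockets are not scored against it.

	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
	if (reuse_sk)
		sk = reuse_sk;
	return sk;
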
Fixes: a57066b1a019 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net") Reported-by: Alexei Starovoitov Signed-off-by: Jakub Sitnicki Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200726120228.1414348-1-jakub@cloudflare.com --- net/ipv4/udp.c | 2 +- net/ipv6/udp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7ce31beccfc2..e88efba07551 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -473,7 +473,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net, return sk; reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); - if (reuse_sk && !reuseport_has_conns(sk, false)) + if (reuse_sk) sk = reuse_sk; return sk; } diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c394e674f486..29d9691359b9 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -208,7 +208,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net, return sk; reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum); - if (reuse_sk && !reuseport_has_conns(sk, false)) + if (reuse_sk) sk = reuse_sk; return sk; } From 10470c0d7e921a46e126ff8f8a3b60fdb944b3d0 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 29 Jul 2020 17:58:03 -0500 Subject: [PATCH 1803/2056] mlxsw: spectrum_cnt: Use flex_array_size() helper in memcpy() Make use of the flex_array_size() helper to calculate the size of a flexible array member within an enclosing structure. This helper offers defense-in-depth against potential integer overflows, while at the same time makes it explicitly clear that we are dealing witha flexible array member. Also, remove unnecessary pointer identifier sub_pool. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c index 7974982533b5..b65b93a2b9bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c @@ -121,7 +121,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) { unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools); struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); - struct mlxsw_sp_counter_sub_pool *sub_pool; struct mlxsw_sp_counter_pool *pool; unsigned int map_size; int err; @@ -131,9 +130,9 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp) if (!pool) return -ENOMEM; mlxsw_sp->counter_pool = pool; - memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools, - sub_pools_count * sizeof(*sub_pool)); pool->sub_pools_count = sub_pools_count; + memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools, + flex_array_size(pool, sub_pools, pool->sub_pools_count)); spin_lock_init(&pool->counter_pool_lock); atomic_set(&pool->active_entries_count, 0); From a0d716d8e42abcb973afed27c6fa44bd146d2616 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 29 Jul 2020 22:17:00 -0500 Subject: [PATCH 1804/2056] net/sched: act_pedit: Use flex_array_size() helper in memcpy() Make use of the flex_array_size() helper to calculate the size of a flexible array member within an enclosing structure. This helper offers defense-in-depth against potential integer overflows, while at the same time makes it explicitly clear that we are dealing with a flexible array member. Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: David S. Miller --- net/sched/act_pedit.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 66986db062ed..c158bfed86d5 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -436,8 +436,7 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, return -ENOBUFS; spin_lock_bh(&p->tcf_lock); - memcpy(opt->keys, p->tcfp_keys, - p->tcfp_nkeys * sizeof(struct tc_pedit_key)); + memcpy(opt->keys, p->tcfp_keys, flex_array_size(opt, keys, p->tcfp_nkeys)); opt->index = p->tcf_index; opt->nkeys = p->tcfp_nkeys; opt->flags = p->tcfp_flags; From 1e51f9358af513c1620b527d1e8c90b8820591a2 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 14:11:40 +0800 Subject: [PATCH 1805/2056] liquidio: Replace vmalloc with kmalloc in octeon_register_dispatch_fn() The size of struct octeon_dispatch is too small, it is better to use kmalloc instead of vmalloc. Suggested-by: Joe Perches Signed-off-by: Wang Hai Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 934115d18488..ac32facaa427 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1056,7 +1056,7 @@ void octeon_delete_dispatch_list(struct octeon_device *oct) list_for_each_safe(temp, tmp2, &freelist) { list_del(temp); - vfree(temp); + kfree(temp); } } @@ -1152,13 +1152,10 @@ octeon_register_dispatch_fn(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "Adding opcode to dispatch list linked list\n"); - dispatch = (struct octeon_dispatch *) - vmalloc(sizeof(struct octeon_dispatch)); - if (!dispatch) { - dev_err(&oct->pci_dev->dev, - "No memory to add dispatch function\n"); + dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL); + if (!dispatch) return 1; - } + dispatch->opcode = combined_opcode; dispatch->dispatch_fn = fn; dispatch->arg = fn_arg; From bbf1b94a733d823f195d9e9f960a2e60718acc3e Mon Sep 17 00:00:00 2001 From: Li Heng Date: Thu, 30 Jul 2020 14:43:50 +0800 Subject: [PATCH 1806/2056] bnxt_en: Remove superfluous memset() Fixes coccicheck warning: ./drivers/net/ethernet/broadcom/bnxt/bnxt.c:3730:19-37: WARNING: dma_alloc_coherent use in stats -> hw_stats already zeroes out memory, so memset is not needed dma_alloc_coherent use in status already zeroes out memory, so memset is not needed Reported-by: Hulk Robot Signed-off-by: Li Heng Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2622d3c2d766..31fb5a28e1c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3732,8 +3732,6 @@ static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats, if (!stats->hw_stats) return -ENOMEM; - memset(stats->hw_stats, 0, stats->len); - stats->sw_stats = kzalloc(stats->len, GFP_KERNEL); if (!stats->sw_stats) goto stats_mem_err; From bfc6c183cb820bfca22c6f075bd81be7ef84d774 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 30 Jul 2020 12:23:34 +0530 Subject: [PATCH 1807/2056] sc92031: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions, as through the generic framework PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/silan/sc92031.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c index cb043eb1bdc1..f94078f8ebe5 100644 --- a/drivers/net/ethernet/silan/sc92031.c +++ b/drivers/net/ethernet/silan/sc92031.c @@ -1499,15 +1499,13 @@ static void sc92031_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused sc92031_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct sc92031_priv *priv = netdev_priv(dev); - pci_save_state(pdev); - if (!netif_running(dev)) - goto out; + return 0; netif_device_detach(dev); @@ -1521,22 +1519,16 @@ static int sc92031_suspend(struct pci_dev *pdev, pm_message_t state) spin_unlock_bh(&priv->lock); -out: - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; } -static int sc92031_resume(struct pci_dev *pdev) +static int __maybe_unused sc92031_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct sc92031_priv *priv = netdev_priv(dev); - pci_restore_state(pdev); - pci_set_power_state(pdev, PCI_D0); - if (!netif_running(dev)) - goto out; + return 0; /* Interrupts already disabled by sc92031_suspend */ spin_lock_bh(&priv->lock); @@ -1553,7 +1545,6 @@ static int sc92031_resume(struct pci_dev *pdev) else netif_tx_disable(dev); -out: return 0; } @@ -1565,13 +1556,14 @@ static const struct pci_device_id sc92031_pci_device_id_table[] = { }; MODULE_DEVICE_TABLE(pci, sc92031_pci_device_id_table); +static SIMPLE_DEV_PM_OPS(sc92031_pm_ops, sc92031_suspend, sc92031_resume); + static struct pci_driver sc92031_pci_driver = { .name = SC92031_NAME, .id_table = sc92031_pci_device_id_table, .probe = sc92031_probe, .remove = sc92031_remove, - .suspend = sc92031_suspend, - .resume = sc92031_resume, + .driver.pm = &sc92031_pm_ops, }; 
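/*
 * Illustrative-only sketch of the same conversion pattern for a made-up
 * "foo" PCI network driver (names here are not from the tree): the
 * callbacks take a struct device *, all PCI state/power register
 * handling is left to the PCI core, and the pair is attached through
 * dev_pm_ops instead of the legacy .suspend/.resume hooks.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev))
		netif_device_detach(ndev);

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	netif_device_attach(ndev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

/* and in struct pci_driver:  .driver.pm = &foo_pm_ops  (no .suspend/.resume) */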
module_pci_driver(sc92031_pci_driver); From 7fa8bb48a45091dba1f8e7163451538b34314e1e Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 30 Jul 2020 12:23:35 +0530 Subject: [PATCH 1808/2056] sis900: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions, as through the generic framework PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/sis/sis900.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 82e020add19f..336105f77313 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -2502,11 +2502,9 @@ static void sis900_remove(struct pci_dev *pci_dev) pci_release_regions(pci_dev); } -#ifdef CONFIG_PM - -static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int __maybe_unused sis900_suspend(struct device *dev) { - struct net_device *net_dev = pci_get_drvdata(pci_dev); + struct net_device *net_dev = dev_get_drvdata(dev); struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; @@ -2519,22 +2517,17 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) /* Stop the chip's Tx and Rx Status Machine */ sw32(cr, RxDIS | TxDIS | sr32(cr)); - pci_set_power_state(pci_dev, PCI_D3hot); - pci_save_state(pci_dev); - return 0; } -static int sis900_resume(struct pci_dev *pci_dev) +static int __maybe_unused sis900_resume(struct device *dev) { - struct net_device *net_dev = pci_get_drvdata(pci_dev); + struct net_device *net_dev = dev_get_drvdata(dev); struct sis900_private *sis_priv = netdev_priv(net_dev); void __iomem *ioaddr = sis_priv->ioaddr; if(!netif_running(net_dev)) return 0; - pci_restore_state(pci_dev); - pci_set_power_state(pci_dev, PCI_D0); sis900_init_rxfilter(net_dev); @@ -2558,17 +2551,15 @@ static int sis900_resume(struct pci_dev *pci_dev) return 0; } -#endif /* CONFIG_PM */ + +static SIMPLE_DEV_PM_OPS(sis900_pm_ops, sis900_suspend, sis900_resume); static struct pci_driver sis900_pci_driver = { .name = SIS900_MODULE_NAME, .id_table = sis900_pci_tbl, .probe = sis900_probe, .remove = sis900_remove, -#ifdef CONFIG_PM - .suspend = sis900_suspend, - .resume = sis900_resume, -#endif /* CONFIG_PM */ + .driver.pm = &sis900_pm_ops, }; static int __init sis900_init_module(void) From 04db64652e01deb8d3568df22799e02c155377b5 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Thu, 30 Jul 2020 12:23:36 +0530 Subject: [PATCH 1809/2056] tlan: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. 
This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions, as through the generic framework PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. Signed-off-by: Vaibhav Gupta Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/tlan.c | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 583cd2ef7662..58623e974a0c 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -345,33 +345,21 @@ static void tlan_stop(struct net_device *dev) } } -#ifdef CONFIG_PM - -static int tlan_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused tlan_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); if (netif_running(dev)) tlan_stop(dev); netif_device_detach(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); return 0; } -static int tlan_resume(struct pci_dev *pdev) +static int __maybe_unused tlan_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - int rc = pci_enable_device(pdev); - - if (rc) - return rc; - pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D0, 0); + struct net_device *dev = dev_get_drvdata(dev_d); netif_device_attach(dev); if (netif_running(dev)) @@ -380,21 +368,14 @@ static int tlan_resume(struct pci_dev *pdev) return 0; } -#else /* CONFIG_PM */ - -#define tlan_suspend NULL -#define tlan_resume NULL - -#endif /* CONFIG_PM */ - +static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume); static struct pci_driver tlan_driver = { .name = "tlan", .id_table = tlan_pci_tbl, .probe = tlan_init_one, .remove = tlan_remove_one, - .suspend = tlan_suspend, - .resume = tlan_resume, + .driver.pm = &tlan_pm_ops, }; static int __init tlan_probe(void) From bd69058f50d5ffa659423bcfa6fe6280ce9c760a Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 15:24:19 +0800 Subject: [PATCH 1810/2056] net: ll_temac: Use devm_platform_ioremap_resource_byname() platform_get_resource() may fail and return NULL, so we had better check its return value to avoid a NULL pointer dereference a bit later in the code. Fix it to use devm_platform_ioremap_resource_byname() instead of calling platform_get_resource_byname() and devm_ioremap(). Fixes: 8425c41d1ef7 ("net: ll_temac: Extend support to non-device-tree platforms") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/xilinx/ll_temac_main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 929244064abd..9a15f14daa47 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1407,10 +1407,8 @@ static int temac_probe(struct platform_device *pdev) } /* map device registers */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lp->regs = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - if (!lp->regs) { + lp->regs = devm_platform_ioremap_resource_byname(pdev, 0); + if (IS_ERR(lp->regs)) { dev_err(&pdev->dev, "could not map TEMAC registers\n"); return -ENOMEM; } From 9a9373ffc7338377c3837ce0ccd40f5c4402c9d1 Mon Sep 17 00:00:00 2001 From: Alain Michaud Date: Fri, 31 Jul 2020 01:05:34 +0000 Subject: [PATCH 1811/2056] Bluetooth: use the proper scan params when conn is pending MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When an LE connection is requested and an RPA update is needed via hci_connect_le_scan, the default scanning parameters are used rather than the connect parameters. This leads to significant delays in the connection establishment process when using lower duty cycle scanning parameters. The patch simply looks at the pended connection list when trying to determine which scanning parameters should be used. Before: < HCI Command: LE Set Extended Scan Parameters (0x08|0x0041) plen 8                             #378 [hci0] 1659.247156         Own address type: Public (0x00)         Filter policy: Ignore not in white list (0x01)         PHYs: 0x01         Entry 0: LE 1M           Type: Passive (0x00)           Interval: 367.500 msec (0x024c)           Window: 37.500 msec (0x003c) After: < HCI Command: LE Set Extended Scan Parameters (0x08|0x0041) plen 8                               #39 [hci0] 7.422109         Own address type: Public (0x00)         Filter policy: Ignore not in white list (0x01)         PHYs: 0x01         Entry 0: LE 1M           Type: Passive (0x00)           Interval: 60.000 msec (0x0060)           Window: 60.000 msec (0x0060) Signed-off-by: Alain Michaud Reviewed-by: Abhishek Pandit-Subedi Reviewed-by: Yu Liu Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_request.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 435400a43a78..e0269192f2e5 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -953,6 +953,27 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, } } +/* Returns true if an le connection is in the scanning state */ +static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == LE_LINK && c->state == BT_CONNECT && + test_bit(HCI_CONN_SCANNING, &c->flags)) { + rcu_read_unlock(); + return true; + } + } + + rcu_read_unlock(); + + return false; +} + /* Ensure to call hci_req_add_le_scan_disable() first to disable the * controller based address resolution to be able to reconfigure * resolving list. 
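/*
 * Selection priority added by this patch (see the following hunk in
 * hci_req_add_le_passive_scan): suspended -> suspend window/interval,
 * pending LE connection (hci_is_le_conn_scanning()) -> connect
 * window/interval, otherwise the default window/interval.  Per the
 * before/after traces in the commit message, this takes a pending
 * connection from a 37.5 ms window every 367.5 ms to a 60 ms window
 * every 60 ms, i.e. effectively continuous scanning until it connects.
 */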
@@ -1003,6 +1024,9 @@ void hci_req_add_le_passive_scan(struct hci_request *req) if (hdev->suspended) { window = hdev->le_scan_window_suspend; interval = hdev->le_scan_int_suspend; + } else if (hci_is_le_conn_scanning(hdev)) { + window = hdev->le_scan_window_connect; + interval = hdev->le_scan_int_connect; } else { window = hdev->le_scan_window; interval = hdev->le_scan_interval; From df78a0c0b67de58934877aad61e0431a2bd0caf1 Mon Sep 17 00:00:00 2001 From: Thomas Pedersen Date: Mon, 1 Jun 2020 23:22:47 -0700 Subject: [PATCH 1812/2056] nl80211: S1G band and channel definitions Gives drivers the definitions needed to advertise support for S1G bands. Signed-off-by: Thomas Pedersen Link: https://lore.kernel.org/r/20200602062247.23212-1-thomas@adapt-ip.com Link: https://lore.kernel.org/r/20200731055636.795173-1-thomas@adapt-ip.com Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath10k/mac.c | 9 ++----- include/net/cfg80211.h | 17 +++++++++++++ include/uapi/linux/nl80211.h | 16 ++++++++++++ net/mac80211/chan.c | 7 +++++- net/mac80211/scan.c | 1 + net/mac80211/tx.c | 1 + net/mac80211/util.c | 5 ++++ net/wireless/chan.c | 35 +++++++++++++++++++++++++++ net/wireless/core.c | 5 ++-- net/wireless/util.c | 8 ++++++ 10 files changed, 94 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 919d15584d4a..3c0c33a9f30c 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -568,11 +568,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_40: phymode = MODE_11NG_HT40; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: - case NL80211_CHAN_WIDTH_80: - case NL80211_CHAN_WIDTH_80P80: - case NL80211_CHAN_WIDTH_160: + default: phymode = MODE_UNKNOWN; break; } @@ -597,8 +593,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef) case NL80211_CHAN_WIDTH_80P80: phymode = MODE_11AC_VHT80_80; break; - case NL80211_CHAN_WIDTH_5: - case NL80211_CHAN_WIDTH_10: + default: phymode = MODE_UNKNOWN; break; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fc7e8807838d..ac6e58193426 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -417,6 +417,22 @@ struct ieee80211_edmg { enum ieee80211_edmg_bw_config bw_config; }; +/** + * struct ieee80211_sta_s1g_cap - STA's S1G capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11ah S1G capabilities for a STA. 
+ * + * @s1g_supported: is STA an S1G STA + * @cap: S1G capabilities information + * @nss_mcs: Supported NSS MCS set + */ +struct ieee80211_sta_s1g_cap { + bool s1g; + u8 cap[10]; /* use S1G_CAPAB_ */ + u8 nss_mcs[5]; +}; + /** * struct ieee80211_supported_band - frequency band definition * @@ -448,6 +464,7 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_sta_s1g_cap s1g_cap; struct ieee80211_edmg edmg_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4e6339ab1fce..ad183469f9af 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4437,6 +4437,11 @@ enum nl80211_key_mode { * attribute must be provided as well * @NL80211_CHAN_WIDTH_5: 5 MHz OFDM channel * @NL80211_CHAN_WIDTH_10: 10 MHz OFDM channel + * @NL80211_CHAN_WIDTH_1: 1 MHz OFDM channel + * @NL80211_CHAN_WIDTH_2: 2 MHz OFDM channel + * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel + * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel + * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -4447,6 +4452,11 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_160, NL80211_CHAN_WIDTH_5, NL80211_CHAN_WIDTH_10, + NL80211_CHAN_WIDTH_1, + NL80211_CHAN_WIDTH_2, + NL80211_CHAN_WIDTH_4, + NL80211_CHAN_WIDTH_8, + NL80211_CHAN_WIDTH_16, }; /** @@ -4457,11 +4467,15 @@ enum nl80211_chan_width { * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide + * @NL80211_BSS_CHAN_WIDTH_1: control channel is 1 MHz wide + * @NL80211_BSS_CHAN_WIDTH_2: control channel is 2 MHz wide */ enum nl80211_bss_scan_width { NL80211_BSS_CHAN_WIDTH_20, NL80211_BSS_CHAN_WIDTH_10, NL80211_BSS_CHAN_WIDTH_5, + NL80211_BSS_CHAN_WIDTH_1, + NL80211_BSS_CHAN_WIDTH_2, }; /** @@ -4740,6 +4754,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) + * @NL80211_BAND_S1GHZ: around 900MHz, supported by S1G PHYs * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4748,6 +4763,7 @@ enum nl80211_band { NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, NL80211_BAND_6GHZ, + NL80211_BAND_S1GHZ, NUM_NL80211_BANDS, }; diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index e6e192f53e4e..08cf9da9c1e3 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -313,9 +313,14 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, lockdep_assert_held(&local->chanctx_mtx); - /* don't optimize 5MHz, 10MHz, and radar_enabled confs */ + /* don't optimize non-20MHz based and radar_enabled confs */ if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 || ctx->conf.def.width == NL80211_CHAN_WIDTH_10 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_1 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_2 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_4 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_8 || + ctx->conf.def.width == NL80211_CHAN_WIDTH_16 || ctx->conf.radar_enabled) { ctx->conf.min_def = ctx->conf.def; return; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index ad90bbe57457..8003be6dae8a 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -913,6 +913,7 @@ 
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, case NL80211_BSS_CHAN_WIDTH_10: local->scan_chandef.width = NL80211_CHAN_WIDTH_10; break; + default: case NL80211_BSS_CHAN_WIDTH_20: /* If scanning on oper channel, use whatever channel-type * is currently in use. diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1a2941e5244f..ee30ef441f4a 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -166,6 +166,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; + case NL80211_BAND_S1GHZ: case NL80211_BAND_60GHZ: /* TODO, for now fall through */ case NUM_NL80211_BANDS: diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 21c94094a699..64a83ecd0a73 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3730,6 +3730,11 @@ u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c) c->width = NL80211_CHAN_WIDTH_20_NOHT; ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; break; + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: WARN_ON_ONCE(1); diff --git a/net/wireless/chan.c b/net/wireless/chan.c index cddf92c5d09e..90f0f82cd9ca 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -153,6 +153,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_1: + case NL80211_CHAN_WIDTH_2: + case NL80211_CHAN_WIDTH_4: + case NL80211_CHAN_WIDTH_8: + case NL80211_CHAN_WIDTH_16: case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20: @@ -263,6 +268,21 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) int width; switch (c->width) { + case NL80211_CHAN_WIDTH_1: + width = 1; + break; + case NL80211_CHAN_WIDTH_2: + width = 2; + break; + case NL80211_CHAN_WIDTH_4: + width = 4; + break; + case NL80211_CHAN_WIDTH_8: + width = 8; + break; + case NL80211_CHAN_WIDTH_16: + width = 16; + break; case NL80211_CHAN_WIDTH_5: width = 5; break; @@ -911,6 +931,21 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, control_freq = chandef->chan->center_freq; switch (chandef->width) { + case NL80211_CHAN_WIDTH_1: + width = 1; + break; + case NL80211_CHAN_WIDTH_2: + width = 2; + break; + case NL80211_CHAN_WIDTH_4: + width = 4; + break; + case NL80211_CHAN_WIDTH_8: + width = 8; + break; + case NL80211_CHAN_WIDTH_16: + width = 16; + break; case NL80211_CHAN_WIDTH_5: width = 5; break; diff --git a/net/wireless/core.c b/net/wireless/core.c index c623d9bf5096..1971d7e6eb55 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -803,10 +803,11 @@ int wiphy_register(struct wiphy *wiphy) if (WARN_ON(!sband->n_channels)) return -EINVAL; /* - * on 60GHz band, there are no legacy rates, so + * on 60GHz or sub-1Ghz band, there are no legacy rates, so * n_bitrates is 0 */ - if (WARN_ON(band != NL80211_BAND_60GHZ && + if (WARN_ON((band != NL80211_BAND_60GHZ && + band != NL80211_BAND_S1GHZ) && !sband->n_bitrates)) return -EINVAL; diff --git a/net/wireless/util.c b/net/wireless/util.c index 4d3b76f94f55..26a977343c3b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -102,6 +102,8 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band) if (chan < 7) return MHZ_TO_KHZ(56160 + chan * 2160); break; + case NL80211_BAND_S1GHZ: + return 902000 + chan * 500; 
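/* With the mapping above, S1G channels sit on a 500 kHz raster anchored
 * at 902 MHz, e.g. channel 1 -> 902500 kHz and channel 37 -> 920500 kHz. */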
default: ; } @@ -210,6 +212,12 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband) WARN_ON(!sband->ht_cap.ht_supported); WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e); break; + case NL80211_BAND_S1GHZ: + /* Figure 9-589bd: 3 means unsupported, so != 3 means at least + * mandatory is ok. + */ + WARN_ON((sband->s1g_cap.nss_mcs[0] & 0x3) == 0x3); + break; case NUM_NL80211_BANDS: default: WARN_ON(1); From f2a0c18759072dbd5135f72a8035f6fb838df425 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 22 Jul 2020 16:38:30 +0100 Subject: [PATCH 1813/2056] mac80211: remove the need for variable rates_idx Currently rates_idx is being initialized with the value -1 and this value is never read so the initialization is redundant and can be removed. The next time the variable is used it is assigned a value that is returned a few statements later. Just return i - 1 and remove the need for rates_idx. Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Link: https://lore.kernel.org/r/20200722153830.959010-1-colin.king@canonical.com Signed-off-by: Johannes Berg --- net/mac80211/status.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/mac80211/status.c b/net/mac80211/status.c index cbc40b358ba2..adb1d30ce06e 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -799,7 +799,6 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, struct ieee80211_tx_info *info, int *retry_count) { - int rates_idx = -1; int count = -1; int i; @@ -821,13 +820,12 @@ static int ieee80211_tx_get_rates(struct ieee80211_hw *hw, count += info->status.rates[i].count; } - rates_idx = i - 1; if (count < 0) count = 0; *retry_count = count; - return rates_idx; + return i - 1; } void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb, From 9c167b2ddc1a89e6f6378e65bfd20e8c0ddf65f1 Mon Sep 17 00:00:00 2001 From: Julian Squires Date: Mon, 20 Jul 2020 12:20:35 -0230 Subject: [PATCH 1814/2056] cfg80211: allow vendor dumpit to terminate by returning 0 nl80211 vendor netlink dumpit, like netlink_callback->dump, should signal successful completion by returning 0. Currently, that will just cause dumpit to be called again, possibly many times until an error occurs. Since skb->len is never going to be 0 by the time dumpit is called, the only way for dumpit to signal completion is by returning an error. If it returns a positive value, the current message is cancelled, but that positive value is returned and nl80211_vendor_cmd_dump gets called again. Fix that by passing a return value of 0 through. Signed-off-by: Julian Squires Link: https://lore.kernel.org/r/20200720145033.401307-1-julian@cipht.net [reword commit message] Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 0e07fb8585fb..e9cf08a1a8b9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -13479,7 +13479,7 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; - } else if (err) { + } else if (err <= 0) { genlmsg_cancel(skb, hdr); goto out; } From 987021726f9f41a1daf335c57cd7b6261109cdb2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:21 -0700 Subject: [PATCH 1815/2056] net/wireless: nl80211.h: drop duplicate words in comments Drop doubled words in several comments. 
Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-1-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index ad183469f9af..f47a7a8d0216 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -363,7 +363,7 @@ * @NL80211_CMD_SET_STATION: Set station attributes for station identified by * %NL80211_ATTR_MAC on the interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_NEW_STATION: Add a station with given attributes to the - * the interface identified by %NL80211_ATTR_IFINDEX. + * interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_STATION: Remove a station identified by %NL80211_ATTR_MAC * or, if no MAC address given, all stations, on the interface identified * by %NL80211_ATTR_IFINDEX. %NL80211_ATTR_MGMT_SUBTYPE and @@ -383,7 +383,7 @@ * @NL80211_CMD_DEL_MPATH: Delete a mesh path to the destination given by * %NL80211_ATTR_MAC. * @NL80211_CMD_NEW_PATH: Add a mesh path with given attributes to the - * the interface identified by %NL80211_ATTR_IFINDEX. + * interface identified by %NL80211_ATTR_IFINDEX. * @NL80211_CMD_DEL_PATH: Remove a mesh path identified by %NL80211_ATTR_MAC * or, if no MAC address given, all mesh paths, on the interface identified * by %NL80211_ATTR_IFINDEX. @@ -934,7 +934,7 @@ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules. * * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the - * the new channel information (Channel Switch Announcement - CSA) + * new channel information (Channel Switch Announcement - CSA) * in the beacon for some time (as defined in the * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the * new channel. Userspace provides the new channel information (using @@ -1113,7 +1113,7 @@ * randomization may be enabled and configured by specifying the * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes. * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute. - * A u64 cookie for further %NL80211_ATTR_COOKIE use is is returned in + * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in * the netlink extended ack message. * * To cancel a measurement, close the socket that requested it. @@ -1511,7 +1511,7 @@ enum nl80211_commands { * rates as defined by IEEE 802.11 7.3.2.2 but without the length * restriction (at most %NL80211_MAX_SUPP_RATES). * @NL80211_ATTR_STA_VLAN: interface index of VLAN interface to move station - * to, or the AP interface the station was originally added to to. + * to, or the AP interface the station was originally added to. * @NL80211_ATTR_STA_INFO: information about a station, part of station info * given for %NL80211_CMD_GET_STATION, nested attribute containing * info as possible, see &enum nl80211_sta_info. @@ -2084,7 +2084,7 @@ enum nl80211_commands { * @NL80211_ATTR_STA_SUPPORTED_CHANNELS: array of supported channels. * * @NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES: array of supported - * supported operating classes. + * operating classes. * * @NL80211_ATTR_HANDLE_DFS: A flag indicating whether user space * controls DFS operation in IBSS mode. 
If the flag is included in @@ -2395,7 +2395,7 @@ enum nl80211_commands { * nl80211_txq_stats) * @NL80211_ATTR_TXQ_LIMIT: Total packet limit for the TXQ queues for this phy. * The smaller of this and the memory limit is enforced. - * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory memory limit (in bytes) for the + * @NL80211_ATTR_TXQ_MEMORY_LIMIT: Total memory limit (in bytes) for the * TXQ queues for this phy. The smaller of this and the packet limit is * enforced. * @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes @@ -5652,7 +5652,7 @@ enum nl80211_feature_flags { * enum nl80211_ext_feature_index - bit index of extended features. * @NL80211_EXT_FEATURE_VHT_IBSS: This driver supports IBSS with VHT datarates. * @NL80211_EXT_FEATURE_RRM: This driver supports RRM. When featured, user can - * can request to use RRM (see %NL80211_ATTR_USE_RRM) with + * request to use RRM (see %NL80211_ATTR_USE_RRM) with * %NL80211_CMD_ASSOCIATE and %NL80211_CMD_CONNECT requests, which will set * the ASSOC_REQ_USE_RRM flag in the association request even if * NL80211_FEATURE_QUIET is not advertized. @@ -6061,7 +6061,7 @@ enum nl80211_dfs_state { }; /** - * enum enum nl80211_protocol_features - nl80211 protocol features + * enum nl80211_protocol_features - nl80211 protocol features * @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting * wiphy dumps (if requested by the application with the attribute * %NL80211_ATTR_SPLIT_WIPHY_DUMP. Also supported is filtering the From 0f55c0c500f2bbfc5cc5590cdf6973b3f64dc195 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:22 -0700 Subject: [PATCH 1816/2056] net/wireless: wireless.h: drop duplicate word in comments Drop doubled word "threshold" in a comment. Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-2-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/uapi/linux/wireless.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index 24f3371ad826..08967b3f19c8 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -914,7 +914,7 @@ union iwreq_data { struct iw_param sens; /* signal level threshold */ struct iw_param bitrate; /* default bit rate */ struct iw_param txpower; /* default transmit power */ - struct iw_param rts; /* RTS threshold threshold */ + struct iw_param rts; /* RTS threshold */ struct iw_param frag; /* Fragmentation threshold */ __u32 mode; /* Operation mode */ struct iw_param retry; /* Retry limits & lifetime */ From 085a6c109b9dbcb6dfc0c7d1001f554a6d513342 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:23 -0700 Subject: [PATCH 1817/2056] net/wireless: cfg80211.h: drop duplicate words in comments Drop doubled word "by" in a comment. Change "operate in in" to "operate with in" as is used below. 
Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-3-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index ac6e58193426..5c7ea0b5dc50 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -439,7 +439,7 @@ struct ieee80211_sta_s1g_cap { * This structure describes a frequency band a wiphy * is able to operate in. * - * @channels: Array of channels the hardware can operate in + * @channels: Array of channels the hardware can operate with * in this band. * @band: the band this structure represents * @n_channels: Number of channels in @channels @@ -5527,7 +5527,7 @@ static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, * * @skb: The input A-MSDU frame without any headers. * @list: The output list of 802.3 frames. It must be allocated and - * initialized by by the caller. + * initialized by the caller. * @addr: The device MAC address. * @iftype: The device interface type. * @extra_headroom: The hardware extra headroom for SKBs in the @list. From 66b239d28c7524324d2474b3faf206d00eadcd78 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:24 -0700 Subject: [PATCH 1818/2056] net/wireless: mac80211.h: drop duplicate words in comments Drop doubled words "are" and "by" in comments. Change doubled "to to" to "to the". Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-4-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/mac80211.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 11d5610d2ad5..c0a597167f14 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2727,7 +2727,7 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * for devices that support offload of data packets (e.g. ARP responses). * * Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag - * when they are able to replace in-use PTK keys according to to following + * when they are able to replace in-use PTK keys according to the following * requirements: * 1) They do not hand over frames decrypted with the old key to mac80211 once the call to set_key() with command %DISABLE_KEY has been @@ -4709,7 +4709,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw, * * Call this function for all transmitted data frames after their transmit * completion. This callback should only be called for data frames which - * are are using driver's (or hardware's) offload capability of encap/decap + * are using driver's (or hardware's) offload capability of encap/decap * 802.11 frames. * * This function may not be called in IRQ context. Calls to this function @@ -6344,7 +6344,7 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid); * * Note that this must be called in an rcu_read_lock() critical section, * which can only be released after the SKB was handled. Some pointers in - * skb->cb, e.g. the key pointer, are protected by by RCU and thus the + * skb->cb, e.g. the key pointer, are protected by RCU and thus the * critical section must persist not just for the duration of this call * but for the duration of the frame handling. 
* However, also note that while in the wake_tx_queue() method, From dec4ca931244632eeca46f406b2ce7f5a1fe723f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Wed, 15 Jul 2020 09:43:25 -0700 Subject: [PATCH 1819/2056] net/wireless: regulatory.h: drop duplicate word in comment Drop doubled word "of" in a comment. Signed-off-by: Randy Dunlap Cc: netdev@vger.kernel.org Cc: Kalle Valo Cc: linux-wireless@vger.kernel.org Cc: Johannes Berg Link: https://lore.kernel.org/r/20200715164325.9109-5-rdunlap@infradead.org Signed-off-by: Johannes Berg --- include/net/regulatory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 09a3099886e5..47f06f6f5a67 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -44,7 +44,7 @@ enum environment_cap { * and potentially inform users of which devices specifically * cased the conflicts. * @initiator: indicates who sent this request, could be any of - * of those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) + * those set in nl80211_reg_initiator (%NL80211_REGDOM_SET_BY_*) * @alpha2: the ISO / IEC 3166 alpha2 country code of the requested * regulatory domain. We have a few special codes: * 00 - World regulatory domain From 8328685682965cbf9348f6f85cadc8c0598771d9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Jul 2020 19:35:39 +0200 Subject: [PATCH 1820/2056] nl80211: Remove a misleading label in 'nl80211_trigger_scan()' Since commit 5fe231e87372 ("cfg80211: vastly simplify locking"), the 'unlock' label at the end of 'nl80211_trigger_scan()' is useless and misleading, because nothing is unlocked there. Direct return can be used instead of 'err = -; goto unlock;' construction. Remove this label and simplify code accordingly. 
Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200712173539.274395-1-christophe.jaillet@wanadoo.fr Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e9cf08a1a8b9..96b94a900625 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7774,10 +7774,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->scan) return -EOPNOTSUPP; - if (rdev->scan_req || rdev->scan_msg) { - err = -EBUSY; - goto unlock; - } + if (rdev->scan_req || rdev->scan_msg) + return -EBUSY; if (info->attrs[NL80211_ATTR_SCAN_FREQ_KHZ]) { if (!wiphy_ext_feature_isset(wiphy, @@ -7790,10 +7788,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) if (scan_freqs) { n_channels = validate_scan_freqs(scan_freqs); - if (!n_channels) { - err = -EINVAL; - goto unlock; - } + if (!n_channels) + return -EINVAL; } else { n_channels = ieee80211_get_num_supported_channels(wiphy); } @@ -7802,29 +7798,23 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; - if (n_ssids > wiphy->max_scan_ssids) { - err = -EINVAL; - goto unlock; - } + if (n_ssids > wiphy->max_scan_ssids) + return -EINVAL; if (info->attrs[NL80211_ATTR_IE]) ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); else ie_len = 0; - if (ie_len > wiphy->max_scan_ie_len) { - err = -EINVAL; - goto unlock; - } + if (ie_len > wiphy->max_scan_ie_len) + return -EINVAL; request = kzalloc(sizeof(*request) + sizeof(*request->ssids) * n_ssids + sizeof(*request->channels) * n_channels + ie_len, GFP_KERNEL); - if (!request) { - err = -ENOMEM; - goto unlock; - } + if (!request) + return -ENOMEM; if (n_ssids) request->ssids = (void *)&request->channels[n_channels]; @@ -8013,7 +8003,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) kfree(request); } - unlock: return err; } From 504776be46cb61bc0606e0e943fd6a04c38f2130 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 12 Jul 2020 19:35:51 +0200 Subject: [PATCH 1821/2056] nl80211: Simplify error handling path in 'nl80211_trigger_scan()' Re-write the end of 'nl80211_trigger_scan()' with a more standard, easy to understand and future proof version. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20200712173551.274448-1-christophe.jaillet@wanadoo.fr Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 96b94a900625..6fdf818f66cf 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -7993,15 +7993,18 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) rdev->scan_req = request; err = rdev_scan(rdev, request); - if (!err) { - nl80211_send_scan_start(rdev, wdev); - if (wdev->netdev) - dev_hold(wdev->netdev); - } else { + if (err) + goto out_free; + + nl80211_send_scan_start(rdev, wdev); + if (wdev->netdev) + dev_hold(wdev->netdev); + + return 0; + out_free: - rdev->scan_req = NULL; - kfree(request); - } + rdev->scan_req = NULL; + kfree(request); return err; } From fc0561dc6a9c61613ea703f54f9cf1c6bb2e0b68 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. 
Silva" Date: Tue, 7 Jul 2020 15:45:48 -0500 Subject: [PATCH 1822/2056] mac80211: Use fallthrough pseudo-keyword Replace the existing /* fall through */ comments and its variants with the new pseudo-keyword macro fallthrough[1]. Also, remove unnecessary fall-through markings when it is the case. [1] https://www.kernel.org/doc/html/latest/process/deprecated.html?highlight=fallthrough#implicit-switch-case-fall-through Signed-off-by: Gustavo A. R. Silva Link: https://lore.kernel.org/r/20200707204548.GA9320@embeddedor Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 2 +- net/mac80211/cfg.c | 6 +++--- net/mac80211/chan.c | 2 +- net/mac80211/ht.c | 4 ++-- net/mac80211/ibss.c | 4 ++-- net/mac80211/iface.c | 10 +++++----- net/mac80211/key.c | 2 +- net/mac80211/mesh.c | 4 ++-- net/mac80211/mesh_hwmp.c | 2 +- net/mac80211/mesh_plink.c | 2 +- net/mac80211/mlme.c | 4 ++-- net/mac80211/offchannel.c | 4 ++-- net/mac80211/rx.c | 4 ++-- net/mac80211/tdls.c | 8 ++++---- net/mac80211/tx.c | 12 ++++++------ net/mac80211/util.c | 15 +++++++-------- net/mac80211/wme.c | 2 +- 17 files changed, 43 insertions(+), 44 deletions(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1356e8cbe617..9dd9d73f4484 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4334,7 +4334,7 @@ static int __init init_mac80211_hwsim(void) break; case HWSIM_REGTEST_STRICT_ALL: param.reg_strict = true; - /* fall through */ + fallthrough; case HWSIM_REGTEST_DRIVER_REG_ALL: param.reg_alpha2 = hwsim_alpha2s[0]; break; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9b360544ad6f..b4a74064675e 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -608,12 +608,12 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, case WLAN_CIPHER_SUITE_BIP_CMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_cmac)); - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_BIP_GMAC_128: case WLAN_CIPHER_SUITE_BIP_GMAC_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != offsetof(typeof(kseq), aes_gmac)); - /* fall through */ + fallthrough; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: BUILD_BUG_ON(offsetof(typeof(kseq), ccmp) != @@ -2336,7 +2336,7 @@ static int ieee80211_scan(struct wiphy *wiphy, * for now fall through to allow scanning only when * beaconing hasn't been configured yet */ - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: /* * If the scan has been forced (and the driver supports diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 08cf9da9c1e3..bdc0f29dc6cd 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -748,7 +748,7 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, default: WARN_ONCE(1, "Invalid SMPS mode %d\n", sdata->smps_mode); - /* fall through */ + fallthrough; case IEEE80211_SMPS_OFF: needed_static = sdata->needed_rx_chains; needed_dynamic = sdata->needed_rx_chains; diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index e32906202575..3d62a80b5790 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -250,7 +250,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, switch (sdata->vif.bss_conf.chandef.width) { default: WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: bw = IEEE80211_STA_RX_BW_20; @@ -517,7 +517,7 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, case 
IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); - /* fall through */ + fallthrough; case IEEE80211_SMPS_OFF: action_frame->u.action.u.ht_smps.smps_control = WLAN_HT_SMPS_CONTROL_DISABLED; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 81d26fef41e9..53632c2f5217 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -791,7 +791,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, case NL80211_CHAN_WIDTH_10: case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; break; @@ -1401,7 +1401,7 @@ ieee80211_ibss_setup_scan_channels(struct wiphy *wiphy, break; case NL80211_CHAN_WIDTH_80P80: cf2 = chandef->center_freq2; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_80: width = 80; break; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f900c84fb40f..570f818384e8 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -978,7 +978,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, case NL80211_IFTYPE_P2P_DEVICE: /* relies on synchronize_rcu() below */ RCU_INIT_POINTER(local->p2p_sdata, NULL); - /* fall through */ + fallthrough; default: cancel_work_sync(&sdata->work); /* @@ -1048,7 +1048,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE)) break; - /* fall through */ + fallthrough; default: if (going_down) drv_remove_interface(local, sdata); @@ -1497,7 +1497,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps.bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); @@ -1507,7 +1507,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; - /* fall through */ + fallthrough; case NL80211_IFTYPE_STATION: sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid; ieee80211_sta_setup_sdata(sdata); @@ -1703,7 +1703,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local, goto out_unlock; } } - /* fall through */ + fallthrough; default: /* assign a new address if possible -- try n_addresses first */ for (i = 0; i < local->hw.wiphy->n_addresses; i++) { diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 8f403c1bb908..9c2888004878 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -225,7 +225,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) */ if (sdata->hw_80211_encap) return -EINVAL; - /* Fall through */ + fallthrough; case WLAN_CIPHER_SUITE_AES_CMAC: case WLAN_CIPHER_SUITE_BIP_CMAC_256: diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 5f1ca25b6c97..96f0323c0a3d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1094,10 +1094,10 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, switch (sdata->vif.bss_conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: sta_flags |= IEEE80211_STA_DISABLE_HT; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_20: sta_flags |= IEEE80211_STA_DISABLE_40MHZ; - /* fall through */ + fallthrough; case NL80211_CHAN_WIDTH_40: sta_flags |= IEEE80211_STA_DISABLE_VHT; break; diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 02cde0fd08fe..bae3a3e15b88 100644 --- 
a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1266,7 +1266,7 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata) break; case IEEE80211_PROACTIVE_PREQ_WITH_PREP: flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG; - /* fall through */ + fallthrough; case IEEE80211_PROACTIVE_PREQ_NO_PREP: interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout; target_flags |= IEEE80211_PREQ_TO_FLAG | diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 798e4b6b383f..15f2fc658f70 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -699,7 +699,7 @@ void mesh_plink_timer(struct timer_list *t) break; } reason = WLAN_REASON_MESH_MAX_RETRIES; - /* fall through */ + fallthrough; case NL80211_PLINK_CNF_RCVD: /* confirm timer */ if (!reason) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b2a9d47cf86d..1c1b3f6bc415 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -533,7 +533,7 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); - /* fall through */ + fallthrough; case IEEE80211_SMPS_OFF: cap |= WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; @@ -1529,7 +1529,7 @@ ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata, switch (channel->band) { default: WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case NL80211_BAND_2GHZ: case NL80211_BAND_60GHZ: chan_increment = 1; diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index db3b8bf75656..7b842aa903c3 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -808,13 +808,13 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, if (!sdata->vif.bss_conf.ibss_joined) need_offchan = true; #ifdef CONFIG_MAC80211_MESH - /* fall through */ + fallthrough; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif) && !sdata->u.mesh.mesh_id_len) need_offchan = true; #endif - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 5c5af4b5fc08..8ea93735e47c 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3736,7 +3736,7 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); if (rx->sta) rx->sta->rx_stats.dropped++; - /* fall through */ + fallthrough; case RX_CONTINUE: { struct ieee80211_rate *rate = NULL; struct ieee80211_supported_band *sband; @@ -4752,7 +4752,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, break; default: WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case RX_ENC_LEGACY: if (WARN_ON(status->rate_idx >= sband->n_bitrates)) goto drop; diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index 4b0cff4a07bd..e01e4daeb8cd 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -239,7 +239,7 @@ static enum ieee80211_ac_numbers ieee80211_ac_from_wmm(int ac) switch (ac) { default: WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case 0: return IEEE80211_AC_BE; case 1: @@ -952,7 +952,7 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev, set_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = false; } - /* fall-through */ + fallthrough; case WLAN_TDLS_SETUP_CONFIRM: case WLAN_TDLS_DISCOVERY_REQUEST: initiator = true; @@ -967,7 +967,7 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct 
net_device *dev, clear_sta_flag(sta, WLAN_STA_TDLS_INITIATOR); sta->sta.tdls_initiator = true; } - /* fall-through */ + fallthrough; case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: initiator = false; break; @@ -1222,7 +1222,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, * by the AP. */ drv_mgd_protect_tdls_discover(sdata->local, sdata); - /* fall-through */ + fallthrough; case WLAN_TDLS_SETUP_CONFIRM: case WLAN_PUB_ACTION_TDLS_DISCOVER_RES: /* no special handling */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ee30ef441f4a..0d0e901b1d4c 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1740,7 +1740,7 @@ static bool __ieee80211_tx(struct ieee80211_local *local, case NL80211_IFTYPE_AP_VLAN: sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); - /* fall through */ + fallthrough; default: vif = &sdata->vif; break; @@ -2383,7 +2383,7 @@ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, } else if (sdata->wdev.use_4addr) { return -ENOLINK; } - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_OCB: case NL80211_IFTYPE_ADHOC: @@ -2553,7 +2553,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, band = chanctx_conf->def.chan->band; if (sdata->wdev.use_4addr) break; - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: if (sdata->vif.type == NL80211_IFTYPE_AP) chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); @@ -3000,7 +3000,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) build.hdr_len = 30; break; } - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS); /* DA BSSID SA */ @@ -3711,7 +3711,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, case NL80211_IFTYPE_AP_VLAN: tx.sdata = container_of(tx.sdata->bss, struct ieee80211_sub_if_data, u.ap); - /* fall through */ + fallthrough; default: vif = &tx.sdata->vif; break; @@ -4050,7 +4050,7 @@ static bool ieee80211_multicast_to_unicast(struct sk_buff *skb, return false; if (sdata->wdev.use_4addr) return false; - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: /* check runtime toggle for this bss */ if (!sdata->bss->multicast_to_unicast) diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 64a83ecd0a73..b768ebd7117f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2347,10 +2347,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) case NL80211_IFTYPE_ADHOC: if (sdata->vif.bss_conf.ibss_joined) WARN_ON(drv_join_ibss(local, sdata)); - /* fall through */ + fallthrough; default: ieee80211_reconfig_stations(sdata); - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: /* AP stations are handled later */ for (i = 0; i < IEEE80211_NUM_ACS; i++) drv_conf_tx(local, sdata, i, @@ -2399,7 +2399,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) break; case NL80211_IFTYPE_ADHOC: changed |= BSS_CHANGED_IBSS; - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: changed |= BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS; @@ -2414,8 +2414,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) if (rcu_access_pointer(sdata->u.ap.beacon)) drv_start_ap(local, sdata); } - - /* fall through */ + fallthrough; case NL80211_IFTYPE_MESH_POINT: if (sdata->vif.bss_conf.enable_beacon) { changed |= BSS_CHANGED_BEACON | @@ -2885,7 +2884,7 @@ void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata, case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); - /* fall through */ + fallthrough; 
case IEEE80211_SMPS_OFF: cap |= u16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, IEEE80211_HE_6GHZ_CAP_SM_PS); @@ -3196,7 +3195,7 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, break; case 0x01: support_80_80 = false; - /* fall through */ + fallthrough; case 0x02: case 0x03: ccf1 = ccfs2; @@ -3566,7 +3565,7 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, break; default: WARN_ON(1); - /* fall through */ + fallthrough; case RX_ENC_LEGACY: { struct ieee80211_supported_band *sband; int shift = 0; diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 72920d82928c..2fb99325135a 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -198,7 +198,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, sta = rcu_dereference(sdata->u.vlan.sta); if (sta) break; - /* fall through */ + fallthrough; case NL80211_IFTYPE_AP: ra = skb->data; break; From 2f1805ea209a146669e0b660633ed22f49e1dd49 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 25 Jun 2020 14:15:24 +0300 Subject: [PATCH 1823/2056] cfg80211: allow the low level driver to flush the BSS table The low level driver adds its own opaque information in the BSS table in the cfg80211_bss structure. The low level driver may need to signal that this information is no longer relevant and needs to be recreated. Add an API to allow the low level driver to do that. iwlwifi needs this because it keeps there an information about the firmware's internal clock. This is kept in mac80211's struct ieee80211_bss::sync_device_ts. This information is populated while we scan, we add the internal firmware's clock to each beacon which allows us to program the firmware correctly after association so that it'll know when (in terms of its internal clock) the DTIM and TBTT will happen. When the firmware is reset this internal clock is reset as well and ieee80211_bss::sync_device_ts is no longer accurate. iwlwifi will call this new API any time the firmware is started. 
Signed-off-by: Emmanuel Grumbach Link: https://lore.kernel.org/r/20200625111524.3992-1-emmanuel.grumbach@intel.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 6 ++++++ net/wireless/scan.c | 10 ++++++++++ 2 files changed, 16 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5c7ea0b5dc50..fa4d5627397f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -7899,4 +7899,10 @@ void cfg80211_update_owe_info_event(struct net_device *netdev, struct cfg80211_update_owe_info *owe_info, gfp_t gfp); +/** + * cfg80211_bss_flush - resets all the scan entries + * @wiphy: the wiphy + */ +void cfg80211_bss_flush(struct wiphy *wiphy); + #endif /* __NET_CFG80211_H */ diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 74ea4cfb39fb..e67a74488bbe 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -712,6 +712,16 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev) __cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE); } +void cfg80211_bss_flush(struct wiphy *wiphy) +{ + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); + + spin_lock_bh(&rdev->bss_lock); + __cfg80211_bss_expire(rdev, jiffies); + spin_unlock_bh(&rdev->bss_lock); +} +EXPORT_SYMBOL(cfg80211_bss_flush); + const struct element * cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len, const u8 *match, unsigned int match_len, From e3718a611470d311a92c60d4eb535270b49a7108 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= Date: Wed, 17 Jun 2020 09:30:33 +0200 Subject: [PATCH 1824/2056] cfg80211/mac80211: add mesh_param "mesh_nolearn" to skip path discovery MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, before being able to forward a packet between two 802.11s nodes, both a PLINK handshake is performed upon receiving a beacon and then later a PREQ/PREP exchange for path discovery is performed on demand upon receiving a data frame to forward. When running a mesh protocol on top of an 802.11s interface, like batman-adv, we do not need the multi-hop mesh routing capabilities of 802.11s and usually set mesh_fwding=0. However, even with mesh_fwding=0 the PREQ/PREP path discovery is still performed on demand. Even though in this scenario the next hop PREQ/PREP will determine is always the direct 11s neighbor node. The new mesh_nolearn parameter allows to skip the PREQ/PREP exchange in this scenario, leading to a reduced delay, reduced packet buffering and simplifies HWMP in general. mesh_nolearn is still rather conservative in that if the packet destination is not a direct 11s neighbor, it will fall back to PREQ/PREP path discovery. For normal, multi-hop 802.11s mesh routing it is usually not advisable to enable mesh_nolearn as a transmission to a direct but distant neighbor might be worse than reaching that same node via a more robust / higher throughput etc. multi-hop path. 
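As a usage illustration (not part of the patch): userspace toggles the new knob with NL80211_CMD_SET_MESH_CONFIG, nesting the attribute added below under NL80211_ATTR_MESH_CONFIG. A minimal libnl-3 sketch, with all error and ACK handling omitted and only the nl80211 pieces taken from the patch, might look like this:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static void set_mesh_nolearn(int ifindex, int enable)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *container;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_SET_MESH_CONFIG, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	/* mesh parameters travel as a nested attribute set */
	container = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
	nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, enable ? 1 : 0);
	nla_nest_end(msg, container);

	nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
}

A batman-adv style setup would typically pair this with dot11MeshForwarding disabled, as described above.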
Cc: Sven Eckelmann Cc: Simon Wunderlich Signed-off-by: Linus Lüssing Link: https://lore.kernel.org/r/20200617073034.26149-1-linus.luessing@c0d3.blue [fix nl80211 policy to range 0/1 only] Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 6 ++++++ include/uapi/linux/nl80211.h | 7 +++++++ net/mac80211/cfg.c | 2 ++ net/mac80211/debugfs_netdev.c | 2 ++ net/mac80211/mesh_hwmp.c | 39 +++++++++++++++++++++++++++++++++++ net/wireless/mesh.c | 1 + net/wireless/nl80211.c | 7 ++++++- net/wireless/trace.h | 4 +++- 8 files changed, 66 insertions(+), 2 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index fa4d5627397f..78b220950942 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1870,6 +1870,11 @@ struct bss_parameters { * connected to a mesh gate in mesh formation info. If false, the * value in mesh formation is determined by the presence of root paths * in the mesh path table + * @dot11MeshNolearn: Try to avoid multi-hop path discovery (e.g. PREQ/PREP + * for HWMP) if the destination is a direct neighbor. Note that this might + * not be the optimal decision as a multi-hop route might be better. So + * if using this setting you will likely also want to disable + * dot11MeshForwarding and use another mesh routing protocol on top. */ struct mesh_config { u16 dot11MeshRetryTimeout; @@ -1901,6 +1906,7 @@ struct mesh_config { enum nl80211_mesh_power_mode power_mode; u16 dot11MeshAwakeWindowDuration; u32 plink_timeout; + bool dot11MeshNolearn; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f47a7a8d0216..a83d8faf88ac 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4236,6 +4236,12 @@ enum nl80211_mesh_power_mode { * field. If left unset then the mesh formation field will only * advertise such if there is an active root mesh path. * + * @NL80211_MESHCONF_NOLEARN: Try to avoid multi-hop path discovery (e.g. + * PREQ/PREP for HWMP) if the destination is a direct neighbor. Note that + * this might not be the optimal decision as a multi-hop route might be + * better. So if using this setting you will likely also want to disable + * dot11MeshForwarding and use another mesh routing protocol on top. 
+ * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -4269,6 +4275,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_AWAKE_WINDOW, NL80211_MESHCONF_PLINK_TIMEOUT, NL80211_MESHCONF_CONNECTED_TO_GATE, + NL80211_MESHCONF_NOLEARN, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index b4a74064675e..9af56b848544 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2126,6 +2126,8 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask)) conf->dot11MeshConnectedToMeshGate = nconf->dot11MeshConnectedToMeshGate; + if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) + conf->dot11MeshNolearn = nconf->dot11MeshNolearn; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index d7e955127d5c..09eab2c3f380 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -638,6 +638,7 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC); IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); +IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ @@ -762,6 +763,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(power_mode); MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); + MESHPARAMS_ADD(dot11MeshNolearn); #undef MESHPARAMS_ADD } #endif diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index bae3a3e15b88..bec23d2eee7a 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -1172,6 +1172,40 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, return -ENOENT; } +/** + * mesh_nexthop_lookup_nolearn - try to set next hop without path discovery + * @skb: 802.11 frame to be sent + * @sdata: network subif the frame will be sent through + * + * Check if the meshDA (addr3) of a unicast frame is a direct neighbor. + * And if so, set the RA (addr1) to it to transmit to this node directly, + * avoiding PREQ/PREP path discovery. + * + * Returns: 0 if the next hop was found and -ENOENT otherwise. + */ +static int mesh_nexthop_lookup_nolearn(struct ieee80211_sub_if_data *sdata, + struct sk_buff *skb) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct sta_info *sta; + + if (is_multicast_ether_addr(hdr->addr1)) + return -ENOENT; + + rcu_read_lock(); + sta = sta_info_get(sdata, hdr->addr3); + + if (!sta || sta->mesh->plink_state != NL80211_PLINK_ESTAB) { + rcu_read_unlock(); + return -ENOENT; + } + rcu_read_unlock(); + + memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); + memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN); + return 0; +} + /** * mesh_nexthop_lookup - put the appropriate next hop on a mesh frame. 
Calling * this function is considered "using" the associated mpath, so preempt a path @@ -1185,11 +1219,16 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata, int mesh_nexthop_lookup(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { + struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; struct mesh_path *mpath; struct sta_info *next_hop; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; u8 *target_addr = hdr->addr3; + if (ifmsh->mshcfg.dot11MeshNolearn && + !mesh_nexthop_lookup_nolearn(sdata, skb)) + return 0; + mpath = mesh_path_lookup(sdata, target_addr); if (!mpath || !(mpath->flags & MESH_PATH_ACTIVE)) return -ENOENT; diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index eac5aa1419fc..e4e363138279 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -78,6 +78,7 @@ const struct mesh_config default_mesh_config = { .power_mode = NL80211_MESH_POWER_ACTIVE, .dot11MeshAwakeWindowDuration = MESH_DEFAULT_AWAKE_WINDOW, .plink_timeout = MESH_DEFAULT_PLINK_TIMEOUT, + .dot11MeshNolearn = false, }; const struct mesh_setup default_mesh_setup = { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 6fdf818f66cf..257c06315464 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6885,7 +6885,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT, cur_params.plink_timeout) || nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, - cur_params.dot11MeshConnectedToMeshGate)) + cur_params.dot11MeshConnectedToMeshGate) || + nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, + cur_params.dot11MeshNolearn)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -6943,6 +6945,7 @@ nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 }, [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), + [NL80211_MESHCONF_NOLEARN] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static const struct nla_policy @@ -7094,6 +7097,8 @@ do { \ NL80211_MESHCONF_AWAKE_WINDOW, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, plink_timeout, mask, NL80211_MESHCONF_PLINK_TIMEOUT, nla_get_u32); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNolearn, mask, + NL80211_MESHCONF_NOLEARN, nla_get_u8); if (mask_out) *mask_out = mask; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b23cab016521..6e218a0acd4e 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -68,7 +68,8 @@ __field(u16, ht_opmode) \ __field(u32, dot11MeshHWMPactivePathToRootTimeout) \ __field(u16, dot11MeshHWMProotInterval) \ - __field(u16, dot11MeshHWMPconfirmationInterval) + __field(u16, dot11MeshHWMPconfirmationInterval) \ + __field(bool, dot11MeshNolearn) #define MESH_CFG_ASSIGN \ do { \ __entry->dot11MeshRetryTimeout = conf->dot11MeshRetryTimeout; \ @@ -109,6 +110,7 @@ conf->dot11MeshHWMProotInterval; \ __entry->dot11MeshHWMPconfirmationInterval = \ conf->dot11MeshHWMPconfirmationInterval; \ + __entry->dot11MeshNolearn = conf->dot11MeshNolearn; \ } while (0) #define CHAN_ENTRY __field(enum nl80211_band, band) \ From 184eebe664f0e11c485f6d309fe56297b3f75e9e Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Thu, 11 Jun 2020 16:02:37 +0200 Subject: [PATCH 1825/2056] cfg80211/mac80211: add connected to auth server to meshconf Besides information about num of peerings and gate connectivity, the mesh formation byte also contains a flag for authentication server connectivity, that currently 
cannot be set in the mesh conf. This patch adds this capability, which is necessary to implement 802.1X authentication in mesh mode. Signed-off-by: Markus Theil Link: https://lore.kernel.org/r/20200611140238.427461-1-markus.theil@tu-ilmenau.de Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 1 + include/uapi/linux/nl80211.h | 5 +++++ net/mac80211/cfg.c | 3 +++ net/mac80211/debugfs_netdev.c | 3 +++ net/mac80211/mesh.c | 5 ++++- net/wireless/nl80211.c | 8 +++++++- 6 files changed, 23 insertions(+), 2 deletions(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 78b220950942..8d5071f84ffe 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1895,6 +1895,7 @@ struct mesh_config { u16 dot11MeshHWMPnetDiameterTraversalTime; u8 dot11MeshHWMPRootMode; bool dot11MeshConnectedToMeshGate; + bool dot11MeshConnectedToAuthServer; u16 dot11MeshHWMPRannInterval; bool dot11MeshGateAnnouncementProtocol; bool dot11MeshForwarding; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index a83d8faf88ac..f1770e3756f4 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4242,6 +4242,10 @@ enum nl80211_mesh_power_mode { * better. So if using this setting you will likely also want to disable * dot11MeshForwarding and use another mesh routing protocol on top. * + * @NL80211_MESHCONF_CONNECTED_TO_AS: If set to true then this mesh STA + * will advertise that it is connected to a authentication server + * in the mesh formation field. + * * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use */ enum nl80211_meshconf_params { @@ -4276,6 +4280,7 @@ enum nl80211_meshconf_params { NL80211_MESHCONF_PLINK_TIMEOUT, NL80211_MESHCONF_CONNECTED_TO_GATE, NL80211_MESHCONF_NOLEARN, + NL80211_MESHCONF_CONNECTED_TO_AS, /* keep last */ __NL80211_MESHCONF_ATTR_AFTER_LAST, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 9af56b848544..6a6531a50e54 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2128,6 +2128,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy, nconf->dot11MeshConnectedToMeshGate; if (_chg_mesh_attr(NL80211_MESHCONF_NOLEARN, mask)) conf->dot11MeshNolearn = nconf->dot11MeshNolearn; + if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_AS, mask)) + conf->dot11MeshConnectedToAuthServer = + nconf->dot11MeshConnectedToAuthServer; ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON); return 0; } diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 09eab2c3f380..fe8a7a87e513 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -639,6 +639,8 @@ IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration, IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate, u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC); IEEE80211_IF_FILE(dot11MeshNolearn, u.mesh.mshcfg.dot11MeshNolearn, DEC); +IEEE80211_IF_FILE(dot11MeshConnectedToAuthServer, + u.mesh.mshcfg.dot11MeshConnectedToAuthServer, DEC); #endif #define DEBUGFS_ADD_MODE(name, mode) \ @@ -764,6 +766,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata) MESHPARAMS_ADD(dot11MeshAwakeWindowDuration); MESHPARAMS_ADD(dot11MeshConnectedToMeshGate); MESHPARAMS_ADD(dot11MeshNolearn); + MESHPARAMS_ADD(dot11MeshConnectedToAuthServer); #undef MESHPARAMS_ADD } #endif diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 96f0323c0a3d..d0db6af16427 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -260,6 +260,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, bool is_connected_to_gate = 
ifmsh->num_gates > 0 || ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol || ifmsh->mshcfg.dot11MeshConnectedToMeshGate; + bool is_connected_to_as = ifmsh->mshcfg.dot11MeshConnectedToAuthServer; if (skb_tailroom(skb) < 2 + meshconf_len) return -ENOMEM; @@ -284,7 +285,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata, /* Mesh Formation Info - number of neighbors */ neighbors = atomic_read(&ifmsh->estab_plinks); neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS); - *pos++ = (neighbors << 1) | is_connected_to_gate; + *pos++ = (is_connected_to_as << 7) | + (neighbors << 1) | + is_connected_to_gate; /* Mesh capability */ *pos = 0x00; *pos |= ifmsh->mshcfg.dot11MeshForwarding ? diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 257c06315464..434fd06dc5cf 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -6887,7 +6887,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb, nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE, cur_params.dot11MeshConnectedToMeshGate) || nla_put_u8(msg, NL80211_MESHCONF_NOLEARN, - cur_params.dot11MeshNolearn)) + cur_params.dot11MeshNolearn) || + nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_AS, + cur_params.dot11MeshConnectedToAuthServer)) goto nla_put_failure; nla_nest_end(msg, pinfoattr); genlmsg_end(msg, hdr); @@ -6946,6 +6948,7 @@ nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = { [NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 }, [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1), [NL80211_MESHCONF_NOLEARN] = NLA_POLICY_RANGE(NLA_U8, 0, 1), + [NL80211_MESHCONF_CONNECTED_TO_AS] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static const struct nla_policy @@ -7058,6 +7061,9 @@ do { \ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask, NL80211_MESHCONF_CONNECTED_TO_GATE, nla_get_u8); + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToAuthServer, mask, + NL80211_MESHCONF_CONNECTED_TO_AS, + nla_get_u8); /* * Check HT operation mode based on * IEEE 802.11-2016 9.4.2.57 HT Operation element. From 1303a51c24100b3b1915d6f9072fe5ae5bb4c5f6 Mon Sep 17 00:00:00 2001 From: Markus Theil Date: Thu, 11 Jun 2020 16:02:38 +0200 Subject: [PATCH 1826/2056] cfg80211/mac80211: add connected to auth server to station info This patch adds the necessary bits to later query the auth server flag for every peer from iw. Signed-off-by: Markus Theil Link: https://lore.kernel.org/r/20200611140238.427461-2-markus.theil@tu-ilmenau.de Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 3 +++ include/uapi/linux/nl80211.h | 3 +++ net/mac80211/sta_info.c | 4 +++- net/mac80211/sta_info.h | 2 ++ net/wireless/nl80211.c | 1 + 5 files changed, 12 insertions(+), 1 deletion(-) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8d5071f84ffe..39fe21edd2c5 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1598,6 +1598,7 @@ struct cfg80211_tid_stats { * an FCS error. This counter should be incremented only when TA of the * received packet with an FCS error matches the peer MAC address. * @airtime_link_metric: mesh airtime link metric. 
+ * @connected_to_as: true if mesh STA has a path to authentication server */ struct station_info { u64 filled; @@ -1655,6 +1656,8 @@ struct station_info { u32 fcs_err_count; u32 airtime_link_metric; + + u8 connected_to_as; }; #if IS_ENABLED(CONFIG_CFG80211) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f1770e3756f4..d6b6599a6001 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3370,6 +3370,8 @@ enum nl80211_sta_bss_param { * @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station * @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds) * of STA's association + * @NL80211_STA_INFO_CONNECTED_TO_AS: set to true if STA has a path to a + * authentication server (u8, 0 or 1) * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3417,6 +3419,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_AIRTIME_WEIGHT, NL80211_STA_INFO_AIRTIME_LINK_METRIC, NL80211_STA_INFO_ASSOC_AT_BOOTTIME, + NL80211_STA_INFO_CONNECTED_TO_AS, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index cd8487bc6fc2..a39773b40457 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -2426,7 +2426,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, BIT_ULL(NL80211_STA_INFO_LOCAL_PM) | BIT_ULL(NL80211_STA_INFO_PEER_PM) | BIT_ULL(NL80211_STA_INFO_NONPEER_PM) | - BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE); + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE) | + BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_AS); sinfo->llid = sta->mesh->llid; sinfo->plid = sta->mesh->plid; @@ -2439,6 +2440,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo, sinfo->peer_pm = sta->mesh->peer_pm; sinfo->nonpeer_pm = sta->mesh->nonpeer_pm; sinfo->connected_to_gate = sta->mesh->connected_to_gate; + sinfo->connected_to_as = sta->mesh->connected_to_as; #endif } diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 49728047dfad..9d398c9daa4c 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -385,6 +385,7 @@ DECLARE_EWMA(mesh_tx_rate_avg, 8, 16) * @processed_beacon: set to true after peer rates and capabilities are * processed * @connected_to_gate: true if mesh STA has a path to a mesh gate + * @connected_to_as: true if mesh STA has a path to a authentication server * @fail_avg: moving percentage of failed MSDUs * @tx_rate_avg: moving average of tx bitrate */ @@ -404,6 +405,7 @@ struct mesh_sta { bool processed_beacon; bool connected_to_gate; + bool connected_to_as; enum nl80211_plink_state plink_state; u32 plink_timeout; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 434fd06dc5cf..13a38aab1565 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5395,6 +5395,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(PEER_PM, peer_pm, u32); PUT_SINFO(NONPEER_PM, nonpeer_pm, u32); PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8); + PUT_SINFO(CONNECTED_TO_AS, connected_to_as, u8); if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) { bss_param = nla_nest_start_noflag(msg, From 3ff901cb5df1d2102e924d75d91347a2a3070fa5 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 24 Jul 2020 20:28:16 +0200 Subject: [PATCH 1827/2056] mac80211: improve AQL tx airtime estimation AQL does not take into account that most HT/VHT/HE traffic is A-MPDU aggregated. 
Because of that, the per-packet airtime overhead is vastly overestimated. Improve it by assuming an average aggregation length of 16 for non-legacy traffic if not using the VO AC queue. This should improve performance with high data rates, especially with multiple stations Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200724182816.18678-1-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/airtime.c | 24 ++++++++++++++++++++---- net/mac80211/ieee80211_i.h | 2 +- net/mac80211/tx.c | 3 ++- 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 9fc2968856c0..366f76c9003d 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -551,7 +551,7 @@ EXPORT_SYMBOL_GPL(ieee80211_calc_tx_airtime); u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *pubsta, - int len) + int len, bool ampdu) { struct ieee80211_supported_band *sband; struct ieee80211_chanctx_conf *conf; @@ -572,10 +572,26 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, if (pubsta) { struct sta_info *sta = container_of(pubsta, struct sta_info, sta); + struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate; + u32 airtime; - return ieee80211_calc_tx_airtime_rate(hw, - &sta->tx_stats.last_rate, - band, len); + if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS | + IEEE80211_TX_RC_MCS))) + ampdu = false; + + /* + * Assume that HT/VHT transmission on any AC except VO will + * use aggregation. Since we don't have reliable reporting + * of aggregation length, assume an average of 16. + * This will not be very accurate, but much better than simply + * assuming un-aggregated tx. + */ + airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band, + ampdu ? 
len * 16 : len); + if (ampdu) + airtime /= 16; + + return airtime; } if (!conf) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index ec1a71ac65f2..28b154c6e72d 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -2290,7 +2290,7 @@ extern const struct ethtool_ops ieee80211_ethtool_ops; u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *pubsta, - int len); + int len, bool ampdu); #ifdef CONFIG_MAC80211_NOINLINE #define debug_noinline noinline #else diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0d0e901b1d4c..7c0abe477b03 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3722,10 +3722,11 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, if (vif && wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) { + bool ampdu = txq->ac != IEEE80211_AC_VO; u32 airtime; airtime = ieee80211_calc_expected_tx_airtime(hw, vif, txq->sta, - skb->len); + skb->len, ampdu); if (airtime) { airtime = ieee80211_info_set_tx_time_est(info, airtime); ieee80211_sta_update_pending_airtime(local, tx.sta, From 48a54f6bc456859dabbd1fbd805e233d260754cf Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 26 Jul 2020 15:09:46 +0200 Subject: [PATCH 1828/2056] net/fq_impl: use skb_get_hash instead of skb_get_hash_perturb This avoids unnecessarily regenerating the skb flow hash Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200726130947.88145-1-nbd@nbd.name [small commit message fixup] Signed-off-by: Johannes Berg --- include/net/fq.h | 1 - include/net/fq_impl.h | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/include/net/fq.h b/include/net/fq.h index 2ad85e683041..e39f3f8d5f8a 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -69,7 +69,6 @@ struct fq { struct list_head backlogs; spinlock_t lock; u32 flows_cnt; - siphash_key_t perturbation; u32 limit; u32 memory_limit; u32 memory_usage; diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index 38a9a3d1222b..e73d74d2fabf 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -108,7 +108,7 @@ static struct sk_buff *fq_tin_dequeue(struct fq *fq, static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) { - u32 hash = skb_get_hash_perturb(skb, &fq->perturbation); + u32 hash = skb_get_hash(skb); return reciprocal_scale(hash, fq->flows_cnt); } @@ -308,7 +308,6 @@ static int fq_init(struct fq *fq, int flows_cnt) INIT_LIST_HEAD(&fq->backlogs); spin_lock_init(&fq->lock); fq->flows_cnt = max_t(u32, flows_cnt, 1); - get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); fq->quantum = 300; fq->limit = 8192; fq->memory_limit = 16 << 20; /* 16 MBytes */ From 180ac48ee62f53c26787350a956c5ac371cbe0b7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 26 Jul 2020 15:09:47 +0200 Subject: [PATCH 1829/2056] mac80211: calculate skb hash early when using itxq This avoids flow separation issues when using software encryption. 
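A short illustration (not from either patch) of why the two changes work together: skb_get_hash() computes the flow hash once and caches it in skb->hash, while skb_get_hash_perturb() never cached its result. The skb_get_hash() call the next hunk adds to __ieee80211_subif_start_xmit() therefore runs before software encryption rewrites the frame, and the later fq lookup reuses a hash taken from the still-readable headers. The fq_flow_idx() logic above then amounts to:

static u32 example_flow_idx(struct fq *fq, struct sk_buff *skb)
{
	/* Returns the cached skb->hash if one was already computed
	 * (e.g. early in the xmit path), otherwise derives and caches it
	 * before scaling it into the flow table. */
	return reciprocal_scale(skb_get_hash(skb), fq->flows_cnt);
}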
Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200726130947.88145-2-nbd@nbd.name Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 7c0abe477b03..fd44a71eea58 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3952,6 +3952,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, if (local->ops->wake_tx_queue) { u16 queue = __ieee80211_select_queue(sdata, sta, skb); skb_set_queue_mapping(skb, queue); + skb_get_hash(skb); } if (sta) { From 322cd27c06450b2db2cb6bdc68f3814149baf767 Mon Sep 17 00:00:00 2001 From: P Praneesh Date: Thu, 9 Jul 2020 08:16:21 +0530 Subject: [PATCH 1830/2056] cfg80211/mac80211: avoid bss color setting in non-HE modes Adding bss-color configuration is only valid in HE mode. Earlier we have enabled it by default, irrespective of capabilities/mode. Fix that. Reported-by: kernel test robot Reported-by: Rajkumar Manoharan Signed-off-by: P Praneesh Link: https://lore.kernel.org/r/1594262781-21444-1-git-send-email-ppranees@codeaurora.org [fix up commit message] Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 8 +++++--- net/mac80211/mlme.c | 4 +++- net/wireless/nl80211.c | 3 +++ 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 6a6531a50e54..bbd86cd79954 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -991,9 +991,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, BSS_CHANGED_SSID | BSS_CHANGED_P2P_PS | BSS_CHANGED_TXPOWER | - BSS_CHANGED_TWT | - BSS_CHANGED_HE_OBSS_PD | - BSS_CHANGED_HE_BSS_COLOR; + BSS_CHANGED_TWT; int i, err; int prev_beacon_int; @@ -1019,6 +1017,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, sdata->vif.bss_conf.frame_time_rts_th = le32_get_bits(params->he_oper->he_oper_params, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); + changed |= BSS_CHANGED_HE_OBSS_PD; + + if (!params->he_bss_color.disabled) + changed |= BSS_CHANGED_HE_BSS_COLOR; } mutex_lock(&local->mtx); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1c1b3f6bc415..8a92a62dc54d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3463,7 +3463,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, bss_conf->he_bss_color.disabled = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); - changed |= BSS_CHANGED_HE_BSS_COLOR; + + if (!bss_conf->he_bss_color.disabled) + changed |= BSS_CHANGED_HE_BSS_COLOR; bss_conf->htc_trig_based_pkt_ext = le32_get_bits(elems->he_operation->he_oper_params, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 13a38aab1565..b4048f3c5134 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4865,6 +4865,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) memset(¶ms, 0, sizeof(params)); + /* disable BSS color by default */ + params.he_bss_color.disabled = true; + /* these are required for START_AP */ if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || From fd17dba1c860d39f655a3a08387c21e3ceca8c55 Mon Sep 17 00:00:00 2001 From: Veerendranath Jakkam Date: Mon, 20 Jul 2020 13:12:25 +0530 Subject: [PATCH 1831/2056] cfg80211: Add support to advertize OCV support Add a new feature flag that drivers can use to advertize support for Operating Channel Validation (OCV) when using driver's SME for RSNA handshakes. 
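For illustration only (not part of the patch): a driver whose SME actually implements OCV would advertise the flag added below before wiphy registration, roughly like this. mydrv_setup_wiphy() is a hypothetical setup hook; wiphy_ext_feature_set() is the existing cfg80211 helper for extended feature bits.

static void mydrv_setup_wiphy(struct wiphy *wiphy)
{
	/* Tell userspace (e.g. wpa_supplicant) that the driver's SME can
	 * perform Operating Channel Validation during RSNA handshakes. */
	wiphy_ext_feature_set(wiphy,
			      NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION);
}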
Signed-off-by: Veerendranath Jakkam Link: https://lore.kernel.org/r/20200720074225.8990-1-vjakkam@codeaurora.org Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d6b6599a6001..a3ae2b060a55 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -5804,6 +5804,9 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS: The driver * can report tx status for control port over nl80211 tx operations. * + * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating + * Channel Validation (OCV) when using driver's SME for RSNA handshakes. + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5859,6 +5862,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT, NL80211_EXT_FEATURE_SCAN_FREQ_KHZ, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS, + NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, From 1df2bdba528b5a7a30f1b107b6924aa79af5e00e Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:48 +0400 Subject: [PATCH 1832/2056] mac80211: never drop injected frames even if normally not allowed In ieee80211_tx_dequeue there is a check to see if the dequeued frame is allowed in the current state. Injected frames that are normally not allowed are being be dropped here. Fix this by checking if a frame was injected and if so always allowing it. Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-1-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index fd44a71eea58..007e070227fd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -3619,7 +3619,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, tx.skb = skb; tx.sdata = vif_to_sdata(info->control.vif); - if (txq->sta) { + if (txq->sta && !(info->flags & IEEE80211_TX_CTL_INJECTED)) { tx.sta = container_of(txq->sta, struct sta_info, sta); /* * Drop unicast frames to unauthorised stations unless they are From e02281e7a5c524aaf6a52bb01afc4cc49addb908 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:49 +0400 Subject: [PATCH 1833/2056] mac80211: add radiotap flag to prevent sequence number overwrite The radiotap specification contains a flag to indicate that the sequence number of an injected frame should not be overwritten. Parse this flag and define and set a corresponding Tx control flag. 
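For reference only (not part of the patch): an injector requests this behaviour by setting the IEEE80211_RADIOTAP_F_TX_NOSEQNO bit (0x0010, defined below) in the radiotap TX flags field of the header it prepends to the frame on a monitor interface. A minimal header carrying just that field (presence bit 15) could look like the following; the 802.11 frame behind it must already carry the sequence number the injector wants preserved.

#include <stdint.h>

static const uint8_t rtap_noseqno[10] = {
	0x00, 0x00,             /* it_version, it_pad */
	0x0a, 0x00,             /* it_len = 10, little endian */
	0x00, 0x80, 0x00, 0x00, /* it_present: bit 15 = TX flags */
	0x10, 0x00,             /* TX flags: IEEE80211_RADIOTAP_F_TX_NOSEQNO */
};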
Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-2-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- include/net/ieee80211_radiotap.h | 1 + include/net/mac80211.h | 3 +++ net/mac80211/tx.c | 2 ++ 3 files changed, 6 insertions(+) diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 459d355f6506..19c00d100096 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -117,6 +117,7 @@ enum ieee80211_radiotap_tx_flags { IEEE80211_RADIOTAP_F_TX_CTS = 0x0002, IEEE80211_RADIOTAP_F_TX_RTS = 0x0004, IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008, + IEEE80211_RADIOTAP_F_TX_NOSEQNO = 0x0010, }; /* for IEEE80211_RADIOTAP_MCS "have" flags */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c0a597167f14..21ce821a25e7 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -825,6 +825,8 @@ enum mac80211_tx_info_flags { * @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation * (header conversion) + * @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that + * has already been assigned to this frame. * * These flags are used in tx_info->control.flags. */ @@ -836,6 +838,7 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_FAST_XMIT = BIT(4), IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5), IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6), + IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), }; /* diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 007e070227fd..413345056445 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2085,6 +2085,8 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, txflags = get_unaligned_le16(iterator.this_arg); if (txflags & IEEE80211_RADIOTAP_F_TX_NOACK) info->flags |= IEEE80211_TX_CTL_NO_ACK; + if (txflags & IEEE80211_RADIOTAP_F_TX_NOSEQNO) + info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; break; case IEEE80211_RADIOTAP_RATE: From 29c3e95f79adcef9d7999ec643639033585dba08 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:50 +0400 Subject: [PATCH 1834/2056] mac80211: do not overwrite the sequence number if requested Check if the Tx control flag is set to prevent sequence number overwrites, and if so, do not assign a new sequence number to the transmitted frame. Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-3-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 413345056445..e1e915e7cc8e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -825,6 +825,9 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) if (ieee80211_is_qos_nullfunc(hdr->frame_control)) return TX_CONTINUE; + if (info->control.flags & IEEE80211_TX_CTRL_NO_SEQNO) + return TX_CONTINUE; + /* * Anything but QoS data that has a sequence number field * (is long enough) gets a sequence number from the global From 2b3dab1353209256cb3d7e07f14584eac8c82508 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:51 +0400 Subject: [PATCH 1835/2056] mac80211: use same flag everywhere to avoid sequence number overwrite Use the IEEE80211_TX_CTRL_NO_SEQNO flag in ieee80211_tx_info to mark probe requests whose sequence number must not be overwritten. 
This provides consistency with the radiotap flag that can be set to indicate that the sequence number of an injected frame should not be overwritten. Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-4-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- net/mac80211/ieee80211_i.h | 1 - net/mac80211/scan.c | 7 +++---- net/mac80211/tx.c | 2 -- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 28b154c6e72d..6f77018db23f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -164,7 +164,6 @@ typedef unsigned __bitwise ieee80211_tx_result; #define TX_DROP ((__force ieee80211_tx_result) 1u) #define TX_QUEUED ((__force ieee80211_tx_result) 2u) -#define IEEE80211_TX_NO_SEQNO BIT(0) #define IEEE80211_TX_UNICAST BIT(1) #define IEEE80211_TX_PS_BUFFERED BIT(2) diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 8003be6dae8a..032f55d38f7d 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -591,7 +591,6 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *channel) { struct sk_buff *skb; - u32 txdata_flags = 0; skb = ieee80211_build_probe_req(sdata, src, dst, ratemask, channel, ssid, ssid_len, @@ -600,15 +599,15 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, if (skb) { if (flags & IEEE80211_PROBE_FLAG_RANDOM_SN) { struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u16 sn = get_random_u32(); - txdata_flags |= IEEE80211_TX_NO_SEQNO; + info->control.flags |= IEEE80211_TX_CTRL_NO_SEQNO; hdr->seq_ctrl = cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; - ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, - txdata_flags); + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, 0); } } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index e1e915e7cc8e..b85876d69433 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -836,8 +836,6 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) */ if (!ieee80211_is_data_qos(hdr->frame_control) || is_multicast_ether_addr(hdr->addr1)) { - if (tx->flags & IEEE80211_TX_NO_SEQNO) - return TX_CONTINUE; /* driver should assign sequence number */ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; /* for pure STA mode without beacons, we can do it */ From 08aca29aa8b18dec2e84bc97d27dabe133b75822 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:52 +0400 Subject: [PATCH 1836/2056] mac80211: remove unused flags argument in transmit functions The flags argument in transmit functions is no longer being used and can be removed. 
Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-5-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- net/mac80211/cfg.c | 2 +- net/mac80211/ieee80211_i.h | 11 +++++------ net/mac80211/offchannel.c | 2 +- net/mac80211/rx.c | 2 +- net/mac80211/scan.c | 2 +- net/mac80211/sta_info.c | 2 +- net/mac80211/tx.c | 19 ++++++++----------- 7 files changed, 18 insertions(+), 22 deletions(-) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index bbd86cd79954..23ef0ebaf180 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -3590,7 +3590,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev, } local_bh_disable(); - ieee80211_xmit(sdata, sta, skb, 0); + ieee80211_xmit(sdata, sta, skb); local_bh_enable(); ret = 0; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 6f77018db23f..1760ce77d89f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1966,12 +1966,11 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata, bool bss_notify, bool enable_qos); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb, - u32 txdata_flags); + struct sta_info *sta, struct sk_buff *skb); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band, u32 txdata_flags); + enum nl80211_band band); /* sta_out needs to be checked for ERR_PTR() before using */ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, @@ -1981,10 +1980,10 @@ int ieee80211_lookup_ra_sta(struct ieee80211_sub_if_data *sdata, static inline void ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band, u32 txdata_flags) + enum nl80211_band band) { rcu_read_lock(); - __ieee80211_tx_skb_tid_band(sdata, skb, tid, band, txdata_flags); + __ieee80211_tx_skb_tid_band(sdata, skb, tid, band); rcu_read_unlock(); } @@ -2002,7 +2001,7 @@ static inline void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, } __ieee80211_tx_skb_tid_band(sdata, skb, tid, - chanctx_conf->def.chan->band, 0); + chanctx_conf->def.chan->band); rcu_read_unlock(); } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 7b842aa903c3..f470d1a7ce9b 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -264,7 +264,7 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, if (roc->mgmt_tx_cookie) { if (!WARN_ON(!roc->frame)) { ieee80211_tx_skb_tid_band(roc->sdata, roc->frame, 7, - roc->chan->band, 0); + roc->chan->band); roc->frame = NULL; } } else { diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8ea93735e47c..9ac3eaa00ce6 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3591,7 +3591,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) } __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, - status->band, 0); + status->band); } dev_kfree_skb(rx->skb); return RX_QUEUED; diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 032f55d38f7d..5ac2785cdc7b 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -607,7 +607,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata, cpu_to_le16(IEEE80211_SN_TO_SEQ(sn)); } IEEE80211_SKB_CB(skb)->flags |= tx_flags; - ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band, 0); + ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band); 
} } diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a39773b40457..bf9abfa25bc3 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1455,7 +1455,7 @@ static void ieee80211_send_null_response(struct sta_info *sta, int tid, } info->band = chanctx_conf->def.chan->band; - ieee80211_xmit(sdata, sta, skb, 0); + ieee80211_xmit(sdata, sta, skb); rcu_read_unlock(); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index b85876d69433..7fb4afaa9410 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1892,7 +1892,7 @@ EXPORT_SYMBOL(ieee80211_tx_prepare_skb); */ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, struct sk_buff *skb, - bool txpending, u32 txdata_flags) + bool txpending) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_data tx; @@ -1910,8 +1910,6 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, led_len = skb->len; res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); - tx.flags |= txdata_flags; - if (unlikely(res_prepare == TX_DROP)) { ieee80211_free_txskb(&local->hw, skb); return true; @@ -1979,8 +1977,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, } void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, - struct sta_info *sta, struct sk_buff *skb, - u32 txdata_flags) + struct sta_info *sta, struct sk_buff *skb) { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); @@ -2015,7 +2012,7 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, } ieee80211_set_qos_hdr(sdata, skb); - ieee80211_tx(sdata, sta, skb, false, txdata_flags); + ieee80211_tx(sdata, sta, skb, false); } static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, @@ -2350,7 +2347,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, if (!ieee80211_parse_tx_radiotap(local, skb)) goto fail_rcu; - ieee80211_xmit(sdata, NULL, skb, 0); + ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); return NETDEV_TX_OK; @@ -4014,7 +4011,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, ieee80211_tx_stats(dev, skb->len); - ieee80211_xmit(sdata, sta, skb, 0); + ieee80211_xmit(sdata, sta, skb); } goto out; out_free: @@ -4380,7 +4377,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local, return true; } info->band = chanctx_conf->def.chan->band; - result = ieee80211_tx(sdata, NULL, skb, true, 0); + result = ieee80211_tx(sdata, NULL, skb, true); } else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) { if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) { dev_kfree_skb(skb); @@ -5339,7 +5336,7 @@ EXPORT_SYMBOL(ieee80211_unreserve_tid); void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, int tid, - enum nl80211_band band, u32 txdata_flags) + enum nl80211_band band) { int ac = ieee80211_ac_from_tid(tid); @@ -5356,7 +5353,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata, */ local_bh_disable(); IEEE80211_SKB_CB(skb)->band = band; - ieee80211_xmit(sdata, NULL, skb, txdata_flags); + ieee80211_xmit(sdata, NULL, skb); local_bh_enable(); } From cb17ed29a7a5fea8c9bf70e8a05757d71650e025 Mon Sep 17 00:00:00 2001 From: Mathy Vanhoef Date: Thu, 23 Jul 2020 14:01:53 +0400 Subject: [PATCH 1837/2056] mac80211: parse radiotap header when selecting Tx queue Already parse the radiotap header in ieee80211_monitor_select_queue. 
In a subsequent commit this will allow us to add a radiotap flag that influences the queue on which injected packets will be sent. This also fixes the incomplete validation of the injected frame in ieee80211_monitor_select_queue: currently an out of bounds memory access may occur in the called function ieee80211_select_queue_80211 if the 802.11 header is too small. Note that in ieee80211_monitor_start_xmit the radiotap header is parsed again, which is necessary because ieee80211_monitor_select_queue is not always called beforehand. Signed-off-by: Mathy Vanhoef Link: https://lore.kernel.org/r/20200723100153.31631-6-Mathy.Vanhoef@kuleuven.be Signed-off-by: Johannes Berg --- include/net/mac80211.h | 8 +++++++ net/mac80211/iface.c | 15 ++++++++---- net/mac80211/tx.c | 54 +++++++++++++++++++----------------------- 3 files changed, 43 insertions(+), 34 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 21ce821a25e7..6e26f0ba6fd0 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6238,6 +6238,14 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb, int band, struct ieee80211_sta **sta); +/** + * Sanity-check and parse the radiotap header of injected frames + * @skb: packet injected by userspace + * @dev: the &struct device of this 802.11 device + */ +bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, + struct net_device *dev); + /** * struct ieee80211_noa_data - holds temporary data for tracking P2P NoA state * diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 570f818384e8..9740ae8fa697 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1183,17 +1183,24 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev, { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; - struct ieee80211_radiotap_header *rtap = (void *)skb->data; + int len_rthdr; if (local->hw.queues < IEEE80211_NUM_ACS) return 0; - if (skb->len < 4 || - skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) + /* reset flags and info before parsing radiotap header */ + memset(info, 0, sizeof(*info)); + + if (!ieee80211_parse_tx_radiotap(skb, dev)) return 0; /* doesn't matter, frame will be dropped */ - hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); + len_rthdr = ieee80211_get_radiotap_len(skb->data); + hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr); + if (skb->len < len_rthdr + 2 || + skb->len < len_rthdr + ieee80211_hdrlen(hdr->frame_control)) + return 0; /* doesn't matter, frame will be dropped */ return ieee80211_select_queue_80211(sdata, skb, hdr); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 7fb4afaa9410..ec35a92df8ed 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2015,9 +2015,10 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, ieee80211_tx(sdata, sta, skb, false); } -static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, - struct sk_buff *skb) +bool ieee80211_parse_tx_radiotap(struct sk_buff *skb, + struct net_device *dev) { + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_radiotap_iterator iterator; struct ieee80211_radiotap_header *rthdr = (struct ieee80211_radiotap_header *) skb->data; @@ -2036,6 +2037,18 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, u8 vht_mcs = 0, vht_nss = 0; int i; + /* check for
not even having the fixed radiotap header part */ + if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) + return false; /* too short to be possibly valid */ + + /* is it a header version we can trust to find length from? */ + if (unlikely(rthdr->it_version)) + return false; /* only version 0 is supported */ + + /* does the skb contain enough to deliver on the alleged length? */ + if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data))) + return false; /* skb too short for claimed rt header extent */ + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT | IEEE80211_TX_CTL_DONTFRAG; @@ -2189,13 +2202,6 @@ static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local, local->hw.max_rate_tries); } - /* - * remove the radiotap header - * iterator->_max_length was sanity-checked against - * skb->len by iterator init - */ - skb_pull(skb, iterator._max_length); - return true; } @@ -2204,8 +2210,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_radiotap_header *prthdr = - (struct ieee80211_radiotap_header *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr; struct ieee80211_sub_if_data *tmp_sdata, *sdata; @@ -2213,21 +2217,17 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, u16 len_rthdr; int hdrlen; - /* check for not even having the fixed radiotap header part */ - if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header))) - goto fail; /* too short to be possibly valid */ + memset(info, 0, sizeof(*info)); + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_CTL_INJECTED; - /* is it a header version we can trust to find length from? */ - if (unlikely(prthdr->it_version)) - goto fail; /* only version 0 is supported */ + /* Sanity-check and process the injection radiotap header */ + if (!ieee80211_parse_tx_radiotap(skb, dev)) + goto fail; - /* then there must be a radiotap header with a length we can use */ + /* we now know there is a radiotap header with a length we can use */ len_rthdr = ieee80211_get_radiotap_len(skb->data); - /* does the skb contain enough to deliver on the alleged length? */ - if (unlikely(skb->len < len_rthdr)) - goto fail; /* skb too short for claimed rt header extent */ - /* * fix up the pointers accounting for the radiotap * header still being in there. We are being given @@ -2273,11 +2273,6 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; } - memset(info, 0, sizeof(*info)); - - info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS | - IEEE80211_TX_CTL_INJECTED; - rcu_read_lock(); /* @@ -2343,9 +2338,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb, info->band = chandef->chan->band; - /* process and remove the injection radiotap header */ - if (!ieee80211_parse_tx_radiotap(local, skb)) - goto fail_rcu; + /* remove the injection radiotap header */ + skb_pull(skb, len_rthdr); ieee80211_xmit(sdata, NULL, skb); rcu_read_unlock(); From c5d1686b314ea44c5f210990dee05caf98cb068f Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sun, 26 Jul 2020 13:06:11 +0200 Subject: [PATCH 1838/2056] mac80211: add a function for running rx without passing skbs to the stack This can be used to run mac80211 rx processing on a batch of frames in NAPI poll before passing them to the network stack in a large batch. 
This can improve icache footprint, or it can be used to pass frames via netif_receive_skb_list. Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20200726110611.46886-1-nbd@nbd.name Signed-off-by: Johannes Berg --- include/net/mac80211.h | 25 ++++++++++++++++ net/mac80211/ieee80211_i.h | 2 +- net/mac80211/rx.c | 60 ++++++++++++++++++++++++-------------- 3 files changed, 64 insertions(+), 23 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 6e26f0ba6fd0..66e2bfd165e8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4360,6 +4360,31 @@ void ieee80211_free_hw(struct ieee80211_hw *hw); */ void ieee80211_restart_hw(struct ieee80211_hw *hw); +/** + * ieee80211_rx_list - receive frame and store processed skbs in a list + * + * Use this function to hand received frames to mac80211. The receive + * buffer in @skb must start with an IEEE 802.11 header. In case of a + * paged @skb is used, the driver is recommended to put the ieee80211 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. Calls to + * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be + * mixed for a single hardware. Must not run concurrently with + * ieee80211_tx_status() or ieee80211_tx_status_ni(). + * + * This function must be called with BHs disabled and RCU read lock + * + * @hw: the hardware this frame came in on + * @sta: the station the frame was received from, or %NULL + * @skb: the buffer to receive, owned by mac80211 after this call + * @list: the destination list + */ +void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + struct sk_buff *skb, struct list_head *list); + /** * ieee80211_rx_napi - receive frame from NAPI context * diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1760ce77d89f..0b1eaec6649f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -217,7 +217,7 @@ enum ieee80211_rx_flags { }; struct ieee80211_rx_data { - struct napi_struct *napi; + struct list_head *list; struct sk_buff *skb; struct ieee80211_local *local; struct ieee80211_sub_if_data *sdata; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 9ac3eaa00ce6..836cde516a18 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2578,8 +2578,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, memset(skb->cb, 0, sizeof(skb->cb)); /* deliver to local stack */ - if (rx->napi) - napi_gro_receive(rx->napi, skb); + if (rx->list) + list_add_tail(&skb->list, rx->list); else netif_receive_skb(skb); } @@ -3869,7 +3869,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) /* This is OK -- must be QoS data frame */ .security_idx = tid, .seqno_idx = tid, - .napi = NULL, /* must be NULL to not have races */ }; struct tid_ampdu_rx *tid_agg_rx; @@ -4479,8 +4478,8 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, /* deliver to local stack */ skb->protocol = eth_type_trans(skb, fast_rx->dev); memset(skb->cb, 0, sizeof(skb->cb)); - if (rx->napi) - napi_gro_receive(rx->napi, skb); + if (rx->list) + list_add_tail(&skb->list, rx->list); else netif_receive_skb(skb); @@ -4547,7 +4546,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, struct ieee80211_sta 
*pubsta, struct sk_buff *skb, - struct napi_struct *napi) + struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_sub_if_data *sdata; @@ -4562,7 +4561,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, memset(&rx, 0, sizeof(rx)); rx.skb = skb; rx.local = local; - rx.napi = napi; + rx.list = list; if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) I802_DEBUG_INC(local->dot11ReceivedFragmentCount); @@ -4670,8 +4669,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, * This is the receive path handler. It is called by a low level driver when an * 802.11 MPDU is received from the hardware. */ -void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, - struct sk_buff *skb, struct napi_struct *napi) +void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct list_head *list) { struct ieee80211_local *local = hw_to_local(hw); struct ieee80211_rate *rate = NULL; @@ -4762,13 +4761,6 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, status->rx_flags = 0; - /* - * key references and virtual interfaces are protected using RCU - * and this requires that we are in a read-side RCU section during - * receive processing - */ - rcu_read_lock(); - /* * Frames with failed FCS/PLCP checksum are not returned, * all other frames are returned without radiotap header @@ -4776,23 +4768,47 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, * Also, frames with less than 16 bytes are dropped. */ skb = ieee80211_rx_monitor(local, skb, rate); - if (!skb) { - rcu_read_unlock(); + if (!skb) return; - } ieee80211_tpt_led_trig_rx(local, ((struct ieee80211_hdr *)skb->data)->frame_control, skb->len); - __ieee80211_rx_handle_packet(hw, pubsta, skb, napi); - - rcu_read_unlock(); + __ieee80211_rx_handle_packet(hw, pubsta, skb, list); return; drop: kfree_skb(skb); } +EXPORT_SYMBOL(ieee80211_rx_list); + +void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, + struct sk_buff *skb, struct napi_struct *napi) +{ + struct sk_buff *tmp; + LIST_HEAD(list); + + + /* + * key references and virtual interfaces are protected using RCU + * and this requires that we are in a read-side RCU section during + * receive processing + */ + rcu_read_lock(); + ieee80211_rx_list(hw, pubsta, skb, &list); + rcu_read_unlock(); + + if (!napi) { + netif_receive_skb_list(&list); + return; + } + + list_for_each_entry_safe(skb, tmp, &list, list) { + skb_list_del_init(skb); + napi_gro_receive(napi, skb); + } +} EXPORT_SYMBOL(ieee80211_rx_napi); /* This is a version of the rx handler that can be called from hard irq From 75e6b594bbaeeb3f8287a2e6eb8811384b8c7195 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 30 Jul 2020 13:00:52 +0200 Subject: [PATCH 1839/2056] cfg80211: invert HE BSS color 'disabled' to 'enabled' This is in fact 'disabled' in the spec, but there it's in a place where that actually makes sense. In our internal data structures, it doesn't really make sense, and in fact the previous commit just fixed a bug in that area. Make this safer by inverting the polarity from 'disabled' to 'enabled'. 
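As a sketch of the consumer side (the function name here is invented; the ath11k hunk in the diff below is the real example), drivers now test the positive flag instead of negating 'disabled':

/* Illustrative only: reading the renamed field in driver code. */
static void example_apply_he_bss_color(const struct cfg80211_he_bss_color *color)
{
	if (!color->enabled)
		return;		/* BSS coloring not in use */

	/* program color->color here; color->partial selects the AID equation */
}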
Link: https://lore.kernel.org/r/20200730130051.5d8399545bd9.Ie62fdcd1a6cd9c969315bc124084a494ca6c8df3@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath11k/mac.c | 2 +- include/net/cfg80211.h | 4 ++-- net/mac80211/cfg.c | 2 +- net/mac80211/mlme.c | 8 ++++---- net/wireless/nl80211.c | 7 ++----- 5 files changed, 10 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 07d3e031c75a..94ae2b9ea663 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -2072,7 +2072,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw, ret = ath11k_wmi_send_obss_color_collision_cfg_cmd( ar, arvif->vdev_id, info->he_bss_color.color, ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS, - !info->he_bss_color.disabled); + info->he_bss_color.enabled); if (ret) ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", arvif->vdev_id, ret); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 39fe21edd2c5..d9e6b9fbd95b 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -267,12 +267,12 @@ struct ieee80211_he_obss_pd { * struct cfg80211_he_bss_color - AP settings for BSS coloring * * @color: the current color. - * @disabled: is the feature disabled. + * @enabled: HE BSS color is used * @partial: define the AID equation. */ struct cfg80211_he_bss_color { u8 color; - bool disabled; + bool enabled; bool partial; }; diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 23ef0ebaf180..24e9e00decb8 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1019,7 +1019,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); changed |= BSS_CHANGED_HE_OBSS_PD; - if (!params->he_bss_color.disabled) + if (params->he_bss_color.enabled) changed |= BSS_CHANGED_HE_BSS_COLOR; } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8a92a62dc54d..839d0367446c 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3460,11 +3460,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata, bss_conf->he_bss_color.partial = le32_get_bits(elems->he_operation->he_oper_params, IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR); - bss_conf->he_bss_color.disabled = - le32_get_bits(elems->he_operation->he_oper_params, - IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); + bss_conf->he_bss_color.enabled = + !le32_get_bits(elems->he_operation->he_oper_params, + IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); - if (!bss_conf->he_bss_color.disabled) + if (bss_conf->he_bss_color.enabled) changed |= BSS_CHANGED_HE_BSS_COLOR; bss_conf->htc_trig_based_pkt_ext = diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index b4048f3c5134..8d78a6fc59a3 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4713,8 +4713,8 @@ static int nl80211_parse_he_bss_color(struct nlattr *attrs, he_bss_color->color = nla_get_u8(tb[NL80211_HE_BSS_COLOR_ATTR_COLOR]); - he_bss_color->disabled = - nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); + he_bss_color->enabled = + !nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_DISABLED]); he_bss_color->partial = nla_get_flag(tb[NL80211_HE_BSS_COLOR_ATTR_PARTIAL]); @@ -4865,9 +4865,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) memset(¶ms, 0, sizeof(params)); - /* disable BSS color by default */ - params.he_bss_color.disabled = true; - /* these are required for START_AP */ if 
(!info->attrs[NL80211_ATTR_BEACON_INTERVAL] || !info->attrs[NL80211_ATTR_DTIM_PERIOD] || From f96622749a67d40ad5efe8a58d5fc95313097aa0 Mon Sep 17 00:00:00 2001 From: Chung-Hsien Hsu Date: Tue, 23 Jun 2020 08:49:35 -0500 Subject: [PATCH 1840/2056] nl80211: support 4-way handshake offloading for WPA/WPA2-PSK in AP mode Let drivers advertise support for AP-mode WPA/WPA2-PSK 4-way handshake offloading with a new NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag. Extend use of NL80211_ATTR_PMK attribute indicating it might be passed as part of NL80211_CMD_START_AP command, and contain the PSK (which is the PMK, hence the name). The driver is assumed to handle the 4-way handshake by itself in this case, instead of relying on userspace. Signed-off-by: Chung-Hsien Hsu Signed-off-by: Chi-Hsien Lin Link: https://lore.kernel.org/r/20200623134938.39997-2-chi-hsien.lin@cypress.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 41 ++++++++++++++++++++++++------------ net/wireless/nl80211.c | 4 +++- 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index a3ae2b060a55..631f3a997b3c 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -183,18 +183,27 @@ * * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK flag drivers * can indicate they support offloading EAPOL handshakes for WPA/WPA2 - * preshared key authentication. In %NL80211_CMD_CONNECT the preshared - * key should be specified using %NL80211_ATTR_PMK. Drivers supporting - * this offload may reject the %NL80211_CMD_CONNECT when no preshared - * key material is provided, for example when that driver does not - * support setting the temporal keys through %CMD_NEW_KEY. + * preshared key authentication in station mode. In %NL80211_CMD_CONNECT + * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers + * supporting this offload may reject the %NL80211_CMD_CONNECT when no + * preshared key material is provided, for example when that driver does + * not support setting the temporal keys through %NL80211_CMD_NEW_KEY. * * Similarly @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_1X flag can be * set by drivers indicating offload support of the PTK/GTK EAPOL - * handshakes during 802.1X authentication. In order to use the offload - * the %NL80211_CMD_CONNECT should have %NL80211_ATTR_WANT_1X_4WAY_HS - * attribute flag. Drivers supporting this offload may reject the - * %NL80211_CMD_CONNECT when the attribute flag is not present. + * handshakes during 802.1X authentication in station mode. In order to + * use the offload the %NL80211_CMD_CONNECT should have + * %NL80211_ATTR_WANT_1X_4WAY_HS attribute flag. Drivers supporting this + * offload may reject the %NL80211_CMD_CONNECT when the attribute flag is + * not present. + * + * By setting @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK flag drivers + * can indicate they support offloading EAPOL handshakes for WPA/WPA2 + * preshared key authentication in AP mode. In %NL80211_CMD_START_AP + * the preshared key should be specified using %NL80211_ATTR_PMK. Drivers + * supporting this offload may reject the %NL80211_CMD_START_AP when no + * preshared key material is provided, for example when that driver does + * not support setting the temporal keys through %NL80211_CMD_NEW_KEY. * * For 802.1X the PMK or PMK-R0 are set by providing %NL80211_ATTR_PMK * using %NL80211_CMD_SET_PMK. 
For offloaded FT support also @@ -2362,10 +2371,11 @@ enum nl80211_commands { * * @NL80211_ATTR_PMK: attribute for passing PMK key material. Used with * %NL80211_CMD_SET_PMKSA for the PMKSA identified by %NL80211_ATTR_PMKID. - * For %NL80211_CMD_CONNECT it is used to provide PSK for offloading 4-way - * handshake for WPA/WPA2-PSK networks. For 802.1X authentication it is - * used with %NL80211_CMD_SET_PMK. For offloaded FT support this attribute - * specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME is included as well. + * For %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP it is used to provide + * PSK for offloading 4-way handshake for WPA/WPA2-PSK networks. For 802.1X + * authentication it is used with %NL80211_CMD_SET_PMK. For offloaded FT + * support this attribute specifies the PMK-R0 if NL80211_ATTR_PMKR0_NAME + * is included as well. * * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to * indicate that it supports multiple active scheduled scan requests. @@ -5807,6 +5817,10 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION: Driver supports Operating * Channel Validation (OCV) when using driver's SME for RSNA handshakes. * + * @NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK: Device wants to do 4-way + * handshake with PSK in AP mode (PSK is passed as part of the start AP + * command). + * * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index. */ @@ -5863,6 +5877,7 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_SCAN_FREQ_KHZ, NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS, NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8d78a6fc59a3..a096682ec0ad 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -9442,7 +9442,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, if (nla_len(info->attrs[NL80211_ATTR_PMK]) != WLAN_PMK_LEN) return -EINVAL; if (!wiphy_ext_feature_isset(&rdev->wiphy, - NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK)) + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_STA_PSK) && + !wiphy_ext_feature_isset(&rdev->wiphy, + NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK)) return -EINVAL; settings->psk = nla_data(info->attrs[NL80211_ATTR_PMK]); } From c8ad010665c0b85c6a35466b2159e907b8dd85d1 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 30 Jul 2020 15:52:13 +0200 Subject: [PATCH 1841/2056] mac80211: warn only once in check_sdata_in_driver() at each caller Ben Greear has repeatedly reported in the past (for a few years probably) that this triggers repeatedly in certain scenarios. Make this a macro so that each callsite can trigger the warning only once - that will still give us an idea of what's going on and what paths can reach it, but avoids being too noisy. 
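The per-callsite behaviour comes from where WARN_ONCE() keeps its one-shot state; the sketch below (illustrative names, not from the patch) contrasts the two forms:

#include <linux/bug.h>

/*
 * Illustrative only.  WARN_ONCE() declares a static "already warned"
 * flag at the point where it expands.  Inside an inline helper there is
 * one expansion, so callers in the same translation unit share a single
 * flag and the first trigger silences the rest:
 */
static inline bool helper_check(bool ok)
{
	return !WARN_ONCE(!ok, "check failed\n");
}

/*
 * As a macro, every call site gets its own expansion and its own flag,
 * so each distinct path still reports once:
 */
#define macro_check(ok) \
	(!WARN_ONCE(!(ok), "check failed\n"))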
Link: https://lore.kernel.org/r/20200730155212.06fd3a95dbfb.I0b16829aabfaf5f642bce401502a29d16e2dd444@changeid Signed-off-by: Johannes Berg --- net/mac80211/driver-ops.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index de69fc9c4f07..41d495d73d3a 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -12,12 +12,11 @@ #include "ieee80211_i.h" #include "trace.h" -static inline bool check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) -{ - return !WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), - "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", - sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); -} +#define check_sdata_in_driver(sdata) ({ \ + !WARN_ONCE(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER), \ + "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", \ + sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); \ +}) static inline struct ieee80211_sub_if_data * get_bss_sdata(struct ieee80211_sub_if_data *sdata) From 79bf118957a1966344b247d240434d8047d02ac9 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 30 Jul 2020 19:40:11 +0200 Subject: [PATCH 1842/2056] Bluetooth: Increment management interface revision Increment the mgmt revision due to the recently added new commands. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- net/bluetooth/mgmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 4ec0fee80344..5bbe71002fb9 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -40,7 +40,7 @@ #include "msft.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 17 +#define MGMT_REVISION 18 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, From 075f77324f90149bac12c8a705dae5786a1d24fb Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 31 Jul 2020 17:41:58 +1000 Subject: [PATCH 1843/2056] Bluetooth: Remove CRYPTO_ALG_INTERNAL flag The flag CRYPTO_ALG_INTERNAL is not meant to be used outside of the Crypto API. It isn't needed here anyway. 
Signed-off-by: Herbert Xu Signed-off-by: Johan Hedberg --- net/bluetooth/selftest.c | 2 +- net/bluetooth/smp.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c index 03e3c89c3046..f71c6fa65fb3 100644 --- a/net/bluetooth/selftest.c +++ b/net/bluetooth/selftest.c @@ -205,7 +205,7 @@ static int __init test_ecdh(void) calltime = ktime_get(); - tfm = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + tfm = crypto_alloc_kpp("ecdh", 0, 0); if (IS_ERR(tfm)) { BT_ERR("Unable to create ECDH crypto context"); err = PTR_ERR(tfm); diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c5c812e8130e..4cfd05efc548 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1391,7 +1391,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) goto zfree_smp; } - smp->tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + smp->tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); if (IS_ERR(smp->tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); goto free_shash; @@ -3286,7 +3286,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) return ERR_CAST(tfm_cmac); } - tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); @@ -3851,7 +3851,7 @@ int __init bt_selftest_smp(void) return PTR_ERR(tfm_cmac); } - tfm_ecdh = crypto_alloc_kpp("ecdh", CRYPTO_ALG_INTERNAL, 0); + tfm_ecdh = crypto_alloc_kpp("ecdh", 0, 0); if (IS_ERR(tfm_ecdh)) { BT_ERR("Unable to create ECDH crypto context"); crypto_free_shash(tfm_cmac); From 1acf8f90ea7ee59006d0474275922145ac291331 Mon Sep 17 00:00:00 2001 From: Jerry Crunchtime Date: Fri, 31 Jul 2020 17:08:01 +0200 Subject: [PATCH 1844/2056] libbpf: Fix register in PT_REGS MIPS macros The o32, n32 and n64 calling conventions require the return value to be stored in $v0 which maps to $2 register, i.e., the register 2. 
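As a usage sketch (not from this patch): a BPF kretprobe compiled for MIPS reads the probed function's return value through the corrected macro, so it must resolve to $v0/$2. The probed function and message are arbitrary, and the build setup (vmlinux.h or equivalent for struct pt_regs, plus -D__TARGET_ARCH_mips) is assumed.

/*
 * Illustrative only; assumes the usual libbpf tracing setup:
 * vmlinux.h (or equivalent) included first for struct pt_regs, and the
 * program built with -D__TARGET_ARCH_mips so bpf_tracing.h selects the
 * MIPS register mapping.
 */
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("kretprobe/vfs_read")
int trace_vfs_read_ret(struct pt_regs *ctx)
{
	long ret = PT_REGS_RC(ctx);	/* $v0, i.e. regs[2], on MIPS */

	bpf_printk("vfs_read returned %ld", ret);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";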
Fixes: c1932cd ("bpf: Add MIPS support to samples/bpf.") Signed-off-by: Jerry Crunchtime Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/43707d31-0210-e8f0-9226-1af140907641@web.de --- tools/lib/bpf/bpf_tracing.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 58eceb884df3..eebf020cbe3e 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -215,7 +215,7 @@ struct pt_regs; #define PT_REGS_PARM5(x) ((x)->regs[8]) #define PT_REGS_RET(x) ((x)->regs[31]) #define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */ -#define PT_REGS_RC(x) ((x)->regs[1]) +#define PT_REGS_RC(x) ((x)->regs[2]) #define PT_REGS_SP(x) ((x)->regs[29]) #define PT_REGS_IP(x) ((x)->cp0_epc) @@ -226,7 +226,7 @@ struct pt_regs; #define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8]) #define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31]) #define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30]) -#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[2]) #define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29]) #define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc) From ffba964e4d1126bef5d636e8af70f052e50342fc Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Fri, 31 Jul 2020 16:29:02 +0800 Subject: [PATCH 1845/2056] Documentation/bpf: Use valid and new links in index.rst There exists an error "404 Not Found" when I click the html link of "Documentation/networking/filter.rst" in the BPF documentation [1], fix it. Additionally, use the new links about "BPF and XDP Reference Guide" and "bpf(2)" to avoid redirects. [1] https://www.kernel.org/doc/html/latest/bpf/ Fixes: d9b9170a2653 ("docs: bpf: Rename README.rst to index.rst") Fixes: cb3f0d56e153 ("docs: networking: convert filter.txt to ReST") Signed-off-by: Tiezhu Yang Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/1596184142-18476-1-git-send-email-yangtiezhu@loongson.cn --- Documentation/bpf/index.rst | 12 ++++++------ Documentation/networking/filter.rst | 2 ++ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 26f4bb3107fc..44ca8ea5a723 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -5,10 +5,10 @@ BPF Documentation This directory contains documentation for the BPF (Berkeley Packet Filter) facility, with a focus on the extended BPF version (eBPF). -This kernel side documentation is still work in progress. The main +This kernel side documentation is still work in progress. The main textual documentation is (for historical reasons) described in -`Documentation/networking/filter.rst`_, which describe both classical -and extended BPF instruction-set. +:ref:`networking-filter`, which describe both classical and extended +BPF instruction-set. The Cilium project also maintains a `BPF and XDP Reference Guide`_ that goes into great technical depth about the BPF Architecture. @@ -68,7 +68,7 @@ Testing and debugging BPF .. Links: -.. _Documentation/networking/filter.rst: ../networking/filter.txt +.. _networking-filter: ../networking/filter.rst .. _man-pages: https://www.kernel.org/doc/man-pages/ -.. _bpf(2): http://man7.org/linux/man-pages/man2/bpf.2.html -.. _BPF and XDP Reference Guide: http://cilium.readthedocs.io/en/latest/bpf/ +.. _bpf(2): https://man7.org/linux/man-pages/man2/bpf.2.html +.. 
_BPF and XDP Reference Guide: https://docs.cilium.io/en/latest/bpf/ diff --git a/Documentation/networking/filter.rst b/Documentation/networking/filter.rst index a1d3e192b9fa..debb59e374de 100644 --- a/Documentation/networking/filter.rst +++ b/Documentation/networking/filter.rst @@ -1,5 +1,7 @@ .. SPDX-License-Identifier: GPL-2.0 +.. _networking-filter: + ======================================================= Linux Socket Filtering aka Berkeley Packet Filter (BPF) ======================================================= From ffe8923f109b7ea92c0842c89e61300eefa11c94 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Jul 2020 13:34:46 +0200 Subject: [PATCH 1846/2056] netfilter: nft_compat: make sure xtables destructors have run Pablo Neira found that after a recent update of xt_IDLETIMER the iptables-nft tests sometimes show an error. He tracked this down to the delayed cleanup used by nf_tables core: del rule (transaction A) add rule (transaction B) It's possible that by the time transaction B (both in the same netns) runs, the xt target destructor has not been invoked yet. For native nft expressions this is no problem because all expressions that have such side effects make sure these are handled from the commit phase, rather than async cleanup. For nft_compat however this isn't true. Instead of forcing synchronous behaviour for nft_compat, keep track of the number of outstanding destructor calls. When we attempt to create a new expression, flush the cleanup worker to make sure destructors have completed. With lots of help from Pablo Neira. Reported-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 10 +++++++-- net/netfilter/nft_compat.c | 36 +++++++++++++++++++++++++++---- 3 files changed, 42 insertions(+), 6 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6f0f6fca9ac3..2571c09be8bb 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1498,4 +1498,6 @@ void nft_chain_filter_fini(void); void __init nft_chain_route_init(void); void nft_chain_route_fini(void); + +void nf_tables_trans_destroy_flush_work(void); #endif /* _NET_NF_TABLES_H */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 88325b264737..79e4db3cadec 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7290,6 +7290,12 @@ static void nf_tables_trans_destroy_work(struct work_struct *w) } } +void nf_tables_trans_destroy_flush_work(void) +{ + flush_work(&trans_destroy_work); +} +EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work); + static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain) { struct nft_rule *rule; @@ -7472,9 +7478,9 @@ static void nf_tables_commit_release(struct net *net) spin_unlock(&nf_tables_destroy_list_lock); nf_tables_module_autoload_cleanup(net); - mutex_unlock(&net->nft.commit_mutex); - schedule_work(&trans_destroy_work); + + mutex_unlock(&net->nft.commit_mutex); } static int nf_tables_commit(struct net *net, struct sk_buff *skb) diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index aa1a066cb74b..6428856ccbec 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -27,6 +27,8 @@ struct nft_xt_match_priv { void *info; }; +static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1); + static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx, const char
*tablename) { @@ -236,6 +238,15 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv); + /* xtables matches or targets can have side effects, e.g. + * creation/destruction of /proc files. + * The xt ->destroy functions are run asynchronously from + * work queue. If we have pending invocations we thus + * need to wait for those to finish. + */ + if (refcount_read(&nft_compat_pending_destroy) > 1) + nf_tables_trans_destroy_flush_work(); + ret = xt_check_target(&par, size, proto, inv); if (ret < 0) return ret; @@ -247,6 +258,13 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, return 0; } +static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr) +{ + refcount_dec(&nft_compat_pending_destroy); + module_put(me); + kfree(expr->ops); +} + static void nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { @@ -262,8 +280,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) if (par.target->destroy != NULL) par.target->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static int nft_extension_dump_info(struct sk_buff *skb, int attr, @@ -494,8 +511,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, if (par.match->destroy != NULL) par.match->destroy(&par); - module_put(me); - kfree(expr->ops); + __nft_mt_tg_destroy(me, expr); } static void @@ -700,6 +716,14 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { static struct nft_expr_type nft_match_type; +static void nft_mt_tg_deactivate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + enum nft_trans_phase phase) +{ + if (phase == NFT_TRANS_COMMIT) + refcount_inc(&nft_compat_pending_destroy); +} + static const struct nft_expr_ops * nft_match_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -738,6 +762,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, ops->type = &nft_match_type; ops->eval = nft_match_eval; ops->init = nft_match_init; + ops->deactivate = nft_mt_tg_deactivate, ops->destroy = nft_match_destroy; ops->dump = nft_match_dump; ops->validate = nft_match_validate; @@ -828,6 +853,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); ops->init = nft_target_init; ops->destroy = nft_target_destroy; + ops->deactivate = nft_mt_tg_deactivate, ops->dump = nft_target_dump; ops->validate = nft_target_validate; ops->data = target; @@ -891,6 +917,8 @@ static void __exit nft_compat_module_exit(void) nfnetlink_subsys_unregister(&nfnl_compat_subsys); nft_unregister_expr(&nft_target_type); nft_unregister_expr(&nft_match_type); + + WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1); } MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); From 83a33b248763f081bbccc7f7f61264883a9e3338 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Fri, 31 Jul 2020 16:15:03 -0700 Subject: [PATCH 1847/2056] bluetooth: sco: Fix sockptr reference. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit net/bluetooth/sco.c: In function ‘sco_sock_setsockopt’: net/bluetooth/sco.c:862:3: error: cannot convert to a pointer type 862 | if (get_user(opt, (u32 __user *)optval)) { | ^~ Signed-off-by: David S. 
Miller --- net/bluetooth/sco.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 6e6b03844a2a..dcf7f96ff417 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -859,7 +859,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, break; case BT_PKT_STATUS: - if (get_user(opt, (u32 __user *)optval)) { + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { err = -EFAULT; break; } From 9fc95f50eedb5cfe5d53a9c970c47ed680fc4e54 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Thu, 30 Jul 2020 19:02:36 +0800 Subject: [PATCH 1848/2056] net: Pass NULL to skb_network_protocol() when we don't care about vlan depth When we don't care about vlan depth, we could pass NULL instead of the address of an unused local variable to skb_network_protocol() as a param. Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- net/core/dev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index fe2e387eed29..38a6371d9bc5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3454,10 +3454,9 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb, static netdev_features_t harmonize_features(struct sk_buff *skb, netdev_features_t features) { - int tmp; __be16 type; - type = skb_network_protocol(skb, &tmp); + type = skb_network_protocol(skb, NULL); features = net_mpls_features(skb, features, type); if (skb->ip_summed != CHECKSUM_NONE && From eff73e16ee116f6eafa2be48fab42659a27cb453 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 30 Jul 2020 17:01:18 +0200 Subject: [PATCH 1849/2056] s390/qeth: tolerate pre-filled RX buffer When preparing a buffer for RX refill, tolerate that it already has a pool_entry attached. Otherwise we could easily leak such a pool_entry when re-driving the RX refill after an error (from e.g. do_qdio()). This needs some minor adjustment in the code that drains RX buffer(s) prior to RX refill and during teardown, so that ->pool_entry is NULLed accordingly. Fixes: 4a71df50047f ("qeth: new qeth device driver") Signed-off-by: Julian Wiedmann Signed-off-by: David S.
Miller --- drivers/s390/net/qeth_core_main.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 8a76022fceda..b356ee5de9f8 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -204,12 +204,17 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); void qeth_clear_working_pool_list(struct qeth_card *card) { struct qeth_buffer_pool_entry *pool_entry, *tmp; + struct qeth_qdio_q *queue = card->qdio.in_q; + unsigned int i; QETH_CARD_TEXT(card, 5, "clwrklst"); list_for_each_entry_safe(pool_entry, tmp, &card->qdio.in_buf_pool.entry_list, list){ list_del(&pool_entry->list); } + + for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) + queue->bufs[i].pool_entry = NULL; } EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); @@ -2965,7 +2970,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( static int qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) { - struct qeth_buffer_pool_entry *pool_entry; + struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; int i; if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { @@ -2976,9 +2981,13 @@ static int qeth_init_input_buffer(struct qeth_card *card, return -ENOMEM; } - pool_entry = qeth_find_free_buffer_pool_entry(card); - if (!pool_entry) - return -ENOBUFS; + if (!pool_entry) { + pool_entry = qeth_find_free_buffer_pool_entry(card); + if (!pool_entry) + return -ENOBUFS; + + buf->pool_entry = pool_entry; + } /* * since the buffer is accessed only from the input_tasklet @@ -2986,8 +2995,6 @@ static int qeth_init_input_buffer(struct qeth_card *card, * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off * buffers */ - - buf->pool_entry = pool_entry; for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { buf->buffer->element[i].length = PAGE_SIZE; buf->buffer->element[i].addr = @@ -5771,6 +5778,7 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) if (done) { QETH_CARD_STAT_INC(card, rx_bufs); qeth_put_buffer_pool_entry(card, buffer->pool_entry); + buffer->pool_entry = NULL; qeth_queue_input_buffer(card, card->rx.b_index); card->rx.b_count--; From 7c94a88295008f66e9a0efc1c15d8511f73a0366 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 30 Jul 2020 17:01:19 +0200 Subject: [PATCH 1850/2056] s390/qeth: integrate RX refill worker with NAPI Running a RX refill outside of NAPI context is inherently racy, even though the worker is only started for an entirely idle RX ring. >From the moment that the worker has replenished parts of the RX ring, the HW can use those RX buffers, raise an IRQ and cause our NAPI code to run concurrently to the RX refill worker. Instead let the worker schedule our NAPI instance, and refill the RX ring from there. Keeping accurate count of how many buffers still need to be refilled also removes some quirky arithmetic from the low-level code. Fixes: b333293058aa ("qeth: add support for af_iucv HiperSockets transport") Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 40 +++++++++++++++++++------------ drivers/s390/net/qeth_l2_main.c | 1 - drivers/s390/net/qeth_l3_main.c | 1 - 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 3d54c8bbfa86..ecfd6d152e86 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -764,6 +764,7 @@ struct qeth_rx { u8 buf_element; int e_offset; int qdio_err; + u8 bufs_refill; }; struct carrier_info { @@ -833,7 +834,6 @@ struct qeth_card { struct napi_struct napi; struct qeth_rx rx; struct delayed_work buffer_reclaim_work; - int reclaim_index; struct work_struct close_dev_work; }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index b356ee5de9f8..c2e44853ac1a 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3492,20 +3492,15 @@ static int qeth_check_qdio_errors(struct qeth_card *card, return 0; } -static void qeth_queue_input_buffer(struct qeth_card *card, int index) +static unsigned int qeth_rx_refill_queue(struct qeth_card *card, + unsigned int count) { struct qeth_qdio_q *queue = card->qdio.in_q; struct list_head *lh; - int count; int i; int rc; int newcount = 0; - count = (index < queue->next_buf_to_init)? - card->qdio.in_buf_pool.buf_count - - (queue->next_buf_to_init - index) : - card->qdio.in_buf_pool.buf_count - - (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); /* only requeue at a certain threshold to avoid SIGAs */ if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { for (i = queue->next_buf_to_init; @@ -3533,12 +3528,11 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) i++; if (i == card->qdio.in_buf_pool.buf_count) { QETH_CARD_TEXT(card, 2, "qsarbw"); - card->reclaim_index = index; schedule_delayed_work( &card->buffer_reclaim_work, QETH_RECLAIM_WORK_TIME); } - return; + return 0; } /* @@ -3555,7 +3549,10 @@ static void qeth_queue_input_buffer(struct qeth_card *card, int index) } queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + count); + return count; } + + return 0; } static void qeth_buffer_reclaim_work(struct work_struct *work) @@ -3563,8 +3560,10 @@ static void qeth_buffer_reclaim_work(struct work_struct *work) struct qeth_card *card = container_of(work, struct qeth_card, buffer_reclaim_work.work); - QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); - qeth_queue_input_buffer(card, card->reclaim_index); + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } static void qeth_handle_send_error(struct qeth_card *card, @@ -5743,6 +5742,7 @@ static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) { + struct qeth_rx *ctx = &card->rx; unsigned int work_done = 0; while (budget > 0) { @@ -5779,8 +5779,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) QETH_CARD_STAT_INC(card, rx_bufs); qeth_put_buffer_pool_entry(card, buffer->pool_entry); buffer->pool_entry = NULL; - qeth_queue_input_buffer(card, card->rx.b_index); card->rx.b_count--; + ctx->bufs_refill++; + ctx->bufs_refill -= qeth_rx_refill_queue(card, + ctx->bufs_refill); /* Step forward to next buffer: */ card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); @@ -5820,9 +5822,16 @@ int qeth_poll(struct napi_struct *napi, int budget) if (card->options.cq == QETH_CQ_ENABLED) qeth_cq_poll(card); 
- /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ - if (budget && work_done >= budget) - return work_done; + if (budget) { + struct qeth_rx *ctx = &card->rx; + + /* Process any substantial refill backlog: */ + ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); + + /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ + if (work_done >= budget) + return work_done; + } if (napi_complete_done(napi, work_done) && qdio_start_irq(CARD_DDEV(card))) @@ -7009,6 +7018,7 @@ int qeth_stop(struct net_device *dev) } napi_disable(&card->napi); + cancel_delayed_work_sync(&card->buffer_reclaim_work); qdio_stop_irq(CARD_DDEV(card)); return 0; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ef7a2db7a724..38e97bbde9ed 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -285,7 +285,6 @@ static void qeth_l2_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 15a12487ff7a..fe44b0249e34 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1169,7 +1169,6 @@ static void qeth_l3_stop_card(struct qeth_card *card) qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); qeth_drain_output_queues(card); - cancel_delayed_work_sync(&card->buffer_reclaim_work); card->state = CARD_STATE_DOWN; } From 02472e28b9a45471c6d8729ff2c7422baa9be46a Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 30 Jul 2020 17:01:20 +0200 Subject: [PATCH 1851/2056] s390/qeth: don't process empty bridge port events Discard events that don't contain any entries. This shouldn't happen, but subsequent code relies on being able to use entry 0. So better be safe than accessing garbage. Fixes: b4d72c08b358 ("qeth: bridgeport support - basic control") Signed-off-by: Julian Wiedmann Reviewed-by: Alexandra Winter Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 38e97bbde9ed..8b342a88ff5c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1140,6 +1140,10 @@ static void qeth_bridge_state_change(struct qeth_card *card, int extrasize; QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); return; From 9e7d92e0b0ace7546d338a44172063ccae36a8f1 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 30 Jul 2020 17:01:21 +0200 Subject: [PATCH 1852/2056] s390/qeth: use all configured RX buffers The (misplaced) comment doesn't make any sense, enforcing an uninitialized RX buffer won't help with IRQ reduction. So make the best use of all available RX buffers. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c2e44853ac1a..bba1b54b8aa3 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3022,6 +3022,7 @@ static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, static int qeth_init_qdio_queues(struct qeth_card *card) { + unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; unsigned int i; int rc; @@ -3033,16 +3034,14 @@ static int qeth_init_qdio_queues(struct qeth_card *card) qeth_initialize_working_pool_list(card); /*give only as many buffers to hardware as we have buffer pool entries*/ - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + for (i = 0; i < rx_bufs; i++) { rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); if (rc) return rc; } - card->qdio.in_q->next_buf_to_init = - card->qdio.in_buf_pool.buf_count - 1; - rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, - card->qdio.in_buf_pool.buf_count - 1); + card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs); if (rc) { QETH_CARD_TEXT_(card, 2, "1err%d", rc); return rc; @@ -3535,13 +3534,6 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card, return 0; } - /* - * according to old code it should be avoided to requeue all - * 128 buffers in order to benefit from PCI avoidance. - * this function keeps at least one buffer (the buffer at - * 'index') un-requeued -> this buffer is the first buffer that - * will be requeued the next time - */ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, queue->next_buf_to_init, count); if (rc) { From cc0b065fd550aee840dbca84e1c3ff667099b461 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 30 Jul 2020 18:09:04 +0300 Subject: [PATCH 1853/2056] hsr: Use %pM format specifier for MAC addresses Convert to %pM instead of using custom code. Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller --- net/hsr/hsr_debugfs.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c index 3b6f675bd55a..7e11a6c35bc3 100644 --- a/net/hsr/hsr_debugfs.c +++ b/net/hsr/hsr_debugfs.c @@ -22,12 +22,6 @@ static struct dentry *hsr_debugfs_root_dir; -static void print_mac_address(struct seq_file *sfp, unsigned char *mac) -{ - seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x ", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); -} - /* hsr_node_table_show - Formats and prints node_table entries */ static int hsr_node_table_show(struct seq_file *sfp, void *data) @@ -49,8 +43,8 @@ hsr_node_table_show(struct seq_file *sfp, void *data) /* skip self node */ if (hsr_addr_is_self(priv, node->macaddress_A)) continue; - print_mac_address(sfp, &node->macaddress_A[0]); - print_mac_address(sfp, &node->macaddress_B[0]); + seq_printf(sfp, "%pM ", &node->macaddress_A[0]); + seq_printf(sfp, "%pM ", &node->macaddress_B[0]); seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]); seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]); seq_printf(sfp, "%14x, ", node->addr_B_port); From b03c3bacf58fdb9774cf209a5773ae500556115f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 30 Jul 2020 18:59:20 +0300 Subject: [PATCH 1854/2056] qed: Use %pM format specifier for MAC addresses Convert to %pM instead of using custom code. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 5 ++--- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 6 ++---- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 988d84564849..5be08f83e0aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -2518,11 +2518,10 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), - "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n", + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, - info->mac[0], info->mac[1], info->mac[2], - info->mac[3], info->mac[4], info->mac[5], + info->mac, info->wwn_port, info->wwn_node, info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index aa215eeeb4df..9489089706fe 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3276,14 +3276,12 @@ static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", + "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n", vf->abs_vf_id, params.opcode, params.type, params.is_rx_filter ? "RX" : "", params.is_tx_filter ? "TX" : "", params.vport_to_add_to, - params.mac[0], params.mac[1], - params.mac[2], params.mac[3], - params.mac[4], params.mac[5], params.vlan); + params.mac, params.vlan); if (!vf->vport_instance) { DP_VERBOSE(p_hwfn, From 26b4b2d99c3a0abe7b6ff4969f48afdda0931573 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 30 Jul 2020 19:00:57 +0300 Subject: [PATCH 1855/2056] qede: Use %pM format specifier for MAC addresses Convert to %pM instead of using custom code. Signed-off-by: Andy Shevchenko Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 1aaae3203f5a..4250c17940c0 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -144,9 +144,7 @@ static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) { struct qede_dev *edev = netdev_priv(ndev); - DP_VERBOSE(edev, QED_MSG_IOV, - "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", - mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); + DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx); if (!is_valid_ether_addr(mac)) { DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); From 77aec5e1c4935e37c8cbb818a69f2f7d3433ede5 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 30 Jul 2020 11:03:14 -0500 Subject: [PATCH 1856/2056] net/sched: cls_u32: Use struct_size() helper Make use of the struct_size() helper, in multiple places, instead of an open-coded version in order to avoid any potential type mistakes and protect against potential integer overflows. Also, remove unnecessary object identifier size. 
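As a rough illustration of the pattern (using a hypothetical 'struct demo_sel' and 'nkeys', not the actual cls_u32 types):

    struct demo_sel {
            unsigned int nkeys;
            u64 keys[];                     /* flexible array member */
    };
    struct demo_sel *sel;

    /* open-coded: the element type is repeated by hand and the
     * multiplication can silently overflow
     */
    sel = kzalloc(sizeof(*sel) + nkeys * sizeof(u64), GFP_KERNEL);

    /* struct_size() takes the element size from the member itself and
     * saturates to SIZE_MAX on overflow, so the allocation fails cleanly
     * instead of being undersized
     */
    sel = kzalloc(struct_size(sel, keys, nkeys), GFP_KERNEL);

Deriving the element size from the member also keeps the size expression correct if the member's type ever changes.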
Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- net/sched/cls_u32.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 771b068f8254..7b69ab1993ba 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -852,9 +852,6 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, u32 htid, flags = 0; size_t sel_size; int err; -#ifdef CONFIG_CLS_U32_PERF - size_t size; -#endif if (!opt) { if (handle) { @@ -1022,15 +1019,15 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, goto erridr; } - n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL); + n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL); if (n == NULL) { err = -ENOBUFS; goto erridr; } #ifdef CONFIG_CLS_U32_PERF - size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64); - n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); + n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys), + __alignof__(struct tc_u32_pcnt)); if (!n->pf) { err = -ENOBUFS; goto errfree; @@ -1294,8 +1291,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, int cpu; #endif - if (nla_put(skb, TCA_U32_SEL, - sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), + if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys), &n->sel)) goto nla_put_failure; @@ -1345,9 +1341,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, goto nla_put_failure; } #ifdef CONFIG_CLS_U32_PERF - gpf = kzalloc(sizeof(struct tc_u32_pcnt) + - n->sel.nkeys * sizeof(u64), - GFP_KERNEL); + gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL); if (!gpf) goto nla_put_failure; @@ -1361,9 +1355,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, gpf->kcnts[i] += pf->kcnts[i]; } - if (nla_put_64bit(skb, TCA_U32_PCNT, - sizeof(struct tc_u32_pcnt) + - n->sel.nkeys * sizeof(u64), + if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys), gpf, TCA_U32_PAD)) { kfree(gpf); goto nla_put_failure; From f8ace8d915b88bd1bbaac695de94650dbb25c7b4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:50 +0200 Subject: [PATCH 1857/2056] tcp: rename request_sock cookie_ts bit to syncookie Nowadays output function has a 'synack_type' argument that tells us when the syn/ack is emitted via syncookies. The request already tells us when timestamps are supported, so check both to detect special timestamp for tcp option encoding is needed. We could remove cookie_ts altogether, but a followup patch would otherwise need to adjust function signatures to pass 'want_cookie' to mptcp core. This way, the 'existing' bit can be used. Suggested-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- drivers/crypto/chelsio/chtls/chtls_cm.c | 2 +- include/net/request_sock.h | 2 +- net/ipv4/tcp_input.c | 3 +-- net/ipv4/tcp_output.c | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c index f924c335a195..05520dccd906 100644 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c @@ -1348,7 +1348,7 @@ static void chtls_pass_accept_request(struct sock *sk, oreq->rsk_rcv_wnd = 0; oreq->rsk_window_clamp = 0; - oreq->cookie_ts = 0; + oreq->syncookie = 0; oreq->mss = 0; oreq->ts_recent = 0; diff --git a/include/net/request_sock.h b/include/net/request_sock.h index cf8b33213bbc..b2eb8b4ba697 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -54,7 +54,7 @@ struct request_sock { struct request_sock *dl_next; u16 mss; u8 num_retrans; /* number of retransmits */ - u8 cookie_ts:1; /* syncookie: encode tcpopts in timestamp */ + u8 syncookie:1; /* syncookie: encode tcpopts in timestamp */ u8 num_timeout:7; /* number of timeouts */ u32 ts_recent; struct timer_list rsk_timer; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a018bafd7bdf..11a6f128e51c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6519,7 +6519,6 @@ static void tcp_openreq_init(struct request_sock *req, struct inet_request_sock *ireq = inet_rsk(req); req->rsk_rcv_wnd = 0; /* So that tcp_send_synack() knows! */ - req->cookie_ts = 0; tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq; tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; tcp_rsk(req)->snt_synack = 0; @@ -6674,6 +6673,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (!req) goto drop; + req->syncookie = want_cookie; tcp_rsk(req)->af_specific = af_ops; tcp_rsk(req)->ts_off = 0; #if IS_ENABLED(CONFIG_MPTCP) @@ -6739,7 +6739,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (want_cookie) { isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); - req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d8f16f6a9b02..85ff417bda7f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3393,7 +3393,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, memset(&opts, 0, sizeof(opts)); now = tcp_clock_ns(); #ifdef CONFIG_SYN_COOKIES - if (unlikely(req->cookie_ts)) + if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) skb->skb_mstamp_ns = cookie_init_timestamp(req, now); else #endif From 535fb8152f313dd5d30ef84ce55b01ad9cbae3cf Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:51 +0200 Subject: [PATCH 1858/2056] mptcp: token: move retry to caller Once syncookie support is added, no state will be stored anymore when the syn/ack is generated in syncookie mode. When the ACK comes back, the generated key will be taken from the TCP ACK, the token is re-generated and inserted into the token tree. This means we can't retry with a new key when the token is already taken in the syncookie case. Therefore, move the retry logic to the caller to prepare for syncookie support in mptcp. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/subflow.c | 9 ++++++++- net/mptcp/token.c | 12 ++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 1c8482bc2ce5..9feb87880d1c 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -126,11 +126,18 @@ static void subflow_init_req(struct request_sock *req, } if (mp_opt.mp_capable && listener->request_mptcp) { - int err; + int err, retries = 4; + +again: + do { + get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); + } while (subflow_req->local_key == 0); err = mptcp_token_new_request(req); if (err == 0) subflow_req->mp_capable = 1; + else if (retries-- > 0) + goto again; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; } else if (mp_opt.mp_join && listener->request_mptcp) { diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 97cfc45bcc4f..f82410c54653 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -109,14 +109,12 @@ static void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn) int mptcp_token_new_request(struct request_sock *req) { struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); - int retries = TOKEN_MAX_RETRIES; struct token_bucket *bucket; u32 token; -again: - mptcp_crypto_key_gen_sha(&subflow_req->local_key, - &subflow_req->token, - &subflow_req->idsn); + mptcp_crypto_key_sha(subflow_req->local_key, + &subflow_req->token, + &subflow_req->idsn); pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n", req, subflow_req->local_key, subflow_req->token, subflow_req->idsn); @@ -126,9 +124,7 @@ int mptcp_token_new_request(struct request_sock *req) spin_lock_bh(&bucket->lock); if (__token_bucket_busy(bucket, token)) { spin_unlock_bh(&bucket->lock); - if (!--retries) - return -EBUSY; - goto again; + return -EBUSY; } hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain); From 78d8b7bc4b32e2d32ac19d3b217166224c4342d0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:52 +0200 Subject: [PATCH 1859/2056] mptcp: subflow: split subflow_init_req When syncookie support is added, we will need to add a variant of subflow_init_req() helper. It will do almost same thing except that it will not compute/add a token to the mptcp token tree. To avoid excess copy&paste, this commit splits away part of the code into a new helper, __subflow_init_req, that can then be re-used from the 'no insert' function added in a followup change. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/mptcp/subflow.c | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 9feb87880d1c..091e305a81c8 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -91,17 +91,9 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req, return msk; } -static void subflow_init_req(struct request_sock *req, - const struct sock *sk_listener, - struct sk_buff *skb) +static int __subflow_init_req(struct request_sock *req, const struct sock *sk_listener) { - struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); - struct mptcp_options_received mp_opt; - - pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); - - mptcp_get_options(skb, &mp_opt); subflow_req->mp_capable = 0; subflow_req->mp_join = 0; @@ -113,9 +105,29 @@ static void subflow_init_req(struct request_sock *req, * TCP option space. */ if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) - return; + return -EINVAL; #endif + return 0; +} + +static void subflow_init_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) +{ + struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); + struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); + struct mptcp_options_received mp_opt; + int ret; + + pr_debug("subflow_req=%p, listener=%p", subflow_req, listener); + + ret = __subflow_init_req(req, sk_listener); + if (ret) + return; + + mptcp_get_options(skb, &mp_opt); + if (mp_opt.mp_capable) { SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE); From 08b8d080982fec354173d3fd28a3106a719b8950 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:53 +0200 Subject: [PATCH 1860/2056] mptcp: rename and export mptcp_subflow_request_sock_ops syncookie code path needs to create an mptcp request sock. Prepare for this and add mptcp prefix plus needed export of ops struct. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- include/net/mptcp.h | 1 + net/mptcp/subflow.c | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 02158c257bd4..76eb915bf91c 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -58,6 +58,7 @@ struct mptcp_out_options { }; #ifdef CONFIG_MPTCP +extern struct request_sock_ops mptcp_subflow_request_sock_ops; void mptcp_init(void); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 091e305a81c8..9b11d2b6ff4d 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -284,7 +284,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) tcp_done(sk); } -static struct request_sock_ops subflow_request_sock_ops; +struct request_sock_ops mptcp_subflow_request_sock_ops; +EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops); static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops; static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) @@ -297,7 +298,7 @@ static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; - return tcp_conn_request(&subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_request_sock_ops, &subflow_request_sock_ipv4_ops, sk, skb); drop: @@ -322,7 +323,7 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) if (!ipv6_unicast_destination(skb)) goto drop; - return tcp_conn_request(&subflow_request_sock_ops, + return tcp_conn_request(&mptcp_subflow_request_sock_ops, &subflow_request_sock_ipv6_ops, sk, skb); drop: @@ -1311,8 +1312,8 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops) void __init mptcp_subflow_init(void) { - subflow_request_sock_ops = tcp_request_sock_ops; - if (subflow_ops_init(&subflow_request_sock_ops) != 0) + mptcp_subflow_request_sock_ops = tcp_request_sock_ops; + if (subflow_ops_init(&mptcp_subflow_request_sock_ops) != 0) panic("MPTCP: failed to init subflow request sock ops\n"); subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops; From c83a47e50d8fd3825a4758158e9edd5acdc74185 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:54 +0200 Subject: [PATCH 1861/2056] mptcp: subflow: add mptcp_subflow_init_cookie_req helper Will be used to initialize the mptcp request socket when a MP_CAPABLE request was handled in syncookie mode, i.e. when a TCP ACK containing a MP_CAPABLE option is a valid syncookie value. Normally (non-cookie case), MPTCP will generate a unique 32 bit connection ID and stores it in the MPTCP token storage to be able to retrieve the mptcp socket for subflow joining. In syncookie case, we do not want to store any state, so just generate the unique ID and use it in the reply. This means there is a small window where another connection could generate the same token. When Cookie ACK comes back, we check that the token has not been registered in the mean time. If it was, the connection needs to fall back to TCP. Changes in v2: - use req->syncookie instead of passing 'want_cookie' arg to ->init_req() (Eric Dumazet) Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- include/net/mptcp.h | 10 +++++++++ net/mptcp/protocol.h | 1 + net/mptcp/subflow.c | 50 +++++++++++++++++++++++++++++++++++++++++++- net/mptcp/token.c | 26 +++++++++++++++++++++++ 4 files changed, 86 insertions(+), 1 deletion(-) diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 76eb915bf91c..3525d2822abe 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -131,6 +131,9 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, } void mptcp_seq_show(struct seq_file *seq); +int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb); #else static inline void mptcp_init(void) @@ -200,6 +203,13 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { } static inline void mptcp_seq_show(struct seq_file *seq) { } + +static inline int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) +{ + return 0; /* TCP fallback */ +} #endif /* CONFIG_MPTCP */ #if IS_ENABLED(CONFIG_MPTCP_IPV6) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index beb34b8a5363..d76d3b40d69e 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -400,6 +400,7 @@ void mptcp_token_destroy_request(struct request_sock *req); int mptcp_token_new_connect(struct sock *sk); void mptcp_token_accept(struct mptcp_subflow_request_sock *r, struct mptcp_sock *msk); +bool mptcp_token_exists(u32 token); struct mptcp_sock *mptcp_token_get_sock(u32 token); struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, long *s_num); diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 9b11d2b6ff4d..3d346572d4c9 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -140,18 +140,31 @@ static void subflow_init_req(struct request_sock *req, if (mp_opt.mp_capable && listener->request_mptcp) { int err, retries = 4; + subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; again: do { get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key)); } while (subflow_req->local_key == 0); + if (unlikely(req->syncookie)) { + mptcp_crypto_key_sha(subflow_req->local_key, + &subflow_req->token, + &subflow_req->idsn); + if (mptcp_token_exists(subflow_req->token)) { + if (retries-- > 0) + goto again; + } else { + subflow_req->mp_capable = 1; + } + return; + } + err = mptcp_token_new_request(req); if (err == 0) subflow_req->mp_capable = 1; else if (retries-- > 0) goto again; - subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; } else if (mp_opt.mp_join && listener->request_mptcp) { subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq; subflow_req->mp_join = 1; @@ -165,6 +178,41 @@ static void subflow_init_req(struct request_sock *req, } } +int mptcp_subflow_init_cookie_req(struct request_sock *req, + const struct sock *sk_listener, + struct sk_buff *skb) +{ + struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener); + struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); + struct mptcp_options_received mp_opt; + int err; + + err = __subflow_init_req(req, sk_listener); + if (err) + return err; + + mptcp_get_options(skb, &mp_opt); + + if (mp_opt.mp_capable && mp_opt.mp_join) + return -EINVAL; + + if (mp_opt.mp_capable && listener->request_mptcp) { + if (mp_opt.sndr_key == 0) + return -EINVAL; + + subflow_req->local_key = mp_opt.rcvr_key; + err = mptcp_token_new_request(req); + if (err) + return err; + + subflow_req->mp_capable = 1; + 
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req); + static void subflow_v4_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb) diff --git a/net/mptcp/token.c b/net/mptcp/token.c index f82410c54653..8b47c4bb1c6b 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -204,6 +204,32 @@ void mptcp_token_accept(struct mptcp_subflow_request_sock *req, spin_unlock_bh(&bucket->lock); } +bool mptcp_token_exists(u32 token) +{ + struct hlist_nulls_node *pos; + struct token_bucket *bucket; + struct mptcp_sock *msk; + struct sock *sk; + + rcu_read_lock(); + bucket = token_bucket(token); + +again: + sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) { + msk = mptcp_sk(sk); + if (READ_ONCE(msk->token) == token) + goto found; + } + if (get_nulls_value(pos) != (token & token_mask)) + goto again; + + rcu_read_unlock(); + return false; +found: + rcu_read_unlock(); + return true; +} + /** * mptcp_token_get_sock - retrieve mptcp connection sock using its token * @token: token of the mptcp connection to retrieve From 6fc8c827dd4fa615965c4eac9bbfd465f6eb8fb4 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:55 +0200 Subject: [PATCH 1862/2056] tcp: syncookies: create mptcp request socket for ACK cookies with MPTCP option If SYN packet contains MP_CAPABLE option, keep it enabled. Syncokie validation and cookie-based socket creation is changed to instantiate an mptcp request sockets if the ACK contains an MPTCP connection request. Rather than extend both cookie_v4/6_check, add a common helper to create the (mp)tcp request socket. Suggested-by: Paolo Abeni Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/tcp.h | 2 ++ net/ipv4/syncookies.c | 38 ++++++++++++++++++++++++++++++++++---- net/ipv4/tcp_input.c | 3 --- net/ipv6/syncookies.c | 5 +---- 4 files changed, 37 insertions(+), 11 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index e0c35d56091f..dbf5c791a6eb 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -469,6 +469,8 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); +struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk, struct sk_buff *skb); #ifdef CONFIG_SYN_COOKIES /* Syncookies use a monotonic timer which increments every 60 seconds. diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 9a4f6b16c9bc..54838ee2e8d4 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -276,6 +276,39 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt, } EXPORT_SYMBOL(cookie_ecn_ok); +struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk, + struct sk_buff *skb) +{ + struct tcp_request_sock *treq; + struct request_sock *req; + +#ifdef CONFIG_MPTCP + if (sk_is_mptcp(sk)) + ops = &mptcp_subflow_request_sock_ops; +#endif + + req = inet_reqsk_alloc(ops, sk, false); + if (!req) + return NULL; + +#if IS_ENABLED(CONFIG_MPTCP) + treq = tcp_rsk(req); + treq->is_mptcp = sk_is_mptcp(sk); + if (treq->is_mptcp) { + int err = mptcp_subflow_init_cookie_req(req, sk, skb); + + if (err) { + reqsk_free(req); + return NULL; + } + } +#endif + + return req; +} +EXPORT_SYMBOL_GPL(cookie_tcp_reqsk_alloc); + /* On input, sk is a listener. 
* Output is listener if incoming packet would not create a child * NULL if memory could not be allocated. @@ -326,7 +359,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */ + req = cookie_tcp_reqsk_alloc(&tcp_request_sock_ops, sk, skb); if (!req) goto out; @@ -350,9 +383,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) treq->snt_synack = 0; treq->tfo_listener = false; - if (IS_ENABLED(CONFIG_MPTCP)) - treq->is_mptcp = 0; - if (IS_ENABLED(CONFIG_SMC)) ireq->smc_ok = 0; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 11a6f128e51c..739da25b0c23 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6701,9 +6701,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, af_ops->init_req(req, sk, skb); - if (IS_ENABLED(CONFIG_MPTCP) && want_cookie) - tcp_rsk(req)->is_mptcp = 0; - if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 13235a012388..e796a64be308 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -170,7 +170,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; - req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false); + req = cookie_tcp_reqsk_alloc(&tcp6_request_sock_ops, sk, skb); if (!req) goto out; @@ -178,9 +178,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) treq = tcp_rsk(req); treq->tfo_listener = false; - if (IS_ENABLED(CONFIG_MPTCP)) - treq->is_mptcp = 0; - if (security_inet_conn_request(sk, skb, req)) goto out_free; From 9466a1ccebbe54ac57fb8a89c2b4b854826546a8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:56 +0200 Subject: [PATCH 1863/2056] mptcp: enable JOIN requests even if cookies are in use JOIN requests do not work in syncookie mode -- for HMAC validation, the peers nonce and the mptcp token (to obtain the desired connection socket the join is for) are required, but this information is only present in the initial syn. So either we need to drop all JOIN requests once a listening socket enters syncookie mode, or we need to store enough state to reconstruct the request socket later. This adds a state table (1024 entries) to store the data present in the MP_JOIN syn request and the random nonce used for the cookie syn/ack. When a MP_JOIN ACK passed cookie validation, the table is consulted to rebuild the request socket from it. An alternate approach would be to "cancel" syn-cookie mode and force MP_JOIN to always use a syn queue entry. However, doing so brings the backlog over the configured queue limit. v2: use req->syncookie, not (removed) want_cookie arg Suggested-by: Paolo Abeni Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- net/ipv4/syncookies.c | 6 ++ net/mptcp/Makefile | 1 + net/mptcp/ctrl.c | 1 + net/mptcp/protocol.h | 20 +++++++ net/mptcp/subflow.c | 14 +++++ net/mptcp/syncookies.c | 132 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 174 insertions(+) create mode 100644 net/mptcp/syncookies.c diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 54838ee2e8d4..11b20474be83 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -212,6 +212,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, refcount_set(&req->rsk_refcnt, 1); tcp_sk(child)->tsoffset = tsoff; sock_rps_save_rxhash(child, skb); + + if (tcp_rsk(req)->drop_req) { + refcount_set(&req->rsk_refcnt, 2); + return child; + } + if (inet_csk_reqsk_queue_add(sk, req, child)) return child; diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile index 2360cbd27d59..a611968be4d7 100644 --- a/net/mptcp/Makefile +++ b/net/mptcp/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_MPTCP) += mptcp.o mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \ mib.o pm_netlink.o +obj-$(CONFIG_SYN_COOKIES) += syncookies.o obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o mptcp_crypto_test-objs := crypto_test.o diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c index 8e39585d37f3..54b888f94009 100644 --- a/net/mptcp/ctrl.c +++ b/net/mptcp/ctrl.c @@ -112,6 +112,7 @@ static struct pernet_operations mptcp_pernet_ops = { void __init mptcp_init(void) { + mptcp_join_cookie_init(); mptcp_proto_init(); if (register_pernet_subsys(&mptcp_pernet_ops) < 0) diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d76d3b40d69e..60b27d44c184 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -506,4 +506,24 @@ static inline bool subflow_simultaneous_connect(struct sock *sk) !subflow->conn_finished; } +#ifdef CONFIG_SYN_COOKIES +void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb); +bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb); +void __init mptcp_join_cookie_init(void); +#else +static inline void +subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb) {} +static inline bool +mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb) +{ + return false; +} + +static inline void mptcp_join_cookie_init(void) {} +#endif + #endif /* __MPTCP_PROTOCOL_H */ diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 3d346572d4c9..a4cc4591bd4e 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -173,6 +173,12 @@ static void subflow_init_req(struct request_sock *req, subflow_req->token = mp_opt.token; subflow_req->remote_nonce = mp_opt.nonce; subflow_req->msk = subflow_token_join_request(req, skb); + + if (unlikely(req->syncookie) && subflow_req->msk) { + if (mptcp_can_accept_new_subflow(subflow_req->msk)) + subflow_init_req_cookie_join_save(subflow_req, skb); + } + pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token, subflow_req->remote_nonce, subflow_req->msk); } @@ -207,6 +213,14 @@ int mptcp_subflow_init_cookie_req(struct request_sock *req, subflow_req->mp_capable = 1; subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; + } else if (mp_opt.mp_join && listener->request_mptcp) { + if (!mptcp_token_join_cookie_init_state(subflow_req, skb)) + return -EINVAL; + + if (mptcp_can_accept_new_subflow(subflow_req->msk)) + subflow_req->mp_join = 1; + + 
subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1; } return 0; diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c new file mode 100644 index 000000000000..6eb992789b50 --- /dev/null +++ b/net/mptcp/syncookies.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include "protocol.h" + +/* Syncookies do not work for JOIN requests. + * + * Unlike MP_CAPABLE, where the ACK cookie contains the needed MPTCP + * options to reconstruct the initial syn state, MP_JOIN does not contain + * the token to obtain the mptcp socket nor the server-generated nonce + * that was used in the cookie SYN/ACK response. + * + * Keep a small best effort state table to store the syn/synack data, + * indexed by skb hash. + * + * A MP_JOIN SYN packet handled by syn cookies is only stored if the 32bit + * token matches a known mptcp connection that can still accept more subflows. + * + * There is no timeout handling -- state is only re-constructed + * when the TCP ACK passed the cookie validation check. + */ + +struct join_entry { + u32 token; + u32 remote_nonce; + u32 local_nonce; + u8 join_id; + u8 local_id; + u8 backup; + u8 valid; +}; + +#define COOKIE_JOIN_SLOTS 1024 + +static struct join_entry join_entries[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp; +static spinlock_t join_entry_locks[COOKIE_JOIN_SLOTS] __cacheline_aligned_in_smp; + +static u32 mptcp_join_entry_hash(struct sk_buff *skb, struct net *net) +{ + u32 i = skb_get_hash(skb) ^ net_hash_mix(net); + + return i % ARRAY_SIZE(join_entries); +} + +static void mptcp_join_store_state(struct join_entry *entry, + const struct mptcp_subflow_request_sock *subflow_req) +{ + entry->token = subflow_req->token; + entry->remote_nonce = subflow_req->remote_nonce; + entry->local_nonce = subflow_req->local_nonce; + entry->backup = subflow_req->backup; + entry->join_id = subflow_req->remote_id; + entry->local_id = subflow_req->local_id; + entry->valid = 1; +} + +void subflow_init_req_cookie_join_save(const struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb) +{ + struct net *net = read_pnet(&subflow_req->sk.req.ireq_net); + u32 i = mptcp_join_entry_hash(skb, net); + + /* No use in waiting if other cpu is already using this slot -- + * would overwrite the data that got stored. + */ + spin_lock_bh(&join_entry_locks[i]); + mptcp_join_store_state(&join_entries[i], subflow_req); + spin_unlock_bh(&join_entry_locks[i]); +} + +/* Called for a cookie-ack with MP_JOIN option present. + * Look up the saved state based on skb hash & check token matches msk + * in same netns. + * + * Caller will check msk can still accept another subflow. The hmac + * present in the cookie ACK mptcp option space will be checked later. + */ +bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subflow_req, + struct sk_buff *skb) +{ + struct net *net = read_pnet(&subflow_req->sk.req.ireq_net); + u32 i = mptcp_join_entry_hash(skb, net); + struct mptcp_sock *msk; + struct join_entry *e; + + e = &join_entries[i]; + + spin_lock_bh(&join_entry_locks[i]); + + if (e->valid == 0) { + spin_unlock_bh(&join_entry_locks[i]); + return false; + } + + e->valid = 0; + + msk = mptcp_token_get_sock(e->token); + if (!msk) { + spin_unlock_bh(&join_entry_locks[i]); + return false; + } + + /* If this fails, the token got re-used in the mean time by another + * mptcp socket in a different netns, i.e. entry is outdated. 
+ */ + if (!net_eq(sock_net((struct sock *)msk), net)) + goto err_put; + + subflow_req->remote_nonce = e->remote_nonce; + subflow_req->local_nonce = e->local_nonce; + subflow_req->backup = e->backup; + subflow_req->remote_id = e->join_id; + subflow_req->token = e->token; + subflow_req->msk = msk; + spin_unlock_bh(&join_entry_locks[i]); + return true; + +err_put: + spin_unlock_bh(&join_entry_locks[i]); + sock_put((struct sock *)msk); + return false; +} + +void __init mptcp_join_cookie_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(join_entry_locks); i++) + spin_lock_init(&join_entry_locks[i]); + + BUILD_BUG_ON(ARRAY_SIZE(join_entry_locks) != ARRAY_SIZE(join_entries)); +} From fed61c4b584c5839543fe46a2ee55e21dd1bbf80 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:57 +0200 Subject: [PATCH 1864/2056] selftests: mptcp: make 2nd net namespace use tcp syn cookies unconditionally check we can establish connections also when syn cookies are in use. Check that MPTcpExtMPCapableSYNRX and MPTcpExtMPCapableACKRX increase for each MPTCP test. Check TcpExtSyncookiesSent and TcpExtSyncookiesRecv increase in netns2. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. Miller --- .../selftests/net/mptcp/mptcp_connect.sh | 47 +++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index c0589e071f20..57d75b7f6220 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -196,6 +196,9 @@ ip -net "$ns4" link set ns4eth3 up ip -net "$ns4" route add default via 10.0.3.2 ip -net "$ns4" route add default via dead:beef:3::2 +# use TCP syn cookies, even if no flooding was detected. +ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2 + set_ethtool_flags() { local ns="$1" local dev="$2" @@ -407,6 +410,11 @@ do_transfer() sleep 1 fi + local stat_synrx_last_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableSYNRX | while read a count c rest ;do echo $count;done) + local stat_ackrx_last_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableACKRX | while read a count c rest ;do echo $count;done) + local stat_cookietx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesSent | while read a count c rest ;do echo $count;done) + local stat_cookierx_last=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesRecv | while read a count c rest ;do echo $count;done) + ip netns exec ${listener_ns} ./mptcp_connect -t $timeout -l -p $port -s ${srv_proto} $extra_args $local_addr < "$sin" > "$sout" & local spid=$! @@ -450,6 +458,45 @@ do_transfer() check_transfer $cin $sout "file received by server" rets=$? 
+ local stat_synrx_now_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableSYNRX | while read a count c rest ;do echo $count;done) + local stat_ackrx_now_l=$(ip netns exec ${listener_ns} nstat -z -a MPTcpExtMPCapableACKRX | while read a count c rest ;do echo $count;done) + + local stat_cookietx_now=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesSent | while read a count c rest ;do echo $count;done) + local stat_cookierx_now=$(ip netns exec ${listener_ns} nstat -z -a TcpExtSyncookiesRecv | while read a count c rest ;do echo $count;done) + + expect_synrx=$((stat_synrx_last_l)) + expect_ackrx=$((stat_ackrx_last_l)) + + cookies=$(ip netns exec ${listener_ns} sysctl net.ipv4.tcp_syncookies) + cookies=${cookies##*=} + + if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then + expect_synrx=$((stat_synrx_last_l+1)) + expect_ackrx=$((stat_ackrx_last_l+1)) + fi + if [ $cookies -eq 2 ];then + if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then + echo "${listener_ns} CookieSent: ${cl_proto} -> ${srv_proto}: did not advance" + fi + if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then + echo "${listener_ns} CookieRecv: ${cl_proto} -> ${srv_proto}: did not advance" + fi + else + if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then + echo "${listener_ns} CookieSent: ${cl_proto} -> ${srv_proto}: changed" + fi + if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then + echo "${listener_ns} CookieRecv: ${cl_proto} -> ${srv_proto}: changed" + fi + fi + + if [ $expect_synrx -ne $stat_synrx_now_l ] ;then + echo "${listener_ns} SYNRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}" + fi + if [ $expect_ackrx -ne $stat_ackrx_now_l ] ;then + echo "${listener_ns} ACKRX: ${cl_proto} -> ${srv_proto}: expect ${expect_synrx}, got ${stat_synrx_now_l}" + fi + if [ $retc -eq 0 ] && [ $rets -eq 0 ];then echo "$duration [ OK ]" cat "$capout" From 00587187ad3016382bd8d3fad0f3bba0a518ab40 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 30 Jul 2020 21:25:58 +0200 Subject: [PATCH 1865/2056] selftests: mptcp: add test cases for mptcp join tests with syn cookies Also add test cases with MP_JOIN when tcp_syncookies sysctl is 2 (i.e., syncookies are always-on). While at it, also print the test number and add the test number to the pcap files that can be generated optionally. This makes it easier to match the pcap to the test case. Signed-off-by: Florian Westphal Reviewed-by: Mat Martineau Signed-off-by: David S. 
Miller --- .../testing/selftests/net/mptcp/mptcp_join.sh | 66 ++++++++++++++++++- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index dd42c2f692d0..f39c1129ce5f 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -72,6 +72,15 @@ reset() init } +reset_with_cookies() +{ + reset + + for netns in "$ns1" "$ns2";do + ip netns exec $netns sysctl -q net.ipv4.tcp_syncookies=2 + done +} + for arg in "$@"; do if [ "$arg" = "-c" ]; then capture=1 @@ -138,7 +147,7 @@ do_transfer() capuser="-Z $SUDO_USER" fi - capfile="mp_join-${listener_ns}.pcap" + capfile=$(printf "mp_join-%02u-%s.pcap" "$TEST_COUNT" "${listener_ns}") echo "Capturing traffic for test $TEST_COUNT into $capfile" ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 & @@ -227,7 +236,7 @@ chk_join_nr() local count local dump_stats - printf "%-36s %s" "$msg" "syn" + printf "%02u %-36s %s" "$TEST_COUNT" "$msg" "syn" count=`ip netns exec $ns1 nstat -as | grep MPTcpExtMPJoinSynRx | awk '{print $2}'` [ -z "$count" ] && count=0 if [ "$count" != "$syn_nr" ]; then @@ -354,4 +363,57 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow run_tests $ns1 $ns2 10.0.1.1 chk_join_nr "multiple subflows and signal" 3 3 3 +# single subflow, syncookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "single subflow with syn cookies" 1 1 1 + +# multiple subflows with syn cookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "multiple subflows with syn cookies" 2 2 2 + +# multiple subflows limited by server +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflows limited by server w cookies" 2 2 1 + +# test signal address with cookies +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 1 +ip netns exec $ns2 ./pm_nl_ctl limits 1 1 +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "signal address with syn cookies" 1 1 1 + +# test cookie with subflow and signal +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +ip netns exec $ns1 ./pm_nl_ctl limits 0 2 +ip netns exec $ns2 ./pm_nl_ctl limits 1 2 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflow and signal w cookies" 2 2 2 + +# accept and use add_addr with additional subflows +reset_with_cookies +ip netns exec $ns1 ./pm_nl_ctl limits 0 3 +ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal +ip netns exec $ns2 ./pm_nl_ctl limits 1 3 +ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow +ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow +run_tests $ns1 $ns2 10.0.1.1 +chk_join_nr "subflows and signal w. 
cookies" 3 3 3 + exit $ret From 48040793fa6003d211f021c6ad273477bcd90d91 Mon Sep 17 00:00:00 2001 From: Yousuk Seung Date: Thu, 30 Jul 2020 15:44:40 -0700 Subject: [PATCH 1866/2056] tcp: add earliest departure time to SCM_TIMESTAMPING_OPT_STATS This change adds TCP_NLA_EDT to SCM_TIMESTAMPING_OPT_STATS that reports the earliest departure time(EDT) of the timestamped skb. By tracking EDT values of the skb from different timestamps, we can observe when and how much the value changed. This allows to measure the precise delay injected on the sender host e.g. by a bpf-base throttler. Signed-off-by: Yousuk Seung Signed-off-by: Eric Dumazet Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Acked-by: Yuchung Cheng Signed-off-by: David S. Miller --- include/linux/tcp.h | 3 ++- include/uapi/linux/tcp.h | 1 + net/core/skbuff.c | 2 +- net/ipv4/tcp.c | 6 +++++- 4 files changed, 9 insertions(+), 3 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 527d668a5275..14b62d7df942 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -484,7 +484,8 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) tp->saved_syn = NULL; } -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, + const struct sk_buff *orig_skb); static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) { diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index f2acb2566333..cfcb10b75483 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -313,6 +313,7 @@ enum { TCP_NLA_SRTT, /* smoothed RTT in usecs */ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */ + TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */ }; /* for TCP_MD5SIG socket option */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b8afefe6f6b6..4e2edfbe0e19 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4692,7 +4692,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) { - skb = tcp_get_timestamping_opt_stats(sk); + skb = tcp_get_timestamping_opt_stats(sk, orig_skb); opt_stats = true; } else #endif diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4afec552f211..c06d2bfd2ec4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3501,10 +3501,12 @@ static size_t tcp_opt_stats_get_size(void) nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ + nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 0; } -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, + const struct sk_buff *orig_skb) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *stats; @@ -3558,6 +3560,8 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, max_t(int, 0, tp->write_seq - tp->snd_nxt)); + nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, + TCP_NLA_PAD); return stats; } From 8f3f330da28ede9d106cd9d5c5ccd6a3e7e9b50b Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 31 Jul 2020 00:17:20 -0400 Subject: [PATCH 1867/2056] tun: add missing rcu annotation in tun_set_ebpf() We expecte 
prog_p to be protected by rcu, so adding the rcu annotation to fix the following sparse warning: drivers/net/tun.c:3003:36: warning: incorrect type in argument 2 (different address spaces) drivers/net/tun.c:3003:36: expected struct tun_prog [noderef] __rcu **prog_p drivers/net/tun.c:3003:36: got struct tun_prog **prog_p drivers/net/tun.c:3292:42: warning: incorrect type in argument 2 (different address spaces) drivers/net/tun.c:3292:42: expected struct tun_prog **prog_p drivers/net/tun.c:3292:42: got struct tun_prog [noderef] __rcu ** drivers/net/tun.c:3296:42: warning: incorrect type in argument 2 (different address spaces) drivers/net/tun.c:3296:42: expected struct tun_prog **prog_p drivers/net/tun.c:3296:42: got struct tun_prog [noderef] __rcu ** Reported-by: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/tun.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7adeb91bd368..9b4b25358f9b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2983,7 +2983,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr) return ret; } -static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p, +static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p, void __user *data) { struct bpf_prog *prog; From 829eb208e80d6db95c0201cb8fa00c2f9ad87faf Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Fri, 31 Jul 2020 17:34:01 -0700 Subject: [PATCH 1868/2056] rtnetlink: add support for protodown reason netdev protodown is a mechanism that allows protocols to hold an interface down. It was initially introduced in the kernel to hold links down by a multihoming protocol. There was also an attempt to introduce protodown reason at the time but was rejected. protodown and protodown reason is supported by almost every switching and routing platform. It was ok for a while to live without a protodown reason. But, its become more critical now given more than one protocol may need to keep a link down on a system at the same time. eg: vrrp peer node, port security, multihoming protocol. Its common for Network operators and protocol developers to look for such a reason on a networking box (Its also known as errDisable by most networking operators) This patch adds support for link protodown reason attribute. There are two ways to maintain protodown reasons. (a) enumerate every possible reason code in kernel - A protocol developer has to make a request and have that appear in a certain kernel version (b) provide the bits in the kernel, and allow user-space (sysadmin or NOS distributions) to manage the bit-to-reasonname map. - This makes extending reason codes easier (kind of like the iproute2 table to vrf-name map /etc/iproute2/rt_tables.d/) This patch takes approach (b). a few things about the patch: - It treats the protodown reason bits as counter to indicate active protodown users - Since protodown attribute is already an exposed UAPI, the reason is not enforced on a protodown set. Its a no-op if not used. 
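For an in-kernel protocol that wants to hold a link down, the expected flow with the helper added by this patch would look roughly like the sketch below. The macro name and the bit-2 "vrrp" assignment are hypothetical, mirroring the user-space mapping in the iproute2 example further down; rtnl locking and error checks are omitted.

    /* hypothetical reason bit, matching a user-space managed mapping */
    #define EXAMPLE_PROTODOWN_REASON_VRRP   BIT(2)

    /* record the reason, then hold the link down */
    dev_change_proto_down_reason(dev, EXAMPLE_PROTODOWN_REASON_VRRP,
                                 EXAMPLE_PROTODOWN_REASON_VRRP);
    dev_change_proto_down(dev, true);

    /* release: clear the reason bit before dropping protodown; the
     * rtnetlink path returns -EBUSY while reason bits are still set
     */
    dev_change_proto_down_reason(dev, EXAMPLE_PROTODOWN_REASON_VRRP, 0);
    dev_change_proto_down(dev, false);
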
the patch follows the below algorithm: - presence of reason bits set indicates protodown is in use - user can set protodown and protodown reason in a single or multiple setlink operations - setlink operation to clear protodown, will return -EBUSY if there are active protodown reason bits - reason is not included in link dumps if not used example with patched iproute2: $cat /etc/iproute2/protodown_reasons.d/r.conf 0 mlag 1 evpn 2 vrrp 3 psecurity $ip link set dev vxlan0 protodown on protodown_reason vrrp on $ip link set dev vxlan0 protodown_reason mlag on $ip link show 14: vxlan0: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 link/ether f6:06:be:17:91:e7 brd ff:ff:ff:ff:ff:ff protodown on $ip link set dev vxlan0 protodown_reason mlag off $ip link set dev vxlan0 protodown off protodown_reason vrrp off Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/netdevice.h | 4 ++ include/uapi/linux/if_link.h | 10 ++++ net/core/dev.c | 25 ++++++++ net/core/rtnetlink.c | 113 +++++++++++++++++++++++++++++++++-- 4 files changed, 147 insertions(+), 5 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ac2cd3f49aba..ba0fa6b22787 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2058,6 +2058,8 @@ struct net_device { struct timer_list watchdog_timer; int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; int __percpu *pcpu_refcnt; @@ -3810,6 +3812,8 @@ int dev_get_port_parent_id(struct net_device *dev, bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, + u32 value); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 63af64646358..7fba4de511de 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -170,12 +170,22 @@ enum { IFLA_PROP_LIST, IFLA_ALT_IFNAME, /* Alternative ifname */ IFLA_PERM_ADDRESS, + IFLA_PROTO_DOWN_REASON, __IFLA_MAX }; #define IFLA_MAX (__IFLA_MAX - 1) +enum { + IFLA_PROTO_DOWN_REASON_UNSPEC, + IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */ + IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */ + + __IFLA_PROTO_DOWN_REASON_CNT, + IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1 +}; + /* backwards compatibility for userspace */ #ifndef __KERNEL__ #define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg)))) diff --git a/net/core/dev.c b/net/core/dev.c index 38a6371d9bc5..f7ef0f5c5569 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8715,6 +8715,31 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down_generic); +/** + * dev_change_proto_down_reason - proto down reason + * + * @dev: device + * @mask: proto down mask + * @value: proto down value + */ +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, + u32 value) +{ + int b; + + if (!mask) { + dev->proto_down_reason = value; + } else { + for_each_set_bit(b, &mask, 32) { + if (value & (1 << b)) + dev->proto_down_reason |= BIT(b); + else + 
dev->proto_down_reason &= ~BIT(b); + } + } +} +EXPORT_SYMBOL(dev_change_proto_down_reason); + u32 __dev_xdp_query(struct net_device *dev, bpf_op_t bpf_op, enum bpf_netdev_command cmd) { diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 85a4b0101f76..a54c3e0f2ee1 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1000,6 +1000,16 @@ static size_t rtnl_prop_list_size(const struct net_device *dev) return size; } +static size_t rtnl_proto_down_size(const struct net_device *dev) +{ + size_t size = nla_total_size(1); + + if (dev->proto_down_reason) + size += nla_total_size(0) + nla_total_size(4); + + return size; +} + static noinline size_t if_nlmsg_size(const struct net_device *dev, u32 ext_filter_mask) { @@ -1041,7 +1051,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev, + nla_total_size(4) /* IFLA_EVENT */ + nla_total_size(4) /* IFLA_NEW_NETNSID */ + nla_total_size(4) /* IFLA_NEW_IFINDEX */ - + nla_total_size(1) /* IFLA_PROTO_DOWN */ + + rtnl_proto_down_size(dev) /* proto down */ + nla_total_size(4) /* IFLA_TARGET_NETNSID */ + nla_total_size(4) /* IFLA_CARRIER_UP_COUNT */ + nla_total_size(4) /* IFLA_CARRIER_DOWN_COUNT */ @@ -1658,6 +1668,35 @@ static int rtnl_fill_prop_list(struct sk_buff *skb, return ret; } +static int rtnl_fill_proto_down(struct sk_buff *skb, + const struct net_device *dev) +{ + struct nlattr *pr; + u32 preason; + + if (nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down)) + goto nla_put_failure; + + preason = dev->proto_down_reason; + if (!preason) + return 0; + + pr = nla_nest_start(skb, IFLA_PROTO_DOWN_REASON); + if (!pr) + return -EMSGSIZE; + + if (nla_put_u32(skb, IFLA_PROTO_DOWN_REASON_VALUE, preason)) { + nla_nest_cancel(skb, pr); + goto nla_put_failure; + } + + nla_nest_end(skb, pr); + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev, struct net *src_net, int type, u32 pid, u32 seq, u32 change, @@ -1708,13 +1747,15 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, nla_put_u32(skb, IFLA_CARRIER_CHANGES, atomic_read(&dev->carrier_up_count) + atomic_read(&dev->carrier_down_count)) || - nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down) || nla_put_u32(skb, IFLA_CARRIER_UP_COUNT, atomic_read(&dev->carrier_up_count)) || nla_put_u32(skb, IFLA_CARRIER_DOWN_COUNT, atomic_read(&dev->carrier_down_count))) goto nla_put_failure; + if (rtnl_fill_proto_down(skb, dev)) + goto nla_put_failure; + if (event != IFLA_EVENT_NONE) { if (nla_put_u32(skb, IFLA_EVENT, event)) goto nla_put_failure; @@ -1834,6 +1875,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_ALT_IFNAME] = { .type = NLA_STRING, .len = ALTIFNAMSIZ - 1 }, [IFLA_PERM_ADDRESS] = { .type = NLA_REJECT }, + [IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED }, }; static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { @@ -2483,6 +2525,67 @@ static int do_set_master(struct net_device *dev, int ifindex, return 0; } +static const struct nla_policy ifla_proto_down_reason_policy[IFLA_PROTO_DOWN_REASON_VALUE + 1] = { + [IFLA_PROTO_DOWN_REASON_MASK] = { .type = NLA_U32 }, + [IFLA_PROTO_DOWN_REASON_VALUE] = { .type = NLA_U32 }, +}; + +static int do_set_proto_down(struct net_device *dev, + struct nlattr *nl_proto_down, + struct nlattr *nl_proto_down_reason, + struct netlink_ext_ack *extack) +{ + struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1]; + const struct net_device_ops *ops = dev->netdev_ops; + unsigned long mask = 0; + u32 value; + bool proto_down; + int err; + + if 
(!ops->ndo_change_proto_down) { + NL_SET_ERR_MSG(extack, "Protodown not supported by device"); + return -EOPNOTSUPP; + } + + if (nl_proto_down_reason) { + err = nla_parse_nested_deprecated(pdreason, + IFLA_PROTO_DOWN_REASON_MAX, + nl_proto_down_reason, + ifla_proto_down_reason_policy, + NULL); + if (err < 0) + return err; + + if (!pdreason[IFLA_PROTO_DOWN_REASON_VALUE]) { + NL_SET_ERR_MSG(extack, "Invalid protodown reason value"); + return -EINVAL; + } + + value = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_VALUE]); + + if (pdreason[IFLA_PROTO_DOWN_REASON_MASK]) + mask = nla_get_u32(pdreason[IFLA_PROTO_DOWN_REASON_MASK]); + + dev_change_proto_down_reason(dev, mask, value); + } + + if (nl_proto_down) { + proto_down = nla_get_u8(nl_proto_down); + + /* Dont turn off protodown if there are active reasons */ + if (!proto_down && dev->proto_down_reason) { + NL_SET_ERR_MSG(extack, "Cannot clear protodown, active reasons"); + return -EBUSY; + } + err = dev_change_proto_down(dev, + proto_down); + if (err) + return err; + } + + return 0; +} + #define DO_SETLINK_MODIFIED 0x01 /* notify flag means notify + modified. */ #define DO_SETLINK_NOTIFY 0x03 @@ -2771,9 +2874,9 @@ static int do_setlink(const struct sk_buff *skb, } err = 0; - if (tb[IFLA_PROTO_DOWN]) { - err = dev_change_proto_down(dev, - nla_get_u8(tb[IFLA_PROTO_DOWN])); + if (tb[IFLA_PROTO_DOWN] || tb[IFLA_PROTO_DOWN_REASON]) { + err = do_set_proto_down(dev, tb[IFLA_PROTO_DOWN], + tb[IFLA_PROTO_DOWN_REASON], extack); if (err) goto errout; status |= DO_SETLINK_NOTIFY; From 65c72291f709fe8a82e1df3d7b8d2e65d4d861a6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 28 Jul 2020 09:41:53 +0800 Subject: [PATCH 1869/2056] ice: mark PM functions as __maybe_unused In certain configurations without power management support, the following warnings happen: drivers/net/ethernet/intel/ice/ice_main.c:4214:12: warning: 'ice_resume' defined but not used [-Wunused-function] 4214 | static int ice_resume(struct device *dev) | ^~~~~~~~~~ drivers/net/ethernet/intel/ice/ice_main.c:4150:12: warning: 'ice_suspend' defined but not used [-Wunused-function] 4150 | static int ice_suspend(struct device *dev) | ^~~~~~~~~~~ Mark these functions as __maybe_unused to make it clear to the compiler that this is going to happen based on the configuration, which is the standard for these types of functions. Fixes: 769c500dcc1e ("ice: Add advanced power mgmt for WoL") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c0bde24ab344..bd2a2df25def 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4467,7 +4467,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) * Power Management callback to quiesce the device and prepare * for D3 transition. 
*/ -static int ice_suspend(struct device *dev) +static int __maybe_unused ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -4531,7 +4531,7 @@ static int ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int ice_resume(struct device *dev) +static int __maybe_unused ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; From 585cdabdfdb73b4434fcb100f5adf588dd777f2e Mon Sep 17 00:00:00 2001 From: Nick Nunley Date: Wed, 29 Jul 2020 17:19:10 -0700 Subject: [PATCH 1870/2056] ice: rename misleading grst_delay variable The grst_delay variable in ice_check_reset contains the maximum time (in 100 msec units) that the driver will wait for a reset event to transition to the Device Active state. The value is the sum of three separate components: 1) The maximum time it may take for the firmware to process its outstanding command before handling the reset request. 2) The value in RSTCTL.GRSTDEL (the delay firmware inserts between first seeing the driver reset request and the actual hardware assertion). 3) The maximum expected reset processing time in hardware. Referring to this total time as "grst_delay" is misleading and potentially confusing to someone checking the code and cross-referencing the hardware specification. Fix this by renaming the variable to "grst_timeout", which is more descriptive of its actual use. Signed-off-by: Nick Nunley Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_common.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index ad5941ec7b11..8a93fbd6f1be 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1027,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct ice_hw *hw) { - u32 cnt, reg = 0, grst_delay, uld_mask; + u32 cnt, reg = 0, grst_timeout, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. */ - grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> - GLGEN_RSTCTL_GRSTDEL_S) + 10; + grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> + GLGEN_RSTCTL_GRSTDEL_S) + 10; - for (cnt = 0; cnt < grst_delay; cnt++) { + for (cnt = 0; cnt < grst_timeout; cnt++) { mdelay(100); reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break; } - if (cnt == grst_delay) { + if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; From f07d134d3772d8b9546e3eb3cf4d13848d3af6e1 Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Wed, 29 Jul 2020 17:19:11 -0700 Subject: [PATCH 1871/2056] ice: fix the vsi_id mask to be 10 bit for set_rss_lut set_rss_lut can fail due to incorrect vsi_id mask. vsi_id is 10 bit but mask was 0x1FF whereas it should be 0x3FF. 
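As a quick standalone illustration (not driver code) of what the narrower mask does to a VSI number above 511:

#include <stdio.h>

int main(void)
{
	unsigned int vsi_num = 515;	/* any VSI number >= 512 shows the problem */

	printf("0x1FF mask: %u\n", vsi_num & 0x1FF);	/* 3   - bit 9 lost   */
	printf("0x3FF mask: %u\n", vsi_num & 0x3FF);	/* 515 - full 10 bits */
	return 0;
}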
For vsi_num >= 512, FW set_rss_lut can fail with return code EACCESS (VSI ownership issue) because software was providing incorrect vsi_num (dropping 10th bit due to incorrect mask) for set_rss_lut admin command Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7fec9309d379..ba9375218fef 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1581,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys { struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 -#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) __le16 vsi_id; #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ From cdedbab92db4697f5a1d222d2d022cd6af3930e3 Mon Sep 17 00:00:00 2001 From: Vignesh Sridhar Date: Wed, 29 Jul 2020 17:19:12 -0700 Subject: [PATCH 1872/2056] ice: Fix RSS profile locks Replacing flow profile locks with RSS profile locks in the function to remove all RSS rules for a given VSI. This is to align the locks used for RSS rule addition to VSI and removal during VSI teardown to avoid a race condition owing to several iterations of the above operations. In function to get RSS rules for given VSI and protocol header replacing the pointer reference of the RSS entry with a copy of hash value to ensure thread safety. Signed-off-by: Vignesh Sridhar Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_flow.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index d74e5290677f..fe677621dd51 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) if (list_empty(&hw->fl_profs[blk])) return 0; - mutex_lock(&hw->fl_profs_locks[blk]); + mutex_lock(&hw->rss_locks); list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry) if (test_bit(vsi_handle, p->vsis)) { status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); @@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) break; if (bitmap_empty(p->vsis, ICE_MAX_VSI)) { - status = ice_flow_rem_prof_sync(hw, blk, p); + status = ice_flow_rem_prof(hw, blk, p->id); if (status) break; } } - mutex_unlock(&hw->fl_profs_locks[blk]); + mutex_unlock(&hw->rss_locks); return status; } @@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) */ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) { - struct ice_rss_cfg *r, *rss_cfg = NULL; + u64 rss_hash = ICE_HASH_INVALID; + struct ice_rss_cfg *r; /* verify if the protocol header is non zero and VSI is valid */ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) @@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && r->packet_hdr == hdrs) { - rss_cfg = r; + rss_hash = r->hashed_flds; break; } mutex_unlock(&hw->rss_locks); 
- return rss_cfg ? rss_cfg->hashed_flds : ICE_HASH_INVALID; + return rss_hash; } From a4c493fea5b7c65fbe0e1bea68007c6c27dd9f75 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 29 Jul 2020 17:19:13 -0700 Subject: [PATCH 1873/2056] ice: remove page_reuse statistic The page reuse statistic wasn't even being displayed to the user, even though the driver counted it. Don't waste the struct space and hot-path cycles since the driver doesn't display it. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_txrx.c | 2 -- drivers/net/ethernet/intel/ice/ice_txrx.h | 1 - 2 files changed, 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index a508d4f463e9..53c67eeec2fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -632,7 +632,6 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) /* since we are recycling buffers we should seldom need to alloc */ if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; return true; } @@ -1033,7 +1032,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (ice_can_reuse_rx_page(rx_buf)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index e70c4619edc3..77c94daeb434 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,7 +193,6 @@ struct ice_rxq_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buf_failed; - u64 page_reuse_count; }; /* this enum matches hardware bits and is meant to be used by DYN_CTLN From a8fffd7ae9a538fde8eb8ed528a93575ddc2a122 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 29 Jul 2020 17:19:14 -0700 Subject: [PATCH 1874/2056] ice: add useful statistics Display and count some useful hot-path statistics. The usefulness is as follows: - tx_restart: use to determine if the transmit ring size is too small or if the transmit interrupt rate is too low. - rx_gro_dropped: use to count drops from GRO layer, which previously were completely uncounted when occurring. - tx_busy: use to determine when the driver is miscounting number of descriptors needed for an skb. - tx_timeout: as our other drivers, count the number of times we've reset due to timeout because the kernel only prints a warning once per netdev. Several of these were already counted but not displayed. 
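As a simplified sketch of the rx_gro_dropped accounting, condensed from the ice_receive_skb() hunk in ice_txrx_lib.c below (not a complete function):

static void count_gro_drop(struct ice_ring *rx_ring, struct sk_buff *skb)
{
	/* GRO reports a drop through napi_gro_receive()'s return value;
	 * counting it lets "ethtool -S" attribute stack drops that were
	 * previously invisible to the user.
	 */
	if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP)
		rx_ring->rx_stats.gro_dropped++;
}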
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice.h | 1 + drivers/net/ethernet/intel/ice/ice_ethtool.c | 4 ++++ drivers/net/ethernet/intel/ice/ice_main.c | 4 +++- drivers/net/ethernet/intel/ice/ice_txrx.h | 1 + drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 7 ++++++- 5 files changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 7d194d2a031a..fe140ff38f74 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -256,6 +256,7 @@ struct ice_vsi { u32 tx_busy; u32 rx_buf_failed; u32 rx_page_failed; + u32 rx_gro_dropped; u16 num_q_vectors; u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 06b93e97892d..9e8e9531cd87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), + ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped), ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), ICE_VSI_STAT("tx_linearize", tx_linearize), + ICE_VSI_STAT("tx_busy", tx_busy), + ICE_VSI_STAT("tx_restart", tx_restart), }; enum ice_ethtool_test_id { @@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast), ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast), ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors), + ICE_PF_STAT("tx_timeout.nic", tx_timeout_count), ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64), ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64), ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127), diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index bd2a2df25def..22bbd84eef64 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5290,6 +5290,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->tx_linearize = 0; vsi->rx_buf_failed = 0; vsi->rx_page_failed = 0; + vsi->rx_gro_dropped = 0; rcu_read_lock(); @@ -5304,6 +5305,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; } /* update XDP Tx rings counters */ @@ -5335,7 +5337,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) ice_update_eth_stats(vsi); cur_ns->tx_errors = cur_es->tx_errors; - cur_ns->rx_dropped = cur_es->rx_discards; + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; cur_ns->tx_dropped = cur_es->tx_discards; cur_ns->multicast = cur_es->rx_multicast; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 77c94daeb434..51b4df7a59d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,6 +193,7 @@ struct ice_rxq_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buf_failed; + u64 gro_dropped; /* GRO returned dropped */ }; /* this enum matches hardware bits and is meant to be used by DYN_CTLN diff --git 
a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 02b12736ea80..bc2f4390b51d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - napi_gro_receive(&rx_ring->q_vector->napi, skb); + if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) { + /* this is tracked separately to help us debug stack drops */ + rx_ring->rx_stats.gro_dropped++; + netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n", + rx_ring->q_index); + } } /** From ec1d1d2302067e3ccbc4d0adcd36d72410933b70 Mon Sep 17 00:00:00 2001 From: Vignesh Sridhar Date: Wed, 29 Jul 2020 17:19:15 -0700 Subject: [PATCH 1875/2056] ice: Clear and free XLT entries on reset This fix has been added to address memory leak issues resulting from triggering a sudden driver reset which does not allow us to follow our normal removal flows for SW XLT entries for advanced features. - Adding call to destroy flow profile locks when clearing SW XLT tables. - Extraction sequence entries were not correctly cleared previously which could cause ownership conflicts for repeated reset-replay calls. Fixes: 31ad4e4ee1e4 ("ice: Allocate flow profile") Signed-off-by: Vignesh Sridhar Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 3c217e51b27e..d59869b2c65e 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) ICE_FLOW_ENTRY_HNDL(e)); list_del(&p->l_entry); + + mutex_destroy(&p->entries_lock); devm_kfree(ice_hw_to_dev(hw), p); } mutex_unlock(&hw->fl_profs_locks[blk_idx]); @@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(prof_redir->t, 0, prof_redir->count * sizeof(*prof_redir->t)); - memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); memset(es->written, 0, es->count * sizeof(*es->written)); } From f34f55557ac9a4dfbfbf36c70585d1648ab5cd90 Mon Sep 17 00:00:00 2001 From: Brett Creeley Date: Wed, 29 Jul 2020 17:19:16 -0700 Subject: [PATCH 1876/2056] ice: Allow 2 queue pairs per VF on SR-IOV initialization Currently VFs are only allowed to get 16, 4, and 1 queue pair by default, which require 17, 5, and 2 MSI-X vectors respectively. This is because each VF needs a MSI-X per data queue and a MSI-X for its other interrupt. The calculation is based on the number of VFs created, MSI-X available, and queue pairs available at the time of VF creation. Unfortunately the values above exclude 2 queue pairs when only 3 MSI-X are available to each VF based on resource constraints. The current calculation would default to 2 MSI-X and 1 queue pair. This is a waste of resources, so fix this by allowing 2 queue pairs per VF when there are between 2 and 5 MSI-X available per VF. 
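For illustration, a small standalone program that walks the selection ladder after this change. The thresholds mirror the constants in the ice_virtchnl_pf.h hunk below, ICE_MIN_INTR_PER_VF is assumed to be 2, and one vector is assumed reserved for the VF's non-queue interrupt:

#include <stdio.h>

static int vf_msix(int avail_per_vf)
{
	if (avail_per_vf >= 17)
		return 17;	/* ICE_NUM_VF_MSIX_MED */
	if (avail_per_vf >= 5)
		return 5;	/* ICE_NUM_VF_MSIX_SMALL */
	if (avail_per_vf >= 3)
		return 3;	/* new: ICE_NUM_VF_MSIX_MULTIQ_MIN */
	if (avail_per_vf >= 2)
		return 2;	/* ICE_MIN_INTR_PER_VF */
	return 0;
}

int main(void)
{
	int avail;

	for (avail = 2; avail <= 6; avail++) {
		int msix = vf_msix(avail);

		printf("%d MSI-X available per VF -> %2d granted, %d queue pair(s)\n",
		       avail, msix, msix ? msix - 1 : 0);
	}
	return 0;
}

With the old ladder, 3 or 4 available vectors per VF fell through to the 2-vector/1-queue case; the new ICE_NUM_VF_MSIX_MULTIQ_MIN tier grants 3 vectors and 2 queue pairs instead.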
Signed-off-by: Brett Creeley Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 2 ++ drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 7061730f0f37..cfdd820e9a2a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf) num_msix_per_vf = ICE_NUM_VF_MSIX_MED; } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { num_msix_per_vf = ICE_MIN_INTR_PER_VF; } else { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 049e0b583383..0f519fba3770 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -32,6 +32,7 @@ #define ICE_MAX_RSS_QS_PER_VF 16 #define ICE_NUM_VF_MSIX_MED 17 #define ICE_NUM_VF_MSIX_SMALL 5 +#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_MAX_VF_RESET_TRIES 40 #define ICE_MAX_VF_RESET_SLEEP_MS 20 From 0a37abfa017a90cf0708f69022368cca6869416f Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Wed, 29 Jul 2020 17:19:17 -0700 Subject: [PATCH 1877/2056] ice: port fix for chk_linearlize This is a port of commit 248de22e638f ("i40e/i40evf: Account for frags split over multiple descriptors in check linearize") As part of testing workloads (read/write) using larger IO size (128K) tx_timeout is observed and whenever it happens, it was due to tx_linearize. Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_txrx.c | 26 ++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 53c67eeec2fa..77de8869e7ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -2292,10 +2292,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. 
+ */ + if (stale_size > ICE_MAX_DATA_PER_TXD) { + int align_pad = -(skb_frag_off(stale)) & + (ICE_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > ICE_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2303,7 +2323,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; From bcc46cb8a077c6189b44f1555b8659837f748eb2 Mon Sep 17 00:00:00 2001 From: Surabhi Boob Date: Wed, 29 Jul 2020 17:19:18 -0700 Subject: [PATCH 1878/2056] ice: Graceful error handling in HW table calloc failure In the ice_init_hw_tbls, if the devm_kcalloc for es->written fails, catch that error and bail out gracefully, instead of continuing with a NULL pointer. Fixes: 32d63fa1e9f3 ("ice: Initialize DDP package structures") Signed-off-by: Surabhi Boob Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index d59869b2c65e..5ceba009db16 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -3151,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->ref_count), GFP_KERNEL); + if (!es->ref_count) + goto err; es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); - if (!es->ref_count) + if (!es->written) goto err; } return 0; From 68d210a609a048dde1b7c23385d9bafb0061acb3 Mon Sep 17 00:00:00 2001 From: Nick Nunley Date: Wed, 29 Jul 2020 17:19:19 -0700 Subject: [PATCH 1879/2056] ice: Disable VLAN pruning in promiscuous mode Disable VLAN pruning when entering promiscuous mode, and re-enable it when exiting. Without this VLAN-over-bridge topologies created on the device won't be functional unless rx-vlan-filter is explicitly disabled with ethtool. Signed-off-by: Nick Nunley Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ice/ice_lib.c | 7 +++++++ drivers/net/ethernet/intel/ice/ice_main.c | 3 +++ 2 files changed, 10 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 84202c814c3b..f2682776f8c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2017,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) if (!vsi) return -EINVAL; + /* Don't enable VLAN pruning if the netdev is currently in promiscuous + * mode. VLAN pruning will be enabled when the interface exits + * promiscuous mode if any VLAN filters are active. 
+ */ + if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena) + return 0; + pf = vsi->back; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 22bbd84eef64..22e3d32463f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + ice_cfg_vlan_pruning(vsi, false, false); } } else { /* Clear Rx filter to remove traffic from wire */ @@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) IFF_PROMISC; goto out_promisc; } + if (vsi->num_vlan > 1) + ice_cfg_vlan_pruning(vsi, true, false); } } } From eddbee9b949a00675e19bf84eda52023da0059ef Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 29 Jul 2020 17:19:20 -0700 Subject: [PATCH 1880/2056] ice: update PTYPE lookup table Update the PTYPE lookup table to reflect values that can be set by the hardware. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers --- .../net/ethernet/intel/ice/ice_lan_tx_rx.h | 314 ++++++++++++++++++ 1 file changed, 314 insertions(+) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 14dfbbc1b2cf..4ec24c3e813f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -601,6 +601,7 @@ struct ice_tlan_ctx { /* shorter macros makes the table fit but are terse */ #define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG +#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG /* Lookup table mapping the HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { @@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { ICE_PTT_UNUSED_ENTRY(0), ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(3), + ICE_PTT_UNUSED_ENTRY(4), + ICE_PTT_UNUSED_ENTRY(5), + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(8), + ICE_PTT_UNUSED_ENTRY(9), + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(12), + ICE_PTT_UNUSED_ENTRY(13), + ICE_PTT_UNUSED_ENTRY(14), + ICE_PTT_UNUSED_ENTRY(15), + ICE_PTT_UNUSED_ENTRY(16), + ICE_PTT_UNUSED_ENTRY(17), + ICE_PTT_UNUSED_ENTRY(18), + ICE_PTT_UNUSED_ENTRY(19), + ICE_PTT_UNUSED_ENTRY(20), + ICE_PTT_UNUSED_ENTRY(21), + + /* Non Tunneled IPv4 */ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(25), + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(32), + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> IPv6 */ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(37, 
IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(39), + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(47), + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(54), + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(62), + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(69), + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(77), + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(84), + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + ICE_PTT(88, IP, IPV6, FRG, NONE, 
NONE, NOF, NONE, PAY3), + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT_UNUSED_ENTRY(91), + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(98), + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(105), + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(113), + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(120), + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(128), + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(135), + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + 
ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(143), + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(150), + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + ICE_PTT_UNUSED_ENTRY(154), + ICE_PTT_UNUSED_ENTRY(155), + ICE_PTT_UNUSED_ENTRY(156), + ICE_PTT_UNUSED_ENTRY(157), + ICE_PTT_UNUSED_ENTRY(158), + ICE_PTT_UNUSED_ENTRY(159), + + ICE_PTT_UNUSED_ENTRY(160), + ICE_PTT_UNUSED_ENTRY(161), + ICE_PTT_UNUSED_ENTRY(162), + ICE_PTT_UNUSED_ENTRY(163), + ICE_PTT_UNUSED_ENTRY(164), + ICE_PTT_UNUSED_ENTRY(165), + ICE_PTT_UNUSED_ENTRY(166), + ICE_PTT_UNUSED_ENTRY(167), + ICE_PTT_UNUSED_ENTRY(168), + ICE_PTT_UNUSED_ENTRY(169), + + ICE_PTT_UNUSED_ENTRY(170), + ICE_PTT_UNUSED_ENTRY(171), + ICE_PTT_UNUSED_ENTRY(172), + ICE_PTT_UNUSED_ENTRY(173), + ICE_PTT_UNUSED_ENTRY(174), + ICE_PTT_UNUSED_ENTRY(175), + ICE_PTT_UNUSED_ENTRY(176), + ICE_PTT_UNUSED_ENTRY(177), + ICE_PTT_UNUSED_ENTRY(178), + ICE_PTT_UNUSED_ENTRY(179), + + ICE_PTT_UNUSED_ENTRY(180), + ICE_PTT_UNUSED_ENTRY(181), + ICE_PTT_UNUSED_ENTRY(182), + ICE_PTT_UNUSED_ENTRY(183), + ICE_PTT_UNUSED_ENTRY(184), + ICE_PTT_UNUSED_ENTRY(185), + ICE_PTT_UNUSED_ENTRY(186), + ICE_PTT_UNUSED_ENTRY(187), + ICE_PTT_UNUSED_ENTRY(188), + ICE_PTT_UNUSED_ENTRY(189), + + ICE_PTT_UNUSED_ENTRY(190), + ICE_PTT_UNUSED_ENTRY(191), + ICE_PTT_UNUSED_ENTRY(192), + ICE_PTT_UNUSED_ENTRY(193), + ICE_PTT_UNUSED_ENTRY(194), + ICE_PTT_UNUSED_ENTRY(195), + ICE_PTT_UNUSED_ENTRY(196), + ICE_PTT_UNUSED_ENTRY(197), + ICE_PTT_UNUSED_ENTRY(198), + ICE_PTT_UNUSED_ENTRY(199), + + ICE_PTT_UNUSED_ENTRY(200), + ICE_PTT_UNUSED_ENTRY(201), + ICE_PTT_UNUSED_ENTRY(202), + ICE_PTT_UNUSED_ENTRY(203), + ICE_PTT_UNUSED_ENTRY(204), + ICE_PTT_UNUSED_ENTRY(205), + ICE_PTT_UNUSED_ENTRY(206), + ICE_PTT_UNUSED_ENTRY(207), + ICE_PTT_UNUSED_ENTRY(208), + ICE_PTT_UNUSED_ENTRY(209), + + ICE_PTT_UNUSED_ENTRY(210), + ICE_PTT_UNUSED_ENTRY(211), + ICE_PTT_UNUSED_ENTRY(212), + ICE_PTT_UNUSED_ENTRY(213), + ICE_PTT_UNUSED_ENTRY(214), + ICE_PTT_UNUSED_ENTRY(215), + ICE_PTT_UNUSED_ENTRY(216), + ICE_PTT_UNUSED_ENTRY(217), + ICE_PTT_UNUSED_ENTRY(218), + ICE_PTT_UNUSED_ENTRY(219), + + ICE_PTT_UNUSED_ENTRY(220), + ICE_PTT_UNUSED_ENTRY(221), + ICE_PTT_UNUSED_ENTRY(222), + ICE_PTT_UNUSED_ENTRY(223), + ICE_PTT_UNUSED_ENTRY(224), + ICE_PTT_UNUSED_ENTRY(225), + ICE_PTT_UNUSED_ENTRY(226), + ICE_PTT_UNUSED_ENTRY(227), + ICE_PTT_UNUSED_ENTRY(228), + ICE_PTT_UNUSED_ENTRY(229), + + ICE_PTT_UNUSED_ENTRY(230), + ICE_PTT_UNUSED_ENTRY(231), + ICE_PTT_UNUSED_ENTRY(232), + ICE_PTT_UNUSED_ENTRY(233), + ICE_PTT_UNUSED_ENTRY(234), + ICE_PTT_UNUSED_ENTRY(235), + ICE_PTT_UNUSED_ENTRY(236), + ICE_PTT_UNUSED_ENTRY(237), + ICE_PTT_UNUSED_ENTRY(238), + ICE_PTT_UNUSED_ENTRY(239), + + ICE_PTT_UNUSED_ENTRY(240), + ICE_PTT_UNUSED_ENTRY(241), + ICE_PTT_UNUSED_ENTRY(242), + ICE_PTT_UNUSED_ENTRY(243), + ICE_PTT_UNUSED_ENTRY(244), + ICE_PTT_UNUSED_ENTRY(245), + 
ICE_PTT_UNUSED_ENTRY(246), + ICE_PTT_UNUSED_ENTRY(247), + ICE_PTT_UNUSED_ENTRY(248), + ICE_PTT_UNUSED_ENTRY(249), + + ICE_PTT_UNUSED_ENTRY(250), + ICE_PTT_UNUSED_ENTRY(251), + ICE_PTT_UNUSED_ENTRY(252), + ICE_PTT_UNUSED_ENTRY(253), + ICE_PTT_UNUSED_ENTRY(254), + ICE_PTT_UNUSED_ENTRY(255), }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) From 6a2c2b2c1bcbc3bc3ad484916d84c16eb58a6f70 Mon Sep 17 00:00:00 2001 From: Victor Raj Date: Wed, 29 Jul 2020 17:19:21 -0700 Subject: [PATCH 1881/2056] ice: adjust profile ID map locks The profile ID map lock should be held till the caller completes all references of that profile entries. The current code releases the lock right after the match search. This caused a driver issue when the profile map entries were referenced after it was freed in other thread after the lock was released earlier. Signed-off-by: Victor Raj Tested-by: Andrew Bowers Signed-off-by: Tony Nguyen --- .../net/ethernet/intel/ice/ice_flex_pipe.c | 90 +++++++++---------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 5ceba009db16..2691dac0e159 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -3878,16 +3878,16 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], } /** - * ice_search_prof_id_low - Search for a profile tracking ID low level + * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * - * This will search for a profile tracking ID which was previously added. This - * version assumes that the caller has already acquired the prof map lock. + * This will search for a profile tracking ID which was previously added. + * The profile map lock should be held before calling this function. */ static struct ice_prof_map * -ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; @@ -3901,26 +3901,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) return entry; } -/** - * ice_search_prof_id - Search for a profile tracking ID - * @hw: pointer to the HW struct - * @blk: hardware block - * @id: profile tracking ID - * - * This will search for a profile tracking ID which was previously added. 
- */ -static struct ice_prof_map * -ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) -{ - struct ice_prof_map *entry; - - mutex_lock(&hw->blk[blk].es.prof_map_lock); - entry = ice_search_prof_id_low(hw, blk, id); - mutex_unlock(&hw->blk[blk].es.prof_map_lock); - - return entry; -} - /** * ice_vsig_prof_id_count - count profiles in a VSIG * @hw: pointer to the HW struct @@ -4137,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) mutex_lock(&hw->blk[blk].es.prof_map_lock); - pmap = ice_search_prof_id_low(hw, blk, id); + pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; @@ -4170,22 +4150,28 @@ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_get_prof; + } for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; + } p->type = ICE_PTG_ES_ADD; p->ptype = 0; @@ -4200,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, list_add(&p->list_entry, chg); } - return 0; - err_ice_get_prof: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ - return ICE_ERR_NO_MEMORY; + return status; } /** @@ -4258,17 +4243,23 @@ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_to_lst; + } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) - return ICE_ERR_NO_MEMORY; + if (!p) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_prof_to_lst; + } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; @@ -4282,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, list_add(&p->list, lst); - return 0; +err_ice_add_prof_to_lst: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; } /** @@ -4500,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; - /* Get the details on the profile specified by the handle ID */ - map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; - /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) return ICE_ERR_ALREADY_EXISTS; @@ -4519,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, if (!t) return ICE_ERR_NO_MEMORY; + mutex_lock(&hw->blk[blk].es.prof_map_lock); + 
/* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_id_vsig; + } + t->profile_cookie = map->profile_cookie; t->prof_id = map->prof_id; t->tcam_count = map->ptg_cnt; /* create TCAM entries */ for (i = 0; i < map->ptg_cnt; i++) { - enum ice_status status; u16 tcam_idx; /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_id_vsig; + } /* allocate the TCAM entry index */ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); @@ -4575,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, list_add(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); - return 0; + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; err_ice_add_prof_id_vsig: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ devm_kfree(ice_hw_to_dev(hw), t); - return ICE_ERR_NO_MEMORY; + return status; } /** From 7dbc63f0a5402293e887e89a7974c5e48405565d Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 29 Jul 2020 17:19:22 -0700 Subject: [PATCH 1882/2056] ice: Misc minor fixes This is a collection of minor fixes including typos, white space, and style. No functional changes. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers --- drivers/net/ethernet/intel/ice/ice_common.c | 5 ++--- drivers/net/ethernet/intel/ice/ice_devlink.c | 2 +- drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 2 +- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4 ++-- drivers/net/ethernet/intel/ice/ice_sched.c | 4 ++-- drivers/net/ethernet/intel/ice/ice_txrx.c | 7 +++---- drivers/net/ethernet/intel/ice/ice_type.h | 2 +- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 4 ++-- drivers/net/ethernet/intel/ice/ice_xsk.c | 2 -- 9 files changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 8a93fbd6f1be..34abfcea9858 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1718,8 +1718,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res) * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status -ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; @@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * @cap_count: the number of capabilities * * Helper device to parse device (0x000B) capabilities list. For - * capabilities shared between device and device, this relies on + * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index dbbd8b6f9d1a..111d6bfe4222 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf) * * Create and register a devlink_port for this PF. 
Note that although each * physical function is connected to a separate devlink instance, the port - * will still be numbered according to the physical function id. + * will still be numbered according to the physical function ID. * * Return: zero on success or an error code on failure. */ diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 2691dac0e159..b17ae3e20157 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * This function generates a key from a value, a don't care mask and a never * match mask. * upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> udp mask is all 1's (update all bits) + * upd == NULL --> upd mask is all 1's (update all bits) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 92e4abca62a4..90abc8612a6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -57,7 +57,7 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) -#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ +#define PRTDCB_TUP2TC 0x001D26C0 #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) @@ -362,6 +362,7 @@ #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define PRTRPB_RDPC 0x000AC260 #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) @@ -378,6 +379,5 @@ #define PFPM_WUS_FW_RST_WK_M BIT(31) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) -#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 355f727563e4..44a228530253 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return ICE_ERR_PARAM; } - /* query the current node information from FW before additing it + /* query the current node information from FW before adding it * to the SW DB */ status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); @@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) /** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct - * @opcode:opcode for add, query, or remove profile(s) + * @opcode: opcode for add, query, or remove profile(s) * @num_profiles: the number of profiles * @buf: pointer to buffer * @buf_size: buffer size in bytes diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 77de8869e7ca..9d0d6b0025cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -631,9 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) dma_addr_t dma; /* since we are recycling 
buffers we should seldom need to alloc */ - if (likely(page)) { + if (likely(page)) return true; - } /* alloc new page for storage */ page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); @@ -1252,12 +1251,12 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) * @itr: ITR value to update * * Calculate how big of an increment should be applied to the ITR value passed - * in based on wmem_default, SKB overhead, Ethernet overhead, and the current + * in based on wmem_default, SKB overhead, ethernet overhead, and the current * link speed. * * The following is a calculation derived from: * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 1eb83d9b0546..4cdccfadf274 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -321,7 +321,7 @@ struct ice_nvm_info { u32 flash_size; /* Size of available flash in bytes */ u8 major_ver; /* major version of NVM package */ u8 minor_ver; /* minor version of dev starter */ - u8 blank_nvm_mode; /* is NVM empty (no FW present) */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; struct ice_link_default_override_tlv { diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index cfdd820e9a2a..71497776ac62 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -2974,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame = qpi->rxq.max_pkt_size; } - /* VF can request to configure less than allocated queues - * or default allocated queues. So update the VSI with new number + /* VF can request to configure less than allocated queues or default + * allocated queues. So update the VSI with new number */ vsi->num_txq = num_txq; vsi->num_rxq = num_rxq; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 87862918bc7a..20ac5fca68c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid) } } - /** * ice_xsk_umem_disable - disable a UMEM region * @vsi: Current VSI @@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; rx_buf->xdp->data_end = rx_buf->xdp->data + size; xsk_buff_dma_sync_for_cpu(rx_buf->xdp); From 0e8642cf369a37b718c15effa6ffd52c00fd7d15 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 31 Jul 2020 19:09:29 -0700 Subject: [PATCH 1883/2056] tcp: fix build fong CONFIG_MPTCP=n Fixes these errors: net/ipv4/syncookies.c: In function 'tcp_get_cookie_sock': net/ipv4/syncookies.c:216:19: error: 'struct tcp_request_sock' has no member named 'drop_req' 216 | if (tcp_rsk(req)->drop_req) { | ^~ net/ipv4/syncookies.c: In function 'cookie_tcp_reqsk_alloc': net/ipv4/syncookies.c:289:27: warning: unused variable 'treq' [-Wunused-variable] 289 | struct tcp_request_sock *treq; | ^~~~ make[3]: *** [scripts/Makefile.build:280: net/ipv4/syncookies.o] Error 1 make[3]: *** Waiting for unfinished jobs.... 
Fixes: 9466a1ccebbe ("mptcp: enable JOIN requests even if cookies are in use") Signed-off-by: Eric Dumazet Cc: Florian Westphal Acked-by: Florian Westphal Signed-off-by: David S. Miller --- net/ipv4/syncookies.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 11b20474be83..f0794f0232ba 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -213,7 +213,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, tcp_sk(child)->tsoffset = tsoff; sock_rps_save_rxhash(child, skb); - if (tcp_rsk(req)->drop_req) { + if (rsk_drop_req(req)) { refcount_set(&req->rsk_refcnt, 2); return child; } @@ -286,10 +286,11 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk, struct sk_buff *skb) { - struct tcp_request_sock *treq; struct request_sock *req; #ifdef CONFIG_MPTCP + struct tcp_request_sock *treq; + if (sk_is_mptcp(sk)) ops = &mptcp_subflow_request_sock_ops; #endif From 8b66a6fd34f519f4ad42c4ab0152c3c0782a7dbd Mon Sep 17 00:00:00 2001 From: Brian Vazquez Date: Fri, 31 Jul 2020 20:01:10 -0700 Subject: [PATCH 1884/2056] fib: fix another fib_rules_ops indirect call wrapper problem It turns out that on commit 41d707b7332f ("fib: fix fib_rules_ops indirect calls wrappers") I forgot to include the case when CONFIG_IP_MULTIPLE_TABLES is not set. Fixes: 41d707b7332f ("fib: fix fib_rules_ops indirect calls wrappers") Reported-by: Randy Dunlap Cc: Stephen Rothwell Signed-off-by: Brian Vazquez Acked-by: Randy Dunlap # build-tested Signed-off-by: David S. Miller --- net/core/fib_rules.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index fce645f6b9b1..a7a3f500a857 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -17,10 +17,16 @@ #include #ifdef CONFIG_IPV6_MULTIPLE_TABLES +#ifdef CONFIG_IP_MULTIPLE_TABLES #define INDIRECT_CALL_MT(f, f2, f1, ...) \ INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__) #else +#define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__) +#endif +#elif CONFIG_IP_MULTIPLE_TABLES #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) +#else +#define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__) #endif static const struct fib_kuid_range fib_kuid_range_unset = { From 7126bd5c8bcbc015cf89864cf71d750e8f33f924 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 1 Aug 2020 16:39:59 +0200 Subject: [PATCH 1885/2056] mptcp: fix syncookie build error on UP kernel test robot says: net/mptcp/syncookies.c: In function 'mptcp_join_cookie_init': include/linux/kernel.h:47:38: warning: division by zero [-Wdiv-by-zero] #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) I forgot that spinock_t size is 0 on UP, so ARRAY_SIZE cannot be used. Fixes: 9466a1ccebbe54 ("mptcp: enable JOIN requests even if cookies are in use") Reported-by: kernel test robot Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/mptcp/syncookies.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c index 6eb992789b50..abe0fd099746 100644 --- a/net/mptcp/syncookies.c +++ b/net/mptcp/syncookies.c @@ -125,8 +125,6 @@ void __init mptcp_join_cookie_init(void) { int i; - for (i = 0; i < ARRAY_SIZE(join_entry_locks); i++) + for (i = 0; i < COOKIE_JOIN_SLOTS; i++) spin_lock_init(&join_entry_locks[i]); - - BUILD_BUG_ON(ARRAY_SIZE(join_entry_locks) != ARRAY_SIZE(join_entries)); } From 83d9dcba06c53e24e7dc47d51607d5cf9b50e5f9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 2 Aug 2020 02:56:37 +0200 Subject: [PATCH 1886/2056] netfilter: nf_tables: extended netlink error reporting for expressions This patch extends 36dd1bcc07e5 ("netfilter: nf_tables: initial support for extended ACK reporting") to include netlink extended error reporting for expressions. This allows userspace to identify what rule expression is triggering the error. Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0d96e4eb754d..fac552b0179f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2509,6 +2509,7 @@ int nft_expr_dump(struct sk_buff *skb, unsigned int attr, struct nft_expr_info { const struct nft_expr_ops *ops; + const struct nlattr *attr; struct nlattr *tb[NFT_EXPR_MAXATTR + 1]; }; @@ -2556,7 +2557,9 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, } else ops = type->ops; + info->attr = nla; info->ops = ops; + return 0; err1: @@ -3214,8 +3217,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, expr = nft_expr_first(rule); for (i = 0; i < n; i++) { err = nf_tables_newexpr(&ctx, &info[i], expr); - if (err < 0) + if (err < 0) { + NL_SET_BAD_ATTR(extack, info[i].attr); goto err2; + } if (info[i].ops->validate) nft_validate_state_update(net, NFT_VALIDATE_NEED); From 78470d9d0d9f2f8d16f28382a4071568e839c0d5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 2 Aug 2020 03:27:03 +0200 Subject: [PATCH 1887/2056] netfilter: nft_meta: fix iifgroup matching iifgroup matching erroneously checks the output interface. Fixes: 8724e819cc9a ("netfilter: nft_meta: move all interface related keys to helper") Reported-by: Demi M. Obenour Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_meta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 951b6e87ed5d..7bc6537f3ccb 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -253,7 +253,7 @@ static bool nft_meta_get_eval_ifname(enum nft_meta_keys key, u32 *dest, return false; break; case NFT_META_IIFGROUP: - if (!nft_meta_store_ifgroup(dest, nft_out(pkt))) + if (!nft_meta_store_ifgroup(dest, nft_in(pkt))) return false; break; case NFT_META_OIFGROUP: From 4939b2847d26c025e2e2118744226967f239a1ac Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 31 Jul 2020 15:09:14 -0700 Subject: [PATCH 1888/2056] bpf, selftests: Use single cgroup helpers for both test_sockmap/progs Nearly every user of cgroup helpers does the same sequence of API calls. So push these into a single helper cgroup_setup_and_join. The cases that do a bit of extra logic are test_progs which currently uses an env variable to decide if it needs to setup the cgroup environment or can use an existingi environment. 
And then tests that are doing cgroup tests themselves. We skip these cases for now. Signed-off-by: John Fastabend Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/159623335418.30208.15807461815525100199.stgit@john-XPS-13-9370 --- tools/testing/selftests/bpf/cgroup_helpers.c | 23 +++++++++++++++++++ tools/testing/selftests/bpf/cgroup_helpers.h | 1 + .../selftests/bpf/get_cgroup_id_user.c | 14 ++--------- .../selftests/bpf/test_cgroup_storage.c | 17 +------------- tools/testing/selftests/bpf/test_dev_cgroup.c | 15 ++---------- tools/testing/selftests/bpf/test_netcnt.c | 17 ++------------ .../selftests/bpf/test_skb_cgroup_id_user.c | 8 +------ tools/testing/selftests/bpf/test_sock.c | 8 +------ tools/testing/selftests/bpf/test_sock_addr.c | 8 +------ .../testing/selftests/bpf/test_sock_fields.c | 14 +++-------- .../selftests/bpf/test_socket_cookie.c | 8 +------ tools/testing/selftests/bpf/test_sockmap.c | 18 ++------------- tools/testing/selftests/bpf/test_sysctl.c | 8 +------ .../testing/selftests/bpf/test_tcpbpf_user.c | 8 +------ .../selftests/bpf/test_tcpnotify_user.c | 8 +------ 15 files changed, 43 insertions(+), 132 deletions(-) diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index 0fb910df5387..033051717ba5 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -290,3 +290,26 @@ unsigned long long get_cgroup_id(const char *path) free(fhp); return ret; } + +int cgroup_setup_and_join(const char *path) { + int cg_fd; + + if (setup_cgroup_environment()) { + fprintf(stderr, "Failed to setup cgroup environment\n"); + return -EINVAL; + } + + cg_fd = create_and_get_cgroup(path); + if (cg_fd < 0) { + fprintf(stderr, "Failed to create test cgroup\n"); + cleanup_cgroup_environment(); + return cg_fd; + } + + if (join_cgroup(path)) { + fprintf(stderr, "Failed to join cgroup\n"); + cleanup_cgroup_environment(); + return -EINVAL; + } + return cg_fd; +} diff --git a/tools/testing/selftests/bpf/cgroup_helpers.h b/tools/testing/selftests/bpf/cgroup_helpers.h index d64bb8957090..5fe3d88e4f0d 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.h +++ b/tools/testing/selftests/bpf/cgroup_helpers.h @@ -9,6 +9,7 @@ __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__) +int cgroup_setup_and_join(const char *path); int create_and_get_cgroup(const char *path); int join_cgroup(const char *path); int setup_cgroup_environment(void); diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c index e8da7b39158d..b8d6aef99db4 100644 --- a/tools/testing/selftests/bpf/get_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c @@ -58,20 +58,10 @@ int main(int argc, char **argv) int exit_code = 1; char buf[256]; - err = setup_cgroup_environment(); - if (CHECK(err, "setup_cgroup_environment", "err %d errno %d\n", err, - errno)) + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno)) return 1; - cgroup_fd = create_and_get_cgroup(TEST_CGROUP); - if (CHECK(cgroup_fd < 0, "create_and_get_cgroup", "err %d errno %d\n", - cgroup_fd, errno)) - goto cleanup_cgroup_env; - - err = join_cgroup(TEST_CGROUP); - if (CHECK(err, "join_cgroup", "err %d errno %d\n", err, errno)) - goto cleanup_cgroup_env; - err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", 
err, errno)) goto cleanup_cgroup_env; diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c index 655729004391..d946252a25bb 100644 --- a/tools/testing/selftests/bpf/test_cgroup_storage.c +++ b/tools/testing/selftests/bpf/test_cgroup_storage.c @@ -74,22 +74,7 @@ int main(int argc, char **argv) goto out; } - if (setup_cgroup_environment()) { - printf("Failed to setup cgroup environment\n"); - goto err; - } - - /* Create a cgroup, get fd, and join it */ - cgroup_fd = create_and_get_cgroup(TEST_CGROUP); - if (cgroup_fd < 0) { - printf("Failed to create test cgroup\n"); - goto err; - } - - if (join_cgroup(TEST_CGROUP)) { - printf("Failed to join cgroup\n"); - goto err; - } + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); /* Attach the bpf program */ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index d850fb9076b5..804dddd97d4c 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c @@ -33,21 +33,10 @@ int main(int argc, char **argv) goto out; } - if (setup_cgroup_environment()) { - printf("Failed to load DEV_CGROUP program\n"); - goto err; - } - - /* Create a cgroup, get fd, and join it */ - cgroup_fd = create_and_get_cgroup(TEST_CGROUP); + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); if (cgroup_fd < 0) { printf("Failed to create test cgroup\n"); - goto err; - } - - if (join_cgroup(TEST_CGROUP)) { - printf("Failed to join cgroup\n"); - goto err; + goto out; } /* Attach bpf program */ diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c index 7a68c9069639..a7b9a69f4fd5 100644 --- a/tools/testing/selftests/bpf/test_netcnt.c +++ b/tools/testing/selftests/bpf/test_netcnt.c @@ -58,22 +58,9 @@ int main(int argc, char **argv) goto out; } - if (setup_cgroup_environment()) { - printf("Failed to load bpf program\n"); + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + if (cgroup_fd < 0) goto err; - } - - /* Create a cgroup, get fd, and join it */ - cgroup_fd = create_and_get_cgroup(TEST_CGROUP); - if (cgroup_fd < 0) { - printf("Failed to create test cgroup\n"); - goto err; - } - - if (join_cgroup(TEST_CGROUP)) { - printf("Failed to join cgroup\n"); - goto err; - } /* Attach bpf program */ if (bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0)) { diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c index 356351c0ac28..4a64306728ab 100644 --- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c +++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c @@ -160,16 +160,10 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } - if (setup_cgroup_environment()) - goto err; - - cgfd = create_and_get_cgroup(CGROUP_PATH); + cgfd = cgroup_setup_and_join(CGROUP_PATH); if (cgfd < 0) goto err; - if (join_cgroup(CGROUP_PATH)) - goto err; - if (send_packet(argv[1])) goto err; diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index 52bf14955797..9613f7538840 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -464,16 +464,10 @@ int main(int argc, char **argv) int cgfd = -1; int err = 0; - if (setup_cgroup_environment()) - goto err; - - cgfd = create_and_get_cgroup(CG_PATH); + cgfd = cgroup_setup_and_join(CG_PATH); if (cgfd < 0) goto err; - if 
(join_cgroup(CG_PATH)) - goto err; - if (run_tests(cgfd)) goto err; diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index 0358814c67dc..b8c72c1d9cf7 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -1638,16 +1638,10 @@ int main(int argc, char **argv) exit(err); } - if (setup_cgroup_environment()) - goto err; - - cgfd = create_and_get_cgroup(CG_PATH); + cgfd = cgroup_setup_and_join(CG_PATH); if (cgfd < 0) goto err; - if (join_cgroup(CG_PATH)) - goto err; - if (run_tests(cgfd)) goto err; diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c index f0fc103261a4..6c9f269c396d 100644 --- a/tools/testing/selftests/bpf/test_sock_fields.c +++ b/tools/testing/selftests/bpf/test_sock_fields.c @@ -421,19 +421,11 @@ int main(int argc, char **argv) struct bpf_object *obj; struct bpf_map *map; - err = setup_cgroup_environment(); - CHECK(err, "setup_cgroup_environment()", "err:%d errno:%d", - err, errno); - - atexit(cleanup_cgroup_environment); - /* Create a cgroup, get fd, and join it */ - cgroup_fd = create_and_get_cgroup(TEST_CGROUP); - CHECK(cgroup_fd == -1, "create_and_get_cgroup()", + cgroup_fd = cgroup_setup_and_join(TEST_CGROUP); + CHECK(cgroup_fd < 0, "cgroup_setup_and_join()", "cgroup_fd:%d errno:%d", cgroup_fd, errno); - - err = join_cgroup(TEST_CGROUP); - CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno); + atexit(cleanup_cgroup_environment); err = bpf_prog_load_xattr(&attr, &obj, &egress_fd); CHECK(err, "bpf_prog_load_xattr()", "err:%d", err); diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c index 15653b0e26eb..154a8fd2a48d 100644 --- a/tools/testing/selftests/bpf/test_socket_cookie.c +++ b/tools/testing/selftests/bpf/test_socket_cookie.c @@ -191,16 +191,10 @@ int main(int argc, char **argv) int cgfd = -1; int err = 0; - if (setup_cgroup_environment()) - goto err; - - cgfd = create_and_get_cgroup(CG_PATH); + cgfd = cgroup_setup_and_join(CG_PATH); if (cgfd < 0) goto err; - if (join_cgroup(CG_PATH)) - goto err; - if (run_test(cgfd)) goto err; diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 78789b27e573..9b6fb00dc7a0 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -1963,23 +1963,9 @@ int main(int argc, char **argv) } if (!cg_fd) { - if (setup_cgroup_environment()) { - fprintf(stderr, "ERROR: cgroup env failed\n"); - return -EINVAL; - } - - cg_fd = create_and_get_cgroup(CG_PATH); - if (cg_fd < 0) { - fprintf(stderr, - "ERROR: (%i) open cg path failed: %s\n", - cg_fd, strerror(errno)); + cg_fd = cgroup_setup_and_join(CG_PATH); + if (cg_fd < 0) return cg_fd; - } - - if (join_cgroup(CG_PATH)) { - fprintf(stderr, "ERROR: failed to join cgroup\n"); - return -EINVAL; - } cg_created = 1; } diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index d196e2a4a6e0..a20a919244c0 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -1619,16 +1619,10 @@ int main(int argc, char **argv) int cgfd = -1; int err = 0; - if (setup_cgroup_environment()) - goto err; - - cgfd = create_and_get_cgroup(CG_PATH); + cgfd = cgroup_setup_and_join(CG_PATH); if (cgfd < 0) goto err; - if (join_cgroup(CG_PATH)) - goto err; - if (run_tests(cgfd)) goto err; diff --git 
a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 3ae127620463..74a9e49988b6 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -102,16 +102,10 @@ int main(int argc, char **argv) __u32 key = 0; int rv; - if (setup_cgroup_environment()) - goto err; - - cg_fd = create_and_get_cgroup(cg_path); + cg_fd = cgroup_setup_and_join(cg_path); if (cg_fd < 0) goto err; - if (join_cgroup(cg_path)) - goto err; - if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c index f9765ddf0761..8549b31716ab 100644 --- a/tools/testing/selftests/bpf/test_tcpnotify_user.c +++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c @@ -86,16 +86,10 @@ int main(int argc, char **argv) CPU_SET(0, &cpuset); pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset); - if (setup_cgroup_environment()) - goto err; - - cg_fd = create_and_get_cgroup(cg_path); + cg_fd = cgroup_setup_and_join(cg_path); if (cg_fd < 0) goto err; - if (join_cgroup(cg_path)) - goto err; - if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) { printf("FAILED: load_bpf_file failed for: %s\n", file); goto err; From 73b11c2ab072d5b0599d1e12cc126f55ee306daf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:26 -0700 Subject: [PATCH 1889/2056] bpf: Add support for forced LINK_DETACH command Add LINK_DETACH command to force-detach bpf_link without destroying it. It has the same behavior as auto-detaching of bpf_link due to cgroup dying for bpf_cgroup_link or net_device being destroyed for bpf_xdp_link. In such case, bpf_link is still a valid kernel object, but is defuncts and doesn't hold BPF program attached to corresponding BPF hook. This functionality allows users with enough access rights to manually force-detach attached bpf_link without killing respective owner process. This patch implements LINK_DETACH for cgroup, xdp, and netns links, mostly re-using existing link release handling code. 
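(Illustration only, not part of this patch: a minimal userspace sketch of the new command. It assumes uapi headers that already carry the BPF_LINK_DETACH enum value and the link_detach.link_fd attribute added by this patch; the helper name is made up, and obtaining the link fd and handling errno are left to the caller.)

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Force-detach the BPF program behind an existing bpf_link fd.
 * The link object stays alive until its fd is closed; returns 0 on
 * success, -1 with errno set otherwise (e.g. errno == EOPNOTSUPP for
 * link types that do not provide a detach callback).
 */
static int link_detach_raw(int link_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_detach.link_fd = link_fd;

	return syscall(__NR_bpf, BPF_LINK_DETACH, &attr, sizeof(attr));
}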
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-2-andriin@fb.com --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 5 +++++ kernel/bpf/cgroup.c | 15 ++++++++++++++- kernel/bpf/net_namespace.c | 8 ++++++++ kernel/bpf/syscall.c | 26 ++++++++++++++++++++++++++ net/core/dev.c | 11 ++++++++++- 6 files changed, 64 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 40c5e206ecf2..cef4ef0d2b4e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -793,6 +793,7 @@ struct bpf_link { struct bpf_link_ops { void (*release)(struct bpf_link *link); void (*dealloc)(struct bpf_link *link); + int (*detach)(struct bpf_link *link); int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog); void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index eb5e0c38eb2c..b134e679e9db 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -117,6 +117,7 @@ enum bpf_cmd { BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, + BPF_LINK_DETACH, }; enum bpf_map_type { @@ -634,6 +635,10 @@ union bpf_attr { __u32 old_prog_fd; } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index 957cce1d5168..83ff127ef7ae 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c @@ -814,6 +814,7 @@ static void bpf_cgroup_link_release(struct bpf_link *link) { struct bpf_cgroup_link *cg_link = container_of(link, struct bpf_cgroup_link, link); + struct cgroup *cg; /* link might have been auto-detached by dying cgroup already, * in that case our work is done here @@ -832,8 +833,12 @@ static void bpf_cgroup_link_release(struct bpf_link *link) WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link, cg_link->type)); + cg = cg_link->cgroup; + cg_link->cgroup = NULL; + mutex_unlock(&cgroup_mutex); - cgroup_put(cg_link->cgroup); + + cgroup_put(cg); } static void bpf_cgroup_link_dealloc(struct bpf_link *link) @@ -844,6 +849,13 @@ static void bpf_cgroup_link_dealloc(struct bpf_link *link) kfree(cg_link); } +static int bpf_cgroup_link_detach(struct bpf_link *link) +{ + bpf_cgroup_link_release(link); + + return 0; +} + static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link, struct seq_file *seq) { @@ -883,6 +895,7 @@ static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link, static const struct bpf_link_ops bpf_cgroup_link_lops = { .release = bpf_cgroup_link_release, .dealloc = bpf_cgroup_link_dealloc, + .detach = bpf_cgroup_link_detach, .update_prog = cgroup_bpf_replace, .show_fdinfo = bpf_cgroup_link_show_fdinfo, .fill_link_info = bpf_cgroup_link_fill_link_info, diff --git a/kernel/bpf/net_namespace.c b/kernel/bpf/net_namespace.c index 71405edd667c..542f275bf252 100644 --- a/kernel/bpf/net_namespace.c +++ b/kernel/bpf/net_namespace.c @@ -142,9 +142,16 @@ static void bpf_netns_link_release(struct bpf_link *link) bpf_prog_array_free(old_array); out_unlock: + net_link->net = NULL; mutex_unlock(&netns_bpf_mutex); } +static int bpf_netns_link_detach(struct bpf_link *link) +{ + bpf_netns_link_release(link); + return 0; +} + static void bpf_netns_link_dealloc(struct bpf_link *link) { struct bpf_netns_link *net_link = @@ -228,6 +235,7 @@ static void 
bpf_netns_link_show_fdinfo(const struct bpf_link *link, static const struct bpf_link_ops bpf_netns_link_ops = { .release = bpf_netns_link_release, .dealloc = bpf_netns_link_dealloc, + .detach = bpf_netns_link_detach, .update_prog = bpf_netns_link_update_prog, .fill_link_info = bpf_netns_link_fill_info, .show_fdinfo = bpf_netns_link_show_fdinfo, diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cd3d599e9e90..2f343ce15747 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3991,6 +3991,29 @@ static int link_update(union bpf_attr *attr) return ret; } +#define BPF_LINK_DETACH_LAST_FIELD link_detach.link_fd + +static int link_detach(union bpf_attr *attr) +{ + struct bpf_link *link; + int ret; + + if (CHECK_ATTR(BPF_LINK_DETACH)) + return -EINVAL; + + link = bpf_link_get_from_fd(attr->link_detach.link_fd); + if (IS_ERR(link)) + return PTR_ERR(link); + + if (link->ops->detach) + ret = link->ops->detach(link); + else + ret = -EOPNOTSUPP; + + bpf_link_put(link); + return ret; +} + static int bpf_link_inc_not_zero(struct bpf_link *link) { return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT; @@ -4240,6 +4263,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz case BPF_ITER_CREATE: err = bpf_iter_create(&attr); break; + case BPF_LINK_DETACH: + err = link_detach(&attr); + break; default: err = -EINVAL; break; diff --git a/net/core/dev.c b/net/core/dev.c index a2a57988880a..c8b911b10187 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8979,12 +8979,20 @@ static void bpf_xdp_link_release(struct bpf_link *link) /* if racing with net_device's tear down, xdp_link->dev might be * already NULL, in which case link was already auto-detached */ - if (xdp_link->dev) + if (xdp_link->dev) { WARN_ON(dev_xdp_detach_link(xdp_link->dev, NULL, xdp_link)); + xdp_link->dev = NULL; + } rtnl_unlock(); } +static int bpf_xdp_link_detach(struct bpf_link *link) +{ + bpf_xdp_link_release(link); + return 0; +} + static void bpf_xdp_link_dealloc(struct bpf_link *link) { struct bpf_xdp_link *xdp_link = container_of(link, struct bpf_xdp_link, link); @@ -9066,6 +9074,7 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog, static const struct bpf_link_ops bpf_xdp_link_lops = { .release = bpf_xdp_link_release, .dealloc = bpf_xdp_link_dealloc, + .detach = bpf_xdp_link_detach, .show_fdinfo = bpf_xdp_link_show_fdinfo, .fill_link_info = bpf_xdp_link_fill_link_info, .update_prog = bpf_xdp_link_update, From 2e49527e52486dac910460b1b3f6fce6e21c6b48 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:27 -0700 Subject: [PATCH 1890/2056] libbpf: Add bpf_link detach APIs Add low-level bpf_link_detach() API. Also add higher-level bpf_link__detach() one. 
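(Usage sketch, not taken from the patch: how a caller might pair the new high-level helper with existing libbpf APIs. bpf_program__attach_cgroup(), libbpf_get_error() and bpf_link__destroy() are pre-existing interfaces used here only for illustration; the wrapper function itself is made up.)

#include <stdio.h>
#include <bpf/libbpf.h>

/* Attach @prog to @cgroup_fd, then force-detach it later without
 * destroying the link object; bpf_link__destroy() is still needed to
 * free the link and its fd.
 */
static int attach_then_force_detach(struct bpf_program *prog, int cgroup_fd)
{
	struct bpf_link *link;
	int err;

	link = bpf_program__attach_cgroup(prog, cgroup_fd);
	err = libbpf_get_error(link);
	if (err)
		return err;

	/* ... program runs while attached ... */

	err = bpf_link__detach(link);	/* new API: detach, keep the link */
	if (err)
		fprintf(stderr, "bpf_link__detach failed: %d\n", err);

	bpf_link__destroy(link);
	return err;
}

bpf_link__detach() reports -errno on failure because it simply forwards the result of the low-level bpf_link_detach() wrapper shown in the diff below.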
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-3-andriin@fb.com --- tools/include/uapi/linux/bpf.h | 5 +++++ tools/lib/bpf/bpf.c | 10 ++++++++++ tools/lib/bpf/bpf.h | 2 ++ tools/lib/bpf/libbpf.c | 5 +++++ tools/lib/bpf/libbpf.h | 1 + tools/lib/bpf/libbpf.map | 2 ++ 6 files changed, 25 insertions(+) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index eb5e0c38eb2c..b134e679e9db 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -117,6 +117,7 @@ enum bpf_cmd { BPF_LINK_GET_NEXT_ID, BPF_ENABLE_STATS, BPF_ITER_CREATE, + BPF_LINK_DETACH, }; enum bpf_map_type { @@ -634,6 +635,10 @@ union bpf_attr { __u32 old_prog_fd; } link_update; + struct { + __u32 link_fd; + } link_detach; + struct { /* struct used by BPF_ENABLE_STATS command */ __u32 type; } enable_stats; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index e1bdf214f75f..eab14c97c15d 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -603,6 +603,16 @@ int bpf_link_create(int prog_fd, int target_fd, return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr)); } +int bpf_link_detach(int link_fd) +{ + union bpf_attr attr; + + memset(&attr, 0, sizeof(attr)); + attr.link_detach.link_fd = link_fd; + + return sys_bpf(BPF_LINK_DETACH, &attr, sizeof(attr)); +} + int bpf_link_update(int link_fd, int new_prog_fd, const struct bpf_link_update_opts *opts) { diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 6d367e01d05e..28855fd5b5f4 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -178,6 +178,8 @@ LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, const struct bpf_link_create_opts *opts); +LIBBPF_API int bpf_link_detach(int link_fd); + struct bpf_link_update_opts { size_t sz; /* size of this struct for forward/backward compatibility */ __u32 flags; /* extra flags */ diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index b9f11f854985..7be04e45d29c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7748,6 +7748,11 @@ struct bpf_link *bpf_link__open(const char *path) return link; } +int bpf_link__detach(struct bpf_link *link) +{ + return bpf_link_detach(link->fd) ? 
-errno : 0; +} + int bpf_link__pin(struct bpf_link *link, const char *path) { int err; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 9924385462ab..3ed1399bfbbc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -229,6 +229,7 @@ LIBBPF_API int bpf_link__unpin(struct bpf_link *link); LIBBPF_API int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog); LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); +LIBBPF_API int bpf_link__detach(struct bpf_link *link); LIBBPF_API int bpf_link__destroy(struct bpf_link *link); LIBBPF_API struct bpf_link * diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index ca49a6a7e5b2..099863411f7d 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -273,6 +273,8 @@ LIBBPF_0.0.9 { LIBBPF_0.1.0 { global: + bpf_link__detach; + bpf_link_detach; bpf_map__ifindex; bpf_map__key_size; bpf_map__map_flags; From 90806ccc90bbd0150267a97ae4003269597a6a6c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:28 -0700 Subject: [PATCH 1891/2056] selftests/bpf: Add link detach tests for cgroup, netns, and xdp bpf_links Add bpf_link__detach() testing to selftests for cgroup, netns, and xdp bpf_links. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-4-andriin@fb.com --- .../selftests/bpf/prog_tests/cgroup_link.c | 20 +++++++- .../selftests/bpf/prog_tests/sk_lookup.c | 51 +++++++++---------- .../selftests/bpf/prog_tests/xdp_link.c | 14 +++++ tools/testing/selftests/bpf/testing_helpers.c | 14 +++++ tools/testing/selftests/bpf/testing_helpers.h | 3 ++ 5 files changed, 73 insertions(+), 29 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c index 6e04f8d1d15b..4d9b514b3fd9 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_link.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_link.c @@ -2,6 +2,7 @@ #include #include "cgroup_helpers.h" +#include "testing_helpers.h" #include "test_cgroup_link.skel.h" static __u32 duration = 0; @@ -37,7 +38,8 @@ void test_cgroup_link(void) int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs); DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts); struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link; - __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags; + __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id; + struct bpf_link_info info; int i = 0, err, prog_fd; bool detach_legacy = false; @@ -219,6 +221,22 @@ void test_cgroup_link(void) /* BPF programs should still get called */ ping_and_check(0, cg_nr); + prog_id = link_info_prog_id(links[0], &info); + CHECK(prog_id == 0, "link_info", "failed\n"); + CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id); + + err = bpf_link__detach(links[0]); + if (CHECK(err, "link_detach", "failed %d\n", err)) + goto cleanup; + + /* cgroup_id should be zero in link_info */ + prog_id = link_info_prog_id(links[0], &info); + CHECK(prog_id == 0, "link_info", "failed\n"); + CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id); + + /* First BPF program shouldn't be called anymore */ + ping_and_check(0, cg_nr - 1); + /* leave cgroup and remove them, don't detach programs */ cleanup_cgroup_environment(); diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c 
b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 379da6f10ee9..c571584c00f5 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -34,6 +34,7 @@ #include "bpf_util.h" #include "cgroup_helpers.h" #include "network_helpers.h" +#include "testing_helpers.h" #include "test_sk_lookup.skel.h" /* External (address, port) pairs the client sends packets to. */ @@ -469,34 +470,10 @@ static int update_lookup_map(struct bpf_map *map, int index, int sock_fd) return 0; } -static __u32 link_info_prog_id(struct bpf_link *link) -{ - struct bpf_link_info info = {}; - __u32 info_len = sizeof(info); - int link_fd, err; - - link_fd = bpf_link__fd(link); - if (CHECK(link_fd < 0, "bpf_link__fd", "failed\n")) { - errno = -link_fd; - log_err("bpf_link__fd failed"); - return 0; - } - - err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len); - if (CHECK(err, "bpf_obj_get_info_by_fd", "failed\n")) { - log_err("bpf_obj_get_info_by_fd"); - return 0; - } - if (CHECK(info_len != sizeof(info), "bpf_obj_get_info_by_fd", - "unexpected info len %u\n", info_len)) - return 0; - - return info.prog_id; -} - static void query_lookup_prog(struct test_sk_lookup *skel) { struct bpf_link *link[3] = {}; + struct bpf_link_info info; __u32 attach_flags = 0; __u32 prog_ids[3] = {}; __u32 prog_cnt = 3; @@ -534,18 +511,36 @@ static void query_lookup_prog(struct test_sk_lookup *skel) if (CHECK(prog_cnt != 3, "bpf_prog_query", "wrong program count on query: %u", prog_cnt)) goto detach; - prog_id = link_info_prog_id(link[0]); + prog_id = link_info_prog_id(link[0], &info); CHECK(prog_ids[0] != prog_id, "bpf_prog_query", "invalid program #0 id on query: %u != %u\n", prog_ids[0], prog_id); - prog_id = link_info_prog_id(link[1]); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + prog_id = link_info_prog_id(link[1], &info); CHECK(prog_ids[1] != prog_id, "bpf_prog_query", "invalid program #1 id on query: %u != %u\n", prog_ids[1], prog_id); - prog_id = link_info_prog_id(link[2]); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + prog_id = link_info_prog_id(link[2], &info); CHECK(prog_ids[2] != prog_id, "bpf_prog_query", "invalid program #2 id on query: %u != %u\n", prog_ids[2], prog_id); + CHECK(info.netns.netns_ino == 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); + + err = bpf_link__detach(link[0]); + if (CHECK(err, "link_detach", "failed %d\n", err)) + goto detach; + + /* prog id is still there, but netns_ino is zeroed out */ + prog_id = link_info_prog_id(link[0], &info); + CHECK(prog_ids[0] != prog_id, "bpf_prog_query", + "invalid program #0 id on query: %u != %u\n", + prog_ids[0], prog_id); + CHECK(info.netns.netns_ino != 0, "netns_ino", + "unexpected netns_ino: %u\n", info.netns.netns_ino); detach: if (link[2]) diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_link.c b/tools/testing/selftests/bpf/prog_tests/xdp_link.c index 52cba6795d40..6f814999b395 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_link.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_link.c @@ -131,6 +131,20 @@ void test_xdp_link(void) CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex", "got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO); + err = bpf_link__detach(link); + if (CHECK(err, "link_detach", "failed %d\n", err)) + goto cleanup; + + memset(&link_info, 0, sizeof(link_info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), 
&link_info, &link_info_len); + if (CHECK(err, "link_info", "failed: %d\n", err)) + goto cleanup; + CHECK(link_info.prog_id != id1, "link_prog_id", + "got %u != exp %u\n", link_info.prog_id, id1); + /* ifindex should be zeroed out */ + CHECK(link_info.xdp.ifindex != 0, "link_ifindex", + "got %u != exp %u\n", link_info.xdp.ifindex, 0); + cleanup: test_xdp_link__destroy(skel1); test_xdp_link__destroy(skel2); diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 0af6337a8962..800d503e5cb4 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -64,3 +64,17 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len) return 0; } + +__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) +{ + __u32 info_len = sizeof(*info); + int err; + + memset(info, 0, sizeof(*info)); + err = bpf_obj_get_info_by_fd(bpf_link__fd(link), info, &info_len); + if (err) { + printf("failed to get link info: %d\n", -errno); + return 0; + } + return info->prog_id; +} diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index 923b51762759..d4f8e749611b 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -1,5 +1,8 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (C) 2020 Facebook, Inc. */ #include +#include +#include int parse_num_list(const char *s, bool **set, int *set_len); +__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info); From 0e8c7c07f090668566db2030a027a360ffd00938 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:29 -0700 Subject: [PATCH 1892/2056] tools/bpftool: Add `link detach` subcommand Add ability to force-detach BPF link. Also add missing error message, if specified link ID is wrong. 
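(Illustrative invocations only; the link id and pin path below are made up. LINK follows the existing bpftool link specifier, i.e. a link referenced either by id or by a bpffs pin:)

  # bpftool link detach id 42
  # bpftool link detach pinned /sys/fs/bpf/mylink

On success the command prints nothing (null in JSON mode); on failure it reports the detach error, or the new "failed to get link with ID" message when the id cannot be resolved.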
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731182830.286260-5-andriin@fb.com --- tools/bpf/bpftool/link.c | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 326b8fdf0243..1b793759170e 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -22,6 +22,8 @@ static const char * const link_type_name[] = { static int link_parse_fd(int *argc, char ***argv) { + int fd; + if (is_prefix(**argv, "id")) { unsigned int id; char *endptr; @@ -35,7 +37,10 @@ static int link_parse_fd(int *argc, char ***argv) } NEXT_ARGP(); - return bpf_link_get_fd_by_id(id); + fd = bpf_link_get_fd_by_id(id); + if (fd < 0) + p_err("failed to get link with ID %d: %s", id, strerror(errno)); + return fd; } else if (is_prefix(**argv, "pinned")) { char *path; @@ -316,6 +321,34 @@ static int do_pin(int argc, char **argv) return err; } +static int do_detach(int argc, char **argv) +{ + int err, fd; + + if (argc != 2) { + p_err("link specifier is invalid or missing\n"); + return 1; + } + + fd = link_parse_fd(&argc, &argv); + if (fd < 0) + return 1; + + err = bpf_link_detach(fd); + if (err) + err = -errno; + close(fd); + if (err) { + p_err("failed link detach: %s", strerror(-err)); + return 1; + } + + if (json_output) + jsonw_null(json_wtr); + + return 0; +} + static int do_help(int argc, char **argv) { if (json_output) { @@ -326,6 +359,7 @@ static int do_help(int argc, char **argv) fprintf(stderr, "Usage: %1$s %2$s { show | list } [LINK]\n" " %1$s %2$s pin LINK FILE\n" + " %1$s %2$s detach LINK\n" " %1$s %2$s help\n" "\n" " " HELP_SPEC_LINK "\n" @@ -341,6 +375,7 @@ static const struct cmd cmds[] = { { "list", do_show }, { "help", do_help }, { "pin", do_pin }, + { "detach", do_detach }, { 0 } }; From e85f99aa7760e74bb5a7e8515948f99c264a275f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 31 Jul 2020 11:28:30 -0700 Subject: [PATCH 1893/2056] tools/bpftool: Add documentation and bash-completion for `link detach` Add info on link detach sub-command to man page. Add detach to bash-completion as well. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Acked-by: John Fastabend Date: Fri, 31 Jul 2020 13:49:57 -0700 Subject: [PATCH 1894/2056] selftests/bpf: Fix spurious test failures in core_retro selftest core_retro selftest uses BPF program that's triggered on sys_enter system-wide, but has no protection from some unrelated process doing syscall while selftest is running. This leads to occasional test failures with unexpected PIDs being returned. Fix that by filtering out all processes that are not test_progs process. 
Fixes: fcda189a5133 ("selftests/bpf: Add test relying only on CO-RE and no recent kernel features") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200731204957.2047119-1-andriin@fb.com --- tools/testing/selftests/bpf/prog_tests/core_retro.c | 8 ++++++-- tools/testing/selftests/bpf/progs/test_core_retro.c | 13 +++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c index 78e30d3a23d5..6acb0e94d4d7 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_retro.c +++ b/tools/testing/selftests/bpf/prog_tests/core_retro.c @@ -6,7 +6,7 @@ void test_core_retro(void) { - int err, zero = 0, res, duration = 0; + int err, zero = 0, res, duration = 0, my_pid = getpid(); struct test_core_retro *skel; /* load program */ @@ -14,6 +14,10 @@ void test_core_retro(void) if (CHECK(!skel, "skel_load", "skeleton open/load failed\n")) goto out_close; + err = bpf_map_update_elem(bpf_map__fd(skel->maps.exp_tgid_map), &zero, &my_pid, 0); + if (CHECK(err, "map_update", "failed to set expected PID: %d\n", errno)) + goto out_close; + /* attach probe */ err = test_core_retro__attach(skel); if (CHECK(err, "attach_kprobe", "err %d\n", err)) @@ -26,7 +30,7 @@ void test_core_retro(void) if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno)) goto out_close; - CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid()); + CHECK(res != my_pid, "pid_check", "got %d != exp %d\n", res, my_pid); out_close: test_core_retro__destroy(skel); diff --git a/tools/testing/selftests/bpf/progs/test_core_retro.c b/tools/testing/selftests/bpf/progs/test_core_retro.c index 75c60c3c29cf..20861ec2f674 100644 --- a/tools/testing/selftests/bpf/progs/test_core_retro.c +++ b/tools/testing/selftests/bpf/progs/test_core_retro.c @@ -8,6 +8,13 @@ struct task_struct { int tgid; } __attribute__((preserve_access_index)); +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, int); +} exp_tgid_map SEC(".maps"); + struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); @@ -21,6 +28,12 @@ int handle_sys_enter(void *ctx) struct task_struct *task = (void *)bpf_get_current_task(); int tgid = BPF_CORE_READ(task, tgid); int zero = 0; + int real_tgid = bpf_get_current_pid_tgid() >> 32; + int *exp_tgid = bpf_map_lookup_elem(&exp_tgid_map, &zero); + + /* only pass through sys_enters from test process */ + if (!exp_tgid || *exp_tgid != real_tgid) + return 0; bpf_map_update_elem(&results, &zero, &tgid, 0); From 56b06d4da8128a73dd1568cf65cb57725cc71968 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 23 Jul 2020 17:32:14 +0100 Subject: [PATCH 1895/2056] rtlwifi: btcoex: remove redundant initialization of variables ant_num and single_ant_path The variables ant_num and single_ant_path are being initialized with a value that is never read and are being updated later with a new value. The initializations are redundant and can be removed. 
Addresses-Coverity: ("Unused value") Signed-off-by: Colin Ian King Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200723163214.995226-1-colin.king@canonical.com --- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index a4940a3842de..4949f99844b5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -1318,7 +1318,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter) { struct rtl_priv *rtlpriv = adapter; struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); - u8 ant_num = 2, chip_type, single_ant_path = 0; + u8 ant_num, chip_type, single_ant_path; if (!btcoexist) return false; From 1751a7352b632991600c3e7e604be3920a82b386 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 30 Jul 2020 18:40:26 +0300 Subject: [PATCH 1896/2056] rtlwifi: btcoex: use %*ph to print small buffer Use %*ph format to print small buffer as hex string. Signed-off-by: Andy Shevchenko Acked-by: Ping-Ke Shih Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200730154026.39901-1-andriy.shevchenko@linux.intel.com --- .../net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 4949f99844b5..2b140c1e8e8d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -894,11 +894,9 @@ static void halbtc_display_wifi_status(struct btc_coexist *btcoexist, (low_power ? ", 32k" : "")); seq_printf(m, - "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)", + "\n %-35s = %6ph (0x%x/0x%x)", "Power mode cmd(lps/rpwm)", - btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1], - btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3], - btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5], + btcoexist->pwr_mode_val, btcoexist->bt_info.lps_val, btcoexist->bt_info.rpwm_val); } From 614946480f8f7de9ad077bca36e0c433e426a3dd Mon Sep 17 00:00:00 2001 From: Xu Wang Date: Mon, 27 Jul 2020 02:04:05 +0000 Subject: [PATCH 1897/2056] mwifiex: 11n_rxreorder: Remove unnecessary cast in kfree() Remove unnecassary casts in the argument to kfree. 
Signed-off-by: Xu Wang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200727020405.8476-1-vulab@iscas.ac.cn --- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c index 0bdafe9f66db..1046b59647f5 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c @@ -398,7 +398,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *), GFP_KERNEL); if (!new_node->rx_reorder_ptr) { - kfree((u8 *) new_node); + kfree(new_node); mwifiex_dbg(priv->adapter, ERROR, "%s: failed to alloc reorder_ptr\n", __func__); return; From ba78405ecaac382d65d16dcd6798d8c64e264432 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 22 Jul 2020 12:27:07 +0200 Subject: [PATCH 1898/2056] p54: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'p54p_probe()', GFP_KERNEL can be used because it is the probe function and no spin_lock is taken in the between. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200722102707.27486-1-christophe.jaillet@wanadoo.fr --- drivers/net/wireless/intersil/p54/p54pci.c | 65 ++++++++++++---------- 1 file changed, 35 
insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c index 80ad0b7eaef4..9d96c8b8409d 100644 --- a/drivers/net/wireless/intersil/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -153,12 +153,12 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev, if (!skb) break; - mapping = pci_map_single(priv->pdev, + mapping = dma_map_single(&priv->pdev->dev, skb_tail_pointer(skb), priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(priv->pdev, mapping)) { + if (dma_mapping_error(&priv->pdev->dev, mapping)) { dev_kfree_skb_any(skb); dev_err(&priv->pdev->dev, "RX DMA Mapping error\n"); @@ -215,19 +215,22 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, len = priv->common.rx_mtu; } dma_addr = le32_to_cpu(desc->host_addr); - pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr, + priv->common.rx_mtu + 32, + DMA_FROM_DEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { - pci_unmap_single(priv->pdev, dma_addr, - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, dma_addr, + priv->common.rx_mtu + 32, + DMA_FROM_DEVICE); rx_buf[i] = NULL; desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); - pci_dma_sync_single_for_device(priv->pdev, dma_addr, - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&priv->pdev->dev, dma_addr, + priv->common.rx_mtu + 32, + DMA_FROM_DEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } @@ -258,8 +261,9 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, skb = tx_buf[i]; tx_buf[i] = NULL; - pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), - le16_to_cpu(desc->len), PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, + le32_to_cpu(desc->host_addr), + le16_to_cpu(desc->len), DMA_TO_DEVICE); desc->host_addr = 0; desc->device_addr = 0; @@ -334,9 +338,9 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) idx = le32_to_cpu(ring_control->host_idx[1]); i = idx % ARRAY_SIZE(ring_control->tx_data); - mapping = pci_map_single(priv->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, mapping)) { + mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, mapping)) { spin_unlock_irqrestore(&priv->lock, flags); p54_free_skb(dev, skb); dev_err(&priv->pdev->dev, "TX DMA mapping error\n"); @@ -378,10 +382,10 @@ static void p54p_stop(struct ieee80211_hw *dev) for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { desc = &ring_control->rx_data[i]; if (desc->host_addr) - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); kfree_skb(priv->rx_buf_data[i]); priv->rx_buf_data[i] = NULL; } @@ -389,10 +393,10 @@ static void p54p_stop(struct ieee80211_hw *dev) for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) { desc = &ring_control->rx_mgmt[i]; if (desc->host_addr) - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), priv->common.rx_mtu + 32, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); kfree_skb(priv->rx_buf_mgmt[i]); priv->rx_buf_mgmt[i] = NULL; } @@ -400,10 +404,10 @@ static void p54p_stop(struct ieee80211_hw *dev) for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); 
i++) { desc = &ring_control->tx_data[i]; if (desc->host_addr) - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); p54_free_skb(dev, priv->tx_buf_data[i]); priv->tx_buf_data[i] = NULL; @@ -412,10 +416,10 @@ static void p54p_stop(struct ieee80211_hw *dev) for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) { desc = &ring_control->tx_mgmt[i]; if (desc->host_addr) - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); p54_free_skb(dev, priv->tx_buf_mgmt[i]); priv->tx_buf_mgmt[i] = NULL; @@ -568,9 +572,9 @@ static int p54p_probe(struct pci_dev *pdev, goto err_disable_dev; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; @@ -603,8 +607,9 @@ static int p54p_probe(struct pci_dev *pdev, goto err_free_dev; } - priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), - &priv->ring_control_dma); + priv->ring_control = dma_alloc_coherent(&pdev->dev, + sizeof(*priv->ring_control), + &priv->ring_control_dma, GFP_KERNEL); if (!priv->ring_control) { dev_err(&pdev->dev, "Cannot allocate rings\n"); err = -ENOMEM; @@ -623,8 +628,8 @@ static int p54p_probe(struct pci_dev *pdev, if (!err) return 0; - pci_free_consistent(pdev, sizeof(*priv->ring_control), - priv->ring_control, priv->ring_control_dma); + dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); err_iounmap: iounmap(priv->map); @@ -653,8 +658,8 @@ static void p54p_remove(struct pci_dev *pdev) wait_for_completion(&priv->fw_loaded); p54_unregister_common(dev); release_firmware(priv->firmware); - pci_free_consistent(pdev, sizeof(*priv->ring_control), - priv->ring_control, priv->ring_control_dma); + dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), + priv->ring_control, priv->ring_control_dma); iounmap(priv->map); pci_release_regions(pdev); pci_disable_device(pdev); From 87b589a19901552a6d4b9976c17530d73e3c2dcf Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 14:12:24 +0200 Subject: [PATCH 1899/2056] prism54: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200719121224.58581-1-grandmaster@al2klimov.de --- drivers/net/wireless/intersil/prism54/isl_oid.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intersil/prism54/isl_oid.h b/drivers/net/wireless/intersil/prism54/isl_oid.h index 1afc2ccf94ca..b889bb73a485 100644 --- a/drivers/net/wireless/intersil/prism54/isl_oid.h +++ b/drivers/net/wireless/intersil/prism54/isl_oid.h @@ -143,7 +143,7 @@ enum dot11_priv_t { * together with a CSMA contention. 
Without this all frames are * sent with a CSMA contention. * Bibliography: - * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html + * https://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html */ enum dot11_maxframeburst_t { /* Values for DOT11_OID_MAXFRAMEBURST */ From 81cf72b74671ab945bed00ef8da78078c04998ce Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 21 Jul 2020 18:25:15 +0530 Subject: [PATCH 1900/2056] prism54: islpci_hotplug: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions as through the generic framework, PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200721125514.145607-1-vaibhavgupta40@gmail.com --- .../intersil/prism54/islpci_hotplug.c | 34 ++++++------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c index 20291c0d962d..e8369befe100 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c @@ -63,16 +63,17 @@ MODULE_DEVICE_TABLE(pci, prism54_id_tbl); static int prism54_probe(struct pci_dev *, const struct pci_device_id *); static void prism54_remove(struct pci_dev *); -static int prism54_suspend(struct pci_dev *, pm_message_t state); -static int prism54_resume(struct pci_dev *); +static int __maybe_unused prism54_suspend(struct device *); +static int __maybe_unused prism54_resume(struct device *); + +static SIMPLE_DEV_PM_OPS(prism54_pm_ops, prism54_suspend, prism54_resume); static struct pci_driver prism54_driver = { .name = DRV_NAME, .id_table = prism54_id_tbl, .probe = prism54_probe, .remove = prism54_remove, - .suspend = prism54_suspend, - .resume = prism54_resume, + .driver.pm = &prism54_pm_ops, }; /****************************************************************************** @@ -243,16 +244,13 @@ prism54_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static int -prism54_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused +prism54_suspend(struct device *dev) { - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; BUG_ON(!priv); - - pci_save_state(pdev); - /* tell the device not to trigger interrupts for now... */ isl38xx_disable_interrupts(priv->device_base); @@ -266,26 +264,16 @@ prism54_suspend(struct pci_dev *pdev, pm_message_t state) return 0; } -static int -prism54_resume(struct pci_dev *pdev) +static int __maybe_unused +prism54_resume(struct device *dev) { - struct net_device *ndev = pci_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); islpci_private *priv = ndev ? 
netdev_priv(ndev) : NULL; - int err; BUG_ON(!priv); printk(KERN_NOTICE "%s: got resume request\n", ndev->name); - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", - ndev->name); - return err; - } - - pci_restore_state(pdev); - /* alright let's go into the PREBOOT state */ islpci_reset(priv, 1); From 84d47961a02c4568774b0a4e4b1373377786691c Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 22 Jul 2020 12:45:34 +0200 Subject: [PATCH 1901/2056] prism54: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'islpci_alloc_memory()' (islpci_dev.c), GFP_KERNEL can be used because it is only called from a probe function and no spin_lock is taken in the between. The call chain is: prism54_probe (probe function, in 'islpci_hotplug.c') --> islpci_setup (in 'islpci_dev.c') --> islpci_alloc_memory (in 'islpci_dev.c') @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200722104534.30760-1-christophe.jaillet@wanadoo.fr --- .../wireless/intersil/prism54/islpci_dev.c | 30 +++++++++---------- .../wireless/intersil/prism54/islpci_eth.c | 24 +++++++-------- .../intersil/prism54/islpci_hotplug.c | 2 +- .../wireless/intersil/prism54/islpci_mgt.c | 21 ++++++------- 4 files changed, 36 insertions(+), 41 deletions(-) diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c 
b/drivers/net/wireless/intersil/prism54/islpci_dev.c index a9bae69222dc..efd64e555bb5 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_dev.c +++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c @@ -636,10 +636,10 @@ islpci_alloc_memory(islpci_private *priv) */ /* perform the allocation */ - priv->driver_mem_address = pci_alloc_consistent(priv->pdev, - HOST_MEM_BLOCK, - &priv-> - device_host_address); + priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev, + HOST_MEM_BLOCK, + &priv->device_host_address, + GFP_KERNEL); if (!priv->driver_mem_address) { /* error allocating the block of PCI memory */ @@ -692,11 +692,9 @@ islpci_alloc_memory(islpci_private *priv) /* map the allocated skb data area to pci */ priv->pci_map_rx_address[counter] = - pci_map_single(priv->pdev, (void *) skb->data, - MAX_FRAGMENT_SIZE_RX + 2, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pdev, - priv->pci_map_rx_address[counter])) { + dma_map_single(&priv->pdev->dev, (void *)skb->data, + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) { priv->pci_map_rx_address[counter] = 0; /* error mapping the buffer to device accessible memory address */ @@ -727,9 +725,9 @@ islpci_free_memory(islpci_private *priv) /* free consistent DMA area... */ if (priv->driver_mem_address) - pci_free_consistent(priv->pdev, HOST_MEM_BLOCK, - priv->driver_mem_address, - priv->device_host_address); + dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK, + priv->driver_mem_address, + priv->device_host_address); /* clear some dangling pointers */ priv->driver_mem_address = NULL; @@ -741,8 +739,8 @@ islpci_free_memory(islpci_private *priv) for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) { struct islpci_membuf *buf = &priv->mgmt_rx[counter]; if (buf->pci_addr) - pci_unmap_single(priv->pdev, buf->pci_addr, - buf->size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, buf->pci_addr, + buf->size, DMA_FROM_DEVICE); buf->pci_addr = 0; kfree(buf->mem); buf->size = 0; @@ -752,10 +750,10 @@ islpci_free_memory(islpci_private *priv) /* clean up data rx buffers */ for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) { if (priv->pci_map_rx_address[counter]) - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[counter], MAX_FRAGMENT_SIZE_RX + 2, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); priv->pci_map_rx_address[counter] = 0; if (priv->data_low_rx[counter]) diff --git a/drivers/net/wireless/intersil/prism54/islpci_eth.c b/drivers/net/wireless/intersil/prism54/islpci_eth.c index 8d680250a281..74dd65716afd 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_eth.c +++ b/drivers/net/wireless/intersil/prism54/islpci_eth.c @@ -50,9 +50,9 @@ islpci_eth_cleanup_transmit(islpci_private *priv, skb, skb->data, skb->len, skb->truesize); #endif - pci_unmap_single(priv->pdev, + dma_unmap_single(&priv->pdev->dev, priv->pci_map_tx_address[index], - skb->len, PCI_DMA_TODEVICE); + skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq(skb); skb = NULL; } @@ -176,10 +176,9 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) #endif /* map the skb buffer to pci memory for DMA operation */ - pci_map_address = pci_map_single(priv->pdev, - (void *) skb->data, skb->len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, pci_map_address)) { + pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) 
{ printk(KERN_WARNING "%s: cannot map buffer to PCI\n", ndev->name); goto drop_free; @@ -323,9 +322,8 @@ islpci_eth_receive(islpci_private *priv) #endif /* delete the streaming DMA mapping before processing the skb */ - pci_unmap_single(priv->pdev, - priv->pci_map_rx_address[index], - MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index], + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); /* update the skb structure and align the buffer */ skb_put(skb, size); @@ -431,11 +429,9 @@ islpci_eth_receive(islpci_private *priv) /* set the streaming DMA mapping for proper PCI bus operation */ priv->pci_map_rx_address[index] = - pci_map_single(priv->pdev, (void *) skb->data, - MAX_FRAGMENT_SIZE_RX + 2, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pdev, - priv->pci_map_rx_address[index])) { + dma_map_single(&priv->pdev->dev, (void *)skb->data, + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) { /* error mapping the buffer to device accessible memory address */ DEBUG(SHOW_ERROR_MESSAGES, "Error mapping DMA address\n"); diff --git a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c index e8369befe100..f30a79a0b9c7 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c @@ -107,7 +107,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* enable PCI DMA */ - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); goto do_pci_disable_device; } diff --git a/drivers/net/wireless/intersil/prism54/islpci_mgt.c b/drivers/net/wireless/intersil/prism54/islpci_mgt.c index e336eb106429..0c7fb76c7d1c 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_mgt.c +++ b/drivers/net/wireless/intersil/prism54/islpci_mgt.c @@ -115,10 +115,11 @@ islpci_mgmt_rx_fill(struct net_device *ndev) buf->size = MGMT_FRAME_SIZE; } if (buf->pci_addr == 0) { - buf->pci_addr = pci_map_single(priv->pdev, buf->mem, + buf->pci_addr = dma_map_single(&priv->pdev->dev, + buf->mem, MGMT_FRAME_SIZE, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) { printk(KERN_WARNING "Failed to make memory DMA'able.\n"); return -ENOMEM; @@ -203,9 +204,9 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid, #endif err = -ENOMEM; - buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pdev, buf.pci_addr)) { + buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len, + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) { printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n", ndev->name); goto error_free; @@ -302,8 +303,8 @@ islpci_mgt_receive(struct net_device *ndev) } /* Ensure the results of device DMA are visible to the CPU. */ - pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr, - buf->size, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr, + buf->size, DMA_FROM_DEVICE); /* Perform endianess conversion for PIMFOR header in-place. 
*/ header = pimfor_decode_header(buf->mem, frag_len); @@ -414,8 +415,8 @@ islpci_mgt_cleanup_transmit(struct net_device *ndev) for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) { int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE; struct islpci_membuf *buf = &priv->mgmt_tx[index]; - pci_unmap_single(priv->pdev, buf->pci_addr, buf->size, - PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size, + DMA_TO_DEVICE); buf->pci_addr = 0; kfree(buf->mem); buf->mem = NULL; From ae44fa993e8e6c1a1d22e5ca03d9eadd53b2745b Mon Sep 17 00:00:00 2001 From: Tsang-Shian Lin Date: Fri, 17 Jul 2020 14:49:31 +0800 Subject: [PATCH 1902/2056] rtw88: fix LDPC field for RA info Convert the type of the LDPC field to boolean because the LDPC field of the RA info H2C command to firmware is only one bit. Fixes: e3037485c68e ("rtw88: new Realtek 802.11ac driver") Signed-off-by: Tsang-Shian Lin Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-2-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 6478fd7a78f6..13e79482f6d5 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -456,7 +456,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); - SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en); + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); From 4dd86b901d1373ef8446ecb50a7ca009f3475211 Mon Sep 17 00:00:00 2001 From: Tsang-Shian Lin Date: Fri, 17 Jul 2020 14:49:32 +0800 Subject: [PATCH 1903/2056] rtw88: fix short GI capability based on current bandwidth Fix cases where transmissions are not sent with short GI even though the receiver supports short GI. If a VHT capability IE exists in the beacon, the original code uses the short GI for 80M field as the driver's short GI setting for transmission, even when the current bandwidth is not 80MHz. The short GI support fields for 20M/40M are carried in the HT capability information element, and the short GI support field for 80M is carried in the VHT capability information element. These three fields may be set to different values. The driver needs to record the short GI support field for each bandwidth, and send the value that matches the current bandwidth to the WiFi firmware.
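For reference, the per-bandwidth decision this change makes can be modelled as a small standalone helper. The struct below is only a simplified stand-in for the HT/VHT capability bits mac80211 reports per station, not the real struct ieee80211_sta layout:

#include <stdbool.h>

/* Illustrative stand-in for the per-station capability bits; the field
 * names do not match struct ieee80211_sta. */
struct sgi_caps {
        bool vht_supported, vht_sgi_80;
        bool ht_supported, ht_sgi_20, ht_sgi_40;
};

/* Pick the short GI support bit that matches the current bandwidth,
 * mirroring the switch statement the patch adds to rtw_update_sta_info(). */
static bool sgi_for_bandwidth(const struct sgi_caps *c, int bw_mhz)
{
        switch (bw_mhz) {
        case 80:
                return c->vht_supported && c->vht_sgi_80;
        case 40:
                return c->ht_supported && c->ht_sgi_40;
        default: /* 20 MHz */
                return c->ht_supported && c->ht_sgi_20;
        }
}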
Fixes: e3037485c68e ("rtw88: new Realtek 802.11ac driver") Signed-off-by: Tsang-Shian Lin Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-3-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/main.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 7304e8bc5e31..54044abf30d7 100644 --- a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -722,8 +722,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = VHT_STBC_EN; if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) ldpc_en = VHT_LDPC_EN; - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) - is_support_sgi = true; } else if (sta->ht_cap.ht_supported) { ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | (sta->ht_cap.mcs.rx_mask[0] << 12); @@ -731,9 +729,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) stbc_en = HT_STBC_EN; if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) ldpc_en = HT_LDPC_EN; - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 || - sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) - is_support_sgi = true; } if (efuse->hw_cap.nss == 1) @@ -775,12 +770,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si) switch (sta->bandwidth) { case IEEE80211_STA_RX_BW_80: bw_mode = RTW_CHANNEL_WIDTH_80; + is_support_sgi = sta->vht_cap.vht_supported && + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); break; case IEEE80211_STA_RX_BW_40: bw_mode = RTW_CHANNEL_WIDTH_40; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); break; default: bw_mode = RTW_CHANNEL_WIDTH_20; + is_support_sgi = sta->ht_cap.ht_supported && + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); break; } From d8e030c74e8394a9a452b34e3a8d95d9236b45c3 Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Fri, 17 Jul 2020 14:49:33 +0800 Subject: [PATCH 1904/2056] rtw88: update tx descriptor of mgmt and reserved page packets Previous settings for TX descriptors of mgmt and reserved page packets are insufficient. The sequence number of packets downloaded to the reserved page should be filled by hardware. For ps-poll packets in the reserved page, NAVUSEHDR should be set to prevent the AID from being changed by hardware. Additionally, the rate should be adjusted based on the current band for mgmt and reserved page packets.
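The per-type descriptor rules described above amount to a few lines of logic; the sketch below condenses them with illustrative names (the real flags live in struct rtw_tx_pkt_info, and the real enum is rtw_rsvd_packet_type):

#include <stdbool.h>

enum rsvd_type { RSVD_BEACON, RSVD_DUMMY, RSVD_PS_POLL, RSVD_QOS_NULL, RSVD_OTHER };

struct rsvd_desc_flags {
        bool nav_use_hdr;       /* ps-poll: NAV taken from the header, AID untouched */
        bool en_hwseq;          /* hardware fills in the sequence number */
        bool bt_null;           /* qos-null packet used for BT coex */
};

/* Condensed view of the per-type handling the patch adds in
 * rtw_tx_rsvd_page_pkt_info_update(). */
static struct rsvd_desc_flags rsvd_flags_for(enum rsvd_type type)
{
        struct rsvd_desc_flags f = {0};

        if (type == RSVD_PS_POLL)
                f.nav_use_hdr = true;   /* keep hardware from rewriting the AID */
        else
                f.en_hwseq = true;      /* sequence number filled by hardware */

        if (type == RSVD_QOS_NULL)
                f.bt_null = true;

        return f;
}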
Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-4-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/fw.c | 10 +-- drivers/net/wireless/realtek/rtw88/main.h | 2 + drivers/net/wireless/realtek/rtw88/tx.c | 104 +++++++++++++++------- drivers/net/wireless/realtek/rtw88/tx.h | 13 ++- 4 files changed, 90 insertions(+), 39 deletions(-) diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index 13e79482f6d5..c922b4bb8d72 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ b/drivers/net/wireless/realtek/rtw88/fw.c @@ -915,14 +915,14 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw, return skb_new; } -static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb) +static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, + enum rtw_rsvd_packet_type type) { - struct rtw_tx_pkt_info pkt_info; + struct rtw_tx_pkt_info pkt_info = {0}; struct rtw_chip_info *chip = rtwdev->chip; u8 *pkt_desc; - memset(&pkt_info, 0, sizeof(pkt_info)); - rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb); + rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type); pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); memset(pkt_desc, 0, chip->tx_pkt_desc_sz); rtw_tx_fill_tx_desc(&pkt_info, skb); @@ -1261,7 +1261,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size) * And iter->len will be added with size of tx_desc_sz. */ if (rsvd_pkt->add_txdesc) - rtw_fill_rsvd_page_desc(rtwdev, iter); + rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type); rsvd_pkt->skb = iter; rsvd_pkt->page = total_page; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 45ebc5a70b1e..276b5d381467 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -592,6 +592,8 @@ struct rtw_tx_pkt_info { bool dis_qselseq; bool en_hwseq; u8 hw_ssn_sel; + bool nav_use_hdr; + bool bt_null; }; struct rtw_rx_pkt_stat { diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index 79c42118825f..7fcc992b01a8 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -61,6 +61,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb) SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq); SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq); SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel); + SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr); + SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null); } EXPORT_SYMBOL(rtw_tx_fill_tx_desc); @@ -227,17 +229,58 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src) spin_unlock_irqrestore(&tx_report->q_lock, flags); } +static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb) +{ + if (rtwdev->hal.current_band_type == RTW_BAND_2G) { + pkt_info->rate_id = RTW_RATEID_B_20M; + pkt_info->rate = DESC_RATE1M; + } else { + pkt_info->rate_id = RTW_RATEID_G; + pkt_info->rate = DESC_RATE6M; + } + pkt_info->use_rate = true; + pkt_info->dis_rate_fallback = true; +} + +static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 sec_type = 0; + + if (info && info->control.hw_key) { + struct 
ieee80211_key_conf *key = info->control.hw_key; + + switch (key->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + sec_type = 0x01; + break; + case WLAN_CIPHER_SUITE_CCMP: + sec_type = 0x03; + break; + default: + break; + } + } + + pkt_info->sec_type = sec_type; +} + static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, struct ieee80211_sta *sta, struct sk_buff *skb) { - pkt_info->use_rate = true; - pkt_info->rate_id = 6; - pkt_info->dis_rate_fallback = true; + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); pkt_info->dis_qselseq = true; pkt_info->en_hwseq = true; pkt_info->hw_ssn_sel = 0; + /* TODO: need to change hw port and hw ssn sel for multiple vifs */ } static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev, @@ -312,7 +355,6 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, struct rtw_sta_info *si; struct ieee80211_vif *vif = NULL; __le16 fc = hdr->frame_control; - u8 sec_type = 0; bool bmc; if (sta) { @@ -325,23 +367,6 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, else if (ieee80211_is_data(fc)) rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb); - if (info->control.hw_key) { - struct ieee80211_key_conf *key = info->control.hw_key; - - switch (key->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - case WLAN_CIPHER_SUITE_WEP104: - case WLAN_CIPHER_SUITE_TKIP: - sec_type = 0x01; - break; - case WLAN_CIPHER_SUITE_CCMP: - sec_type = 0x03; - break; - default: - break; - } - } - bmc = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); @@ -349,7 +374,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, rtw_tx_report_enable(rtwdev, pkt_info); pkt_info->bmc = bmc; - pkt_info->sec_type = sec_type; + rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); pkt_info->tx_pkt_size = skb->len; pkt_info->offset = chip->tx_pkt_desc_sz; pkt_info->qsel = skb->priority; @@ -359,24 +384,42 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, rtw_tx_stats(rtwdev, vif, skb); } -void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, - struct rtw_tx_pkt_info *pkt_info, - struct sk_buff *skb) +void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb, + enum rtw_rsvd_packet_type type) { struct rtw_chip_info *chip = rtwdev->chip; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; bool bmc; + /* A beacon or dummy reserved page packet indicates that it is the first + * reserved page, and the qsel of it will be set in each hci. 
+ */ + if (type != RSVD_BEACON && type != RSVD_DUMMY) + pkt_info->qsel = TX_DESC_QSEL_MGMT; + + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); + bmc = is_broadcast_ether_addr(hdr->addr1) || is_multicast_ether_addr(hdr->addr1); - pkt_info->use_rate = true; - pkt_info->rate_id = 6; - pkt_info->dis_rate_fallback = true; pkt_info->bmc = bmc; pkt_info->tx_pkt_size = skb->len; pkt_info->offset = chip->tx_pkt_desc_sz; - pkt_info->qsel = TX_DESC_QSEL_MGMT; pkt_info->ls = true; + if (type == RSVD_PS_POLL) { + pkt_info->nav_use_hdr = true; + } else { + pkt_info->dis_qselseq = true; + pkt_info->en_hwseq = true; + pkt_info->hw_ssn_sel = 0; + } + if (type == RSVD_QOS_NULL) + pkt_info->bt_null = true; + + rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); + + /* TODO: need to change hw port and hw ssn sel for multiple vifs */ } struct sk_buff * @@ -399,8 +442,7 @@ rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev, skb_reserve(skb, tx_pkt_desc_sz); skb_put_data(skb, buf, size); - pkt_info->tx_pkt_size = size; - pkt_info->offset = tx_pkt_desc_sz; + rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON); return skb; } diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h index 72dfd4059f03..cfe84eef5923 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.h +++ b/drivers/net/wireless/realtek/rtw88/tx.h @@ -59,6 +59,10 @@ le32p_replace_bits((__le32 *)(txdesc) + 0x08, value, BIT(15)) #define SET_TX_DESC_HW_SSN_SEL(txdesc, value) \ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(7, 6)) +#define SET_TX_DESC_NAVUSEHDR(txdesc, value) \ + le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(15)) +#define SET_TX_DESC_BT_NULL(txdesc, value) \ + le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(23)) enum rtw_tx_desc_queue_select { TX_DESC_QSEL_TID0 = 0, @@ -83,6 +87,8 @@ enum rtw_tx_desc_queue_select { TX_DESC_QSEL_H2C = 19, }; +enum rtw_rsvd_packet_type; + void rtw_tx(struct rtw_dev *rtwdev, struct ieee80211_tx_control *control, struct sk_buff *skb); @@ -96,9 +102,10 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev, void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn); void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src); -void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, - struct rtw_tx_pkt_info *pkt_info, - struct sk_buff *skb); +void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, + struct rtw_tx_pkt_info *pkt_info, + struct sk_buff *skb, + enum rtw_rsvd_packet_type type); struct sk_buff * rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev, struct rtw_tx_pkt_info *pkt_info, From 3f194bd4ca1cd9b8eef34d37d562279dbeb80319 Mon Sep 17 00:00:00 2001 From: Yan-Hsuan Chuang Date: Fri, 17 Jul 2020 14:49:34 +0800 Subject: [PATCH 1905/2056] rtw88: coex: only skip coex triggered by BT info The coex mechanism used to be skipped whenever the freeze flag was raised. That caused the coex mechanism to be skipped unexpectedly. The freeze flag is only meant to keep the TDMA table from being changed by the BT side. So, check both the freeze flag and the reason: if the coex reason comes from BT info, skip it, so that coex runs triggered by WiFi itself can still work. This is required for AP mode, whose control flow differs from STA mode. When starting AP mode, it needs to start working immediately after leaving IPS, and the freeze flag could be raised.
If the coex info is skipped, then the AP mode will not set the antenna owner, leads to TX stuck. Fixes: 4136214f7c46 ("rtw88: add BT co-existence support") Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-5-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/coex.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c index 3abae32341c4..aa08fd7d9fcd 100644 --- a/drivers/net/wireless/realtek/rtw88/coex.c +++ b/drivers/net/wireless/realtek/rtw88/coex.c @@ -1962,7 +1962,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason) if (coex_stat->wl_under_ips) return; - if (coex->freeze && !coex_stat->bt_setup_link) + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && + !coex_stat->bt_setup_link) return; coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++; From 40b788d15832f5947b6d4e4ff3afea9af267bac6 Mon Sep 17 00:00:00 2001 From: Yan-Hsuan Chuang Date: Fri, 17 Jul 2020 14:49:35 +0800 Subject: [PATCH 1906/2056] rtw88: add ieee80211_ops::change_interface Sometimes mac80211 will just ask driver to change the interface instead of to remove and then add a new one. And for driver, to change the interface is just simply to remove and add a new one but change the type of it. Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-6-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/mac80211.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index c412bc54efde..bb82b044ff82 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -231,6 +231,23 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw, mutex_unlock(&rtwdev->mutex); } +static int rtw_ops_change_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum nl80211_iftype type, bool p2p) +{ + struct rtw_dev *rtwdev = hw->priv; + + rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n", + vif->addr, vif->type, type, vif->p2p, p2p); + + rtw_ops_remove_interface(hw, vif); + + vif->type = type; + vif->p2p = p2p; + + return rtw_ops_add_interface(hw, vif); +} + static void rtw_ops_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, @@ -827,6 +844,7 @@ const struct ieee80211_ops rtw_ops = { .config = rtw_ops_config, .add_interface = rtw_ops_add_interface, .remove_interface = rtw_ops_remove_interface, + .change_interface = rtw_ops_change_interface, .configure_filter = rtw_ops_configure_filter, .bss_info_changed = rtw_ops_bss_info_changed, .conf_tx = rtw_ops_conf_tx, From 752310ed2b406831d71aeaf493b488f90c673a3c Mon Sep 17 00:00:00 2001 From: Yan-Hsuan Chuang Date: Fri, 17 Jul 2020 14:49:36 +0800 Subject: [PATCH 1907/2056] rtw88: allows driver to enable/disable beacon Enable/disable beacon accordingly when the BSS info changed. 
Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-7-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/mac80211.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index bb82b044ff82..6b199152abcf 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -390,6 +390,15 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_BEACON) rtw_fw_download_rsvd_page(rtwdev); + if (changed & BSS_CHANGED_BEACON_ENABLED) { + if (conf->enable_beacon) + rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL, + BIT_EN_BCNQ_DL); + else + rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL, + BIT_EN_BCNQ_DL); + } + if (changed & BSS_CHANGED_MU_GROUPS) rtw_chip_set_gid_table(rtwdev, vif, conf); From c376c1fc87b77ae06ed6fa18d424ab548ac3a8fc Mon Sep 17 00:00:00 2001 From: Tzu-En Huang Date: Fri, 17 Jul 2020 14:49:37 +0800 Subject: [PATCH 1908/2056] rtw88: add h2c command in debugfs It's very useful to send H2C command for debug usage. Add an entry for sending H2C command to firmware. usage example: echo 42,00,00,43,02,00,00,00 > /sys/kernel/debug/ieee80211/phyX/rtw88 Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717064937.27966-8-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/debug.c | 30 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtw88/fw.c | 5 ++++ drivers/net/wireless/realtek/rtw88/fw.h | 2 ++ 3 files changed, 37 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c index 09f04feb8fe1..f769c982cc91 100644 --- a/drivers/net/wireless/realtek/rtw88/debug.c +++ b/drivers/net/wireless/realtek/rtw88/debug.c @@ -344,6 +344,31 @@ static ssize_t rtw_debugfs_set_write_reg(struct file *filp, return count; } +static ssize_t rtw_debugfs_set_h2c(struct file *filp, + const char __user *buffer, + size_t count, loff_t *loff) +{ + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; + char tmp[32 + 1]; + u8 param[8]; + int num; + + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); + + num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx", + ¶m[0], ¶m[1], ¶m[2], ¶m[3], + ¶m[4], ¶m[5], ¶m[6], ¶m[7]); + if (num != 8) { + rtw_info(rtwdev, "invalid H2C command format for debug\n"); + return -EINVAL; + } + + rtw_fw_h2c_cmd_dbg(rtwdev, param); + + return count; +} + static ssize_t rtw_debugfs_set_rf_write(struct file *filp, const char __user *buffer, size_t count, loff_t *loff) @@ -808,6 +833,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_write_reg = { .cb_write = rtw_debugfs_set_write_reg, }; +static struct rtw_debugfs_priv rtw_debug_priv_h2c = { + .cb_write = rtw_debugfs_set_h2c, +}; + static struct rtw_debugfs_priv rtw_debug_priv_rf_write = { .cb_write = rtw_debugfs_set_rf_write, }; @@ -877,6 +906,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev) rtw_debugfs_add_r(phy_info); rtw_debugfs_add_r(coex_info); rtw_debugfs_add_rw(coex_enable); + rtw_debugfs_add_w(h2c); rtw_debugfs_add_r(mac_0); rtw_debugfs_add_r(mac_1); rtw_debugfs_add_r(mac_2); diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c index c922b4bb8d72..63b00bc19000 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.c +++ 
b/drivers/net/wireless/realtek/rtw88/fw.c @@ -253,6 +253,11 @@ static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, spin_unlock(&rtwdev->h2c.lock); } +void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c) +{ + rtw_fw_send_h2c_command(rtwdev, h2c); +} + static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt) { int ret; diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h index 470e1809645a..686dcd3bbda6 100644 --- a/drivers/net/wireless/realtek/rtw88/fw.h +++ b/drivers/net/wireless/realtek/rtw88/fw.h @@ -563,4 +563,6 @@ void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable); void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, struct cfg80211_ssid *ssid); void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable); +void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c); + #endif From 9de6959f8584064de8c1d7392e3a6bb50a4a9c18 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Tue, 21 Jul 2020 21:49:01 +0800 Subject: [PATCH 1909/2056] rtw88: 8821c: make symbol 'rtw8821c_rtw_pwr_track_tbl' static The sparse tool complains as follows: drivers/net/wireless/realtek/rtw88/rtw8821c.c:1374:32: warning: symbol 'rtw8821c_rtw_pwr_track_tbl' was not declared. Should it be static? This variable is not used outside of rtw8821c.c, so this commit marks it static. Fixes: 3a4312828ce1 ("rtw88: 8821c: add power tracking") Reported-by: Hulk Robot Signed-off-by: Wei Yongjun Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200721134901.33968-1-weiyongjun1@huawei.com --- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index c41c61ee2fb6..5fef19f19ac1 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -1371,7 +1371,7 @@ static const u8 rtw8821c_pwrtrk_2g_cck_a_p[] = { 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9 }; -const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { +static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { .pwrtrk_5gb_n[0] = rtw8821c_pwrtrk_5gb_n[0], .pwrtrk_5gb_n[1] = rtw8821c_pwrtrk_5gb_n[1], .pwrtrk_5gb_n[2] = rtw8821c_pwrtrk_5gb_n[2], From 7b080e085943eb9b2862a04b0450afe17e286a21 Mon Sep 17 00:00:00 2001 From: Ping-Cheng Chen Date: Fri, 24 Jul 2020 13:42:08 +0800 Subject: [PATCH 1910/2056] rtw88: 8821c: coex: add functions and parameters Without this patch, RTL8821CE will not have coex support, and will crash the system because of the NULL pointers for the coex functions. While RTL8822C series are WiFi + BT combo chips, it needs the co-existence mechanism for the device to work on both WiFi and BT without interfering each other. And the coex support has already been added before, most of the mechanisms are implemented. The driver should just add corresponding functions to operate on different types of chips and its coex parameters. 
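The change follows the usual per-chip pattern of one ops table plus one block of tuning parameters; a generic sketch of that shape (field names are illustrative, not the real rtw_chip_ops/rtw_chip_info members) looks like:

/* Coex hooks the common code calls into; one instance per chip. */
struct chip_coex_ops {
        void (*coex_set_init)(void *chip_priv);
        void (*coex_set_ant_switch)(void *chip_priv, unsigned char ctrl_type,
                                    unsigned char pos_type);
        void (*coex_set_wl_tx_power)(void *chip_priv, unsigned char wl_pwr);
};

/* Per-chip tuning data referenced by the common coex algorithms. */
struct chip_coex_params {
        const unsigned int (*table_sant)[2];    /* shared-antenna coex tables */
        unsigned int table_sant_num;
        const unsigned int (*table_nsant)[2];   /* non-shared-antenna coex tables */
        unsigned int table_nsant_num;
};

/* Wiring up a new chip is then just filling in both blocks and pointing the
 * chip description at them; the common coex state machine stays generic and
 * never dereferences a missing (NULL) hook. */
struct chip_desc {
        const struct chip_coex_ops *coex_ops;
        const struct chip_coex_params *coex_params;
};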
Fixes: f745eb9ca5bf ("rtw88: 8821c: Add 8821CE to Kconfig and Makefile") Signed-off-by: Ping-Cheng Chen Signed-off-by: Tzu-En Huang Signed-off-by: Yan-Hsuan Chuang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200724054208.31115-1-yhchuang@realtek.com --- drivers/net/wireless/realtek/rtw88/reg.h | 1 + drivers/net/wireless/realtek/rtw88/rtw8821c.c | 403 ++++++++++++++++++ drivers/net/wireless/realtek/rtw88/rtw8821c.h | 26 ++ 3 files changed, 430 insertions(+) diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index f1275757f6b4..8f468d6b5f78 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -61,6 +61,7 @@ #define BIT_FSPI_EN BIT(19) #define BIT_EN_SIC BIT(12) #define BIT_BT_AOD_GPIO3 BIT(9) +#define BIT_PO_BT_PTA_PINS BIT(9) #define BIT_BT_PTA_EN BIT(5) #define BIT_WLRFE_4_5_EN BIT(2) diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index 5fef19f19ac1..d8863d8a5468 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -649,6 +649,197 @@ static void rtw8821c_phy_calibration(struct rtw_dev *rtwdev) rtw8821c_do_iqk(rtwdev); } +/* for coex */ +static void rtw8821c_coex_cfg_init(struct rtw_dev *rtwdev) +{ + /* enable TBTT nterrupt */ + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); + + /* BT report packet sample rate */ + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, SAMPLE_RATE_MASK, + SAMPLE_RATE); + + /* enable BT counter statistics */ + rtw_write8(rtwdev, REG_BT_STAT_CTRL, BT_CNT_ENABLE); + + /* enable PTA (3-wire function form BT side) */ + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); + + /* enable PTA (tx/rx signal form WiFi side) */ + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); + /* wl tx signal to PTA not case EDCCA */ + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); + /* GNT_BT=1 while select both */ + rtw_write16_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); + + /* beacon queue always hi-pri */ + rtw_write8_mask(rtwdev, REG_BT_COEX_TABLE_H + 3, BIT_BCN_QUEUE, + BCN_PRI_EN); +} + +static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, + u8 pos_type) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + u32 switch_status = FIELD_PREP(CTRL_TYPE_MASK, ctrl_type) | pos_type; + bool polarity_inverse; + u8 regval = 0; + + if (switch_status == coex_dm->cur_switch_status) + return; + + coex_dm->cur_switch_status = switch_status; + + if (coex_rfe->ant_switch_diversity && + ctrl_type == COEX_SWITCH_CTRL_BY_BBSW) + ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV; + + polarity_inverse = (coex_rfe->ant_switch_polarity == 1); + + switch (ctrl_type) { + default: + case COEX_SWITCH_CTRL_BY_BBSW: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, + DPDT_CTRL_PIN); + + if (pos_type == COEX_SWITCH_TO_WLG_BT) { + if (coex_rfe->rfe_module_type != 0x4 && + coex_rfe->rfe_module_type != 0x2) + regval = 0x3; + else + regval = (!polarity_inverse ? 0x2 : 0x1); + } else if (pos_type == COEX_SWITCH_TO_WLG) { + regval = (!polarity_inverse ? 0x2 : 0x1); + } else { + regval = (!polarity_inverse ? 
0x1 : 0x2); + } + + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15, + regval); + break; + case COEX_SWITCH_CTRL_BY_PTA: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, + PTA_CTRL_PIN); + + regval = (!polarity_inverse ? 0x2 : 0x1); + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15, + regval); + break; + case COEX_SWITCH_CTRL_BY_ANTDIV: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, + ANTDIC_CTRL_PIN); + break; + case COEX_SWITCH_CTRL_BY_MAC: + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + + regval = (!polarity_inverse ? 0x0 : 0x1); + rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA, + regval); + break; + case COEX_SWITCH_CTRL_BY_FW: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + break; + case COEX_SWITCH_CTRL_BY_BT: + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); + break; + } + + if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) { + rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1); + rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2); + } else { + rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1); + rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2); + } +} + +static void rtw8821c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) +{} + +static void rtw8821c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) +{ + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_SPI_EN); + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_JTAG_EN); + rtw_write32_clr(rtwdev, REG_GPIO_MUXCFG, BIT_FSPI_EN); + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_LED1DIS); + rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_SDIO_INT); + rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_DBG_GNT_WL_BT); +} + +static void rtw8821c_coex_cfg_rfe_type(struct rtw_dev *rtwdev) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_rfe *coex_rfe = &coex->rfe; + struct rtw_efuse *efuse = &rtwdev->efuse; + + coex_rfe->rfe_module_type = efuse->rfe_option; + coex_rfe->ant_switch_polarity = 0; + coex_rfe->ant_switch_exist = true; + coex_rfe->wlg_at_btg = false; + + switch (coex_rfe->rfe_module_type) { + case 0: + case 8: + case 1: + case 9: /* 1-Ant, Main, WLG */ + default: /* 2-Ant, DPDT, WLG */ + break; + case 2: + case 10: /* 1-Ant, Main, BTG */ + case 7: + case 15: /* 2-Ant, DPDT, BTG */ + coex_rfe->wlg_at_btg = true; + break; + case 3: + case 11: /* 1-Ant, Aux, WLG */ + coex_rfe->ant_switch_polarity = 1; + break; + case 4: + case 12: /* 1-Ant, Aux, BTG */ + coex_rfe->wlg_at_btg = true; + coex_rfe->ant_switch_polarity = 1; + break; + case 5: + case 13: /* 2-Ant, no switch, WLG */ + case 6: + case 14: /* 2-Ant, no antenna switch, WLG */ + coex_rfe->ant_switch_exist = false; + break; + } +} + +static void rtw8821c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) +{ + struct rtw_coex *coex = &rtwdev->coex; + struct rtw_coex_dm *coex_dm = &coex->dm; + struct rtw_efuse *efuse = &rtwdev->efuse; + bool share_ant = efuse->share_ant; + + if (share_ant) + return; + + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) + return; + + coex_dm->cur_wl_pwr_lvl = wl_pwr; +} + +static void rtw8821c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) +{} + static void 
rtw8821c_txagc_swing_offset(struct rtw_dev *rtwdev, u8 pwr_idx_offset, s8 pwr_idx_offset_lower, @@ -1293,8 +1484,166 @@ static struct rtw_chip_ops rtw8821c_ops = { .config_bfee = rtw8821c_bf_config_bfee, .set_gid_table = rtw_bf_set_gid_table, .cfg_csi_rate = rtw_bf_cfg_csi_rate, + + .coex_set_init = rtw8821c_coex_cfg_init, + .coex_set_ant_switch = rtw8821c_coex_cfg_ant_switch, + .coex_set_gnt_fix = rtw8821c_coex_cfg_gnt_fix, + .coex_set_gnt_debug = rtw8821c_coex_cfg_gnt_debug, + .coex_set_rfe_type = rtw8821c_coex_cfg_rfe_type, + .coex_set_wl_tx_power = rtw8821c_coex_cfg_wl_tx_power, + .coex_set_wl_rx_gain = rtw8821c_coex_cfg_wl_rx_gain, }; +/* rssi in percentage % (dbm = % - 100) */ +static const u8 wl_rssi_step_8821c[] = {101, 45, 101, 40}; +static const u8 bt_rssi_step_8821c[] = {101, 101, 101, 101}; + +/* Shared-Antenna Coex Table */ +static const struct coex_table_para table_sant_8821c[] = { + {0x55555555, 0x55555555}, /* case-0 */ + {0x55555555, 0x55555555}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xfafafafa, 0xfafafafa}, /* case-5 */ + {0x6a5a5555, 0xaaaaaaaa}, + {0x6a5a56aa, 0x6a5a56aa}, + {0x6a5a5a5a, 0x6a5a5a5a}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-10 */ + {0x66555555, 0xaaaaaaaa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0x6aaa6aaa}, + {0x66555555, 0x6a5a5aaa}, + {0x66555555, 0xaaaaaaaa}, /* case-15 */ + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x6afa5afa}, + {0xaaffffaa, 0xfafafafa}, + {0xaa5555aa, 0x5a5a5a5a}, + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ + {0xaa5555aa, 0xaaaaaaaa}, + {0xffffffff, 0x55555555}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x5a5a5a5a}, + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ + {0x55555555, 0x5a5a5a5a}, + {0x55555555, 0xaaaaaaaa}, + {0x66555555, 0x6a5a6a5a}, + {0x66556655, 0x66556655}, + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ + {0xffffffff, 0x5aaa5aaa}, + {0x56555555, 0x5a5a5aaa} +}; + +/* Non-Shared-Antenna Coex Table */ +static const struct coex_table_para table_nsant_8821c[] = { + {0xffffffff, 0xffffffff}, /* case-100 */ + {0xffff55ff, 0xfafafafa}, + {0x66555555, 0x66555555}, + {0xaaaaaaaa, 0xaaaaaaaa}, + {0x5a5a5a5a, 0x5a5a5a5a}, + {0xffffffff, 0xffffffff}, /* case-105 */ + {0x5afa5afa, 0x5afa5afa}, + {0x55555555, 0xfafafafa}, + {0x66555555, 0xfafafafa}, + {0x66555555, 0x5a5a5a5a}, + {0x66555555, 0x6a5a5a5a}, /* case-110 */ + {0x66555555, 0xaaaaaaaa}, + {0xffff55ff, 0xfafafafa}, + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0xaaaaaaaa}, + {0xffff55ff, 0xffff55ff}, /* case-115 */ + {0xaaffffaa, 0x5afa5afa}, + {0xaaffffaa, 0xaaaaaaaa}, + {0xffffffff, 0xfafafafa}, + {0xffff55ff, 0xfafafafa}, + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ + {0xffff55ff, 0x5afa5afa}, + {0xffff55ff, 0x5afa5afa}, + {0x55ff55ff, 0x55ff55ff} +}; + +/* Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_sant_8821c[] = { + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, + { {0x61, 0x35, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, /* case-5 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x35, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ + { {0x61, 0x08, 0x03, 0x11, 0x15} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ + { {0x51, 
0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x11, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, + { {0x51, 0x08, 0x03, 0x30, 0x54} }, + { {0x55, 0x08, 0x03, 0x10, 0x54} }, + { {0x65, 0x10, 0x03, 0x11, 0x10} }, + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x61, 0x08, 0x03, 0x11, 0x11} } +}; + +/* Non-Shared-Antenna TDMA */ +static const struct coex_tdma_para tdma_nsant_8821c[] = { + { {0x00, 0x00, 0x00, 0x40, 0x00} }, /* case-100 */ + { {0x61, 0x45, 0x03, 0x11, 0x11} }, + { {0x61, 0x25, 0x03, 0x11, 0x11} }, + { {0x61, 0x35, 0x03, 0x11, 0x11} }, + { {0x61, 0x20, 0x03, 0x11, 0x11} }, + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */ + { {0x61, 0x45, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x30, 0x03, 0x11, 0x10} }, + { {0x61, 0x20, 0x03, 0x11, 0x10} }, + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ + { {0x61, 0x10, 0x03, 0x11, 0x11} }, + { {0x61, 0x08, 0x03, 0x10, 0x14} }, + { {0x51, 0x08, 0x03, 0x10, 0x54} }, + { {0x51, 0x08, 0x03, 0x10, 0x55} }, + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ + { {0x51, 0x45, 0x03, 0x10, 0x50} }, + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, + { {0x51, 0x30, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, + { {0x51, 0x21, 0x03, 0x10, 0x50} }, /* case-120 */ + { {0x51, 0x10, 0x03, 0x10, 0x50} } +}; + +static const struct coex_5g_afh_map afh_5g_8821c[] = { {0, 0, 0} }; + +/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ +static const struct coex_rf_para rf_para_tx_8821c[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {8, 17, true, 4}, + {7, 18, true, 4}, + {6, 19, true, 4}, + {5, 20, true, 4} +}; + +static const struct coex_rf_para rf_para_rx_8821c[] = { + {0, 0, false, 7}, /* for normal */ + {0, 20, false, 7}, /* for WL-CPT */ + {3, 24, true, 5}, + {2, 26, true, 5}, + {1, 27, true, 5}, + {0, 28, true, 5} +}; + +static_assert(ARRAY_SIZE(rf_para_tx_8821c) == ARRAY_SIZE(rf_para_rx_8821c)); + static const u8 rtw8821c_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12}, @@ -1394,6 +1743,31 @@ static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { .pwrtrk_2g_ccka_p = rtw8821c_pwrtrk_2g_cck_a_p, }; +static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = { + {0xCB0, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0xCB4, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0xCBA, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16}, + {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8}, + {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32}, + {0x64, BIT(0), RTW_REG_DOMAIN_MAC8}, + {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8}, + {0x40, BIT(5), RTW_REG_DOMAIN_MAC8}, + {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_A}, + {0, 0, RTW_REG_DOMAIN_NL}, + {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32}, + {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x953, BIT(1), RTW_REG_DOMAIN_MAC8}, + {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, + {0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, +}; + struct rtw_chip_info rtw8821c_hw_spec = { .ops = &rtw8821c_ops, .id = RTW_CHIP_TYPE_8821C, @@ -1440,6 +1814,35 @@ struct 
rtw_chip_info rtw8821c_hw_spec = { .iqk_threshold = 8, .bfer_su_max_num = 2, .bfer_mu_max_num = 1, + + .coex_para_ver = 0x19092746, + .bt_desired_ver = 0x46, + .scbd_support = true, + .new_scbd10_def = false, + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, + .bt_rssi_type = COEX_BTRSSI_RATIO, + .ant_isolation = 15, + .rssi_tolerance = 2, + .wl_rssi_step = wl_rssi_step_8821c, + .bt_rssi_step = bt_rssi_step_8821c, + .table_sant_num = ARRAY_SIZE(table_sant_8821c), + .table_sant = table_sant_8821c, + .table_nsant_num = ARRAY_SIZE(table_nsant_8821c), + .table_nsant = table_nsant_8821c, + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8821c), + .tdma_sant = tdma_sant_8821c, + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8821c), + .tdma_nsant = tdma_nsant_8821c, + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8821c), + .wl_rf_para_tx = rf_para_tx_8821c, + .wl_rf_para_rx = rf_para_rx_8821c, + .bt_afh_span_bw20 = 0x24, + .bt_afh_span_bw40 = 0x36, + .afh_5g_num = ARRAY_SIZE(afh_5g_8821c), + .afh_5g = afh_5g_8821c, + + .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8821c), + .coex_info_hw_regs = coex_info_hw_regs_8821c, }; EXPORT_SYMBOL(rtw8821c_hw_spec); diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h index 166bd0e9294e..bd01e82b6bcd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.h +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.h @@ -160,6 +160,18 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8)) #define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12)) +#define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0)) +#define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8)) +#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0)) +#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8)) +#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0)) +#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \ + le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8)) #define REG_INIRTS_RATE_SEL 0x0480 #define REG_HTSTFWT 0x800 @@ -217,6 +229,20 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data) #define REG_CCA_CCK 0xfcc #define REG_ANTWT 0x1904 #define REG_IQKFAILMSK 0x1bf0 +#define BIT_MASK_R_RFE_SEL_15 GENMASK(31, 28) +#define BIT_SDIO_INT BIT(18) +#define SAMPLE_RATE_MASK GENMASK(5, 0) +#define SAMPLE_RATE 0x5 +#define BT_CNT_ENABLE 0x1 +#define BIT_BCN_QUEUE BIT(3) +#define BCN_PRI_EN 0x1 +#define PTA_CTRL_PIN 0x66 +#define DPDT_CTRL_PIN 0x77 +#define ANTDIC_CTRL_PIN 0x88 +#define REG_CTRL_TYPE 0x67 +#define BIT_CTRL_TYPE1 BIT(5) +#define BIT_CTRL_TYPE2 BIT(4) +#define CTRL_TYPE_MASK GENMASK(15, 8) #define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8)) #define RF18_BAND_2G (0) From 3f4600de8c93917594a8b3c9ca713160ee4d563c Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 25 Jun 2020 18:52:10 +0200 Subject: [PATCH 1911/2056] iwlwifi: yoyo: don't print failure if debug firmware is missing Missing this firmware is not fatal, my wifi card still works. Even more, I couldn't find any documentation what it is or where to get it. So, I don't think the users should be notified if it is missing. 
If you browse the net, you see the message is present in quite some logs. Better remove it. Signed-off-by: Wolfram Sang Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200625165210.14904-1-wsa@kernel.org --- drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 7987a288917b..f180db2936e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -468,7 +468,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans) if (!iwlwifi_mod_params.enable_ini) return; - res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev); + res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); if (res) return; From 880e21490be68ef9cc8185962e9b5722a0f1fc4d Mon Sep 17 00:00:00 2001 From: Navid Emamdoost Date: Sat, 18 Jul 2020 00:26:29 -0500 Subject: [PATCH 1912/2056] mt7601u: add missing release on skb in mt7601u_mcu_msg_send In the implementation of mt7601u_mcu_msg_send(), skb is supposed to be consumed on all execution paths. Release skb before returning if test_bit() fails. Signed-off-by: Navid Emamdoost Acked-by: Jakub Kicinski Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200718052630.11032-1-navid.emamdoost@gmail.com --- drivers/net/wireless/mediatek/mt7601u/mcu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index af55ed82b96f..1b5cc271a9e1 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -116,8 +116,10 @@ mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb, int sent, ret; u8 seq = 0; - if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) { + consume_skb(skb); return 0; + } mutex_lock(&dev->mcu.mutex); From c83e2a6e2fbbb7d47ea46fd7cb1f01292f2d0b6e Mon Sep 17 00:00:00 2001 From: Ajay Singh Date: Fri, 17 Jul 2020 05:11:38 +0000 Subject: [PATCH 1913/2056] wilc1000: Move wilc1000 SDIO ID's from driver source to common header file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moved the macros used for the Vendor/Device ID from the wilc1000 driver to the common header file and changed the macro names for consistency with other macros.
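For context, an SDIO function driver consumes these shared constants roughly as follows; this is a minimal sketch with hypothetical names, not the actual wilc1000 probe code:

#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

/* Match table built from the IDs now kept in <linux/mmc/sdio_ids.h>. */
static const struct sdio_device_id example_sdio_ids[] = {
        { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(sdio, example_sdio_ids);

static int example_probe(struct sdio_func *func, const struct sdio_device_id *id)
{
        /* claim the function and set up the device here */
        return 0;
}

static void example_remove(struct sdio_func *func)
{
        /* tear down whatever probe set up */
}

static struct sdio_driver example_sdio_driver = {
        .name           = "example_sdio",
        .id_table       = example_sdio_ids,
        .probe          = example_probe,
        .remove         = example_remove,
};
module_sdio_driver(example_sdio_driver);

MODULE_LICENSE("GPL");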
Signed-off-by: Ajay Singh Acked-by: Ulf Hansson Acked-by: Pali Rohár Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717051134.19160-1-ajay.kathat@microchip.com --- drivers/net/wireless/microchip/wilc1000/sdio.c | 6 ++---- include/linux/mmc/sdio_ids.h | 3 +++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 36eb589263bf..3ece7b0b0392 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -15,11 +16,8 @@ #define SDIO_MODALIAS "wilc1000_sdio" -#define SDIO_VENDOR_ID_WILC 0x0296 -#define SDIO_DEVICE_ID_WILC 0x5347 - static const struct sdio_device_id wilc_sdio_ids[] = { - { SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) }, + { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, { }, }; diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 15ed8ce9d394..519820d18e62 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -105,6 +105,9 @@ #define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 #define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 +#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 +#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 + #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 From 560a218d1ce6ea36df6fb943a1065181df90a4d6 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Fri, 17 Jul 2020 16:39:29 +0530 Subject: [PATCH 1914/2056] rt2x00: pci: use generic power management Drivers using legacy PM have to manage PCI states and device's PM states themselves. They also need to take care of configuration registers. With improved and powerful support of generic PM, PCI Core takes care of above mentioned, device-independent, jobs. The callbacks make use of PCI helper functions like pci_save/restore_state(), pci_enable/disable_device() and pci_set_power_state() to do required operations. In generic mode, they are no longer needed. Change function parameter in both .suspend() and .resume() to "struct device*" type. Use dev_get_drvdata() to get drv data. The .suspend() callback is invoking rt2x00lib_suspend() which needs to be modified as generic rt2x00pci_suspend() has no pm_message_t type argument, passed to it, which is required by it according to its declaration. Although this variable remained unused in the function body. Hence, remove it from the function definition & declaration. rt2x00lib_suspend() is also invoked by rt2x00usb_suspend() and rt2x00soc_suspend(). Thus, modify the functional call accordingly in their function body. Earlier, .suspend() & .resume() were exported and were used by the following drivers: - drivers/net/wireless/ralink/rt2x00/rt2400pci.c - drivers/net/wireless/ralink/rt2x00/rt2500pci.c - drivers/net/wireless/ralink/rt2x00/rt2800pci.c - drivers/net/wireless/ralink/rt2x00/rt61pci.c Now, we only need to bind "struct dev_pm_ops" variable to "struct pci_driver". Thus, make the callbacks static. Declare an "extern const struct dev_pm_ops" variable and bind PM callbacks to it. Now, export the variable instead and use it in respective drivers. Compile-tested only. 
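The pattern the conversion lands on is the same for any PCI driver; a minimal sketch of it (hypothetical names, with probe/remove and the id table omitted for brevity) is:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm.h>

struct example_priv {
        bool powered;
};

/* Generic PM callbacks take a struct device and no longer call
 * pci_save_state()/pci_set_power_state(); the PCI core handles that. */
static int __maybe_unused example_suspend(struct device *dev)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        priv->powered = false;  /* quiesce the hardware here */
        return 0;
}

static int __maybe_unused example_resume(struct device *dev)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        priv->powered = true;   /* re-initialise the hardware here */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pci_driver = {
        .name           = "example_pci",
        /* .id_table, .probe and .remove omitted in this sketch */
        .driver.pm      = &example_pm_ops,
};
module_pci_driver(example_pci_driver);

MODULE_LICENSE("GPL");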
Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200717110928.454867-1-vaibhavgupta40@gmail.com --- .../net/wireless/ralink/rt2x00/rt2400pci.c | 3 +- .../net/wireless/ralink/rt2x00/rt2500pci.c | 3 +- .../net/wireless/ralink/rt2x00/rt2800pci.c | 3 +- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 5 ++- .../net/wireless/ralink/rt2x00/rt2x00dev.c | 4 +-- .../net/wireless/ralink/rt2x00/rt2x00pci.c | 31 +++++-------------- .../net/wireless/ralink/rt2x00/rt2x00pci.h | 9 ++---- .../net/wireless/ralink/rt2x00/rt2x00soc.c | 2 +- .../net/wireless/ralink/rt2x00/rt2x00usb.c | 2 +- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 3 +- 10 files changed, 19 insertions(+), 46 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 4d44509e2ce3..c1ac933349d1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1834,8 +1834,7 @@ static struct pci_driver rt2400pci_driver = { .id_table = rt2400pci_device_table, .probe = rt2400pci_probe, .remove = rt2x00pci_remove, - .suspend = rt2x00pci_suspend, - .resume = rt2x00pci_resume, + .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2400pci_driver); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 4620990a94cf..0859adebd860 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -2132,8 +2132,7 @@ static struct pci_driver rt2500pci_driver = { .id_table = rt2500pci_device_table, .probe = rt2500pci_probe, .remove = rt2x00pci_remove, - .suspend = rt2x00pci_suspend, - .resume = rt2x00pci_resume, + .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2500pci_driver); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c index 3868c07672bd..9a33baaa6184 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c @@ -455,8 +455,7 @@ static struct pci_driver rt2800pci_driver = { .id_table = rt2800pci_device_table, .probe = rt2800pci_probe, .remove = rt2x00pci_remove, - .suspend = rt2x00pci_suspend, - .resume = rt2x00pci_resume, + .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2800pci_driver); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index ea8a34ecae14..ecc60d8cbb01 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -1487,9 +1487,8 @@ bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw); */ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev); void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev); -#ifdef CONFIG_PM -int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state); + +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev); int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev); -#endif /* CONFIG_PM */ #endif /* RT2X00_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c index 7f9e43a4f805..8c6d3099b19d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c @@ -1556,8 +1556,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); /* * Device state handlers */ -#ifdef CONFIG_PM -int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) +int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev) { 
rt2x00_dbg(rt2x00dev, "Going to sleep\n"); @@ -1614,7 +1613,6 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_resume); -#endif /* CONFIG_PM */ /* * rt2x00lib module information. diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c index 7f9baa94c7c8..cabeef0dde45 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.c @@ -169,39 +169,24 @@ void rt2x00pci_remove(struct pci_dev *pci_dev) } EXPORT_SYMBOL_GPL(rt2x00pci_remove); -#ifdef CONFIG_PM -int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) +static int __maybe_unused rt2x00pci_suspend(struct device *dev) { - struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rt2x00_dev *rt2x00dev = hw->priv; - int retval; - retval = rt2x00lib_suspend(rt2x00dev, state); - if (retval) - return retval; - - pci_save_state(pci_dev); - pci_disable_device(pci_dev); - return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); + return rt2x00lib_suspend(rt2x00dev); } -EXPORT_SYMBOL_GPL(rt2x00pci_suspend); -int rt2x00pci_resume(struct pci_dev *pci_dev) +static int __maybe_unused rt2x00pci_resume(struct device *dev) { - struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); + struct ieee80211_hw *hw = dev_get_drvdata(dev); struct rt2x00_dev *rt2x00dev = hw->priv; - if (pci_set_power_state(pci_dev, PCI_D0) || - pci_enable_device(pci_dev)) { - rt2x00_err(rt2x00dev, "Failed to resume device\n"); - return -EIO; - } - - pci_restore_state(pci_dev); return rt2x00lib_resume(rt2x00dev); } -EXPORT_SYMBOL_GPL(rt2x00pci_resume); -#endif /* CONFIG_PM */ + +SIMPLE_DEV_PM_OPS(rt2x00pci_pm_ops, rt2x00pci_suspend, rt2x00pci_resume); +EXPORT_SYMBOL_GPL(rt2x00pci_pm_ops); /* * rt2x00pci module information. 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h index fd955ccaa1e6..27f7b2bd26ea 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00pci.h @@ -21,12 +21,7 @@ */ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); void rt2x00pci_remove(struct pci_dev *pci_dev); -#ifdef CONFIG_PM -int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); -int rt2x00pci_resume(struct pci_dev *pci_dev); -#else -#define rt2x00pci_suspend NULL -#define rt2x00pci_resume NULL -#endif /* CONFIG_PM */ + +extern const struct dev_pm_ops rt2x00pci_pm_ops; #endif /* RT2X00PCI_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index 596b8a432946..eface610178d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -130,7 +130,7 @@ int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state) struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; - return rt2x00lib_suspend(rt2x00dev, state); + return rt2x00lib_suspend(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00soc_suspend); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 92e9e023c349..e4473a551241 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -886,7 +886,7 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; - return rt2x00lib_suspend(rt2x00dev, state); + return rt2x00lib_suspend(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_suspend); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index d83288bef2fc..eefce76fc99b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -3009,8 +3009,7 @@ static struct pci_driver rt61pci_driver = { .id_table = rt61pci_device_table, .probe = rt61pci_probe, .remove = rt2x00pci_remove, - .suspend = rt2x00pci_suspend, - .resume = rt2x00pci_resume, + .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt61pci_driver); From 348cb5dc4d702e071efb4f70c4dacfe2473c815d Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sat, 18 Jul 2020 12:02:40 +0200 Subject: [PATCH 1915/2056] ipw2x00: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200718100240.98593-1-grandmaster@al2klimov.de --- .../networking/device_drivers/wifi/intel/ipw2100.rst | 2 +- drivers/net/wireless/intel/ipw2x00/Kconfig | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/networking/device_drivers/wifi/intel/ipw2100.rst b/Documentation/networking/device_drivers/wifi/intel/ipw2100.rst index d54ad522f937..883e96355799 100644 --- a/Documentation/networking/device_drivers/wifi/intel/ipw2100.rst +++ b/Documentation/networking/device_drivers/wifi/intel/ipw2100.rst @@ -78,7 +78,7 @@ such, if you are interested in deploying or shipping a driver as part of solution intended to be used for purposes other than development, please obtain a tested driver from Intel Customer Support at: -http://www.intel.com/support/wireless/sb/CS-006408.htm +https://www.intel.com/support/wireless/sb/CS-006408.htm 1. Introduction =============== diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig index 18436592a3a6..b1e7b4470842 100644 --- a/drivers/net/wireless/intel/ipw2x00/Kconfig +++ b/drivers/net/wireless/intel/ipw2x00/Kconfig @@ -28,7 +28,7 @@ config IPW2100 You will also very likely need the Wireless Tools in order to configure your card: - . + . It is recommended that you compile this driver as a module (M) rather than built-in (Y). This driver requires firmware at device @@ -90,7 +90,7 @@ config IPW2200 You will also very likely need the Wireless Tools in order to configure your card: - . + . It is recommended that you compile this driver as a module (M) rather than built-in (Y). This driver requires firmware at device From 2d96c1ed4bab52e2dced98746ecc978a1ac0eeb7 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 13:01:15 +0200 Subject: [PATCH 1916/2056] b43: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. 
Klimov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200719110115.58085-1-grandmaster@al2klimov.de --- drivers/net/wireless/broadcom/b43/main.c | 14 +- .../net/wireless/broadcom/b43/phy_common.c | 2 +- drivers/net/wireless/broadcom/b43/phy_g.c | 12 +- drivers/net/wireless/broadcom/b43/phy_ht.c | 2 +- drivers/net/wireless/broadcom/b43/phy_lp.c | 2 +- drivers/net/wireless/broadcom/b43/phy_n.c | 150 +++++++++--------- .../net/wireless/broadcom/b43/radio_2056.c | 2 +- .../net/wireless/broadcom/b43/tables_nphy.c | 4 +- drivers/net/wireless/intersil/orinoco/Kconfig | 4 +- 9 files changed, 96 insertions(+), 96 deletions(-) diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index 0c2ea12fca19..a54dd4f7fa54 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -734,7 +734,7 @@ static void b43_short_slot_timing_disable(struct b43_wldev *dev) } /* DummyTransmission function, as documented on - * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission + * https://bcm-v4.sipsolutions.net/802.11/DummyTransmission */ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on) { @@ -1198,7 +1198,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev) { struct bcma_drv_cc *bcma_cc __maybe_unused; @@ -2290,7 +2290,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx, return -EPROTO; } -/* http://bcm-v4.sipsolutions.net/802.11/Init/Firmware */ +/* https://bcm-v4.sipsolutions.net/802.11/Init/Firmware */ static int b43_try_request_fw(struct b43_request_fw_context *ctx) { struct b43_wldev *dev = ctx->dev; @@ -2843,7 +2843,7 @@ static int b43_upload_initvals_band(struct b43_wldev *dev) } /* Initialize the GPIOs - * http://bcm-specs.sipsolutions.net/GPIO + * https://bcm-specs.sipsolutions.net/GPIO */ #ifdef CONFIG_B43_SSB @@ -2971,7 +2971,7 @@ void b43_mac_enable(struct b43_wldev *dev) } } -/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +/* https://bcm-specs.sipsolutions.net/SuspendMAC */ void b43_mac_suspend(struct b43_wldev *dev) { int i; @@ -3004,7 +3004,7 @@ void b43_mac_suspend(struct b43_wldev *dev) dev->mac_suspended++; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on) { u32 tmp; @@ -3231,7 +3231,7 @@ static void b43_chip_exit(struct b43_wldev *dev) } /* Initialize the chip - * http://bcm-specs.sipsolutions.net/ChipInit + * https://bcm-specs.sipsolutions.net/ChipInit */ static int b43_chip_init(struct b43_wldev *dev) { diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index 923d4cb9fc30..1de4de094d61 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c @@ -559,7 +559,7 @@ bool b43_is_40mhz(struct b43_wldev *dev) return dev->phy.chandef->width == NL80211_CHAN_WIDTH_40; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */ void b43_phy_force_clock(struct b43_wldev *dev, bool force) { u32 tmp; diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c index 1e022ec733a3..d5a1a5c58236 
100644 --- a/drivers/net/wireless/broadcom/b43/phy_g.c +++ b/drivers/net/wireless/broadcom/b43/phy_g.c @@ -357,14 +357,14 @@ static void b43_set_original_gains(struct b43_wldev *dev) b43_dummy_transmission(dev, false, true); } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) { b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) { u16 val; @@ -375,7 +375,7 @@ static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) return (s16) val; } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) { u16 i; @@ -389,7 +389,7 @@ static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) } } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ static void b43_nrssi_mem_update(struct b43_wldev *dev) { struct b43_phy_g *gphy = dev->phy.g; @@ -1575,7 +1575,7 @@ static void b43_phy_initb5(struct b43_wldev *dev) b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */ static void b43_phy_initb6(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -2746,7 +2746,7 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev, return 0; } -/* http://bcm-specs.sipsolutions.net/EstimatePowerOut +/* https://bcm-specs.sipsolutions.net/EstimatePowerOut * This function converts a TSSI value to dBm in Q5.2 */ static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi) diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c index 6033df1c3053..c685b4bb5ed6 100644 --- a/drivers/net/wireless/broadcom/b43/phy_ht.c +++ b/drivers/net/wireless/broadcom/b43/phy_ht.c @@ -1018,7 +1018,7 @@ static void b43_phy_ht_op_free(struct b43_wldev *dev) phy->ht = NULL; } -/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ +/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev, bool blocked) { diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index cfb953d61dc5..0e5c076e7544 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -70,7 +70,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev) dev->phy.lp = NULL; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ static void lpphy_read_band_sprom(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 46db91846007..f75f0ff1db1e 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -98,7 +98,7 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev) (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ +/* 
https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev) { return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >> @@ -109,7 +109,7 @@ static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev) * RF (just without b43_nphy_rf_ctl_intc_override) **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, enum b43_nphy_rf_sequence seq) { @@ -146,7 +146,7 @@ static void b43_nphy_rf_ctl_override_rev19(struct b43_wldev *dev, u16 field, /* TODO */ } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field, u16 value, u8 core, bool off, u8 override) @@ -193,7 +193,7 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */ static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev, enum n_rf_ctl_over_cmd cmd, u16 value, u8 core, bool off) @@ -237,7 +237,7 @@ static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field, u16 value, u8 core, bool off) { @@ -382,7 +382,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, enum n_intc_override intc_override, u16 value, u8 core) @@ -490,7 +490,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev, * Various PHY ops **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ static void b43_nphy_write_clip_detection(struct b43_wldev *dev, const u16 *clip_st) { @@ -498,14 +498,14 @@ static void b43_nphy_write_clip_detection(struct b43_wldev *dev, b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) { clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) { u16 tmp; @@ -526,7 +526,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) return tmp; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ static void b43_nphy_reset_cca(struct b43_wldev *dev) { u16 bbcfg; @@ -540,7 +540,7 @@ static void b43_nphy_reset_cca(struct b43_wldev *dev) b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); } -/* 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) { struct b43_phy *phy = &dev->phy; @@ -564,7 +564,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) } } -/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */ +/* https://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */ static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset) { if (!offset) @@ -572,7 +572,7 @@ static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset) return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -628,7 +628,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, u8 *events, u8 *delays, u8 length) { @@ -805,7 +805,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev, } /* Calibrate resistors in LPF of PLL? - * http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal + * https://bcm-v4.sipsolutions.net/PHY/radio205x_rcal */ static u8 b43_radio_2057_rcal(struct b43_wldev *dev) { @@ -919,7 +919,7 @@ static u8 b43_radio_2057_rcal(struct b43_wldev *dev) } /* Calibrate the internal RC oscillator? - * http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal + * https://bcm-v4.sipsolutions.net/PHY/radio2057_rccal */ static u16 b43_radio_2057_rccal(struct b43_wldev *dev) { @@ -1030,7 +1030,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev) b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8); } -/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ +/* https://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ static void b43_radio_2057_init(struct b43_wldev *dev) { b43_radio_2057_init_pre(dev); @@ -1117,7 +1117,7 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev, e->radio_tx1_mixg_boost_tune); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ static void b43_radio_2056_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev3 *e) { @@ -1356,7 +1356,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev) /* * Initialize a Broadcom 2056 N-radio - * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init + * https://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init */ static void b43_radio_init2056(struct b43_wldev *dev) { @@ -1406,7 +1406,7 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev, b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ static void b43_radio_2055_setup(struct b43_wldev *dev, const struct b43_nphy_channeltab_entry_rev2 *e) { @@ -1480,7 +1480,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev) /* * Initialize a Broadcom 2055 N-radio - * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init + * https://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init */ static void b43_radio_init2055(struct b43_wldev *dev) { @@ 
-1499,7 +1499,7 @@ static void b43_radio_init2055(struct b43_wldev *dev) * Samples **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ static int b43_nphy_load_samples(struct b43_wldev *dev, struct cordic_iq *samples, u16 len) { struct b43_phy_n *nphy = dev->phy.n; @@ -1526,7 +1526,7 @@ static int b43_nphy_load_samples(struct b43_wldev *dev, return 0; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, bool test) { @@ -1569,7 +1569,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, return (i < 0) ? 0 : len; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, u16 wait, bool iqmode, bool dac_test, bool modify_bbmult) @@ -1650,7 +1650,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, * RSSI **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, s8 offset, u8 core, enum n_rail_type rail, @@ -1895,7 +1895,7 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, enum n_rssi_type type) { @@ -1907,7 +1907,7 @@ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, b43_nphy_rev2_rssi_select(dev, code, type); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, enum n_rssi_type rssi_type, u8 *buf) { @@ -1936,7 +1936,7 @@ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type, s32 *buf, u8 nsamp) { @@ -2025,7 +2025,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type, return out; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -2287,7 +2287,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) b43_nphy_write_clip_detection(dev, clip_state); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type) { int i, j, vcm; @@ -2453,7 +2453,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type) /* * RSSI Calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ static void b43_nphy_rssi_cal(struct b43_wldev *dev) { @@ -2680,7 +2680,7 @@ static void 
b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev) b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) { if (dev->phy.rev >= 19) @@ -3433,7 +3433,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev) B43_NPHY_FINERX2_CGC_DECGC); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ static void b43_nphy_workarounds(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -3468,7 +3468,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev) /* * Transmits a known value for LO calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone */ static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, bool iqmode, bool dac_test, bool modify_bbmult) @@ -3481,7 +3481,7 @@ static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, return 0; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -3509,7 +3509,7 @@ static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) ~B43_NPHY_RFSEQMODE_CAOVER); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ static void b43_nphy_stop_playback(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -3546,7 +3546,7 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, struct nphy_txgains target, struct nphy_iqcal_params *params) @@ -3595,7 +3595,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, * Tx and Rx **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) { struct b43_phy *phy = &dev->phy; @@ -3732,7 +3732,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ static void b43_nphy_tx_power_fix(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -3926,7 +3926,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev) /* * Stop radio and transmit known signal. Then check received signal strength to * get TSSI (Transmit Signal Strength Indicator). 
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi */ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) { @@ -3978,7 +3978,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF; } -/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */ +/* https://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -4039,7 +4039,7 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -4272,7 +4272,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) { struct b43_phy_n *nphy = dev->phy.n; @@ -4310,7 +4310,7 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) /* * TX low-pass filter bandwidth setup - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */ static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev) { @@ -4333,7 +4333,7 @@ static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, u16 samps, u8 time, bool wait) { @@ -4372,7 +4372,7 @@ static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, memset(est, 0, sizeof(*est)); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, struct b43_phy_n_iq_comp *pcomp) { @@ -4391,7 +4391,7 @@ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, #if 0 /* Ready but not used anywhere */ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) { u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; @@ -4414,7 +4414,7 @@ static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) { u8 rxval, txval; @@ -4476,7 +4476,7 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) } #endif -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) { int i; @@ -4574,7 +4574,7 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) b43_nphy_rx_iq_coeffs(dev, true, &new); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ static void 
b43_nphy_tx_iq_workaround(struct b43_wldev *dev) { u16 array[4]; @@ -4586,7 +4586,7 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ static void b43_nphy_spur_workaround(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -4645,7 +4645,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -4713,7 +4713,7 @@ static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) /* * Restore RSSI Calibration - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal */ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) { @@ -4822,7 +4822,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -4921,7 +4921,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) { struct b43_phy_n *nphy = dev->phy.n; @@ -4955,14 +4955,14 @@ static void b43_nphy_pa_set_tx_dig_filter(struct b43_wldev *dev, u16 offset, b43_phy_write(dev, offset, filter[i]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */ static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev) { b43_nphy_pa_set_tx_dig_filter(dev, 0x2C5, tbl_tx_filter_coef_rev4[2]); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev) { /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */ @@ -5002,7 +5002,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -5077,7 +5077,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev) return target; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */ static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev) { u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; @@ -5106,7 +5106,7 @@ static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -5207,7 +5207,7 @@ static void 
b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */ static void b43_nphy_save_cal(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -5278,7 +5278,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev) b43_nphy_stay_in_carrier_search(dev, 0); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */ static void b43_nphy_restore_cal(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -5366,7 +5366,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev) b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, struct nphy_txgains target, bool full, bool mphase) @@ -5599,7 +5599,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev, return error; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; @@ -5634,7 +5634,7 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, struct nphy_txgains target, u8 type, bool debug) { @@ -5821,7 +5821,7 @@ static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev, return -1; } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev, struct nphy_txgains target, u8 type, bool debug) { @@ -5834,7 +5834,7 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev, return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) { struct b43_phy *phy = &dev->phy; @@ -5939,7 +5939,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, * N-PHY init **************************************************/ -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) { u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); @@ -5953,7 +5953,7 @@ static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ static void b43_nphy_bphy_init(struct b43_wldev *dev) { unsigned int i; @@ -5972,7 +5972,7 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev) b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) { if (dev->phy.rev >= 7) @@ -6246,7 +6246,7 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev, b43_phy_write(dev, B43_NPHY_BW6, 
e->phy_bw6); } -/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ +/* https://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) { switch (dev->dev->bus_type) { @@ -6265,7 +6265,7 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ static void b43_nphy_channel_setup(struct b43_wldev *dev, const struct b43_phy_n_sfo_cfg *e, struct ieee80211_channel *new_channel) @@ -6372,7 +6372,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev, b43_nphy_spur_workaround(dev); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ static int b43_nphy_set_channel(struct b43_wldev *dev, struct ieee80211_channel *channel, enum nl80211_channel_type channel_type) @@ -6589,7 +6589,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } -/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ +/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, bool blocked) { @@ -6643,7 +6643,7 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, } } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) { struct b43_phy *phy = &dev->phy; diff --git a/drivers/net/wireless/broadcom/b43/radio_2056.c b/drivers/net/wireless/broadcom/b43/radio_2056.c index 575c696b7cdf..94f5e626acba 100644 --- a/drivers/net/wireless/broadcom/b43/radio_2056.c +++ b/drivers/net/wireless/broadcom/b43/radio_2056.c @@ -3072,7 +3072,7 @@ INITTABSPTS(b2056_inittab_radio_rev11); .phy_regs.phy_bw5 = r4, \ .phy_regs.phy_bw6 = r5 -/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */ +/* https://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */ static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = { { .freq = 4920, RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04, diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c index dad405abf9b1..7957db94e84c 100644 --- a/drivers/net/wireless/broadcom/b43/tables_nphy.c +++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c @@ -3620,7 +3620,7 @@ static void b43_nphy_tables_init_rev0(struct b43_wldev *dev) ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ void b43_nphy_tables_init(struct b43_wldev *dev) { if (dev->phy.rev >= 16) @@ -3633,7 +3633,7 @@ void b43_nphy_tables_init(struct b43_wldev *dev) b43_nphy_tables_init_rev0(dev); } -/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ +/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; diff --git a/drivers/net/wireless/intersil/orinoco/Kconfig b/drivers/net/wireless/intersil/orinoco/Kconfig index c470ee23673f..f62730aa7be3 100644 --- a/drivers/net/wireless/intersil/orinoco/Kconfig +++ b/drivers/net/wireless/intersil/orinoco/Kconfig @@ -27,7 +27,7 @@ config HERMES You will also very 
likely also need the Wireless Tools in order to configure your card and that /etc/pcmcia/wireless.opts works : - + config HERMES_PRISM bool "Support Prism 2/2.5 chipset" @@ -120,7 +120,7 @@ config PCMCIA_HERMES You will very likely need the Wireless Tools in order to configure your card and that /etc/pcmcia/wireless.opts works: - . + . config PCMCIA_SPECTRUM tristate "Symbol Spectrum24 Trilogy PCMCIA card support" From 140c6026167b2b394bf3a8a882f3bfbca0726111 Mon Sep 17 00:00:00 2001 From: "Alexander A. Klimov" Date: Sun, 19 Jul 2020 13:11:24 +0200 Subject: [PATCH 1917/2056] b43legacy: Replace HTTP links with HTTPS ones Rationale: Reduces attack surface on kernel devs opening the links for MITM as HTTPS traffic is much harder to manipulate. Deterministic algorithm: For each file: If not .svg: For each line: If doesn't contain `\bxmlns\b`: For each link, `\bhttp://[^# \t\r\n]*(?:\w|/)`: If neither `\bgnu\.org/license`, nor `\bmozilla\.org/MPL\b`: If both the HTTP and HTTPS versions return 200 OK and serve the same content: Replace HTTP with HTTPS. Signed-off-by: Alexander A. Klimov Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200719111124.58167-1-grandmaster@al2klimov.de --- drivers/net/wireless/broadcom/b43legacy/main.c | 8 ++++---- drivers/net/wireless/broadcom/b43legacy/phy.c | 8 ++++---- drivers/net/wireless/broadcom/b43legacy/radio.c | 8 ++++---- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 7bb6681fa882..da0d9e6087ca 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -591,7 +591,7 @@ static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev) } /* DummyTransmission function, as documented on - * http://bcm-specs.sipsolutions.net/DummyTransmission + * https://bcm-specs.sipsolutions.net/DummyTransmission */ void b43legacy_dummy_transmission(struct b43legacy_wldev *dev) { @@ -1870,7 +1870,7 @@ static int b43legacy_upload_initvals(struct b43legacy_wldev *dev) } /* Initialize the GPIOs - * http://bcm-specs.sipsolutions.net/GPIO + * https://bcm-specs.sipsolutions.net/GPIO */ static int b43legacy_gpio_init(struct b43legacy_wldev *dev) { @@ -1960,7 +1960,7 @@ void b43legacy_mac_enable(struct b43legacy_wldev *dev) } } -/* http://bcm-specs.sipsolutions.net/SuspendMAC */ +/* https://bcm-specs.sipsolutions.net/SuspendMAC */ void b43legacy_mac_suspend(struct b43legacy_wldev *dev) { int i; @@ -2141,7 +2141,7 @@ static void b43legacy_chip_exit(struct b43legacy_wldev *dev) } /* Initialize the chip - * http://bcm-specs.sipsolutions.net/ChipInit + * https://bcm-specs.sipsolutions.net/ChipInit */ static int b43legacy_chip_init(struct b43legacy_wldev *dev) { diff --git a/drivers/net/wireless/broadcom/b43legacy/phy.c b/drivers/net/wireless/broadcom/b43legacy/phy.c index a659259bc51a..05404fbd1e70 100644 --- a/drivers/net/wireless/broadcom/b43legacy/phy.c +++ b/drivers/net/wireless/broadcom/b43legacy/phy.c @@ -129,7 +129,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev) } /* initialize B PHY power control - * as described in http://bcm-specs.sipsolutions.net/InitPowerControl + * as described in https://bcm-specs.sipsolutions.net/InitPowerControl */ static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) { @@ -1461,7 +1461,7 @@ void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev, b43legacy_phy_write(dev, 0x0060, value); } -/* 
http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ +/* https://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) { static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 }; @@ -1721,7 +1721,7 @@ void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev) } } -/* http://bcm-specs.sipsolutions.net/EstimatePowerOut +/* https://bcm-specs.sipsolutions.net/EstimatePowerOut * This function converts a TSSI value to dBm in Q5.2 */ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) @@ -1747,7 +1747,7 @@ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) return dbm; } -/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ +/* https://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c index da40d1ca8723..06891b4f837b 100644 --- a/drivers/net/wireless/broadcom/b43legacy/radio.c +++ b/drivers/net/wireless/broadcom/b43legacy/radio.c @@ -313,14 +313,14 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev) return ret[channel - 1]; } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) { b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) { u16 val; @@ -331,7 +331,7 @@ s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) return (s16)val; } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) { u16 i; @@ -345,7 +345,7 @@ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) } } -/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ +/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) { struct b43legacy_phy *phy = &dev->phy; From 99aaa1aafa5c745c0d01f6f980f990ed0edd88ba Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 21 Jul 2020 20:35:48 +0530 Subject: [PATCH 1918/2056] hostap: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions as through the generic framework, PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. 
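[Editor's note] To make the before/after concrete, here is a hypothetical comparison of a legacy PCI suspend callback and its generic-PM counterpart. This is not the hostap code (that diff follows); it only illustrates which steps migrate into the PCI core.

	#include <linux/pci.h>
	#include <linux/netdevice.h>

	/* Legacy model: the driver owns the PCI bookkeeping around its own work. */
	static int __maybe_unused legacy_style_suspend(struct pci_dev *pdev,
						       pm_message_t state)
	{
		/* ... device-specific quiescing ... */
		pci_save_state(pdev);
		pci_disable_device(pdev);
		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
	}

	/* Generic model: only the device-specific part is left; the PCI core
	 * performs the config-space save/restore and power-state transitions
	 * around this callback.
	 */
	static int __maybe_unused generic_style_suspend(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);

		if (netif_running(ndev)) {
			netif_stop_queue(ndev);
			netif_device_detach(ndev);
		}
		/* ... device-specific quiescing ... */
		return 0;
	}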
Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200721150547.371763-1-vaibhavgupta40@gmail.com --- .../net/wireless/intersil/hostap/hostap_hw.c | 6 ++-- .../net/wireless/intersil/hostap/hostap_pci.c | 32 ++++++------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 2ab34cf74ecc..b6c497ce12e1 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -3366,8 +3366,8 @@ static void prism2_free_local_data(struct net_device *dev) } -#if (defined(PRISM2_PCI) && defined(CONFIG_PM)) || defined(PRISM2_PCCARD) -static void prism2_suspend(struct net_device *dev) +#if defined(PRISM2_PCI) || defined(PRISM2_PCCARD) +static void __maybe_unused prism2_suspend(struct net_device *dev) { struct hostap_interface *iface; struct local_info *local; @@ -3385,7 +3385,7 @@ static void prism2_suspend(struct net_device *dev) /* Disable hardware and firmware */ prism2_hw_shutdown(dev, 0); } -#endif /* (PRISM2_PCI && CONFIG_PM) || PRISM2_PCCARD */ +#endif /* PRISM2_PCI || PRISM2_PCCARD */ /* These might at some point be compiled separately and used as separate diff --git a/drivers/net/wireless/intersil/hostap/hostap_pci.c b/drivers/net/wireless/intersil/hostap/hostap_pci.c index 0c2aa880e32a..101887e6bd0f 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_pci.c +++ b/drivers/net/wireless/intersil/hostap/hostap_pci.c @@ -403,36 +403,23 @@ static void prism2_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } - -#ifdef CONFIG_PM -static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused prism2_pci_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); if (netif_running(dev)) { netif_stop_queue(dev); netif_device_detach(dev); } prism2_suspend(dev); - pci_save_state(pdev); - pci_disable_device(pdev); - pci_set_power_state(pdev, PCI_D3hot); return 0; } -static int prism2_pci_resume(struct pci_dev *pdev) +static int __maybe_unused prism2_pci_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); - int err; + struct net_device *dev = dev_get_drvdata(dev_d); - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", - dev->name); - return err; - } - pci_restore_state(pdev); prism2_hw_config(dev, 0); if (netif_running(dev)) { netif_device_attach(dev); @@ -441,20 +428,19 @@ static int prism2_pci_resume(struct pci_dev *pdev) return 0; } -#endif /* CONFIG_PM */ - MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); +static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops, + prism2_pci_suspend, + prism2_pci_resume); + static struct pci_driver prism2_pci_driver = { .name = "hostap_pci", .id_table = prism2_pci_id_table, .probe = prism2_pci_probe, .remove = prism2_pci_remove, -#ifdef CONFIG_PM - .suspend = prism2_pci_suspend, - .resume = prism2_pci_resume, -#endif /* CONFIG_PM */ + .driver.pm = &prism2_pci_pm_ops, }; module_pci_driver(prism2_pci_driver); From 9130559cf8dbc34b3d54a2ab5f2a746b964c46a4 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 22 Jul 2020 12:17:01 +0200 Subject: [PATCH 1919/2056] ipw2100: Use GFP_KERNEL instead of GFP_ATOMIC in some memory allocation The call chain is: ipw2100_pci_init_one (the probe function) --> ipw2100_queues_allocate --> ipw2100_tx_allocate No lock is taken in the 
between. So it is safe to use GFP_KERNEL in 'ipw2100_tx_allocate()'. BTW, 'ipw2100_queues_allocate()' also calls 'ipw2100_msg_allocate()' which already allocates some memory using GFP_KERNEL. Signed-off-by: Christophe JAILLET Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200722101701.26126-1-christophe.jaillet@wanadoo.fr --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 83d2f2acc0de..699deca745a2 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -4430,7 +4430,7 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH, sizeof(struct ipw2100_tx_packet), - GFP_ATOMIC); + GFP_KERNEL); if (!priv->tx_buffers) { bd_queue_free(priv, &priv->tx_queue); return -ENOMEM; From e52525c0c320076deab35409a6b2cff6388959b8 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 22 Jul 2020 12:17:16 +0200 Subject: [PATCH 1920/2056] ipw2x00: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'ipw2100_msg_allocate()' (ipw2100.c), GFP_KERNEL can be used because it is called from the probe function. The call chain is: ipw2100_pci_init_one (the probe function) --> ipw2100_queues_allocate --> ipw2100_msg_allocate Moreover, 'ipw2100_msg_allocate()' already uses GFP_KERNEL for some other memory allocations. When memory is allocated in 'status_queue_allocate()' (ipw2100.c), GFP_KERNEL can be used because it is called from the probe function. The call chain is: ipw2100_pci_init_one (the probe function) --> ipw2100_queues_allocate --> ipw2100_rx_allocate --> status_queue_allocate Moreover, 'ipw2100_rx_allocate()' already uses GFP_KERNEL for some other memory allocations. When memory is allocated in 'bd_queue_allocate()' (ipw2100.c), GFP_KERNEL can be used because it is called from the probe function. The call chain is: ipw2100_pci_init_one (the probe function) --> ipw2100_queues_allocate --> ipw2100_rx_allocate --> bd_queue_allocate Moreover, 'ipw2100_rx_allocate()' already uses GFP_KERNEL for some other memory allocations. When memory is allocated in 'ipw2100_tx_allocate()' (ipw2100.c), GFP_KERNEL can be used because it is called from the probe function. The call chain is: ipw2100_pci_init_one (the probe function) --> ipw2100_queues_allocate --> ipw2100_tx_allocate Moreover, 'ipw2100_tx_allocate()' already uses GFP_KERNEL for some other memory allocations. When memory is allocated in 'ipw_queue_tx_init()' (ipw2200.c), GFP_KERNEL can be used because it is called from a call chain that already uses GFP_KERNEL and no spin_lock is taken in the between. The call chain is: ipw_up --> ipw_load --> ipw_queue_reset --> ipw_queue_tx_init 'ipw_up()' already uses GFP_KERNEL for some other memory allocations. 
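[Editor's note] Before the semantic patch quoted next, a small hypothetical sketch of the hand-modified part: the pci_ compat wrapper hard-coded GFP_ATOMIC, while the dma_ API makes the flag explicit, and in these probe-time call chains (process context, no spinlock held) GFP_KERNEL is the safe choice. The function and its name below are invented for illustration only.

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* example_alloc_ring() is hypothetical. */
	static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
					dma_addr_t *dma_handle)
	{
		/* The old compat wrapper hid the flag and always used GFP_ATOMIC:
		 *
		 *	return pci_alloc_consistent(pdev, size, dma_handle);
		 *
		 * The dma_* API states it explicitly.  Called from a probe path
		 * (process context, no spinlock held) the allocation may sleep,
		 * so GFP_KERNEL is appropriate; GFP_ATOMIC stays reserved for
		 * interrupt or spinlocked contexts.
		 */
		return dma_alloc_coherent(&pdev->dev, size, dma_handle, GFP_KERNEL);
	}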
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200722101716.26185-1-christophe.jaillet@wanadoo.fr --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 121 +++++++++---------- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 56 ++++----- 2 files changed, 88 insertions(+), 89 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 699deca745a2..461e955aa259 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -2295,10 +2295,11 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv, return -ENOMEM; packet->rxp = (struct ipw2100_rx *)packet->skb->data; - packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, + packet->dma_addr = dma_map_single(&priv->pci_dev->dev, + packet->skb->data, sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) { dev_kfree_skb(packet->skb); return -ENOMEM; } @@ -2479,9 +2480,8 @@ static void isr_rx(struct ipw2100_priv *priv, int i, return; } - pci_unmap_single(priv->pci_dev, - packet->dma_addr, - sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); skb_put(packet->skb, status->frame_size); @@ -2563,8 +2563,8 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i, return; } - pci_unmap_single(priv->pci_dev, packet->dma_addr, - sizeof(struct ipw2100_rx), 
PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), packet->skb->data, status->frame_size); @@ -2689,9 +2689,9 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) /* Sync the DMA for the RX buffer so CPU is sure to get * the correct values */ - pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, - sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), + DMA_FROM_DEVICE); if (unlikely(ipw2100_corruption_check(priv, i))) { ipw2100_corruption_detected(priv, i); @@ -2923,9 +2923,8 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv) (packet->index + 1 + i) % txq->entries, tbd->host_addr, tbd->buf_length); - pci_unmap_single(priv->pci_dev, - tbd->host_addr, - tbd->buf_length, PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr, + tbd->buf_length, DMA_TO_DEVICE); } libipw_txb_free(packet->info.d_struct.txb); @@ -3165,15 +3164,13 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) tbd->buf_length = packet->info.d_struct.txb-> fragments[i]->len - LIBIPW_3ADDR_LEN; - tbd->host_addr = pci_map_single(priv->pci_dev, + tbd->host_addr = dma_map_single(&priv->pci_dev->dev, packet->info.d_struct. - txb->fragments[i]-> - data + + txb->fragments[i]->data + LIBIPW_3ADDR_LEN, tbd->buf_length, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pci_dev, - tbd->host_addr)) { + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) { IPW_DEBUG_TX("dma mapping error\n"); break; } @@ -3182,10 +3179,10 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) txq->next, tbd->host_addr, tbd->buf_length); - pci_dma_sync_single_for_device(priv->pci_dev, - tbd->host_addr, - tbd->buf_length, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(&priv->pci_dev->dev, + tbd->host_addr, + tbd->buf_length, + DMA_TO_DEVICE); txq->next++; txq->next %= txq->entries; @@ -3440,9 +3437,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) return -ENOMEM; for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { - v = pci_zalloc_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - &p); + v = dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), &p, + GFP_KERNEL); if (!v) { printk(KERN_ERR DRV_NAME ": " "%s: PCI alloc failed for msg " @@ -3461,11 +3458,10 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - priv->msg_buffers[j].info.c_struct.cmd, - priv->msg_buffers[j].info.c_struct. - cmd_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[j].info.c_struct.cmd, + priv->msg_buffers[j].info.c_struct.cmd_phys); } kfree(priv->msg_buffers); @@ -3496,11 +3492,10 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv) return; for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - priv->msg_buffers[i].info.c_struct.cmd, - priv->msg_buffers[i].info.c_struct. 
- cmd_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[i].info.c_struct.cmd, + priv->msg_buffers[i].info.c_struct.cmd_phys); } kfree(priv->msg_buffers); @@ -4323,7 +4318,8 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries) IPW_DEBUG_INFO("enter\n"); q->size = entries * sizeof(struct ipw2100_status); - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, + GFP_KERNEL); if (!q->drv) { IPW_DEBUG_WARNING("Can not allocate status queue.\n"); return -ENOMEM; @@ -4339,9 +4335,10 @@ static void status_queue_free(struct ipw2100_priv *priv) IPW_DEBUG_INFO("enter\n"); if (priv->status_queue.drv) { - pci_free_consistent(priv->pci_dev, priv->status_queue.size, - priv->status_queue.drv, - priv->status_queue.nic); + dma_free_coherent(&priv->pci_dev->dev, + priv->status_queue.size, + priv->status_queue.drv, + priv->status_queue.nic); priv->status_queue.drv = NULL; } @@ -4357,7 +4354,8 @@ static int bd_queue_allocate(struct ipw2100_priv *priv, q->entries = entries; q->size = entries * sizeof(struct ipw2100_bd); - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, + GFP_KERNEL); if (!q->drv) { IPW_DEBUG_INFO ("can't allocate shared memory for buffer descriptors\n"); @@ -4377,7 +4375,8 @@ static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q) return; if (q->drv) { - pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic); + dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv, + q->nic); q->drv = NULL; } @@ -4437,9 +4436,9 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) } for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { - v = pci_alloc_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - &p); + v = dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), &p, + GFP_KERNEL); if (!v) { printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for tx " "buffers.\n", @@ -4459,11 +4458,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - priv->tx_buffers[j].info.d_struct.data, - priv->tx_buffers[j].info.d_struct. - data_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[j].info.d_struct.data, + priv->tx_buffers[j].info.d_struct.data_phys); } kfree(priv->tx_buffers); @@ -4540,12 +4538,10 @@ static void ipw2100_tx_free(struct ipw2100_priv *priv) priv->tx_buffers[i].info.d_struct.txb = NULL; } if (priv->tx_buffers[i].info.d_struct.data) - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - priv->tx_buffers[i].info.d_struct. - data, - priv->tx_buffers[i].info.d_struct. 
- data_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[i].info.d_struct.data, + priv->tx_buffers[i].info.d_struct.data_phys); } kfree(priv->tx_buffers); @@ -4608,9 +4604,10 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr, + dma_unmap_single(&priv->pci_dev->dev, + priv->rx_buffers[j].dma_addr, sizeof(struct ipw2100_rx_packet), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(priv->rx_buffers[j].skb); } @@ -4662,10 +4659,10 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv) for (i = 0; i < RX_QUEUE_LENGTH; i++) { if (priv->rx_buffers[i].rxp) { - pci_unmap_single(priv->pci_dev, + dma_unmap_single(&priv->pci_dev->dev, priv->rx_buffers[i].dma_addr, sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(priv->rx_buffers[i].skb); } } @@ -6196,7 +6193,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, pci_set_master(pci_dev); pci_set_drvdata(pci_dev, priv); - err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_WARNING DRV_NAME "Error calling pci_set_dma_mask.\n"); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 39ff3a426092..129ef2f6248a 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3442,8 +3442,9 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv, /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].skb != NULL) { - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, + rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(rxq->pool[i].skb); rxq->pool[i].skb = NULL; } @@ -3774,7 +3775,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv, return -ENOMEM; q->bd = - pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); + dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count, + &q->q.dma_addr, GFP_KERNEL); if (!q->bd) { IPW_ERROR("pci_alloc_consistent(%zd) failed\n", sizeof(q->bd[0]) * count); @@ -3816,9 +3818,10 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, /* unmap chunks if any */ for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { - pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), + dma_unmap_single(&dev->dev, + le32_to_cpu(bd->u.data.chunk_ptr[i]), le16_to_cpu(bd->u.data.chunk_len[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (txq->txb[txq->q.last_used]) { libipw_txb_free(txq->txb[txq->q.last_used]); txq->txb[txq->q.last_used] = NULL; @@ -3850,8 +3853,8 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) } /* free buffers belonging to queue itself */ - pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, - q->dma_addr); + dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, + q->dma_addr); kfree(txq->txb); /* 0 fill whole structure */ @@ -5196,8 +5199,8 @@ static void ipw_rx_queue_replenish(void *data) list_del(element); rxb->dma_addr = - pci_map_single(priv->pci_dev, rxb->skb->data, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_map_single(&priv->pci_dev->dev, rxb->skb->data, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; 
@@ -5230,8 +5233,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].skb != NULL) { - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, + rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(rxq->pool[i].skb); } } @@ -8263,9 +8267,8 @@ static void ipw_rx(struct ipw_priv *priv) } priv->rxq->queue[i] = NULL; - pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, - IPW_RX_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); pkt = (struct ipw_rx_packet *)rxb->skb->data; IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", @@ -8417,8 +8420,8 @@ static void ipw_rx(struct ipw_priv *priv) rxb->skb = NULL; } - pci_unmap_single(priv->pci_dev, rxb->dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); list_add_tail(&rxb->list, &priv->rxq->rx_used); i = (i + 1) % RX_QUEUE_SIZE; @@ -10217,11 +10220,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, txb->fragments[i]->len - hdr_len); tfd->u.data.chunk_ptr[i] = - cpu_to_le32(pci_map_single - (priv->pci_dev, - txb->fragments[i]->data + hdr_len, - txb->fragments[i]->len - hdr_len, - PCI_DMA_TODEVICE)); + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, + txb->fragments[i]->data + hdr_len, + txb->fragments[i]->len - hdr_len, + DMA_TO_DEVICE)); tfd->u.data.chunk_len[i] = cpu_to_le16(txb->fragments[i]->len - hdr_len); } @@ -10251,10 +10253,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, dev_kfree_skb_any(txb->fragments[i]); txb->fragments[i] = skb; tfd->u.data.chunk_ptr[i] = - cpu_to_le32(pci_map_single - (priv->pci_dev, skb->data, - remaining_bytes, - PCI_DMA_TODEVICE)); + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, + skb->data, + remaining_bytes, + DMA_TO_DEVICE)); le32_add_cpu(&tfd->u.data.num_chunks, 1); } @@ -11620,9 +11622,9 @@ static int ipw_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); goto out_pci_disable_device; From 141bc9abbbffa89d020957caa9ac4a61d0ef1e26 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 14:49:10 +0800 Subject: [PATCH 1921/2056] qtnfmac: Missing platform_device_unregister() on error in qtnf_core_mac_alloc() Add the missing platform_device_unregister() before return from qtnf_core_mac_alloc() in the error handling case. 
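The pattern being fixed is the usual unwind-on-error rule: anything registered earlier in the function must be unregistered on a later failure. A minimal sketch with hypothetical names (not the driver's actual code):

static struct example_mac *example_mac_alloc(void)
{
        struct platform_device *pdev;
        struct example_mac *mac;

        pdev = platform_device_register_simple("example-mac", -1, NULL, 0);
        if (IS_ERR(pdev))
                return ERR_CAST(pdev);

        mac = kzalloc(sizeof(*mac), GFP_KERNEL);
        if (!mac) {
                /* Roll back the registration done above. */
                platform_device_unregister(pdev);
                return ERR_PTR(-ENOMEM);
        }

        mac->pdev = pdev;
        return mac;
}

The driver's actual error path additionally checks pdev for NULL before unregistering, as the hunk below shows.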
Fixes: 616f5701f4ab ("qtnfmac: assign each wiphy to its own virtual platform device") Reported-by: Hulk Robot Signed-off-by: Wang Hai Reviewed-by: Sergey Matyukevich Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200730064910.37589-1-wanghai38@huawei.com --- drivers/net/wireless/quantenna/qtnfmac/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index eea777f8acea..6aafff9d4231 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -446,8 +446,11 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, } wiphy = qtnf_wiphy_allocate(bus, pdev); - if (!wiphy) + if (!wiphy) { + if (pdev) + platform_device_unregister(pdev); return ERR_PTR(-ENOMEM); + } mac = wiphy_priv(wiphy); From 4dd9e7e08bc39ef34be40be8a0ddc6469fec8ff6 Mon Sep 17 00:00:00 2001 From: Flavio Suligoi Date: Thu, 23 Jul 2020 15:52:54 +0200 Subject: [PATCH 1922/2056] intersil: fix wiki website url In some Intersil files, the wiki url is still the old "wireless.kernel.org" instead of the new "wireless.wiki.kernel.org" Signed-off-by: Flavio Suligoi Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200723135254.594984-1-f.suligoi@asem.it --- drivers/net/wireless/intersil/Kconfig | 2 +- drivers/net/wireless/intersil/p54/Kconfig | 6 +++--- drivers/net/wireless/intersil/p54/fwio.c | 2 +- drivers/net/wireless/intersil/p54/p54usb.c | 2 +- drivers/net/wireless/intersil/prism54/islpci_hotplug.c | 3 ++- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intersil/Kconfig b/drivers/net/wireless/intersil/Kconfig index 6a6ce9d4aeee..c52d9b535623 100644 --- a/drivers/net/wireless/intersil/Kconfig +++ b/drivers/net/wireless/intersil/Kconfig @@ -30,7 +30,7 @@ config PRISM54 For more information refer to the p54 wiki: - http://wireless.kernel.org/en/users/Drivers/p54 + http://wireless.wiki.kernel.org/en/users/Drivers/p54 Note: You need a motherboard with DMA support to use any of these cards diff --git a/drivers/net/wireless/intersil/p54/Kconfig b/drivers/net/wireless/intersil/p54/Kconfig index 024be5507daf..003c378ed131 100644 --- a/drivers/net/wireless/intersil/p54/Kconfig +++ b/drivers/net/wireless/intersil/p54/Kconfig @@ -10,7 +10,7 @@ config P54_COMMON also need to be enabled in order to support any devices. These devices require softmac firmware which can be found at - + If you choose to build a module, it'll be called p54common. @@ -22,7 +22,7 @@ config P54_USB This driver is for USB isl38xx based wireless cards. These devices require softmac firmware which can be found at - + If you choose to build a module, it'll be called p54usb. @@ -36,7 +36,7 @@ config P54_PCI supported by the fullmac driver/firmware. This driver requires softmac firmware which can be found at - + If you choose to build a module, it'll be called p54pci. diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c index a5afcc865196..bece14e4ff0d 100644 --- a/drivers/net/wireless/intersil/p54/fwio.c +++ b/drivers/net/wireless/intersil/p54/fwio.c @@ -132,7 +132,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) if (priv->fw_var < 0x500) wiphy_info(priv->hw->wiphy, "you are using an obsolete firmware. 
" - "visit http://wireless.kernel.org/en/users/Drivers/p54 " + "visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 " "and grab one for \"kernel >= 2.6.28\"!\n"); if (priv->fw_var >= 0x300) { diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index ff0e30c0c14c..cae47663b17b 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -36,7 +36,7 @@ static struct usb_driver p54u_driver; * Note: * * Always update our wiki's device list (located at: - * http://wireless.kernel.org/en/users/Drivers/p54/devices ), + * http://wireless.wiki.kernel.org/en/users/Drivers/p54/devices ), * whenever you add a new device. */ diff --git a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c index f30a79a0b9c7..31a1e61326ff 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_hotplug.c +++ b/drivers/net/wireless/intersil/prism54/islpci_hotplug.c @@ -26,7 +26,8 @@ module_param(init_pcitm, int, 0); /* In this order: vendor, device, subvendor, subdevice, class, class_mask, * driver_data * If you have an update for this please contact prism54-devel@prism54.org - * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */ + * The latest list can be found at http://wireless.wiki.kernel.org/en/users/Drivers/p54 + */ static const struct pci_device_id prism54_id_tbl[] = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { From c3ab1804b168867974a4da541f6e77487a1074f5 Mon Sep 17 00:00:00 2001 From: Vaibhav Gupta Date: Tue, 28 Jul 2020 17:11:28 +0530 Subject: [PATCH 1923/2056] airo: use generic power management Drivers using legacy power management .suspen()/.resume() callbacks have to manage PCI states and device's PM states themselves. They also need to take care of standard configuration registers. Switch to generic power management framework using a single "struct dev_pm_ops" variable to take the unnecessary load from the driver. This also avoids the need for the driver to directly call most of the PCI helper functions and device power state control functions, as through the generic framework PCI Core takes care of the necessary operations, and drivers are required to do only device-specific jobs. 
Signed-off-by: Vaibhav Gupta Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200728114128.1218310-1-vaibhavgupta40@gmail.com --- drivers/net/wireless/cisco/airo.c | 39 +++++++++++++++---------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 726cb1bc86cd..316672486d82 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -74,16 +74,19 @@ MODULE_DEVICE_TABLE(pci, card_ids); static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *); static void airo_pci_remove(struct pci_dev *); -static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state); -static int airo_pci_resume(struct pci_dev *pdev); +static int __maybe_unused airo_pci_suspend(struct device *dev); +static int __maybe_unused airo_pci_resume(struct device *dev); + +static SIMPLE_DEV_PM_OPS(airo_pci_pm_ops, + airo_pci_suspend, + airo_pci_resume); static struct pci_driver airo_driver = { - .name = DRV_NAME, - .id_table = card_ids, - .probe = airo_pci_probe, - .remove = airo_pci_remove, - .suspend = airo_pci_suspend, - .resume = airo_pci_resume, + .name = DRV_NAME, + .id_table = card_ids, + .probe = airo_pci_probe, + .remove = airo_pci_remove, + .driver.pm = &airo_pci_pm_ops, }; #endif /* CONFIG_PCI */ @@ -5573,9 +5576,9 @@ static void airo_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused airo_pci_suspend(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct airo_info *ai = dev->ml_priv; Cmd cmd; Resp rsp; @@ -5591,25 +5594,21 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) return -EAGAIN; disable_MAC(ai, 0); netif_device_detach(dev); - ai->power = state; + ai->power = PMSG_SUSPEND; cmd.cmd = HOSTSLEEP; issuecommand(ai, &cmd, &rsp); - pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); - pci_save_state(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + device_wakeup_enable(dev_d); return 0; } -static int airo_pci_resume(struct pci_dev *pdev) +static int __maybe_unused airo_pci_resume(struct device *dev_d) { - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(dev_d); struct airo_info *ai = dev->ml_priv; - pci_power_t prev_state = pdev->current_state; + pci_power_t prev_state = to_pci_dev(dev_d)->current_state; - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - pci_enable_wake(pdev, PCI_D0, 0); + device_wakeup_disable(dev_d); if (prev_state != PCI_D1) { reset_card(dev, 0); From 20e6421344b5bc2f97b8e2db47b6994368417904 Mon Sep 17 00:00:00 2001 From: Wang Hai Date: Thu, 30 Jul 2020 15:39:39 +0800 Subject: [PATCH 1924/2056] wl1251: fix always return 0 error wl1251_event_ps_report() should not always return 0 because wl1251_ps_set_mode() may fail. Change it to return 'ret'. 
Fixes: f7ad1eed4d4b ("wl1251: retry power save entry") Reported-by: Hulk Robot Signed-off-by: Wang Hai Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200730073939.33704-1-wanghai38@huawei.com --- drivers/net/wireless/ti/wl1251/event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index 850864dbafa1..e6d426edab56 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -70,7 +70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl, break; } - return 0; + return ret; } static void wl1251_event_mbox_dump(struct event_mailbox *mbox) From 92d26d1abf8e4b563bb8d98ce1bf31681364e4a8 Mon Sep 17 00:00:00 2001 From: Zheng Yongjun Date: Tue, 21 Jul 2020 16:39:35 +0800 Subject: [PATCH 1925/2056] drivers: bcma: remove set but not used variable `addrh` and `sizeh` Fixes gcc '-Wunused-but-set-variable' warning: drivers/bcma/scan.c: In function 'bcma_erom_get_addr_desc': drivers/bcma/scan.c:219 warning: variable `addrh` and `sizeh` set but not used [-Wunused-but-set-variable] Signed-off-by: Zheng Yongjun Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200721083935.13306-1-zhengyongjun3@huawei.com --- drivers/bcma/scan.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 1a942f734188..d49e7c0de2b6 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr) static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, u32 type, u8 port) { - u32 addrl, addrh, sizeh = 0; + u32 addrl; u32 size; u32 ent = bcma_erom_get_ent(bus, eromptr); @@ -233,14 +233,12 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, addrl = ent & SCAN_ADDR_ADDR; if (ent & SCAN_ADDR_AG32) - addrh = bcma_erom_get_ent(bus, eromptr); - else - addrh = 0; + bcma_erom_get_ent(bus, eromptr); if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) { size = bcma_erom_get_ent(bus, eromptr); if (size & SCAN_SIZE_SG32) - sizeh = bcma_erom_get_ent(bus, eromptr); + bcma_erom_get_ent(bus, eromptr); } return addrl; From a080ecb11ae22f8433b8f88554f1f43e93a97a78 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Wed, 22 Jul 2020 13:17:25 +0200 Subject: [PATCH 1926/2056] bcma: gpio: Use irqchip template MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the driver use the irqchip template to assign properties to the gpio_irq_chip instead of using the explicit call to gpiochip_irqchip_add(). The irqchip is instead added while adding the gpiochip. 
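A minimal sketch of the template pattern (the example_* names are hypothetical; the gpiolib structures, fields and calls are real):

/* Hypothetical irq_chip; a real driver also fills in mask/unmask callbacks. */
static struct irq_chip example_gpio_irq_chip = {
        .name = "example-gpio",
};

static int example_gpio_register(struct example_chip *ec)
{
        struct gpio_chip *gc = &ec->gc;
        struct gpio_irq_chip *girq = &gc->irq;

        girq->chip = &example_gpio_irq_chip;
        girq->parent_handler = NULL;    /* parent IRQ stays driver-handled */
        girq->num_parents = 0;
        girq->parents = NULL;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_simple_irq;

        /* The irqchip is now registered as part of adding the gpiochip. */
        return gpiochip_add_data(gc, ec);
}

Because registration happens inside gpiochip_add_data(), the driver fills in the irq settings first and unwinds them if the gpiochip registration fails, which is why the patch below also swaps the order of the two init calls.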
Cc: Rafał Miłecki Cc: Florian Fainelli Signed-off-by: Linus Walleij Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200722111725.210923-1-linus.walleij@linaro.org --- drivers/bcma/driver_gpio.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index a5df3d111334..8a1e4705bc87 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -122,6 +122,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id) static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) { struct gpio_chip *chip = &cc->gpio; + struct gpio_irq_chip *girq = &chip->irq; int hwirq, err; if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) @@ -136,15 +137,13 @@ static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) bcma_chipco_gpio_intmask(cc, ~0, 0); bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO); - err = gpiochip_irqchip_add(chip, - &bcma_gpio_irq_chip, - 0, - handle_simple_irq, - IRQ_TYPE_NONE); - if (err) { - free_irq(hwirq, cc); - return err; - } + girq->chip = &bcma_gpio_irq_chip; + /* This will let us handle the parent IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; return 0; } @@ -212,13 +211,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) else chip->base = -1; - err = gpiochip_add_data(chip, cc); + err = bcma_gpio_irq_init(cc); if (err) return err; - err = bcma_gpio_irq_init(cc); + err = gpiochip_add_data(chip, cc); if (err) { - gpiochip_remove(chip); + bcma_gpio_irq_exit(cc); return err; } From 3dc05ffb04436020f63138186dbc4f37bd938552 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Tue, 21 Jul 2020 13:23:02 +0200 Subject: [PATCH 1927/2056] brcmfmac: Set timeout value when configuring power save Set the timeout value as per cfg80211's set_power_mgmt() request. If the requested value value is left undefined we set it to 2 seconds, the maximum supported value. Signed-off-by: Nicolas Saenz Julienne Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20200721112302.22718-1-nsaenzjulienne@suse.de --- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5d99771c3f64..ab0da2ff982e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -84,6 +84,8 @@ #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) +#define BRCMF_PS_MAX_TIMEOUT_MS 2000 + #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) @@ -2942,6 +2944,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev, else bphy_err(drvr, "error (%d)\n", err); } + + err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret", + min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS)); + if (err) + bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err); + done: brcmf_dbg(TRACE, "Exit\n"); return err; From 77a92189ecfd061616ad531d386639aab7baaad9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 2 Aug 2020 03:05:25 +0200 Subject: [PATCH 1928/2056] netfilter: nf_tables: report EEXIST on overlaps Replace EBUSY by EEXIST in the following cases: - If the user adds a chain with a different configuration such as different type, hook and priority. 
- If the user adds a non-base chain that clashes with an existing basechain. - If the user adds a { key : value } mapping element and the key exists but the value differs. - If the device already belongs to an existing flowtable. User describe that this error reporting is confusing: - https://bugzilla.netfilter.org/show_bug.cgi?id=1176 - https://bugzilla.netfilter.org/show_bug.cgi?id=1413 Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fac552b0179f..6571789989bc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2097,7 +2097,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, if (nla[NFTA_CHAIN_HOOK]) { if (!nft_is_base_chain(chain)) - return -EBUSY; + return -EEXIST; err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family, false); @@ -2107,21 +2107,21 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, basechain = nft_base_chain(chain); if (basechain->type != hook.type) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } if (ctx->family == NFPROTO_NETDEV) { if (!nft_hook_list_equal(&basechain->hook_list, &hook.list)) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } } else { ops = &basechain->ops; if (ops->hooknum != hook.num || ops->priority != hook.priority) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } } nft_chain_release_hook(&hook); @@ -5262,10 +5262,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^ nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) || nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^ - nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) { - err = -EBUSY; + nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) goto err_element_clash; - } if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) && nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) && memcmp(nft_set_ext_data(ext), @@ -5273,7 +5271,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) && nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF) && *nft_set_ext_obj(ext) != *nft_set_ext_obj(ext2))) - err = -EBUSY; + goto err_element_clash; else if (!(nlmsg_flags & NLM_F_EXCL)) err = 0; } else if (err == -ENOTEMPTY) { @@ -6423,7 +6421,7 @@ static int nft_register_flowtable_net_hooks(struct net *net, list_for_each_entry(hook2, &ft->hook_list, list) { if (hook->ops.dev == hook2->ops.dev && hook->ops.pf == hook2->ops.pf) { - err = -EBUSY; + err = -EEXIST; goto err_unregister_net_hooks; } } From 4e56cde15f7d68cf86ff8efff8504497de152475 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Fri, 31 Jul 2020 21:38:30 +0300 Subject: [PATCH 1929/2056] mac80211: Handle special status codes in SAE commit SAE authentication has been extended with H2E (IEEE 802.11 REVmd) and PK (WFA) options. Those extensions use special status code values in the SAE commit messages (Authentication frame with transaction sequence number 1) to identify which extension is in use. mac80211 was interpreting those new values as the AP denying authentication and that resulted in failure to complete SAE authentication in some cases. Fix this by adding exceptions for the new status code values 126 and 127. 
Signed-off-by: Jouni Malinen Link: https://lore.kernel.org/r/20200731183830.18735-1-jouni@codeaurora.org Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 ++ net/mac80211/mlme.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 9f732499ea88..c47f43e65a2f 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2561,6 +2561,8 @@ enum ieee80211_statuscode { /* 802.11ai */ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, + WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126, + WLAN_STATUS_SAE_PK = 127, }; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 839d0367446c..8b7ca8ddfe20 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2988,7 +2988,10 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata, cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len); if (auth_alg == WLAN_AUTH_SAE && - status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED) + (status_code == WLAN_STATUS_ANTI_CLOG_REQUIRED || + (auth_transaction == 1 && + (status_code == WLAN_STATUS_SAE_HASH_TO_ELEMENT || + status_code == WLAN_STATUS_SAE_PK)))) return; sdata_info(sdata, "%pM denied authentication (status %d)\n", From 6628d00116b37bd5ef44cc69f5a8df6b0d4e14b7 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Mon, 3 Aug 2020 10:45:40 +0200 Subject: [PATCH 1930/2056] mac8211: fix struct initialisation Sparse showed up with the following error. net/mac80211/agg-rx.c:480:43: warning: Using plain integer as NULL pointer Fixes: 2ab45876756f (mac80211: add support for the ADDBA extension element) Signed-off-by: John Crispin Link: https://lore.kernel.org/r/20200803084540.179908-1-john@phrozen.org Signed-off-by: Johannes Berg --- net/mac80211/agg-rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c index 7f245e9f114c..313ba97acae3 100644 --- a/net/mac80211/agg-rx.c +++ b/net/mac80211/agg-rx.c @@ -477,7 +477,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, size_t len) { u16 capab, tid, timeout, ba_policy, buf_size, start_seq_num; - struct ieee802_11_elems elems = { 0 }; + struct ieee802_11_elems elems = { }; u8 dialog_token; int ies_len; From 47d76e31908d8fdb06c6c75e4f3c4d4a08c9c850 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:12:38 +0800 Subject: [PATCH 1931/2056] mac80211: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). Signed-off-by: Miaohe Lin Link: https://lore.kernel.org/r/1596273158-24183-1-git-send-email-linmiaohe@huawei.com Signed-off-by: Johannes Berg --- net/mac80211/trace.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index 1b4709694d2a..50ab5b9d8eab 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -22,7 +22,8 @@ #define LOCAL_PR_ARG __entry->wiphy_name #define STA_ENTRY __array(char, sta_addr, ETH_ALEN) -#define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN)) +#define STA_ASSIGN (sta ? 
memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : \ + eth_zero_addr(__entry->sta_addr)) #define STA_NAMED_ASSIGN(s) memcpy(__entry->sta_addr, (s)->addr, ETH_ALEN) #define STA_PR_FMT " sta:%pM" #define STA_PR_ARG __entry->sta_addr From 3b1648f10961ce41bb709dfcadfdb9836c9a8ab8 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:15:49 +0800 Subject: [PATCH 1932/2056] nl80211: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). Signed-off-by: Miaohe Lin Link: https://lore.kernel.org/r/1596273349-24333-1-git-send-email-linmiaohe@huawei.com Signed-off-by: Johannes Berg --- net/wireless/nl80211.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 5c57ac2ee75b..814e23d3ce7c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -10400,8 +10400,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) memcpy(dev->ieee80211_ptr->disconnect_bssid, connect.bssid, ETH_ALEN); else - memset(dev->ieee80211_ptr->disconnect_bssid, - 0, ETH_ALEN); + eth_zero_addr(dev->ieee80211_ptr->disconnect_bssid); } wdev_unlock(dev->ieee80211_ptr); From 5981fe5b0529ba25d95f37d7faa434183ad618c5 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 3 Aug 2020 11:02:10 +0200 Subject: [PATCH 1933/2056] mac80211: fix misplaced while instead of if This never was intended to be a 'while' loop, it should've just been an 'if' instead of 'while'. Fix this. I noticed this while applying another patch from Ben that intended to fix a busy loop at this spot. Cc: stable@vger.kernel.org Fixes: b16798f5b907 ("mac80211: mark station unauthorized before key removal") Reported-by: Ben Greear Link: https://lore.kernel.org/r/20200803110209.253009ae41ff.I3522aad099392b31d5cf2dcca34cbac7e5832dde@changeid Signed-off-by: Johannes Berg --- net/mac80211/sta_info.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 1dc747de30cc..f2840d1d95cf 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1050,7 +1050,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta) might_sleep(); lockdep_assert_held(&local->sta_mtx); - while (sta->sta_state == IEEE80211_STA_AUTHORIZED) { + if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC); WARN_ON_ONCE(ret); } From 2ef740da4fef947fb4614808fb61ca8ccb313e0b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 2 Aug 2020 03:26:21 +0200 Subject: [PATCH 1934/2056] selftests: netfilter: add meta iif/oif match test simple test case, but would have caught this: FAIL: iifgroupcount, want "packets 2", got table inet filter { counter iifgroupcount { packets 0 bytes 0 } } Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- tools/testing/selftests/netfilter/Makefile | 2 +- tools/testing/selftests/netfilter/nft_meta.sh | 124 ++++++++++++++++++ 2 files changed, 125 insertions(+), 1 deletion(-) create mode 100755 tools/testing/selftests/netfilter/nft_meta.sh diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index a179f0dca8ce..a374e10ef506 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile @@ -4,7 +4,7 @@ TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \ conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \ nft_concat_range.sh nft_conntrack_helper.sh \ - nft_queue.sh + nft_queue.sh 
nft_meta.sh LDLIBS = -lmnl TEST_GEN_FILES = nf-queue diff --git a/tools/testing/selftests/netfilter/nft_meta.sh b/tools/testing/selftests/netfilter/nft_meta.sh new file mode 100755 index 000000000000..d250b84dd5bc --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_meta.sh @@ -0,0 +1,124 @@ +#!/bin/bash + +# check iif/iifname/oifgroup/iiftype match. + +# Kselftest framework requirement - SKIP code is 4. +ksft_skip=4 +sfx=$(mktemp -u "XXXXXXXX") +ns0="ns0-$sfx" + +nft --version > /dev/null 2>&1 +if [ $? -ne 0 ];then + echo "SKIP: Could not run test without nft tool" + exit $ksft_skip +fi + +cleanup() +{ + ip netns del "$ns0" +} + +ip netns add "$ns0" +ip -net "$ns0" link set lo up +ip -net "$ns0" addr add 127.0.0.1 dev lo + +trap cleanup EXIT + +ip netns exec "$ns0" nft -f /dev/stdin < /dev/null + +check_lo_counters "2" true + +check_one_counter oskuidcounter "1" true +check_one_counter oskgidcounter "1" true +check_one_counter imarkcounter "1" true +check_one_counter omarkcounter "1" true + +if [ $ret -eq 0 ];then + echo "OK: nftables meta iif/oif counters at expected values" +fi + +exit $ret From 73f9407b3eb893bc8a82293cc8d4dfa3db079c0b Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 3 Aug 2020 10:33:04 +0300 Subject: [PATCH 1935/2056] netfilter: conntrack: Move nf_ct_offload_timeout to header file To be used by callers from other modules. [ Rename DAY to NF_CT_DAY to avoid possible symbol name pollution issue --Pablo ] Signed-off-by: Roi Dayan Reviewed-by: Oz Shlomo Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 12 ++++++++++++ net/netfilter/nf_conntrack_core.c | 12 ------------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 90690e37a56f..c7bfddfc65b0 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -279,6 +279,18 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct) !nf_ct_is_dying(ct); } +#define NF_CT_DAY (86400 * HZ) + +/* Set an arbitrary timeout large enough not to ever expire, this save + * us a check for the IPS_OFFLOAD_BIT from the packet path via + * nf_ct_is_expired(). + */ +static inline void nf_ct_offload_timeout(struct nf_conn *ct) +{ + if (nf_ct_expires(ct) < NF_CT_DAY / 2) + ct->timeout = nfct_time_stamp + NF_CT_DAY; +} + struct kernel_param; int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f33d72c5b06e..c3cea50d1bcb 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1344,18 +1344,6 @@ static bool gc_worker_can_early_drop(const struct nf_conn *ct) return false; } -#define DAY (86400 * HZ) - -/* Set an arbitrary timeout large enough not to ever expire, this save - * us a check for the IPS_OFFLOAD_BIT from the packet path via - * nf_ct_is_expired(). - */ -static void nf_ct_offload_timeout(struct nf_conn *ct) -{ - if (nf_ct_expires(ct) < DAY / 2) - ct->timeout = nfct_time_stamp + DAY; -} - static void gc_worker(struct work_struct *work) { unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u); From 4203b19c27967d9eff6928f6a733f81892ffc592 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 3 Aug 2020 10:33:05 +0300 Subject: [PATCH 1936/2056] netfilter: flowtable: Set offload timeout when adding flow On heavily loaded systems the GC can take time to go over all existing conns and reset their timeout. 
At that time other calls like from nf_conntrack_in() can call of nf_ct_is_expired() and see the conn as expired. To fix this when we set the offload bit we should also reset the timeout instead of counting on GC to finish first iteration over all conns before the initial timeout. Fixes: 90964016e5d3 ("netfilter: nf_conntrack: add IPS_OFFLOAD status bit") Signed-off-by: Roi Dayan Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c index b1eb5272b379..4f7a567c536e 100644 --- a/net/netfilter/nf_flow_table_core.c +++ b/net/netfilter/nf_flow_table_core.c @@ -243,6 +243,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) return err; } + nf_ct_offload_timeout(flow->ct); + if (nf_flowtable_hw_offload(flow_table)) { __set_bit(NF_FLOW_HW, &flow->flags); nf_flow_offload_add(flow_table, flow); From 0b91111fb1a164fedbb68a9263afafd10ffa6ec3 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 18 Jun 2020 11:17:42 +0200 Subject: [PATCH 1937/2056] mac80211: Do not report beacon loss if beacon filtering enabled mac80211.h says: Beacon filter support is advertised with the IEEE80211_VIF_BEACON_FILTER interface capability. The driver needs to enable beacon filter support whenever power save is enabled, that is IEEE80211_CONF_PS is set. When power save is enabled, the stack will not check for beacon loss and the driver needs to notify about loss of beacons with ieee80211_beacon_loss(). Some controllers may want to dynamically enable the beacon filter capabilities on power save entry (CONF_PS) and disable it on exit. This is the case for the wcn36xx driver which only supports beacon filtering in PS mode (no CONNECTION_MONITOR support). When the mac80211 beacon monitor timer expires, the beacon filter flag must be checked again in case it as been changed in between (e.g. vif moved to PS mode). Signed-off-by: Loic Poulain Link: https://lore.kernel.org/r/1592471863-31402-1-git-send-email-loic.poulain@linaro.org Signed-off-by: Johannes Berg --- net/mac80211/mlme.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 8b7ca8ddfe20..ac870309b911 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4563,6 +4563,9 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn) return; + if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) + return; + sdata->u.mgd.connection_loss = false; ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_connection_loss_work); From a278f3d8191228212c553a5d4303fa603214b717 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 30 Jul 2020 19:42:44 -0700 Subject: [PATCH 1938/2056] tools, build: Propagate build failures from tools/build/Makefile.build The '&&' command seems to have a bad effect when $(cmd_$(1)) exits with non-zero effect: the command failure is masked (despite `set -e`) and all but the first command of $(dep-cmd) is executed (successfully, as they are mostly printfs), thus overall returning 0 in the end. This means in practice that despite compilation errors, tools's build Makefile will return success. We see this very reliably with libbpf's Makefile, which doesn't get compilation error propagated properly. This in turns causes issues with selftests build, as well as bpftool and other projects that rely on building libbpf. The fix is simple: don't use &&. 
Given `set -e`, we don't need to chain commands with &&. The shell will exit on first failure, giving desired behavior and propagating error properly. Fixes: 275e2d95591e ("tools build: Move dependency copy into function") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Jiri Olsa Link: https://lore.kernel.org/bpf/20200731024244.872574-1-andriin@fb.com --- tools/build/Build.include | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/build/Build.include b/tools/build/Build.include index 9ec01f4454f9..585486e40995 100644 --- a/tools/build/Build.include +++ b/tools/build/Build.include @@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)), # dependencies in the cmd file if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \ @set -e; \ - $(echo-cmd) $(cmd_$(1)) && $(dep-cmd)) + $(echo-cmd) $(cmd_$(1)); \ + $(dep-cmd)) # if_changed - execute command if any prerequisite is newer than # target, or command line has changed From 041549b7b2c7811ec40e705c439211f00ade2dda Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:40 +0800 Subject: [PATCH 1939/2056] tools, bpftool: Fix wrong return value in do_dump() In case of btf_id does not exist, a negative error code -ENOENT should be returned. Fixes: c93cc69004df3 ("bpftool: add ability to dump BTF types") Signed-off-by: Tianjia Zhang Signed-off-by: Daniel Borkmann Reviewed-by: Tobias Klauser Acked-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20200802111540.5384-1-tianjia.zhang@linux.alibaba.com --- tools/bpf/bpftool/btf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index fc9bc7a23db6..37c091308714 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -596,7 +596,7 @@ static int do_dump(int argc, char **argv) goto done; } if (!btf) { - err = ENOENT; + err = -ENOENT; p_err("can't find btf with ID (%u)", btf_id); goto done; } From 94a1fedd63edb672933bef44ca9213937e377c05 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 1 Aug 2020 18:32:17 -0700 Subject: [PATCH 1940/2056] libbpf: Add btf__parse_raw() and generic btf__parse() APIs Add public APIs to parse BTF from raw data file (e.g., /sys/kernel/btf/vmlinux), as well as generic btf__parse(), which will try to determine correct format, currently either raw or ELF. 
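A short usage sketch of the new entry point (illustrative; the vmlinux path is just a common example of a raw BTF file):

#include <bpf/btf.h>
#include <bpf/libbpf.h>

int example_load_vmlinux_btf(void)
{
        struct btf *btf;
        int err;

        /* Accepts raw BTF as well as an ELF object with a .BTF section. */
        btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
        err = libbpf_get_error(btf);
        if (err)
                return err;

        /* ... inspect types via btf__find_by_name() and friends ... */
        btf__free(btf);
        return 0;
}

As the libbpf.map hunk below shows, btf__parse() and btf__parse_raw() are exported starting with the LIBBPF_0.1.0 version node.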
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200802013219.864880-2-andriin@fb.com --- tools/lib/bpf/btf.c | 114 ++++++++++++++++++++++++++------------- tools/lib/bpf/btf.h | 5 +- tools/lib/bpf/libbpf.map | 2 + 3 files changed, 83 insertions(+), 38 deletions(-) diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index ded5b29965f9..856b09a04563 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -562,6 +562,83 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) return btf; } +struct btf *btf__parse_raw(const char *path) +{ + void *data = NULL; + struct btf *btf; + FILE *f = NULL; + __u16 magic; + int err = 0; + long sz; + + f = fopen(path, "rb"); + if (!f) { + err = -errno; + goto err_out; + } + + /* check BTF magic */ + if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) { + err = -EIO; + goto err_out; + } + if (magic != BTF_MAGIC) { + /* definitely not a raw BTF */ + err = -EPROTO; + goto err_out; + } + + /* get file size */ + if (fseek(f, 0, SEEK_END)) { + err = -errno; + goto err_out; + } + sz = ftell(f); + if (sz < 0) { + err = -errno; + goto err_out; + } + /* rewind to the start */ + if (fseek(f, 0, SEEK_SET)) { + err = -errno; + goto err_out; + } + + /* pre-alloc memory and read all of BTF data */ + data = malloc(sz); + if (!data) { + err = -ENOMEM; + goto err_out; + } + if (fread(data, 1, sz, f) < sz) { + err = -EIO; + goto err_out; + } + + /* finally parse BTF data */ + btf = btf__new(data, sz); + +err_out: + free(data); + if (f) + fclose(f); + return err ? ERR_PTR(err) : btf; +} + +struct btf *btf__parse(const char *path, struct btf_ext **btf_ext) +{ + struct btf *btf; + + if (btf_ext) + *btf_ext = NULL; + + btf = btf__parse_raw(path); + if (!IS_ERR(btf) || PTR_ERR(btf) != -EPROTO) + return btf; + + return btf__parse_elf(path, btf_ext); +} + static int compare_vsi_off(const void *_a, const void *_b) { const struct btf_var_secinfo *a = _a; @@ -2951,41 +3028,6 @@ static int btf_dedup_remap_types(struct btf_dedup *d) return 0; } -static struct btf *btf_load_raw(const char *path) -{ - struct btf *btf; - size_t read_cnt; - struct stat st; - void *data; - FILE *f; - - if (stat(path, &st)) - return ERR_PTR(-errno); - - data = malloc(st.st_size); - if (!data) - return ERR_PTR(-ENOMEM); - - f = fopen(path, "rb"); - if (!f) { - btf = ERR_PTR(-errno); - goto cleanup; - } - - read_cnt = fread(data, 1, st.st_size, f); - fclose(f); - if (read_cnt < st.st_size) { - btf = ERR_PTR(-EBADF); - goto cleanup; - } - - btf = btf__new(data, read_cnt); - -cleanup: - free(data); - return btf; -} - /* * Probe few well-known locations for vmlinux kernel image and try to load BTF * data out of it to use for target BTF. 
@@ -3021,7 +3063,7 @@ struct btf *libbpf_find_kernel_btf(void) continue; if (locations[i].raw_btf) - btf = btf_load_raw(path); + btf = btf__parse_raw(path); else btf = btf__parse_elf(path, NULL); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 491c7b41ffdc..f4a1a1d2b9a3 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -64,8 +64,9 @@ struct btf_ext_header { LIBBPF_API void btf__free(struct btf *btf); LIBBPF_API struct btf *btf__new(const void *data, __u32 size); -LIBBPF_API struct btf *btf__parse_elf(const char *path, - struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); +LIBBPF_API struct btf *btf__parse_raw(const char *path); LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); LIBBPF_API int btf__load(struct btf *btf); LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 099863411f7d..0c4722bfdd0a 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -293,5 +293,7 @@ LIBBPF_0.1.0 { bpf_program__is_sk_lookup; bpf_program__set_autoload; bpf_program__set_sk_lookup; + btf__parse; + btf__parse_raw; btf__set_fd; } LIBBPF_0.0.9; From 8526df04570f5698c97ac661ad1f2f35293557a7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 1 Aug 2020 18:32:18 -0700 Subject: [PATCH 1941/2056] tools/bpftool: Use libbpf's btf__parse() API for parsing BTF from file Use generic libbpf API to parse BTF data from file, instead of re-implementing it in bpftool. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200802013219.864880-3-andriin@fb.com --- tools/bpf/bpftool/btf.c | 54 +---------------------------------------- 1 file changed, 1 insertion(+), 53 deletions(-) diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 37c091308714..8ab142ff5eac 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -422,54 +422,6 @@ static int dump_btf_c(const struct btf *btf, return err; } -static struct btf *btf__parse_raw(const char *file) -{ - struct btf *btf; - struct stat st; - __u8 *buf; - FILE *f; - - if (stat(file, &st)) - return NULL; - - f = fopen(file, "rb"); - if (!f) - return NULL; - - buf = malloc(st.st_size); - if (!buf) { - btf = ERR_PTR(-ENOMEM); - goto exit_close; - } - - if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) { - btf = ERR_PTR(-EINVAL); - goto exit_free; - } - - btf = btf__new(buf, st.st_size); - -exit_free: - free(buf); -exit_close: - fclose(f); - return btf; -} - -static bool is_btf_raw(const char *file) -{ - __u16 magic = 0; - int fd, nb_read; - - fd = open(file, O_RDONLY); - if (fd < 0) - return false; - - nb_read = read(fd, &magic, sizeof(magic)); - close(fd); - return nb_read == sizeof(magic) && magic == BTF_MAGIC; -} - static int do_dump(int argc, char **argv) { struct btf *btf = NULL; @@ -547,11 +499,7 @@ static int do_dump(int argc, char **argv) } NEXT_ARG(); } else if (is_prefix(src, "file")) { - if (is_btf_raw(*argv)) - btf = btf__parse_raw(*argv); - else - btf = btf__parse_elf(*argv, NULL); - + btf = btf__parse(*argv, NULL); if (IS_ERR(btf)) { err = -PTR_ERR(btf); btf = NULL; From f86ca3cffef153555a3f4755b3a44881d962754f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 1 Aug 2020 18:32:19 -0700 Subject: [PATCH 1942/2056] tools/resolve_btfids: Use libbpf's btf__parse() API Instead of re-implementing generic BTF 
parsing logic, use libbpf's API. Also add .gitignore for resolve_btfids's build artifacts. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200802013219.864880-4-andriin@fb.com --- tools/bpf/resolve_btfids/.gitignore | 4 ++ tools/bpf/resolve_btfids/main.c | 58 +---------------------------- 2 files changed, 5 insertions(+), 57 deletions(-) create mode 100644 tools/bpf/resolve_btfids/.gitignore diff --git a/tools/bpf/resolve_btfids/.gitignore b/tools/bpf/resolve_btfids/.gitignore new file mode 100644 index 000000000000..a026df7dc280 --- /dev/null +++ b/tools/bpf/resolve_btfids/.gitignore @@ -0,0 +1,4 @@ +/FEATURE-DUMP.libbpf +/bpf_helper_defs.h +/fixdep +/resolve_btfids diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index 6956b6350cad..52d883325a23 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -403,62 +403,6 @@ static int symbols_collect(struct object *obj) return 0; } -static struct btf *btf__parse_raw(const char *file) -{ - struct btf *btf; - struct stat st; - __u8 *buf; - FILE *f; - - if (stat(file, &st)) - return NULL; - - f = fopen(file, "rb"); - if (!f) - return NULL; - - buf = malloc(st.st_size); - if (!buf) { - btf = ERR_PTR(-ENOMEM); - goto exit_close; - } - - if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) { - btf = ERR_PTR(-EINVAL); - goto exit_free; - } - - btf = btf__new(buf, st.st_size); - -exit_free: - free(buf); -exit_close: - fclose(f); - return btf; -} - -static bool is_btf_raw(const char *file) -{ - __u16 magic = 0; - int fd, nb_read; - - fd = open(file, O_RDONLY); - if (fd < 0) - return false; - - nb_read = read(fd, &magic, sizeof(magic)); - close(fd); - return nb_read == sizeof(magic) && magic == BTF_MAGIC; -} - -static struct btf *btf_open(const char *path) -{ - if (is_btf_raw(path)) - return btf__parse_raw(path); - else - return btf__parse_elf(path, NULL); -} - static int symbols_resolve(struct object *obj) { int nr_typedefs = obj->nr_typedefs; @@ -469,7 +413,7 @@ static int symbols_resolve(struct object *obj) struct btf *btf; __u32 nr; - btf = btf_open(obj->btf ?: obj->path); + btf = btf__parse(obj->btf ?: obj->path, NULL); err = libbpf_get_error(btf); if (err) { pr_err("FAILED: load BTF from %s: %s", From 45d252ca803b9aa232c455a7c3d0c9e14b89247f Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 21 Jun 2020 06:57:03 +0300 Subject: [PATCH 1943/2056] net/mlx5e: Enable users to change VF/PF representors carrier state Currently PF and VF representor netdevice carrier is always controlled by controlling the representor netdevice device state as up/down. Representor netdevice state change undergoes one or more txq/rxq destroy/create commands to firmware, skb and its rx buffer allocation, health reporters creation and more. Due to this limitation users do not have the ability to just change the carrier of the non uplink representors without modifying the device state. In one use case when the eswitch physical port carrier is down/up, user needs to update the VF link state to same as physical port carrier. Example of updating VF representor carrier state: $ ip link set enp0s8f0npf0vf0 carrier off $ ip link set enp0s8f0npf0vf0 carrier on This enhancement results into VF link state change which is represented by the VF representor netdevice carrier. This enables users to modify the representor carrier without modifying the representor netdevice state. 
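For illustration only, a minimal, driver-agnostic sketch of the .ndo_change_carrier plumbing this relies on; my_dev_set_link() is a hypothetical stand-in for the real firmware/vport command, everything else is the stock netdev API:

#include <linux/netdevice.h>

/* Hypothetical helper standing in for the device-specific command that
 * programs the requested link state into firmware.
 */
static int my_dev_set_link(struct net_device *dev, bool up)
{
	return 0;
}

static int my_change_carrier(struct net_device *dev, bool new_carrier)
{
	int err;

	/* Program the device first, then mirror the result in the netdev
	 * carrier without touching the administrative device state.
	 */
	err = my_dev_set_link(dev, new_carrier);
	if (err)
		return err;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_change_carrier	= my_change_carrier,
	/* ... remaining callbacks elided ... */
};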
A simple test is run using [1] to calculate the time difference between updating carrier vs updating device state (to update just the carrier) with one VF to simulate 255 VFs. Time taken to update the carrier using device up/down: $ time ./calculate.sh dev enp0s8f0npf0vf0 real 0m30.913s user 0m0.200s sys 0m11.168s Time taken to update just the carrier using carrier iproute2 command: $ time ./calculate.sh carrier enp0s8f0npf0vf0 real 0m2.142s user 0m0.160s sys 0m2.021s Test shows that its better to use carrier on/off user interface to notify link up/down event to VF compare to device up/down interface, because carrier user interface delivers the same event 15 times faster. [1] https://github.com/paravmellanox/myscripts/blob/master/calculate_carrier_time.sh Signed-off-by: Parav Pandit Acked-by: Jiri Pirko Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_rep.c | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 48989541e2ef..3db81a8cfc1d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -611,6 +611,29 @@ static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev) return &rpriv->dl_port; } +static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + int err; + + if (new_carrier) { + err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, + rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP); + if (err) + return err; + netif_carrier_on(dev); + } else { + err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT, + rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN); + if (err) + return err; + netif_carrier_off(dev); + } + return 0; +} + static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_open = mlx5e_rep_open, .ndo_stop = mlx5e_rep_close, @@ -621,6 +644,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, .ndo_change_mtu = mlx5e_rep_change_mtu, + .ndo_change_carrier = mlx5e_rep_change_carrier, }; static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { From b206490940216542c68563699b279eed3c55107c Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Mon, 13 Jul 2020 14:09:04 +0300 Subject: [PATCH 1944/2056] net/mlx5: DR, Change push vlan action sequence The DR TX state machine supports the following order: modify header, push vlan and encapsulation. Instead fs_dr would pass: push vlan, modify header and encapsulation. The above caused the rule creation to fail on invalid action sequence provided error. 
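As an illustrative sketch only (hypothetical helper; the mlx5dr action handles are assumed to have been created already), this is the TX ordering that SW steering accepts and that fs_dr now follows:

struct mlx5dr_action;

/* Sketch: append TX actions in the order the DR state machine supports -
 * modify header, then push vlan, then encapsulation.
 */
static int queue_dr_tx_actions(struct mlx5dr_action *modify_hdr,
			       struct mlx5dr_action *push_vlan,
			       struct mlx5dr_action *encap,
			       struct mlx5dr_action **actions)
{
	int n = 0;

	if (modify_hdr)
		actions[n++] = modify_hdr;	/* 1. modify header */
	if (push_vlan)
		actions[n++] = push_vlan;	/* 2. push vlan */
	if (encap)
		actions[n++] = encap;		/* 3. encapsulation */

	return n;
}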
Fixes: 6a48faeeca10 ("net/mlx5: Add direct rule fs_cmd implementation") Signed-off-by: Alex Vesker Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- .../mellanox/mlx5/core/steering/fs_dr.c | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 8887b2440c7d..9b08eb557a31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -279,29 +279,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, /* The order of the actions are must to be keep, only the following * order is supported by SW steering: - * TX: push vlan -> modify header -> encap + * TX: modify header -> push vlan -> encap * RX: decap -> pop vlan -> modify header */ - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { - tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - } - if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { enum mlx5dr_action_reformat_type decap_type = DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2; @@ -354,6 +334,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = fte->action.modify_hdr->action.dr_action; + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { + tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + if (delay_encap_set) actions[num_actions++] = fte->action.pkt_reformat->action.dr_action; From 966e50597666d530b69de2abb9c83ff0a9bd3ee6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 28 Jul 2020 14:47:58 -0700 Subject: [PATCH 1945/2056] udp_tunnel: add the ability to hard-code IANA VXLAN mlx5 has the IANA VXLAN port (4789) hard coded by the device, instead of being added dynamically when tunnels are created. To support this add a workaround flag to struct udp_tunnel_nic_info. Skipping updates for the port is fairly trivial, dumping the hard coded port via ethtool requires some code duplication. The port is not a part of any real table, we dump it in a special table which has no tunnel types supported and only one entry. This is the last known workaround / hack needed to convert all drivers to the new infra. 
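A minimal sketch (hypothetical driver; callback names my_udp_set_port()/my_udp_unset_port() are made up, the struct fields and flags are the ones used by the udp_tunnel_nic infrastructure) of how a NIC with a fixed VXLAN port 4789 would describe itself with the new flag:

#include <net/udp_tunnel.h>

static int my_udp_set_port(struct net_device *dev, unsigned int table,
			   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->port into the device */
	return 0;
}

static int my_udp_unset_port(struct net_device *dev, unsigned int table,
			     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* remove ti->port from the device */
	return 0;
}

static const struct udp_tunnel_nic_info my_udp_tunnels = {
	.set_port	= my_udp_set_port,
	.unset_port	= my_udp_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN,
	.tables		= {
		{
			/* the hard-coded port 4789 is not counted here */
			.n_entries	= 3,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN,
		},
	},
};

/* at probe time: netdev->udp_tunnel_nic_info = &my_udp_tunnels; */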
Signed-off-by: Jakub Kicinski Signed-off-by: Saeed Mahameed --- Documentation/networking/ethtool-netlink.rst | 3 + include/net/udp_tunnel.h | 5 ++ net/ethtool/tunnels.c | 69 +++++++++++++++++--- net/ipv4/udp_tunnel_nic.c | 7 ++ 4 files changed, 76 insertions(+), 8 deletions(-) diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 7d75f1e32152..d53bcb31645a 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1263,6 +1263,9 @@ Kernel response contents: | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE`` | u32 | tunnel type | +-+-+-+---------------------------------------+--------+---------------------+ +For UDP tunnel table empty ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES`` indicates that +the table contains static entries, hard-coded by the NIC. + Request translation =================== diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index dd20ce99740c..94bb7a882250 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -193,6 +193,11 @@ enum udp_tunnel_nic_info_flags { UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), /* Device supports only IPv4 tunnels */ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), + /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN. + * This port must not be counted towards n_entries of any table. + * Driver will not receive any callback associated with port 4789. + */ + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3), }; /** diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index 6b89255f1231..84f23289475b 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -2,6 +2,7 @@ #include #include +#include #include "bitset.h" #include "common.h" @@ -18,6 +19,20 @@ static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE)); static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE == ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE)); +static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact) +{ + ssize_t size; + + size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact); + if (size < 0) + return size; + + return size + + nla_total_size(0) + /* _UDP_TABLE */ + nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ +} + static ssize_t ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, struct netlink_ext_ack *extack) @@ -25,8 +40,8 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct udp_tunnel_nic_info *info; unsigned int i; + ssize_t ret; size_t size; - int ret; info = req_base->dev->udp_tunnel_nic_info; if (!info) { @@ -39,13 +54,10 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { if (!info->tables[i].n_entries) - return size; + break; - size += nla_total_size(0); /* _UDP_TABLE */ - size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */ - ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL, - __ETHTOOL_UDP_TUNNEL_TYPE_CNT, - udp_tunnel_type_names, compact); + ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types, + compact); if (ret < 0) return ret; size += ret; @@ -53,6 +65,17 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base, size += udp_tunnel_nic_dump_size(req_base->dev, i); } + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + ret = ethnl_udp_table_reply_size(0, compact); + if (ret < 0) + return ret; + size += ret; + + size += 
nla_total_size(0) + /* _TABLE_ENTRY */ + nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */ + nla_total_size(sizeof(u32)); /* _ENTRY_TYPE */ + } + return size; } @@ -62,7 +85,7 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, { bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; const struct udp_tunnel_nic_info *info; - struct nlattr *ports, *table; + struct nlattr *ports, *table, *entry; unsigned int i; info = req_base->dev->udp_tunnel_nic_info; @@ -97,10 +120,40 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base, nla_nest_end(skb, table); } + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) { + u32 zero = 0; + + table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE); + if (!table) + goto err_cancel_ports; + + if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1)) + goto err_cancel_table; + + if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES, + &zero, NULL, + __ETHTOOL_UDP_TUNNEL_TYPE_CNT, + udp_tunnel_type_names, compact)) + goto err_cancel_table; + + entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY); + + if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT, + htons(IANA_VXLAN_UDP_PORT)) || + nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE, + ilog2(UDP_TUNNEL_TYPE_VXLAN))) + goto err_cancel_entry; + + nla_nest_end(skb, entry); + nla_nest_end(skb, table); + } + nla_nest_end(skb, ports); return 0; +err_cancel_entry: + nla_nest_cancel(skb, entry); err_cancel_table: nla_nest_cancel(skb, table); err_cancel_ports: diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c index f0dbd9905a53..69962165c0e8 100644 --- a/net/ipv4/udp_tunnel_nic.c +++ b/net/ipv4/udp_tunnel_nic.c @@ -7,6 +7,7 @@ #include #include #include +#include enum udp_tunnel_nic_table_entry_flags { UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0), @@ -504,6 +505,12 @@ __udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) return; if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY) return; + if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN && + ti->port == htons(IANA_VXLAN_UDP_PORT)) { + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n"); + return; + } if (!udp_tunnel_nic_is_capable(dev, utn, ti)) return; From 18a2b7f969c9b6386a506a5d026962e53dc0bb14 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 28 Jul 2020 14:47:59 -0700 Subject: [PATCH 1946/2056] net/mlx5: convert to new udp_tunnel infrastructure Allocate nic_info dynamically - n_entries is not constant. Attach the tunnel offload info only to the uplink representor. We expect the "main" netdev to be unregistered in switchdev mode, and there to be only one uplink representor. Drop the udp_tunnel_drop_rx_info() call, it was not there until commit b3c2ed21c0bd ("net/mlx5e: Fix VXLAN configuration restore after function reload") so the device doesn't need it, and core should handle reloads and reset just fine. v2: - don't drop the ndos on reprs, and register info on uplink repr. 
v4: - Move netdev tunnel structure handling to en_main.c Signed-off-by: Jakub Kicinski Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 5 +- .../net/ethernet/mellanox/mlx5/core/en_main.c | 121 ++++++------------ .../net/ethernet/mellanox/mlx5/core/en_rep.c | 5 +- .../ethernet/mellanox/mlx5/core/lib/vxlan.c | 66 +++------- .../ethernet/mellanox/mlx5/core/lib/vxlan.h | 5 + 5 files changed, 65 insertions(+), 137 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f2fa1307e90c..0cc2080fd847 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include #include @@ -792,6 +793,7 @@ struct mlx5e_priv { u16 drop_rq_q_counter; struct notifier_block events_nb; + struct udp_tunnel_nic_info nic_info; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx dcbx; #endif @@ -1012,6 +1014,7 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context); int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, mlx5e_fp_preactivate preactivate); +void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv); /* ethtool helpers */ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, @@ -1080,8 +1083,6 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); -void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); -void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 72c91ff55465..8f26cd951ff5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4191,83 +4191,6 @@ int mlx5e_get_vf_stats(struct net_device *dev, } #endif -struct mlx5e_vxlan_work { - struct work_struct work; - struct mlx5e_priv *priv; - u16 port; -}; - -static void mlx5e_vxlan_add_work(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; - - mutex_lock(&priv->state_lock); - mlx5_vxlan_add_port(priv->mdev->vxlan, port); - mutex_unlock(&priv->state_lock); - - kfree(vxlan_work); -} - -static void mlx5e_vxlan_del_work(struct work_struct *work) -{ - struct mlx5e_vxlan_work *vxlan_work = - container_of(work, struct mlx5e_vxlan_work, work); - struct mlx5e_priv *priv = vxlan_work->priv; - u16 port = vxlan_work->port; - - mutex_lock(&priv->state_lock); - mlx5_vxlan_del_port(priv->mdev->vxlan, port); - mutex_unlock(&priv->state_lock); - kfree(vxlan_work); -} - -static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) -{ - struct mlx5e_vxlan_work *vxlan_work; - - vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); - if (!vxlan_work) - return; - - if (add) - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work); - else - INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work); - - vxlan_work->priv = priv; - vxlan_work->port = port; - queue_work(priv->wq, &vxlan_work->work); -} - -void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info 
*ti) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) - return; - - mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); -} - -void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) -{ - struct mlx5e_priv *priv = netdev_priv(netdev); - - if (ti->type != UDP_TUNNEL_TYPE_VXLAN) - return; - - if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) - return; - - mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0); -} - static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, struct sk_buff *skb, netdev_features_t features) @@ -4597,8 +4520,8 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_change_mtu = mlx5e_change_nic_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, .ndo_bpf = mlx5e_xdp, @@ -4869,6 +4792,39 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } +static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port)); +} + +static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table, + unsigned int entry, struct udp_tunnel_info *ti) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port)); +} + +void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv) +{ + if (!mlx5_vxlan_allowed(priv->mdev->vxlan)) + return; + + priv->nic_info.set_port = mlx5e_vxlan_set_port; + priv->nic_info.unset_port = mlx5e_vxlan_unset_port; + priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP | + UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN; + priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN; + /* Don't count the space hard-coded to the IANA port */ + priv->nic_info.tables[0].n_entries = + mlx5_vxlan_max_udp_ports(priv->mdev) - 1; + + priv->netdev->udp_tunnel_nic_info = &priv->nic_info; +} + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4912,6 +4868,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + mlx5e_vxlan_set_netdev_info(priv); + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) || mlx5e_any_tunnel_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; @@ -5217,8 +5175,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(netdev)) mlx5e_open(netdev); - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) - udp_tunnel_get_rx_info(netdev); + udp_tunnel_nic_reset_ntf(priv->netdev); netif_device_attach(netdev); rtnl_unlock(); } @@ -5233,8 +5190,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) rtnl_lock(); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); - if (mlx5_vxlan_allowed(priv->mdev->vxlan)) - udp_tunnel_drop_rx_info(priv->netdev); netif_device_detach(priv->netdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 
3db81a8cfc1d..e13e5d1b3eae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -658,8 +658,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, .ndo_change_mtu = mlx5e_uplink_rep_change_mtu, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, + .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, .ndo_features_check = mlx5e_features_check, .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_rate = mlx5e_set_vf_rate, @@ -730,6 +730,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) /* we want a persistent mac for the uplink rep */ mlx5_query_mac_address(mdev, netdev->dev_addr); netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; + mlx5e_vxlan_set_netdev_info(priv); mlx5e_dcbnl_build_rep_netdev(netdev); } else { netdev->netdev_ops = &mlx5e_netdev_ops_rep; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index be34330d89cc..3315afe2f8dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -42,21 +42,14 @@ struct mlx5_vxlan { struct mlx5_core_dev *mdev; /* max_num_ports is usuallly 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); - int num_ports; struct mutex sync_lock; /* sync add/del port HW operations */ }; struct mlx5_vxlan_port { struct hlist_node hlist; - refcount_t refcount; u16 udp_port; }; -static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) -{ - return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4; -} - static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {}; @@ -109,48 +102,24 @@ static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 p int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; - int ret = 0; - - mutex_lock(&vxlan->sync_lock); - vxlanp = vxlan_lookup_port(vxlan, port); - if (vxlanp) { - refcount_inc(&vxlanp->refcount); - goto unlock; - } - - if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) { - mlx5_core_info(vxlan->mdev, - "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n", - port, mlx5_vxlan_max_udp_ports(vxlan->mdev)); - ret = -ENOSPC; - goto unlock; - } - - ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); - if (ret) - goto unlock; + int ret; vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); - if (!vxlanp) { - ret = -ENOMEM; - goto err_delete_port; + if (!vxlanp) + return -ENOMEM; + vxlanp->udp_port = port; + + ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); + if (ret) { + kfree(vxlanp); + return ret; } - vxlanp->udp_port = port; - refcount_set(&vxlanp->refcount, 1); - + mutex_lock(&vxlan->sync_lock); hash_add_rcu(vxlan->htable, &vxlanp->hlist, port); - - vxlan->num_ports++; mutex_unlock(&vxlan->sync_lock); + return 0; - -err_delete_port: - mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); - -unlock: - mutex_unlock(&vxlan->sync_lock); - return ret; } int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) @@ -161,18 +130,15 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) mutex_lock(&vxlan->sync_lock); vxlanp = vxlan_lookup_port(vxlan, port); - if (!vxlanp) { + if (WARN_ON(!vxlanp)) { ret = -ENOENT; 
goto out_unlock; } - if (refcount_dec_and_test(&vxlanp->refcount)) { - hash_del_rcu(&vxlanp->hlist); - synchronize_rcu(); - mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); - kfree(vxlanp); - vxlan->num_ports--; - } + hash_del_rcu(&vxlanp->hlist); + synchronize_rcu(); + mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); + kfree(vxlanp); out_unlock: mutex_unlock(&vxlan->sync_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h index 6d599f4a8acd..ec766529f49b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h @@ -37,6 +37,11 @@ struct mlx5_vxlan; struct mlx5_vxlan_port; +static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4; +} + static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) { /* not allowed reason is encoded in vxlan pointer as error, From 6c4e9bcfb48933d533ff975e152757991556294a Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 19 Jul 2020 11:04:30 +0300 Subject: [PATCH 1947/2056] net/mlx5: Delete extra dump stack that gives nothing The WARN_*() macros are intended to catch impossible situations from the SW point of view. They gave a little in case HW<->SW interface is out-of-sync. Such out-of-sync scenario can be due to SW errors that are not part of this flow or because some HW errors, where dump stack won't help either. This specific WARN_ON() is useless because mlx5_core code is prepared to handle such situations and will unfold everything correctly while providing enough information to the users to understand why FS is not working. WARNING: CPU: 0 PID: 3222 at drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 connect_fts_in_prio.isra.20+0x1dd/0x260 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 Kernel panic - not syncing: panic_on_warn set ... 
CPU: 0 PID: 3222 Comm: syz-executor861 Not tainted 5.5.0-rc6+ #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58e9a3f-prebuilt.qemu.org 04/01/2014 Call Trace: __dump_stack linux/lib/dump_stack.c:77 [inline] dump_stack+0x94/0xce linux/lib/dump_stack.c:118 panic+0x234/0x56f linux/kernel/panic.c:221 __warn+0x1cc/0x1e1 linux/kernel/panic.c:582 report_bug+0x200/0x310 linux/lib/bug.c:195 fixup_bug.part.11+0x32/0x80 linux/arch/x86/kernel/traps.c:174 fixup_bug linux/arch/x86/kernel/traps.c:273 [inline] do_error_trap+0xd3/0x100 linux/arch/x86/kernel/traps.c:267 do_invalid_op+0x31/0x40 linux/arch/x86/kernel/traps.c:286 invalid_op+0x1e/0x30 linux/arch/x86/entry/entry_64.S:1027 RIP: 0010:connect_fts_in_prio.isra.20+0x1dd/0x260 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:825 Code: 00 00 48 c7 c2 60 8c 31 84 48 c7 c6 00 81 31 84 48 8b 38 e8 3c a8 cb ff 41 83 fd 01 8b 04 24 0f 8e 29 ff ff ff e8 83 7b bc fe <0f> 0b 8b 04 24 e9 1a ff ff ff 89 04 24 e8 c1 20 e0 fe 8b 04 24 eb RSP: 0018:ffffc90004bb7858 EFLAGS: 00010293 RAX: ffff88805de98e80 RBX: 0000000000000c96 RCX: ffffffff827a853d RDX: 0000000000000000 RSI: 0000000000000000 RDI: fffff52000976efa RBP: 0000000000000007 R08: ffffed100da060e3 R09: ffffed100da060e3 R10: 0000000000000001 R11: ffffed100da060e2 R12: dffffc0000000000 R13: 0000000000000002 R14: ffff8880683a1a10 R15: ffffed100d07bc1c connect_prev_fts linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:844 [inline] connect_flow_table linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:975 [inline] __mlx5_create_flow_table+0x8f8/0x1710 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1064 mlx5_create_flow_table linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1094 [inline] mlx5_create_auto_grouped_flow_table+0xe1/0x210 linux/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c:1136 _get_prio linux/drivers/infiniband/hw/mlx5/main.c:3286 [inline] get_flow_table+0x2ea/0x760 linux/drivers/infiniband/hw/mlx5/main.c:3376 mlx5_ib_create_flow+0x331/0x11c0 linux/drivers/infiniband/hw/mlx5/main.c:3896 ib_uverbs_ex_create_flow+0x13e8/0x1b40 linux/drivers/infiniband/core/uverbs_cmd.c:3311 ib_uverbs_write+0xaa5/0xdf0 linux/drivers/infiniband/core/uverbs_main.c:769 __vfs_write+0x7c/0x100 linux/fs/read_write.c:494 vfs_write+0x168/0x4a0 linux/fs/read_write.c:558 ksys_write+0xc8/0x200 linux/fs/read_write.c:611 do_syscall_64+0x9c/0x390 linux/arch/x86/entry/common.c:294 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x45a059 Code: 00 00 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 bc ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fcc17564c98 EFLAGS: 00000246 ORIG_RAX: 0000000000000001 RAX: ffffffffffffffda RBX: 00007fcc17564ca0 RCX: 000000000045a059 RDX: 0000000000000030 RSI: 00000000200003c0 RDI: 0000000000000005 RBP: 0000000000000007 R08: 0000000000000002 R09: 0000000000003131 R10: 0000000000000000 R11: 0000000000000246 R12: 00000000006e636c R13: 0000000000000000 R14: 00000000006e6360 R15: 00007ffdcbdaf6a0 Dumping ftrace buffer: (ftrace buffer empty) Kernel Offset: disabled Rebooting in 1 seconds.. 
Fixes: f90edfd279f3 ("net/mlx5_core: Connect flow tables") Reviewed-by: Maor Gottlieb Reviewed-by: Mark Bloch Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index a10814814856..7e70a8178a46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -846,18 +846,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root = find_root(&prio->node); struct mlx5_flow_table *iter; - int i = 0; int err; fs_for_each_ft(iter, prio) { - i++; err = root->cmds->modify_flow_table(root, iter, ft); if (err) { - mlx5_core_warn(dev, "Failed to modify flow table %d\n", - iter->id); + mlx5_core_err(dev, + "Failed to modify flow table id %d, type %d, err %d\n", + iter->id, iter->type, err); /* The driver is out of sync with the FW */ - if (i > 1) - WARN_ON(true); return err; } } From fa5cb548ced61b9d3095f32f8a7e427a248c65ee Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Mon, 3 Aug 2020 12:05:44 +0300 Subject: [PATCH 1948/2056] bpf: Setup socket family and addresses in bpf_prog_test_run_skb Now it's impossible to test all branches of cgroup_skb bpf program which accesses skb->family and skb->{local,remote}_ip{4,6} fields because they are zeroed during socket allocation. This commit fills socket family and addresses from related fields in constructed skb. Signed-off-by: Dmitry Yakunin Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200803090545.82046-2-zeil@yandex-team.ru --- net/bpf/test_run.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index b03c469cd01f..736a5964fa95 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -449,6 +449,27 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev); skb_reset_network_header(skb); + switch (skb->protocol) { + case htons(ETH_P_IP): + sk->sk_family = AF_INET; + if (sizeof(struct iphdr) <= skb_headlen(skb)) { + sk->sk_rcv_saddr = ip_hdr(skb)->saddr; + sk->sk_daddr = ip_hdr(skb)->daddr; + } + break; +#if IS_ENABLED(CONFIG_IPV6) + case htons(ETH_P_IPV6): + sk->sk_family = AF_INET6; + if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) { + sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr; + sk->sk_v6_daddr = ipv6_hdr(skb)->daddr; + } + break; +#endif + default: + break; + } + if (is_l2) __skb_push(skb, hh_len); if (is_direct_pkt_access) From 21594c44083c375697d418729c4b2e4522cf9f70 Mon Sep 17 00:00:00 2001 From: Dmitry Yakunin Date: Mon, 3 Aug 2020 12:05:45 +0300 Subject: [PATCH 1949/2056] bpf: Allow to specify ifindex for skb in bpf_prog_test_run_skb Now skb->dev is unconditionally set to the loopback device in current net namespace. But if we want to test bpf program which contains code branch based on ifindex condition (eg filters out localhost packets) it is useful to allow specifying of ifindex from userspace. This patch adds such option through ctx_in (__sk_buff) parameter. 
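A minimal userspace sketch (selftest-style; assumes libbpf's bpf_prog_test_run_xattr() as already used by the skb_ctx selftest) of passing an ifindex to the program under test via ctx_in:

#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Run prog_fd against pkt, asking the kernel to attach the test skb to
 * the device with the given ifindex (must exist and be > 1, otherwise
 * the loopback device is kept).
 */
static int run_with_ifindex(int prog_fd, void *pkt, __u32 pkt_len, int ifindex)
{
	struct __sk_buff ctx = {
		.ifindex = ifindex,
	};
	struct bpf_prog_test_run_attr attr = {
		.prog_fd	= prog_fd,
		.data_in	= pkt,
		.data_size_in	= pkt_len,
		.ctx_in		= &ctx,
		.ctx_size_in	= sizeof(ctx),
	};

	return bpf_prog_test_run_xattr(&attr);
}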
Signed-off-by: Dmitry Yakunin Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200803090545.82046-3-zeil@yandex-team.ru --- net/bpf/test_run.c | 22 +++++++++++++++++-- .../selftests/bpf/prog_tests/skb_ctx.c | 5 +++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 736a5964fa95..99eb8c6c0fbc 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -327,6 +327,12 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb) /* priority is allowed */ if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority), + offsetof(struct __sk_buff, ifindex))) + return -EINVAL; + + /* ifindex is allowed */ + + if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex), offsetof(struct __sk_buff, cb))) return -EINVAL; @@ -381,6 +387,7 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb) __skb->mark = skb->mark; __skb->priority = skb->priority; + __skb->ifindex = skb->dev->ifindex; __skb->tstamp = skb->tstamp; memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN); __skb->wire_len = cb->pkt_len; @@ -391,6 +398,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr) { bool is_l2 = false, is_direct_pkt_access = false; + struct net *net = current->nsproxy->net_ns; + struct net_device *dev = net->loopback_dev; u32 size = kattr->test.data_size_in; u32 repeat = kattr->test.repeat; struct __sk_buff *ctx = NULL; @@ -432,7 +441,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, kfree(ctx); return -ENOMEM; } - sock_net_set(sk, current->nsproxy->net_ns); + sock_net_set(sk, net); sock_init_data(NULL, sk); skb = build_skb(data, 0); @@ -446,7 +455,14 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); __skb_put(skb, size); - skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev); + if (ctx && ctx->ifindex > 1) { + dev = dev_get_by_index(net, ctx->ifindex); + if (!dev) { + ret = -ENODEV; + goto out; + } + } + skb->protocol = eth_type_trans(skb, dev); skb_reset_network_header(skb); switch (skb->protocol) { @@ -502,6 +518,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, ret = bpf_ctx_finish(kattr, uattr, ctx, sizeof(struct __sk_buff)); out: + if (dev && dev != net->loopback_dev) + dev_put(dev); kfree_skb(skb); bpf_sk_storage_free(sk); kfree(sk); diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index 7021b92af313..25de86af2d03 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -11,6 +11,7 @@ void test_skb_ctx(void) .cb[3] = 4, .cb[4] = 5, .priority = 6, + .ifindex = 1, .tstamp = 7, .wire_len = 100, .gso_segs = 8, @@ -92,6 +93,10 @@ void test_skb_ctx(void) "ctx_out_priority", "skb->priority == %d, expected %d\n", skb.priority, 7); + CHECK_ATTR(skb.ifindex != 1, + "ctx_out_ifindex", + "skb->ifindex == %d, expected %d\n", + skb.ifindex, 1); CHECK_ATTR(skb.tstamp != 8, "ctx_out_tstamp", "skb->tstamp == %lld, expected %d\n", From c44a1b91753e54a9e9381ac6411ec36389d21453 Mon Sep 17 00:00:00 2001 From: Bruno Thomsen Date: Thu, 30 Jul 2020 21:57:46 +0200 Subject: [PATCH 1950/2056] dt-bindings: net: mdio: add reset-post-delay-us property Add "reset-post-delay-us" parameter to MDIO bus properties, so it's possible to add a delay after reset deassert. 
This is optional in case external hardware slows down release of the reset signal. Signed-off-by: Bruno Thomsen Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/mdio.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml index d6a3bf8550eb..26afb556dfae 100644 --- a/Documentation/devicetree/bindings/net/mdio.yaml +++ b/Documentation/devicetree/bindings/net/mdio.yaml @@ -39,6 +39,13 @@ properties: and must therefore be appropriately determined based on all devices requirements (maximum value of all per-device RESET pulse widths). + reset-post-delay-us: + description: + Delay after reset deassert in microseconds. It applies to all MDIO + devices and it's determined by how fast all devices are ready for + communication. This delay happens just before e.g. Ethernet PHY + type ID auto detection. + clock-frequency: description: Desired MDIO bus clock frequency in Hz. Values greater than IEEE 802.3 From 6259e0f5478d7a7e4ff3e38bc739b612b8907246 Mon Sep 17 00:00:00 2001 From: Bruno Thomsen Date: Thu, 30 Jul 2020 21:57:47 +0200 Subject: [PATCH 1951/2056] net: mdiobus: use flexible sleeping for reset-delay-us MDIO bus reset pulse width is created by using udelay() and that function might not be optimal depending on device tree value. By switching to the new fsleep() helper the correct delay function is called depending on delay length, e.g. udelay(), usleep_range() or msleep(). Signed-off-by: Bruno Thomsen Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio_bus.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 46b33701ad4b..5df3782b05b4 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -554,7 +554,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) bus->reset_gpiod = gpiod; gpiod_set_value_cansleep(gpiod, 1); - udelay(bus->reset_delay_us); + fsleep(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); } From bb3831294cd50750806f2ce8d73317dc8feeda09 Mon Sep 17 00:00:00 2001 From: Bruno Thomsen Date: Thu, 30 Jul 2020 21:57:48 +0200 Subject: [PATCH 1952/2056] net: mdiobus: add reset-post-delay-us handling Load new "reset-post-delay-us" value from MDIO properties, and if configured to a greater then zero delay do a flexible sleeping delay after MDIO bus reset deassert. This allows devices to exit reset state before start bus communication. Signed-off-by: Bruno Thomsen Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio_bus.c | 2 ++ drivers/of/of_mdio.c | 2 ++ include/linux/phy.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 5df3782b05b4..0af20faad69d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -556,6 +556,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) gpiod_set_value_cansleep(gpiod, 1); fsleep(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); + if (bus->reset_post_delay_us > 0) + fsleep(bus->reset_post_delay_us); } if (bus->reset) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index eb84507de28a..cb32d7ef4938 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -268,6 +268,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) /* Get bus level PHY reset GPIO details */ mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); + mdio->reset_post_delay_us = 0; + of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ rc = mdiobus_register(mdio); diff --git a/include/linux/phy.h b/include/linux/phy.h index 0403eb799913..3a09d2bf69ea 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -293,6 +293,8 @@ struct mii_bus { /* GPIO reset pulse width in microseconds */ int reset_delay_us; + /* GPIO reset deassert delay in microseconds */ + int reset_post_delay_us; /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; From e4d5efdd0bc43c63c64021147175800a1d58114f Mon Sep 17 00:00:00 2001 From: Bruno Thomsen Date: Thu, 30 Jul 2020 21:57:49 +0200 Subject: [PATCH 1953/2056] net: mdio device: use flexible sleeping in reset function MDIO device reset assert and deassert length was created by usleep_range() but that does not ensure optimal handling of all the different values from device tree properties. By switching to the new flexible sleeping helper function, fsleep(), the correct delay function is called depending on delay length, e.g. udelay(), usleep_range() or msleep(). Signed-off-by: Bruno Thomsen Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 0f625a1b1644..0837319a52d7 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -132,7 +132,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value) d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay; if (d) - usleep_range(d, d + max_t(unsigned int, d / 10, 100)); + fsleep(d); } EXPORT_SYMBOL(mdio_device_reset); From 038ebb1a713d114d54dbf14868a73181c0c92758 Mon Sep 17 00:00:00 2001 From: wenxu Date: Fri, 31 Jul 2020 10:45:01 +0800 Subject: [PATCH 1954/2056] net/sched: act_ct: fix miss set mru for ovs after defrag in act_ct When openvswitch conntrack offload with act_ct action. Fragment packets defrag in the ingress tc act_ct action and miss the next chain. Then the packet pass to the openvswitch datapath without the mru. The over mtu packet will be dropped in output action in openvswitch for over mtu. "kernel: net2: dropped over-mtu packet: 1528 > 1500" This patch add mru in the tc_skb_ext for adefrag and miss next chain situation. And also add mru in the qdisc_skb_cb. The act_ct set the mru to the qdisc_skb_cb when the packet defrag. 
And When the chain miss, The mru is set to tc_skb_ext which can be got by ovs datapath. Fixes: b57dc7c13ea9 ("net/sched: Introduce action ct") Signed-off-by: wenxu Reviewed-by: Cong Wang Signed-off-by: David S. Miller --- include/linux/skbuff.h | 1 + include/net/sch_generic.h | 3 ++- net/openvswitch/flow.c | 1 + net/sched/act_ct.c | 8 ++++++-- net/sched/cls_api.c | 1 + 5 files changed, 11 insertions(+), 3 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fa817a105517..3ad65d4ce085 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -283,6 +283,7 @@ struct nf_bridge_info { */ struct tc_skb_ext { __u32 chain; + __u16 mru; }; #endif diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index c510b03b9751..d60e7c39d60c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -384,6 +384,7 @@ struct qdisc_skb_cb { }; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; + u16 mru; }; typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); @@ -463,7 +464,7 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; - BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb)); BUILD_BUG_ON(sizeof(qcb->data) < sz); } diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9d375e74b607..03942c30d83e 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -890,6 +890,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, if (static_branch_unlikely(&tc_recirc_sharing_support)) { tc_ext = skb_ext_find(skb, TC_SKB_EXT); key->recirc_id = tc_ext ? tc_ext->chain : 0; + OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0; } else { key->recirc_id = 0; } diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 97e27946897f..e6ad42b11835 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -706,8 +706,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (err && err != -EINPROGRESS) goto out_free; - if (!err) + if (!err) { *defrag = true; + cb.mru = IPCB(skb)->frag_max_size; + } } else { /* NFPROTO_IPV6 */ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone; @@ -717,8 +719,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb, if (err && err != -EINPROGRESS) goto out_free; - if (!err) + if (!err) { *defrag = true; + cb.mru = IP6CB(skb)->frag_max_size; + } #else err = -EOPNOTSUPP; goto out_free; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0b8623b3b24f..41a55c6cbeb8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb, if (WARN_ON_ONCE(!ext)) return TC_ACT_SHOT; ext->chain = last_executed_chain; + ext->mru = qdisc_skb_cb(skb)->mru; } return ret; From 9aba6c5b49254d5bee927d81593ed4429e91d4ae Mon Sep 17 00:00:00 2001 From: Peilin Ye Date: Fri, 31 Jul 2020 00:48:38 -0400 Subject: [PATCH 1955/2056] openvswitch: Prevent kernel-infoleak in ovs_ct_put_key() ovs_ct_put_key() is potentially copying uninitialized kernel stack memory into socket buffers, since the compiler may leave a 3-byte hole at the end of `struct ovs_key_ct_tuple_ipv4` and `struct ovs_key_ct_tuple_ipv6`. Fix it by initializing `orig` with memset(). 
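A minimal sketch of the general pattern (hypothetical struct and attribute type, not the ovs_key_ct_tuple_* layouts): a designated initializer only zeroes named members, so compiler-inserted padding can still leak old stack bytes through nla_put(); memset() clears the whole object, padding included:

#include <linux/string.h>
#include <net/netlink.h>

struct example_tuple {
	__be32 src;
	__be32 dst;
	__u8   proto;		/* compiler may pad 3 bytes after this */
};

static int put_example(struct sk_buff *skb, __be32 src, __be32 dst, __u8 proto)
{
	struct example_tuple t;

	memset(&t, 0, sizeof(t));	/* zeroes members and padding */
	t.src = src;
	t.dst = dst;
	t.proto = proto;

	return nla_put(skb, 1 /* hypothetical attr type */, sizeof(t), &t);
}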
Fixes: 9dd7f8907c37 ("openvswitch: Add original direction conntrack tuple to sw_flow_key.") Suggested-by: Dan Carpenter Signed-off-by: Peilin Ye Signed-off-by: David S. Miller --- net/openvswitch/conntrack.c | 38 +++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4340f25fe390..98d393e70de3 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) ovs_ct_update_key(skb, NULL, key, false, false); } -#define IN6_ADDR_INITIALIZER(ADDR) \ - { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \ - (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] } - int ovs_ct_put_key(const struct sw_flow_key *swkey, const struct sw_flow_key *output, struct sk_buff *skb) { @@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey, if (swkey->ct_orig_proto) { if (swkey->eth.type == htons(ETH_P_IP)) { - struct ovs_key_ct_tuple_ipv4 orig = { - output->ipv4.ct_orig.src, - output->ipv4.ct_orig.dst, - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv4 orig; + + memset(&orig, 0, sizeof(orig)); + orig.ipv4_src = output->ipv4.ct_orig.src; + orig.ipv4_dst = output->ipv4.ct_orig.dst; + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv4_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4, sizeof(orig), &orig)) return -EMSGSIZE; } else if (swkey->eth.type == htons(ETH_P_IPV6)) { - struct ovs_key_ct_tuple_ipv6 orig = { - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src), - IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst), - output->ct.orig_tp.src, - output->ct.orig_tp.dst, - output->ct_orig_proto, - }; + struct ovs_key_ct_tuple_ipv6 orig; + + memset(&orig, 0, sizeof(orig)); + memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32, + sizeof(orig.ipv6_src)); + memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32, + sizeof(orig.ipv6_dst)); + orig.src_port = output->ct.orig_tp.src; + orig.dst_port = output->ct.orig_tp.dst; + orig.ipv6_proto = output->ct_orig_proto; + if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, sizeof(orig), &orig)) return -EMSGSIZE; From 71fed0bc8665969f4681fc5772e5df42bfcc472e Mon Sep 17 00:00:00 2001 From: Gaurav Singh Date: Fri, 31 Jul 2020 00:58:44 -0400 Subject: [PATCH 1956/2056] ethtool: ethnl_set_linkmodes: remove redundant null check info cannot be NULL here since its being accessed earlier in the function: nlmsg_parse(info->nlhdr...). Remove this redundant NULL check. Signed-off-by: Gaurav Singh Reviewed-by: Michal Kubecek Signed-off-by: David S. 
Miller --- net/ethtool/linkmodes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index 317a93129551..7044a2853886 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -421,8 +421,7 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info) ret = __ethtool_get_link_ksettings(dev, &ksettings); if (ret < 0) { - if (info) - GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); goto out_ops; } From d6526926de7397a97308780911565e31a6b67b59 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 31 Jul 2020 10:38:32 +0200 Subject: [PATCH 1957/2056] net: mvpp2: fix memory leak in mvpp2_rx Release skb memory in mvpp2_rx() if mvpp2_rx_refill routine fails Fixes: b5015854674b ("net: mvpp2: fix refilling BM pools in RX path") Signed-off-by: Lorenzo Bianconi Acked-by: Matteo Croce Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6a3f356640a0..4298a029be55 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3508,6 +3508,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, err = mvpp2_rx_refill(port, bm_pool, pp, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); + dev_kfree_skb_any(skb); goto err_drop_frame; } From 9d2f627b7ec9d5d3246b6cec17f290ee6778c83b Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Fri, 31 Jul 2020 14:20:56 +0200 Subject: [PATCH 1958/2056] net: openvswitch: add masks cache hit counter Add a counter that counts the number of masks cache hits, and export it through the megaflow netlink statistics. Reviewed-by: Paolo Abeni Reviewed-by: Tonghao Zhang Signed-off-by: Eelco Chaudron Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 2 +- net/openvswitch/datapath.c | 5 ++++- net/openvswitch/datapath.h | 3 +++ net/openvswitch/flow_table.c | 19 ++++++++++++++----- net/openvswitch/flow_table.h | 3 ++- 5 files changed, 24 insertions(+), 8 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 9b14519e74d9..7cb76e5ca7cf 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -102,8 +102,8 @@ struct ovs_dp_megaflow_stats { __u64 n_mask_hit; /* Number of masks used for flow lookups. */ __u32 n_masks; /* Number of masks for the datapath. */ __u32 pad0; /* Pad for future expension. */ + __u64 n_cache_hit; /* Number of cache matches for flow lookups. */ __u64 pad1; /* Pad for future expension. */ - __u64 pad2; /* Pad for future expension. */ }; struct ovs_vport_stats { diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 6b6822f82f70..f45fee760504 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) struct dp_stats_percpu *stats; u64 *stats_counter; u32 n_mask_hit; + u32 n_cache_hit; int error; stats = this_cpu_ptr(dp->stats_percpu); /* Look up flow. 
*/ flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb), - &n_mask_hit); + &n_mask_hit, &n_cache_hit); if (unlikely(!flow)) { struct dp_upcall_info upcall; @@ -262,6 +263,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) u64_stats_update_begin(&stats->syncp); (*stats_counter)++; stats->n_mask_hit += n_mask_hit; + stats->n_cache_hit += n_cache_hit; u64_stats_update_end(&stats->syncp); } @@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, stats->n_missed += local_stats.n_missed; stats->n_lost += local_stats.n_lost; mega_stats->n_mask_hit += local_stats.n_mask_hit; + mega_stats->n_cache_hit += local_stats.n_cache_hit; } } diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 24fcec22fde2..38f7d3e66ca6 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -38,12 +38,15 @@ * @n_mask_hit: Number of masks looked up for flow match. * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked * up per packet. + * @n_cache_hit: The number of received packets that had their mask found using + * the mask cache. */ struct dp_stats_percpu { u64 n_hit; u64 n_missed; u64 n_lost; u64 n_mask_hit; + u64 n_cache_hit; struct u64_stats_sync syncp; }; diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index af22c9ee28dd..a5912ea05352 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -667,6 +667,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, struct mask_array *ma, const struct sw_flow_key *key, u32 *n_mask_hit, + u32 *n_cache_hit, u32 *index) { u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr); @@ -682,6 +683,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, u64_stats_update_begin(&ma->syncp); usage_counters[*index]++; u64_stats_update_end(&ma->syncp); + (*n_cache_hit)++; return flow; } } @@ -719,7 +721,8 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl, struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, const struct sw_flow_key *key, u32 skb_hash, - u32 *n_mask_hit) + u32 *n_mask_hit, + u32 *n_cache_hit) { struct mask_array *ma = rcu_dereference(tbl->mask_array); struct table_instance *ti = rcu_dereference(tbl->ti); @@ -729,10 +732,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, int seg; *n_mask_hit = 0; + *n_cache_hit = 0; if (unlikely(!skb_hash)) { u32 mask_index = 0; + u32 cache = 0; - return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index); + return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache, + &mask_index); } /* Pre and post recirulation flows usually have the same skb_hash @@ -753,7 +759,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, e = &entries[index]; if (e->skb_hash == skb_hash) { flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, - &e->mask_index); + n_cache_hit, &e->mask_index); if (!flow) e->skb_hash = 0; return flow; @@ -766,10 +772,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, } /* Cache miss, do full lookup. 
*/ - flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index); + flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit, + &ce->mask_index); if (flow) ce->skb_hash = skb_hash; + *n_cache_hit = 0; return flow; } @@ -779,9 +787,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array); u32 __always_unused n_mask_hit; + u32 __always_unused n_cache_hit; u32 index = 0; - return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index); + return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index); } struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 1f664b050e3b..325e939371d8 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -82,7 +82,8 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, const struct sw_flow_key *, u32 skb_hash, - u32 *n_mask_hit); + u32 *n_mask_hit, + u32 *n_cache_hit); struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, const struct sw_flow_key *); struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, From 9bf24f594c6acf676fb8c229f152c21bfb915ddb Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Fri, 31 Jul 2020 14:21:34 +0200 Subject: [PATCH 1959/2056] net: openvswitch: make masks cache size configurable This patch makes the masks cache size configurable, or with a size of 0, disable it. Reviewed-by: Paolo Abeni Reviewed-by: Tonghao Zhang Signed-off-by: Eelco Chaudron Signed-off-by: David S. Miller --- include/uapi/linux/openvswitch.h | 1 + net/openvswitch/datapath.c | 17 ++++++ net/openvswitch/flow_table.c | 101 +++++++++++++++++++++++++++---- net/openvswitch/flow_table.h | 10 ++- 4 files changed, 115 insertions(+), 14 deletions(-) diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 7cb76e5ca7cf..8300cc29dec8 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -86,6 +86,7 @@ enum ovs_datapath_attr { OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */ OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */ OVS_DP_ATTR_PAD, + OVS_DP_ATTR_MASKS_CACHE_SIZE, __OVS_DP_ATTR_MAX }; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f45fee760504..42f8cc70bb2c 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1498,6 +1498,7 @@ static size_t ovs_dp_cmd_msg_size(void) msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats)); msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats)); msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */ + msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */ return msgsize; } @@ -1535,6 +1536,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features)) goto nla_put_failure; + if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE, + ovs_flow_tbl_masks_cache_size(&dp->table))) + goto nla_put_failure; + genlmsg_end(skb, ovs_header); return 0; @@ -1599,6 +1604,16 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[]) #endif } + if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) { + int err; + u32 cache_size; + + cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]); + err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size); + 
if (err) + return err; + } + dp->user_features = user_features; if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) @@ -1887,6 +1902,8 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = { [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 }, [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 }, + [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, + PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)), }; static const struct genl_ops dp_datapath_genl_ops[] = { diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index a5912ea05352..6527d84c3ea6 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -38,8 +38,8 @@ #define MASK_ARRAY_SIZE_MIN 16 #define REHASH_INTERVAL (10 * 60 * HZ) +#define MC_DEFAULT_HASH_ENTRIES 256 #define MC_HASH_SHIFT 8 -#define MC_HASH_ENTRIES (1u << MC_HASH_SHIFT) #define MC_HASH_SEGS ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT) static struct kmem_cache *flow_cache; @@ -341,15 +341,79 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) } } +static void __mask_cache_destroy(struct mask_cache *mc) +{ + free_percpu(mc->mask_cache); + kfree(mc); +} + +static void mask_cache_rcu_cb(struct rcu_head *rcu) +{ + struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu); + + __mask_cache_destroy(mc); +} + +static struct mask_cache *tbl_mask_cache_alloc(u32 size) +{ + struct mask_cache_entry __percpu *cache = NULL; + struct mask_cache *new; + + /* Only allow size to be 0, or a power of 2, and does not exceed + * percpu allocation size. + */ + if ((!is_power_of_2(size) && size != 0) || + (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE) + return NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + new->cache_size = size; + if (new->cache_size > 0) { + cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry), + new->cache_size), + __alignof__(struct mask_cache_entry)); + if (!cache) { + kfree(new); + return NULL; + } + } + + new->mask_cache = cache; + return new; +} +int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size) +{ + struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_cache *new; + + if (size == mc->cache_size) + return 0; + + if ((!is_power_of_2(size) && size != 0) || + (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE) + return -EINVAL; + + new = tbl_mask_cache_alloc(size); + if (!new) + return -ENOMEM; + + rcu_assign_pointer(table->mask_cache, new); + call_rcu(&mc->rcu, mask_cache_rcu_cb); + + return 0; +} + int ovs_flow_tbl_init(struct flow_table *table) { struct table_instance *ti, *ufid_ti; + struct mask_cache *mc; struct mask_array *ma; - table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) * - MC_HASH_ENTRIES, - __alignof__(struct mask_cache_entry)); - if (!table->mask_cache) + mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES); + if (!mc) return -ENOMEM; ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN); @@ -367,6 +431,7 @@ int ovs_flow_tbl_init(struct flow_table *table) rcu_assign_pointer(table->ti, ti); rcu_assign_pointer(table->ufid_ti, ufid_ti); rcu_assign_pointer(table->mask_array, ma); + rcu_assign_pointer(table->mask_cache, mc); table->last_rehash = jiffies; table->count = 0; table->ufid_count = 0; @@ -377,7 +442,7 @@ int ovs_flow_tbl_init(struct flow_table *table) free_mask_array: __mask_array_destroy(ma); free_mask_cache: - free_percpu(table->mask_cache); + 
__mask_cache_destroy(mc); return -ENOMEM; } @@ -453,9 +518,11 @@ void ovs_flow_tbl_destroy(struct flow_table *table) { struct table_instance *ti = rcu_dereference_raw(table->ti); struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); + struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); - free_percpu(table->mask_cache); - call_rcu(&table->mask_array->rcu, mask_array_rcu_cb); + call_rcu(&mc->rcu, mask_cache_rcu_cb); + call_rcu(&ma->rcu, mask_array_rcu_cb); table_instance_destroy(table, ti, ufid_ti, false); } @@ -724,6 +791,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, u32 *n_mask_hit, u32 *n_cache_hit) { + struct mask_cache *mc = rcu_dereference(tbl->mask_cache); struct mask_array *ma = rcu_dereference(tbl->mask_array); struct table_instance *ti = rcu_dereference(tbl->ti); struct mask_cache_entry *entries, *ce; @@ -733,7 +801,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, *n_mask_hit = 0; *n_cache_hit = 0; - if (unlikely(!skb_hash)) { + if (unlikely(!skb_hash || mc->cache_size == 0)) { u32 mask_index = 0; u32 cache = 0; @@ -749,11 +817,11 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl, ce = NULL; hash = skb_hash; - entries = this_cpu_ptr(tbl->mask_cache); + entries = this_cpu_ptr(mc->mask_cache); /* Find the cache entry 'ce' to operate on. */ for (seg = 0; seg < MC_HASH_SEGS; seg++) { - int index = hash & (MC_HASH_ENTRIES - 1); + int index = hash & (mc->cache_size - 1); struct mask_cache_entry *e; e = &entries[index]; @@ -867,6 +935,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table) return READ_ONCE(ma->count); } +u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table) +{ + struct mask_cache *mc = rcu_dereference(table->mask_cache); + + return READ_ONCE(mc->cache_size); +} + static struct table_instance *table_instance_expand(struct table_instance *ti, bool ufid) { @@ -1095,8 +1170,8 @@ void ovs_flow_masks_rebalance(struct flow_table *table) for (i = 0; i < masks_entries; i++) { int index = masks_and_count[i].index; - new->masks[new->count++] = - rcu_dereference_ovsl(ma->masks[index]); + if (ovsl_dereference(ma->masks[index])) + new->masks[new->count++] = ma->masks[index]; } rcu_assign_pointer(table->mask_array, new); diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 325e939371d8..74ce48fecba9 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -27,6 +27,12 @@ struct mask_cache_entry { u32 mask_index; }; +struct mask_cache { + struct rcu_head rcu; + u32 cache_size; /* Must be ^2 value. 
*/ + struct mask_cache_entry __percpu *mask_cache; +}; + struct mask_count { int index; u64 counter; @@ -53,7 +59,7 @@ struct table_instance { struct flow_table { struct table_instance __rcu *ti; struct table_instance __rcu *ufid_ti; - struct mask_cache_entry __percpu *mask_cache; + struct mask_cache __rcu *mask_cache; struct mask_array __rcu *mask_array; unsigned long last_rehash; unsigned int count; @@ -77,6 +83,8 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, const struct sw_flow_mask *mask); void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); int ovs_flow_tbl_num_masks(const struct flow_table *table); +u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table); +int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size); struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, u32 *bucket, u32 *idx); struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, From d208a42a62e70b9c86b46b7bad126eb862891314 Mon Sep 17 00:00:00 2001 From: Florent Fourcot Date: Fri, 31 Jul 2020 15:32:06 +0200 Subject: [PATCH 1960/2056] ipv6/addrconf: call addrconf_ifdown with consistent values Second parameter of addrconf_ifdown "how" is used as a boolean internally. It does not make sense to call it with something different of 0 or 1. This value is set to 2 in all git history. Signed-off-by: Florent Fourcot Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 840bfdb3d7bd..861265fa9d6d 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -7189,7 +7189,7 @@ void addrconf_cleanup(void) continue; addrconf_ifdown(dev, 1); } - addrconf_ifdown(init_net.loopback_dev, 2); + addrconf_ifdown(init_net.loopback_dev, 1); /* * Check hash table. From ae79dbf60905a09a5cca63c02300b2a0f1befbad Mon Sep 17 00:00:00 2001 From: Florent Fourcot Date: Fri, 31 Jul 2020 15:32:07 +0200 Subject: [PATCH 1961/2056] ipv6/addrconf: use a boolean to choose between UNREGISTER/DOWN "how" was used as a boolean. Change the type to bool, and improve variable name Signed-off-by: Florent Fourcot Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 861265fa9d6d..0acf6a9796ca 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -163,7 +163,7 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); static void addrconf_type_change(struct net_device *dev, unsigned long event); -static int addrconf_ifdown(struct net_device *dev, int how); +static int addrconf_ifdown(struct net_device *dev, bool unregister); static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, @@ -3630,7 +3630,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, * an L3 master device (e.g., VRF) */ if (info->upper_dev && netif_is_l3_master(info->upper_dev)) - addrconf_ifdown(dev, 0); + addrconf_ifdown(dev, false); } return NOTIFY_OK; @@ -3663,9 +3663,9 @@ static bool addr_is_local(const struct in6_addr *addr) (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } -static int addrconf_ifdown(struct net_device *dev, int how) +static int addrconf_ifdown(struct net_device *dev, bool unregister) { - unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN; + unsigned long event = unregister ? 
NETDEV_UNREGISTER : NETDEV_DOWN; struct net *net = dev_net(dev); struct inet6_dev *idev; struct inet6_ifaddr *ifa, *tmp; @@ -3684,7 +3684,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) * Step 1: remove reference to ipv6 device from parent device. * Do not dev_put! */ - if (how) { + if (unregister) { idev->dead = 1; /* protected by rtnl_lock */ @@ -3698,7 +3698,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) /* combine the user config with event to determine if permanent * addresses are to be removed from address hash table */ - if (!how && !idev->cnf.disable_ipv6) { + if (!unregister && !idev->cnf.disable_ipv6) { /* aggregate the system setting and interface setting */ int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down; @@ -3736,7 +3736,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) addrconf_del_rs_timer(idev); /* Step 2: clear flags for stateless addrconf */ - if (!how) + if (!unregister) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); /* Step 3: clear tempaddr list */ @@ -3806,7 +3806,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) write_unlock_bh(&idev->lock); /* Step 5: Discard anycast and multicast list */ - if (how) { + if (unregister) { ipv6_ac_destroy_dev(idev); ipv6_mc_destroy_dev(idev); } else { @@ -3816,7 +3816,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) idev->tstamp = jiffies; /* Last: Shot the device (if unregistered) */ - if (how) { + if (unregister) { addrconf_sysctl_unregister(idev); neigh_parms_release(&nd_tbl, idev->nd_parms); neigh_ifdown(&nd_tbl, dev); @@ -4038,7 +4038,7 @@ static void addrconf_dad_work(struct work_struct *w) in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 1); if (disable_ipv6) - addrconf_ifdown(idev->dev, 0); + addrconf_ifdown(idev->dev, false); goto out; } @@ -7187,9 +7187,9 @@ void addrconf_cleanup(void) for_each_netdev(&init_net, dev) { if (__in6_dev_get(dev) == NULL) continue; - addrconf_ifdown(dev, 1); + addrconf_ifdown(dev, true); } - addrconf_ifdown(init_net.loopback_dev, 1); + addrconf_ifdown(init_net.loopback_dev, true); /* * Check hash table. From fd65e5a95d08389444e8591a20538b3edece0e15 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Fri, 31 Jul 2020 19:26:16 +0300 Subject: [PATCH 1962/2056] net: bridge: clear bridge's private skb space on xmit We need to clear all of the bridge private skb variables as they can be stale due to the packet being recirculated through the stack and then transmitted through the bridge device. Similar memset is already done on bridge's input. We've seen cases where proxyarp_replied was 1 on routed multicast packets transmitted through the bridge to ports with neigh suppress which were getting dropped. Same thing can in theory happen with the port isolation bit as well. Fixes: 821f1b21cabb ("bridge: add new BR_NEIGH_SUPPRESS port flag to suppress arp and nd flood") Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/bridge/br_device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 8c7b78f8bc23..9a2fb4aa1a10 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -36,6 +36,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) const unsigned char *dest; u16 vid = 0; + memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); + rcu_read_lock(); nf_ops = rcu_dereference(nf_br_ops); if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) { From 622e32b7d4a6492cf5c1f759ef833f817418f7b3 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 31 Jul 2020 20:12:05 +0200 Subject: [PATCH 1963/2056] net: gre: recompute gre csum for sctp over gre tunnels The GRE tunnel can be used to transport traffic that does not rely on a Internet checksum (e.g. SCTP). The issue can be triggered creating a GRE or GRETAP tunnel and transmitting SCTP traffic ontop of it where CRC offload has been disabled. In order to fix the issue we need to recompute the GRE csum in gre_gso_segment() not relying on the inner checksum. The issue is still present when we have the CRC offload enabled. In this case we need to disable the CRC offload if we require GRE checksum since otherwise skb_checksum() will report a wrong value. Fixes: 90017accff61 ("sctp: Add GSO support") Signed-off-by: Lorenzo Bianconi Reviewed-by: Marcelo Ricardo Leitner Signed-off-by: David S. Miller --- net/ipv4/gre_offload.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 2e6d1b7a7bc9..e0a246575887 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; From 155f15ad6760718288c914962bff8acd39942573 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 31 Jul 2020 13:15:34 -0700 Subject: [PATCH 1964/2056] ionic: use fewer firmware doorbells on rx fill We really don't need to hit the Rx queue doorbell so many times, we can wait to the end and cause a little less thrash. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 85eb8f276a37..e660cd66f9a8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -331,9 +331,6 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page, __free_page(page); } -#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1) -#define IONIC_RX_RING_HEAD_BUF_SZ 2048 - void ionic_rx_fill(struct ionic_queue *q) { struct net_device *netdev = q->lif->netdev; @@ -345,7 +342,6 @@ void ionic_rx_fill(struct ionic_queue *q) unsigned int remain_len; unsigned int seg_len; unsigned int nfrags; - bool ring_doorbell; unsigned int i, j; unsigned int len; @@ -360,9 +356,7 @@ void ionic_rx_fill(struct ionic_queue *q) page_info = &desc_info->pages[0]; if (page_info->page) { /* recycle the buffer */ - ring_doorbell = ((q->head->index + 1) & - IONIC_RX_RING_DOORBELL_STRIDE) == 0; - ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL); + ionic_rxq_post(q, false, ionic_rx_clean, NULL); continue; } @@ -401,10 +395,11 @@ void ionic_rx_fill(struct ionic_queue *q) page_info++; } - ring_doorbell = ((q->head->index + 1) & - IONIC_RX_RING_DOORBELL_STRIDE) == 0; - ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL); + ionic_rxq_post(q, false, ionic_rx_clean, NULL); } + + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head->index); } static void ionic_rx_fill_cb(void *arg) From b14e4e95f9ec5acf2cf65bcc4fab2fe8ded81ac3 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 31 Jul 2020 13:15:35 -0700 Subject: [PATCH 1965/2056] ionic: tx separate servicing We give the tx clean path its own budget and service routine in order to give a little more leeway to be more aggressive, and in preparation for coming changes. We've found this gives us a little better performance in some packet processing scenarios without hurting other scenarios. Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- .../net/ethernet/pensando/ionic/ionic_lif.c | 1 + .../net/ethernet/pensando/ionic/ionic_lif.h | 2 + .../net/ethernet/pensando/ionic/ionic_txrx.c | 98 +++++++++---------- 3 files changed, 49 insertions(+), 52 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 85e4cf229a96..c951ccc17dc4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2065,6 +2065,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index lif->index = index; lif->ntxq_descs = IONIC_DEF_TXRX_DESC; lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + lif->tx_budget = IONIC_TX_BUDGET_DEFAULT; /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index edad17d7aeeb..13f173d83970 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -13,6 +13,7 @@ #define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) #define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) #define IONIC_RX_COPYBREAK_DEFAULT 256 +#define IONIC_TX_BUDGET_DEFAULT 256 struct ionic_tx_stats { u64 dma_map_err; @@ -176,6 +177,7 @@ struct ionic_lif { unsigned int ntxq_descs; unsigned int nrxq_descs; u32 rx_copybreak; + u32 tx_budget; unsigned int rx_mode; u64 hw_features; bool mc_overflow; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index e660cd66f9a8..766f4595895c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -15,6 +15,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_cq_info *cq_info, void *cb_arg); +static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + +static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { @@ -249,29 +253,13 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) return true; } -static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit) -{ - u32 work_done = 0; - - while (ionic_rx_service(rxcq, rxcq->tail)) { - if (rxcq->tail->last) - rxcq->done_color = !rxcq->done_color; - rxcq->tail = rxcq->tail->next; - DEBUG_STATS_CQE_CNT(rxcq); - - if (++work_done >= limit) - break; - } - - return work_done; -} - void ionic_rx_flush(struct ionic_cq *cq) { struct ionic_dev *idev = &cq->lif->ionic->idev; u32 work_done; - work_done = ionic_rx_walk_cq(cq, cq->num_descs); + work_done = ionic_cq_service(cq, cq->num_descs, + ionic_rx_service, NULL, NULL); if (work_done) ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, @@ -439,32 +427,42 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) struct ionic_dev *idev; struct ionic_lif *lif; struct ionic_cq *txcq; + u32 rx_work_done = 0; + u32 tx_work_done = 0; u32 work_done = 0; u32 flags = 0; + bool unmask; lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; txcq = &lif->txqcqs[qi].qcq->cq; - ionic_tx_flush(txcq); + tx_work_done = ionic_cq_service(txcq, lif->tx_budget, + ionic_tx_service, NULL, NULL); - work_done = ionic_rx_walk_cq(rxcq, budget); - - if (work_done) + rx_work_done = ionic_cq_service(rxcq, budget, + ionic_rx_service, NULL, 
NULL); + if (rx_work_done) ionic_rx_fill_cb(rxcq->bound_q); - if (work_done < budget && napi_complete_done(napi, work_done)) { + unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget); + + if (unmask && napi_complete_done(napi, rx_work_done)) { flags |= IONIC_INTR_CRED_UNMASK; DEBUG_STATS_INTR_REARM(rxcq->bound_intr); + work_done = rx_work_done; + } else { + work_done = budget; } if (work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, - work_done, flags); + tx_work_done + rx_work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); + DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); + DEBUG_STATS_NAPI_POLL(qcq, tx_work_done); return work_done; } @@ -552,43 +550,39 @@ static void ionic_tx_clean(struct ionic_queue *q, } } -void ionic_tx_flush(struct ionic_cq *cq) +static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_txq_comp *comp = cq->tail->cq_desc; - struct ionic_dev *idev = &cq->lif->ionic->idev; + struct ionic_txq_comp *comp = cq_info->cq_desc; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; - unsigned int work_done = 0; - /* walk the completed cq entries */ - while (work_done < cq->num_descs && - color_match(comp->color, cq->done_color)) { + if (!color_match(comp->color, cq->done_color)) + return false; - /* clean the related q entries, there could be - * several q entries completed for each cq completion - */ - do { - desc_info = q->tail; - q->tail = desc_info->next; - ionic_tx_clean(q, desc_info, cq->tail, - desc_info->cb_arg); - desc_info->cb = NULL; - desc_info->cb_arg = NULL; - } while (desc_info->index != le16_to_cpu(comp->comp_index)); + /* clean the related q entries, there could be + * several q entries completed for each cq completion + */ + do { + desc_info = q->tail; + q->tail = desc_info->next; + ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg); + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + } while (desc_info->index != le16_to_cpu(comp->comp_index)); - if (cq->tail->last) - cq->done_color = !cq->done_color; + return true; +} - cq->tail = cq->tail->next; - comp = cq->tail->cq_desc; - DEBUG_STATS_CQE_CNT(cq); - - work_done++; - } +void ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_dev *idev = &cq->lif->ionic->idev; + u32 work_done; + work_done = ionic_cq_service(cq, cq->num_descs, + ionic_tx_service, NULL, NULL); if (work_done) ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, - work_done, 0); + work_done, IONIC_INTR_CRED_RESET_COALESCE); } void ionic_tx_empty(struct ionic_queue *q) From fe8c30b50835436152a4d2319f834acb36b3b659 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 31 Jul 2020 13:15:36 -0700 Subject: [PATCH 1966/2056] ionic: separate interrupt for Tx and Rx Add the capability to split the Tx queues onto their own interrupts with their own napi contexts. This gives the opportunity for more direct control of Tx interrupt handling, such as CPU affinity and interrupt coalescing, useful for some traffic loads. v2: use ethtool -L, not a vendor specific priv-flag v3: simplify logging, drop unnecessary "no-change" tests Reviewed-by: Jakub Kicinski Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- .../ethernet/pensando/ionic/ionic_ethtool.c | 94 +++++++++++++++---- .../net/ethernet/pensando/ionic/ionic_lif.c | 41 ++++++-- .../net/ethernet/pensando/ionic/ionic_lif.h | 3 + .../net/ethernet/pensando/ionic/ionic_txrx.c | 67 +++++++++++++ .../net/ethernet/pensando/ionic/ionic_txrx.h | 2 + 5 files changed, 181 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 095561924bdc..3c57c331729f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -403,8 +403,7 @@ static int ionic_get_coalesce(struct net_device *netdev, { struct ionic_lif *lif = netdev_priv(netdev); - /* Tx uses Rx interrupt */ - coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs; + coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs; coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs; return 0; @@ -417,7 +416,8 @@ static int ionic_set_coalesce(struct net_device *netdev, struct ionic_identity *ident; struct ionic_qcq *qcq; unsigned int i; - u32 coal; + u32 rx_coal; + u32 tx_coal; ident = &lif->ionic->ident; if (ident->dev.intr_coal_div == 0) { @@ -426,26 +426,31 @@ static int ionic_set_coalesce(struct net_device *netdev, return -EIO; } - /* Tx uses Rx interrupt, so only change Rx */ - if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { + /* Tx normally shares Rx interrupt, so only change Rx */ + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) && + coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { netdev_warn(netdev, "only the rx-usecs can be changed\n"); return -EINVAL; } - /* Convert the usec request to a HW useable value. If they asked + /* Convert the usec request to a HW usable value. 
If they asked * for non-zero and it resolved to zero, bump it up */ - coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); - if (!coal && coalesce->rx_coalesce_usecs) - coal = 1; + rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); + if (!rx_coal && coalesce->rx_coalesce_usecs) + rx_coal = 1; + tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs); + if (!tx_coal && coalesce->tx_coalesce_usecs) + tx_coal = 1; - if (coal > IONIC_INTR_CTRL_COAL_MAX) + if (rx_coal > IONIC_INTR_CTRL_COAL_MAX || + tx_coal > IONIC_INTR_CTRL_COAL_MAX) return -ERANGE; - /* Save the new value */ + /* Save the new values */ lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs; - if (coal != lif->rx_coalesce_hw) { - lif->rx_coalesce_hw = coal; + if (rx_coal != lif->rx_coalesce_hw) { + lif->rx_coalesce_hw = rx_coal; if (test_bit(IONIC_LIF_F_UP, lif->state)) { for (i = 0; i < lif->nxqs; i++) { @@ -457,6 +462,23 @@ static int ionic_set_coalesce(struct net_device *netdev, } } + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs; + else + lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs; + if (tx_coal != lif->tx_coalesce_hw) { + lif->tx_coalesce_hw = tx_coal; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + for (i = 0; i < lif->nxqs; i++) { + qcq = lif->txqcqs[i].qcq; + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + qcq->intr.index, + lif->tx_coalesce_hw); + } + } + } + return 0; } @@ -510,29 +532,63 @@ static void ionic_get_channels(struct net_device *netdev, /* report maximum channels */ ch->max_combined = lif->ionic->ntxqs_per_lif; + ch->max_rx = lif->ionic->ntxqs_per_lif / 2; + ch->max_tx = lif->ionic->ntxqs_per_lif / 2; /* report current channels */ - ch->combined_count = lif->nxqs; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + ch->rx_count = lif->nxqs; + ch->tx_count = lif->nxqs; + } else { + ch->combined_count = lif->nxqs; + } } static void ionic_set_queuecount(struct ionic_lif *lif, void *arg) { struct ethtool_channels *ch = arg; - lif->nxqs = ch->combined_count; + if (ch->combined_count) { + lif->nxqs = ch->combined_count; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; + netdev_info(lif->netdev, "Sharing queue interrupts\n"); + } + } else { + lif->nxqs = ch->rx_count; + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + netdev_info(lif->netdev, "Splitting queue interrupts\n"); + } + } } static int ionic_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct ionic_lif *lif = netdev_priv(netdev); + int new_cnt; - if (!ch->combined_count || ch->other_count || - ch->rx_count || ch->tx_count) + if (ch->rx_count != ch->tx_count) { + netdev_info(netdev, "The rx and tx count must be equal\n"); return -EINVAL; + } - if (ch->combined_count == lif->nxqs) - return 0; + if (ch->combined_count && ch->rx_count) { + netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n"); + return -EINVAL; + } + + if (ch->combined_count) + new_cnt = ch->combined_count; + else + new_cnt = ch->rx_count; + + if (lif->nxqs != new_cnt) + netdev_info(netdev, "Changing queue count from %d to %d\n", + lif->nxqs, new_cnt); return ionic_reset_queues(lif, ionic_set_queuecount, ch); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c 
b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index c951ccc17dc4..1944bf5264db 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -616,7 +616,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .index = cpu_to_le32(q->index), .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | IONIC_QINIT_F_SG), - .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), .pid = cpu_to_le16(q->pid), .ring_size = ilog2(q->num_descs), .ring_base = cpu_to_le64(q->base_pa), @@ -624,14 +623,22 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .sg_ring_base = cpu_to_le64(q->sg_base_pa), }, }; + unsigned int intr_index; int err; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + intr_index = qcq->intr.index; + else + intr_index = lif->rxqcqs[q->index].qcq->intr.index; + ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); + dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); q->tail = q->info; q->head = q->tail; @@ -648,6 +655,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi, + NAPI_POLL_WEIGHT); + qcq->flags |= IONIC_QCQ_F_INITED; return 0; @@ -684,6 +695,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); q->tail = q->info; q->head = q->tail; @@ -700,8 +712,12 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); - netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, - NAPI_POLL_WEIGHT); + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, + NAPI_POLL_WEIGHT); + else + netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi, + NAPI_POLL_WEIGHT); qcq->flags |= IONIC_QCQ_F_INITED; @@ -1537,6 +1553,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) sg_desc_sz = sizeof(struct ionic_txq_sg_desc); flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + flags |= IONIC_QCQ_F_INTR; for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, lif->ntxq_descs, @@ -1547,6 +1565,11 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) if (err) goto err_out; + if (flags & IONIC_QCQ_F_INTR) + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->txqcqs[i].qcq->intr.index, + lif->tx_coalesce_hw); + lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats; ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq); } @@ -1562,13 +1585,15 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) if (err) goto err_out; - lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; - 
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, lif->rxqcqs[i].qcq->intr.index, lif->rx_coalesce_hw); - ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, - lif->txqcqs[i].qcq); + + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, + lif->txqcqs[i].qcq); + + lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq); } @@ -2071,6 +2096,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; snprintf(lif->name, sizeof(lif->name), "lif%u", index); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 13f173d83970..1ee3b14c8d50 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -137,6 +137,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FW_RESET, + IONIC_LIF_F_SPLIT_INTR, /* leave this as last */ IONIC_LIF_F_STATE_SIZE @@ -205,6 +206,8 @@ struct ionic_lif { struct dentry *dentry; u32 rx_coalesce_usecs; /* what the user asked for */ u32 rx_coalesce_hw; /* what the hw is using */ + u32 tx_coalesce_usecs; /* what the user asked for */ + u32 tx_coalesce_hw; /* what the hw is using */ struct work_struct tx_timeout_work; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 766f4595895c..8107d32c2767 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -419,7 +419,74 @@ void ionic_rx_empty(struct ionic_queue *q) } } +int ionic_tx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif *lif; + u32 work_done = 0; + u32 flags = 0; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_tx_service, NULL, NULL); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + int ionic_rx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif *lif; + u32 work_done = 0; + u32 flags = 0; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_rx_service, NULL, NULL); + + if (work_done) + ionic_rx_fill(cq->bound_q); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int ionic_txrx_napi(struct napi_struct *napi, int budget) { struct ionic_qcq *qcq = napi_to_qcq(napi); struct ionic_cq 
*rxcq = napi_to_cq(napi); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index 71973e3c35a6..a5883be0413f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -11,6 +11,8 @@ void ionic_rx_fill(struct ionic_queue *q); void ionic_rx_empty(struct ionic_queue *q); void ionic_tx_empty(struct ionic_queue *q); int ionic_rx_napi(struct napi_struct *napi, int budget); +int ionic_tx_napi(struct napi_struct *napi, int budget); +int ionic_txrx_napi(struct napi_struct *napi, int budget); netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); #endif /* _IONIC_TXRX_H_ */ From fd4ec07631b13438c148af973ecd461cf440ee2e Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Sat, 1 Aug 2020 02:20:52 +0530 Subject: [PATCH 1967/2056] cxgb4: fix check for running offline ethtool selftest The flag indicating the selftest to run is a bitmask. So, fix the check. Also, the selftests will fail if adapter initialization has not been completed yet. So, add appropriate check and bail sooner. Fixes: 7235ffae3d2c ("cxgb4: add loopback ethtool self-test") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 12ef9ddd1e54..9f3173f86eed 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -31,7 +31,7 @@ enum cxgb4_ethtool_tests { }; static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = { - "Loop back test", + "Loop back test (offline)", }; static const char * const flash_region_strings[] = { @@ -2095,12 +2095,13 @@ static void cxgb4_self_test(struct net_device *netdev, memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST); - if (!(adap->flags & CXGB4_FW_OK)) { + if (!(adap->flags & CXGB4_FULL_INIT_DONE) || + !(adap->flags & CXGB4_FW_OK)) { eth_test->flags |= ETH_TEST_FL_FAILED; return; } - if (eth_test->flags == ETH_TEST_FL_OFFLINE) + if (eth_test->flags & ETH_TEST_FL_OFFLINE) cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]); if (data[CXGB4_ETHTOOL_LB_TEST]) From 29b3705facb3d2b9f51383aa5ad3962a9105d06b Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Sat, 1 Aug 2020 02:23:40 +0530 Subject: [PATCH 1968/2056] cxgb4: fix extracting IP addresses in TC-FLOWER rules commit c8729cac2a11 ("cxgb4: add ethtool n-tuple filter insertion") has removed checking control key for determining IP address types for TC-FLOWER rules, which causes all the rules being inserted to hardware to become IPv6 rule type always. So, add back the check to select the correct IP address type to extract and hence fix the correct rule type being inserted to hardware. Also, ethtool_rx_flow_key doesn't have any control key and instead directly sets the IPv4/IPv6 address keys. So, explicitly set the IP address type for ethtool n-tuple filters to reuse the same code. Fixes: c8729cac2a11 ("cxgb4: add ethtool n-tuple filter insertion") Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. 
Miller --- .../ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 6251444ffa58..f642c1b475c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -80,6 +80,19 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_rule *rule, struct ch_filter_specification *fs) { + u16 addr_type = 0; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; u16 ethtype_key, ethtype_mask; @@ -102,7 +115,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.proto = match.mask->ip_proto; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); @@ -117,7 +130,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src)); } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); From 7ad9c26f7512917819b0b3590062c34f22d128bf Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:13:54 +0800 Subject: [PATCH 1969/2056] net: qede: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4250c17940c0..140a392a81bb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2674,8 +2674,8 @@ static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) data->feat_flags |= QED_TLV_LSO; ether_addr_copy(data->mac[0], edev->ndev->dev_addr); - memset(data->mac[1], 0, ETH_ALEN); - memset(data->mac[2], 0, ETH_ALEN); + eth_zero_addr(data->mac[1]); + eth_zero_addr(data->mac[2]); /* Copy the first two UC macs */ netif_addr_lock_bh(edev->ndev); i = 1; From 8340303670d895407ab02885f1fc5a38cc7b044f Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:14:41 +0800 Subject: [PATCH 1970/2056] net: qed: use eth_zero_addr() to clear mac address Use eth_zero_addr() to clear mac address instead of memset(). Signed-off-by: Miaohe Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9489089706fe..f1f75b6d0421 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -5038,8 +5038,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], vf_info->mac)) { - memset(vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(vf->shadow_config.macs[i]); DP_VERBOSE(hwfn, QED_MSG_IOV, "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", vf_info->mac, vf_id); @@ -5048,7 +5047,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) } ether_addr_copy(vf_info->mac, force_mac); - memset(vf_info->forced_mac, 0, ETH_ALEN); + eth_zero_addr(vf_info->forced_mac); vf->bulletin.p_virt->valid_bitmap &= ~BIT(MAC_ADDR_FORCED); qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); @@ -5059,7 +5058,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) if (!vf_info->is_trusted_configured) { u8 empty_mac[ETH_ALEN]; - memset(empty_mac, 0, ETH_ALEN); + eth_zero_addr(empty_mac); for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], empty_mac)) { From c15fc199b3757496325c4855662fad89bd1efdad Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:30:23 +0800 Subject: [PATCH 1971/2056] net: Use __skb_pagelen() directly in skb_cow_data() In fact, skb_pagelen() - skb_headlen() is equal to __skb_pagelen(), use it directly to avoid unnecessary skb_headlen() call. Also fix the CHECK note of checkpatch.pl: Comparison to NULL could be written "!__pskb_pull_tail" Signed-off-by: Miaohe Lin Signed-off-by: David S. Miller --- net/core/skbuff.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e2edfbe0e19..97ad6523e490 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4413,7 +4413,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) * at the moment even if they are anonymous). */ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && - __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) + !__pskb_pull_tail(skb, __skb_pagelen(skb))) return -ENOMEM; /* Easy case. Most of packets will go this way. */ From 2f631133c40cd8e311ae393518c3e651e476ab66 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 1 Aug 2020 17:36:05 +0800 Subject: [PATCH 1972/2056] net: Pass NULL to skb_network_protocol() when we don't care about vlan depth When we don't care about vlan depth, we could pass NULL instead of the address of a unused local variable to skb_network_protocol() as a param. Signed-off-by: Miaohe Lin Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 97ad6523e490..2828f6d5ba89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3758,7 +3758,6 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int err = -ENOMEM; int i = 0; int pos; - int dummy; if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { @@ -3780,7 +3779,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, } __skb_push(head_skb, doffset); - proto = skb_network_protocol(head_skb, &dummy); + proto = skb_network_protocol(head_skb, NULL); if (unlikely(!proto)) return ERR_PTR(-EINVAL); From e9d204fde5fd7d8ca1d6490d1b5c3d8d963bcd1a Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 1 Aug 2020 18:05:54 +0100 Subject: [PATCH 1973/2056] net: dsa: qca8k: Add define for port VID Rather than using a magic value of 1 when configuring the port VIDs add a QCA8K_PORT_VID_DEF define and use that instead. Also fix up the bitmask in the process; the top 4 bits are reserved so this wasn't a problem, but only masking 12 bits is the correct approach. Signed-off-by: Jonathan McDowell Reviewed-by: Florian Fainelli Acked-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 11 ++++++----- drivers/net/dsa/qca8k.h | 2 ++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a5566de82853..3ebc4da63074 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -663,10 +663,11 @@ qca8k_setup(struct dsa_switch *ds) * default egress vid */ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), - 0xffff << shift, 1 << shift); + 0xfff << shift, + QCA8K_PORT_VID_DEF << shift); qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), - QCA8K_PORT_VLAN_CVID(1) | - QCA8K_PORT_VLAN_SVID(1)); + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); } } @@ -1133,7 +1134,7 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, { /* Set the vid to the port vlan id if no vid is set */ if (!vid) - vid = 1; + vid = QCA8K_PORT_VID_DEF; return qca8k_fdb_add(priv, addr, port_mask, vid, QCA8K_ATU_STATUS_STATIC); @@ -1157,7 +1158,7 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port, u16 port_mask = BIT(port); if (!vid) - vid = 1; + vid = QCA8K_PORT_VID_DEF; return qca8k_fdb_del(priv, addr, port_mask, vid); } diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 31439396401c..92216a52daa5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -22,6 +22,8 @@ #define QCA8K_CPU_PORT 0 +#define QCA8K_PORT_VID_DEF 1 + /* Global control registers */ #define QCA8K_REG_MASK_CTRL 0x000 #define QCA8K_MASK_CTRL_ID_M 0xff From 69462fe6a39048ec186f60178a3a6ea23ae16e0c Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Sat, 1 Aug 2020 18:06:46 +0100 Subject: [PATCH 1974/2056] net: dsa: qca8k: Add 802.1q VLAN support This adds full 802.1q VLAN support to the qca8k, allowing the use of vlan_filtering and more complicated bridging setups than allowed by basic port VLAN support. Tested with a number of untagged ports with separate VLANs and then a trunk port with all the VLANs tagged on it. 
v3: - Pull QCA8K_PORT_VID_DEF changes into separate cleanup patch - Reverse Christmas tree notation for variable definitions - Use untagged instead of tagged for consistency v2: - Return sensible errnos on failure rather than -1 (rmk) - Style cleanups based on Florian's feedback - Silently allow VLAN 0 as device correctly treats this as no tag Signed-off-by: Jonathan McDowell Reviewed-by: Florian Fainelli Acked-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 181 ++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/qca8k.h | 27 ++++++ 2 files changed, 208 insertions(+) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 3ebc4da63074..f1e484477e35 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -408,6 +408,112 @@ qca8k_fdb_flush(struct qca8k_priv *priv) mutex_unlock(&priv->reg_mutex); } +static int +qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) +{ + u32 reg; + + /* Set the command and VLAN index */ + reg = QCA8K_VTU_FUNC1_BUSY; + reg |= cmd; + reg |= vid << QCA8K_VTU_FUNC1_VID_S; + + /* Write the function register triggering the table access */ + qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); + + /* wait for completion */ + if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY)) + return -ETIMEDOUT; + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_VLAN_LOAD) { + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1); + if (reg & QCA8K_VTU_FUNC1_FULL) + return -ENOMEM; + } + + return 0; +} + +static int +qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) +{ + u32 reg; + int ret; + + /* + We do the right thing with VLAN 0 and treat it as untagged while + preserving the tag on egress. + */ + if (vid == 0) + return 0; + + mutex_lock(&priv->reg_mutex); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); + if (ret < 0) + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; + reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + if (untagged) + reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + else + reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + + qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + +out: + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) +{ + u32 reg, mask; + int ret, i; + bool del; + + mutex_lock(&priv->reg_mutex); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); + if (ret < 0) + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); + reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + + /* Check if we're the last member to be removed */ + del = true; + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + mask = QCA8K_VTU_FUNC0_EG_MODE_NOT; + mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i); + + if ((reg & mask) != mask) { + del = false; + break; + } + } + + if (del) { + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); + } else { + qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + } + +out: + mutex_unlock(&priv->reg_mutex); + + return ret; +} + static void qca8k_mib_init(struct qca8k_priv *priv) { @@ -1187,6 +1293,76 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static int +qca8k_port_vlan_filtering(struct dsa_switch 
*ds, int port, bool vlan_filtering) +{ + struct qca8k_priv *priv = ds->priv; + + if (vlan_filtering) { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); + } else { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); + } + + return 0; +} + +static int +qca8k_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + return 0; +} + +static void +qca8k_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct qca8k_priv *priv = ds->priv; + int ret = 0; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid) + ret = qca8k_vlan_add(priv, port, vid, untagged); + + if (ret) + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); + + if (pvid) { + int shift = 16 * (port % 2); + + qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), + 0xfff << shift, + vlan->vid_end << shift); + qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), + QCA8K_PORT_VLAN_CVID(vlan->vid_end) | + QCA8K_PORT_VLAN_SVID(vlan->vid_end)); + } +} + +static int +qca8k_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct qca8k_priv *priv = ds->priv; + int ret = 0; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid) + ret = qca8k_vlan_del(priv, port, vid); + + if (ret) + dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); + + return ret; +} + static enum dsa_tag_protocol qca8k_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) @@ -1212,6 +1388,10 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, + .port_vlan_filtering = qca8k_port_vlan_filtering, + .port_vlan_prepare = qca8k_port_vlan_prepare, + .port_vlan_add = qca8k_port_vlan_add, + .port_vlan_del = qca8k_port_vlan_del, .phylink_validate = qca8k_phylink_validate, .phylink_mac_link_state = qca8k_phylink_mac_link_state, .phylink_mac_config = qca8k_phylink_mac_config, @@ -1262,6 +1442,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->ds->dev = &mdiodev->dev; priv->ds->num_ports = QCA8K_NUM_PORTS; + priv->ds->configure_vlan_while_not_filtering = true; priv->ds->priv = priv; priv->ops = qca8k_switch_ops; priv->ds->ops = &priv->ops; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 92216a52daa5..7ca4b93e0bb5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -128,6 +128,19 @@ #define QCA8K_ATU_FUNC_FULL BIT(12) #define QCA8K_ATU_FUNC_PORT_M 0xf #define QCA8K_ATU_FUNC_PORT_S 8 +#define QCA8K_REG_VTU_FUNC0 0x610 +#define QCA8K_VTU_FUNC0_VALID BIT(20) +#define QCA8K_VTU_FUNC0_IVL_EN BIT(19) +#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 +#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 +#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 +#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 +#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 +#define QCA8K_REG_VTU_FUNC1 0x614 +#define QCA8K_VTU_FUNC1_BUSY BIT(31) +#define QCA8K_VTU_FUNC1_VID_S 16 +#define QCA8K_VTU_FUNC1_FULL BIT(4) #define QCA8K_REG_GLOBAL_FW_CTRL0 0x620 #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 @@ -137,6 +150,11 @@ #define 
QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) +#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) #define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) #define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) @@ -180,6 +198,15 @@ enum qca8k_fdb_cmd { QCA8K_FDB_SEARCH = 7, }; +enum qca8k_vlan_cmd { + QCA8K_VLAN_FLUSH = 1, + QCA8K_VLAN_LOAD = 2, + QCA8K_VLAN_PURGE = 3, + QCA8K_VLAN_REMOVE_PORT = 4, + QCA8K_VLAN_NEXT = 5, + QCA8K_VLAN_READ = 6, +}; + struct ar8xxx_port_status { int enabled; }; From d0f6ba2ef2c1c95069509e71402e7d6d43452512 Mon Sep 17 00:00:00 2001 From: Vincent Duvert Date: Sun, 2 Aug 2020 07:06:51 +0200 Subject: [PATCH 1975/2056] appletalk: Fix atalk_proc_init() return path Add a missing return statement to atalk_proc_init so it doesn't return -ENOMEM when successful. This allows the appletalk module to load properly. Fixes: e2bcd8b0ce6e ("appletalk: use remove_proc_subtree to simplify procfs code") Link: https://www.downtowndougbrown.com/2020/08/hacking-up-a-fix-for-the-broken-appletalk-kernel-module-in-linux-5-1-and-newer/ Reported-by: Christopher KOBAYASHI Reported-by: Doug Brown Signed-off-by: Vincent Duvert [lukas: add missing tags] Signed-off-by: Lukas Wunner Cc: stable@vger.kernel.org # v5.1+ Cc: Yue Haibing Signed-off-by: David S. Miller --- net/appletalk/atalk_proc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 550c6ca007cc..9c1241292d1d 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -229,6 +229,8 @@ int __init atalk_proc_init(void) sizeof(struct aarp_iter_state), NULL)) goto out; + return 0; + out: remove_proc_subtree("atalk", init_net.proc_net); return -ENOMEM; From 6d78e473e018e15065d3004ee73ac77acf82baa2 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Sun, 2 Aug 2020 09:49:53 +0200 Subject: [PATCH 1976/2056] net: phy: mdio-mvusb: select MDIO_DEVRES in Kconfig PHYLIB is not selected by the mvusb driver but it uses mdio devres helpers. Explicitly select MDIO_DEVRES in this driver's Kconfig entry. Reported-by: kernel test robot Fixes: 1814cff26739 ("net: phy: add a Kconfig option for mdio_devres") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index dd20c2c27c2f..726e4b240e7e 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -194,6 +194,7 @@ config MDIO_MSCC_MIIM config MDIO_MVUSB tristate "Marvell USB to MDIO Adapter" depends on USB + select MDIO_DEVRES help A USB to MDIO converter present on development boards for Marvell's Link Street family of Ethernet switches. 
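The change above generalizes to any MDIO bus driver that calls the managed devm_mdiobus_*() helpers (devm_mdiobus_alloc() and friends) without otherwise pulling in PHYLIB: the driver's Kconfig entry has to select MDIO_DEVRES itself, or in some configurations the devres helpers may not be built and the final link can fail, which is what the kernel test robot reported here. A minimal sketch of such an entry follows; MDIO_EXAMPLE_DRIVER is a hypothetical symbol used only for illustration, not a real option in the tree.

config MDIO_EXAMPLE_DRIVER
	tristate "Hypothetical USB-attached MDIO bus driver"
	depends on USB
	select MDIO_DEVRES
	help
	  Example only. A driver built on the managed MDIO helpers needs
	  MDIO_DEVRES; selecting it directly keeps the driver buildable in
	  randconfig setups where nothing else happens to enable it.

Selecting the small MDIO_DEVRES option, rather than depending on all of PHYLIB, keeps the dependency minimal while still guaranteeing the helpers are present.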
From a45a9e8a768c32103ffec67f9b173968a6154a11 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sun, 2 Aug 2020 17:16:11 +0800 Subject: [PATCH 1977/2056] atm: eni: avoid accessing the data mapped to streaming DMA In do_tx(), skb->data is mapped to streaming DMA on line 1111: paddr = dma_map_single(...,skb->data,DMA_TO_DEVICE); Then skb->data is accessed on line 1153: (skb->data[3] & 0xf) This access may cause data inconsistency between CPU cache and hardware. To fix this problem, skb->data[3] is assigned to a local variable before DMA mapping, and then the driver accesses this local variable instead of skb->data[3]. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. Miller --- drivers/atm/eni.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b3d8e00e7671..39be444534d0 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1034,6 +1034,7 @@ static enum enq_res do_tx(struct sk_buff *skb) u32 dma_rd,dma_wr; u32 size; /* in words */ int aal5,dma_size,i,j; + unsigned char skb_data3; DPRINTK(">do_tx\n"); NULLCHECK(skb); @@ -1108,6 +1109,7 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); vcc->dev->number); return enq_jam; } + skb_data3 = skb->data[3]; paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); ENI_PRV_PADDR(skb) = paddr; @@ -1150,7 +1152,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */ (size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4); /*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/ writel((vcc->vci << MID_SEG_VCI_SHIFT) | - (aal5 ? 0 : (skb->data[3] & 0xf)) | + (aal5 ? 0 : (skb_data3 & 0xf)) | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0), tx->send+((tx->tx_pos+1) & (tx->words-1))*4); DPRINTK("size: %d, len:%d\n",size,skb->len); From cbbb64f62acd97429f517bd382ada9d618c8856a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Sun, 2 Aug 2020 17:33:40 +0800 Subject: [PATCH 1978/2056] atm: idt77252: avoid accessing the data mapped to streaming DMA In queue_skb(), skb->data is mapped to streaming DMA on line 850: dma_map_single(..., skb->data, ...); Then skb->data is accessed on lines 862 and 863: tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | (skb->data[2] << 8) | (skb->data[3] << 0); and on lines 893 and 894: tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | (skb->data[2] << 8) | (skb->data[3] << 0); These accesses may cause data inconsistency between CPU cache and hardware. To fix this problem, the calculation result of skb->data is stored in a local variable before DMA mapping, and then the driver accesses this local variable instead of skb->data. Signed-off-by: Jia-Ju Bai Signed-off-by: David S. 
Miller --- drivers/atm/idt77252.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index df51680e8931..65a3886f68c9 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -835,6 +835,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, unsigned long flags; int error; int aal; + u32 word4; if (skb->len == 0) { printk("%s: invalid skb->len (%d)\n", card->name, skb->len); @@ -846,6 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd = &IDT77252_PRV_TBD(skb); vcc = ATM_SKB(skb)->vcc; + word4 = (skb->data[0] << 24) | (skb->data[1] << 16) | + (skb->data[2] << 8) | (skb->data[3] << 0); IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data, skb->len, DMA_TO_DEVICE); @@ -859,8 +862,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd->word_1 = SAR_TBD_OAM | ATM_CELL_PAYLOAD | SAR_TBD_EPDU; tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; tbd->word_3 = 0x00000000; - tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | - (skb->data[2] << 8) | (skb->data[3] << 0); + tbd->word_4 = word4; if (test_bit(VCF_RSV, &vc->flags)) vc = card->vcs[0]; @@ -890,8 +892,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; tbd->word_3 = 0x00000000; - tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | - (skb->data[2] << 8) | (skb->data[3] << 0); + tbd->word_4 = word4; break; case ATM_AAL5: From 0470a48880f8bc42ce26962b79c7b802c5a695ec Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:37 +0800 Subject: [PATCH 1979/2056] net: ethernet: aquantia: Fix wrong return value In function hw_atl_a0_hw_multicast_list_set(), when an invalid request is encountered, a negative error code should be returned. Fixes: bab6de8fd180b ("net: ethernet: aquantia: Atlantic A0 and B0 specific functions") Cc: David VomLehn Signed-off-by: Tianjia Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index c38a4b8a14cb..611875ef2cd1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -786,7 +786,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) { From bace287c55aa7b66b217701d290801ed810702b1 Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:38 +0800 Subject: [PATCH 1980/2056] net/enetc: Fix wrong return value in enetc_psfp_parse_clsflower() In the case of invalid rule, a positive value EINVAL is returned here. I think this is a typo error. It is necessary to return an error value. Cc: Po Liu Signed-off-by: Tianjia Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/enetc/enetc_qos.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index a9939550fa8f..1c4a535890da 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1130,7 +1130,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, !is_zero_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Cannot match on both source and destination MAC"); - err = EINVAL; + err = -EINVAL; goto free_filter; } @@ -1138,7 +1138,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, if (!is_broadcast_ether_addr(match.mask->dst)) { NL_SET_ERR_MSG_MOD(extack, "Masked matching on destination MAC not supported"); - err = EINVAL; + err = -EINVAL; goto free_filter; } ether_addr_copy(filter->sid.dst_mac, match.key->dst); @@ -1149,7 +1149,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, if (!is_broadcast_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Masked matching on source MAC not supported"); - err = EINVAL; + err = -EINVAL; goto free_filter; } ether_addr_copy(filter->sid.src_mac, match.key->src); @@ -1157,7 +1157,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); - err = EINVAL; + err = -EINVAL; goto free_filter; } From aa027850a292ea65524b8fab83eb91a124ad362c Mon Sep 17 00:00:00 2001 From: Tianjia Zhang Date: Sun, 2 Aug 2020 19:15:44 +0800 Subject: [PATCH 1981/2056] liquidio: Fix wrong return value in cn23xx_get_pf_num() On an error exit path, a negative error code should be returned instead of a positive return value. Fixes: 0c45d7fe12c7e ("liquidio: fix use of pf in pass-through mode in a virtual machine") Cc: Rick Farrington Signed-off-by: Tianjia Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 43d11c38b38a..4cddd628d41b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. From edab74e9cb1d073c70add0f9b75e17aebf0598ff Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 2 Aug 2020 15:52:04 +0200 Subject: [PATCH 1982/2056] net: sgi: ioc3-eth: Fix the size used in some 'dma_free_coherent()' calls Update the size used in 'dma_free_coherent()' in order to match the one used in the corresponding 'dma_alloc_coherent()'. Fixes: 369a782af0f1 ("net: sgi: ioc3-eth: ensure tx ring is 16k aligned.") Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sgi/ioc3-eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 6646eba9f57f..6eef0f45b133 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -951,7 +951,7 @@ static int ioc3eth_probe(struct platform_device *pdev) dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); if (ip->tx_ring) - dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); out_free: free_netdev(dev); @@ -964,7 +964,7 @@ static int ioc3eth_remove(struct platform_device *pdev) struct ioc3_private *ip = netdev_priv(dev); dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); - dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma); + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); From 36f28f7687a9ce665479cce5d64ce7afaa9e77ae Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 2 Aug 2020 15:53:33 +0200 Subject: [PATCH 1983/2056] net: spider_net: Fix the size used in a 'dma_free_coherent()' call Update the size used in 'dma_free_coherent()' in order to match the one used in the corresponding 'dma_alloc_coherent()', in 'spider_net_init_chain()'. Fixes: d4ed8f8d1fb7 ("Spidernet DMA coalescing") Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/toshiba/spider_net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3902b3aeb0c2..94267e1f5d30 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** From c23cf402d0bb069310ce75bbf2758ff9c23fbe73 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 2 Aug 2020 15:53:48 +0200 Subject: [PATCH 1984/2056] net: spider_net: Remove a useless memset Avoid a memset after a call to 'dma_alloc_coherent()'. This is useless since commit 518a2f1925c3 ("dma-mapping: zero memory returned from dma_alloc_*") Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/toshiba/spider_net.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 94267e1f5d30..07389702a540 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -314,8 +314,6 @@ spider_net_init_chain(struct spider_net_card *card, if (!chain->hwring) return -ENOMEM; - memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); - /* Set up the hardware pointers in each descriptor */ descr = chain->ring; hwdescr = chain->hwring; From 99f47abd9f7bf6e365820d355dc98f6955a562df Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:30 +0300 Subject: [PATCH 1985/2056] fsl/fman: use 32-bit unsigned integer Potentially overflowing expression (ts_freq << 16 and intgr << 16) declared as type u32 (32-bit unsigned) is evaluated using 32-bit arithmetic and then used in a context that expects an expression of type u64 (64-bit unsigned) which ultimately is used as 16-bit unsigned by typecasting to u16. Fixed by using an unsigned 32-bit integer since the value is truncated anyway in the end. Fixes: 414fd46e7762 ("fsl/fman: Add FMan support") Signed-off-by: Florinel Iordache Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index f151d6e111dd..ef67e8599b39 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1398,8 +1398,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond From 0572054617f32670abab4b4e89a876954d54b704 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:31 +0300 Subject: [PATCH 1986/2056] fsl/fman: fix dereference null return value Check before using returned value to avoid dereferencing null pointer. Fixes: 18a6c85fcc78 ("fsl/fman: Add FMan Port Support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_port.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 87b26f063cc8..c27df153f895 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1767,6 +1767,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1791,8 +1792,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; From cc79fd8f557767de90ff199d3b6fb911df43160a Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:32 +0300 Subject: [PATCH 1987/2056] fsl/fman: fix unreachable code The parameter 'priority' is incorrectly forced to zero which ultimately induces logically dead code in the subsequent lines. Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_memac.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index a5500ede4070..bb02b37422cc 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -852,7 +852,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); From cc5d229a122106733a85c279d89d7703f21e4d4f Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:33 +0300 Subject: [PATCH 1988/2056] fsl/fman: check dereferencing null pointer Add a safe check to avoid dereferencing null pointer Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 4 ++-- drivers/net/ethernet/freescale/fman/fman_memac.c | 2 +- drivers/net/ethernet/freescale/fman/fman_tgec.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 004c266802a8..bce3c9398887 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1200,7 +1200,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1213,7 +1213,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index bb02b37422cc..645764abdaae 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -981,7 +981,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 8c7eb878d5b4..41946b16f6c7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -626,7 +626,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; From 3207f715c34317d08e798e11a10ce816feb53c0f Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Mon, 3 Aug 2020 10:07:34 +0300 Subject: [PATCH 1989/2056] fsl/fman: fix eth hash table allocation Fix memory allocation for ethernet address hash table. The code was wrongly allocating an array for eth hash table which is incorrect because this is the main structure for eth hash table (struct eth_hash_t) that contains inside a number of elements. Fixes: 57ba4c9b56d8 ("fsl/fman: Add FMan MAC support") Signed-off-by: Florinel Iordache Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_mac.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1..19f327efdaff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; From fbc97de84ef88bc38874c41be151cf220c72a865 Mon Sep 17 00:00:00 2001 From: Huang Guobin Date: Sun, 2 Aug 2020 22:00:55 -0400 Subject: [PATCH 1990/2056] tipc: Use is_broadcast_ether_addr() instead of memcmp() Using is_broadcast_ether_addr() instead of directly use memcmp() to determine if the ethernet address is broadcast address. spatch with a semantic match is used to found this problem. (http://coccinelle.lip6.fr/) Signed-off-by: Huang Guobin Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/eth_media.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index 8b0bb600602d..c68019697cfe 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -62,12 +62,10 @@ static int tipc_eth_raw2addr(struct tipc_bearer *b, struct tipc_media_addr *addr, char *msg) { - char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - memset(addr, 0, sizeof(*addr)); ether_addr_copy(addr->value, msg); addr->media_id = TIPC_MEDIA_TYPE_ETH; - addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN); + addr->broadcast = is_broadcast_ether_addr(addr->value); return 0; } From 730e700e2c19d87e578ff0e7d8cb1d4a02b036d2 Mon Sep 17 00:00:00 2001 From: Jianfeng Wang Date: Thu, 30 Jul 2020 23:49:16 +0000 Subject: [PATCH 1991/2056] tcp: apply a floor of 1 for RTT samples from TCP timestamps For retransmitted packets, TCP needs to resort to using TCP timestamps for computing RTT samples. In the common case where the data and ACK fall in the same 1-millisecond interval, TCP senders with millisecond- granularity TCP timestamps compute a ca_rtt_us of 0. This ca_rtt_us of 0 propagates to rs->rtt_us. This value of 0 can cause performance problems for congestion control modules. For example, in BBR, the zero min_rtt sample can bring the min_rtt and BDP estimate down to 0, reduce snd_cwnd and result in a low throughput. It would be hard to mitigate this with filtering in the congestion control module, because the proper floor to apply would depend on the method of RTT sampling (using timestamp options or internally-saved transmission timestamps). This fix applies a floor of 1 for the RTT sample delta from TCP timestamps, so that seq_rtt_us, ca_rtt_us, and rs->rtt_us will be at least 1 * (USEC_PER_SEC / TCP_TS_HZ). Note that the receiver RTT computation in tcp_rcv_rtt_measure() and min_rtt computation in tcp_update_rtt_min() both already apply a floor of 1 timestamp tick, so this commit makes the code more consistent in avoiding this edge case of a value of 0. Signed-off-by: Jianfeng Wang Signed-off-by: Neal Cardwell Signed-off-by: Eric Dumazet Acked-by: Kevin Yang Acked-by: Yuchung Cheng Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 739da25b0c23..184ea556f50e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2950,6 +2950,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + if (!delta) + delta = 1; seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ); ca_rtt_us = seq_rtt_us; } From 88fab21c691bb1ff164e540735237a385e3afeaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ioana-Ruxandra=20St=C4=83ncioi?= Date: Mon, 3 Aug 2020 07:33:33 +0000 Subject: [PATCH 1992/2056] seg6_iptunnel: Refactor seg6_lwt_headroom out of uapi header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the function seg6_lwt_headroom out of the seg6_iptunnel.h uapi header, because it is only used in seg6_iptunnel.c. Moreover, it is only used in the kernel code, as indicated by the "#ifdef __KERNEL__". Suggested-by: David Miller Signed-off-by: Ioana-Ruxandra Stăncioi Signed-off-by: David S. Miller --- include/uapi/linux/seg6_iptunnel.h | 21 --------------------- net/ipv6/seg6_iptunnel.c | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 09fb608a35ec..eb815e0d0ac3 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -37,25 +37,4 @@ enum { SEG6_IPTUN_MODE_L2ENCAP, }; -#ifdef __KERNEL__ - -static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) -{ - int head = 0; - - switch (tuninfo->mode) { - case SEG6_IPTUN_MODE_INLINE: - break; - case SEG6_IPTUN_MODE_ENCAP: - head = sizeof(struct ipv6hdr); - break; - case SEG6_IPTUN_MODE_L2ENCAP: - return 0; - } - - return ((tuninfo->srh->hdrlen + 1) << 3) + head; -} - -#endif - #endif diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index e0e9f48ab14f..897fa59c47de 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -27,6 +27,23 @@ #include #endif +static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) +{ + int head = 0; + + switch (tuninfo->mode) { + case SEG6_IPTUN_MODE_INLINE: + break; + case SEG6_IPTUN_MODE_ENCAP: + head = sizeof(struct ipv6hdr); + break; + case SEG6_IPTUN_MODE_L2ENCAP: + return 0; + } + + return ((tuninfo->srh->hdrlen + 1) << 3) + head; +} + struct seg6_lwt { struct dst_cache cache; struct seg6_iptunnel_encap tuninfo[]; From 01f4d47a5b55a8b6cc053a7516ad976ad45534ce Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 3 Aug 2020 16:56:47 +0800 Subject: [PATCH 1993/2056] net: stmmac: fix failed to suspend if phy based WOL is enabled With the latest net-next tree, if test suspend/resume after enabling WOL, we get error as below: [ 487.086365] dpm_run_callback(): mdio_bus_suspend+0x0/0x30 returns -16 [ 487.086375] PM: Device stmmac-0:00 failed to suspend: error -16 -16 means -EBUSY, this is because I didn't enable wakeup of the correct device when implementing phy based WOL feature. To be honest, I caught the issue when implementing phy based WOL and then fix it locally, but forgot to amend the phy based wol patch. Today, I found the issue by testing net-next tree. Signed-off-by: Jisheng Zhang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 05d63963fdb7..ac5e8cc5fb9f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -625,7 +625,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) int ret = phylink_ethtool_set_wol(priv->phylink, wol); if (!ret) - device_set_wakeup_enable(&dev->dev, !!wol->wolopts); + device_set_wakeup_enable(priv->device, !!wol->wolopts); return ret; } From 190f8b060ee38fcea885e08b2fe0e3fdd428a618 Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 3 Aug 2020 21:00:44 +0800 Subject: [PATCH 1994/2056] mptcp: use mptcp_for_each_subflow in mptcp_stream_accept Use mptcp_for_each_subflow in mptcp_stream_accept instead of open-coding. Signed-off-by: Geliang Tang Acked-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index d3fe7296e1c9..400824eabf73 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2249,7 +2249,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, * This is needed so NOSPACE flag can be set from tcp stack. */ __mptcp_flush_join_list(msk); - list_for_each_entry(subflow, &msk->conn_list, node) { + mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (!ssk->sk_socket) From 80fbbb1672e7815a15d7329638537961d65c453d Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Mon, 3 Aug 2020 21:19:48 +0800 Subject: [PATCH 1995/2056] fib: Fix undef compile warning net/core/fib_rules.c:26:7: warning: "CONFIG_IP_MULTIPLE_TABLES" is not defined, evaluates to 0 [-Wundef] #elif CONFIG_IP_MULTIPLE_TABLES ^~~~~~~~~~~~~~~~~~~~~~~~~ Fixes: 8b66a6fd34f5 ("fib: fix another fib_rules_ops indirect call wrapper problem") Signed-off-by: YueHaibing Acked-By: Brian Vazquez Signed-off-by: David S. Miller --- net/core/fib_rules.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index a7a3f500a857..51678a528f85 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -23,7 +23,7 @@ #else #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__) #endif -#elif CONFIG_IP_MULTIPLE_TABLES +#elif defined(CONFIG_IP_MULTIPLE_TABLES) #define INDIRECT_CALL_MT(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) #else #define INDIRECT_CALL_MT(f, f2, f1, ...) f(__VA_ARGS__) From 08e335f6ad35a019f4cb1a74badc2f4bceb63bcf Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Mon, 3 Aug 2020 19:11:33 +0300 Subject: [PATCH 1996/2056] devlink: Add early_drop trap Add the packet trap that can report packets that were ECN marked due to RED AQM. Signed-off-by: Amit Cohen Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- Documentation/networking/devlink/devlink-trap.rst | 4 ++++ include/net/devlink.h | 3 +++ net/core/devlink.c | 1 + 3 files changed, 8 insertions(+) diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst index 2014307fbe63..7a798352b45d 100644 --- a/Documentation/networking/devlink/devlink-trap.rst +++ b/Documentation/networking/devlink/devlink-trap.rst @@ -405,6 +405,10 @@ be added to the following table: - ``control`` - Traps packets logged during processing of flow action trap (e.g., via tc's trap action) + * - ``early_drop`` + - ``drop`` + - Traps packets dropped due to the RED (Random Early Detection) algorithm + (i.e., early drops) Driver-specific Packet Traps ============================ diff --git a/include/net/devlink.h b/include/net/devlink.h index 0606967cb501..fd3ae0760492 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -703,6 +703,7 @@ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE, DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP, + DEVLINK_TRAP_GENERIC_ID_EARLY_DROP, /* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@ -891,6 +892,8 @@ enum devlink_trap_group_generic_id { "flow_action_sample" #define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \ "flow_action_trap" +#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \ + "early_drop" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" diff --git a/net/core/devlink.c b/net/core/devlink.c index 5fdebb7289e9..bde4c29a30bc 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -8801,6 +8801,7 @@ static const struct devlink_trap devlink_trap_generic[] = { DEVLINK_TRAP(PTP_GENERAL, CONTROL), DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL), DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL), + DEVLINK_TRAP(EARLY_DROP, DROP), }; #define DEVLINK_TRAP_GROUP(_id) \ From c88e11e04716ab4ed51d5972ea04c7b70b6e9d8a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:34 +0300 Subject: [PATCH 1997/2056] devlink: Pass extack when setting trap's action and group's parameters A later patch will refuse to set the action of certain traps in mlxsw and also to change the policer binding of certain groups. Pass extack so that failure could be communicated clearly to user space. Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 10 ++++++---- drivers/net/ethernet/mellanox/mlxsw/core.h | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 13 ++++++++----- drivers/net/netdevsim/dev.c | 6 ++++-- include/net/devlink.h | 6 ++++-- net/core/devlink.c | 8 +++++--- 7 files changed, 35 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 866381e72960..08d101138fbe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1177,14 +1177,15 @@ static void mlxsw_devlink_trap_fini(struct devlink *devlink, static int mlxsw_devlink_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; if (!mlxsw_driver->trap_action_set) return -EOPNOTSUPP; - return mlxsw_driver->trap_action_set(mlxsw_core, trap, action); + return mlxsw_driver->trap_action_set(mlxsw_core, trap, action, extack); } static int @@ -1202,14 +1203,15 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink, static int mlxsw_devlink_trap_group_set(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; if (!mlxsw_driver->trap_group_set) return -EOPNOTSUPP; - return mlxsw_driver->trap_group_set(mlxsw_core, group, policer); + return mlxsw_driver->trap_group_set(mlxsw_core, group, policer, extack); } static int diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c1c1e039323a..219ce89e629a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -326,12 +326,14 @@ struct mlxsw_driver { const struct devlink_trap *trap, void *trap_ctx); int (*trap_action_set)(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); int (*trap_group_init)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group); int (*trap_group_set)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); int (*trap_policer_init)(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer); void (*trap_policer_fini)(struct mlxsw_core *mlxsw_core, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6ab1b6d725af..866a1193f12b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -1177,12 +1177,14 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, void *trap_ctx); int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, const struct 
devlink_trap_group *group); int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); int mlxsw_sp_trap_policer_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 1e38dfe7cf64..00b6cb9d2306 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1352,7 +1352,8 @@ void mlxsw_sp_trap_fini(struct mlxsw_core *mlxsw_core, int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); const struct mlxsw_sp_trap_item *trap_item; @@ -1392,7 +1393,7 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, static int __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - u32 policer_id) + u32 policer_id, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); u16 hw_policer_id = MLXSW_REG_HTGT_INVALID_POLICER; @@ -1422,16 +1423,18 @@ int mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group) { return __mlxsw_sp_trap_group_init(mlxsw_core, group, - group->init_policer_id); + group->init_policer_id, NULL); } int mlxsw_sp_trap_group_set(struct mlxsw_core *mlxsw_core, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { u32 policer_id = policer ? policer->id : 0; - return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id); + return __mlxsw_sp_trap_group_init(mlxsw_core, group, policer_id, + extack); } static int diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index ce719c830a77..32f339fedb21 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -810,7 +810,8 @@ static int nsim_dev_devlink_trap_init(struct devlink *devlink, static int nsim_dev_devlink_trap_action_set(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action) + enum devlink_trap_action action, + struct netlink_ext_ack *extack) { struct nsim_dev *nsim_dev = devlink_priv(devlink); struct nsim_trap_item *nsim_trap_item; @@ -829,7 +830,8 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink, static int nsim_dev_devlink_trap_group_set(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer) + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack) { struct nsim_dev *nsim_dev = devlink_priv(devlink); diff --git a/include/net/devlink.h b/include/net/devlink.h index fd3ae0760492..8f3c8a443238 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1077,7 +1077,8 @@ struct devlink_ops { */ int (*trap_action_set)(struct devlink *devlink, const struct devlink_trap *trap, - enum devlink_trap_action action); + enum devlink_trap_action action, + struct netlink_ext_ack *extack); /** * @trap_group_init: Trap group initialization function. 
* @@ -1094,7 +1095,8 @@ struct devlink_ops { */ int (*trap_group_set)(struct devlink *devlink, const struct devlink_trap_group *group, - const struct devlink_trap_policer *policer); + const struct devlink_trap_policer *policer, + struct netlink_ext_ack *extack); /** * @trap_policer_init: Trap policer initialization function. * diff --git a/net/core/devlink.c b/net/core/devlink.c index bde4c29a30bc..e674f0f46dc2 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -6423,7 +6423,7 @@ static int __devlink_trap_action_set(struct devlink *devlink, } err = devlink->ops->trap_action_set(devlink, trap_item->trap, - trap_action); + trap_action, extack); if (err) return err; @@ -6713,7 +6713,8 @@ static int devlink_trap_group_set(struct devlink *devlink, } policer = policer_item ? policer_item->policer : NULL; - err = devlink->ops->trap_group_set(devlink, group_item->group, policer); + err = devlink->ops->trap_group_set(devlink, group_item->group, policer, + extack); if (err) return err; @@ -9051,7 +9052,8 @@ static void devlink_trap_disable(struct devlink *devlink, if (WARN_ON_ONCE(!trap_item)) return; - devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP); + devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP, + NULL); trap_item->action = DEVLINK_TRAP_ACTION_DROP; } From 76ba292cc7d7b760ee3e25aba353fb7ab86e64d3 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:35 +0300 Subject: [PATCH 1998/2056] mlxsw: spectrum_trap: Use 'size_t' for array sizes Use 'size_t' instead of 'u64' for array sizes, as this this is correct type to use for expressions involving sizeof(). Suggested-by: Petr Machata Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 00b6cb9d2306..47bc11a861cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1063,10 +1063,10 @@ static int mlxsw_sp_trap_dummy_group_init(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_trap_policer_items_arr_init(struct mlxsw_sp *mlxsw_sp) { + size_t arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr); size_t elem_size = sizeof(struct mlxsw_sp_trap_policer_item); - u64 arr_size = ARRAY_SIZE(mlxsw_sp_trap_policer_items_arr); struct mlxsw_sp_trap *trap = mlxsw_sp->trap; - u64 free_policers = 0; + size_t free_policers = 0; u32 last_id; int i; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h index 13ac412f4d53..a0560fb030ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h @@ -9,13 +9,13 @@ struct mlxsw_sp_trap { struct mlxsw_sp_trap_policer_item *policer_items_arr; - u64 policers_count; /* Number of registered policers */ + size_t policers_count; /* Number of registered policers */ struct mlxsw_sp_trap_group_item *group_items_arr; - u64 groups_count; /* Number of registered groups */ + size_t groups_count; /* Number of registered groups */ struct mlxsw_sp_trap_item *trap_items_arr; - u64 traps_count; /* Number of registered traps */ + size_t traps_count; /* Number of registered traps */ u16 thin_policer_hw_id; From 928345c08b72dd175d4eefa24900f09706a9a3b5 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 3 Aug 2020 19:11:36 +0300 Subject: [PATCH 1999/2056] mlxsw: spectrum_span: On policer_id_base_ref_count, use dec_and_test When unsetting policer base, the SPAN code currently uses refcount_dec(). However that function splats when the counter reaches zero, because reaching zero without actually testing is in general indicative of a missing cleanup. There is no cleanup to be done here, but nonetheless, use refcount_dec_and_test() as required. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index 323eaf979aea..5c959a995199 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -837,7 +837,8 @@ static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span, static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span) { - refcount_dec(&span->policer_id_base_ref_count); + if (refcount_dec_and_test(&span->policer_id_base_ref_count)) + span->policer_id_base = 0; } static struct mlxsw_sp_span_entry * From 36d1fd687d56d807cc65210f14712349919c13e8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:37 +0300 Subject: [PATCH 2000/2056] mlxsw: spectrum_trap: Allow for per-ASIC trap groups initialization Subsequent patches will need to register different trap groups for Spectrum-1 and Spectrum-2 onwards. 
Enable that by invoking a per-ASIC operation during trap groups initialization. Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.c | 3 + .../net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../ethernet/mellanox/mlxsw/spectrum_trap.c | 89 +++++++++++++++++-- .../ethernet/mellanox/mlxsw/spectrum_trap.h | 9 ++ 4 files changed, 93 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 519eb44e4097..fdf9aa8314b2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3055,6 +3055,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp1_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops; + mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops; mlxsw_sp->listeners = mlxsw_sp1_listener; mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener); mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1; @@ -3084,6 +3085,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp2_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; + mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); @@ -3111,6 +3113,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops; mlxsw_sp->span_ops = &mlxsw_sp3_span_ops; mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops; + mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops; mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 866a1193f12b..b808f6b4d670 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -177,6 +177,7 @@ struct mlxsw_sp { const struct mlxsw_sp_ptp_ops *ptp_ops; const struct mlxsw_sp_span_ops *span_ops; const struct mlxsw_sp_policer_core_ops *policer_core_ops; + const struct mlxsw_sp_trap_ops *trap_ops; const struct mlxsw_listener *listeners; size_t listeners_count; u32 lowest_shaper_bs; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 47bc11a861cc..3726be5c02b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1159,6 +1159,43 @@ static void mlxsw_sp_trap_policers_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_trap_policer_items_arr_fini(mlxsw_sp); } +static int mlxsw_sp_trap_group_items_arr_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t common_groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr); + const struct mlxsw_sp_trap_group_item *spec_group_items_arr; + size_t elem_size = sizeof(struct mlxsw_sp_trap_group_item); + struct mlxsw_sp_trap *trap = mlxsw_sp->trap; + size_t groups_count, spec_groups_count; + int err; + + err = mlxsw_sp->trap_ops->groups_init(mlxsw_sp, &spec_group_items_arr, + &spec_groups_count); + if (err) + return err; + + /* The group items array is created by concatenating the common trap + * group items and the ASIC-specific trap group items. 
+ */ + groups_count = common_groups_count + spec_groups_count; + trap->group_items_arr = kcalloc(groups_count, elem_size, GFP_KERNEL); + if (!trap->group_items_arr) + return -ENOMEM; + + memcpy(trap->group_items_arr, mlxsw_sp_trap_group_items_arr, + elem_size * common_groups_count); + memcpy(trap->group_items_arr + common_groups_count, + spec_group_items_arr, elem_size * spec_groups_count); + + trap->groups_count = groups_count; + + return 0; +} + +static void mlxsw_sp_trap_group_items_arr_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->trap->group_items_arr); +} + static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); @@ -1166,13 +1203,9 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_trap *trap = mlxsw_sp->trap; int err, i; - trap->group_items_arr = kmemdup(mlxsw_sp_trap_group_items_arr, - sizeof(mlxsw_sp_trap_group_items_arr), - GFP_KERNEL); - if (!trap->group_items_arr) - return -ENOMEM; - - trap->groups_count = ARRAY_SIZE(mlxsw_sp_trap_group_items_arr); + err = mlxsw_sp_trap_group_items_arr_init(mlxsw_sp); + if (err) + return err; for (i = 0; i < trap->groups_count; i++) { group_item = &trap->group_items_arr[i]; @@ -1189,7 +1222,7 @@ static int mlxsw_sp_trap_groups_init(struct mlxsw_sp *mlxsw_sp) group_item = &trap->group_items_arr[i]; devlink_trap_groups_unregister(devlink, &group_item->group, 1); } - kfree(trap->group_items_arr); + mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp); return err; } @@ -1205,7 +1238,7 @@ static void mlxsw_sp_trap_groups_fini(struct mlxsw_sp *mlxsw_sp) group_item = &trap->group_items_arr[i]; devlink_trap_groups_unregister(devlink, &group_item->group, 1); } - kfree(trap->group_items_arr); + mlxsw_sp_trap_group_items_arr_fini(mlxsw_sp); } static bool @@ -1579,3 +1612,41 @@ mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core, return 0; } + +static const struct mlxsw_sp_trap_group_item +mlxsw_sp1_trap_group_items_arr[] = { +}; + +static int +mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_group_item **arr, + size_t *p_groups_count) +{ + *arr = mlxsw_sp1_trap_group_items_arr; + *p_groups_count = ARRAY_SIZE(mlxsw_sp1_trap_group_items_arr); + + return 0; +} + +const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = { + .groups_init = mlxsw_sp1_trap_groups_init, +}; + +static const struct mlxsw_sp_trap_group_item +mlxsw_sp2_trap_group_items_arr[] = { +}; + +static int +mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_group_item **arr, + size_t *p_groups_count) +{ + *arr = mlxsw_sp2_trap_group_items_arr; + *p_groups_count = ARRAY_SIZE(mlxsw_sp2_trap_group_items_arr); + + return 0; +} + +const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops = { + .groups_init = mlxsw_sp2_trap_groups_init, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h index a0560fb030ee..4ae5212b9a48 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h @@ -23,4 +23,13 @@ struct mlxsw_sp_trap { unsigned long policers_usage[]; /* Usage bitmap */ }; +struct mlxsw_sp_trap_ops { + int (*groups_init)(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_group_item **arr, + size_t *p_groups_count); +}; + +extern const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops; +extern const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops; + #endif From 869c7be940e50aa14845fcdb7a9d67c18be7e27a Mon Sep 17 
00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:38 +0300 Subject: [PATCH 2001/2056] mlxsw: spectrum_trap: Allow for per-ASIC traps initialization Subsequent patches will need to register different traps for Spectrum-1 and Spectrum-2 onwards. Enable that by invoking a per-ASIC operation during traps initialization. Reviewed-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_trap.c | 81 ++++++++++++++++--- .../ethernet/mellanox/mlxsw/spectrum_trap.h | 3 + 2 files changed, 75 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 3726be5c02b4..93dd88abbe23 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1247,6 +1247,43 @@ mlxsw_sp_trap_listener_is_valid(const struct mlxsw_listener *listener) return listener->trap_id != 0; } +static int mlxsw_sp_trap_items_arr_init(struct mlxsw_sp *mlxsw_sp) +{ + size_t common_traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr); + const struct mlxsw_sp_trap_item *spec_trap_items_arr; + size_t elem_size = sizeof(struct mlxsw_sp_trap_item); + struct mlxsw_sp_trap *trap = mlxsw_sp->trap; + size_t traps_count, spec_traps_count; + int err; + + err = mlxsw_sp->trap_ops->traps_init(mlxsw_sp, &spec_trap_items_arr, + &spec_traps_count); + if (err) + return err; + + /* The trap items array is created by concatenating the common trap + * items and the ASIC-specific trap items. + */ + traps_count = common_traps_count + spec_traps_count; + trap->trap_items_arr = kcalloc(traps_count, elem_size, GFP_KERNEL); + if (!trap->trap_items_arr) + return -ENOMEM; + + memcpy(trap->trap_items_arr, mlxsw_sp_trap_items_arr, + elem_size * common_traps_count); + memcpy(trap->trap_items_arr + common_traps_count, + spec_trap_items_arr, elem_size * spec_traps_count); + + trap->traps_count = traps_count; + + return 0; +} + +static void mlxsw_sp_trap_items_arr_fini(struct mlxsw_sp *mlxsw_sp) +{ + kfree(mlxsw_sp->trap->trap_items_arr); +} + static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); @@ -1254,13 +1291,9 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_trap_item *trap_item; int err, i; - trap->trap_items_arr = kmemdup(mlxsw_sp_trap_items_arr, - sizeof(mlxsw_sp_trap_items_arr), - GFP_KERNEL); - if (!trap->trap_items_arr) - return -ENOMEM; - - trap->traps_count = ARRAY_SIZE(mlxsw_sp_trap_items_arr); + err = mlxsw_sp_trap_items_arr_init(mlxsw_sp); + if (err) + return err; for (i = 0; i < trap->traps_count; i++) { trap_item = &trap->trap_items_arr[i]; @@ -1277,7 +1310,7 @@ static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) trap_item = &trap->trap_items_arr[i]; devlink_traps_unregister(devlink, &trap_item->trap, 1); } - kfree(trap->trap_items_arr); + mlxsw_sp_trap_items_arr_fini(mlxsw_sp); return err; } @@ -1293,7 +1326,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) trap_item = &trap->trap_items_arr[i]; devlink_traps_unregister(devlink, &trap_item->trap, 1); } - kfree(trap->trap_items_arr); + mlxsw_sp_trap_items_arr_fini(mlxsw_sp); } int mlxsw_sp_devlink_traps_init(struct mlxsw_sp *mlxsw_sp) @@ -1617,6 +1650,10 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp1_trap_group_items_arr[] = { }; +static const struct mlxsw_sp_trap_item +mlxsw_sp1_trap_items_arr[] = { 
+}; + static int mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_trap_group_item **arr, @@ -1628,14 +1665,29 @@ mlxsw_sp1_trap_groups_init(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp1_traps_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_item **arr, + size_t *p_traps_count) +{ + *arr = mlxsw_sp1_trap_items_arr; + *p_traps_count = ARRAY_SIZE(mlxsw_sp1_trap_items_arr); + + return 0; +} + const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = { .groups_init = mlxsw_sp1_trap_groups_init, + .traps_init = mlxsw_sp1_traps_init, }; static const struct mlxsw_sp_trap_group_item mlxsw_sp2_trap_group_items_arr[] = { }; +static const struct mlxsw_sp_trap_item +mlxsw_sp2_trap_items_arr[] = { +}; + static int mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_trap_group_item **arr, @@ -1647,6 +1699,17 @@ mlxsw_sp2_trap_groups_init(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp2_traps_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_item **arr, + size_t *p_traps_count) +{ + *arr = mlxsw_sp2_trap_items_arr; + *p_traps_count = ARRAY_SIZE(mlxsw_sp2_trap_items_arr); + + return 0; +} + const struct mlxsw_sp_trap_ops mlxsw_sp2_trap_ops = { .groups_init = mlxsw_sp2_trap_groups_init, + .traps_init = mlxsw_sp2_traps_init, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h index 4ae5212b9a48..b8df684bedef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.h @@ -27,6 +27,9 @@ struct mlxsw_sp_trap_ops { int (*groups_init)(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_trap_group_item **arr, size_t *p_groups_count); + int (*traps_init)(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_trap_item **arr, + size_t *p_traps_count); }; extern const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops; From 6687e953f44fdb4683adb899a963adba45cffa5d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 3 Aug 2020 19:11:39 +0300 Subject: [PATCH 2002/2056] mlxsw: spectrum_trap: Add early_drop trap As previously explained, packets that are dropped due to buffer related reasons (e.g., tail drop, early drop) can be mirrored to the CPU port. These packets are then trapped with one of the "mirror session" traps and their CQE includes the reason for which the packet was mirrored. Register with devlink a new trap, early_drop, and initialize the corresponding Rx listener with the appropriate mirror reason. Return an error in case user tries to change the traps' action, as this is not supported. Since Spectrum-1 does not support these traps, the above is only done for Spectrum-2 onwards. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.h | 13 ++++-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1 + .../ethernet/mellanox/mlxsw/spectrum_trap.c | 42 +++++++++++++++++++ 3 files changed, 53 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 219ce89e629a..11af3308f8cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -89,13 +89,15 @@ struct mlxsw_listener { }; #define __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \ - _dis_action, _enabled_on_register, _dis_trap_group) \ + _dis_action, _enabled_on_register, _dis_trap_group, \ + _mirror_reason) \ { \ .trap_id = MLXSW_TRAP_ID_##_trap_id, \ .rx_listener = \ { \ .func = _func, \ .local_port = MLXSW_PORT_DONT_CARE, \ + .mirror_reason = _mirror_reason, \ .trap_id = MLXSW_TRAP_ID_##_trap_id, \ }, \ .en_action = MLXSW_REG_HPKT_ACTION_##_en_action, \ @@ -109,12 +111,17 @@ struct mlxsw_listener { #define MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \ _dis_action) \ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _trap_group, \ - _dis_action, true, _trap_group) + _dis_action, true, _trap_group, 0) #define MLXSW_RXL_DIS(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \ _dis_action, _dis_trap_group) \ __MLXSW_RXL(_func, _trap_id, _en_action, _is_ctrl, _en_trap_group, \ - _dis_action, false, _dis_trap_group) + _dis_action, false, _dis_trap_group, 0) + +#define MLXSW_RXL_MIRROR(_func, _session_id, _trap_group, _mirror_reason) \ + __MLXSW_RXL(_func, MIRROR_SESSION##_session_id, TRAP_TO_CPU, false, \ + _trap_group, TRAP_TO_CPU, true, _trap_group, \ + _mirror_reason) #define MLXSW_EVENTL(_func, _trap_id, _trap_group) \ { \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 28a2576eb783..079b080de7f7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5614,6 +5614,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_L3_EXCEPTIONS, MLXSW_REG_HTGT_TRAP_GROUP_SP_TUNNEL_DISCARDS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ACL_DISCARDS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS, __MLXSW_REG_HTGT_TRAP_GROUP_MAX, MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 93dd88abbe23..16bf154076b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -21,6 +21,7 @@ struct mlxsw_sp_trap_group_item { struct devlink_trap_group group; u16 hw_group_id; u8 priority; + u8 fixed_policer:1; /* Whether policer binding can change */ }; #define MLXSW_SP_TRAP_LISTENERS_MAX 3 @@ -28,6 +29,7 @@ struct mlxsw_sp_trap_group_item { struct mlxsw_sp_trap_item { struct devlink_trap trap; struct mlxsw_listener listeners_arr[MLXSW_SP_TRAP_LISTENERS_MAX]; + u8 is_source:1; }; /* All driver-specific traps must be documented in @@ -46,6 +48,11 @@ enum { #define MLXSW_SP_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT +enum { + /* Packet was early dropped. 
*/ + MLXSW_SP_MIRROR_REASON_INGRESS_WRED = 9, +}; + static int mlxsw_sp_rx_listener(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb, u8 local_port, struct mlxsw_sp_port *mlxsw_sp_port) @@ -222,6 +229,11 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ MLXSW_SP_TRAP_METADATA | (_metadata)) +#define MLXSW_SP_TRAP_BUFFER_DROP(_id) \ + DEVLINK_TRAP_GENERIC(DROP, TRAP, _id, \ + DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, \ + MLXSW_SP_TRAP_METADATA) + #define MLXSW_SP_TRAP_DRIVER_DROP(_id, _group_id) \ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_MLXSW_TRAP_ID_##_id, \ DEVLINK_MLXSW_TRAP_NAME_##_id, \ @@ -248,6 +260,10 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port, TRAP_EXCEPTION_TO_CPU, false, SP_##_en_group_id, \ SET_FW_DEFAULT, SP_##_dis_group_id) +#define MLXSW_SP_RXL_BUFFER_DISCARD(_mirror_reason) \ + MLXSW_RXL_MIRROR(mlxsw_sp_rx_drop_listener, 0, SP_BUFFER_DISCARDS, \ + MLXSW_SP_MIRROR_REASON_##_mirror_reason) + #define MLXSW_SP_RXL_EXCEPTION(_id, _group_id, _action) \ MLXSW_RXL(mlxsw_sp_rx_mark_listener, _id, \ _action, false, SP_##_group_id, SET_FW_DEFAULT) @@ -331,6 +347,9 @@ mlxsw_sp_trap_policer_items_arr[] = { { .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512), }, + { + .policer = MLXSW_SP_TRAP_POLICER(20, 10240, 4096), + }, }; static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = { @@ -1429,6 +1448,11 @@ int mlxsw_sp_trap_action_set(struct mlxsw_core *mlxsw_core, if (WARN_ON(!trap_item)) return -EINVAL; + if (trap_item->is_source) { + NL_SET_ERR_MSG_MOD(extack, "Changing the action of source traps is not supported"); + return -EOPNOTSUPP; + } + for (i = 0; i < MLXSW_SP_TRAP_LISTENERS_MAX; i++) { const struct mlxsw_listener *listener; bool enabled; @@ -1470,6 +1494,11 @@ __mlxsw_sp_trap_group_init(struct mlxsw_core *mlxsw_core, if (WARN_ON(!group_item)) return -EINVAL; + if (group_item->fixed_policer && policer_id != group->init_policer_id) { + NL_SET_ERR_MSG_MOD(extack, "Changing the policer binding of this group is not supported"); + return -EOPNOTSUPP; + } + if (policer_id) { struct mlxsw_sp_trap_policer_item *policer_item; @@ -1682,10 +1711,23 @@ const struct mlxsw_sp_trap_ops mlxsw_sp1_trap_ops = { static const struct mlxsw_sp_trap_group_item mlxsw_sp2_trap_group_items_arr[] = { + { + .group = DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 20), + .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_BUFFER_DISCARDS, + .priority = 0, + .fixed_policer = true, + }, }; static const struct mlxsw_sp_trap_item mlxsw_sp2_trap_items_arr[] = { + { + .trap = MLXSW_SP_TRAP_BUFFER_DROP(EARLY_DROP), + .listeners_arr = { + MLXSW_SP_RXL_BUFFER_DISCARD(INGRESS_WRED), + }, + .is_source = true, + }, }; static int From 54a9238589c533690c241fd3667a67d376d0a9cf Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 3 Aug 2020 19:11:40 +0300 Subject: [PATCH 2003/2056] mlxsw: spectrum_qdisc: Offload action trap for qevents When offloading action trap on a qevent, pass to_dev of NULL to the SPAN module to trigger the mirror to the CPU port. Query the buffer drops policer and use it for policing of the trapped traffic. Signed-off-by: Petr Machata Reviewed-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum.h | 7 ++ .../ethernet/mellanox/mlxsw/spectrum_qdisc.c | 75 +++++++++++++++---- .../ethernet/mellanox/mlxsw/spectrum_trap.c | 26 +++++++ 3 files changed, 95 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index b808f6b4d670..f9ba59641b4d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -984,6 +984,10 @@ struct mlxsw_sp_mall_mirror_entry { int span_id; }; +struct mlxsw_sp_mall_trap_entry { + int span_id; +}; + struct mlxsw_sp_mall_entry { struct list_head list; unsigned long cookie; @@ -992,6 +996,7 @@ struct mlxsw_sp_mall_entry { bool ingress; union { struct mlxsw_sp_mall_mirror_entry mirror; + struct mlxsw_sp_mall_trap_entry trap; struct mlxsw_sp_port_sample sample; }; struct rcu_head rcu; @@ -1199,6 +1204,8 @@ int mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core, const struct devlink_trap_policer *policer, u64 *p_drops); +int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id, + bool *p_enabled, u16 *p_hw_id); static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index a5ce1eec5418..964fd444bb10 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -1289,19 +1289,18 @@ struct mlxsw_sp_qevent_binding { static LIST_HEAD(mlxsw_sp_qevent_block_cb_list); -static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mall_entry *mall_entry, - struct mlxsw_sp_qevent_binding *qevent_binding) +static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding, + const struct mlxsw_sp_span_agent_parms *agent_parms, + int *p_span_id) { struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = {}; - struct mlxsw_sp_span_agent_parms agent_parms = { - .to_dev = mall_entry->mirror.to_dev, - }; int span_id; int err; - err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms); + err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms); if (err) return err; @@ -1320,7 +1319,7 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, if (err) goto err_trigger_enable; - mall_entry->mirror.span_id = span_id; + *p_span_id = span_id; return 0; err_trigger_enable: @@ -1333,13 +1332,13 @@ static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, return err; } -static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mall_entry *mall_entry, - struct mlxsw_sp_qevent_binding *qevent_binding) +static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_qevent_binding *qevent_binding, + int span_id) { struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port; struct mlxsw_sp_span_trigger_parms trigger_parms = { - .span_id = mall_entry->mirror.span_id, + .span_id = span_id, }; mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger, @@ -1347,7 +1346,51 @@ static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port, &trigger_parms); 
mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true); - mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id); + mlxsw_sp_span_agent_put(mlxsw_sp, span_id); +} + +static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_span_agent_parms agent_parms = { + .to_dev = mall_entry->mirror.to_dev, + }; + + return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding, + &agent_parms, &mall_entry->mirror.span_id); +} + +static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id); +} + +static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + struct mlxsw_sp_span_agent_parms agent_parms = {}; + int err; + + err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp, + DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, + &agent_parms.policer_enable, + &agent_parms.policer_id); + if (err) + return err; + + return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding, + &agent_parms, &mall_entry->trap.span_id); +} + +static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mall_entry *mall_entry, + struct mlxsw_sp_qevent_binding *qevent_binding) +{ + mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id); } static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, @@ -1357,6 +1400,8 @@ static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp, switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding); + case MLXSW_SP_MALL_ACTION_TYPE_TRAP: + return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding); default: /* This should have been validated away. 
*/ WARN_ON(1); @@ -1371,6 +1416,8 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp, switch (mall_entry->type) { case MLXSW_SP_MALL_ACTION_TYPE_MIRROR: return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding); + case MLXSW_SP_MALL_ACTION_TYPE_TRAP: + return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding); default: WARN_ON(1); return; @@ -1490,6 +1537,8 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp, if (act->id == FLOW_ACTION_MIRRED) { mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR; mall_entry->mirror.to_dev = act->dev; + } else if (act->id == FLOW_ACTION_TRAP) { + mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP; } else { NL_SET_ERR_MSG(f->common.extack, "Unsupported action"); err = -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 16bf154076b3..2e41c5519c1b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -1675,6 +1675,32 @@ mlxsw_sp_trap_policer_counter_get(struct mlxsw_core *mlxsw_core, return 0; } +int mlxsw_sp_trap_group_policer_hw_id_get(struct mlxsw_sp *mlxsw_sp, u16 id, + bool *p_enabled, u16 *p_hw_id) +{ + struct mlxsw_sp_trap_policer_item *pol_item; + struct mlxsw_sp_trap_group_item *gr_item; + u32 pol_id; + + gr_item = mlxsw_sp_trap_group_item_lookup(mlxsw_sp, id); + if (!gr_item) + return -ENOENT; + + pol_id = gr_item->group.init_policer_id; + if (!pol_id) { + *p_enabled = false; + return 0; + } + + pol_item = mlxsw_sp_trap_policer_item_lookup(mlxsw_sp, pol_id); + if (WARN_ON(!pol_item)) + return -ENOENT; + + *p_enabled = true; + *p_hw_id = pol_item->hw_id; + return 0; +} + static const struct mlxsw_sp_trap_group_item mlxsw_sp1_trap_group_items_arr[] = { }; From 8fb6ac457d5be8516eeed2b92342f7f95c448275 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 3 Aug 2020 19:11:41 +0300 Subject: [PATCH 2004/2056] selftests: mlxsw: RED: Test offload of trapping on RED qevents Add a selftest for RED early_drop and mark qevents when a trap action is attached at the associated block. Signed-off-by: Petr Machata Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- .../drivers/net/mlxsw/sch_red_core.sh | 35 +++++++++++++++---- .../drivers/net/mlxsw/sch_red_ets.sh | 11 ++++++ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh index 45042105ead7..517297a14ecf 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh @@ -568,17 +568,12 @@ do_drop_test() busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null check_fail $? "Spurious packets observed without buffer pressure" - qevent_rule_uninstall_$subtest - # Push to the queue until it's at the limit. The configured limit is # rounded by the qdisc and then by the driver, so this is the best we - # can do to get to the real limit of the system. Do this with the rules - # uninstalled so that the inevitable drops don't get counted. + # can do to get to the real limit of the system. 
build_backlog $vlan $((3 * limit / 2)) udp >/dev/null - qevent_rule_install_$subtest base=$($fetch_counter) - send_packets $vlan udp 11 now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter) @@ -631,3 +626,31 @@ do_drop_mirror_test() tc filter del dev $h2 ingress pref 1 handle 101 flower } + +qevent_rule_install_trap() +{ + tc filter add block 10 pref 1234 handle 102 matchall skip_sw \ + action trap hw_stats disabled +} + +qevent_rule_uninstall_trap() +{ + tc filter del block 10 pref 1234 handle 102 matchall +} + +qevent_counter_fetch_trap() +{ + local trap_name=$1; shift + + devlink_trap_rx_packets_get "$trap_name" +} + +do_drop_trap_test() +{ + local vlan=$1; shift + local limit=$1; shift + local trap_name=$1; shift + + do_drop_test "$vlan" "$limit" "$trap_name" trap \ + "qevent_counter_fetch_trap $trap_name" +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh index c8968b041bea..3f007c5f8361 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh @@ -8,6 +8,7 @@ ALL_TESTS=" red_test mc_backlog_test red_mirror_test + red_trap_test " : ${QDISC:=ets} source sch_red_core.sh @@ -94,6 +95,16 @@ red_mirror_test() uninstall_qdisc } +red_trap_test() +{ + install_qdisc qevent early_drop block 10 + + do_drop_trap_test 10 $BACKLOG1 early_drop + do_drop_trap_test 11 $BACKLOG2 early_drop + + uninstall_qdisc +} + trap cleanup EXIT setup_prepare From 8555c6bfd5fddb1cf363d3cd157d70a1bb27f718 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Mon, 3 Aug 2020 18:40:39 +0200 Subject: [PATCH 2005/2056] mptcp: fix bogus sendmsg() return code under pressure In case of memory pressure, mptcp_sendmsg() may call sk_stream_wait_memory() after succesfully xmitting some bytes. If the latter fails we currently return to the user-space the error code, ignoring the succeful xmit. Address the issue always checking for the xmitted bytes before mptcp_sendmsg() completes. Fixes: f296234c98a8 ("mptcp: Add handling of incoming MP_JOIN requests") Reviewed-by: Matthieu Baerts Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/mptcp/protocol.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 400824eabf73..8c1d1a595701 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -984,7 +984,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) mptcp_set_timeout(sk, ssk); if (copied) { - ret = copied; tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal); @@ -997,7 +996,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) release_sock(ssk); out: release_sock(sk); - return ret; + return copied ? : ret; } static void mptcp_wait_data(struct sock *sk, long *timeo) From af9fdd2bf8b135bcef0cd1872ab67124e9e1a3a9 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 3 Aug 2020 20:51:58 +0300 Subject: [PATCH 2006/2056] net: dsa: sja1105: poll for extts events from a timer The current poll interval is enough to ensure that rising and falling edge events are not lost for a 1 PPS signal with 50% duty cycle. But when we deliver the events to user space, it will try to infer if they were corresponding to a rising or to a falling edge (the kernel driver doesn't know that either). User space will try to make that inference based on the time at which the PPS master had emitted the pulse (i.e. 
if it's a .0 time, it's rising edge, if it's .5 time, it's falling edge). But there is no in-kernel API for retrieving the precise timestamp corresponding to a PPS master (aka perout) pulse. So user space has to guess even that. It will read the PTP time on the PPS master right after we've delivered the extts event, and declare that the PPS master time was just the closest integer second, based on 2 thresholds (lower than .25, or higher than .75, and ignore anything else). Except that, if we poll for extts events (and our hardware doesn't really help us, by not providing an interrupt), then there is a risk that the poll period (and therefore the time at which the event is delivered) might confuse user space. Because we are always scheduling the next extts poll at SJA1105_EXTTS_INTERVAL "from now" (that's the only thing that the schedule_delayed_work() API gives us), it means that the start time of the next delayed workqueue will always be shifted to the right a little bit (shifted with the SPI access duration of this workqueue run). In turn, because user space sees extts events that are non-periodic compared to the PPS master's time, this means that it might start making wrong guesses about rising/falling edge. To understand the effect, here is the output of ts2phc currently. Notice the 'src' timestamps of the 'SKIP extts' events, and how they have a large wander. They keep increasing until the upper limit for the ignore threshold (.75 seconds), after which the application starts ignoring the _other_ edge. ts2phc[26.624]: /dev/ptp3 SKIP extts index 0 at 21.449898912 src 21.657784518 ts2phc[27.133]: adding tstamp 21.949894240 to clock /dev/ptp3 ts2phc[27.133]: adding tstamp 22.000000000 to clock /dev/ptp1 ts2phc[27.133]: /dev/ptp3 offset 640 s2 freq +5112 ts2phc[27.636]: /dev/ptp3 SKIP extts index 0 at 22.449889360 src 22.669398022 ts2phc[28.140]: adding tstamp 22.949884376 to clock /dev/ptp3 ts2phc[28.140]: adding tstamp 23.000000000 to clock /dev/ptp1 ts2phc[28.140]: /dev/ptp3 offset 96 s2 freq +4760 ts2phc[28.644]: /dev/ptp3 SKIP extts index 0 at 23.449879504 src 23.677420422 ts2phc[29.153]: adding tstamp 23.949874704 to clock /dev/ptp3 ts2phc[29.153]: adding tstamp 24.000000000 to clock /dev/ptp1 ts2phc[29.153]: /dev/ptp3 offset -264 s2 freq +4429 ts2phc[29.656]: /dev/ptp3 SKIP extts index 0 at 24.449870008 src 24.689407238 ts2phc[30.160]: adding tstamp 24.949865376 to clock /dev/ptp3 ts2phc[30.160]: adding tstamp 25.000000000 to clock /dev/ptp1 ts2phc[30.160]: /dev/ptp3 offset -280 s2 freq +4334 ts2phc[30.664]: /dev/ptp3 SKIP extts index 0 at 25.449860760 src 25.697449926 ts2phc[31.168]: adding tstamp 25.949856176 to clock /dev/ptp3 ts2phc[31.168]: adding tstamp 26.000000000 to clock /dev/ptp1 ts2phc[31.168]: /dev/ptp3 offset -176 s2 freq +4354 ts2phc[31.672]: /dev/ptp3 SKIP extts index 0 at 26.449851584 src 26.705433606 ts2phc[32.180]: adding tstamp 26.949846992 to clock /dev/ptp3 ts2phc[32.180]: adding tstamp 27.000000000 to clock /dev/ptp1 ts2phc[32.180]: /dev/ptp3 offset -80 s2 freq +4397 ts2phc[32.684]: /dev/ptp3 SKIP extts index 0 at 27.449842384 src 27.717415110 ts2phc[33.192]: adding tstamp 27.949837768 to clock /dev/ptp3 ts2phc[33.192]: adding tstamp 28.000000000 to clock /dev/ptp1 ts2phc[33.192]: /dev/ptp3 offset 0 s2 freq +4453 ts2phc[33.696]: /dev/ptp3 SKIP extts index 0 at 28.449833128 src 28.729412902 ts2phc[34.200]: adding tstamp 28.949828472 to clock /dev/ptp3 ts2phc[34.200]: adding tstamp 29.000000000 to clock /dev/ptp1 ts2phc[34.200]: /dev/ptp3 offset 8 s2 freq +4461 
ts2phc[34.704]: /dev/ptp3 SKIP extts index 0 at 29.449823816 src 29.737416038 ts2phc[35.208]: adding tstamp 29.949819152 to clock /dev/ptp3 ts2phc[35.208]: adding tstamp 30.000000000 to clock /dev/ptp1 ts2phc[35.208]: /dev/ptp3 offset -8 s2 freq +4447 ts2phc[35.712]: /dev/ptp3 SKIP extts index 0 at 30.449814496 src 30.745554982 ts2phc[36.216]: adding tstamp 30.949809840 to clock /dev/ptp3 ts2phc[36.216]: adding tstamp 31.000000000 to clock /dev/ptp1 ts2phc[36.216]: /dev/ptp3 offset -8 s2 freq +4445 ts2phc[36.468]: /dev/ptp3 SKIP extts index 0 at 31.449805184 src 31.501109446 ts2phc[36.972]: adding tstamp 31.949800536 to clock /dev/ptp3 ts2phc[36.972]: adding tstamp 32.000000000 to clock /dev/ptp1 ts2phc[36.972]: /dev/ptp3 offset -8 s2 freq +4442 ts2phc[37.480]: /dev/ptp3 SKIP extts index 0 at 32.449795896 src 32.513320070 ts2phc[37.984]: adding tstamp 32.949791248 to clock /dev/ptp3 ts2phc[37.984]: adding tstamp 33.000000000 to clock /dev/ptp1 ts2phc[37.984]: /dev/ptp3 offset 0 s2 freq +4448 Fix that by taking the following measures: - Schedule the poll from a timer. Because we are really scheduling the timer periodically, the extts events delivered to user space are periodic too, and don't suffer from the "shift-to-the-right" effect. - Increase the poll period to 6 times a second. This imposes a smaller upper bound to the shift that can occur to the delivery time of extts events, and makes user space (ts2phc) to always interpret correctly which events should be skipped and which shouldn't. - Move the SPI readout itself to the main PTP kernel thread, instead of the generic workqueue. This is because the timer runs in atomic context, but is also better than before, because if needed, we can chrt & taskset this kernel thread, to ensure it gets enough priority under load. After this patch, one can notice that the wander is greatly reduced, and that the latencies of one extts poll are not propagated to the next. 
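(Illustrative sketch, not part of the patch: the first measure above boils down to re-arming the timer on a fixed grid of SJA1105_EXTTS_INTERVAL jiffies instead of at "now + interval", so each expiry lands on a multiple of the interval no matter how long the previous poll and its SPI access took. POLL_INTERVAL and rearm_extts_poll_timer() below are placeholder names, not identifiers from the driver.)

#include <linux/jiffies.h>
#include <linux/timer.h>

#define POLL_INTERVAL (HZ / 6)	/* stands in for SJA1105_EXTTS_INTERVAL */

/* Round the next expiry up to the next multiple of POLL_INTERVAL, so
 * successive expirations stay periodic and do not drift to the right
 * by the duration of each run.
 */
static void rearm_extts_poll_timer(struct timer_list *t)
{
	unsigned long expires = ((jiffies / POLL_INTERVAL) + 1) * POLL_INTERVAL;

	mod_timer(t, expires);
}
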
The 'src' timestamp that is skipped is never larger than .65 seconds (which means .15 seconds larger than the time at which the real event occurred at, and .10 seconds smaller than the .75 upper threshold for ignoring the falling edge): ts2phc[40.076]: adding tstamp 34.949261296 to clock /dev/ptp3 ts2phc[40.076]: adding tstamp 35.000000000 to clock /dev/ptp1 ts2phc[40.076]: /dev/ptp3 offset 48 s2 freq +4631 ts2phc[40.568]: /dev/ptp3 SKIP extts index 0 at 35.449256496 src 35.595791078 ts2phc[41.064]: adding tstamp 35.949251744 to clock /dev/ptp3 ts2phc[41.064]: adding tstamp 36.000000000 to clock /dev/ptp1 ts2phc[41.064]: /dev/ptp3 offset -224 s2 freq +4374 ts2phc[41.552]: /dev/ptp3 SKIP extts index 0 at 36.449247088 src 36.579825574 ts2phc[42.044]: adding tstamp 36.949242456 to clock /dev/ptp3 ts2phc[42.044]: adding tstamp 37.000000000 to clock /dev/ptp1 ts2phc[42.044]: /dev/ptp3 offset -240 s2 freq +4290 ts2phc[42.536]: /dev/ptp3 SKIP extts index 0 at 37.449237848 src 37.563828774 ts2phc[43.028]: adding tstamp 37.949233264 to clock /dev/ptp3 ts2phc[43.028]: adding tstamp 38.000000000 to clock /dev/ptp1 ts2phc[43.028]: /dev/ptp3 offset -144 s2 freq +4314 ts2phc[43.520]: /dev/ptp3 SKIP extts index 0 at 38.449228656 src 38.547823238 ts2phc[44.012]: adding tstamp 38.949224048 to clock /dev/ptp3 ts2phc[44.012]: adding tstamp 39.000000000 to clock /dev/ptp1 ts2phc[44.012]: /dev/ptp3 offset -80 s2 freq +4335 ts2phc[44.508]: /dev/ptp3 SKIP extts index 0 at 39.449219432 src 39.535846118 ts2phc[44.996]: adding tstamp 39.949214816 to clock /dev/ptp3 ts2phc[44.996]: adding tstamp 40.000000000 to clock /dev/ptp1 ts2phc[44.996]: /dev/ptp3 offset -32 s2 freq +4359 ts2phc[45.488]: /dev/ptp3 SKIP extts index 0 at 40.449210192 src 40.515824678 ts2phc[45.980]: adding tstamp 40.949205568 to clock /dev/ptp3 ts2phc[45.980]: adding tstamp 41.000000000 to clock /dev/ptp1 ts2phc[45.980]: /dev/ptp3 offset 8 s2 freq +4390 ts2phc[46.636]: /dev/ptp3 SKIP extts index 0 at 41.449200928 src 41.664176902 ts2phc[47.132]: adding tstamp 41.949196288 to clock /dev/ptp3 ts2phc[47.132]: adding tstamp 42.000000000 to clock /dev/ptp1 ts2phc[47.132]: /dev/ptp3 offset 0 s2 freq +4384 ts2phc[47.620]: /dev/ptp3 SKIP extts index 0 at 42.449191656 src 42.648117190 ts2phc[48.112]: adding tstamp 42.949187016 to clock /dev/ptp3 ts2phc[48.112]: adding tstamp 43.000000000 to clock /dev/ptp1 ts2phc[48.112]: /dev/ptp3 offset 0 s2 freq +4384 ts2phc[48.604]: /dev/ptp3 SKIP extts index 0 at 43.449182384 src 43.632112582 ts2phc[49.100]: adding tstamp 43.949177736 to clock /dev/ptp3 ts2phc[49.100]: adding tstamp 44.000000000 to clock /dev/ptp1 ts2phc[49.100]: /dev/ptp3 offset -8 s2 freq +4376 ts2phc[49.588]: /dev/ptp3 SKIP extts index 0 at 44.449173096 src 44.616136774 ts2phc[50.080]: adding tstamp 44.949168464 to clock /dev/ptp3 ts2phc[50.080]: adding tstamp 45.000000000 to clock /dev/ptp1 ts2phc[50.080]: /dev/ptp3 offset 8 s2 freq +4390 ts2phc[50.572]: /dev/ptp3 SKIP extts index 0 at 45.449163816 src 45.600134662 ts2phc[51.064]: adding tstamp 45.949159160 to clock /dev/ptp3 ts2phc[51.064]: adding tstamp 46.000000000 to clock /dev/ptp1 ts2phc[51.064]: /dev/ptp3 offset -8 s2 freq +4376 ts2phc[51.556]: /dev/ptp3 SKIP extts index 0 at 46.449154528 src 46.584588550 ts2phc[52.048]: adding tstamp 46.949149896 to clock /dev/ptp3 ts2phc[52.048]: adding tstamp 47.000000000 to clock /dev/ptp1 ts2phc[52.048]: /dev/ptp3 offset 0 s2 freq +4382 ts2phc[52.540]: /dev/ptp3 SKIP extts index 0 at 47.449145256 src 47.568132198 ts2phc[53.032]: adding tstamp 
47.949140616 to clock /dev/ptp3 ts2phc[53.032]: adding tstamp 48.000000000 to clock /dev/ptp1 ts2phc[53.032]: /dev/ptp3 offset 0 s2 freq +4382 ts2phc[53.524]: /dev/ptp3 SKIP extts index 0 at 48.449135968 src 48.552121446 ts2phc[54.016]: adding tstamp 48.949131320 to clock /dev/ptp3 ts2phc[54.016]: adding tstamp 49.000000000 to clock /dev/ptp1 ts2phc[54.016]: /dev/ptp3 offset 0 s2 freq +4382 ts2phc[54.512]: /dev/ptp3 SKIP extts index 0 at 49.449126680 src 49.540147014 ts2phc[55.000]: adding tstamp 49.949122040 to clock /dev/ptp3 ts2phc[55.000]: adding tstamp 50.000000000 to clock /dev/ptp1 ts2phc[55.000]: /dev/ptp3 offset 0 s2 freq +4382 ts2phc[55.492]: /dev/ptp3 SKIP extts index 0 at 50.449117400 src 50.520119078 ts2phc[55.988]: adding tstamp 50.949112768 to clock /dev/ptp3 ts2phc[55.988]: adding tstamp 51.000000000 to clock /dev/ptp1 ts2phc[55.988]: /dev/ptp3 offset 8 s2 freq +4390 ts2phc[56.476]: /dev/ptp3 SKIP extts index 0 at 51.449108120 src 51.504175910 ts2phc[57.132]: adding tstamp 51.949103480 to clock /dev/ptp3 ts2phc[57.132]: adding tstamp 52.000000000 to clock /dev/ptp1 ts2phc[57.132]: /dev/ptp3 offset 0 s2 freq +4384 ts2phc[57.624]: /dev/ptp3 SKIP extts index 0 at 52.449098840 src 52.651833574 ts2phc[58.116]: adding tstamp 52.949094200 to clock /dev/ptp3 ts2phc[58.116]: adding tstamp 53.000000000 to clock /dev/ptp1 ts2phc[58.116]: /dev/ptp3 offset 8 s2 freq +4392 ts2phc[58.612]: /dev/ptp3 SKIP extts index 0 at 53.449089560 src 53.639826918 ts2phc[59.100]: adding tstamp 53.949084920 to clock /dev/ptp3 ts2phc[59.100]: adding tstamp 54.000000000 to clock /dev/ptp1 ts2phc[59.100]: /dev/ptp3 offset 8 s2 freq +4394 ts2phc[59.592]: /dev/ptp3 SKIP extts index 0 at 54.449080272 src 54.619842278 ts2phc[60.084]: adding tstamp 54.949075624 to clock /dev/ptp3 ts2phc[60.084]: adding tstamp 55.000000000 to clock /dev/ptp1 ts2phc[60.084]: /dev/ptp3 offset 8 s2 freq +4397 ts2phc[60.576]: /dev/ptp3 SKIP extts index 0 at 55.449070968 src 55.603885542 ts2phc[61.068]: adding tstamp 55.949066312 to clock /dev/ptp3 ts2phc[61.068]: adding tstamp 56.000000000 to clock /dev/ptp1 ts2phc[61.068]: /dev/ptp3 offset 0 s2 freq +4391 ts2phc[61.560]: /dev/ptp3 SKIP extts index 0 at 56.449061680 src 56.587885798 ts2phc[62.052]: adding tstamp 56.949057032 to clock /dev/ptp3 ts2phc[62.052]: adding tstamp 57.000000000 to clock /dev/ptp1 ts2phc[62.052]: /dev/ptp3 offset -8 s2 freq +4383 Signed-off-by: Vladimir Oltean Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_ptp.c | 79 ++++++++++++++++----------- drivers/net/dsa/sja1105/sja1105_ptp.h | 5 +- 2 files changed, 50 insertions(+), 34 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index 177134596458..1b90570b257b 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -24,7 +24,7 @@ * this hardware can do (but may be enough for some setups). Anything of higher * frequency than 1 Hz will be lost, since there is no timestamp FIFO. 
*/ -#define SJA1105_EXTTS_INTERVAL (HZ / 4) +#define SJA1105_EXTTS_INTERVAL (HZ / 6) /* This range is actually +/- SJA1105_MAX_ADJ_PPB * divided by 1000 (ppb -> ppm) and with a 16-bit @@ -51,8 +51,8 @@ enum sja1105_ptp_clk_mode { PTP_SET_MODE = 0, }; -#define extts_to_data(d) \ - container_of((d), struct sja1105_ptp_data, extts_work) +#define extts_to_data(t) \ + container_of((t), struct sja1105_ptp_data, extts_timer) #define ptp_caps_to_data(d) \ container_of((d), struct sja1105_ptp_data, caps) #define ptp_data_to_sja1105(d) \ @@ -350,6 +350,30 @@ static int sja1105_ptpclkval_write(struct sja1105_private *priv, u64 ticks, ptp_sts); } +static void sja1105_extts_poll(struct sja1105_private *priv) +{ + struct sja1105_ptp_data *ptp_data = &priv->ptp_data; + const struct sja1105_regs *regs = priv->info->regs; + struct ptp_clock_event event; + u64 ptpsyncts = 0; + int rc; + + rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts, + NULL); + if (rc < 0) + dev_err_ratelimited(priv->ds->dev, + "Failed to read PTPSYNCTS: %d\n", rc); + + if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) { + event.index = 0; + event.type = PTP_CLOCK_EXTTS; + event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts)); + ptp_clock_event(ptp_data->clock, &event); + + ptp_data->ptpsyncts = ptpsyncts; + } +} + static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp) { struct sja1105_ptp_data *ptp_data = ptp_caps_to_data(ptp); @@ -380,6 +404,9 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp) netif_rx_ni(skb); } + if (ptp_data->extts_enabled) + sja1105_extts_poll(priv); + mutex_unlock(&ptp_data->lock); /* Don't restart */ @@ -595,36 +622,21 @@ static int sja1105_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return rc; } -static void sja1105_ptp_extts_work(struct work_struct *work) +static void sja1105_ptp_extts_setup_timer(struct sja1105_ptp_data *ptp_data) { - struct delayed_work *dw = to_delayed_work(work); - struct sja1105_ptp_data *ptp_data = extts_to_data(dw); - struct sja1105_private *priv = ptp_data_to_sja1105(ptp_data); - const struct sja1105_regs *regs = priv->info->regs; - struct ptp_clock_event event; - u64 ptpsyncts = 0; - int rc; + unsigned long expires = ((jiffies / SJA1105_EXTTS_INTERVAL) + 1) * + SJA1105_EXTTS_INTERVAL; - mutex_lock(&ptp_data->lock); + mod_timer(&ptp_data->extts_timer, expires); +} - rc = sja1105_xfer_u64(priv, SPI_READ, regs->ptpsyncts, &ptpsyncts, - NULL); - if (rc < 0) - dev_err_ratelimited(priv->ds->dev, - "Failed to read PTPSYNCTS: %d\n", rc); +static void sja1105_ptp_extts_timer(struct timer_list *t) +{ + struct sja1105_ptp_data *ptp_data = extts_to_data(t); - if (ptpsyncts && ptp_data->ptpsyncts != ptpsyncts) { - event.index = 0; - event.type = PTP_CLOCK_EXTTS; - event.timestamp = ns_to_ktime(sja1105_ticks_to_ns(ptpsyncts)); - ptp_clock_event(ptp_data->clock, &event); + ptp_schedule_worker(ptp_data->clock, 0); - ptp_data->ptpsyncts = ptpsyncts; - } - - mutex_unlock(&ptp_data->lock); - - schedule_delayed_work(&ptp_data->extts_work, SJA1105_EXTTS_INTERVAL); + sja1105_ptp_extts_setup_timer(ptp_data); } static int sja1105_change_ptp_clk_pin_func(struct sja1105_private *priv, @@ -771,11 +783,12 @@ static int sja1105_extts_enable(struct sja1105_private *priv, if (rc) return rc; + priv->ptp_data.extts_enabled = on; + if (on) - schedule_delayed_work(&priv->ptp_data.extts_work, - SJA1105_EXTTS_INTERVAL); + sja1105_ptp_extts_setup_timer(&priv->ptp_data); else - cancel_delayed_work_sync(&priv->ptp_data.extts_work); + 
del_timer_sync(&priv->ptp_data.extts_timer); return 0; } @@ -858,7 +871,7 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds) ptp_data->cmd.corrclk4ts = true; ptp_data->cmd.ptpclkadd = PTP_SET_MODE; - INIT_DELAYED_WORK(&ptp_data->extts_work, sja1105_ptp_extts_work); + timer_setup(&ptp_data->extts_timer, sja1105_ptp_extts_timer, 0); return sja1105_ptp_reset(ds); } @@ -871,7 +884,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds) if (IS_ERR_OR_NULL(ptp_data->clock)) return; - cancel_delayed_work_sync(&ptp_data->extts_work); + del_timer_sync(&ptp_data->extts_timer); ptp_cancel_worker_sync(ptp_data->clock); skb_queue_purge(&ptp_data->skb_rxtstamp_queue); ptp_clock_unregister(ptp_data->clock); diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 6408d1158f2d..3daa33e98e77 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -4,6 +4,8 @@ #ifndef _SJA1105_PTP_H #define _SJA1105_PTP_H +#include + #if IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) /* Timestamps are in units of 8 ns clock ticks (equivalent to @@ -72,13 +74,14 @@ struct sja1105_ptp_cmd { }; struct sja1105_ptp_data { - struct delayed_work extts_work; + struct timer_list extts_timer; struct sk_buff_head skb_rxtstamp_queue; struct ptp_clock_info caps; struct ptp_clock *clock; struct sja1105_ptp_cmd cmd; /* Serializes all operations on the PTP hardware clock */ struct mutex lock; + bool extts_enabled; u64 ptpsyncts; }; From 59b328cf566001fce32c33375ea9bc570625af2f Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 4 Aug 2020 00:00:08 +0530 Subject: [PATCH 2007/2056] cxgb4: add TC-MATCHALL IPv6 support Matching IPv6 traffic require allocating their own individual slots in TCAM. So, fetch additional slots to insert IPv6 rules. Also, fetch the cumulative stats of all the slots occupied by the Matchall rule. Signed-off-by: Rahul Lakkireddy Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 + .../chelsio/cxgb4/cxgb4_tc_matchall.c | 101 +++++++++++++----- .../chelsio/cxgb4/cxgb4_tc_matchall.h | 5 +- 3 files changed, 82 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index adbc0d088070..9cb8b229c1b3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1438,6 +1438,8 @@ enum { NAT_MODE_ALL /* NAT on entire 4-tuple */ }; +#define CXGB4_FILTER_TYPE_MAX 2 + /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the * firmware command. 
The use of bit-field structure elements is purely to diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c index e377e50c2492..2e309f6673f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c @@ -231,8 +231,26 @@ static void cxgb4_matchall_mirror_free(struct net_device *dev) tc_port_matchall->ingress.viid_mirror = 0; } -static int cxgb4_matchall_alloc_filter(struct net_device *dev, - struct tc_cls_matchall_offload *cls) +static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type], + &tc_port_matchall->ingress.fs[filter_type]); + if (ret) + return ret; + + tc_port_matchall->ingress.tid[filter_type] = 0; + return 0; +} + +static int cxgb4_matchall_add_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + u8 filter_type) { struct netlink_ext_ack *extack = cls->common.extack; struct cxgb4_tc_port_matchall *tc_port_matchall; @@ -244,28 +262,24 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, /* Get a free filter entry TID, where we can insert this new * rule. Only insert rule if its prio doesn't conflict with * existing rules. - * - * 1 slot is enough to create a wildcard matchall VIID rule. */ - fidx = cxgb4_get_free_ftid(dev, PF_INET, false, cls->common.prio); + fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET, + false, cls->common.prio); if (fidx < 0) { NL_SET_ERR_MSG_MOD(extack, "No free LETCAM index available"); return -ENOMEM; } - ret = cxgb4_matchall_mirror_alloc(dev, cls); - if (ret) - return ret; - tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; - fs = &tc_port_matchall->ingress.fs; + fs = &tc_port_matchall->ingress.fs[filter_type]; memset(fs, 0, sizeof(*fs)); if (fidx < adap->tids.nhpftids) fs->prio = 1; fs->tc_prio = cls->common.prio; fs->tc_cookie = cls->cookie; + fs->type = filter_type; fs->hitcnts = 1; fs->val.pfvf_vld = 1; @@ -276,13 +290,39 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev, ret = cxgb4_set_filter(dev, fidx, fs); if (ret) - goto out_free; + return ret; + + tc_port_matchall->ingress.tid[filter_type] = fidx; + return 0; +} + +static int cxgb4_matchall_alloc_filter(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct cxgb4_tc_port_matchall *tc_port_matchall; + struct port_info *pi = netdev2pinfo(dev); + struct adapter *adap = netdev2adap(dev); + int ret, i; + + tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; + + ret = cxgb4_matchall_mirror_alloc(dev, cls); + if (ret) + return ret; + + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_matchall_add_filter(dev, cls, i); + if (ret) + goto out_free; + } - tc_port_matchall->ingress.tid = fidx; tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED; return 0; out_free: + while (i-- > 0) + cxgb4_matchall_del_filter(dev, i); + cxgb4_matchall_mirror_free(dev); return ret; } @@ -293,20 +333,21 @@ static int cxgb4_matchall_free_filter(struct net_device *dev) struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); int ret; + u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; - ret = 
cxgb4_del_filter(dev, tc_port_matchall->ingress.tid, - &tc_port_matchall->ingress.fs); - if (ret) - return ret; + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_matchall_del_filter(dev, i); + if (ret) + return ret; + } cxgb4_matchall_mirror_free(dev); tc_port_matchall->ingress.packets = 0; tc_port_matchall->ingress.bytes = 0; tc_port_matchall->ingress.last_used = 0; - tc_port_matchall->ingress.tid = 0; tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED; return 0; } @@ -362,8 +403,12 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev, tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (ingress) { + /* All the filter types of this matchall rule save the + * same cookie. So, checking for the first one is + * enough. + */ if (cls_matchall->cookie != - tc_port_matchall->ingress.fs.tc_cookie) + tc_port_matchall->ingress.fs[0].tc_cookie) return -ENOENT; return cxgb4_matchall_free_filter(dev); @@ -379,21 +424,29 @@ int cxgb4_tc_matchall_destroy(struct net_device *dev, int cxgb4_tc_matchall_stats(struct net_device *dev, struct tc_cls_matchall_offload *cls_matchall) { + u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0; struct cxgb4_tc_port_matchall *tc_port_matchall; + struct cxgb4_matchall_ingress_entry *ingress; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - u64 packets, bytes; int ret; + u8 i; tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id]; if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED) return -ENOENT; - ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid, - &packets, &bytes, - tc_port_matchall->ingress.fs.hash); - if (ret) - return ret; + ingress = &tc_port_matchall->ingress; + for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) { + ret = cxgb4_get_filter_counters(dev, ingress->tid[i], + &tmp_packets, &tmp_bytes, + ingress->fs[i].hash); + if (ret) + return ret; + + packets += tmp_packets; + bytes += tmp_bytes; + } if (tc_port_matchall->ingress.packets != packets) { flow_stats_update(&cls_matchall->stats, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h index e264b6e606c4..fe7ec423a4c9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h @@ -19,8 +19,9 @@ struct cxgb4_matchall_egress_entry { struct cxgb4_matchall_ingress_entry { enum cxgb4_matchall_state state; /* Current MATCHALL offload state */ - u32 tid; /* Index to hardware filter entry */ - struct ch_filter_specification fs; /* Filter entry */ + u32 tid[CXGB4_FILTER_TYPE_MAX]; /* Index to hardware filter entries */ + /* Filter entries */ + struct ch_filter_specification fs[CXGB4_FILTER_TYPE_MAX]; u16 viid_mirror; /* Identifier for allocated Mirror VI */ u64 bytes; /* # of bytes hitting the filter */ u64 packets; /* # of packets hitting the filter */ From 81d4e8e0731408c7fc66409cbf97f10f38964dc1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:50 -0700 Subject: [PATCH 2008/2056] net: dsa: loop: PVID should be per-port The PVID should be per-port, this is a preliminary change to support a 802.1Q data path in the driver. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/dsa_loop.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index f8bc85a6e670..4a57238cdfd8 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -45,6 +45,7 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = { struct dsa_loop_port { struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; + u16 pvid; }; #define DSA_LOOP_VLANS 5 @@ -55,7 +56,6 @@ struct dsa_loop_priv { struct dsa_loop_vlan vlans[DSA_LOOP_VLANS]; struct net_device *netdev; struct dsa_loop_port ports[DSA_MAX_PORTS]; - u16 pvid; }; static struct phy_device *phydevs[PHY_MAX_ADDR]; @@ -224,7 +224,7 @@ static void dsa_loop_port_vlan_add(struct dsa_switch *ds, int port, } if (pvid) - ps->pvid = vid; + ps->ports[port].pvid = vid; } static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, @@ -234,7 +234,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, struct dsa_loop_priv *ps = ds->priv; struct mii_bus *bus = ps->bus; struct dsa_loop_vlan *vl; - u16 vid, pvid = ps->pvid; + u16 vid, pvid = ps->ports[port].pvid; /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); @@ -252,7 +252,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, dev_dbg(ds->dev, "%s: port: %d vlan: %d, %stagged, pvid: %d\n", __func__, port, vid, untagged ? "un" : "", pvid); } - ps->pvid = pvid; + ps->ports[port].pvid = pvid; return 0; } From 916a8d168e8aea52e6f7eb2e323d5dcbd5dc23d5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:51 -0700 Subject: [PATCH 2009/2056] net: dsa: loop: Support 4K VLANs Allocate a 4K array of VLANs instead of limiting ourselves to just 5 which is arbitrary. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 4a57238cdfd8..6e97b44c6f3f 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -48,12 +48,10 @@ struct dsa_loop_port { u16 pvid; }; -#define DSA_LOOP_VLANS 5 - struct dsa_loop_priv { struct mii_bus *bus; unsigned int port_base; - struct dsa_loop_vlan vlans[DSA_LOOP_VLANS]; + struct dsa_loop_vlan vlans[VLAN_N_VID]; struct net_device *netdev; struct dsa_loop_port ports[DSA_MAX_PORTS]; }; @@ -191,7 +189,7 @@ dsa_loop_port_vlan_prepare(struct dsa_switch *ds, int port, /* Just do a sleeping operation to make lockdep checks effective */ mdiobus_read(bus, ps->port_base + port, MII_BMSR); - if (vlan->vid_end > DSA_LOOP_VLANS) + if (vlan->vid_end > ARRAY_SIZE(ps->vlans)) return -ERANGE; return 0; From 6c84a589972f6458da3168a68e7d65f6ca8687f8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:52 -0700 Subject: [PATCH 2010/2056] net: dsa: loop: Move data structures to header In preparation for adding support for a mockup data path, move the driver data structures to include/linux/dsa/loop.h such that we can share them between net/dsa/ and drivers/net/dsa/ later on. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/dsa_loop.c | 32 +----------------------------- include/linux/dsa/loop.h | 40 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 31 deletions(-) create mode 100644 include/linux/dsa/loop.h diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 6e97b44c6f3f..ed0b580c9944 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -14,28 +14,11 @@ #include #include #include +#include #include #include "dsa_loop.h" -struct dsa_loop_vlan { - u16 members; - u16 untagged; -}; - -struct dsa_loop_mib_entry { - char name[ETH_GSTRING_LEN]; - unsigned long val; -}; - -enum dsa_loop_mib_counters { - DSA_LOOP_PHY_READ_OK, - DSA_LOOP_PHY_READ_ERR, - DSA_LOOP_PHY_WRITE_OK, - DSA_LOOP_PHY_WRITE_ERR, - __DSA_LOOP_CNT_MAX, -}; - static struct dsa_loop_mib_entry dsa_loop_mibs[] = { [DSA_LOOP_PHY_READ_OK] = { "phy_read_ok", }, [DSA_LOOP_PHY_READ_ERR] = { "phy_read_err", }, @@ -43,19 +26,6 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = { [DSA_LOOP_PHY_WRITE_ERR] = { "phy_write_err", }, }; -struct dsa_loop_port { - struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; - u16 pvid; -}; - -struct dsa_loop_priv { - struct mii_bus *bus; - unsigned int port_base; - struct dsa_loop_vlan vlans[VLAN_N_VID]; - struct net_device *netdev; - struct dsa_loop_port ports[DSA_MAX_PORTS]; -}; - static struct phy_device *phydevs[PHY_MAX_ADDR]; static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds, diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h new file mode 100644 index 000000000000..bb39401a8056 --- /dev/null +++ b/include/linux/dsa/loop.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DSA_LOOP_H +#define DSA_LOOP_H + +#include +#include +#include + +struct dsa_loop_vlan { + u16 members; + u16 untagged; +}; + +struct dsa_loop_mib_entry { + char name[ETH_GSTRING_LEN]; + unsigned long val; +}; + +enum dsa_loop_mib_counters { + DSA_LOOP_PHY_READ_OK, + DSA_LOOP_PHY_READ_ERR, + DSA_LOOP_PHY_WRITE_OK, + DSA_LOOP_PHY_WRITE_ERR, + __DSA_LOOP_CNT_MAX, +}; + +struct dsa_loop_port { + struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; + u16 pvid; +}; + +struct dsa_loop_priv { + struct mii_bus *bus; + unsigned int port_base; + struct dsa_loop_vlan vlans[VLAN_N_VID]; + struct net_device *netdev; + struct dsa_loop_port ports[DSA_MAX_PORTS]; +}; + +#endif /* DSA_LOOP_H */ From c99194eded25deb58f606e59b3101ba05e786021 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:53 -0700 Subject: [PATCH 2011/2056] net: dsa: loop: Wire-up MTU callbacks For now we simply store the port MTU into a per-port member. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/dsa_loop.c | 17 +++++++++++++++++ include/linux/dsa/loop.h | 1 + 2 files changed, 18 insertions(+) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index ed0b580c9944..6a7d661b5a59 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -225,6 +225,21 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } +static int dsa_loop_port_change_mtu(struct dsa_switch *ds, int port, + int new_mtu) +{ + struct dsa_loop_priv *priv = ds->priv; + + priv->ports[port].mtu = new_mtu; + + return 0; +} + +static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port) +{ + return ETH_MAX_MTU; +} + static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, @@ -241,6 +256,8 @@ static const struct dsa_switch_ops dsa_loop_driver = { .port_vlan_prepare = dsa_loop_port_vlan_prepare, .port_vlan_add = dsa_loop_port_vlan_add, .port_vlan_del = dsa_loop_port_vlan_del, + .port_change_mtu = dsa_loop_port_change_mtu, + .port_max_mtu = dsa_loop_port_max_mtu, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h index bb39401a8056..5a3470bcc8a7 100644 --- a/include/linux/dsa/loop.h +++ b/include/linux/dsa/loop.h @@ -27,6 +27,7 @@ enum dsa_loop_mib_counters { struct dsa_loop_port { struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; u16 pvid; + int mtu; }; struct dsa_loop_priv { From 947b6ef9f7aeb2c9ebf63dcd6619068f3cec353a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 3 Aug 2020 13:03:54 -0700 Subject: [PATCH 2012/2056] net: dsa: loop: Set correct number of ports We only support DSA_LOOP_NUM_PORTS in the switch, do not tell the DSA core to allocate up to DSA_MAX_PORTS which is nearly the double (6 vs. 11). Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 6a7d661b5a59..eb600b3dbf26 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -275,7 +275,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev) return -ENOMEM; ds->dev = &mdiodev->dev; - ds->num_ports = DSA_MAX_PORTS; + ds->num_ports = DSA_LOOP_NUM_PORTS; ps = devm_kzalloc(&mdiodev->dev, sizeof(*ps), GFP_KERNEL); if (!ps) From 6c33ae1ad58434133e2f0b7770bfb670428abda7 Mon Sep 17 00:00:00 2001 From: Jiafei Pan Date: Mon, 3 Aug 2020 23:10:08 +0300 Subject: [PATCH 2013/2056] dpaa2-eth: use napi_schedule to be compatible with PREEMPT_RT The driver calls napi_schedule_irqoff() from a context where, in RT, hardirqs are not disabled, since the IRQ handler is force-threaded. In the call path of this function, __raise_softirq_irqoff() is modifying its per-CPU mask of pending softirqs that must be processed, using or_softirq_pending(). The or_softirq_pending() function is not atomic, but since interrupts are supposed to be disabled, nobody should be preempting it, and the operation should be safe. Nonetheless, when running with hardirqs on, as in the PREEMPT_RT case, it isn't safe, and the pending softirqs mask can get corrupted, resulting in softirqs being lost and never processed. To have common code that works with PREEMPT_RT and with mainline Linux, we can use plain napi_schedule() instead. 
The difference is that napi_schedule() (via __napi_schedule) also calls local_irq_save, which disables hardirqs if they aren't already. But, since they already are disabled in non-RT, this means that in practice we don't see any measurable difference in throughput or latency with this patch. Signed-off-by: Jiafei Pan Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 9b4028c0e34c..50f52fe2012f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2330,7 +2330,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) /* Update NAPI statistics */ ch->stats.cdan++; - napi_schedule_irqoff(&ch->napi); + napi_schedule(&ch->napi); } /* Allocate and configure a DPCON object */ From 215602a8d21209545923d39b92e9839093b1dcec Mon Sep 17 00:00:00 2001 From: Jiafei Pan Date: Mon, 3 Aug 2020 23:10:09 +0300 Subject: [PATCH 2014/2056] enetc: use napi_schedule to be compatible with PREEMPT_RT The driver calls napi_schedule_irqoff() from a context where, in RT, hardirqs are not disabled, since the IRQ handler is force-threaded. In the call path of this function, __raise_softirq_irqoff() is modifying its per-CPU mask of pending softirqs that must be processed, using or_softirq_pending(). The or_softirq_pending() function is not atomic, but since interrupts are supposed to be disabled, nobody should be preempting it, and the operation should be safe. Nonetheless, when running with hardirqs on, as in the PREEMPT_RT case, it isn't safe, and the pending softirqs mask can get corrupted, resulting in softirqs being lost and never processed. To have common code that works with PREEMPT_RT and with mainline Linux, we can use plain napi_schedule() instead. The difference is that napi_schedule() (via __napi_schedule) also calls local_irq_save, which disables hardirqs if they aren't already. But, since they already are disabled in non-RT, this means that in practice we don't see any measurable difference in throughput or latency with this patch. Signed-off-by: Jiafei Pan Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/enetc/enetc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index f50353cbb4db..f78ca7b343d2 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -270,7 +270,7 @@ static irqreturn_t enetc_msix(int irq, void *data) for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS) enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0); - napi_schedule_irqoff(&v->napi); + napi_schedule(&v->napi); return IRQ_HANDLED; } From 8e737145e8b27f04fe1c348ac99644f6cad4cfbb Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:32:05 +0100 Subject: [PATCH 2015/2056] sfc_ef100: check firmware version at start-of-day Early in EF100 development there was a different format of event descriptor; if the NIC is somehow running the very old firmware which will use that format, fail the probe. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 40 ++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 6a00f2a2dc2b..75131bcf4f1a 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -485,6 +485,36 @@ const struct efx_nic_type ef100_pf_nic_type = { }; +static int compare_versions(const char *a, const char *b) +{ + int a_major, a_minor, a_point, a_patch; + int b_major, b_minor, b_point, b_patch; + int a_matched, b_matched; + + a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch); + b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch); + + if (a_matched == 4 && b_matched != 4) + return +1; + + if (a_matched != 4 && b_matched == 4) + return -1; + + if (a_matched != 4 && b_matched != 4) + return 0; + + if (a_major != b_major) + return a_major - b_major; + + if (a_minor != b_minor) + return a_minor - b_minor; + + if (a_point != b_point) + return a_point - b_point; + + return a_patch - b_patch; +} + /* NIC probe and remove */ static int ef100_probe_main(struct efx_nic *efx) @@ -492,6 +522,7 @@ static int ef100_probe_main(struct efx_nic *efx) unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]); struct net_device *net_dev = efx->net_dev; struct ef100_nic_data *nic_data; + char fw_version[32]; int i, rc; if (WARN_ON(bar_size == 0)) @@ -562,6 +593,15 @@ static int ef100_probe_main(struct efx_nic *efx) goto fail; efx->port_num = rc; + efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version)); + netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version); + + if (compare_versions(fw_version, "1.1.0.1000") < 0) { + netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n"); + rc = -EINVAL; + goto fail; + } + rc = ef100_phy_probe(efx); if (rc) goto fail; From 4496363bec32df36df51db7f915a11d3b38615e3 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:32:16 +0100 Subject: [PATCH 2016/2056] sfc_ef100: fail the probe if NIC uses unsol_ev credits In the future, EF100 is planned to have a credit-based scheme for handling unsolicited events, which drivers will need to use in order to function correctly. However, current EF100 hardware does not yet generate unsolicited events and the credit scheme has not yet been implemented in firmware. To prevent compatibility problems later if the current driver is used with future firmware which does implement it, we check for the corresponding capability flag (which that future firmware will set), and if found, we refuse to probe. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 75131bcf4f1a..c2bec2bdbc1f 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -602,6 +602,12 @@ static int ef100_probe_main(struct efx_nic *efx) goto fail; } + if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) { + netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n"); + rc = -EINVAL; + goto fail; + } + rc = ef100_phy_probe(efx); if (rc) goto fail; From adcfc3482ffff813fa2c34e5902005853f79c2aa Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:33:20 +0100 Subject: [PATCH 2017/2056] sfc_ef100: read Design Parameters at probe time Several parts of the EF100 architecture are parameterised (to allow varying capabilities on FPGAs according to resource constraints), and these parameters are exposed to the driver through a TLV-encoded region of the BAR. For the most part we either don't care about these values at all or just need to sanity-check them against the driver's assumptions, but there are a number of TSO limits which we record so that we will be able to check against them in the TX path when handling GSO skbs. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 216 +++++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_nic.h | 4 + 2 files changed, 220 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index c2bec2bdbc1f..9b5e4b42fe51 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -515,6 +515,208 @@ static int compare_versions(const char *a, const char *b) return a_patch - b_patch; } +enum ef100_tlv_state_machine { + EF100_TLV_TYPE, + EF100_TLV_TYPE_CONT, + EF100_TLV_LENGTH, + EF100_TLV_VALUE +}; + +struct ef100_tlv_state { + enum ef100_tlv_state_machine state; + u64 value; + u32 value_offset; + u16 type; + u8 len; +}; + +static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte) +{ + switch (state->state) { + case EF100_TLV_TYPE: + state->type = byte & 0x7f; + state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT + : EF100_TLV_LENGTH; + /* Clear ready to read in a new entry */ + state->value = 0; + state->value_offset = 0; + return 0; + case EF100_TLV_TYPE_CONT: + state->type |= byte << 7; + state->state = EF100_TLV_LENGTH; + return 0; + case EF100_TLV_LENGTH: + state->len = byte; + /* We only handle TLVs that fit in a u64 */ + if (state->len > sizeof(state->value)) + return -EOPNOTSUPP; + /* len may be zero, implying a value of zero */ + state->state = state->len ? 
EF100_TLV_VALUE : EF100_TLV_TYPE; + return 0; + case EF100_TLV_VALUE: + state->value |= ((u64)byte) << (state->value_offset * 8); + state->value_offset++; + if (state->value_offset >= state->len) + state->state = EF100_TLV_TYPE; + return 0; + default: /* state machine error, can't happen */ + WARN_ON_ONCE(1); + return -EIO; + } +} + +static int ef100_process_design_param(struct efx_nic *efx, + const struct ef100_tlv_state *reader) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + + switch (reader->type) { + case ESE_EF100_DP_GZ_PAD: /* padding, skip it */ + return 0; + case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS: + /* Driver doesn't support timestamping yet, so we don't care */ + return 0; + case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS: + /* Driver doesn't support unsolicited-event credits yet, so + * we don't care + */ + return 0; + case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE: + /* Driver doesn't manage the NMMU (so we don't care) */ + return 0; + case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS: + /* Driver uses CHECKSUM_COMPLETE, so we don't care about + * protocol checksum validation + */ + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN: + nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff); + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS: + /* We always put HDR_NUM_SEGS=1 in our TSO descriptors */ + if (!reader->value) { + netif_err(efx, probe, efx->net_dev, + "TSO_MAX_HDR_NUM_SEGS < 1\n"); + return -EOPNOTSUPP; + } + return 0; + case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY: + case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY: + /* Our TXQ and RXQ sizes are always power-of-two and thus divisible by + * EFX_MIN_DMAQ_SIZE, so we just need to check that + * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY. + * This is very unlikely to fail. + */ + if (EFX_MIN_DMAQ_SIZE % reader->value) { + netif_err(efx, probe, efx->net_dev, + "%s size granularity is %llu, can't guarantee safety\n", + reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ", + reader->value); + return -EOPNOTSUPP; + } + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN: + nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE); + efx->net_dev->gso_max_size = nic_data->tso_max_payload_len; + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS: + nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff); + efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs; + return 0; + case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES: + nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff); + return 0; + case ESE_EF100_DP_GZ_COMPAT: + if (reader->value) { + netif_err(efx, probe, efx->net_dev, + "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n", + reader->value); + return -EOPNOTSUPP; + } + return 0; + case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN: + /* Driver doesn't use mem2mem transfers */ + return 0; + case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS: + /* Driver doesn't currently use EVQ_TIMER */ + return 0; + case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES: + /* Driver doesn't manage the NMMU (so we don't care) */ + return 0; + case ESE_EF100_DP_GZ_VI_STRIDES: + /* We never try to set the VI stride, and we don't rely on + * being able to find VIs past VI 0 until after we've learned + * the current stride from MC_CMD_GET_CAPABILITIES. + * So the value of this shouldn't matter. 
+ */ + if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT) + netif_dbg(efx, probe, efx->net_dev, + "NIC has other than default VI_STRIDES (mask " + "%#llx), early probing might use wrong one\n", + reader->value); + return 0; + case ESE_EF100_DP_GZ_RX_MAX_RUNT: + /* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't + * care whether it indicates runt or overlength for any given + * packet, so we don't care about this parameter. + */ + return 0; + default: + /* Host interface says "Drivers should ignore design parameters + * that they do not recognise." + */ + netif_dbg(efx, probe, efx->net_dev, + "Ignoring unrecognised design parameter %u\n", + reader->type); + return 0; + } +} + +static int ef100_check_design_params(struct efx_nic *efx) +{ + struct ef100_tlv_state reader = {}; + u32 total_len, offset = 0; + efx_dword_t reg; + int rc = 0, i; + u32 data; + + efx_readd(efx, ®, ER_GZ_PARAMS_TLV_LEN); + total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n", + total_len); + while (offset < total_len) { + efx_readd(efx, ®, ER_GZ_PARAMS_TLV + offset); + data = EFX_DWORD_FIELD(reg, EFX_DWORD_0); + for (i = 0; i < sizeof(data); i++) { + rc = ef100_tlv_feed(&reader, data); + /* Got a complete value? */ + if (!rc && reader.state == EF100_TLV_TYPE) + rc = ef100_process_design_param(efx, &reader); + if (rc) + goto out; + data >>= 8; + offset++; + } + } + /* Check we didn't end halfway through a TLV entry, which could either + * mean that the TLV stream is truncated or just that it's corrupted + * and our state machine is out of sync. + */ + if (reader.state != EF100_TLV_TYPE) { + if (reader.state == EF100_TLV_TYPE_CONT) + netif_err(efx, probe, efx->net_dev, + "truncated design parameter (incomplete type %u)\n", + reader.type); + else + netif_err(efx, probe, efx->net_dev, + "truncated design parameter %u\n", + reader.type); + rc = -EIO; + } +out: + return rc; +} + /* NIC probe and remove */ static int ef100_probe_main(struct efx_nic *efx) @@ -536,6 +738,20 @@ static int ef100_probe_main(struct efx_nic *efx) net_dev->features |= efx->type->offload_features; net_dev->hw_features |= efx->type->offload_features; + /* Populate design-parameter defaults */ + nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT; + nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT; + nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT; + nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT; + net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT; + /* Read design parameters */ + rc = ef100_check_design_params(efx); + if (rc) { + netif_err(efx, probe, efx->net_dev, + "Unsupported design parameters\n"); + goto fail; + } + /* we assume later that we can copy from this buffer in dwords */ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4); diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 6367bbb2c9b3..c8816bc6ae78 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -26,6 +26,10 @@ struct ef100_nic_data { u16 warm_boot_count; u8 port_id[ETH_ALEN]; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); + u16 tso_max_hdr_len; + u16 tso_max_payload_num_segs; + u16 tso_max_frames; + unsigned int tso_max_payload_len; }; #define efx_ef100_has_cap(caps, flag) \ From d19a5372186336df8a90391c1ae2011e03310dca Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:34:00 +0100 
Subject: [PATCH 2018/2056] sfc_ef100: TX path for EF100 NICs Includes checksum offload and TSO, so declare those in our netdev features. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 8 + drivers/net/ethernet/sfc/ef100_tx.c | 367 +++++++++++++++++++++++++- drivers/net/ethernet/sfc/ef100_tx.h | 4 + drivers/net/ethernet/sfc/net_driver.h | 21 ++ drivers/net/ethernet/sfc/tx_common.c | 1 + 5 files changed, 396 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 9b5e4b42fe51..fcc5b0de76d7 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -263,6 +263,9 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) case ESE_GZ_EF100_EV_MCDI: efx_mcdi_process_event(channel, p_event); break; + case ESE_GZ_EF100_EV_TX_COMPLETION: + ef100_ev_tx(channel, p_event); + break; case ESE_GZ_EF100_EV_DRIVER: netif_info(efx, drv, efx->net_dev, "Driver initiated event " EFX_QWORD_FMT "\n", @@ -436,10 +439,15 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx, /* NIC level access functions */ +#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | \ + NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \ + NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX) + const struct efx_nic_type ef100_pf_nic_type = { .revision = EFX_REV_EF100, .is_vf = false, .probe = ef100_probe_pf, + .offload_features = EF100_OFFLOAD_FEATURES, .mcdi_max_ver = 2, .mcdi_request = ef100_mcdi_request, .mcdi_poll_response = ef100_mcdi_poll_response, diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c index 15e646f8c3e0..a09546e43408 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.c +++ b/drivers/net/ethernet/sfc/ef100_tx.c @@ -9,14 +9,24 @@ * by the Free Software Foundation, incorporated herein by reference. 
*/ +#include + #include "net_driver.h" #include "tx_common.h" #include "nic_common.h" +#include "mcdi_functions.h" +#include "ef100_regs.h" +#include "io.h" #include "ef100_tx.h" +#include "ef100_nic.h" -/* TX queue stubs */ int ef100_tx_probe(struct efx_tx_queue *tx_queue) { + /* Allocate an extra descriptor for the QMDA status completion entry */ + return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, + (tx_queue->ptr_mask + 2) * + sizeof(efx_oword_t), + GFP_KERNEL); return 0; } @@ -27,10 +37,286 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue) netdev_get_tx_queue(tx_queue->efx->net_dev, tx_queue->channel->channel - tx_queue->efx->tx_channel_offset); + + if (efx_mcdi_tx_init(tx_queue, false)) + netdev_WARN(tx_queue->efx->net_dev, + "failed to initialise TXQ %d\n", tx_queue->queue); +} + +static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) +{ + struct efx_nic *efx = tx_queue->efx; + struct ef100_nic_data *nic_data; + struct efx_tx_buffer *buffer; + struct tcphdr *tcphdr; + struct iphdr *iphdr; + size_t header_len; + u32 mss; + + nic_data = efx->nic_data; + + if (!skb_is_gso_tcp(skb)) + return false; + if (!(efx->net_dev->features & NETIF_F_TSO)) + return false; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(mss < 4)) { + WARN_ONCE(1, "MSS of %u is too small for TSO\n", mss); + return false; + } + + header_len = efx_tx_tso_header_length(skb); + if (header_len > nic_data->tso_max_hdr_len) + return false; + + if (skb_shinfo(skb)->gso_segs > nic_data->tso_max_payload_num_segs) { + /* net_dev->gso_max_segs should've caught this */ + WARN_ON_ONCE(1); + return false; + } + + if (skb->data_len / mss > nic_data->tso_max_frames) + return false; + + /* net_dev->gso_max_size should've caught this */ + if (WARN_ON_ONCE(skb->data_len > nic_data->tso_max_payload_len)) + return false; + + /* Reserve an empty buffer for the TSO V3 descriptor. + * Convey the length of the header since we already know it. + */ + buffer = efx_tx_queue_get_insert_buffer(tx_queue); + buffer->flags = EFX_TX_BUF_TSO_V3 | EFX_TX_BUF_CONT; + buffer->len = header_len; + buffer->unmap_len = 0; + buffer->skb = skb; + ++tx_queue->insert_count; + + /* Adjust the TCP checksum to exclude the total length, since we set + * ED_INNER_IP_LEN in the descriptor. 
+ */ + tcphdr = tcp_hdr(skb); + if (skb_is_gso_v6(skb)) { + tcphdr->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } else { + iphdr = ip_hdr(skb); + tcphdr->check = ~csum_tcpudp_magic(iphdr->saddr, iphdr->daddr, + 0, IPPROTO_TCP, 0); + } + return true; +} + +static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) +{ + if (likely(tx_queue->txd.buf.addr)) + return ((efx_oword_t *)tx_queue->txd.buf.addr) + index; + else + return NULL; +} + +void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) +{ + unsigned int write_ptr; + efx_dword_t reg; + + if (unlikely(tx_queue->notify_count == tx_queue->write_count)) + return; + + write_ptr = tx_queue->write_count & tx_queue->ptr_mask; + /* The write pointer goes into the high word */ + EFX_POPULATE_DWORD_1(reg, ERF_GZ_TX_RING_PIDX, write_ptr); + efx_writed_page(tx_queue->efx, ®, + ER_GZ_TX_RING_DOORBELL, tx_queue->queue); + tx_queue->notify_count = tx_queue->write_count; + tx_queue->xmit_more_available = false; +} + +static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) +{ + ef100_notify_tx_desc(tx_queue); + ++tx_queue->pushes; +} + +static void ef100_set_tx_csum_partial(const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd) +{ + efx_oword_t csum; + int csum_start; + + if (!skb || skb->ip_summed != CHECKSUM_PARTIAL) + return; + + /* skb->csum_start has the offset from head, but we need the offset + * from data. + */ + csum_start = skb_checksum_start_offset(skb); + EFX_POPULATE_OWORD_3(csum, + ESF_GZ_TX_SEND_CSO_PARTIAL_EN, 1, + ESF_GZ_TX_SEND_CSO_PARTIAL_START_W, + csum_start >> 1, + ESF_GZ_TX_SEND_CSO_PARTIAL_CSUM_W, + skb->csum_offset >> 1); + EFX_OR_OWORD(*txd, *txd, csum); +} + +static void ef100_set_tx_hw_vlan(const struct sk_buff *skb, efx_oword_t *txd) +{ + u16 vlan_tci = skb_vlan_tag_get(skb); + efx_oword_t vlan; + + EFX_POPULATE_OWORD_2(vlan, + ESF_GZ_TX_SEND_VLAN_INSERT_EN, 1, + ESF_GZ_TX_SEND_VLAN_INSERT_TCI, vlan_tci); + EFX_OR_OWORD(*txd, *txd, vlan); +} + +static void ef100_make_send_desc(struct efx_nic *efx, + const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd, + unsigned int segment_count) +{ + /* TX send descriptor */ + EFX_POPULATE_OWORD_3(*txd, + ESF_GZ_TX_SEND_NUM_SEGS, segment_count, + ESF_GZ_TX_SEND_LEN, buffer->len, + ESF_GZ_TX_SEND_ADDR, buffer->dma_addr); + + if (likely(efx->net_dev->features & NETIF_F_HW_CSUM)) + ef100_set_tx_csum_partial(skb, buffer, txd); + if (efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX && + skb && skb_vlan_tag_present(skb)) + ef100_set_tx_hw_vlan(skb, txd); +} + +static void ef100_make_tso_desc(struct efx_nic *efx, + const struct sk_buff *skb, + struct efx_tx_buffer *buffer, efx_oword_t *txd, + unsigned int segment_count) +{ + u32 mangleid = (efx->net_dev->features & NETIF_F_TSO_MANGLEID) || + skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID ? + ESE_GZ_TX_DESC_IP4_ID_NO_OP : + ESE_GZ_TX_DESC_IP4_ID_INC_MOD16; + u16 vlan_enable = efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_TX ? 
+ skb_vlan_tag_present(skb) : 0; + unsigned int len, ip_offset, tcp_offset, payload_segs; + u16 vlan_tci = skb_vlan_tag_get(skb); + u32 mss = skb_shinfo(skb)->gso_size; + + len = skb->len - buffer->len; + /* We use 1 for the TSO descriptor and 1 for the header */ + payload_segs = segment_count - 2; + ip_offset = skb_network_offset(skb); + tcp_offset = skb_transport_offset(skb); + + EFX_POPULATE_OWORD_13(*txd, + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_TSO, + ESF_GZ_TX_TSO_MSS, mss, + ESF_GZ_TX_TSO_HDR_NUM_SEGS, 1, + ESF_GZ_TX_TSO_PAYLOAD_NUM_SEGS, payload_segs, + ESF_GZ_TX_TSO_HDR_LEN_W, buffer->len >> 1, + ESF_GZ_TX_TSO_PAYLOAD_LEN, len, + ESF_GZ_TX_TSO_CSO_INNER_L4, 1, + ESF_GZ_TX_TSO_INNER_L3_OFF_W, ip_offset >> 1, + ESF_GZ_TX_TSO_INNER_L4_OFF_W, tcp_offset >> 1, + ESF_GZ_TX_TSO_ED_INNER_IP4_ID, mangleid, + ESF_GZ_TX_TSO_ED_INNER_IP_LEN, 1, + ESF_GZ_TX_TSO_VLAN_INSERT_EN, vlan_enable, + ESF_GZ_TX_TSO_VLAN_INSERT_TCI, vlan_tci + ); +} + +static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + unsigned int segment_count) +{ + unsigned int old_write_count = tx_queue->write_count; + unsigned int new_write_count = old_write_count; + struct efx_tx_buffer *buffer; + unsigned int next_desc_type; + unsigned int write_ptr; + efx_oword_t *txd; + unsigned int nr_descs = tx_queue->insert_count - old_write_count; + + if (unlikely(nr_descs == 0)) + return; + + if (segment_count) + next_desc_type = ESE_GZ_TX_DESC_TYPE_TSO; + else + next_desc_type = ESE_GZ_TX_DESC_TYPE_SEND; + + /* if it's a raw write (such as XDP) then always SEND single frames */ + if (!skb) + nr_descs = 1; + + do { + write_ptr = new_write_count & tx_queue->ptr_mask; + buffer = &tx_queue->buffer[write_ptr]; + txd = ef100_tx_desc(tx_queue, write_ptr); + ++new_write_count; + + /* Create TX descriptor ring entry */ + tx_queue->packet_write_count = new_write_count; + + switch (next_desc_type) { + case ESE_GZ_TX_DESC_TYPE_SEND: + ef100_make_send_desc(tx_queue->efx, skb, + buffer, txd, nr_descs); + break; + case ESE_GZ_TX_DESC_TYPE_TSO: + /* TX TSO descriptor */ + WARN_ON_ONCE(!(buffer->flags & EFX_TX_BUF_TSO_V3)); + ef100_make_tso_desc(tx_queue->efx, skb, + buffer, txd, nr_descs); + break; + default: + /* TX segment descriptor */ + EFX_POPULATE_OWORD_3(*txd, + ESF_GZ_TX_DESC_TYPE, ESE_GZ_TX_DESC_TYPE_SEG, + ESF_GZ_TX_SEG_LEN, buffer->len, + ESF_GZ_TX_SEG_ADDR, buffer->dma_addr); + } + /* if it's a raw write (such as XDP) then always SEND */ + next_desc_type = skb ? ESE_GZ_TX_DESC_TYPE_SEG : + ESE_GZ_TX_DESC_TYPE_SEND; + + } while (new_write_count != tx_queue->insert_count); + + wmb(); /* Ensure descriptors are written before they are fetched */ + + tx_queue->write_count = new_write_count; + + /* The write_count above must be updated before reading + * channel->holdoff_doorbell to avoid a race with the + * completion path, so ensure these operations are not + * re-ordered. This also flushes the update of write_count + * back into the cache. 
+ */ + smp_mb(); } void ef100_tx_write(struct efx_tx_queue *tx_queue) { + ef100_tx_make_descriptors(tx_queue, NULL, 0); + ef100_tx_push_buffers(tx_queue); +} + +void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event) +{ + unsigned int tx_done = + EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_NUM_DESC); + unsigned int qlabel = + EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_TXCMPL_Q_LABEL); + struct efx_tx_queue *tx_queue = + efx_channel_get_tx_queue(channel, qlabel); + unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & + tx_queue->ptr_mask; + + efx_xmit_done(tx_queue, tx_index); } /* Add a socket buffer to a TX queue @@ -42,10 +328,81 @@ void ef100_tx_write(struct efx_tx_queue *tx_queue) */ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { - /* Stub. No TX path yet. */ + unsigned int old_insert_count = tx_queue->insert_count; struct efx_nic *efx = tx_queue->efx; + bool xmit_more = netdev_xmit_more(); + unsigned int fill_level; + unsigned int segments; + int rc; - netif_stop_queue(efx->net_dev); - dev_kfree_skb_any(skb); - return -ENODEV; + if (!tx_queue->buffer || !tx_queue->ptr_mask) { + netif_stop_queue(efx->net_dev); + dev_kfree_skb_any(skb); + return -ENODEV; + } + + segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0; + if (segments == 1) + segments = 0; /* Don't use TSO/GSO for a single segment. */ + if (segments && !ef100_tx_can_tso(tx_queue, skb)) { + rc = efx_tx_tso_fallback(tx_queue, skb); + tx_queue->tso_fallbacks++; + if (rc) + goto err; + else + return 0; + } + + /* Map for DMA and create descriptors */ + rc = efx_tx_map_data(tx_queue, skb, segments); + if (rc) + goto err; + ef100_tx_make_descriptors(tx_queue, skb, segments); + + fill_level = efx_channel_tx_fill_level(tx_queue->channel); + if (fill_level > efx->txq_stop_thresh) { + netif_tx_stop_queue(tx_queue->core_txq); + /* Re-read after a memory barrier in case we've raced with + * the completion path. Otherwise there's a danger we'll never + * restart the queue if all completions have just happened. + */ + smp_mb(); + fill_level = efx_channel_tx_fill_level(tx_queue->channel); + if (fill_level < efx->txq_stop_thresh) + netif_tx_start_queue(tx_queue->core_txq); + } + + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more)) + tx_queue->xmit_more_available = false; /* push doorbell */ + else if (tx_queue->write_count - tx_queue->notify_count > 255) + /* Ensure we never push more than 256 packets at once */ + tx_queue->xmit_more_available = false; /* push */ + else + tx_queue->xmit_more_available = true; /* don't push yet */ + + if (!tx_queue->xmit_more_available) + ef100_tx_push_buffers(tx_queue); + + if (segments) { + tx_queue->tso_bursts++; + tx_queue->tso_packets += segments; + tx_queue->tx_packets += segments; + } else { + tx_queue->tx_packets++; + } + return 0; + +err: + efx_enqueue_unwind(tx_queue, old_insert_count); + if (!IS_ERR_OR_NULL(skb)) + dev_kfree_skb_any(skb); + + /* If we're not expecting another transmit and we had something to push + * on this queue then we need to push here to get the previous packets + * out. We only enter this branch from before the 'Update BQL' section + * above, so xmit_more_available still refers to the old state. 
+ */ + if (tx_queue->xmit_more_available && !xmit_more) + ef100_tx_push_buffers(tx_queue); + return rc; } diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h index 9a472f7aff43..fa23e430bdd7 100644 --- a/drivers/net/ethernet/sfc/ef100_tx.h +++ b/drivers/net/ethernet/sfc/ef100_tx.h @@ -17,6 +17,10 @@ int ef100_tx_probe(struct efx_tx_queue *tx_queue); void ef100_tx_init(struct efx_tx_queue *tx_queue); void ef100_tx_write(struct efx_tx_queue *tx_queue); +void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue); +unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx); + +void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event); netdev_tx_t ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 9791fac0b649..7bb7ecb480ae 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -173,6 +173,7 @@ struct efx_tx_buffer { #define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ #define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */ #define EFX_TX_BUF_XDP 0x20 /* buffer was sent with XDP */ +#define EFX_TX_BUF_TSO_V3 0x40 /* empty buffer for a TSO_V3 descriptor */ /** * struct efx_tx_queue - An Efx TX queue @@ -245,6 +246,7 @@ struct efx_tx_buffer { * @pio_packets: Number of times the TX PIO feature has been used * @xmit_more_available: Are any packets waiting to be pushed to the NIC * @cb_packets: Number of times the TX copybreak feature has been used + * @notify_count: Count of notified descriptors to the NIC * @empty_read_count: If the completion path has seen the queue as empty * and the transmission path has not yet checked this, the value of * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0. @@ -292,6 +294,7 @@ struct efx_tx_queue { unsigned int pio_packets; bool xmit_more_available; unsigned int cb_packets; + unsigned int notify_count; /* Statistics to supplement MAC stats */ unsigned long tx_packets; @@ -1669,6 +1672,24 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } +/* Get the max fill level of the TX queues on this channel */ +static inline unsigned int +efx_channel_tx_fill_level(struct efx_channel *channel) +{ + struct efx_tx_queue *tx_queue; + unsigned int fill_level = 0; + + /* This function is currently only used by EF100, which maybe + * could do something simpler and just compute the fill level + * of the single TXQ that's really in use. + */ + efx_for_each_channel_tx_queue(tx_queue, channel) + fill_level = max(fill_level, + tx_queue->insert_count - tx_queue->read_count); + + return fill_level; +} + /* Get all supported features. * If a feature is not fixed, it is present in hw_features. 
* If a feature is fixed, it does not present in hw_features, but diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c index 11b64c609550..793e234819a8 100644 --- a/drivers/net/ethernet/sfc/tx_common.c +++ b/drivers/net/ethernet/sfc/tx_common.c @@ -71,6 +71,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) "initialising TX queue %d\n", tx_queue->queue); tx_queue->insert_count = 0; + tx_queue->notify_count = 0; tx_queue->write_count = 0; tx_queue->packet_write_count = 0; tx_queue->old_write_count = 0; From a9dc3d5612ce6b48bd1d230d0e3c21478a9538b3 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:34:47 +0100 Subject: [PATCH 2019/2056] sfc_ef100: RX filter table management and related gubbins Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_netdev.c | 10 ++++ drivers/net/ethernet/sfc/ef100_nic.c | 67 +++++++++++++++++++++++++ 2 files changed, 77 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index ec9ca81fed85..362a915c836a 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -89,6 +89,7 @@ static int ef100_net_stop(struct net_device *net_dev) efx_disable_interrupts(efx); efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); + efx_remove_filters(efx); efx_fini_napi(efx); efx_remove_channels(efx); efx_mcdi_free_vis(efx); @@ -138,6 +139,10 @@ static int ef100_net_open(struct net_device *net_dev) efx_init_napi(efx); + rc = efx_probe_filters(efx); + if (rc) + goto fail; + rc = efx_nic_init_interrupt(efx); if (rc) goto fail; @@ -207,8 +212,13 @@ static const struct net_device_ops ef100_netdev_ops = { .ndo_open = ef100_net_open, .ndo_stop = ef100_net_stop, .ndo_start_xmit = ef100_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */ .ndo_get_phys_port_id = efx_get_phys_port_id, .ndo_get_phys_port_name = efx_get_phys_port_name, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = efx_filter_rfs, +#endif }; /* Netdev registration diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index fcc5b0de76d7..728e2ffd1d77 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -347,6 +347,37 @@ static int ef100_phy_probe(struct efx_nic *efx) return 0; } +static int ef100_filter_table_probe(struct efx_nic *efx) +{ + return efx_mcdi_filter_table_probe(efx, true); +} + +static int ef100_filter_table_up(struct efx_nic *efx) +{ + int rc; + + rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC); + if (rc) { + efx_mcdi_filter_table_down(efx); + return rc; + } + + rc = efx_mcdi_filter_add_vlan(efx, 0); + if (rc) { + efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC); + efx_mcdi_filter_table_down(efx); + } + + return rc; +} + +static void ef100_filter_table_down(struct efx_nic *efx) +{ + efx_mcdi_filter_del_vlan(efx, 0); + efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC); + efx_mcdi_filter_table_down(efx); +} + /* Other */ static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only) @@ -393,12 +424,24 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) __clear_bit(reset_type, &efx->reset_pending); rc = dev_open(efx->net_dev, NULL); } else if (reset_type == RESET_TYPE_ALL) { + /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters + * and reprobe after reset to avoid removing filters twice + */ + 
down_read(&efx->filter_sem); + ef100_filter_table_down(efx); + up_read(&efx->filter_sem); rc = efx_mcdi_reset(efx, reset_type); if (rc) return rc; netif_device_attach(efx->net_dev); + down_read(&efx->filter_sem); + rc = ef100_filter_table_up(efx); + up_read(&efx->filter_sem); + if (rc) + return rc; + rc = dev_open(efx->net_dev, NULL); } else { rc = 1; /* Leave the device closed */ @@ -480,6 +523,20 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .filter_table_probe = ef100_filter_table_up, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_remove = ef100_filter_table_down, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif .get_phys_port_id = efx_ef100_get_phys_port_id, @@ -840,6 +897,12 @@ static int ef100_probe_main(struct efx_nic *efx) if (rc) goto fail; + down_write(&efx->filter_sem); + rc = ef100_filter_table_probe(efx); + up_write(&efx->filter_sem); + if (rc) + goto fail; + rc = ef100_register_netdev(efx); if (rc) goto fail; @@ -877,6 +940,10 @@ void ef100_remove(struct efx_nic *efx) struct ef100_nic_data *nic_data = efx->nic_data; ef100_unregister_netdev(efx); + + down_write(&efx->filter_sem); + efx_mcdi_filter_table_remove(efx); + up_write(&efx->filter_sem); efx_fini_channels(efx); kfree(efx->phy_data); efx->phy_data = NULL; From 8e57daf70671e482209b5d231a181f04845cf73e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:36:28 +0100 Subject: [PATCH 2020/2056] sfc_ef100: RX path for EF100 Includes RSS spreading. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 25 ++++- drivers/net/ethernet/sfc/ef100_rx.c | 150 +++++++++++++++++++++++++-- drivers/net/ethernet/sfc/ef100_rx.h | 1 + 3 files changed, 167 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 728e2ffd1d77..bb753856d88f 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -260,6 +260,10 @@ static int ef100_ev_process(struct efx_channel *channel, int quota) ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE); switch (ev_type) { + case ESE_GZ_EF100_EV_RX_PKTS: + efx_ef100_ev_rx(channel, p_event); + ++spent; + break; case ESE_GZ_EF100_EV_MCDI: efx_mcdi_process_event(channel, p_event); break; @@ -482,9 +486,10 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx, /* NIC level access functions */ -#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | \ +#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \ - NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX) + NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \ + NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX) const struct efx_nic_type ef100_pf_nic_type = { .revision = EFX_REV_EF100, @@ -540,6 +545,16 @@ const struct efx_nic_type ef100_pf_nic_type = { .get_phys_port_id = efx_ef100_get_phys_port_id, + .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, + .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, + .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8, + .rx_hash_key_size = 40, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, + .rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config, + .rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + .reconfigure_mac = ef100_reconfigure_mac, /* Per-type bar/size configuration not used on ef100. Location of @@ -903,6 +918,12 @@ static int ef100_probe_main(struct efx_nic *efx) if (rc) goto fail; + netdev_rss_key_fill(efx->rss_context.rx_hash_key, + sizeof(efx->rss_context.rx_hash_key)); + + /* Don't fail init if RSS setup doesn't work. 
*/ + efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels); + rc = ef100_register_netdev(efx); if (rc) goto fail; diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c index 4223a38f46d3..13ba1a4f66fc 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.c +++ b/drivers/net/ethernet/sfc/ef100_rx.c @@ -12,20 +12,156 @@ #include "ef100_rx.h" #include "rx_common.h" #include "efx.h" +#include "nic_common.h" +#include "mcdi_functions.h" +#include "ef100_regs.h" +#include "ef100_nic.h" +#include "io.h" -/* RX stubs */ +/* Get the value of a field in the RX prefix */ +#define PREFIX_OFFSET_W(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32) +#define PREFIX_OFFSET_B(_f) (ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32) +#define PREFIX_WIDTH_MASK(_f) ((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1) +#define PREFIX_WORD(_p, _f) le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)]) +#define PREFIX_FIELD(_p, _f) ((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \ + PREFIX_WIDTH_MASK(_f)) -void ef100_rx_write(struct efx_rx_queue *rx_queue) +#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN \ + (ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN) +#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \ + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH + +static bool check_fcs(struct efx_channel *channel, u32 *prefix) { + u16 rxclass; + u8 l2status; + + rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS)); + l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS); + + if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK)) + /* Everything is ok */ + return 0; + + if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR) + channel->n_rx_eth_crc_err++; + return 1; } void __ef100_rx_packet(struct efx_channel *channel) { - /* Stub. No RX path yet. Discard the buffer. 
*/ - struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, - channel->rx_pkt_index); - struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index); + struct efx_nic *efx = channel->efx; + u8 *eh = efx_rx_buf_va(rx_buf); + __wsum csum = 0; + u32 *prefix; - efx_free_rx_buffers(rx_queue, rx_buf, 1); + prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN); + + if (check_fcs(channel, prefix) && + unlikely(!(efx->net_dev->features & NETIF_F_RXALL))) + goto out; + + rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH)); + if (rx_buf->len <= sizeof(struct ethhdr)) { + if (net_ratelimit()) + netif_err(channel->efx, rx_err, channel->efx->net_dev, + "RX packet too small (%d)\n", rx_buf->len); + ++channel->n_rx_frm_trunc; + goto out; + } + + if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) { + if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) { + ++channel->n_rx_ip_hdr_chksum_err; + } else { + u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME)); + + csum = (__force __wsum) sum; + } + } + + if (channel->type->receive_skb) { + struct efx_rx_queue *rx_queue = + efx_channel_get_rx_queue(channel); + + /* no support for special channels yet, so just discard */ + WARN_ON_ONCE(1); + efx_free_rx_buffers(rx_queue, rx_buf, 1); + goto out; + } + + efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum); + +out: channel->rx_pkt_n_frags = 0; } + +static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index) +{ + struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index); + struct efx_channel *channel = efx_rx_queue_channel(rx_queue); + struct efx_nic *efx = rx_queue->efx; + + ++rx_queue->rx_packets; + + netif_vdbg(efx, rx_status, efx->net_dev, + "RX queue %d received id %x\n", + efx_rx_queue_index(rx_queue), index); + + efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len); + + prefetch(efx_rx_buf_va(rx_buf)); + + rx_buf->page_offset += efx->rx_prefix_size; + + efx_recycle_rx_pages(channel, rx_buf, 1); + + efx_rx_flush_packet(channel); + channel->rx_pkt_n_frags = 1; + channel->rx_pkt_index = index; +} + +void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event) +{ + struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); + unsigned int n_packets = + EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT); + int i; + + WARN_ON_ONCE(!n_packets); + if (n_packets > 1) + ++channel->n_rx_merge_events; + + channel->irq_mod_score += 2 * n_packets; + + for (i = 0; i < n_packets; ++i) { + ef100_rx_packet(rx_queue, + rx_queue->removed_count & rx_queue->ptr_mask); + ++rx_queue->removed_count; + } +} + +void ef100_rx_write(struct efx_rx_queue *rx_queue) +{ + struct efx_rx_buffer *rx_buf; + unsigned int idx; + efx_qword_t *rxd; + efx_dword_t rxdb; + + while (rx_queue->notified_count != rx_queue->added_count) { + idx = rx_queue->notified_count & rx_queue->ptr_mask; + rx_buf = efx_rx_buffer(rx_queue, idx); + rxd = efx_rx_desc(rx_queue, idx); + + EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr); + + ++rx_queue->notified_count; + } + + wmb(); + EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX, + rx_queue->added_count & rx_queue->ptr_mask); + efx_writed_page(rx_queue->efx, &rxdb, + ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue)); +} diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h index b5dadf741aa0..f2f266863966 100644 --- a/drivers/net/ethernet/sfc/ef100_rx.h +++ 
b/drivers/net/ethernet/sfc/ef100_rx.h @@ -14,6 +14,7 @@ #include "net_driver.h" +void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event); void ef100_rx_write(struct efx_rx_queue *rx_queue); void __ef100_rx_packet(struct efx_channel *channel); From b780feac367e5b77028b8088829b7958944bd71a Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:36:44 +0100 Subject: [PATCH 2021/2056] sfc_ef100: plumb in fini_dmaq Bring down the TX and RX queues at ifdown, so that we can then fini the EVQs (otherwise the MC would return EBUSY because they're still in use). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index bb753856d88f..1953e16b2b96 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -528,6 +528,7 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_remove = efx_mcdi_rx_remove, .rx_write = ef100_rx_write, .rx_packet = __ef100_rx_packet, + .fini_dmaq = efx_fini_dmaq, .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, .filter_table_probe = ef100_filter_table_up, .filter_table_restore = efx_mcdi_filter_table_restore, From b593b6f1b4921700c00394d35e098259e3d04913 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:37:20 +0100 Subject: [PATCH 2022/2056] sfc_ef100: statistics gathering MAC stats work much the same as on EF10, with a periodic DMA to a region specified via an MCDI. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_netdev.c | 6 + drivers/net/ethernet/sfc/ef100_nic.c | 170 ++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_nic.h | 41 ++++++ 3 files changed, 217 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 362a915c836a..63c311ba28b9 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -86,6 +86,7 @@ static int ef100_net_stop(struct net_device *net_dev) netif_stop_queue(net_dev); efx_stop_all(efx); + efx_mcdi_mac_fini_stats(efx); efx_disable_interrupts(efx); efx_clear_interrupt_affinity(efx); efx_nic_fini_interrupt(efx); @@ -157,6 +158,10 @@ static int ef100_net_open(struct net_device *net_dev) */ (void) efx_mcdi_poll_reboot(efx); + rc = efx_mcdi_mac_init_stats(efx); + if (rc) + goto fail; + efx_start_all(efx); /* Link state detection is normally event-driven; we have @@ -212,6 +217,7 @@ static const struct net_device_ops ef100_netdev_ops = { .ndo_open = ef100_net_open, .ndo_stop = ef100_net_stop, .ndo_start_xmit = ef100_hard_start_xmit, + .ndo_get_stats64 = efx_net_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */ .ndo_get_phys_port_id = efx_get_phys_port_id, diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 1953e16b2b96..c81da0b2f0c1 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -453,6 +453,171 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type) return rc; } +static void ef100_common_stat_mask(unsigned long *mask) +{ + __set_bit(EF100_STAT_port_rx_packets, mask); + __set_bit(EF100_STAT_port_tx_packets, mask); + __set_bit(EF100_STAT_port_rx_bytes, mask); + __set_bit(EF100_STAT_port_tx_bytes, mask); + __set_bit(EF100_STAT_port_rx_multicast, mask); + __set_bit(EF100_STAT_port_rx_bad, mask); + 
__set_bit(EF100_STAT_port_rx_align_error, mask); + __set_bit(EF100_STAT_port_rx_overflow, mask); +} + +static void ef100_ethtool_stat_mask(unsigned long *mask) +{ + __set_bit(EF100_STAT_port_tx_pause, mask); + __set_bit(EF100_STAT_port_tx_unicast, mask); + __set_bit(EF100_STAT_port_tx_multicast, mask); + __set_bit(EF100_STAT_port_tx_broadcast, mask); + __set_bit(EF100_STAT_port_tx_lt64, mask); + __set_bit(EF100_STAT_port_tx_64, mask); + __set_bit(EF100_STAT_port_tx_65_to_127, mask); + __set_bit(EF100_STAT_port_tx_128_to_255, mask); + __set_bit(EF100_STAT_port_tx_256_to_511, mask); + __set_bit(EF100_STAT_port_tx_512_to_1023, mask); + __set_bit(EF100_STAT_port_tx_1024_to_15xx, mask); + __set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask); + __set_bit(EF100_STAT_port_rx_good, mask); + __set_bit(EF100_STAT_port_rx_pause, mask); + __set_bit(EF100_STAT_port_rx_unicast, mask); + __set_bit(EF100_STAT_port_rx_broadcast, mask); + __set_bit(EF100_STAT_port_rx_lt64, mask); + __set_bit(EF100_STAT_port_rx_64, mask); + __set_bit(EF100_STAT_port_rx_65_to_127, mask); + __set_bit(EF100_STAT_port_rx_128_to_255, mask); + __set_bit(EF100_STAT_port_rx_256_to_511, mask); + __set_bit(EF100_STAT_port_rx_512_to_1023, mask); + __set_bit(EF100_STAT_port_rx_1024_to_15xx, mask); + __set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask); + __set_bit(EF100_STAT_port_rx_gtjumbo, mask); + __set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask); + __set_bit(EF100_STAT_port_rx_length_error, mask); + __set_bit(EF100_STAT_port_rx_nodesc_drops, mask); + __set_bit(GENERIC_STAT_rx_nodesc_trunc, mask); + __set_bit(GENERIC_STAT_rx_noskb_drops, mask); +} + +#define EF100_DMA_STAT(ext_name, mcdi_name) \ + [EF100_STAT_ ## ext_name] = \ + { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name } + +static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = { + EF100_DMA_STAT(port_tx_bytes, TX_BYTES), + EF100_DMA_STAT(port_tx_packets, TX_PKTS), + EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS), + EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS), + EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS), + EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS), + EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS), + EF100_DMA_STAT(port_tx_64, TX_64_PKTS), + EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS), + EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS), + EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS), + EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS), + EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS), + EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS), + EF100_DMA_STAT(port_rx_bytes, RX_BYTES), + EF100_DMA_STAT(port_rx_packets, RX_PKTS), + EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS), + EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS), + EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS), + EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS), + EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS), + EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS), + EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS), + EF100_DMA_STAT(port_rx_64, RX_64_PKTS), + EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS), + EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS), + EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS), + EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS), + EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS), + EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS), + EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS), + EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS), + 
EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS), + EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS), + EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS), + EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS), + EFX_GENERIC_SW_STAT(rx_nodesc_trunc), + EFX_GENERIC_SW_STAT(rx_noskb_drops), +}; + +static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names) +{ + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + + ef100_ethtool_stat_mask(mask); + return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT, + mask, names); +} + +static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + struct ef100_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + size_t stats_count = 0, index; + u64 *stats = nic_data->stats; + + ef100_ethtool_stat_mask(mask); + + if (full_stats) { + for_each_set_bit(index, mask, EF100_STAT_COUNT) { + if (ef100_stat_desc[index].name) { + *full_stats++ = stats[index]; + ++stats_count; + } + } + } + + if (!core_stats) + return stats_count; + + core_stats->rx_packets = stats[EF100_STAT_port_rx_packets]; + core_stats->tx_packets = stats[EF100_STAT_port_tx_packets]; + core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes]; + core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes]; + core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] + + stats[GENERIC_STAT_rx_nodesc_trunc] + + stats[GENERIC_STAT_rx_noskb_drops]; + core_stats->multicast = stats[EF100_STAT_port_rx_multicast]; + core_stats->rx_length_errors = + stats[EF100_STAT_port_rx_gtjumbo] + + stats[EF100_STAT_port_rx_length_error]; + core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad]; + core_stats->rx_frame_errors = + stats[EF100_STAT_port_rx_align_error]; + core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow]; + core_stats->rx_errors = (core_stats->rx_length_errors + + core_stats->rx_crc_errors + + core_stats->rx_frame_errors); + + return stats_count; +} + +static size_t ef100_update_stats(struct efx_nic *efx, + u64 *full_stats, + struct rtnl_link_stats64 *core_stats) +{ + __le64 *mc_stats = kmalloc(array_size(efx->num_mac_stats, sizeof(__le64)), GFP_ATOMIC); + struct ef100_nic_data *nic_data = efx->nic_data; + DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {}; + u64 *stats = nic_data->stats; + + ef100_common_stat_mask(mask); + ef100_ethtool_stat_mask(mask); + + efx_nic_copy_stats(efx, mc_stats); + efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask, + stats, mc_stats, false); + + kfree(mc_stats); + + return ef100_update_stats_common(efx, full_stats, core_stats); +} + static int efx_ef100_get_phys_port_id(struct efx_nic *efx, struct netdev_phys_item_id *ppid) { @@ -557,6 +722,11 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, .reconfigure_mac = ef100_reconfigure_mac, + .describe_stats = ef100_describe_stats, + .start_stats = efx_mcdi_mac_start_stats, + .update_stats = ef100_update_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, /* Per-type bar/size configuration not used on ef100. Location of * registers is defined by extended capabilities. 
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index c8816bc6ae78..7c2d37490074 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -17,6 +17,46 @@ extern const struct efx_nic_type ef100_pf_nic_type; int ef100_probe_pf(struct efx_nic *efx); void ef100_remove(struct efx_nic *efx); +enum { + EF100_STAT_port_tx_bytes = GENERIC_STAT_COUNT, + EF100_STAT_port_tx_packets, + EF100_STAT_port_tx_pause, + EF100_STAT_port_tx_unicast, + EF100_STAT_port_tx_multicast, + EF100_STAT_port_tx_broadcast, + EF100_STAT_port_tx_lt64, + EF100_STAT_port_tx_64, + EF100_STAT_port_tx_65_to_127, + EF100_STAT_port_tx_128_to_255, + EF100_STAT_port_tx_256_to_511, + EF100_STAT_port_tx_512_to_1023, + EF100_STAT_port_tx_1024_to_15xx, + EF100_STAT_port_tx_15xx_to_jumbo, + EF100_STAT_port_rx_bytes, + EF100_STAT_port_rx_packets, + EF100_STAT_port_rx_good, + EF100_STAT_port_rx_bad, + EF100_STAT_port_rx_pause, + EF100_STAT_port_rx_unicast, + EF100_STAT_port_rx_multicast, + EF100_STAT_port_rx_broadcast, + EF100_STAT_port_rx_lt64, + EF100_STAT_port_rx_64, + EF100_STAT_port_rx_65_to_127, + EF100_STAT_port_rx_128_to_255, + EF100_STAT_port_rx_256_to_511, + EF100_STAT_port_rx_512_to_1023, + EF100_STAT_port_rx_1024_to_15xx, + EF100_STAT_port_rx_15xx_to_jumbo, + EF100_STAT_port_rx_gtjumbo, + EF100_STAT_port_rx_bad_gtjumbo, + EF100_STAT_port_rx_align_error, + EF100_STAT_port_rx_length_error, + EF100_STAT_port_rx_overflow, + EF100_STAT_port_rx_nodesc_drops, + EF100_STAT_COUNT +}; + struct ef100_nic_data { struct efx_nic *efx; struct efx_buffer mcdi_buf; @@ -26,6 +66,7 @@ struct ef100_nic_data { u16 warm_boot_count; u8 port_id[ETH_ALEN]; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); + u64 stats[EF100_STAT_COUNT]; u16 tso_max_hdr_len; u16 tso_max_payload_num_segs; u16 tso_max_frames; From 43c3df0d56474dd27e1f2a1ca70e9060d341e695 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:37:50 +0100 Subject: [PATCH 2023/2056] sfc_ef100: functions for selftests Self-tests for event and interrupt reception and NVRAM. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 47 ++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index c81da0b2f0c1..90fc44052abf 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -632,6 +632,50 @@ static int efx_ef100_get_phys_port_id(struct efx_nic *efx, return 0; } +static int efx_ef100_irq_test_generate(struct efx_nic *efx) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN); + + BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0); + + MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level); + return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT, + inbuf, sizeof(inbuf), NULL, 0, NULL); +} + +#define EFX_EF100_TEST 1 + +static void efx_ef100_ev_test_generate(struct efx_channel *channel) +{ + MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN); + struct efx_nic *efx = channel->efx; + efx_qword_t event; + int rc; + + EFX_POPULATE_QWORD_2(event, + ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER, + ESF_GZ_DRIVER_DATA, EFX_EF100_TEST); + + MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel); + + /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has + * already swapped the data to little-endian order. 
+ */ + memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0], + sizeof(efx_qword_t)); + + rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf), + NULL, 0, NULL); + if (rc && (rc != -ENETDOWN)) + goto fail; + + return; + +fail: + WARN_ON(true); + netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); +} + static unsigned int ef100_check_caps(const struct efx_nic *efx, u8 flag, u32 offset) { @@ -668,6 +712,7 @@ const struct efx_nic_type ef100_pf_nic_type = { .mcdi_poll_reboot = ef100_mcdi_poll_reboot, .mcdi_reboot_detected = ef100_mcdi_reboot_detected, .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef100_irq_test_generate, .irq_disable_non_ev = efx_port_dummy_op_void, .push_irq_moderation = efx_channel_dummy_op_void, .min_interrupt_mode = EFX_INT_MODE_MSIX, @@ -684,6 +729,7 @@ const struct efx_nic_type ef100_pf_nic_type = { .irq_handle_msi = ef100_msi_interrupt, .ev_process = ef100_ev_process, .ev_read_ack = ef100_ev_read_ack, + .ev_test_generate = efx_ef100_ev_test_generate, .tx_probe = ef100_tx_probe, .tx_init = ef100_tx_init, .tx_write = ef100_tx_write, @@ -722,6 +768,7 @@ const struct efx_nic_type ef100_pf_nic_type = { .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, .reconfigure_mac = ef100_reconfigure_mac, + .test_nvram = efx_new_mcdi_nvram_test_all, .describe_stats = ef100_describe_stats, .start_stats = efx_mcdi_mac_start_stats, .update_stats = ef100_update_stats, From ef2c57b956143c300adef4b89438e1da3db4cf32 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:38:49 +0100 Subject: [PATCH 2024/2056] sfc_ef100: read pf_index at probe time We'll need it later, for VF representors. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 4 ++++ drivers/net/ethernet/sfc/ef100_nic.h | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 90fc44052abf..10748efbf98e 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -1096,6 +1096,10 @@ static int ef100_probe_main(struct efx_nic *efx) if (rc) goto fail; + rc = efx_get_pf_index(efx, &nic_data->pf_index); + if (rc) + goto fail; + rc = efx_ef100_init_datapath_caps(efx); if (rc < 0) goto fail; diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 7c2d37490074..4a64c9438493 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -63,6 +63,7 @@ struct ef100_nic_data { u32 datapath_caps; u32 datapath_caps2; u32 datapath_caps3; + unsigned int pf_index; u16 warm_boot_count; u8 port_id[ETH_ALEN]; DECLARE_BITMAP(evq_phases, EFX_MAX_CHANNELS); From d61592a112928a11cd04081adf51ba4f752859ed Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 3 Aug 2020 21:40:01 +0100 Subject: [PATCH 2025/2056] sfc_ef100: add nic-type for VFs, and bind to them We don't yet have a .sriov_configure() to create them, though. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef100.c | 2 + drivers/net/ethernet/sfc/ef100_nic.c | 77 ++++++++++++++++++++++++++++ drivers/net/ethernet/sfc/ef100_nic.h | 2 + 3 files changed, 81 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c index de611c0f94e7..9729983f4840 100644 --- a/drivers/net/ethernet/sfc/ef100.c +++ b/drivers/net/ethernet/sfc/ef100.c @@ -527,6 +527,8 @@ static int ef100_pci_probe(struct pci_dev *pci_dev, static const struct pci_device_id ef100_pci_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x0100), /* Riverhead PF */ .driver_data = (unsigned long) &ef100_pf_nic_type }, + {PCI_DEVICE(PCI_VENDOR_ID_XILINX, 0x1100), /* Riverhead VF */ + .driver_data = (unsigned long) &ef100_vf_nic_type }, {0} /* end of list */ }; diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 10748efbf98e..8a2126fec078 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -783,6 +783,78 @@ const struct efx_nic_type ef100_pf_nic_type = { }; +const struct efx_nic_type ef100_vf_nic_type = { + .revision = EFX_REV_EF100, + .is_vf = true, + .probe = ef100_probe_vf, + .offload_features = EF100_OFFLOAD_FEATURES, + .mcdi_max_ver = 2, + .mcdi_request = ef100_mcdi_request, + .mcdi_poll_response = ef100_mcdi_poll_response, + .mcdi_read_response = ef100_mcdi_read_response, + .mcdi_poll_reboot = ef100_mcdi_poll_reboot, + .mcdi_reboot_detected = ef100_mcdi_reboot_detected, + .irq_enable_master = efx_port_dummy_op_void, + .irq_test_generate = efx_ef100_irq_test_generate, + .irq_disable_non_ev = efx_port_dummy_op_void, + .push_irq_moderation = efx_channel_dummy_op_void, + .min_interrupt_mode = EFX_INT_MODE_MSIX, + .map_reset_reason = ef100_map_reset_reason, + .map_reset_flags = ef100_map_reset_flags, + .reset = ef100_reset, + .check_caps = ef100_check_caps, + .ev_probe = ef100_ev_probe, + .ev_init = ef100_ev_init, + .ev_fini = efx_mcdi_ev_fini, + .ev_remove = efx_mcdi_ev_remove, + .irq_handle_msi = ef100_msi_interrupt, + .ev_process = ef100_ev_process, + .ev_read_ack = ef100_ev_read_ack, + .ev_test_generate = efx_ef100_ev_test_generate, + .tx_probe = ef100_tx_probe, + .tx_init = ef100_tx_init, + .tx_write = ef100_tx_write, + .tx_enqueue = ef100_enqueue_skb, + .rx_probe = efx_mcdi_rx_probe, + .rx_init = efx_mcdi_rx_init, + .rx_remove = efx_mcdi_rx_remove, + .rx_write = ef100_rx_write, + .rx_packet = __ef100_rx_packet, + .fini_dmaq = efx_fini_dmaq, + .max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS, + .filter_table_probe = ef100_filter_table_up, + .filter_table_restore = efx_mcdi_filter_table_restore, + .filter_table_remove = ef100_filter_table_down, + .filter_insert = efx_mcdi_filter_insert, + .filter_remove_safe = efx_mcdi_filter_remove_safe, + .filter_get_safe = efx_mcdi_filter_get_safe, + .filter_clear_rx = efx_mcdi_filter_clear_rx, + .filter_count_rx_used = efx_mcdi_filter_count_rx_used, + .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, + .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, + .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, + + .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, + .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, + .rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8, + .rx_hash_key_size = 40, + .rx_pull_rss_config = efx_mcdi_rx_pull_rss_config, + .rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config, + .rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts, + + .reconfigure_mac = ef100_reconfigure_mac, + .test_nvram = 
efx_new_mcdi_nvram_test_all, + .describe_stats = ef100_describe_stats, + .start_stats = efx_mcdi_mac_start_stats, + .update_stats = ef100_update_stats, + .pull_stats = efx_mcdi_mac_pull_stats, + .stop_stats = efx_mcdi_mac_stop_stats, + + .mem_bar = NULL, + .mem_map_size = NULL, + +}; + static int compare_versions(const char *a, const char *b) { int a_major, a_minor, a_point, a_patch; @@ -1178,6 +1250,11 @@ int ef100_probe_pf(struct efx_nic *efx) return rc; } +int ef100_probe_vf(struct efx_nic *efx) +{ + return ef100_probe_main(efx); +} + void ef100_remove(struct efx_nic *efx) { struct ef100_nic_data *nic_data = efx->nic_data; diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h index 4a64c9438493..e799688d5264 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.h +++ b/drivers/net/ethernet/sfc/ef100_nic.h @@ -13,8 +13,10 @@ #include "nic_common.h" extern const struct efx_nic_type ef100_pf_nic_type; +extern const struct efx_nic_type ef100_vf_nic_type; int ef100_probe_pf(struct efx_nic *efx); +int ef100_probe_vf(struct efx_nic *efx); void ef100_remove(struct efx_nic *efx); enum { From da7955405fb25c11a541234b32d06d9c09f81be6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Mon, 3 Aug 2020 18:29:39 -0700 Subject: [PATCH 2026/2056] sfc: Fix build with CONFIG_RFS_ACCEL disabled. drivers/net/ethernet/sfc/ef100_nic.c:835:3: error: 'const struct efx_nic_type' has no member named 'filter_rfs_expire_one' 835 | .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, | ^~~~~~~~~~~~~~~~~~~~~ >> drivers/net/ethernet/sfc/ef100_nic.c:835:27: error: initialization of 'void (*)(struct efx_nic *, u32)' {aka 'void (*)(struct efx_nic *, unsigned int)'} from incompatible pointer type 'bool (*)(struct efx_nic *, u32, unsigned int)' {aka '_Bool (*)(struct efx_nic *, unsigned int, unsigned int)'} [-Werror=incompatible-pointer-types] 835 | .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Reported-by: kernel test robot Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef100_nic.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 8a2126fec078..36598d0542ed 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -832,7 +832,9 @@ const struct efx_nic_type ef100_vf_nic_type = { .filter_count_rx_used = efx_mcdi_filter_count_rx_used, .filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit, .filter_get_rx_ids = efx_mcdi_filter_get_rx_ids, +#ifdef CONFIG_RFS_ACCEL .filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one, +#endif .rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN, .rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8, From 088c5f0d1a7c7f01e668d9d2d75e7d93b43b7690 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Tue, 4 Aug 2020 10:19:11 +0800 Subject: [PATCH 2027/2056] hinic: add generating mailbox random index support add support to generate mailbox random id of VF to ensure that mailbox messages PF received are from the correct VF. Signed-off-by: Luo bin Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_hw_dev.h | 13 ++ .../net/ethernet/huawei/hinic/hinic_hw_mbox.c | 137 ++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_mbox.h | 8 + .../net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 2 + .../net/ethernet/huawei/hinic/hinic_sriov.c | 7 + 5 files changed, 167 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 2fb5f784f116..dc6e645f2689 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -28,6 +28,8 @@ #define HINIC_MGMT_STATUS_EXIST 0x6 #define HINIC_MGMT_CMD_UNSUPPORTED 0xFF +#define HINIC_CMD_VER_FUNC_ID 2 + struct hinic_cap { u16 max_qps; u16 num_qps; @@ -313,6 +315,17 @@ struct hinic_msix_config { u8 rsvd1[3]; }; +struct hinic_set_random_id { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 vf_in_pf; + u8 rsvd1; + u16 func_idx; + u32 random_id; +}; + struct hinic_board_info { u32 board_type; u32 port_num; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c index 47c93f946b94..96f89dcc35dc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c @@ -486,6 +486,111 @@ static void recv_mbox_handler(struct hinic_mbox_func_to_func *func_to_func, kfree(rcv_mbox_temp); } +static int set_vf_mbox_random_id(struct hinic_hwdev *hwdev, u16 func_id) +{ + struct hinic_mbox_func_to_func *func_to_func = hwdev->func_to_func; + struct hinic_set_random_id rand_info = {0}; + u16 out_size = sizeof(rand_info); + struct hinic_pfhwdev *pfhwdev; + int ret; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + rand_info.version = HINIC_CMD_VER_FUNC_ID; + rand_info.func_idx = func_id; + rand_info.vf_in_pf = func_id - hinic_glb_pf_vf_offset(hwdev->hwif); + rand_info.random_id = get_random_u32(); + + func_to_func->vf_mbx_rand_id[func_id] = rand_info.random_id; + + ret = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_MGMT_CMD_SET_VF_RANDOM_ID, + &rand_info, sizeof(rand_info), + &rand_info, &out_size, HINIC_MGMT_MSG_SYNC); + if ((rand_info.status != HINIC_MGMT_CMD_UNSUPPORTED && + rand_info.status) || !out_size || ret) { + dev_err(&hwdev->hwif->pdev->dev, "Set VF random id failed, err: %d, status: 0x%x, out size: 0x%x\n", + ret, rand_info.status, out_size); + return -EIO; + } + + if (rand_info.status == HINIC_MGMT_CMD_UNSUPPORTED) + return rand_info.status; + + func_to_func->vf_mbx_old_rand_id[func_id] = + func_to_func->vf_mbx_rand_id[func_id]; + + return 0; +} + +static void update_random_id_work_handler(struct work_struct *work) +{ + struct hinic_mbox_work *mbox_work = + container_of(work, struct hinic_mbox_work, work); + struct hinic_mbox_func_to_func *func_to_func; + u16 src = mbox_work->src_func_idx; + + func_to_func = mbox_work->func_to_func; + + if (set_vf_mbox_random_id(func_to_func->hwdev, src)) + dev_warn(&func_to_func->hwdev->hwif->pdev->dev, "Update VF id: 0x%x random id failed\n", + mbox_work->src_func_idx); + + kfree(mbox_work); +} + +static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func, + u8 *header) +{ + struct hinic_hwdev *hwdev = func_to_func->hwdev; + struct hinic_mbox_work *mbox_work = NULL; + u64 mbox_header = *((u64 *)header); + u16 offset, src; + u32 random_id; + int vf_in_pf; + + src = HINIC_MBOX_HEADER_GET(mbox_header, SRC_GLB_FUNC_IDX); + + if (IS_PF_OR_PPF_SRC(src) || !func_to_func->support_vf_random) + return 
true; + + if (!HINIC_IS_PPF(hwdev->hwif)) { + offset = hinic_glb_pf_vf_offset(hwdev->hwif); + vf_in_pf = src - offset; + + if (vf_in_pf < 1 || vf_in_pf > hwdev->nic_cap.max_vf) { + dev_warn(&hwdev->hwif->pdev->dev, + "Receive vf id(0x%x) is invalid, vf id should be from 0x%x to 0x%x\n", + src, offset + 1, + hwdev->nic_cap.max_vf + offset); + return false; + } + } + + random_id = be32_to_cpu(*(u32 *)(header + MBOX_SEG_LEN + + MBOX_HEADER_SZ)); + + if (random_id == func_to_func->vf_mbx_rand_id[src] || + random_id == func_to_func->vf_mbx_old_rand_id[src]) + return true; + + dev_warn(&hwdev->hwif->pdev->dev, + "The mailbox random id(0x%x) of func_id(0x%x) doesn't match with pf reservation(0x%x)\n", + random_id, src, func_to_func->vf_mbx_rand_id[src]); + + mbox_work = kzalloc(sizeof(*mbox_work), GFP_KERNEL); + if (!mbox_work) + return false; + + mbox_work->func_to_func = func_to_func; + mbox_work->src_func_idx = src; + + INIT_WORK(&mbox_work->work, update_random_id_work_handler); + queue_work(func_to_func->workq, &mbox_work->work); + + return false; +} + void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size) { struct hinic_mbox_func_to_func *func_to_func; @@ -504,6 +609,9 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size) return; } + if (!check_vf_mbox_random_id(func_to_func, header)) + return; + recv_mbox = (dir == HINIC_HWIF_DIRECT_SEND) ? &func_to_func->mbox_send[src] : &func_to_func->mbox_resp[src]; @@ -1210,3 +1318,32 @@ void hinic_func_to_func_free(struct hinic_hwdev *hwdev) kfree(func_to_func); } + +int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev) +{ + u16 vf_offset; + u8 vf_in_pf; + int err = 0; + + if (HINIC_IS_VF(hwdev->hwif)) + return 0; + + vf_offset = hinic_glb_pf_vf_offset(hwdev->hwif); + + for (vf_in_pf = 1; vf_in_pf <= hwdev->nic_cap.max_vf; vf_in_pf++) { + err = set_vf_mbox_random_id(hwdev, vf_offset + vf_in_pf); + if (err) + break; + } + + if (err == HINIC_MGMT_CMD_UNSUPPORTED) { + hwdev->func_to_func->support_vf_random = false; + err = 0; + dev_warn(&hwdev->hwif->pdev->dev, "Mgmt is unsupported to set VF%d random id\n", + vf_in_pf - 1); + } else if (!err) { + hwdev->func_to_func->support_vf_random = true; + } + + return err; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h index 7b18559bfe80..0618fe515d9c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h @@ -22,6 +22,8 @@ #define HINIC_FUNC_CSR_MAILBOX_RESULT_H_OFF 0x0108 #define HINIC_FUNC_CSR_MAILBOX_RESULT_L_OFF 0x010C +#define MAX_FUNCTION_NUM 512 + enum hinic_mbox_ack_type { MBOX_ACK, MBOX_NO_ACK, @@ -100,6 +102,10 @@ struct hinic_mbox_func_to_func { /* lock for mbox event flag */ spinlock_t mbox_lock; + + u32 vf_mbx_old_rand_id[MAX_FUNCTION_NUM]; + u32 vf_mbx_rand_id[MAX_FUNCTION_NUM]; + bool support_vf_random; }; struct hinic_mbox_work { @@ -151,4 +157,6 @@ int hinic_mbox_to_vf(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, u32 timeout); +int hinic_vf_mbox_random_id_init(struct hinic_hwdev *hwdev); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index f626100b85c1..4ca81cc838db 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -93,6 +93,8 @@ enum hinic_comm_cmd { HINIC_COMM_CMD_WATCHDOG_INFO = 0x56, + 
HINIC_MGMT_CMD_SET_VF_RANDOM_ID = 0x61, + HINIC_COMM_CMD_MAX, }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index 141206917e4d..1d8a115cb9ec 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1108,6 +1108,13 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev) int err = 0; u32 size, i; + err = hinic_vf_mbox_random_id_init(hwdev); + if (err) { + dev_err(&hwdev->hwif->pdev->dev, "Failed to init vf mbox random id, err: %d\n", + err); + return err; + } + nic_io = &hwdev->func_to_io; if (HINIC_IS_VF(hwdev->hwif)) { From c8c29ec3c5fa04137a0e1afa43e1b23ba509c3c4 Mon Sep 17 00:00:00 2001 From: Luo bin Date: Tue, 4 Aug 2020 10:19:12 +0800 Subject: [PATCH 2028/2056] hinic: add check for mailbox msg from VF PF should check whether the cmd from VF is supported and its content is right before passing it to hw. Signed-off-by: Luo bin Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 8 + .../net/ethernet/huawei/hinic/hinic_hw_mbox.c | 173 +++++++++++++++++- .../net/ethernet/huawei/hinic/hinic_hw_mbox.h | 14 ++ .../net/ethernet/huawei/hinic/hinic_sriov.c | 62 ++++++- 4 files changed, 255 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h index f40c31e1879f..9c413e963a04 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -31,6 +31,10 @@ (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ << HINIC_CMDQ_CTXT_##member##_SHIFT) +#define HINIC_CMDQ_CTXT_PAGE_INFO_GET(val, member) \ + (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \ + & HINIC_CMDQ_CTXT_##member##_MASK) + #define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ << HINIC_CMDQ_CTXT_##member##_SHIFT))) @@ -45,6 +49,10 @@ (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ << HINIC_CMDQ_CTXT_##member##_SHIFT) +#define HINIC_CMDQ_CTXT_BLOCK_INFO_GET(val, member) \ + (((u64)(val) >> HINIC_CMDQ_CTXT_##member##_SHIFT) \ + & HINIC_CMDQ_CTXT_##member##_MASK) + #define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ << HINIC_CMDQ_CTXT_##member##_SHIFT))) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c index 96f89dcc35dc..5078c0c73863 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c @@ -153,7 +153,6 @@ enum hinic_mbox_tx_status { (MBOX_MSG_ID(func_to_func_mbox) + 1) & MBOX_MSG_ID_MASK) #define FUNC_ID_OFF_SET_8B 8 -#define FUNC_ID_OFF_SET_10B 10 /* max message counter wait to process for one function */ #define HINIC_MAX_MSG_CNT_TO_PROCESS 10 @@ -189,6 +188,37 @@ enum mbox_aeq_trig_type { TRIGGER, }; +static bool check_func_id(struct hinic_hwdev *hwdev, u16 src_func_idx, + const void *buf_in, u16 in_size, u16 offset) +{ + u16 func_idx; + + if (in_size < offset + sizeof(func_idx)) { + dev_warn(&hwdev->hwif->pdev->dev, + "Receive mailbox msg len: %d less than %d Bytes is invalid\n", + in_size, offset); + return false; + } + + func_idx = *((u16 *)((u8 *)buf_in + offset)); + + if (src_func_idx != func_idx) { + dev_warn(&hwdev->hwif->pdev->dev, + "Receive mailbox function id: 0x%x not equal to msg function id: 0x%x\n", + src_func_idx, func_idx); + return false; + } + + return true; +} + +bool 
hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + return check_func_id(hwdev, func_idx, buf_in, in_size, + FUNC_ID_OFF_SET_8B); +} + /** * hinic_register_pf_mbox_cb - register mbox callback for pf * @hwdev: the pointer to hw device @@ -1205,15 +1235,156 @@ static void free_mbox_wb_status(struct hinic_mbox_func_to_func *func_to_func) send_mbox->wb_paddr); } +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, u8 size) +{ + u16 src_idx = vf_id + hinic_glb_pf_vf_offset(hwdev->hwif); + int i; + + for (i = 0; i < size; i++) { + if (cmd == cmd_handle[i].cmd) { + if (cmd_handle[i].check_cmd) + return cmd_handle[i].check_cmd(hwdev, src_idx, + buf_in, in_size); + else + return true; + } + } + + dev_err(&hwdev->hwif->pdev->dev, + "PF Receive VF(%d) unsupported cmd(0x%x)\n", + vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd); + + return false; +} + +static bool hinic_cmdq_check_vf_ctxt(struct hinic_hwdev *hwdev, + struct hinic_cmdq_ctxt *cmdq_ctxt) +{ + struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info; + u64 curr_pg_pfn, wq_block_pfn; + + if (cmdq_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif) || + cmdq_ctxt->cmdq_type > HINIC_MAX_CMDQ_TYPES) + return false; + + curr_pg_pfn = HINIC_CMDQ_CTXT_PAGE_INFO_GET + (ctxt_info->curr_wqe_page_pfn, CURR_WQE_PAGE_PFN); + wq_block_pfn = HINIC_CMDQ_CTXT_BLOCK_INFO_GET + (ctxt_info->wq_block_pfn, WQ_BLOCK_PFN); + /* VF must use 0-level CLA */ + if (curr_pg_pfn != wq_block_pfn) + return false; + + return true; +} + +static bool check_cmdq_ctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + return hinic_cmdq_check_vf_ctxt(hwdev, buf_in); +} + +#define HW_CTX_QPS_VALID(hw_ctxt) \ + ((hw_ctxt)->rq_depth >= HINIC_QUEUE_MIN_DEPTH && \ + (hw_ctxt)->rq_depth <= HINIC_QUEUE_MAX_DEPTH && \ + (hw_ctxt)->sq_depth >= HINIC_QUEUE_MIN_DEPTH && \ + (hw_ctxt)->sq_depth <= HINIC_QUEUE_MAX_DEPTH && \ + (hw_ctxt)->rx_buf_sz_idx <= HINIC_MAX_RX_BUFFER_SIZE) + +static bool hw_ctxt_qps_param_valid(struct hinic_cmd_hw_ioctxt *hw_ctxt) +{ + if (HW_CTX_QPS_VALID(hw_ctxt)) + return true; + + if (!hw_ctxt->rq_depth && !hw_ctxt->sq_depth && + !hw_ctxt->rx_buf_sz_idx) + return true; + + return false; +} + +static bool check_hwctxt(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_cmd_hw_ioctxt *hw_ctxt = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if (hw_ctxt->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) + return false; + + if (hw_ctxt->set_cmdq_depth) { + if (hw_ctxt->cmdq_depth >= HINIC_QUEUE_MIN_DEPTH && + hw_ctxt->cmdq_depth <= HINIC_QUEUE_MAX_DEPTH) + return true; + + return false; + } + + return hw_ctxt_qps_param_valid(hw_ctxt); +} + +static bool check_set_wq_page_size(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_wq_page_size *page_size_info = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size)) + return false; + + if (page_size_info->ppf_idx != HINIC_HWIF_PPF_IDX(hwdev->hwif)) + return false; + + if (((1U << page_size_info->page_size) * SZ_4K) != + HINIC_DEFAULT_WQ_PAGE_SIZE) + return false; + + return true; +} + +static struct vf_cmd_check_handle hw_cmd_support_vf[] = { + {HINIC_COMM_CMD_START_FLR, hinic_mbox_check_func_id_8B}, + 
{HINIC_COMM_CMD_DMA_ATTR_SET, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_CMDQ_CTXT_SET, check_cmdq_ctxt}, + {HINIC_COMM_CMD_CMDQ_CTXT_GET, check_cmdq_ctxt}, + {HINIC_COMM_CMD_HWCTXT_SET, check_hwctxt}, + {HINIC_COMM_CMD_HWCTXT_GET, check_hwctxt}, + {HINIC_COMM_CMD_SQ_HI_CI_SET, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_RES_STATE_SET, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_IO_RES_CLEAR, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_L2NIC_RESET, hinic_mbox_check_func_id_8B}, + {HINIC_COMM_CMD_PAGESIZE_SET, check_set_wq_page_size}, +}; + static int comm_pf_mbox_handler(void *handle, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { + u8 size = ARRAY_SIZE(hw_cmd_support_vf); struct hinic_hwdev *hwdev = handle; struct hinic_pfhwdev *pfhwdev; int err = 0; pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + if (!hinic_mbox_check_cmd_valid(handle, hw_cmd_support_vf, vf_id, cmd, + buf_in, in_size, size)) { + dev_err(&hwdev->hwif->pdev->dev, + "PF Receive VF: %d common cmd: 0x%x or mbox len: 0x%x is invalid\n", + vf_id + hinic_glb_pf_vf_offset(hwdev->hwif), cmd, + in_size); + return HINIC_MBOX_VF_CMD_ERROR; + } + if (cmd == HINIC_COMM_CMD_START_FLR) { *out_size = 0; } else { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h index 0618fe515d9c..46953190d29e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h @@ -24,6 +24,12 @@ #define MAX_FUNCTION_NUM 512 +struct vf_cmd_check_handle { + u8 cmd; + bool (*check_cmd)(struct hinic_hwdev *hwdev, u16 src_func_idx, + void *buf_in, u16 in_size); +}; + enum hinic_mbox_ack_type { MBOX_ACK, MBOX_NO_ACK, @@ -122,6 +128,14 @@ struct vf_cmd_msg_handle { void *buf_out, u16 *out_size); }; +bool hinic_mbox_check_func_id_8B(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size); + +bool hinic_mbox_check_cmd_valid(struct hinic_hwdev *hwdev, + struct vf_cmd_check_handle *cmd_handle, + u16 vf_id, u8 cmd, void *buf_in, + u16 in_size, u8 size); + int hinic_register_pf_mbox_cb(struct hinic_hwdev *hwdev, enum hinic_mod_type mod, hinic_pf_mbox_cb callback); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index 1d8a115cb9ec..4d63680f2143 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -429,6 +429,18 @@ static int hinic_get_vf_link_status_msg_handler(void *hwdev, u16 vf_id, return 0; } +static bool check_func_table(struct hinic_hwdev *hwdev, u16 func_idx, + void *buf_in, u16 in_size) +{ + struct hinic_cmd_fw_ctxt *function_table = buf_in; + + if (!hinic_mbox_check_func_id_8B(hwdev, func_idx, buf_in, in_size) || + !function_table->rx_buf_sz) + return false; + + return true; +} + static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = { {HINIC_PORT_CMD_VF_REGISTER, hinic_register_vf_msg_handler}, {HINIC_PORT_CMD_VF_UNREGISTER, hinic_unregister_vf_msg_handler}, @@ -439,6 +451,45 @@ static struct vf_cmd_msg_handle nic_vf_cmd_msg_handler[] = { {HINIC_PORT_CMD_GET_LINK_STATE, hinic_get_vf_link_status_msg_handler}, }; +static struct vf_cmd_check_handle nic_cmd_support_vf[] = { + {HINIC_PORT_CMD_VF_REGISTER, NULL}, + 
{HINIC_PORT_CMD_VF_UNREGISTER, NULL}, + {HINIC_PORT_CMD_CHANGE_MTU, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_ADD_VLAN, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_VLAN, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_DEL_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_MODE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_PAUSE_INFO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_LINK_STATE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_LRO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_CSUM, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RX_VLAN_OFFLOAD, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_VPORT_STAT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_CLEAN_VPORT_STAT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_INDIR_TBL, + hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_TEMPLATE_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_HASH_ENGINE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RSS_CTX_TBL, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_TEMP_MGR, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_RSS_CFG, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_FWCTXT_INIT, check_func_table}, + {HINIC_PORT_CMD_GET_MGMT_VERSION, NULL}, + {HINIC_PORT_CMD_SET_FUNC_STATE, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_GLOBAL_QPN, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_TSO, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_SET_RQ_IQ_MAP, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_LINK_STATUS_REPORT, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_UPDATE_MAC, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_CAP, hinic_mbox_check_func_id_8B}, + {HINIC_PORT_CMD_GET_LINK_MODE, hinic_mbox_check_func_id_8B}, +}; + #define CHECK_IPSU_15BIT 0X8000 static @@ -972,6 +1023,7 @@ int hinic_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size) { + u8 size = ARRAY_SIZE(nic_cmd_support_vf); struct vf_cmd_msg_handle *vf_msg_handle; struct hinic_hwdev *dev = hwdev; struct hinic_func_to_io *nic_io; @@ -980,7 +1032,15 @@ static int nic_pf_mbox_handler(void *hwdev, u16 vf_id, u8 cmd, void *buf_in, u32 i; if (!hwdev) - return -EFAULT; + return -EINVAL; + + if (!hinic_mbox_check_cmd_valid(hwdev, nic_cmd_support_vf, vf_id, cmd, + buf_in, in_size, size)) { + dev_err(&dev->hwif->pdev->dev, + "PF Receive VF nic cmd: 0x%x, mbox len: 0x%x is invalid\n", + cmd, in_size); + return HINIC_MBOX_VF_CMD_ERROR; + } pfhwdev = container_of(dev, struct hinic_pfhwdev, hwdev); nic_io = &dev->func_to_io; From 93f4ddd64b7dca8279ad052e0d58d92b67c33907 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 3 Aug 2020 20:23:13 -0700 Subject: [PATCH 2029/2056] via-velocity: Use more typical logging styles Use netdev_ in place of VELOCITY_PRT. Use pr_ in place of printk(KERN_. Miscellanea: o Add pr_fmt to prefix pr_ output with "via-velocity: " o Remove now unused functions and macros o Realign some logging lines o Remove devname where pr_ is also used Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- drivers/net/ethernet/via/via-velocity.c | 157 +++++++++++++----------- drivers/net/ethernet/via/via-velocity.h | 44 ------- 2 files changed, 82 insertions(+), 119 deletions(-) diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 713dbc04b25b..6d2a31488a74 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -32,6 +32,8 @@ * MODULE_LICENSE("GPL"); */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -80,7 +82,6 @@ enum velocity_bus_type { }; static int velocity_nics; -static int msglevel = MSG_LEVEL_INFO; static void velocity_set_power_state(struct velocity_info *vptr, char state) { @@ -405,24 +406,22 @@ static const char *get_chip_name(enum chip_type chip_id) * @max: highest value allowed * @def: default value * @name: property name - * @dev: device name * * Set an integer property in the module options. This function does * all the verification and checking as well as reporting so that * we don't duplicate code for each option. */ static void velocity_set_int_opt(int *opt, int val, int min, int max, int def, - char *name, const char *devname) + char *name) { if (val == -1) *opt = def; else if (val < min || val > max) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n", - devname, name, min, max); + pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n", + name, min, max); *opt = def; } else { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n", - devname, name, val); + pr_info("set value of parameter %s to %d\n", name, val); *opt = val; } } @@ -434,25 +433,24 @@ static void velocity_set_int_opt(int *opt, int val, int min, int max, int def, * @def: default value (yes/no) * @flag: numeric value to set for true. * @name: property name - * @dev: device name * * Set a boolean property in the module options. This function does * all the verification and checking as well as reporting so that * we don't duplicate code for each option. */ static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, - char *name, const char *devname) + char *name) { (*opt) &= (~flag); if (val == -1) *opt |= (def ? flag : 0); else if (val < 0 || val > 1) { - printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n", - devname, name); + pr_notice("the value of parameter %s is invalid, the valid range is (%d-%d)\n", + name, 0, 1); *opt |= (def ? flag : 0); } else { - printk(KERN_INFO "%s: set parameter %s to %s\n", - devname, name, val ? "TRUE" : "FALSE"); + pr_info("set parameter %s to %s\n", + name, val ? "TRUE" : "FALSE"); *opt |= (val ? 
flag : 0); } } @@ -461,24 +459,38 @@ static void velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, * velocity_get_options - set options on device * @opts: option structure for the device * @index: index of option to use in module options array - * @devname: device name * * Turn the module and command options into a single structure * for the current device */ -static void velocity_get_options(struct velocity_opt *opts, int index, - const char *devname) +static void velocity_get_options(struct velocity_opt *opts, int index) { - velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname); - velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname); - velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); - velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); + velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], + RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, + "rx_thresh"); + velocity_set_int_opt(&opts->DMA_length, DMA_length[index], + DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, + "DMA_length"); + velocity_set_int_opt(&opts->numrx, RxDescriptors[index], + RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, + "RxDescriptors"); + velocity_set_int_opt(&opts->numtx, TxDescriptors[index], + TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, + "TxDescriptors"); - velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); - velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); - velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); - velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); + velocity_set_int_opt(&opts->flow_cntl, flow_control[index], + FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, + "flow_control"); + velocity_set_bool_opt(&opts->flags, IP_byte_align[index], + IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, + "IP_byte_align"); + velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], + MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, + "Media link mode"); + velocity_set_int_opt(&opts->wol_opts, wol_opts[index], + WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, + "Wake On Lan options"); opts->numrx = (opts->numrx & ~3); } @@ -880,7 +892,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) (mii_status==curr_status)) { vptr->mii_status=mii_check_media_mode(vptr->mac_regs); vptr->mii_status=check_connection_type(vptr->mac_regs); - VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n"); + netdev_info(vptr->netdev, "Velocity link no change\n"); return 0; } */ @@ -892,7 +904,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) * If connection type is AUTO */ if (mii_status & VELOCITY_AUTONEG_ENABLE) { - VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n"); + netdev_info(vptr->netdev, "Velocity is in AUTO mode\n"); /* clear force MAC mode bit */ BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); /* set duplex mode of MAC according to duplex mode of MII */ @@ -927,12 +939,14 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) if (mii_status & VELOCITY_DUPLEX_FULL) { 
CHIPGCR |= CHIPGCR_FCFDX; writeb(CHIPGCR, &regs->CHIPGCR); - VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n"); + netdev_info(vptr->netdev, + "set Velocity to forced full mode\n"); if (vptr->rev_id < REV_ID_VT3216_A0) BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR); } else { CHIPGCR &= ~CHIPGCR_FCFDX; - VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n"); + netdev_info(vptr->netdev, + "set Velocity to forced half mode\n"); writeb(CHIPGCR, &regs->CHIPGCR); if (vptr->rev_id < REV_ID_VT3216_A0) BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); @@ -985,45 +999,61 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status) */ static void velocity_print_link_status(struct velocity_info *vptr) { + const char *link; + const char *speed; + const char *duplex; if (vptr->mii_status & VELOCITY_LINK_FAIL) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); - } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); + netdev_notice(vptr->netdev, "failed to detect cable link\n"); + return; + } + + if (vptr->options.spd_dpx == SPD_DPX_AUTO) { + link = "auto-negotiation"; if (vptr->mii_status & VELOCITY_SPEED_1000) - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); + speed = "1000"; else if (vptr->mii_status & VELOCITY_SPEED_100) - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps"); + speed = "100"; else - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps"); + speed = "10"; if (vptr->mii_status & VELOCITY_DUPLEX_FULL) - VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n"); + duplex = "full"; else - VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); + duplex = "half"; } else { - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); + link = "forced"; + switch (vptr->options.spd_dpx) { case SPD_DPX_1000_FULL: - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); + speed = "1000"; + duplex = "full"; break; case SPD_DPX_100_HALF: - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n"); + speed = "100"; + duplex = "half"; break; case SPD_DPX_100_FULL: - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n"); + speed = "100"; + duplex = "full"; break; case SPD_DPX_10_HALF: - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n"); + speed = "10"; + duplex = "half"; break; case SPD_DPX_10_FULL: - VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n"); + speed = "10"; + duplex = "full"; break; default: + speed = "unknown"; + duplex = "unknown"; break; } } + netdev_notice(vptr->netdev, "Link %s speed %sM bps %s duplex\n", + link, speed, duplex); } /** @@ -1621,8 +1651,7 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) velocity_init_rx_ring_indexes(vptr); if (velocity_rx_refill(vptr) != vptr->options.numrx) { - VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR - "%s: failed to allocate RX buffer.\n", vptr->netdev->name); + netdev_err(vptr->netdev, "failed to allocate RX buffer\n"); velocity_free_rd_ring(vptr); goto out; } @@ -1805,7 +1834,8 @@ static void velocity_error(struct velocity_info *vptr, int status) if (status & ISR_TXSTLI) { struct mac_regs __iomem *regs = vptr->mac_regs; - printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); + netdev_err(vptr->netdev, "TD structure error TDindex=%hx\n", + readw(&regs->TDIdx[0])); BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); writew(TRDCSR_RUN, &regs->TDCSRClr); netif_stop_queue(vptr->netdev); @@ -2036,7 +2066,7 @@ static int 
velocity_receive_frame(struct velocity_info *vptr, int idx) if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) { if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) - VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name); + netdev_err(vptr->netdev, "received frame spans multiple RDs\n"); stats->rx_length_errors++; return -EINVAL; } @@ -2721,11 +2751,8 @@ static int velocity_get_platform_info(struct velocity_info *vptr) */ static void velocity_print_info(struct velocity_info *vptr) { - struct net_device *dev = vptr->netdev; - - printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); - printk(KERN_INFO "%s: Ethernet Address: %pM\n", - dev->name, dev->dev_addr); + netdev_info(vptr->netdev, "%s - Ethernet Address: %pM\n", + get_chip_name(vptr->chip_id), vptr->netdev->dev_addr); } static u32 velocity_get_link(struct net_device *dev) @@ -2748,10 +2775,8 @@ static int velocity_probe(struct device *dev, int irq, const struct velocity_info_tbl *info, enum velocity_bus_type bustype) { - static int first = 1; struct net_device *netdev; int i; - const char *drv_string; struct velocity_info *vptr; struct mac_regs __iomem *regs; int ret = -ENOMEM; @@ -2773,13 +2798,9 @@ static int velocity_probe(struct device *dev, int irq, SET_NETDEV_DEV(netdev, dev); vptr = netdev_priv(netdev); - if (first) { - printk(KERN_INFO "%s Ver. %s\n", - VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); - printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); - printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n"); - first = 0; - } + pr_info_once("%s Ver. %s\n", VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION); + pr_info_once("Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n"); + pr_info_once("Copyright (c) 2004 Red Hat Inc.\n"); netdev->irq = irq; vptr->netdev = netdev; @@ -2815,9 +2836,7 @@ static int velocity_probe(struct device *dev, int irq, netdev->dev_addr[i] = readb(&regs->PAR[i]); - drv_string = dev_driver_string(dev); - - velocity_get_options(&vptr->options, velocity_nics, drv_string); + velocity_get_options(&vptr->options, velocity_nics); /* * Mask out the options cannot be set to the chip @@ -3469,16 +3488,6 @@ static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolin return 0; } -static u32 velocity_get_msglevel(struct net_device *dev) -{ - return msglevel; -} - -static void velocity_set_msglevel(struct net_device *dev, u32 value) -{ - msglevel = value; -} - static int get_pending_timer_val(int val) { int mult_bits = val >> 6; @@ -3653,8 +3662,6 @@ static const struct ethtool_ops velocity_ethtool_ops = { .get_drvinfo = velocity_get_drvinfo, .get_wol = velocity_ethtool_get_wol, .set_wol = velocity_ethtool_set_wol, - .get_msglevel = velocity_get_msglevel, - .set_msglevel = velocity_set_msglevel, .get_link = velocity_get_link, .get_strings = velocity_get_strings, .get_sset_count = velocity_get_sset_count, diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index f196e71d2c04..d3f960cc7c6e 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1286,50 +1286,6 @@ struct velocity_context { velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\ (id);}) -/* - * Inline debug routine - */ - - -enum velocity_msg_level { - MSG_LEVEL_ERR = 0, //Errors that will cause abnormal operation. - MSG_LEVEL_NOTICE = 1, //Some errors need users to be notified. - MSG_LEVEL_INFO = 2, //Normal message. 
- MSG_LEVEL_VERBOSE = 3, //Will report all trival errors. - MSG_LEVEL_DEBUG = 4 //Only for debug purpose. -}; - -#ifdef VELOCITY_DEBUG -#define ASSERT(x) { \ - if (!(x)) { \ - printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ - __func__, __LINE__);\ - BUG(); \ - }\ -} -#define VELOCITY_DBG(p,args...) printk(p, ##args) -#else -#define ASSERT(x) -#define VELOCITY_DBG(x) -#endif - -#define VELOCITY_PRT(l, p, args...) do {if (l<=msglevel) printk( p ,##args);} while (0) - -#define VELOCITY_PRT_CAMMASK(p,t) {\ - int i;\ - if ((t)==VELOCITY_MULTICAST_CAM) {\ - for (i=0;i<(MCAM_SIZE/8);i++)\ - printk("%02X",(p)->mCAMmask[i]);\ - }\ - else {\ - for (i=0;i<(VCAM_SIZE/8);i++)\ - printk("%02X",(p)->vCAMmask[i]);\ - }\ - printk("\n");\ -} - - - #define VELOCITY_WOL_MAGIC 0x00000000UL #define VELOCITY_WOL_PHY 0x00000001UL #define VELOCITY_WOL_ARP 0x00000002UL From df23bb18b44b9a1f2b54358201730e710a9df57f Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:42 +0200 Subject: [PATCH 2030/2056] ipv4: route: Ignore output interface in FIB lookup for PMTU route Currently, processes sending traffic to a local bridge with an encapsulation device as a port don't get ICMP errors if they exceed the PMTU of the encapsulated link. David Ahern suggested this as a hack, but it actually looks like the correct solution: when we update the PMTU for a given destination by means of updating or creating a route exception, the encapsulation might trigger this because of PMTU discovery happening either on the encapsulation device itself, or its lower layer. This happens on bridged encapsulations only. The output interface shouldn't matter, because we already have a valid destination. Drop the output interface restriction from the associated route lookup. For UDP tunnels, we will now have a route exception created for the encapsulation itself, with a MTU value reflecting its headroom, which allows a bridge forwarding IP packets originated locally to deliver errors back to the sending socket. The behaviour is now consistent with IPv6 and verified with selftests pmtu_ipv{4,6}_br_{geneve,vxlan}{4,6}_exception introduced later in this series. v2: - reset output interface only for bridge ports (David Ahern) - add and use netif_is_any_bridge_port() helper (David Ahern) Suggested-by: David Ahern Signed-off-by: Stefano Brivio Reviewed-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 5 +++++ net/ipv4/route.c | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 88d40b9abaa1..90444622b703 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4840,6 +4840,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev) return dev->priv_flags & IFF_OVS_DATAPATH; } +static inline bool netif_is_any_bridge_port(const struct net_device *dev) +{ + return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); +} + static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a01efa062f6b..8ca6bcab7b03 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1050,6 +1050,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); + + /* Don't make lookup fail for bridged encapsulations */ + if (skb && netif_is_any_bridge_port(skb->dev)) + fl4.flowi4_oif = 0; + __ip_rt_update_pmtu(rt, &fl4, mtu); } From 4cb47a8644cc9eb8ec81190a50e79e6530d0297f Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:43 +0200 Subject: [PATCH 2031/2056] tunnels: PMTU discovery support for directly bridged IP packets It's currently possible to bridge Ethernet tunnels carrying IP packets directly to external interfaces without assigning them addresses and routes on the bridged network itself: this is the case for UDP tunnels bridged with a standard bridge or by Open vSwitch. PMTU discovery is currently broken with those configurations, because the encapsulation effectively decreases the MTU of the link, and while we are able to account for this using PMTU discovery on the lower layer, we don't have a way to relay ICMP or ICMPv6 messages needed by the sender, because we don't have valid routes to it. On the other hand, as a tunnel endpoint, we can't fragment packets as a general approach: this is for instance clearly forbidden for VXLAN by RFC 7348, section 4.3: VTEPs MUST NOT fragment VXLAN packets. Intermediate routers may fragment encapsulated VXLAN packets due to the larger frame size. The destination VTEP MAY silently discard such VXLAN fragments. The same paragraph recommends that the MTU over the physical network accommodates for encapsulations, but this isn't a practical option for complex topologies, especially for typical Open vSwitch use cases. Further, it states that: Other techniques like Path MTU discovery (see [RFC1191] and [RFC1981]) MAY be used to address this requirement as well. Now, PMTU discovery already works for routed interfaces, we get route exceptions created by the encapsulation device as they receive ICMP Fragmentation Needed and ICMPv6 Packet Too Big messages, and we already rebuild those messages with the appropriate MTU and route them back to the sender. Add the missing bits for bridged cases: - checks in skb_tunnel_check_pmtu() to understand if it's appropriate to trigger a reply according to RFC 1122 section 3.2.2 for ICMP and RFC 4443 section 2.4 for ICMPv6. This function is already called by UDP tunnels - a new function generating those ICMP or ICMPv6 replies. We can't reuse icmp_send() and icmp6_send() as we don't see the sender as a valid destination. 
This doesn't need to be generic, as we don't cover any other type of ICMP errors given that we only provide an encapsulation function to the sender While at it, make the MTU check in skb_tunnel_check_pmtu() accurate: we might receive GSO buffers here, and the passed headroom already includes the inner MAC length, so we don't have to account for it a second time (that would imply three MAC headers on the wire, but there are just two). This issue became visible while bridging IPv6 packets with 4500 bytes of payload over GENEVE using IPv4 with a PMTU of 4000. Given the 50 bytes of encapsulation headroom, we would advertise MTU as 3950, and we would reject fragmented IPv6 datagrams of 3958 bytes size on the wire. We're exclusively dealing with network MTU here, though, so we could get Ethernet frames up to 3964 octets in that case. v2: - moved skb_tunnel_check_pmtu() to ip_tunnel_core.c (David Ahern) - split IPv4/IPv6 functions (David Ahern) Signed-off-by: Stefano Brivio Reviewed-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/bareudp.c | 5 +- drivers/net/geneve.c | 5 +- drivers/net/vxlan.c | 4 +- include/net/dst.h | 10 -- include/net/ip_tunnels.h | 2 + net/ipv4/ip_tunnel_core.c | 244 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 254 insertions(+), 16 deletions(-) diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c index 3b6664c7e73c..841910f1db65 100644 --- a/drivers/net/bareudp.c +++ b/drivers/net/bareudp.c @@ -308,7 +308,7 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, - BAREUDP_IPV4_HLEN + info->options_len); + BAREUDP_IPV4_HLEN + info->options_len, false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, @@ -369,7 +369,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); - skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len); + skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len, + false); sport = udp_flow_src_port(bareudp->net, skb, bareudp->sport_min, USHRT_MAX, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 017c13acc911..de86b6d82132 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -894,7 +894,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return PTR_ERR(rt); skb_tunnel_check_pmtu(skb, &rt->dst, - GENEVE_IPV4_HLEN + info->options_len); + GENEVE_IPV4_HLEN + info->options_len, false); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { @@ -955,7 +955,8 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); - skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); + skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len, + false); sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 77658425db8a..1432544da507 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2720,7 +2720,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; - skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); + skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, false); tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); @@ -2760,7 +2760,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); + skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, false); tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); diff --git a/include/net/dst.h b/include/net/dst.h index 852d8fb36ab7..6ae2e625050d 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -535,14 +535,4 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) dst->ops->update_pmtu(dst, NULL, skb, mtu, false); } -static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, - struct dst_entry *encap_dst, - int headroom) -{ - u32 encap_mtu = dst_mtu(encap_dst); - - if (skb->len > encap_mtu - headroom) - skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); -} - #endif /* _NET_DST_H */ diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 36025dea7612..02ccd32542d0 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -420,6 +420,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, u8 tos, u8 ttl, __be16 df, bool xnet); struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); +int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, + int headroom, bool reply); int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index f8b419e2475c..9ddee2a0c66d 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -184,6 +184,250 @@ int iptunnel_handle_offloads(struct sk_buff *skb, } EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); +/** + * iptunnel_pmtud_build_icmp() - Build ICMP error message for PMTUD + * @skb: Original packet with L2 header + * @mtu: MTU value for ICMP error + * + * Return: length on success, negative error code if message couldn't be built. 
+ */ +static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) +{ + const struct iphdr *iph = ip_hdr(skb); + struct icmphdr *icmph; + struct iphdr *niph; + struct ethhdr eh; + int len, err; + + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr))) + return -EINVAL; + + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); + + err = pskb_trim(skb, 576 - sizeof(*niph) - sizeof(*icmph)); + if (err) + return err; + + len = skb->len + sizeof(*icmph); + err = skb_cow(skb, sizeof(*niph) + sizeof(*icmph) + ETH_HLEN); + if (err) + return err; + + icmph = skb_push(skb, sizeof(*icmph)); + *icmph = (struct icmphdr) { + .type = ICMP_DEST_UNREACH, + .code = ICMP_FRAG_NEEDED, + .checksum = 0, + .un.frag.__unused = 0, + .un.frag.mtu = ntohs(mtu), + }; + icmph->checksum = ip_compute_csum(icmph, len); + skb_reset_transport_header(skb); + + niph = skb_push(skb, sizeof(*niph)); + *niph = (struct iphdr) { + .ihl = sizeof(*niph) / 4u, + .version = 4, + .tos = 0, + .tot_len = htons(len + sizeof(*niph)), + .id = 0, + .frag_off = htons(IP_DF), + .ttl = iph->ttl, + .protocol = IPPROTO_ICMP, + .saddr = iph->daddr, + .daddr = iph->saddr, + }; + ip_send_check(niph); + skb_reset_network_header(skb); + + skb->ip_summed = CHECKSUM_NONE; + + eth_header(skb, skb->dev, htons(eh.h_proto), eh.h_source, eh.h_dest, 0); + skb_reset_mac_header(skb); + + return skb->len; +} + +/** + * iptunnel_pmtud_check_icmp() - Trigger ICMP reply if needed and allowed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @mtu: Network MTU for path + * + * Return: 0 for no ICMP reply, length if built, negative value on error. + */ +static int iptunnel_pmtud_check_icmp(struct sk_buff *skb, int mtu) +{ + const struct icmphdr *icmph = icmp_hdr(skb); + const struct iphdr *iph = ip_hdr(skb); + + if (mtu <= 576 || iph->frag_off != htons(IP_DF)) + return 0; + + if (ipv4_is_lbcast(iph->daddr) || ipv4_is_multicast(iph->daddr) || + ipv4_is_zeronet(iph->saddr) || ipv4_is_loopback(iph->saddr) || + ipv4_is_lbcast(iph->saddr) || ipv4_is_multicast(iph->saddr)) + return 0; + + if (iph->protocol == IPPROTO_ICMP && icmp_is_err(icmph->type)) + return 0; + + return iptunnel_pmtud_build_icmp(skb, mtu); +} + +#if IS_ENABLED(CONFIG_IPV6) +/** + * iptunnel_pmtud_build_icmpv6() - Build ICMPv6 error message for PMTUD + * @skb: Original packet with L2 header + * @mtu: MTU value for ICMPv6 error + * + * Return: length on success, negative error code if message couldn't be built. 
+ */ +static int iptunnel_pmtud_build_icmpv6(struct sk_buff *skb, int mtu) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct icmp6hdr *icmp6h; + struct ipv6hdr *nip6h; + struct ethhdr eh; + int len, err; + __wsum csum; + + if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr))) + return -EINVAL; + + skb_copy_bits(skb, skb_mac_offset(skb), &eh, ETH_HLEN); + pskb_pull(skb, ETH_HLEN); + skb_reset_network_header(skb); + + err = pskb_trim(skb, IPV6_MIN_MTU - sizeof(*nip6h) - sizeof(*icmp6h)); + if (err) + return err; + + len = skb->len + sizeof(*icmp6h); + err = skb_cow(skb, sizeof(*nip6h) + sizeof(*icmp6h) + ETH_HLEN); + if (err) + return err; + + icmp6h = skb_push(skb, sizeof(*icmp6h)); + *icmp6h = (struct icmp6hdr) { + .icmp6_type = ICMPV6_PKT_TOOBIG, + .icmp6_code = 0, + .icmp6_cksum = 0, + .icmp6_mtu = htonl(mtu), + }; + skb_reset_transport_header(skb); + + nip6h = skb_push(skb, sizeof(*nip6h)); + *nip6h = (struct ipv6hdr) { + .priority = 0, + .version = 6, + .flow_lbl = { 0 }, + .payload_len = htons(len), + .nexthdr = IPPROTO_ICMPV6, + .hop_limit = ip6h->hop_limit, + .saddr = ip6h->daddr, + .daddr = ip6h->saddr, + }; + skb_reset_network_header(skb); + + csum = csum_partial(icmp6h, len, 0); + icmp6h->icmp6_cksum = csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, len, + IPPROTO_ICMPV6, csum); + + skb->ip_summed = CHECKSUM_NONE; + + eth_header(skb, skb->dev, htons(eh.h_proto), eh.h_source, eh.h_dest, 0); + skb_reset_mac_header(skb); + + return skb->len; +} + +/** + * iptunnel_pmtud_check_icmpv6() - Trigger ICMPv6 reply if needed and allowed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @mtu: Network MTU for path + * + * Return: 0 for no ICMPv6 reply, length if built, negative value on error. + */ +static int iptunnel_pmtud_check_icmpv6(struct sk_buff *skb, int mtu) +{ + const struct ipv6hdr *ip6h = ipv6_hdr(skb); + int stype = ipv6_addr_type(&ip6h->saddr); + u8 proto = ip6h->nexthdr; + __be16 frag_off; + int offset; + + if (mtu <= IPV6_MIN_MTU) + return 0; + + if (stype == IPV6_ADDR_ANY || stype == IPV6_ADDR_MULTICAST || + stype == IPV6_ADDR_LOOPBACK) + return 0; + + offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, + &frag_off); + if (offset < 0 || (frag_off & htons(~0x7))) + return 0; + + if (proto == IPPROTO_ICMPV6) { + struct icmp6hdr *icmp6h; + + if (!pskb_may_pull(skb, skb_network_header(skb) + + offset + 1 - skb->data)) + return 0; + + icmp6h = (struct icmp6hdr *)(skb_network_header(skb) + offset); + if (icmpv6_is_err(icmp6h->icmp6_type) || + icmp6h->icmp6_type == NDISC_REDIRECT) + return 0; + } + + return iptunnel_pmtud_build_icmpv6(skb, mtu); +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + +/** + * skb_tunnel_check_pmtu() - Check, update PMTU and trigger ICMP reply as needed + * @skb: Buffer being sent by encapsulation, L2 headers expected + * @encap_dst: Destination for tunnel encapsulation (outer IP) + * @headroom: Encapsulation header size, bytes + * @reply: Build matching ICMP or ICMPv6 message as a result + * + * L2 tunnel implementations that can carry IP and can be directly bridged + * (currently UDP tunnels) can't always rely on IP forwarding paths to handle + * PMTU discovery. In the bridged case, ICMP or ICMPv6 messages need to be built + * based on payload and sent back by the encapsulation itself. + * + * For routable interfaces, we just need to update the PMTU for the destination. 
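+ * + * GSO packets pass the check as long as the resulting segments fit within the lowered MTU. A reply is only generated when @reply is set and skb->pkt_type is not PACKET_HOST (i.e. the packet was not locally generated); otherwise only the PMTU is updated.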
+ * + * Return: 0 if ICMP error not needed, length if built, negative value on error + */ +int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, + int headroom, bool reply) +{ + u32 mtu = dst_mtu(encap_dst) - headroom; + + if ((skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) || + (!skb_is_gso(skb) && (skb->len - skb_mac_header_len(skb)) <= mtu)) + return 0; + + skb_dst_update_pmtu_no_confirm(skb, mtu); + + if (!reply || skb->pkt_type == PACKET_HOST) + return 0; + + if (skb->protocol == htons(ETH_P_IP)) + return iptunnel_pmtud_check_icmp(skb, mtu); + +#if IS_ENABLED(CONFIG_IPV6) + if (skb->protocol == htons(ETH_P_IPV6)) + return iptunnel_pmtud_check_icmpv6(skb, mtu); +#endif + return 0; +} +EXPORT_SYMBOL(skb_tunnel_check_pmtu); + /* Often modified stats are per cpu, other are shared (netdev->stats) */ void ip_tunnel_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot) From fc68c99577cc66e38d11b3e29304efb83fa08d53 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:44 +0200 Subject: [PATCH 2032/2056] vxlan: Support for PMTU discovery on directly bridged links If the interface is a bridge or Open vSwitch port, and we can't forward a packet because it exceeds the local PMTU estimate, trigger an ICMP or ICMPv6 reply to the sender, using the same interface to forward it back. If metadata collection is enabled, reverse destination and source addresses, so that Open vSwitch is able to match this packet against the existing, reverse flow. v2: Use netif_is_any_bridge_port() (David Ahern) Signed-off-by: Stefano Brivio Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 47 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1432544da507..6d5816be6131 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2500,7 +2500,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, - struct vxlan_dev *dst_vxlan, __be32 vni) + struct vxlan_dev *dst_vxlan, __be32 vni, + bool snoop) { struct pcpu_sw_netstats *tx_stats, *rx_stats; union vxlan_addr loopback; @@ -2532,7 +2533,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, goto drop; } - if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) + if ((dst_vxlan->cfg.flags & VXLAN_F_LEARN) && snoop) vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); u64_stats_update_begin(&tx_stats->syncp); @@ -2581,7 +2582,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, return -ENOENT; } - vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni); + vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni, true); return 1; } @@ -2617,7 +2618,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (vxlan_addr_any(dst)) { if (did_rsc) { /* short-circuited back to local bridge */ - vxlan_encap_bypass(skb, vxlan, vxlan, default_vni); + vxlan_encap_bypass(skb, vxlan, vxlan, + default_vni, true); return; } goto drop; @@ -2720,7 +2722,23 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; - skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, false); + err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, + netif_is_any_bridge_port(dev)); + if (err < 0) { + goto tx_error; + } else if (err) { + if (info) { + struct in_addr src, dst; + + src = 
remote_ip.sin.sin_addr; + dst = local_ip.sin.sin_addr; + info->key.u.ipv4.src = src.s_addr; + info->key.u.ipv4.dst = dst.s_addr; + } + vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); + dst_release(ndst); + goto out_unlock; + } tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); @@ -2760,7 +2778,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, false); + err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, + netif_is_any_bridge_port(dev)); + if (err < 0) { + goto tx_error; + } else if (err) { + if (info) { + struct in6_addr src, dst; + + src = remote_ip.sin6.sin6_addr; + dst = local_ip.sin6.sin6_addr; + info->key.u.ipv6.src = src; + info->key.u.ipv6.dst = dst; + } + + vxlan_encap_bypass(skb, vxlan, vxlan, vni, false); + dst_release(ndst); + goto out_unlock; + } tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); From c1a800e88dbffca4ef48000cb3f9ad618dc7ad89 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:45 +0200 Subject: [PATCH 2033/2056] geneve: Support for PMTU discovery on directly bridged links If the interface is a bridge or Open vSwitch port, and we can't forward a packet because it exceeds the local PMTU estimate, trigger an ICMP or ICMPv6 reply to the sender, using the same interface to forward it back. If metadata collection is enabled, set destination and source addresses for the flow as if we were receiving the packet, so that Open vSwitch can match the ICMP error against the existing association. v2: Use netif_is_any_bridge_port() (David Ahern) Signed-off-by: Stefano Brivio Signed-off-by: David S. Miller --- drivers/net/geneve.c | 56 ++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 5 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de86b6d82132..c71f994fbc73 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -893,8 +893,31 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(rt)) return PTR_ERR(rt); - skb_tunnel_check_pmtu(skb, &rt->dst, - GENEVE_IPV4_HLEN + info->options_len, false); + err = skb_tunnel_check_pmtu(skb, &rt->dst, + GENEVE_IPV4_HLEN + info->options_len, + netif_is_any_bridge_port(dev)); + if (err < 0) { + dst_release(&rt->dst); + return err; + } else if (err) { + struct ip_tunnel_info *info; + + info = skb_tunnel_info(skb); + if (info) { + info->key.u.ipv4.dst = fl4.saddr; + info->key.u.ipv4.src = fl4.daddr; + } + + if (!pskb_may_pull(skb, ETH_HLEN)) { + dst_release(&rt->dst); + return -EINVAL; + } + + skb->protocol = eth_type_trans(skb, geneve->dev); + netif_rx(skb); + dst_release(&rt->dst); + return -EMSGSIZE; + } sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { @@ -955,8 +978,30 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (IS_ERR(dst)) return PTR_ERR(dst); - skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len, - false); + err = skb_tunnel_check_pmtu(skb, dst, + GENEVE_IPV6_HLEN + info->options_len, + netif_is_any_bridge_port(dev)); + if (err < 0) { + dst_release(dst); + return err; + } else if (err) { + struct ip_tunnel_info *info = skb_tunnel_info(skb); + + if (info) { + info->key.u.ipv6.dst = fl6.saddr; + info->key.u.ipv6.src = fl6.daddr; + } + + if (!pskb_may_pull(skb, ETH_HLEN)) { + dst_release(dst); + return -EINVAL; + } + + skb->protocol = 
eth_type_trans(skb, geneve->dev); + netif_rx(skb); + dst_release(dst); + return -EMSGSIZE; + } sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { @@ -1013,7 +1058,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!err)) return NETDEV_TX_OK; - dev_kfree_skb(skb); + if (err != -EMSGSIZE) + dev_kfree_skb(skb); if (err == -ELOOP) dev->stats.collisions++; From df40e39c0df025da38dd819152a877a07ab1c115 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:46 +0200 Subject: [PATCH 2034/2056] selftests: pmtu.sh: Add tests for bridged UDP tunnels The new tests check that IP and IPv6 packets exceeding the local PMTU estimate, both locally generated and forwarded by a bridge from another node, result in the correct route exceptions being created, and that communication with end-to-end fragmentation over VXLAN and GENEVE tunnels is now possible as a result of PMTU discovery. Part of the existing setup functions aren't generic enough to simply add a namespace and a bridge to the existing routing setup. This rework is in progress and we can easily shrink this once more generic topology functions are available. Signed-off-by: Stefano Brivio Reviewed-by: David Ahern Signed-off-by: David S. Miller --- tools/testing/selftests/net/pmtu.sh | 166 ++++++++++++++++++++++++++-- 1 file changed, 159 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 77c09cd339c3..774e526573f3 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -59,6 +59,25 @@ # Same as pmtu_ipv6_vxlan6_exception, but using a GENEVE tunnel instead of # VXLAN # +# - pmtu_ipv{4,6}_br_vxlan{4,6}_exception +# Set up three namespaces, A, B, and C, with routing between A and B over +# R1. R2 is unused in these tests. A has a veth connection to C, and is +# connected to B via a VXLAN endpoint, which is directly bridged to C. +# MTU on the B-R1 link is lower than other MTUs. +# +# Check that both C and A are able to communicate with B over the VXLAN +# tunnel, and that PMTU exceptions with the correct values are created. +# +# segment a_r1 segment b_r1 b_r1: 4000 +# .--------------R1--------------. everything +# C---veth A B else: 5000 +# ' bridge | +# '---- - - - - - VXLAN - - - - - - - ' +# +# - pmtu_ipv{4,6}_br_geneve{4,6}_exception +# Same as pmtu_ipv{4,6}_br_vxlan{4,6}_exception, with a GENEVE tunnel +# instead. 
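+# The expected PMTU in these tests is the MTU of the B-R1 link minus the +# outer IP (20 or 40 bytes), UDP (8), VXLAN/GENEVE (8) and inner Ethernet +# (14) headers.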
+# # - pmtu_ipv{4,6}_fou{4,6}_exception # Same as pmtu_ipv4_vxlan4, but using a direct IPv4/IPv6 encapsulation # (FoU) over IPv4/IPv6, instead of VXLAN @@ -147,6 +166,14 @@ tests=" pmtu_ipv6_geneve4_exception IPv6 over geneve4: PMTU exceptions 1 pmtu_ipv4_geneve6_exception IPv4 over geneve6: PMTU exceptions 1 pmtu_ipv6_geneve6_exception IPv6 over geneve6: PMTU exceptions 1 + pmtu_ipv4_br_vxlan4_exception IPv4, bridged vxlan4: PMTU exceptions 1 + pmtu_ipv6_br_vxlan4_exception IPv6, bridged vxlan4: PMTU exceptions 1 + pmtu_ipv4_br_vxlan6_exception IPv4, bridged vxlan6: PMTU exceptions 1 + pmtu_ipv6_br_vxlan6_exception IPv6, bridged vxlan6: PMTU exceptions 1 + pmtu_ipv4_br_geneve4_exception IPv4, bridged geneve4: PMTU exceptions 1 + pmtu_ipv6_br_geneve4_exception IPv6, bridged geneve4: PMTU exceptions 1 + pmtu_ipv4_br_geneve6_exception IPv4, bridged geneve6: PMTU exceptions 1 + pmtu_ipv6_br_geneve6_exception IPv6, bridged geneve6: PMTU exceptions 1 pmtu_ipv4_fou4_exception IPv4 over fou4: PMTU exceptions 1 pmtu_ipv6_fou4_exception IPv6 over fou4: PMTU exceptions 1 pmtu_ipv4_fou6_exception IPv4 over fou6: PMTU exceptions 1 @@ -173,10 +200,12 @@ tests=" NS_A="ns-A" NS_B="ns-B" +NS_C="ns-C" NS_R1="ns-R1" NS_R2="ns-R2" ns_a="ip netns exec ${NS_A}" ns_b="ip netns exec ${NS_B}" +ns_c="ip netns exec ${NS_C}" ns_r1="ip netns exec ${NS_R1}" ns_r2="ip netns exec ${NS_R2}" @@ -239,9 +268,11 @@ routes_nh=" veth4_a_addr="192.168.1.1" veth4_b_addr="192.168.1.2" +veth4_c_addr="192.168.2.10" veth4_mask="24" veth6_a_addr="fd00:1::a" veth6_b_addr="fd00:1::b" +veth6_c_addr="fd00:2::c" veth6_mask="64" tunnel4_a_addr="192.168.2.1" @@ -428,7 +459,7 @@ setup_ip6ip6() { } setup_namespaces() { - for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do + for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do ip netns add ${n} || return 1 # Disable DAD, so that we don't have to wait to use the @@ -484,6 +515,7 @@ setup_vxlan_or_geneve() { a_addr="${2}" b_addr="${3}" opts="${4}" + br_if_a="${5}" if [ "${type}" = "vxlan" ]; then opts="${opts} ttl 64 dstport 4789" @@ -497,10 +529,16 @@ setup_vxlan_or_geneve() { run_cmd ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1 run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts} - run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a - run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b + if [ -n "${br_if_a}" ]; then + run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${br_if_a} + run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${br_if_a} + run_cmd ${ns_a} ip link set ${type}_a master ${br_if_a} + else + run_cmd ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a + run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a + fi - run_cmd ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a + run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b run_cmd ${ns_a} ip link set ${type}_a up @@ -516,11 +554,27 @@ setup_vxlan4() { } setup_geneve6() { - setup_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 + setup_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "" } setup_vxlan6() { - setup_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 + setup_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "" +} + 
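+# Bridged variants: the fifth argument names a bridge in A; the tunnel +# addresses are then configured on that bridge and the tunnel device is +# enslaved to it (see the br_if_a branch in setup_vxlan_or_geneve above).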
+setup_bridged_geneve4() { + setup_vxlan_or_geneve geneve ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "df set" "br0" +} + +setup_bridged_vxlan4() { + setup_vxlan_or_geneve vxlan ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "df set" "br0" +} + +setup_bridged_geneve6() { + setup_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "" "br0" +} + +setup_bridged_vxlan6() { + setup_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 "" "br0" } setup_xfrm() { @@ -630,6 +684,20 @@ setup_routing() { return 0 } +setup_bridge() { + run_cmd ${ns_a} ip link add br0 type bridge || return 2 + run_cmd ${ns_a} ip link set br0 up + + run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C + run_cmd ${ns_c} ip link set veth_A-C netns ns-A + + run_cmd ${ns_a} ip link set veth_A-C up + run_cmd ${ns_c} ip link set veth_C-A up + run_cmd ${ns_c} ip addr add ${veth4_c_addr}/${veth4_mask} dev veth_C-A + run_cmd ${ns_c} ip addr add ${veth6_c_addr}/${veth6_mask} dev veth_C-A + run_cmd ${ns_a} ip link set veth_A-C master br0 +} + setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip @@ -657,7 +725,7 @@ cleanup() { done tcpdump_pids= - for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do + for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do ip netns del ${n} 2> /dev/null done } @@ -892,6 +960,90 @@ test_pmtu_ipv6_geneve6_exception() { test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 6 6 } +test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception() { + type=${1} + family=${2} + outer_family=${3} + ll_mtu=4000 + + if [ ${outer_family} -eq 4 ]; then + setup namespaces routing bridge bridged_${type}4 || return 2 + # IPv4 header UDP header VXLAN/GENEVE header Ethernet header + exp_mtu=$((${ll_mtu} - 20 - 8 - 8 - 14)) + else + setup namespaces routing bridge bridged_${type}6 || return 2 + # IPv6 header UDP header VXLAN/GENEVE header Ethernet header + exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14)) + fi + + trace "${ns_a}" ${type}_a "${ns_b}" ${type}_b \ + "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \ + "${ns_a}" br0 "${ns_a}" veth-A-C \ + "${ns_c}" veth_C-A + + if [ ${family} -eq 4 ]; then + ping=ping + dst=${tunnel4_b_addr} + else + ping=${ping6} + dst=${tunnel6_b_addr} + fi + + # Create route exception by exceeding link layer MTU + mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000)) + mtu "${ns_a}" br0 $((${ll_mtu} + 1000)) + mtu "${ns_a}" veth_A-C $((${ll_mtu} + 1000)) + mtu "${ns_c}" veth_C-A $((${ll_mtu} + 1000)) + mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000)) + mtu "${ns_b}" veth_B-R1 ${ll_mtu} + mtu "${ns_r1}" veth_R1-B ${ll_mtu} + + mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000)) + mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000)) + + run_cmd ${ns_c} ${ping} -q -M want -i 0.1 -c 10 -s $((${ll_mtu} + 500)) ${dst} || return 1 + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s $((${ll_mtu} + 500)) ${dst} || return 1 + + # Check that exceptions were created + pmtu="$(route_get_dst_pmtu_from_exception "${ns_c}" ${dst})" + check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on bridged ${type} interface" + pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})" + check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on locally bridged ${type} interface" +} + +test_pmtu_ipv4_br_vxlan4_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception vxlan 4 4 +} + +test_pmtu_ipv6_br_vxlan4_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception vxlan 6 4 +} + 
+test_pmtu_ipv4_br_geneve4_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception geneve 4 4 +} + +test_pmtu_ipv6_br_geneve4_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception geneve 6 4 +} + +test_pmtu_ipv4_br_vxlan6_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception vxlan 4 6 +} + +test_pmtu_ipv6_br_vxlan6_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception vxlan 6 6 +} + +test_pmtu_ipv4_br_geneve6_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception geneve 4 6 +} + +test_pmtu_ipv6_br_geneve6_exception() { + test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception geneve 6 6 +} + test_pmtu_ipvX_over_fouY_or_gueY() { inner_family=${1} outer_family=${2} From 7b53682c94032a7a7adec400400c65d0af7fea5a Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Tue, 4 Aug 2020 07:53:47 +0200 Subject: [PATCH 2035/2056] selftests: pmtu.sh: Add tests for UDP tunnels handled by Open vSwitch The new tests check that IP and IPv6 packets exceeding the local PMTU estimate, forwarded by an Open vSwitch instance from another node, result in the correct route exceptions being created, and that communication with end-to-end fragmentation, over GENEVE and VXLAN Open vSwitch ports, is now possible as a result of PMTU discovery. Signed-off-by: Stefano Brivio Signed-off-by: David S. Miller --- tools/testing/selftests/net/pmtu.sh | 180 ++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 774e526573f3..6bbf69a28e12 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -78,6 +78,26 @@ # Same as pmtu_ipv{4,6}_br_vxlan{4,6}_exception, with a GENEVE tunnel # instead. # +# - pmtu_ipv{4,6}_ovs_vxlan{4,6}_exception +# Set up two namespaces, B, and C, with routing between the init namespace +# and B over R1. A and R2 are unused in these tests. The init namespace +# has a veth connection to C, and is connected to B via a VXLAN endpoint, +# which is handled by Open vSwitch and bridged to C. MTU on the B-R1 link +# is lower than other MTUs. +# +# Check that C is able to communicate with B over the VXLAN tunnel, and +# that PMTU exceptions with the correct values are created. +# +# segment a_r1 segment b_r1 b_r1: 4000 +# .--------------R1--------------. everything +# C---veth init B else: 5000 +# '- ovs | +# '---- - - - - - VXLAN - - - - - - - ' +# +# - pmtu_ipv{4,6}_ovs_geneve{4,6}_exception +# Same as pmtu_ipv{4,6}_ovs_vxlan{4,6}_exception, with a GENEVE tunnel +# instead. 
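+# These tests need ovs-vsctl on the host: the tunnel endpoint on the A side +# is an Open vSwitch tunnel port in the init namespace (vxlan_sys_4789 or +# genev_sys_6081), and the resulting PMTU exception is checked from +# namespace C.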
+# # - pmtu_ipv{4,6}_fou{4,6}_exception # Same as pmtu_ipv4_vxlan4, but using a direct IPv4/IPv6 encapsulation # (FoU) over IPv4/IPv6, instead of VXLAN @@ -174,6 +194,14 @@ tests=" pmtu_ipv6_br_geneve4_exception IPv6, bridged geneve4: PMTU exceptions 1 pmtu_ipv4_br_geneve6_exception IPv4, bridged geneve6: PMTU exceptions 1 pmtu_ipv6_br_geneve6_exception IPv6, bridged geneve6: PMTU exceptions 1 + pmtu_ipv4_ovs_vxlan4_exception IPv4, OVS vxlan4: PMTU exceptions 1 + pmtu_ipv6_ovs_vxlan4_exception IPv6, OVS vxlan4: PMTU exceptions 1 + pmtu_ipv4_ovs_vxlan6_exception IPv4, OVS vxlan6: PMTU exceptions 1 + pmtu_ipv6_ovs_vxlan6_exception IPv6, OVS vxlan6: PMTU exceptions 1 + pmtu_ipv4_ovs_geneve4_exception IPv4, OVS geneve4: PMTU exceptions 1 + pmtu_ipv6_ovs_geneve4_exception IPv6, OVS geneve4: PMTU exceptions 1 + pmtu_ipv4_ovs_geneve6_exception IPv4, OVS geneve6: PMTU exceptions 1 + pmtu_ipv6_ovs_geneve6_exception IPv6, OVS geneve6: PMTU exceptions 1 pmtu_ipv4_fou4_exception IPv4 over fou4: PMTU exceptions 1 pmtu_ipv6_fou4_exception IPv6 over fou4: PMTU exceptions 1 pmtu_ipv4_fou6_exception IPv4 over fou6: PMTU exceptions 1 @@ -698,6 +726,66 @@ setup_bridge() { run_cmd ${ns_a} ip link set veth_A-C master br0 } +setup_ovs_vxlan_or_geneve() { + type="${1}" + a_addr="${2}" + b_addr="${3}" + + if [ "${type}" = "vxlan" ]; then + opts="${opts} ttl 64 dstport 4789" + opts_b="local ${b_addr}" + fi + + run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \ + set interface ${type}_a type=${type} \ + options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1 + + run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts} || return 1 + + run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b + run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b + + run_cmd ${ns_b} ip link set ${type}_b up +} + +setup_ovs_geneve4() { + setup_ovs_vxlan_or_geneve geneve ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 +} + +setup_ovs_vxlan4() { + setup_ovs_vxlan_or_geneve vxlan ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 +} + +setup_ovs_geneve6() { + setup_ovs_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 +} + +setup_ovs_vxlan6() { + setup_ovs_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 +} + +setup_ovs_bridge() { + run_cmd ovs-vsctl add-br ovs_br0 || return 2 + run_cmd ip link set ovs_br0 up + + run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C + run_cmd ${ns_c} ip link set veth_A-C netns 1 + + run_cmd ip link set veth_A-C up + run_cmd ${ns_c} ip link set veth_C-A up + run_cmd ${ns_c} ip addr add ${veth4_c_addr}/${veth4_mask} dev veth_C-A + run_cmd ${ns_c} ip addr add ${veth6_c_addr}/${veth6_mask} dev veth_C-A + run_cmd ovs-vsctl add-port ovs_br0 veth_A-C + + # Move veth_A-R1 to init + run_cmd ${ns_a} ip link set veth_A-R1 netns 1 + run_cmd ip addr add ${prefix4}.${a_r1}.1/${veth4_mask} dev veth_A-R1 + run_cmd ip addr add ${prefix6}:${a_r1}::1/${veth6_mask} dev veth_A-R1 + run_cmd ip link set veth_A-R1 up + run_cmd ip route add ${prefix4}.${b_r1}.1 via ${prefix4}.${a_r1}.2 + run_cmd ip route add ${prefix6}:${b_r1}::1 via ${prefix6}:${a_r1}::2 +} + setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip @@ -728,6 +816,11 @@ cleanup() { for n in ${NS_A} ${NS_B} ${NS_C} ${NS_R1} ${NS_R2}; do ip netns del ${n} 2> /dev/null done + + ip link del veth_A-C 2>/dev/null + ip link del veth_A-R1 2>/dev/null + ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null + ovs-vsctl --if-exists 
del-br ovs_br0 2>/dev/null } mtu() { @@ -1044,6 +1137,93 @@ test_pmtu_ipv6_br_geneve6_exception() { test_pmtu_ipvX_over_bridged_vxlanY_or_geneveY_exception geneve 6 6 } +test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() { + type=${1} + family=${2} + outer_family=${3} + ll_mtu=4000 + + if [ ${outer_family} -eq 4 ]; then + setup namespaces routing ovs_bridge ovs_${type}4 || return 2 + # IPv4 header UDP header VXLAN/GENEVE header Ethernet header + exp_mtu=$((${ll_mtu} - 20 - 8 - 8 - 14)) + else + setup namespaces routing ovs_bridge ovs_${type}6 || return 2 + # IPv6 header UDP header VXLAN/GENEVE header Ethernet header + exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14)) + fi + + if [ "${type}" = "vxlan" ]; then + tun_a="vxlan_sys_4789" + elif [ "${type}" = "geneve" ]; then + tun_a="genev_sys_6081" + fi + + trace "" "${tun_a}" "${ns_b}" ${type}_b \ + "" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \ + "" ovs_br0 "" veth-A-C \ + "${ns_c}" veth_C-A + + if [ ${family} -eq 4 ]; then + ping=ping + dst=${tunnel4_b_addr} + else + ping=${ping6} + dst=${tunnel6_b_addr} + fi + + # Create route exception by exceeding link layer MTU + mtu "" veth_A-R1 $((${ll_mtu} + 1000)) + mtu "" ovs_br0 $((${ll_mtu} + 1000)) + mtu "" veth_A-C $((${ll_mtu} + 1000)) + mtu "${ns_c}" veth_C-A $((${ll_mtu} + 1000)) + mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000)) + mtu "${ns_b}" veth_B-R1 ${ll_mtu} + mtu "${ns_r1}" veth_R1-B ${ll_mtu} + + mtu "" ${tun_a} $((${ll_mtu} + 1000)) + mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000)) + + run_cmd ${ns_c} ${ping} -q -M want -i 0.1 -c 20 -s $((${ll_mtu} + 500)) ${dst} || return 1 + + # Check that exceptions were created + pmtu="$(route_get_dst_pmtu_from_exception "${ns_c}" ${dst})" + check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on Open vSwitch ${type} interface" +} + +test_pmtu_ipv4_ovs_vxlan4_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception vxlan 4 4 +} + +test_pmtu_ipv6_ovs_vxlan4_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception vxlan 6 4 +} + +test_pmtu_ipv4_ovs_geneve4_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception geneve 4 4 +} + +test_pmtu_ipv6_ovs_geneve4_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception geneve 6 4 +} + +test_pmtu_ipv4_ovs_vxlan6_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception vxlan 4 6 +} + +test_pmtu_ipv6_ovs_vxlan6_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception vxlan 6 6 +} + +test_pmtu_ipv4_ovs_geneve6_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception geneve 4 6 +} + +test_pmtu_ipv6_ovs_geneve6_exception() { + test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception geneve 6 6 +} + test_pmtu_ipvX_over_fouY_or_gueY() { inner_family=${1} outer_family=${2} From bab9693a9a8c6dd19f670408ec1e78e12a320682 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 4 Aug 2020 15:02:30 +0800 Subject: [PATCH 2036/2056] net: thunderx: use spin_lock_bh in nicvf_set_rx_mode_task() A dead lock was triggered on thunderx driver: CPU0 CPU1 ---- ---- [01] lock(&(&nic->rx_mode_wq_lock)->rlock); [11] lock(&(&mc->mca_lock)->rlock); [12] lock(&(&nic->rx_mode_wq_lock)->rlock); [02] lock(&(&mc->mca_lock)->rlock); The path for each is: [01] worker_thread() -> process_one_work() -> nicvf_set_rx_mode_task() [02] mld_ifc_timer_expire() [11] ipv6_add_dev() -> ipv6_dev_mc_inc() -> igmp6_group_added() -> [12] dev_mc_add() -> __dev_set_rx_mode() -> nicvf_set_rx_mode() To fix it, it needs to disable bh on [1], so that the timer 
on [2] wouldn't be triggered until rx_mode_wq_lock is released. So change to use spin_lock_bh() instead of spin_lock(). Thanks to Paolo for helping with this. v1->v2: - post to netdev. Reported-by: Rafael P. Tested-by: Dean Nelson Fixes: 469998c861fa ("net: thunderx: prevent concurrent data re-writing by nicvf_set_rx_mode") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 1c6163934e20..d22c9d350355 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2039,11 +2039,11 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg) /* Save message data locally to prevent them from * being overwritten by next ndo_set_rx_mode call(). */ - spin_lock(&nic->rx_mode_wq_lock); + spin_lock_bh(&nic->rx_mode_wq_lock); mode = vf_work->mode; mc = vf_work->mc; vf_work->mc = NULL; - spin_unlock(&nic->rx_mode_wq_lock); + spin_unlock_bh(&nic->rx_mode_wq_lock); __nicvf_set_rx_mode_task(mode, mc, nic); } From f7ba7dbf4f7af67b5936ff1cbd40a3254b409ebf Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Tue, 4 Aug 2020 14:17:16 +0200 Subject: [PATCH 2037/2056] net: macb: Properly handle phylink on at91sam9x I just recently noticed that ethernet does not work anymore since v5.5 on the GARDENA smart Gateway, which is based on the AT91SAM9G25. Debugging showed that the "GEM bits" in the NCFGR register are now unconditionally accessed, which is incorrect for the !macb_is_gem() case. This patch adds the macb_is_gem() checks back to the code (in macb_mac_config() & macb_mac_link_up()), so that the GEM register bits are not accessed in this case any more. Fixes: 7897b071ac3b ("net: macb: convert to phylink") Signed-off-by: Stefan Roese Cc: Reto Schneider Cc: Alexandre Belloni Cc: Nicolas Ferre Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 636f760d518f..6761f404b8aa 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -578,7 +578,7 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode, if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { if (state->interface == PHY_INTERFACE_MODE_RMII) ctrl |= MACB_BIT(RM9200_RMII); - } else { + } else if (macb_is_gem(bp)) { ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL)); if (state->interface == PHY_INTERFACE_MODE_SGMII) @@ -639,10 +639,13 @@ static void macb_mac_link_up(struct phylink_config *config, ctrl |= MACB_BIT(FD); if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { - ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE)); + ctrl &= ~MACB_BIT(PAE); + if (macb_is_gem(bp)) { + ctrl &= ~GEM_BIT(GBE); - if (speed == SPEED_1000) - ctrl |= GEM_BIT(GBE); + if (speed == SPEED_1000) + ctrl |= GEM_BIT(GBE); + } /* We do not support MLO_PAUSE_RX yet */ if (tx_pause) From 02afa9c66bb954c6959877c70d9e128dcf0adce7 Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Tue, 4 Aug 2020 21:26:43 +0800 Subject: [PATCH 2038/2056] dpaa2-eth: Fix passing zero to 'PTR_ERR' warning Fix smatch warning: drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c:2419 alloc_channel() warn: passing zero to 'ERR_PTR' setup_dpcon() should return ERR_PTR(err) instead of zero in error handling case. Fixes: d7f5a9d89a55 ("dpaa2-eth: defer probe on object allocate") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 83b1e974bff0..457106e761be 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2370,7 +2370,7 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) free: fsl_mc_object_free(dpcon); - return NULL; + return ERR_PTR(err); } static void free_dpcon(struct dpaa2_eth_priv *priv, @@ -2394,8 +2394,8 @@ alloc_channel(struct dpaa2_eth_priv *priv) return NULL; channel->dpcon = setup_dpcon(priv); - if (IS_ERR_OR_NULL(channel->dpcon)) { - err = PTR_ERR_OR_ZERO(channel->dpcon); + if (IS_ERR(channel->dpcon)) { + err = PTR_ERR(channel->dpcon); goto err_setup; } From 7c9864bbccc23e1812ac82966555d68c13ea4006 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 4 Aug 2020 09:54:15 -0700 Subject: [PATCH 2039/2056] hv_netvsc: do not use VF device if link is down If the accelerated networking SRIOV VF device has lost carrier use the synthetic network device which is available as backup path. This is a rare case since if VF link goes down, normally the VMBus device will also loose external connectivity as well. But if the communication is between two VM's on the same host the VMBus device will still work. Reported-by: "Shah, Ashish N" Fixes: 0c195567a8f6 ("netvsc: transparent VF management") Signed-off-by: Stephen Hemminger Reviewed-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d27b90b4e2bb..787f17e2a971 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -532,12 +532,13 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; - /* if VF is present and up then redirect packets - * already called with rcu_read_lock_bh + /* If VF is present and up then redirect packets to it. + * Skip the VF if it is marked down or has no carrier. + * If netpoll is in uses, then VF can not be used either. */ vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); if (vf_netdev && netif_running(vf_netdev) && - !netpoll_tx_running(net)) + netif_carrier_ok(vf_netdev) && !netpoll_tx_running(net)) return netvsc_vf_xmit(net, vf_netdev, skb); /* We will atmost need two pages to describe the rndis From 24dd377a76b08145782e6f81edf7e2dafca406bd Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 4 Aug 2020 22:08:09 +0200 Subject: [PATCH 2040/2056] wan: wanxl: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'wanxl_pci_init_one()', GFP_KERNEL can be used because it is a probe function and no lock is acquired. Moreover, just a few lines above, GFP_KERNEL is already used. @@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: 
Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/wan/wanxl.c | 54 ++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c index 499f7cd19a4a..a83133388de9 100644 --- a/drivers/net/wan/wanxl.c +++ b/drivers/net/wan/wanxl.c @@ -99,7 +99,7 @@ static inline port_status_t *get_status(struct port *port) static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr, size_t size, int direction) { - dma_addr_t addr = pci_map_single(pdev, ptr, size, direction); + dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction); if (addr + size > 0x100000000LL) pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n", pci_name(pdev), (unsigned long long)addr); @@ -180,8 +180,8 @@ static inline void wanxl_tx_intr(struct port *port) dev->stats.tx_bytes += skb->len; } desc->stat = PACKET_EMPTY; /* Free descriptor */ - pci_unmap_single(port->card->pdev, desc->address, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&port->card->pdev->dev, desc->address, + skb->len, DMA_TO_DEVICE); dev_consume_skb_irq(skb); port->tx_in = (port->tx_in + 1) % TX_BUFFERS; } @@ -207,9 +207,9 @@ static inline void wanxl_rx_intr(struct card *card) if (!skb) dev->stats.rx_dropped++; else { - pci_unmap_single(card->pdev, desc->address, - BUFFER_LENGTH, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&card->pdev->dev, + desc->address, BUFFER_LENGTH, + DMA_FROM_DEVICE); skb_put(skb, desc->length); #ifdef DEBUG_PKT @@ -227,9 +227,10 @@ static inline void wanxl_rx_intr(struct card *card) if (!skb) { skb = dev_alloc_skb(BUFFER_LENGTH); desc->address = skb ? - pci_map_single(card->pdev, skb->data, + dma_map_single(&card->pdev->dev, + skb->data, BUFFER_LENGTH, - PCI_DMA_FROMDEVICE) : 0; + DMA_FROM_DEVICE) : 0; card->rx_skbs[card->rx_in] = skb; } } @@ -291,8 +292,8 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) #endif port->tx_skbs[port->tx_out] = skb; - desc->address = pci_map_single(port->card->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE); + desc->address = dma_map_single(&port->card->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); desc->length = skb->len; desc->stat = PACKET_FULL; writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node), @@ -451,9 +452,9 @@ static int wanxl_close(struct net_device *dev) if (desc->stat != PACKET_EMPTY) { desc->stat = PACKET_EMPTY; - pci_unmap_single(port->card->pdev, desc->address, - port->tx_skbs[i]->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&port->card->pdev->dev, + desc->address, port->tx_skbs[i]->len, + DMA_TO_DEVICE); dev_kfree_skb(port->tx_skbs[i]); } } @@ -524,9 +525,9 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev) for (i = 0; i < RX_QUEUE_LENGTH; i++) if (card->rx_skbs[i]) { - pci_unmap_single(card->pdev, + dma_unmap_single(&card->pdev->dev, card->status->rx_descs[i].address, - BUFFER_LENGTH, PCI_DMA_FROMDEVICE); + BUFFER_LENGTH, DMA_FROM_DEVICE); dev_kfree_skb(card->rx_skbs[i]); } @@ -534,8 +535,8 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev) iounmap(card->plx); if (card->status) - pci_free_consistent(pdev, sizeof(struct card_status), - card->status, card->status_address); + dma_free_coherent(&pdev->dev, sizeof(struct card_status), + card->status, card->status_address); pci_release_regions(pdev); pci_disable_device(pdev); @@ -579,8 +580,8 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, We set both dma_mask and consistent_dma_mask to 28 bits and pray pci_alloc_consistent() will use this info. 
It should work on most platforms */ - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) || - pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) { pr_err("No usable DMA configuration\n"); pci_disable_device(pdev); return -EIO; @@ -608,9 +609,9 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, card); card->pdev = pdev; - card->status = pci_alloc_consistent(pdev, - sizeof(struct card_status), - &card->status_address); + card->status = dma_alloc_coherent(&pdev->dev, + sizeof(struct card_status), + &card->status_address, GFP_KERNEL); if (card->status == NULL) { wanxl_pci_remove_one(pdev); return -ENOBUFS; @@ -625,8 +626,8 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, /* FIXME when PCI/DMA subsystems are fixed. We set both dma_mask and consistent_dma_mask back to 32 bits to indicate the card can do 32-bit DMA addressing */ - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) || - pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) || + dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { pr_err("No usable DMA configuration\n"); wanxl_pci_remove_one(pdev); return -EIO; @@ -699,9 +700,8 @@ static int wanxl_pci_init_one(struct pci_dev *pdev, card->rx_skbs[i] = skb; if (skb) card->status->rx_descs[i].address = - pci_map_single(card->pdev, skb->data, - BUFFER_LENGTH, - PCI_DMA_FROMDEVICE); + dma_map_single(&card->pdev->dev, skb->data, + BUFFER_LENGTH, DMA_FROM_DEVICE); } mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware)); From 4c900a6b4e057fc9623240682988e203ae98ca94 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 4 Aug 2020 22:19:24 +0200 Subject: [PATCH 2041/2056] farsync: switch from 'pci_' to 'dma_' API The wrappers in include/linux/pci-dma-compat.h should go away. The patch has been generated with the coccinelle script below and has been hand modified to replace GFP_ with a correct flag. It has been compile tested. When memory is allocated in 'fst_add_one()', GFP_KERNEL can be used because it is a probe function and no lock is acquired. 
@@ @@ - PCI_DMA_BIDIRECTIONAL + DMA_BIDIRECTIONAL @@ @@ - PCI_DMA_TODEVICE + DMA_TO_DEVICE @@ @@ - PCI_DMA_FROMDEVICE + DMA_FROM_DEVICE @@ @@ - PCI_DMA_NONE + DMA_NONE @@ expression e1, e2, e3; @@ - pci_alloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3; @@ - pci_zalloc_consistent(e1, e2, e3) + dma_alloc_coherent(&e1->dev, e2, e3, GFP_) @@ expression e1, e2, e3, e4; @@ - pci_free_consistent(e1, e2, e3, e4) + dma_free_coherent(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_single(e1, e2, e3, e4) + dma_map_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_single(e1, e2, e3, e4) + dma_unmap_single(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4, e5; @@ - pci_map_page(e1, e2, e3, e4, e5) + dma_map_page(&e1->dev, e2, e3, e4, e5) @@ expression e1, e2, e3, e4; @@ - pci_unmap_page(e1, e2, e3, e4) + dma_unmap_page(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_map_sg(e1, e2, e3, e4) + dma_map_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_unmap_sg(e1, e2, e3, e4) + dma_unmap_sg(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_cpu(e1, e2, e3, e4) + dma_sync_single_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_single_for_device(e1, e2, e3, e4) + dma_sync_single_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_cpu(e1, e2, e3, e4) + dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4) @@ expression e1, e2, e3, e4; @@ - pci_dma_sync_sg_for_device(e1, e2, e3, e4) + dma_sync_sg_for_device(&e1->dev, e2, e3, e4) @@ expression e1, e2; @@ - pci_dma_mapping_error(e1, e2) + dma_mapping_error(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_dma_mask(e1, e2) + dma_set_mask(&e1->dev, e2) @@ expression e1, e2; @@ - pci_set_consistent_dma_mask(e1, e2) + dma_set_coherent_mask(&e1->dev, e2) Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/wan/farsync.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index fe4650260bc6..087c21b39b8d 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2553,16 +2553,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) * Allocate a dma buffer for transmit and receives */ card->rx_dma_handle_host = - pci_alloc_consistent(card->device, FST_MAX_MTU, - &card->rx_dma_handle_card); + dma_alloc_coherent(&card->device->dev, FST_MAX_MTU, + &card->rx_dma_handle_card, GFP_KERNEL); if (card->rx_dma_handle_host == NULL) { pr_err("Could not allocate rx dma buffer\n"); err = -ENOMEM; goto rx_dma_fail; } card->tx_dma_handle_host = - pci_alloc_consistent(card->device, FST_MAX_MTU, - &card->tx_dma_handle_card); + dma_alloc_coherent(&card->device->dev, FST_MAX_MTU, + &card->tx_dma_handle_card, GFP_KERNEL); if (card->tx_dma_handle_host == NULL) { pr_err("Could not allocate tx dma buffer\n"); err = -ENOMEM; @@ -2572,9 +2572,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; /* Success */ tx_dma_fail: - pci_free_consistent(card->device, FST_MAX_MTU, - card->rx_dma_handle_host, - card->rx_dma_handle_card); + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->rx_dma_handle_host, card->rx_dma_handle_card); rx_dma_fail: fst_disable_intr(card); for (i = 0 ; i < card->nports ; i++) @@ -2625,12 +2624,12 @@ fst_remove_one(struct pci_dev *pdev) /* * Free dma buffers */ - pci_free_consistent(card->device, FST_MAX_MTU, - card->rx_dma_handle_host, - card->rx_dma_handle_card); - pci_free_consistent(card->device, FST_MAX_MTU, - card->tx_dma_handle_host, - card->tx_dma_handle_card); + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->rx_dma_handle_host, + card->rx_dma_handle_card); + dma_free_coherent(&card->device->dev, FST_MAX_MTU, + card->tx_dma_handle_host, + card->tx_dma_handle_card); } fst_card_array[card->card_no] = NULL; } From c29f9aa35016729fc0cf72994e8b57f33466b64c Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Wed, 5 Aug 2020 03:10:47 +0300 Subject: [PATCH 2042/2056] ptp: only allow phase values lower than 1 period The way we define the phase (the difference between the time of the signal's rising edge, and the closest integer multiple of the period), it doesn't make sense to have a phase value equal or larger than 1 period. So deny these settings coming from the user. Signed-off-by: Vladimir Oltean Acked-by: Richard Cochran Acked-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/ptp/ptp_chardev.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index e0e6f85966e1..af3bc65c4595 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -218,6 +218,19 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; } } + if (perout->flags & PTP_PEROUT_PHASE) { + /* + * The phase should be specified modulo the + * period, therefore anything equal or larger + * than 1 period is invalid. 
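+ * For example, with a period of 1 s, a phase of + * 999999999 ns is accepted, while a phase of 1 s + * is rejected with -ERANGE.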
+ */ + if (perout->phase.sec > perout->period.sec || + (perout->phase.sec == perout->period.sec && + perout->phase.nsec >= perout->period.nsec)) { + err = -ERANGE; + break; + } + } } else if (cmd == PTP_PEROUT_REQUEST) { req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS; req.perout.rsv[0] = 0; From a0dced17ad9dc08b1b25e0065b54c97a318e6e8b Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 5 Aug 2020 10:41:31 +0800 Subject: [PATCH 2043/2056] Revert "vxlan: fix tos value before xmit" This reverts commit 71130f29979c7c7956b040673e6b9d5643003176. In commit 71130f29979c ("vxlan: fix tos value before xmit") we want to make sure the tos value are filtered by RT_TOS() based on RFC1349. 0 1 2 3 4 5 6 7 +-----+-----+-----+-----+-----+-----+-----+-----+ | PRECEDENCE | TOS | MBZ | +-----+-----+-----+-----+-----+-----+-----+-----+ But RFC1349 has been obsoleted by RFC2474. The new DSCP field defined like 0 1 2 3 4 5 6 7 +-----+-----+-----+-----+-----+-----+-----+-----+ | DS FIELD, DSCP | ECN FIELD | +-----+-----+-----+-----+-----+-----+-----+-----+ So with IPTOS_TOS_MASK 0x1E RT_TOS(tos) ((tos)&IPTOS_TOS_MASK) the first 3 bits DSCP info will get lost. To take all the DSCP info in xmit, we should revert the patch and just push all tos bits to ip_tunnel_ecn_encap(), which will handling ECN field later. Fixes: 71130f29979c ("vxlan: fix tos value before xmit") Signed-off-by: Hangbin Liu Acked-by: Guillaume Nault Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6d5816be6131..b9fefe27e3e8 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2740,7 +2740,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr), vni, md, flags, udp_sum); @@ -2797,7 +2797,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), From 5845589ed652da9b65833044bd2d0bcb1d676ed0 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Wed, 5 Aug 2020 15:19:11 +0800 Subject: [PATCH 2044/2056] net: openvswitch: silence suspicious RCU usage warning ovs_flow_tbl_destroy always is called from RCU callback or error path. It is no need to check if rcu_read_lock or lockdep_ovsl_is_held was held. ovs_dp_cmd_fill_info always is called with ovs_mutex, So use the rcu_dereference_ovsl instead of rcu_dereference in ovs_flow_tbl_masks_cache_size. Fixes: 9bf24f594c6a ("net: openvswitch: make masks cache size configurable") Cc: Eelco Chaudron Reported-by: syzbot+c0eb9e7cdde04e4eb4be@syzkaller.appspotmail.com Reported-by: syzbot+f612c02823acb02ff9bc@syzkaller.appspotmail.com Signed-off-by: Tonghao Zhang Acked-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/openvswitch/flow_table.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 6527d84c3ea6..8c12675cbb67 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -518,8 +518,8 @@ void ovs_flow_tbl_destroy(struct flow_table *table) { struct table_instance *ti = rcu_dereference_raw(table->ti); struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); - struct mask_cache *mc = rcu_dereference(table->mask_cache); - struct mask_array *ma = rcu_dereference_ovsl(table->mask_array); + struct mask_cache *mc = rcu_dereference_raw(table->mask_cache); + struct mask_array *ma = rcu_dereference_raw(table->mask_array); call_rcu(&mc->rcu, mask_cache_rcu_cb); call_rcu(&ma->rcu, mask_array_rcu_cb); @@ -937,7 +937,7 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table) u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table) { - struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache); return READ_ONCE(mc->cache_size); } From 81f6cb31222d286dab65579d61f96664eeb99e0b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 3 Aug 2020 23:34:46 +0800 Subject: [PATCH 2045/2056] ipv6: add ipv6_dev_find() This is to add an ip_dev_find like function for ipv6, used to find the dev by saddr. It will be used by TIPC protocol. So also export it. Signed-off-by: Xin Long Acked-by: Ying Xue Signed-off-by: David S. Miller --- include/net/addrconf.h | 2 ++ net/ipv6/addrconf.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 8418b7d38468..ba3f6c15ad2b 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -97,6 +97,8 @@ bool ipv6_chk_custom_prefix(const struct in6_addr *addr, int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev); +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr); + struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, struct net_device *dev, int strict); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0acf6a9796ca..8e761b8c47c6 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1983,6 +1983,45 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev) } EXPORT_SYMBOL(ipv6_chk_prefix); +/** + * ipv6_dev_find - find the first device with a given source address. + * @net: the net namespace + * @addr: the source address + * + * The caller should be protected by RCU, or RTNL. 
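+ * + * Return: the device that has @addr configured as a local address, the output device of a route lookup towards @addr when no interface address matches, or NULL if neither is found. The returned device's reference count is not incremented.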
+ */ +struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr) +{ + unsigned int hash = inet6_addr_hash(net, addr); + struct inet6_ifaddr *ifp, *result = NULL; + struct net_device *dev = NULL; + + rcu_read_lock(); + hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) { + if (net_eq(dev_net(ifp->idev->dev), net) && + ipv6_addr_equal(&ifp->addr, addr)) { + result = ifp; + break; + } + } + + if (!result) { + struct rt6_info *rt; + + rt = rt6_lookup(net, addr, NULL, 0, NULL, 0); + if (rt) { + dev = rt->dst.dev; + ip6_rt_put(rt); + } + } else { + dev = result->idev->dev; + } + rcu_read_unlock(); + + return dev; +} +EXPORT_SYMBOL(ipv6_dev_find); + struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, struct net_device *dev, int strict) { From 5a6f6f579178dbeb33002d93b4f646c31348fac9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 3 Aug 2020 23:34:47 +0800 Subject: [PATCH 2046/2056] tipc: set ub->ifindex for local ipv6 address Without ub->ifindex set for ipv6 address in tipc_udp_enable(), ipv6_sock_mc_join() may make the wrong dev join the multicast address in enable_mcast(). This causes that tipc links would never be created. So fix it by getting the right netdev and setting ub->ifindex, as it does for ipv4 address. Reported-by: Shuang Li Signed-off-by: Xin Long Acked-by: Ying Xue Signed-off-by: David S. Miller --- net/tipc/udp_media.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index d91b7c543e39..53f0de0676b7 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -738,6 +738,13 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, b->mtu = b->media->mtu; #if IS_ENABLED(CONFIG_IPV6) } else if (local.proto == htons(ETH_P_IPV6)) { + struct net_device *dev; + + dev = ipv6_dev_find(net, &local.ipv6); + if (!dev) { + err = -ENODEV; + goto err; + } udp_conf.family = AF_INET6; udp_conf.use_udp6_tx_checksums = true; udp_conf.use_udp6_rx_checksums = true; @@ -745,6 +752,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, udp_conf.local_ip6 = in6addr_any; else udp_conf.local_ip6 = local.ipv6; + ub->ifindex = dev->ifindex; b->mtu = 1280; #endif } else { From 0b0e299720bb99428892a23ecbd2b4b7f61ccf6d Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Mon, 3 Aug 2020 19:48:23 +0300 Subject: [PATCH 2047/2056] net: dsa: sja1105: use detected device id instead of DT one on mismatch Although we can detect the chip revision 100% at runtime, it is useful to specify it in the device tree compatible string too, because otherwise there would be no way to assess the correctness of device tree bindings statically, without booting a board (only some switch versions have internal RGMII delays and/or an SGMII port). But for testing the P/Q/R/S support, what I have is a reworked board with the SJA1105T replaced by a pin-compatible SJA1105Q, and I don't want to keep a separate device tree blob just for this one-off board. Since just the chip has been replaced, its RGMII delay setup is inherently the same (meaning: delays added by the PHY on the slave ports, and by PCB traces on the fixed-link CPU port). For this board, I'd rather have the driver shout at me, but go ahead and use what it found even if it doesn't match what it's been told is there. [ 2.970826] sja1105 spi0.1: Device tree specifies chip SJA1105T but found SJA1105Q, please fix it! 
[ 2.980010] sja1105 spi0.1: Probed switch chip: SJA1105Q [ 3.005082] sja1105 spi0.1: Enabled switch tagging Signed-off-by: Vladimir Oltean Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/sja1105/sja1105_main.c | 35 ++++++++++++++++++-------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 5079e4aeef80..c3f6f124e5f0 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3391,11 +3391,14 @@ static const struct dsa_switch_ops sja1105_switch_ops = { .devlink_param_set = sja1105_devlink_param_set, }; +static const struct of_device_id sja1105_dt_ids[]; + static int sja1105_check_device_id(struct sja1105_private *priv) { const struct sja1105_regs *regs = priv->info->regs; u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0}; struct device *dev = &priv->spidev->dev; + const struct of_device_id *match; u32 device_id; u64 part_no; int rc; @@ -3405,12 +3408,6 @@ static int sja1105_check_device_id(struct sja1105_private *priv) if (rc < 0) return rc; - if (device_id != priv->info->device_id) { - dev_err(dev, "Expected device ID 0x%llx but read 0x%x\n", - priv->info->device_id, device_id); - return -ENODEV; - } - rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id, SJA1105_SIZE_DEVICE_ID); if (rc < 0) @@ -3418,13 +3415,29 @@ static int sja1105_check_device_id(struct sja1105_private *priv) sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID); - if (part_no != priv->info->part_no) { - dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n", - priv->info->part_no, part_no); - return -ENODEV; + for (match = sja1105_dt_ids; match->compatible; match++) { + const struct sja1105_info *info = match->data; + + /* Is what's been probed in our match table at all? */ + if (info->device_id != device_id || info->part_no != part_no) + continue; + + /* But is it what's in the device tree? */ + if (priv->info->device_id != device_id || + priv->info->part_no != part_no) { + dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n", + priv->info->name, info->name); + /* It isn't. No problem, pick that up. 
*/ + priv->info = info; + } + + return 0; } - return 0; + dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n", + device_id, part_no); + + return -ENODEV; } static int sja1105_probe(struct spi_device *spi) From c2a4d2747996ee6a1397e2064d44a4f57ac442e6 Mon Sep 17 00:00:00 2001 From: Po-Hsu Lin Date: Tue, 4 Aug 2020 18:18:02 +0800 Subject: [PATCH 2048/2056] selftests: rtnetlink: correct the final return value for the test The return value "ret" will be reset to 0 from the beginning of each sub-test in rtnetlink.sh, therefore this test will always pass if the last sub-test has passed: $ sudo ./rtnetlink.sh PASS: policy routing PASS: route get PASS: preferred_lft addresses have expired PASS: promote_secondaries complete PASS: tc htb hierarchy PASS: gre tunnel endpoint PASS: gretap PASS: ip6gretap PASS: erspan PASS: ip6erspan PASS: bridge setup PASS: ipv6 addrlabel PASS: set ifalias a39ee707-e36b-41d3-802f-63179ed4d580 for test-dummy0 PASS: vrf PASS: vxlan FAIL: can't add fou port 7777, skipping test PASS: macsec PASS: ipsec 3,7c3,7 < sa[0] spi=0x00000009 proto=0x32 salt=0x64636261 crypt=1 < sa[0] key=0x31323334 35363738 39303132 33343536 < sa[1] rx ipaddr=0x00000000 00000000 00000000 c0a87b03 < sa[1] spi=0x00000009 proto=0x32 salt=0x64636261 crypt=1 < sa[1] key=0x31323334 35363738 39303132 33343536 --- > sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 > sa[0] key=0x34333231 38373635 32313039 36353433 > sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0 > sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 > sa[1] key=0x34333231 38373635 32313039 36353433 FAIL: ipsec_offload incorrect driver data FAIL: ipsec_offload PASS: bridge fdb get PASS: neigh get $ echo $? 0 Make "ret" become a local variable for all sub-tests. Also, check the sub-test results in kci_test_rtnl() and return the final result for this test. Signed-off-by: Po-Hsu Lin Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 65 ++++++++++++++++-------- 1 file changed, 43 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index bdbf4b3125b6..9db66bef6f1e 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -5,7 +5,6 @@ # set -e devdummy="test-dummy0" -ret=0 # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 @@ -66,7 +65,7 @@ kci_test_bridge() devbr="test-br0" vlandev="testbr-vlan1" - ret=0 + local ret=0 ip link add name "$devbr" type bridge check_err $? @@ -113,7 +112,7 @@ kci_test_gre() rem=10.42.42.1 loc=10.0.0.1 - ret=0 + local ret=0 ip tunnel add $gredev mode gre remote $rem local $loc ttl 1 check_err $? ip link set $gredev up @@ -149,7 +148,7 @@ kci_test_gre() kci_test_tc() { dev=lo - ret=0 + local ret=0 tc qdisc add dev "$dev" root handle 1: htb check_err $? @@ -184,7 +183,7 @@ kci_test_tc() kci_test_polrouting() { - ret=0 + local ret=0 ip rule add fwmark 1 lookup 100 check_err $? ip route add local 0.0.0.0/0 dev lo table 100 @@ -207,7 +206,7 @@ kci_test_route_get() { local hash_policy=$(sysctl -n net.ipv4.fib_multipath_hash_policy) - ret=0 + local ret=0 ip route get 127.0.0.1 > /dev/null check_err $? @@ -290,7 +289,7 @@ kci_test_promote_secondaries() kci_test_addrlabel() { - ret=0 + local ret=0 ip addrlabel add prefix dead::/64 dev lo label 1 check_err $? 
@@ -330,7 +329,7 @@ kci_test_addrlabel() kci_test_ifalias() { - ret=0 + local ret=0 namewant=$(uuidgen) syspathname="/sys/class/net/$devdummy/ifalias" @@ -385,7 +384,7 @@ kci_test_ifalias() kci_test_vrf() { vrfname="test-vrf" - ret=0 + local ret=0 ip link show type vrf 2>/dev/null if [ $? -ne 0 ]; then @@ -425,7 +424,7 @@ kci_test_vrf() kci_test_encap_vxlan() { - ret=0 + local ret=0 vxlan="test-vxlan0" vlan="test-vlan0" testns="$1" @@ -511,7 +510,7 @@ kci_test_encap_vxlan() kci_test_encap_fou() { - ret=0 + local ret=0 name="test-fou" testns="$1" @@ -548,7 +547,7 @@ kci_test_encap_fou() kci_test_encap() { testns="testns" - ret=0 + local ret=0 ip netns add "$testns" if [ $? -ne 0 ]; then @@ -573,7 +572,7 @@ kci_test_encap() kci_test_macsec() { msname="test_macsec0" - ret=0 + local ret=0 ip macsec help 2>&1 | grep -q "^Usage: ip macsec" if [ $? -ne 0 ]; then @@ -631,7 +630,7 @@ kci_test_macsec() #------------------------------------------------------------------- kci_test_ipsec() { - ret=0 + local ret=0 algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" srcip=192.168.123.1 dstip=192.168.123.2 @@ -731,7 +730,7 @@ kci_test_ipsec() #------------------------------------------------------------------- kci_test_ipsec_offload() { - ret=0 + local ret=0 algo="aead rfc4106(gcm(aes)) 0x3132333435363738393031323334353664636261 128" srcip=192.168.123.3 dstip=192.168.123.4 @@ -841,7 +840,7 @@ kci_test_gretap() { testns="testns" DEV_NS=gretap00 - ret=0 + local ret=0 ip netns add "$testns" if [ $? -ne 0 ]; then @@ -891,7 +890,7 @@ kci_test_ip6gretap() { testns="testns" DEV_NS=ip6gretap00 - ret=0 + local ret=0 ip netns add "$testns" if [ $? -ne 0 ]; then @@ -941,7 +940,7 @@ kci_test_erspan() { testns="testns" DEV_NS=erspan00 - ret=0 + local ret=0 ip link help erspan 2>&1 | grep -q "^Usage:" if [ $? -ne 0 ];then @@ -1006,7 +1005,7 @@ kci_test_ip6erspan() { testns="testns" DEV_NS=ip6erspan00 - ret=0 + local ret=0 ip link help ip6erspan 2>&1 | grep -q "^Usage:" if [ $? -ne 0 ];then @@ -1077,7 +1076,7 @@ kci_test_fdb_get() test_mac=de:ad:be:ef:13:37 localip="10.0.2.2" dstip="10.0.2.3" - ret=0 + local ret=0 bridge fdb help 2>&1 |grep -q 'bridge fdb get' if [ $? -ne 0 ];then @@ -1125,7 +1124,7 @@ kci_test_neigh_get() dstmac=de:ad:be:ef:13:37 dstip=10.0.2.4 dstip6=dead::2 - ret=0 + local ret=0 ip neigh help 2>&1 |grep -q 'ip neigh get' if [ $? -ne 0 ];then @@ -1175,6 +1174,7 @@ kci_test_neigh_get() kci_test_rtnl() { + local ret=0 kci_add_dummy if [ $ret -ne 0 ];then echo "FAIL: cannot add dummy interface" @@ -1182,27 +1182,48 @@ kci_test_rtnl() fi kci_test_polrouting + check_err $? kci_test_route_get + check_err $? kci_test_addrlft + check_err $? kci_test_promote_secondaries + check_err $? kci_test_tc + check_err $? kci_test_gre + check_err $? kci_test_gretap + check_err $? kci_test_ip6gretap + check_err $? kci_test_erspan + check_err $? kci_test_ip6erspan + check_err $? kci_test_bridge + check_err $? kci_test_addrlabel + check_err $? kci_test_ifalias + check_err $? kci_test_vrf + check_err $? kci_test_encap + check_err $? kci_test_macsec + check_err $? kci_test_ipsec + check_err $? kci_test_ipsec_offload + check_err $? kci_test_fdb_get + check_err $? kci_test_neigh_get + check_err $? kci_del_dummy + return $ret } #check for needed privileges @@ -1221,4 +1242,4 @@ done kci_test_rtnl -exit $ret +exit $? 
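The exit-status plumbing that the two rtnetlink.sh fixes rely on can be shown in isolation. The sketch below is not part of any patch: check_err is a simplified stand-in for the selftest helper (the real one also prints diagnostics), and the two sub-test functions are made up. It illustrates how a per-function "local ret" plus check_err in the caller lets a single failing sub-test propagate to the script's final exit code instead of being overwritten by the last sub-test.

#!/bin/sh
# Minimal sketch of the aggregation pattern, assuming a shell where "local"
# gives the usual dynamic scoping (dash, bash). check_err records the first
# non-zero status in the caller's "ret".
check_err()
{
	if [ "$ret" -eq 0 ]; then
		ret=$1
	fi
}

kci_test_pass()
{
	local ret=0
	true			# a sub-test step that succeeds
	check_err $?
	return $ret
}

kci_test_fail()
{
	local ret=0
	false			# a sub-test step that fails
	check_err $?
	return $ret
}

kci_test_rtnl()
{
	local ret=0
	kci_test_pass
	check_err $?		# fold each sub-test's status into our own ret
	kci_test_fail
	check_err $?
	return $ret
}

kci_test_rtnl
exit $?				# non-zero, because one sub-test failed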
From 72f70c159b53e1363191953875e0223ed959e143 Mon Sep 17 00:00:00 2001 From: Po-Hsu Lin Date: Tue, 4 Aug 2020 18:18:03 +0800 Subject: [PATCH 2049/2056] selftests: rtnetlink: make kci_test_encap() return sub-test result kci_test_encap() is actually composed by two different sub-tests, kci_test_encap_vxlan() and kci_test_encap_fou() Therefore we should check the test result of these two in kci_test_encap() to let the script be aware of the pass / fail status. Otherwise it will generate false-negative result like below: $ sudo ./test.sh PASS: policy routing PASS: route get PASS: preferred_lft addresses have expired PASS: promote_secondaries complete PASS: tc htb hierarchy PASS: gre tunnel endpoint PASS: gretap PASS: ip6gretap PASS: erspan PASS: ip6erspan PASS: bridge setup PASS: ipv6 addrlabel PASS: set ifalias 5b193daf-0a08-46d7-af2c-e7aadd422ded for test-dummy0 PASS: vrf PASS: vxlan FAIL: can't add fou port 7777, skipping test PASS: macsec PASS: bridge fdb get PASS: neigh get $ echo $? 0 Signed-off-by: Po-Hsu Lin Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 9db66bef6f1e..7c38a909f8b8 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -564,9 +564,12 @@ kci_test_encap() check_err $? kci_test_encap_vxlan "$testns" + check_err $? kci_test_encap_fou "$testns" + check_err $? ip netns del "$testns" + return $ret } kci_test_macsec() From adf7341064982de923a1f8a11bcdec48be6b3004 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Tue, 4 Aug 2020 18:31:06 +0200 Subject: [PATCH 2050/2056] mptcp: be careful on subflow creation Nicolas reported the following oops: [ 1521.392541] BUG: kernel NULL pointer dereference, address: 00000000000000c0 [ 1521.394189] #PF: supervisor read access in kernel mode [ 1521.395376] #PF: error_code(0x0000) - not-present page [ 1521.396607] PGD 0 P4D 0 [ 1521.397156] Oops: 0000 [#1] SMP PTI [ 1521.398020] CPU: 0 PID: 22986 Comm: kworker/0:2 Not tainted 5.8.0-rc4+ #109 [ 1521.399618] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014 [ 1521.401728] Workqueue: events mptcp_worker [ 1521.402651] RIP: 0010:mptcp_subflow_create_socket+0xf1/0x1c0 [ 1521.403954] Code: 24 08 89 44 24 04 48 8b 7a 18 e8 2a 48 d4 ff 8b 44 24 04 85 c0 75 7a 48 8b 8b 78 02 00 00 48 8b 54 24 08 48 8d bb 80 00 00 00 <48> 8b 89 c0 00 00 00 48 89 8a c0 00 00 00 48 8b 8b 78 02 00 00 8b [ 1521.408201] RSP: 0000:ffffabc4002d3c60 EFLAGS: 00010246 [ 1521.409433] RAX: 0000000000000000 RBX: ffffa0b9ad8c9a00 RCX: 0000000000000000 [ 1521.411096] RDX: ffffa0b9ae78a300 RSI: 00000000fffffe01 RDI: ffffa0b9ad8c9a80 [ 1521.412734] RBP: ffffa0b9adff2e80 R08: ffffa0b9af02d640 R09: ffffa0b9ad923a00 [ 1521.414333] R10: ffffabc4007139f8 R11: fefefefefefefeff R12: ffffabc4002d3cb0 [ 1521.415918] R13: ffffa0b9ad91fa58 R14: ffffa0b9ad8c9f9c R15: 0000000000000000 [ 1521.417592] FS: 0000000000000000(0000) GS:ffffa0b9af000000(0000) knlGS:0000000000000000 [ 1521.419490] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1521.420839] CR2: 00000000000000c0 CR3: 000000002951e006 CR4: 0000000000160ef0 [ 1521.422511] Call Trace: [ 1521.423103] __mptcp_subflow_connect+0x94/0x1f0 [ 1521.425376] mptcp_pm_create_subflow_or_signal_addr+0x200/0x2a0 [ 1521.426736] mptcp_worker+0x31b/0x390 [ 1521.431324] process_one_work+0x1fc/0x3f0 [ 1521.432268] worker_thread+0x2d/0x3b0 [ 1521.434197] 
kthread+0x117/0x130 [ 1521.435783] ret_from_fork+0x22/0x30 on some unconventional configuration. The MPTCP protocol is trying to create a subflow for an unaccepted server socket. That is allowed by the RFC, even if subflow creation will likely fail. Unaccepted sockets have still a NULL sk_socket field, avoid the issue by failing earlier. Reported-and-tested-by: Nicolas Rybowski Fixes: 7d14b0d2b9b3 ("mptcp: set correct vfs info for subflows") Signed-off-by: Paolo Abeni Reviewed-by: Matthieu Baerts Signed-off-by: David S. Miller --- net/mptcp/subflow.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index a4cc4591bd4e..96f4f2fe50ad 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1137,6 +1137,12 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock) struct socket *sf; int err; + /* un-accepted server sockets can reach here - on bad configuration + * bail early to avoid greater trouble later + */ + if (unlikely(!sk->sk_socket)) + return -EINVAL; + err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP, &sf); if (err) From 16f6458f2478b55e2b628797bc81a4455045c74e Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 5 Aug 2020 04:40:45 -0400 Subject: [PATCH 2051/2056] selftests/net: relax cpu affinity requirement in msg_zerocopy test The msg_zerocopy test pins the sender and receiver threads to separate cores to reduce variance between runs. But it hardcodes the cores and skips core 0, so it fails on machines with the selected cores offline, or simply fewer cores. The test mainly gives code coverage in automated runs. The throughput of zerocopy ('-z') and non-zerocopy runs is logged for manual inspection. Continue even when sched_setaffinity fails. Just log to warn anyone interpreting the data. Fixes: 07b65c5b31ce ("test: add msg_zerocopy test") Reported-by: Colin Ian King Signed-off-by: Willem de Bruijn Acked-by: Colin Ian King Signed-off-by: David S. Miller --- tools/testing/selftests/net/msg_zerocopy.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c index 4b02933cab8a..bdc03a2097e8 100644 --- a/tools/testing/selftests/net/msg_zerocopy.c +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -125,9 +125,8 @@ static int do_setcpu(int cpu) CPU_ZERO(&mask); CPU_SET(cpu, &mask); if (sched_setaffinity(0, sizeof(mask), &mask)) - error(1, 0, "setaffinity %d", cpu); - - if (cfg_verbose) + fprintf(stderr, "cpu: unable to pin, may increase variance.\n"); + else if (cfg_verbose) fprintf(stderr, "cpu: %u\n", cpu); return 0; From 8ed54f167abda44da48498876953f5b7843378df Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Wed, 5 Aug 2020 15:39:31 +0200 Subject: [PATCH 2052/2056] ip_tunnel_core: Fix build for archs without _HAVE_ARCH_IPV6_CSUM On architectures defining _HAVE_ARCH_IPV6_CSUM, we get csum_ipv6_magic() defined by means of arch checksum.h headers. On other architectures, we actually need to include net/ip6_checksum.h to be able to use it. Without this include, building with defconfig breaks at least for s390. Reported-by: Stephen Rothwell Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets") Signed-off-by: Stefano Brivio Signed-off-by: David S. 
Miller --- net/ipv4/ip_tunnel_core.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 9ddee2a0c66d..75c6013ff9a4 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -25,6 +25,7 @@ #include #include #include +#include <net/ip6_checksum.h> #include #include #include From 5fcfb6d0bfcda17f0d0656e4e5b3710af2bbaae5 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 5 Aug 2020 14:07:07 +0200 Subject: [PATCH 2053/2056] hso: fix bailout in error case of probe The driver tries to reuse code for disconnect in case of a failed probe. If resources need to be freed after an error in probe, the netdev must not be freed because it has never been registered. Fix it by telling the helper which path we are in. Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/hso.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index d2fdb5430d27..031a5ad25500 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2357,7 +2357,7 @@ static int remove_net_device(struct hso_device *hso_dev) } /* Frees our network device */ -static void hso_free_net_device(struct hso_device *hso_dev) +static void hso_free_net_device(struct hso_device *hso_dev, bool bailout) { int i; struct hso_net *hso_net = dev2net(hso_dev); @@ -2380,7 +2380,7 @@ static void hso_free_net_device(struct hso_device *hso_dev) kfree(hso_net->mux_bulk_tx_buf); hso_net->mux_bulk_tx_buf = NULL; - if (hso_net->net) + if (hso_net->net && !bailout) free_netdev(hso_net->net); kfree(hso_dev); @@ -2556,7 +2556,7 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface, return hso_dev; exit: - hso_free_net_device(hso_dev); + hso_free_net_device(hso_dev, true); return NULL; } @@ -3133,7 +3133,7 @@ static void hso_free_interface(struct usb_interface *interface) rfkill_unregister(rfk); rfkill_destroy(rfk); } - hso_free_net_device(network_table[i]); + hso_free_net_device(network_table[i], false); } } } From 11c5f6d2f2a8f34f64dfcdec4090d2c17539c2c3 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 5 Aug 2020 14:07:08 +0200 Subject: [PATCH 2054/2056] usb: hso: no complaint about kmalloc failure If this fails, kmalloc() will print a report including a stack trace. There is no need for a separate complaint. Signed-off-by: Oliver Neukum Signed-off-by: David S. Miller --- drivers/net/usb/hso.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 031a5ad25500..5762876e3105 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -2465,10 +2465,9 @@ static void hso_create_rfkill(struct hso_device *hso_dev, &interface_to_usbdev(interface)->dev, RFKILL_TYPE_WWAN, &hso_rfkill_ops, hso_dev); - if (!hso_net->rfkill) { - dev_err(dev, "%s - Out of memory\n", __func__); + if (!hso_net->rfkill) return; - } + if (rfkill_register(hso_net->rfkill) < 0) { rfkill_destroy(hso_net->rfkill); hso_net->rfkill = NULL; From abaf00ff04625275a40e407fc8da04948ae4f95b Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 5 Aug 2020 14:07:09 +0200 Subject: [PATCH 2055/2056] usb: hso: remove bogus check for EINPROGRESS This check has an inherent race: an error code may already have been set or cleared while the URB has not yet been given back. We cannot do such an optimization and must unlink unconditionally. Signed-off-by: Oliver Neukum Signed-off-by: David S.
Miller --- drivers/net/usb/hso.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 5762876e3105..2bb28db89432 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -831,8 +831,7 @@ static void hso_net_tx_timeout(struct net_device *net, unsigned int txqueue) dev_warn(&net->dev, "Tx timed out.\n"); /* Tear the waiting frame off the list */ - if (odev->mux_bulk_tx_urb && - (odev->mux_bulk_tx_urb->status == -EINPROGRESS)) + if (odev->mux_bulk_tx_urb) usb_unlink_urb(odev->mux_bulk_tx_urb); /* Update statistics */ From c1055b76ad00aed0e8b79417080f212d736246b6 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Wed, 5 Aug 2020 13:18:48 -0500 Subject: [PATCH 2056/2056] net: thunderx: initialize VF's mailbox mutex before first usage A VF's mailbox mutex is not getting initialized by nicvf_probe() until after it is first used. And such usage is resulting in... [ 28.270927] ------------[ cut here ]------------ [ 28.270934] DEBUG_LOCKS_WARN_ON(lock->magic != lock) [ 28.270980] WARNING: CPU: 9 PID: 675 at kernel/locking/mutex.c:938 __mutex_lock+0xdac/0x12f0 [ 28.270985] Modules linked in: ast(+) nicvf(+) i2c_algo_bit drm_vram_helper drm_ttm_helper ttm nicpf(+) drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm ixgbe(+) sg thunder_bgx mdio i2c_thunderx mdio_thunder thunder_xcv mdio_cavium dm_mirror dm_region_hash dm_log dm_mod [ 28.271064] CPU: 9 PID: 675 Comm: systemd-udevd Not tainted 4.18.0+ #1 [ 28.271070] Hardware name: GIGABYTE R120-T34-00/MT30-GS2-00, BIOS F02 08/06/2019 [ 28.271078] pstate: 60000005 (nZCv daif -PAN -UAO) [ 28.271086] pc : __mutex_lock+0xdac/0x12f0 [ 28.271092] lr : __mutex_lock+0xdac/0x12f0 [ 28.271097] sp : ffff800d42146fb0 [ 28.271103] x29: ffff800d42146fb0 x28: 0000000000000000 [ 28.271113] x27: ffff800d24361180 x26: dfff200000000000 [ 28.271122] x25: 0000000000000000 x24: 0000000000000002 [ 28.271132] x23: ffff20001597cc80 x22: ffff2000139e9848 [ 28.271141] x21: 0000000000000000 x20: 1ffff001a8428e0c [ 28.271151] x19: ffff200015d5d000 x18: 1ffff001ae0f2184 [ 28.271160] x17: 0000000000000000 x16: 0000000000000000 [ 28.271170] x15: ffff800d70790c38 x14: ffff20001597c000 [ 28.271179] x13: ffff20001597cc80 x12: ffff040002b2f779 [ 28.271189] x11: 1fffe40002b2f778 x10: ffff040002b2f778 [ 28.271199] x9 : 0000000000000000 x8 : 00000000f1f1f1f1 [ 28.271208] x7 : 00000000f2f2f2f2 x6 : 0000000000000000 [ 28.271217] x5 : 1ffff001ae0f2186 x4 : 1fffe400027eb03c [ 28.271227] x3 : dfff200000000000 x2 : ffff1001a8428dbe [ 28.271237] x1 : c87fdfac7ea11d00 x0 : 0000000000000000 [ 28.271246] Call trace: [ 28.271254] __mutex_lock+0xdac/0x12f0 [ 28.271261] mutex_lock_nested+0x3c/0x50 [ 28.271297] nicvf_send_msg_to_pf+0x40/0x3a0 [nicvf] [ 28.271316] nicvf_register_misc_interrupt+0x20c/0x328 [nicvf] [ 28.271334] nicvf_probe+0x508/0xda0 [nicvf] [ 28.271344] local_pci_probe+0xc4/0x180 [ 28.271352] pci_device_probe+0x3ec/0x528 [ 28.271363] driver_probe_device+0x21c/0xb98 [ 28.271371] device_driver_attach+0xe8/0x120 [ 28.271379] __driver_attach+0xe0/0x2a0 [ 28.271386] bus_for_each_dev+0x118/0x190 [ 28.271394] driver_attach+0x48/0x60 [ 28.271401] bus_add_driver+0x328/0x558 [ 28.271409] driver_register+0x148/0x398 [ 28.271416] __pci_register_driver+0x14c/0x1b0 [ 28.271437] nicvf_init_module+0x54/0x10000 [nicvf] [ 28.271447] do_one_initcall+0x18c/0xc18 [ 28.271457] do_init_module+0x18c/0x618 [ 28.271464] load_module+0x2bc0/0x4088 [ 28.271472] __se_sys_finit_module+0x110/0x188 [ 28.271479] 
__arm64_sys_finit_module+0x70/0xa0 [ 28.271490] el0_svc_handler+0x15c/0x380 [ 28.271496] el0_svc+0x8/0xc [ 28.271502] irq event stamp: 52649 [ 28.271513] hardirqs last enabled at (52649): [] _raw_spin_unlock_irqrestore+0xc0/0xd8 [ 28.271522] hardirqs last disabled at (52648): [] _raw_spin_lock_irqsave+0x3c/0xf0 [ 28.271530] softirqs last enabled at (52330): [] __do_softirq+0xacc/0x117c [ 28.271540] softirqs last disabled at (52313): [] irq_exit+0x3cc/0x500 [ 28.271545] ---[ end trace a9b90324c8a0d4ee ]--- This problem is resolved by moving the call to mutex_init() up earlier in nicvf_probe(). Fixes: 609ea65c65a0 ("net: thunderx: add mutex to protect mailbox from concurrent calls for same VF") Signed-off-by: Dean Nelson Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index d22c9d350355..c1378b5c780c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2177,6 +2177,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->max_queues *= 2; nic->ptp_clock = ptp_clock; + /* Initialize mutex that serializes usage of VF's mailbox */ + mutex_init(&nic->rx_mode_mtx); + /* MAP VF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!nic->reg_base) { @@ -2253,7 +2256,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); spin_lock_init(&nic->rx_mode_wq_lock); - mutex_init(&nic->rx_mode_mtx); err = register_netdev(netdev); if (err) {